author     Vitaly Buka <vitalybuka@google.com>  2024-09-24 19:47:53 -0700
committer  Vitaly Buka <vitalybuka@google.com>  2024-09-24 19:47:53 -0700
commit     0e43e154e9421044a564af0d75c2cb046fb3751f (patch)
tree       0c70984ddd3f11895c99b3013a1d92360373808d
parent     1b29ef2e8342cb78c1114fc68dba1aab96baf5d1 (diff)
parent     4ca4460bae12eefe90bf69704a33bdd5b1c9f142 (diff)
download   llvm-users/vitalybuka/spr/main.hwasan-check-order-of-mapping-flags.zip
           llvm-users/vitalybuka/spr/main.hwasan-check-order-of-mapping-flags.tar.gz
           llvm-users/vitalybuka/spr/main.hwasan-check-order-of-mapping-flags.tar.bz2
[𝘀𝗽𝗿] changes introduced through rebase (users/vitalybuka/spr/main.hwasan-check-order-of-mapping-flags)
Created using spr 1.3.4 [skip ci]
Diffstat (mode, path, lines changed):
-rw-r--r--  bolt/include/bolt/Core/BinaryContext.h | 9
-rw-r--r--  bolt/include/bolt/Core/BinaryData.h | 1
-rw-r--r--  bolt/include/bolt/Core/BinaryFunction.h | 1
-rw-r--r--  bolt/include/bolt/Rewrite/RewriteInstance.h | 11
-rw-r--r--  bolt/lib/Core/BinaryContext.cpp | 3
-rw-r--r--  bolt/lib/Passes/RetpolineInsertion.cpp | 2
-rw-r--r--  bolt/lib/Profile/DataAggregator.cpp | 3
-rw-r--r--  bolt/lib/Rewrite/RewriteInstance.cpp | 12
-rw-r--r--  bolt/lib/RuntimeLibs/InstrumentationRuntimeLibrary.cpp | 1
-rw-r--r--  bolt/test/AArch64/ifunc.test (renamed from bolt/test/AArch64/ifunc.c) | 23
-rw-r--r--  bolt/test/Inputs/ifunc.c | 12
-rw-r--r--  bolt/test/Inputs/iplt.ld (renamed from bolt/test/AArch64/Inputs/iplt.ld) | 0
-rw-r--r--  bolt/test/X86/ifunc.test | 47
-rw-r--r--  bolt/test/X86/log.test | 4
-rw-r--r--  bolt/test/perf2bolt/perf_test.test | 9
-rw-r--r--  bolt/unittests/Core/BinaryContext.cpp | 36
-rw-r--r--  clang-tools-extra/docs/clang-tidy/ExternalClang-TidyExamples.rst | 30
-rw-r--r--  clang-tools-extra/docs/clang-tidy/index.rst | 1
-rw-r--r--  clang/CMakeLists.txt | 2
-rw-r--r--  clang/docs/LanguageExtensions.rst | 23
-rw-r--r--  clang/docs/OpenMPSupport.rst | 2
-rw-r--r--  clang/docs/ReleaseNotes.rst | 20
-rw-r--r--  clang/include/clang/Basic/AttrDocs.td | 6
-rw-r--r--  clang/include/clang/Basic/BuiltinsNVPTX.def | 8
-rw-r--r--  clang/include/clang/Basic/CodeGenOptions.def | 1
-rw-r--r--  clang/include/clang/Basic/DiagnosticSemaKinds.td | 6
-rw-r--r--  clang/include/clang/Driver/Options.td | 11
-rw-r--r--  clang/include/clang/Frontend/ASTUnit.h | 4
-rw-r--r--  clang/include/clang/Frontend/MultiplexConsumer.h | 3
-rw-r--r--  clang/include/clang/Interpreter/Interpreter.h | 58
-rw-r--r--  clang/include/clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h | 4
-rw-r--r--  clang/include/clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h | 6
-rw-r--r--  clang/include/clang/StaticAnalyzer/Core/PathSensitive/Store.h | 4
-rw-r--r--  clang/lib/AST/ASTContext.cpp | 3
-rw-r--r--  clang/lib/AST/ByteCode/Compiler.cpp | 115
-rw-r--r--  clang/lib/AST/ByteCode/Interp.cpp | 87
-rw-r--r--  clang/lib/AST/ByteCode/Interp.h | 11
-rw-r--r--  clang/lib/AST/ByteCode/InterpBuiltin.cpp | 14
-rw-r--r--  clang/lib/AST/ByteCode/InterpFrame.cpp | 26
-rw-r--r--  clang/lib/AST/ByteCode/InterpFrame.h | 2
-rw-r--r--  clang/lib/AST/ByteCode/Opcodes.td | 14
-rw-r--r--  clang/lib/Basic/Targets/BPF.cpp | 3
-rw-r--r--  clang/lib/Basic/Targets/RISCV.cpp | 12
-rw-r--r--  clang/lib/Basic/Targets/RISCV.h | 3
-rw-r--r--  clang/lib/Basic/Targets/SPIR.h | 6
-rw-r--r--  clang/lib/CodeGen/BackendUtil.cpp | 2
-rw-r--r--  clang/lib/CodeGen/CGAtomic.cpp | 13
-rw-r--r--  clang/lib/CodeGen/CGBuiltin.cpp | 13
-rw-r--r--  clang/lib/CodeGen/CGDebugInfo.cpp | 6
-rw-r--r--  clang/lib/CodeGen/CGExprAgg.cpp | 40
-rw-r--r--  clang/lib/CodeGen/CGExprConstant.cpp | 93
-rw-r--r--  clang/lib/CodeGen/CGExprScalar.cpp | 2
-rw-r--r--  clang/lib/CodeGen/CGOpenMPRuntime.cpp | 10
-rw-r--r--  clang/lib/CodeGen/CGStmt.cpp | 1
-rw-r--r--  clang/lib/CodeGen/CGStmtOpenMP.cpp | 15
-rw-r--r--  clang/lib/CodeGen/CodeGenModule.cpp | 9
-rw-r--r--  clang/lib/CodeGen/CodeGenModule.h | 51
-rw-r--r--  clang/lib/CodeGen/SanitizerMetadata.cpp | 2
-rw-r--r--  clang/lib/CodeGen/Targets/SPIR.cpp | 36
-rw-r--r--  clang/lib/CrossTU/CrossTranslationUnit.cpp | 6
-rw-r--r--  clang/lib/Frontend/ASTUnit.cpp | 2
-rw-r--r--  clang/lib/Frontend/FrontendAction.cpp | 10
-rw-r--r--  clang/lib/Frontend/MultiplexConsumer.cpp | 7
-rw-r--r--  clang/lib/Frontend/Rewrite/RewriteObjC.cpp | 82
-rw-r--r--  clang/lib/Interpreter/CMakeLists.txt | 1
-rw-r--r--  clang/lib/Interpreter/DeviceOffload.cpp | 21
-rw-r--r--  clang/lib/Interpreter/DeviceOffload.h | 21
-rw-r--r--  clang/lib/Interpreter/IncrementalExecutor.cpp | 2
-rw-r--r--  clang/lib/Interpreter/IncrementalParser.cpp | 272
-rw-r--r--  clang/lib/Interpreter/IncrementalParser.h | 53
-rw-r--r--  clang/lib/Interpreter/Interpreter.cpp | 650
-rw-r--r--  clang/lib/Interpreter/InterpreterValuePrinter.cpp | 400
-rw-r--r--  clang/lib/Sema/CheckExprLifetime.cpp | 19
-rw-r--r--  clang/lib/Sema/CheckExprLifetime.h | 6
-rw-r--r--  clang/lib/Sema/SemaDecl.cpp | 18
-rw-r--r--  clang/lib/Sema/SemaHLSL.cpp | 20
-rw-r--r--  clang/lib/Sema/SemaLookup.cpp | 5
-rw-r--r--  clang/lib/Sema/SemaStmt.cpp | 10
-rw-r--r--  clang/lib/Sema/SemaTemplate.cpp | 1
-rw-r--r--  clang/lib/StaticAnalyzer/Checkers/StackAddrEscapeChecker.cpp | 4
-rw-r--r--  clang/lib/StaticAnalyzer/Core/ProgramState.cpp | 30
-rw-r--r--  clang/lib/StaticAnalyzer/Core/RegionStore.cpp | 90
-rw-r--r--  clang/lib/StaticAnalyzer/Core/SValBuilder.cpp | 4
-rw-r--r--  clang/test/AST/ByteCode/codegen.m | 5
-rw-r--r--  clang/test/AST/ByteCode/cxx20.cpp | 2
-rw-r--r--  clang/test/AST/ByteCode/new-delete.cpp | 77
-rw-r--r--  clang/test/AST/ByteCode/placement-new.cpp | 263
-rw-r--r--  clang/test/Analysis/stack-addr-ps.cpp | 33
-rw-r--r--  clang/test/CXX/class/class.friend/p7-cxx20.cpp | 47
-rw-r--r--  clang/test/CXX/class/class.mfct/p1-cxx20.cpp | 38
-rw-r--r--  clang/test/CodeGen/2008-07-22-bitfield-init-after-zero-len-array.c | 2
-rw-r--r--  clang/test/CodeGen/2008-08-07-AlignPadding1.c | 4
-rw-r--r--  clang/test/CodeGen/2009-06-14-anonymous-union-init.c | 4
-rw-r--r--  clang/test/CodeGen/64bit-swiftcall.c | 12
-rw-r--r--  clang/test/CodeGen/PowerPC/builtins-ppc-build-pair-mma.c | 2
-rw-r--r--  clang/test/CodeGen/PowerPC/builtins-ppc-pair-mma-types.c | 8
-rw-r--r--  clang/test/CodeGen/arm-swiftcall.c | 4
-rw-r--r--  clang/test/CodeGen/bpf-attr-type-tag-atomic.c | 16
-rw-r--r--  clang/test/CodeGen/const-init.c | 4
-rw-r--r--  clang/test/CodeGen/decl.c | 4
-rw-r--r--  clang/test/CodeGen/designated-initializers.c | 12
-rw-r--r--  clang/test/CodeGen/ext-int.c | 18
-rw-r--r--  clang/test/CodeGen/flexible-array-init.c | 24
-rw-r--r--  clang/test/CodeGen/global-init.c | 2
-rw-r--r--  clang/test/CodeGen/init.c | 19
-rw-r--r--  clang/test/CodeGen/inline-asm-output-variant.c | 26
-rw-r--r--  clang/test/CodeGen/linux-kernel-struct-union-initializer.c | 267
-rw-r--r--  clang/test/CodeGen/linux-kernel-struct-union-initializer2.c | 140
-rw-r--r--  clang/test/CodeGen/mingw-long-double.c | 9
-rw-r--r--  clang/test/CodeGen/mms-bitfields.c | 4
-rw-r--r--  clang/test/CodeGen/scoped-atomic-ops.c | 336
-rw-r--r--  clang/test/CodeGen/union-init2.c | 4
-rw-r--r--  clang/test/CodeGen/windows-swiftcall.c | 12
-rw-r--r--  clang/test/CodeGenCXX/debug-info-line-if-2.cpp | 45
-rw-r--r--  clang/test/CodeGenObjC/designated-initializers.m | 2
-rw-r--r--  clang/test/CodeGenOpenCL/atomic-builtins-default-to-device-scope.cl | 235
-rw-r--r--  clang/test/CodeGenOpenCL/builtins-amdgcn.cl | 6
-rw-r--r--  clang/test/Driver/relax.s | 2
-rw-r--r--  clang/test/Misc/cc1as-output-asm-variant.c | 8
-rw-r--r--  clang/test/Modules/GH109879-1.cpp | 25
-rw-r--r--  clang/test/Modules/GH109879-2.cpp | 29
-rw-r--r--  clang/test/Preprocessor/bpf-predefined-macros.c | 8
-rw-r--r--  clang/test/Sema/riscv-asm.c | 17
-rw-r--r--  clang/test/Sema/scoped-atomic-ops.c | 1
-rw-r--r--  clang/test/SemaCXX/attr-gsl-owner-pointer-std.cpp | 17
-rw-r--r--  clang/test/SemaCXX/attr-musttail.cpp | 31
-rw-r--r--  clang/tools/CMakeLists.txt | 2
-rw-r--r--  clang/tools/c-index-test/core_main.cpp | 12
-rw-r--r--  clang/tools/clang-extdef-mapping/ClangExtDefMapGen.cpp | 2
-rw-r--r--  clang/unittests/Frontend/ASTUnitTest.cpp | 4
-rw-r--r--  clang/unittests/Interpreter/CodeCompletionTest.cpp | 2
-rw-r--r--  clang/unittests/Interpreter/InterpreterExtensionsTest.cpp | 64
-rw-r--r--  cmake/Modules/CMakePolicy.cmake | 6
-rw-r--r--  compiler-rt/lib/builtins/fp_lib.h | 5
-rw-r--r--  compiler-rt/lib/fuzzer/FuzzerUtilWindows.cpp | 5
-rw-r--r--  compiler-rt/lib/nsan/nsan.cpp | 4
-rw-r--r--  compiler-rt/lib/nsan/nsan_flags.inc | 5
-rw-r--r--  compiler-rt/lib/rtsan/rtsan.cpp | 49
-rw-r--r--  compiler-rt/lib/rtsan/rtsan_diagnostics.cpp | 58
-rw-r--r--  compiler-rt/lib/rtsan/rtsan_diagnostics.h | 19
-rw-r--r--  compiler-rt/lib/rtsan/rtsan_flags.inc | 3
-rw-r--r--  compiler-rt/lib/rtsan/rtsan_interceptors.cpp | 56
-rw-r--r--  compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors.inc | 43
-rw-r--r--  compiler-rt/lib/sanitizer_common/sanitizer_linux.cpp | 23
-rw-r--r--  compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.cpp | 24
-rw-r--r--  compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.h | 3
-rw-r--r--  compiler-rt/lib/sanitizer_common/sanitizer_posix.cpp | 10
-rw-r--r--  compiler-rt/lib/sanitizer_common/sanitizer_posix.h | 1
-rw-r--r--  compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_report.cpp | 15
-rw-r--r--  compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp | 20
-rw-r--r--  compiler-rt/lib/ubsan/ubsan_handlers.cpp | 6
-rw-r--r--  compiler-rt/test/asan/TestCases/Linux/preinstalled_signal.cpp | 3
-rw-r--r--  compiler-rt/test/asan/TestCases/Posix/coverage-fork.cpp | 7
-rw-r--r--  compiler-rt/test/asan/lit.cfg.py | 22
-rw-r--r--  compiler-rt/test/builtins/Unit/multf3_test.c | 6
-rw-r--r--  compiler-rt/test/nsan/fcmp.cpp | 28
-rw-r--r--  compiler-rt/test/profile/Posix/instrprof-dlopen-norpath.test | 8
-rw-r--r--  compiler-rt/test/rtsan/halt_on_error.cpp | 26
-rw-r--r--  compiler-rt/test/sanitizer_common/TestCases/Linux/prctl.cpp | 10
-rw-r--r--  compiler-rt/test/ubsan/TestCases/Integer/suppressions-builtin.cpp | 18
-rw-r--r--  compiler-rt/test/ubsan/TestCases/Misc/builtins.cpp | 14
-rw-r--r--  flang/include/flang/Optimizer/Transforms/Passes.h | 1
-rw-r--r--  flang/include/flang/Optimizer/Transforms/Passes.td | 7
-rw-r--r--  flang/lib/Optimizer/Transforms/CMakeLists.txt | 1
-rw-r--r--  flang/lib/Optimizer/Transforms/CUFAddConstructor.cpp | 75
-rw-r--r--  flang/test/Driver/target-cpu-features.f90 | 12
-rw-r--r--  flang/test/Fir/CUDA/cuda-constructor.f90 | 12
-rw-r--r--  libc/cmake/modules/CheckCompilerFeatures.cmake | 19
-rw-r--r--  libc/cmake/modules/compiler_features/check_float16_conversion.cpp | 30
-rw-r--r--  libc/config/gpu/entrypoints.txt | 2
-rw-r--r--  libc/config/gpu/headers.txt | 1
-rw-r--r--  libc/docs/gpu/support.rst | 26
-rw-r--r--  libc/include/llvm-libc-macros/math-function-macros.h | 1
-rw-r--r--  libc/include/llvm-libc-types/rpc_opcodes_t.h | 2
-rw-r--r--  libc/newhdrgen/yaml/stdlib.yaml | 6
-rw-r--r--  libc/spec/stdc.td | 2
-rw-r--r--  libc/src/__support/FPUtil/CMakeLists.txt | 27
-rw-r--r--  libc/src/__support/FPUtil/ManipulationFunctions.h | 12
-rw-r--r--  libc/src/__support/FPUtil/cast.h | 65
-rw-r--r--  libc/src/__support/FPUtil/dyadic_float.h | 131
-rw-r--r--  libc/src/__support/FPUtil/except_value_utils.h | 18
-rw-r--r--  libc/src/__support/FPUtil/generic/CMakeLists.txt | 7
-rw-r--r--  libc/src/__support/FPUtil/generic/FMA.h | 5
-rw-r--r--  libc/src/__support/FPUtil/generic/add_sub.h | 5
-rw-r--r--  libc/src/__support/FPUtil/generic/sqrt.h | 3
-rw-r--r--  libc/src/math/generic/CMakeLists.txt | 24
-rw-r--r--  libc/src/math/generic/ceilf16.cpp | 3
-rw-r--r--  libc/src/math/generic/exp10f16.cpp | 11
-rw-r--r--  libc/src/math/generic/exp2f16.cpp | 3
-rw-r--r--  libc/src/math/generic/expf16.cpp | 5
-rw-r--r--  libc/src/math/generic/expm1f16.cpp | 7
-rw-r--r--  libc/src/math/generic/floorf16.cpp | 3
-rw-r--r--  libc/src/math/generic/rintf16.cpp | 3
-rw-r--r--  libc/src/math/generic/roundevenf16.cpp | 3
-rw-r--r--  libc/src/math/generic/roundf16.cpp | 3
-rw-r--r--  libc/src/math/generic/truncf16.cpp | 3
-rw-r--r--  libc/src/stdio/gpu/CMakeLists.txt | 11
-rw-r--r--  libc/src/stdio/gpu/rename.cpp | 30
-rw-r--r--  libc/src/stdlib/CMakeLists.txt | 7
-rw-r--r--  libc/src/stdlib/gpu/CMakeLists.txt | 13
-rw-r--r--  libc/src/stdlib/gpu/system.cpp | 29
-rw-r--r--  libc/src/stdlib/system.h | 20
-rw-r--r--  libc/src/sys/socket/linux/CMakeLists.txt | 4
-rw-r--r--  libc/src/sys/socket/linux/recv.cpp | 4
-rw-r--r--  libc/src/sys/socket/linux/recvfrom.cpp | 4
-rw-r--r--  libc/src/sys/socket/linux/recvmsg.cpp | 9
-rw-r--r--  libc/src/sys/socket/linux/socketpair.cpp | 6
-rw-r--r--  libc/test/include/CMakeLists.txt | 45
-rw-r--r--  libc/test/include/IsSubnormalTest.h | 49
-rw-r--r--  libc/test/include/issubnormal_test.c | 24
-rw-r--r--  libc/test/include/issubnormal_test.cpp | 12
-rw-r--r--  libc/test/include/issubnormalf_test.cpp | 12
-rw-r--r--  libc/test/include/issubnormall_test.cpp | 12
-rw-r--r--  libc/test/src/math/smoke/AddTest.h | 42
-rw-r--r--  libc/test/src/math/smoke/CMakeLists.txt | 15
-rw-r--r--  libc/test/src/math/smoke/DivTest.h | 82
-rw-r--r--  libc/test/src/math/smoke/FModTest.h | 64
-rw-r--r--  libc/test/src/math/smoke/FmaTest.h | 28
-rw-r--r--  libc/test/src/math/smoke/ModfTest.h | 2
-rw-r--r--  libc/test/src/math/smoke/MulTest.h | 52
-rw-r--r--  libc/test/src/math/smoke/NextTowardTest.h | 14
-rw-r--r--  libc/test/src/math/smoke/SqrtTest.h | 16
-rw-r--r--  libc/test/src/math/smoke/SubTest.h | 40
-rw-r--r--  libc/test/src/math/smoke/exp10f16_test.cpp | 14
-rw-r--r--  libc/test/src/math/smoke/exp2f16_test.cpp | 14
-rw-r--r--  libc/test/src/math/smoke/expf16_test.cpp | 14
-rw-r--r--  libc/test/src/math/smoke/expm1f16_test.cpp | 44
-rw-r--r--  libc/utils/MPFRWrapper/CMakeLists.txt | 1
-rw-r--r--  libc/utils/MPFRWrapper/MPFRUtils.cpp | 3
-rw-r--r--  libc/utils/gpu/server/rpc_server.cpp | 29
-rw-r--r--  libcxx/docs/Status/Cxx23Issues.csv | 2
-rw-r--r--  libcxx/include/__memory/unique_temporary_buffer.h | 2
-rw-r--r--  libcxx/utils/ci/docker-compose.yml | 2
-rw-r--r--  libcxxabi/src/demangle/ItaniumDemangle.h | 1
-rw-r--r--  libcxxabi/test/test_demangle.pass.cpp | 3
-rw-r--r--  lld/COFF/Driver.cpp | 26
-rw-r--r--  lld/COFF/Driver.h | 2
-rw-r--r--  lld/COFF/Options.td | 3
-rw-r--r--  lld/COFF/Writer.cpp | 8
-rw-r--r--  lld/Common/DriverDispatcher.cpp | 3
-rw-r--r--  lld/ELF/Arch/ARM.cpp | 44
-rw-r--r--  lld/ELF/Arch/PPC64.cpp | 2
-rw-r--r--  lld/ELF/Config.h | 2
-rw-r--r--  lld/ELF/Driver.cpp | 54
-rw-r--r--  lld/ELF/ICF.cpp | 4
-rw-r--r--  lld/ELF/InputFiles.cpp | 49
-rw-r--r--  lld/ELF/InputSection.cpp | 2
-rw-r--r--  lld/ELF/LTO.cpp | 2
-rw-r--r--  lld/ELF/LinkerScript.cpp | 12
-rw-r--r--  lld/ELF/MarkLive.cpp | 24
-rw-r--r--  lld/ELF/Relocations.cpp | 14
-rw-r--r--  lld/ELF/ScriptParser.cpp | 2
-rw-r--r--  lld/ELF/SymbolTable.cpp | 2
-rw-r--r--  lld/ELF/SymbolTable.h | 2
-rw-r--r--  lld/ELF/SyntheticSections.cpp | 15
-rw-r--r--  lld/ELF/SyntheticSections.h | 2
-rw-r--r--  lld/ELF/Writer.cpp | 111
-rw-r--r--  lld/ELF/Writer.h | 4
-rw-r--r--  lld/MachO/InputSection.cpp | 3
-rw-r--r--  lld/docs/ReleaseNotes.rst | 1
-rw-r--r--  lld/test/COFF/Inputs/include1d.yaml | 29
-rw-r--r--  lld/test/COFF/include.test | 54
-rw-r--r--  lld/test/COFF/include2.test | 10
-rw-r--r--  lld/test/wasm/unsupported-pic-relocations.s | 6
-rw-r--r--  lld/test/wasm/unsupported-pic-relocations64.s | 6
-rw-r--r--  lld/wasm/Relocations.cpp | 10
-rw-r--r--  lldb/bindings/python/python-wrapper.swig | 73
-rw-r--r--  lldb/docs/index.rst | 1
-rw-r--r--  lldb/docs/resources/addinglanguagesupport.md | 95
-rw-r--r--  lldb/docs/use/python-reference.rst | 185
-rw-r--r--  lldb/examples/python/cmdtemplate.py | 15
-rw-r--r--  lldb/examples/python/templates/parsed_cmd.py | 97
-rw-r--r--  lldb/include/lldb/Interpreter/ScriptInterpreter.h | 14
-rw-r--r--  lldb/include/lldb/Symbol/UnwindPlan.h | 21
-rw-r--r--  lldb/include/lldb/Target/ABI.h | 6
-rw-r--r--  lldb/include/lldb/Target/RegisterContextUnwind.h | 25
-rw-r--r--  lldb/include/lldb/Target/UnwindLLDB.h | 8
-rw-r--r--  lldb/include/lldb/Utility/CompletionRequest.h | 2
-rw-r--r--  lldb/include/lldb/Utility/Status.h | 83
-rw-r--r--  lldb/source/Commands/CommandObjectCommands.cpp | 191
-rw-r--r--  lldb/source/Commands/CommandObjectFrame.cpp | 2
-rw-r--r--  lldb/source/Commands/CommandObjectProcess.cpp | 2
-rw-r--r--  lldb/source/Commands/CommandObjectScripting.cpp | 2
-rw-r--r--  lldb/source/Commands/Options.td | 2
-rw-r--r--  lldb/source/Interpreter/CommandInterpreter.cpp | 2
-rw-r--r--  lldb/source/Interpreter/Options.cpp | 5
-rw-r--r--  lldb/source/Plugins/ABI/AArch64/ABIAArch64.cpp | 40
-rw-r--r--  lldb/source/Plugins/ABI/SystemZ/ABISysV_s390x.cpp | 2
-rw-r--r--  lldb/source/Plugins/ABI/SystemZ/ABISysV_s390x.h | 3
-rw-r--r--  lldb/source/Plugins/Process/FreeBSD/NativeProcessFreeBSD.cpp | 2
-rw-r--r--  lldb/source/Plugins/Process/gdb-remote/ProcessGDBRemote.cpp | 10
-rw-r--r--  lldb/source/Plugins/ScriptInterpreter/Python/PythonDataObjects.cpp | 29
-rw-r--r--  lldb/source/Plugins/ScriptInterpreter/Python/SWIGPythonBridge.h | 9
-rw-r--r--  lldb/source/Plugins/ScriptInterpreter/Python/ScriptInterpreterPython.cpp | 40
-rw-r--r--  lldb/source/Plugins/ScriptInterpreter/Python/ScriptInterpreterPythonImpl.h | 8
-rw-r--r--  lldb/source/Plugins/SymbolFile/Breakpad/SymbolFileBreakpad.cpp | 4
-rw-r--r--  lldb/source/Plugins/SymbolFile/DWARF/DWARFFormValue.cpp | 54
-rw-r--r--  lldb/source/Plugins/SymbolFile/DWARF/DWARFFormValue.h | 24
-rw-r--r--  lldb/source/Plugins/UnwindAssembly/x86/UnwindAssembly-x86.cpp | 4
-rw-r--r--  lldb/source/Plugins/UnwindAssembly/x86/x86AssemblyInspectionEngine.cpp | 8
-rw-r--r--  lldb/source/Symbol/ArmUnwindInfo.cpp | 2
-rw-r--r--  lldb/source/Symbol/DWARFCallFrameInfo.cpp | 16
-rw-r--r--  lldb/source/Symbol/FuncUnwinders.cpp | 4
-rw-r--r--  lldb/source/Symbol/UnwindPlan.cpp | 45
-rw-r--r--  lldb/source/Target/ABI.cpp | 2
-rw-r--r--  lldb/source/Target/RegisterContextUnwind.cpp | 86
-rw-r--r--  lldb/source/Target/Target.cpp | 134
-rw-r--r--  lldb/source/Target/TargetProperties.td | 2
-rw-r--r--  lldb/source/Target/UnwindLLDB.cpp | 10
-rw-r--r--  lldb/source/Utility/Status.cpp | 256
-rw-r--r--  lldb/test/API/commands/command/script/add/TestAddParsedCommand.py | 132
-rw-r--r--  lldb/test/API/commands/command/script/add/test_commands.py | 69
-rw-r--r--  lldb/test/API/functionalities/gdb_remote_client/TestAArch64XMLRegistersSVEOnly.py | 121
-rw-r--r--  lldb/test/API/functionalities/gdb_remote_client/TestGDBRemotePlatformFile.py | 12
-rw-r--r--  lldb/test/API/macosx/expedited-thread-pcs/Makefile | 11
-rw-r--r--  lldb/test/API/macosx/expedited-thread-pcs/TestExpeditedThreadPCs.py | 91
-rw-r--r--  lldb/test/API/macosx/expedited-thread-pcs/foo.c | 1
-rw-r--r--  lldb/test/API/macosx/expedited-thread-pcs/main.cpp | 62
-rw-r--r--  lldb/unittests/ScriptInterpreter/Python/PythonTestSuite.cpp | 13
-rw-r--r--  lldb/unittests/UnwindAssembly/ARM64/TestArm64InstEmulation.cpp | 14
-rw-r--r--  lldb/unittests/UnwindAssembly/PPC64/TestPPC64InstEmulation.cpp | 4
-rw-r--r--  lldb/unittests/UnwindAssembly/x86/Testx86AssemblyInspectionEngine.cpp | 102
-rw-r--r--  lldb/unittests/Utility/StatusTest.cpp | 8
-rw-r--r--  llvm/cmake/modules/CMakeLists.txt | 3
-rw-r--r--  llvm/cmake/modules/LLVMConfig.cmake.in | 3
-rw-r--r--  llvm/docs/AMDGPUUsage.rst | 4
-rw-r--r--  llvm/docs/CMake.rst | 12
-rw-r--r--  llvm/docs/NVPTXUsage.rst | 63
-rw-r--r--  llvm/docs/ReleaseNotes.rst | 26
-rw-r--r--  llvm/docs/TableGen/ProgRef.rst | 18
-rw-r--r--  llvm/include/llvm-c/Core.h | 6
-rw-r--r--  llvm/include/llvm/Analysis/CtxProfAnalysis.h | 3
-rw-r--r--  llvm/include/llvm/Analysis/DXILMetadataAnalysis.h | 8
-rw-r--r--  llvm/include/llvm/Analysis/Loads.h | 14
-rw-r--r--  llvm/include/llvm/Analysis/PtrUseVisitor.h | 33
-rw-r--r--  llvm/include/llvm/Analysis/ScalarEvolution.h | 20
-rw-r--r--  llvm/include/llvm/Analysis/ValueTracking.h | 4
-rw-r--r--  llvm/include/llvm/BinaryFormat/ELFRelocs/x86_64.def | 1
-rw-r--r--  llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h | 3
-rw-r--r--  llvm/include/llvm/CodeGen/ISDOpcodes.h | 2
-rw-r--r--  llvm/include/llvm/CodeGen/LiveInterval.h | 10
-rw-r--r--  llvm/include/llvm/CodeGen/MIRYamlMapping.h | 11
-rw-r--r--  llvm/include/llvm/CodeGen/MachineFunction.h | 5
-rw-r--r--  llvm/include/llvm/CodeGen/RuntimeLibcallUtil.h | 4
-rw-r--r--  llvm/include/llvm/Demangle/ItaniumDemangle.h | 1
-rw-r--r--  llvm/include/llvm/ExecutionEngine/JITLink/x86_64.h | 22
-rw-r--r--  llvm/include/llvm/IR/Attributes.h | 2
-rw-r--r--  llvm/include/llvm/IR/IRBuilder.h | 1
-rw-r--r--  llvm/include/llvm/IR/IntrinsicsNVVM.td | 76
-rw-r--r--  llvm/include/llvm/IR/LLVMContext.h | 11
-rw-r--r--  llvm/include/llvm/IR/Type.h | 12
-rw-r--r--  llvm/include/llvm/MC/MCTargetOptions.h | 2
-rw-r--r--  llvm/include/llvm/Option/OptTable.h | 2
-rw-r--r--  llvm/include/llvm/ProfileData/MemProf.h | 31
-rw-r--r--  llvm/include/llvm/SandboxIR/SandboxIR.h | 96
-rw-r--r--  llvm/include/llvm/SandboxIR/SandboxIRValues.def | 1
-rw-r--r--  llvm/include/llvm/SandboxIR/Type.h | 1
-rw-r--r--  llvm/include/llvm/SandboxIR/Utils.h | 54
-rw-r--r--  llvm/include/llvm/Support/OptionStrCmp.h | 64
-rw-r--r--  llvm/include/llvm/Support/raw_ostream.h | 31
-rw-r--r--  llvm/include/llvm/TableGen/Record.h | 3
-rw-r--r--  llvm/include/llvm/Target/GlobalISel/Combine.td | 10
-rw-r--r--  llvm/include/llvm/Transforms/Vectorize/LoopVectorizationLegality.h | 22
-rw-r--r--  llvm/include/llvm/Transforms/Vectorize/SandboxVectorizer/Region.h | 13
-rw-r--r--  llvm/lib/Analysis/CtxProfAnalysis.cpp | 9
-rw-r--r--  llvm/lib/Analysis/DXILMetadataAnalysis.cpp | 8
-rw-r--r--  llvm/lib/Analysis/Loads.cpp | 28
-rw-r--r--  llvm/lib/Analysis/LoopAccessAnalysis.cpp | 17
-rw-r--r--  llvm/lib/Analysis/MemDerefPrinter.cpp | 4
-rw-r--r--  llvm/lib/Analysis/ScalarEvolution.cpp | 52
-rw-r--r--  llvm/lib/Analysis/VectorUtils.cpp | 2
-rw-r--r--  llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp | 5
-rw-r--r--  llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp | 106
-rw-r--r--  llvm/lib/CodeGen/LLVMTargetMachine.cpp | 8
-rw-r--r--  llvm/lib/CodeGen/LiveInterval.cpp | 66
-rw-r--r--  llvm/lib/CodeGen/LiveIntervals.cpp | 2
-rw-r--r--  llvm/lib/CodeGen/MIRParser/MIRParser.cpp | 54
-rw-r--r--  llvm/lib/CodeGen/MIRPrinter.cpp | 7
-rw-r--r--  llvm/lib/CodeGen/MachineBlockPlacement.cpp | 557
-rw-r--r--  llvm/lib/CodeGen/MachineOperand.cpp | 3
-rw-r--r--  llvm/lib/CodeGen/MachineScheduler.cpp | 8
-rw-r--r--  llvm/lib/CodeGen/MachineSink.cpp | 5
-rw-r--r--  llvm/lib/CodeGen/MachineVerifier.cpp | 250
-rw-r--r--  llvm/lib/CodeGen/RegAllocGreedy.cpp | 10
-rw-r--r--  llvm/lib/CodeGen/RegisterCoalescer.cpp | 7
-rw-r--r--  llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 15
-rw-r--r--  llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp | 106
-rw-r--r--  llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp | 64
-rw-r--r--  llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp | 7
-rw-r--r--  llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp | 16
-rw-r--r--  llvm/lib/CodeGen/StackProtector.cpp | 2
-rw-r--r--  llvm/lib/CodeGen/TargetLoweringBase.cpp | 5
-rw-r--r--  llvm/lib/DWARFLinker/Classic/DWARFStreamer.cpp | 5
-rw-r--r--  llvm/lib/DWARFLinker/Parallel/DWARFLinkerTypeUnit.cpp | 11
-rw-r--r--  llvm/lib/DWARFLinker/Parallel/DebugLineSectionEmitter.h | 4
-rw-r--r--  llvm/lib/ExecutionEngine/JITLink/ELF_x86_64.cpp | 3
-rw-r--r--  llvm/lib/ExecutionEngine/JITLink/x86_64.cpp | 2
-rw-r--r--  llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp | 4
-rw-r--r--  llvm/lib/IR/Attributes.cpp | 2
-rw-r--r--  llvm/lib/IR/AutoUpgrade.cpp | 203
-rw-r--r--  llvm/lib/IR/Core.cpp | 10
-rw-r--r--  llvm/lib/IR/LLVMContext.cpp | 8
-rw-r--r--  llvm/lib/MC/ELFObjectWriter.cpp | 7
-rw-r--r--  llvm/lib/MC/MCTargetOptionsCommandFlags.cpp | 4
-rw-r--r--  llvm/lib/Object/COFFObjectFile.cpp | 2
-rw-r--r--  llvm/lib/SandboxIR/SandboxIR.cpp | 8
-rw-r--r--  llvm/lib/Support/ModRef.cpp | 104
-rw-r--r--  llvm/lib/Support/OptionStrCmp.cpp | 86
-rw-r--r--  llvm/lib/TableGen/Record.cpp | 29
-rw-r--r--  llvm/lib/TableGen/TGLexer.cpp | 1
-rw-r--r--  llvm/lib/TableGen/TGLexer.h | 1
-rw-r--r--  llvm/lib/TableGen/TGParser.cpp | 32
-rw-r--r--  llvm/lib/Target/AArch64/AArch64Combine.td | 2
-rw-r--r--  llvm/lib/Target/AArch64/AArch64FrameLowering.cpp | 11
-rw-r--r--  llvm/lib/Target/AArch64/AArch64ISelLowering.cpp | 50
-rw-r--r--  llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td | 2
-rw-r--r--  llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp | 32
-rw-r--r--  llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp | 90
-rw-r--r--  llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp | 3
-rw-r--r--  llvm/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp | 2
-rw-r--r--  llvm/lib/Target/AArch64/MCTargetDesc/AArch64InstPrinter.cpp | 50
-rw-r--r--  llvm/lib/Target/AArch64/SVEInstrFormats.td | 6
-rw-r--r--  llvm/lib/Target/AArch64/Utils/AArch64BaseInfo.h | 21
-rw-r--r--  llvm/lib/Target/AMDGPU/AMDGPU.h | 3
-rw-r--r--  llvm/lib/Target/AMDGPU/AMDGPU.td | 17
-rw-r--r--  llvm/lib/Target/AMDGPU/AMDGPUArgumentUsageInfo.h | 2
-rw-r--r--  llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp | 9
-rw-r--r--  llvm/lib/Target/AMDGPU/AMDGPUInsertSingleUseVDST.cpp | 245
-rw-r--r--  llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp | 10
-rw-r--r--  llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp | 13
-rw-r--r--  llvm/lib/Target/AMDGPU/BUFInstructions.td | 5
-rw-r--r--  llvm/lib/Target/AMDGPU/CMakeLists.txt | 1
-rw-r--r--  llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp | 1
-rw-r--r--  llvm/lib/Target/AMDGPU/FLATInstructions.td | 8
-rw-r--r--  llvm/lib/Target/AMDGPU/GCNRewritePartialRegUses.cpp | 2
-rw-r--r--  llvm/lib/Target/AMDGPU/GCNSubtarget.h | 3
-rw-r--r--  llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp | 8
-rw-r--r--  llvm/lib/Target/AMDGPU/SIFrameLowering.cpp | 13
-rw-r--r--  llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 5
-rw-r--r--  llvm/lib/Target/AMDGPU/SIInstrInfo.cpp | 19
-rw-r--r--  llvm/lib/Target/AMDGPU/SIInstrInfo.h | 10
-rw-r--r--  llvm/lib/Target/AMDGPU/SIInstrInfo.td | 2
-rw-r--r--  llvm/lib/Target/AMDGPU/SILowerControlFlow.cpp | 2
-rw-r--r--  llvm/lib/Target/AMDGPU/SILowerSGPRSpills.cpp | 13
-rw-r--r--  llvm/lib/Target/AMDGPU/SIMachineScheduler.h | 4
-rw-r--r--  llvm/lib/Target/AMDGPU/SIRegisterInfo.td | 11
-rw-r--r--  llvm/lib/Target/AMDGPU/SOPInstructions.td | 11
-rw-r--r--  llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp | 18
-rw-r--r--  llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h | 2
-rw-r--r--  llvm/lib/Target/AMDGPU/VOP1Instructions.td | 18
-rw-r--r--  llvm/lib/Target/AMDGPU/VOP2Instructions.td | 6
-rw-r--r--  llvm/lib/Target/AMDGPU/VOP3Instructions.td | 35
-rw-r--r--  llvm/lib/Target/AMDGPU/VOP3PInstructions.td | 12
-rw-r--r--  llvm/lib/Target/AMDGPU/VOPCInstructions.td | 13
-rw-r--r--  llvm/lib/Target/AMDGPU/VOPInstructions.td | 20
-rw-r--r--  llvm/lib/Target/ARM/ARMExpandPseudoInsts.cpp | 5
-rw-r--r--  llvm/lib/Target/ARM/ARMFeatures.td | 3
-rw-r--r--  llvm/lib/Target/ARM/ARMISelLowering.cpp | 4
-rw-r--r--  llvm/lib/Target/ARM/ARMLowOverheadLoops.cpp | 3
-rw-r--r--  llvm/lib/Target/ARM/ARMProcessors.td | 2
-rw-r--r--  llvm/lib/Target/BPF/BPFInstrInfo.td | 134
-rw-r--r--  llvm/lib/Target/BPF/BPFMIChecking.cpp | 6
-rw-r--r--  llvm/lib/Target/BPF/BTFDebug.cpp | 29
-rw-r--r--  llvm/lib/Target/DirectX/CMakeLists.txt | 1
-rw-r--r--  llvm/lib/Target/DirectX/DXContainerGlobals.cpp | 10
-rw-r--r--  llvm/lib/Target/DirectX/DXILMetadata.cpp | 335
-rw-r--r--  llvm/lib/Target/DirectX/DXILMetadata.h | 43
-rw-r--r--  llvm/lib/Target/DirectX/DXILPrepare.cpp | 8
-rw-r--r--  llvm/lib/Target/DirectX/DXILTranslateMetadata.cpp | 318
-rw-r--r--  llvm/lib/Target/Hexagon/HexagonExpandCondsets.cpp | 4
-rw-r--r--  llvm/lib/Target/Mips/Mips.h | 11
-rw-r--r--  llvm/lib/Target/Mips/MipsBranchExpansion.cpp | 70
-rw-r--r--  llvm/lib/Target/Mips/MipsDelaySlotFiller.cpp | 6
-rw-r--r--  llvm/lib/Target/Mips/MipsISelLowering.cpp | 2
-rw-r--r--  llvm/lib/Target/Mips/MipsInstrInfo.cpp | 15
-rw-r--r--  llvm/lib/Target/Mips/MipsInstrInfo.h | 4
-rw-r--r--  llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXInstPrinter.cpp | 285
-rw-r--r--  llvm/lib/Target/NVPTX/NVPTX.h | 34
-rw-r--r--  llvm/lib/Target/NVPTX/NVPTXAsmPrinter.cpp | 16
-rw-r--r--  llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp | 279
-rw-r--r--  llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.h | 24
-rw-r--r--  llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp | 21
-rw-r--r--  llvm/lib/Target/NVPTX/NVPTXInstrInfo.td | 449
-rw-r--r--  llvm/lib/Target/NVPTX/NVPTXIntrinsics.td | 235
-rw-r--r--  llvm/lib/Target/NVPTX/NVPTXLowerUnreachable.cpp | 21
-rw-r--r--  llvm/lib/Target/NVPTX/NVPTXReplaceImageHandles.cpp | 7
-rw-r--r--  llvm/lib/Target/NVPTX/NVPTXSubtarget.cpp | 13
-rw-r--r--  llvm/lib/Target/NVPTX/NVPTXSubtarget.h | 3
-rw-r--r--  llvm/lib/Target/NVPTX/NVPTXUtilities.cpp | 139
-rw-r--r--  llvm/lib/Target/NVPTX/NVPTXUtilities.h | 81
-rw-r--r--  llvm/lib/Target/RISCV/MCTargetDesc/RISCVMCCodeEmitter.cpp | 7
-rw-r--r--  llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp | 2
-rw-r--r--  llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 59
-rw-r--r--  llvm/lib/Target/RISCV/RISCVInstrInfo.cpp | 10
-rw-r--r--  llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td | 2
-rw-r--r--  llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td | 4
-rw-r--r--  llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp | 20
-rw-r--r--  llvm/lib/Target/SPIRV/SPIRVEmitIntrinsics.cpp | 3
-rw-r--r--  llvm/lib/Target/SPIRV/SPIRVGlobalRegistry.cpp | 128
-rw-r--r--  llvm/lib/Target/SPIRV/SPIRVGlobalRegistry.h | 11
-rw-r--r--  llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp | 57
-rw-r--r--  llvm/lib/Target/SPIRV/SPIRVPreLegalizer.cpp | 4
-rw-r--r--  llvm/lib/Target/SPIRV/SPIRVRegularizer.cpp | 10
-rw-r--r--  llvm/lib/Target/SPIRV/SPIRVStripConvergentIntrinsics.cpp | 2
-rw-r--r--  llvm/lib/Target/SPIRV/SPIRVUtils.cpp | 25
-rw-r--r--  llvm/lib/Target/SPIRV/SPIRVUtils.h | 2
-rw-r--r--  llvm/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmParser.cpp | 94
-rw-r--r--  llvm/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmTypeCheck.cpp | 368
-rw-r--r--  llvm/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmTypeCheck.h | 8
-rw-r--r--  llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp | 6
-rw-r--r--  llvm/lib/Target/X86/MCTargetDesc/X86ELFObjectWriter.cpp | 7
-rw-r--r--  llvm/lib/Target/X86/MCTargetDesc/X86FixupKinds.h | 4
-rw-r--r--  llvm/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp | 19
-rw-r--r--  llvm/lib/Target/X86/MCTargetDesc/X86MachObjectWriter.cpp | 6
-rw-r--r--  llvm/lib/Target/X86/MCTargetDesc/X86WinCOFFObjectWriter.cpp | 2
-rw-r--r--  llvm/lib/Target/X86/X86ISelLowering.cpp | 70
-rw-r--r--  llvm/lib/Target/X86/X86InstrAVX512.td | 27
-rw-r--r--  llvm/lib/Transforms/AggressiveInstCombine/AggressiveInstCombine.cpp | 7
-rw-r--r--  llvm/lib/Transforms/IPO/ElimAvailExtern.cpp | 19
-rw-r--r--  llvm/lib/Transforms/IPO/MemProfContextDisambiguation.cpp | 44
-rw-r--r--  llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp | 5
-rw-r--r--  llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp | 26
-rw-r--r--  llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp | 30
-rw-r--r--  llvm/lib/Transforms/InstCombine/InstructionCombining.cpp | 25
-rw-r--r--  llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp | 14
-rw-r--r--  llvm/lib/Transforms/Instrumentation/GCOVProfiling.cpp | 2
-rw-r--r--  llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp | 5
-rw-r--r--  llvm/lib/Transforms/Instrumentation/PGOCtxProfFlattening.cpp | 52
-rw-r--r--  llvm/lib/Transforms/Scalar/DFAJumpThreading.cpp | 46
-rw-r--r--  llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp | 26
-rw-r--r--  llvm/lib/Transforms/Utils/InlineFunction.cpp | 16
-rw-r--r--  llvm/lib/Transforms/Utils/LoopConstrainer.cpp | 2
-rw-r--r--  llvm/lib/Transforms/Utils/LoopUnroll.cpp | 2
-rw-r--r--  llvm/lib/Transforms/Utils/LoopUnrollRuntime.cpp | 2
-rw-r--r--  llvm/lib/Transforms/Utils/LoopVersioning.cpp | 2
-rw-r--r--  llvm/lib/Transforms/Utils/SimplifyCFG.cpp | 22
-rw-r--r--  llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp | 79
-rw-r--r--  llvm/lib/Transforms/Vectorize/LoopVectorizationPlanner.h | 5
-rw-r--r--  llvm/lib/Transforms/Vectorize/LoopVectorize.cpp | 29
-rw-r--r--  llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp | 99
-rw-r--r--  llvm/lib/Transforms/Vectorize/SandboxVectorizer/Region.cpp | 40
-rw-r--r--  llvm/lib/Transforms/Vectorize/VPlan.cpp | 85
-rw-r--r--  llvm/lib/Transforms/Vectorize/VPlan.h | 80
-rw-r--r--  llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp | 209
-rw-r--r--  llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp | 8
-rw-r--r--  llvm/test/Analysis/CostModel/AArch64/reduce-fadd.ll | 104
-rw-r--r--  llvm/test/Analysis/CostModel/RISCV/arith-fp.ll | 256
-rw-r--r--  llvm/test/Analysis/CostModel/RISCV/rvv-intrinsics.ll | 16
-rw-r--r--  llvm/test/Analysis/CostModel/RISCV/rvv-select.ll | 350
-rw-r--r--  llvm/test/Analysis/CtxProfAnalysis/handle-select.ll | 76
-rw-r--r--  llvm/test/Analysis/ScalarEvolution/exit-count-non-strict.ll | 6
-rw-r--r--  llvm/test/Analysis/ScalarEvolution/finite-trip-count.ll | 6
-rw-r--r--  llvm/test/Analysis/ScalarEvolution/ne-overflow.ll | 3
-rw-r--r--  llvm/test/Analysis/ScalarEvolution/predicated-exit-count.ll | 4
-rw-r--r--  llvm/test/Analysis/ScalarEvolution/predicated-symbolic-max-backedge-taken-count.ll | 6
-rw-r--r--  llvm/test/Analysis/ScalarEvolution/trip-count-implied-addrec.ll | 15
-rw-r--r--  llvm/test/Assembler/auto_upgrade_nvvm_intrinsics.ll | 71
-rw-r--r--  llvm/test/Bindings/llvm-c/atomics.ll | 3
-rw-r--r--  llvm/test/Bindings/llvm-c/debug_info_new_format.ll | 2
-rw-r--r--  llvm/test/CodeGen/AArch64/GlobalISel/legalize-freeze.mir | 24
-rw-r--r--  llvm/test/CodeGen/AArch64/GlobalISel/legalize-insert-vector-elt.mir | 11
-rw-r--r--  llvm/test/CodeGen/AArch64/bswap.ll | 4
-rw-r--r--  llvm/test/CodeGen/AArch64/concat-vector.ll | 16
-rw-r--r--  llvm/test/CodeGen/AArch64/fixed-vector-interleave.ll | 22
-rw-r--r--  llvm/test/CodeGen/AArch64/fptoi.ll | 52
-rw-r--r--  llvm/test/CodeGen/AArch64/itofp.ll | 11
-rw-r--r--  llvm/test/CodeGen/AArch64/mlicm-stack-write-check.mir | 4
-rw-r--r--  llvm/test/CodeGen/AArch64/shift.ll | 90
-rw-r--r--  llvm/test/CodeGen/AArch64/shufflevector.ll | 45
-rw-r--r--  llvm/test/CodeGen/AArch64/sincos-stack-slots.ll | 255
-rw-r--r--  llvm/test/CodeGen/AArch64/sme-streaming-mode-changing-call-disable-stackslot-scavenging.ll | 45
-rw-r--r--  llvm/test/CodeGen/AArch64/sve-bf16-converts.ll | 129
-rw-r--r--  llvm/test/CodeGen/AArch64/wide-scalar-shift-by-byte-multiple-legalization.ll | 156
-rw-r--r--  llvm/test/CodeGen/AArch64/wide-scalar-shift-legalization.ll | 117
-rw-r--r--  llvm/test/CodeGen/AArch64/xtn.ll | 17
-rw-r--r--  llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-divergent-i1-phis-no-lane-mask-merging.ll | 2
-rw-r--r--  llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-divergent-i1-used-outside-loop.ll | 6
-rw-r--r--  llvm/test/CodeGen/AMDGPU/GlobalISel/global-atomic-fadd.f32-no-rtn.ll | 8
-rw-r--r--  llvm/test/CodeGen/AMDGPU/GlobalISel/global-atomic-fadd.f32-rtn.ll | 6
-rw-r--r--  llvm/test/CodeGen/AMDGPU/GlobalISel/global-atomic-fadd.f64.ll | 4
-rw-r--r--  llvm/test/CodeGen/AMDGPU/GlobalISel/global-atomic-fadd.v2f16-no-rtn.ll | 4
-rw-r--r--  llvm/test/CodeGen/AMDGPU/GlobalISel/global-atomic-fadd.v2f16-rtn.ll | 2
-rw-r--r--  llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgpu-atomic-cmpxchg-global.mir | 8
-rw-r--r--  llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-copy.mir | 2
-rw-r--r--  llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fract.f64.mir | 12
-rw-r--r--  llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-load-global-saddr.mir | 80
-rw-r--r--  llvm/test/CodeGen/AMDGPU/GlobalISel/vni8-across-blocks.ll | 22
-rw-r--r--  llvm/test/CodeGen/AMDGPU/accvgpr-copy.mir | 36
-rw-r--r--  llvm/test/CodeGen/AMDGPU/agpr-to-agpr-copy.mir | 12
-rw-r--r--  llvm/test/CodeGen/AMDGPU/amdhsa-kernarg-preload-num-sgprs.ll | 73
-rw-r--r--  llvm/test/CodeGen/AMDGPU/atomic_optimizations_global_pointer.ll | 18
-rw-r--r--  llvm/test/CodeGen/AMDGPU/atomic_optimizations_local_pointer.ll | 74
-rw-r--r--  llvm/test/CodeGen/AMDGPU/expand-si-indirect.mir | 2
-rw-r--r--  llvm/test/CodeGen/AMDGPU/global-atomic-fadd.f32-no-rtn.ll | 6
-rw-r--r--  llvm/test/CodeGen/AMDGPU/global-atomic-fadd.f32-rtn.ll | 6
-rw-r--r--  llvm/test/CodeGen/AMDGPU/global_atomics_scan_fadd.ll | 82
-rw-r--r--  llvm/test/CodeGen/AMDGPU/global_atomics_scan_fmax.ll | 22
-rw-r--r--  llvm/test/CodeGen/AMDGPU/global_atomics_scan_fmin.ll | 22
-rw-r--r--  llvm/test/CodeGen/AMDGPU/global_atomics_scan_fsub.ll | 82
-rw-r--r--  llvm/test/CodeGen/AMDGPU/high-RP-reschedule.mir | 2
-rw-r--r--  llvm/test/CodeGen/AMDGPU/inline-asm.i128.ll | 24
-rw-r--r--  llvm/test/CodeGen/AMDGPU/insert-singleuse-vdst.mir | 1420
-rw-r--r--  llvm/test/CodeGen/AMDGPU/llvm.amdgcn.readfirstlane.ll | 144
-rw-r--r--  llvm/test/CodeGen/AMDGPU/llvm.amdgcn.s.barrier.wait.ll | 2151
-rw-r--r--  llvm/test/CodeGen/AMDGPU/llvm.amdgcn.wave.reduce.umax.mir | 8
-rw-r--r--  llvm/test/CodeGen/AMDGPU/llvm.amdgcn.wave.reduce.umin.mir | 8
-rw-r--r--  llvm/test/CodeGen/AMDGPU/llvm.amdgcn.wqm.demote.ll | 2
-rw-r--r--  llvm/test/CodeGen/AMDGPU/merge-flat-with-global-load-store.mir | 16
-rw-r--r--  llvm/test/CodeGen/AMDGPU/merge-global-load-store.mir | 52
-rw-r--r--  llvm/test/CodeGen/AMDGPU/move-load-addr-to-valu.mir | 18
-rw-r--r--  llvm/test/CodeGen/AMDGPU/move-to-valu-addsubu64.ll | 10
-rw-r--r--  llvm/test/CodeGen/AMDGPU/move-to-valu-pseudo-scalar-trans.ll | 20
-rw-r--r--  llvm/test/CodeGen/AMDGPU/optimize-exec-mask-pre-ra-non-empty-but-used-interval.mir | 4
-rw-r--r--  llvm/test/CodeGen/AMDGPU/partial-regcopy-and-spill-missed-at-regalloc.ll | 8
-rw-r--r--  llvm/test/CodeGen/AMDGPU/postra-sink-update-dependency.mir | 66
-rw-r--r--  llvm/test/CodeGen/AMDGPU/ran-out-of-sgprs-allocation-failure.mir | 4
-rw-r--r--  llvm/test/CodeGen/AMDGPU/sched-barrier-hang-weak-dep.mir | 4
-rw-r--r--  llvm/test/CodeGen/AMDGPU/sched-barrier-pre-RA.mir | 48
-rw-r--r--  llvm/test/CodeGen/AMDGPU/sched-group-barrier-pipeline-solver.mir | 24
-rw-r--r--  llvm/test/CodeGen/AMDGPU/sched-group-barrier-pre-RA.mir | 24
-rw-r--r--  llvm/test/CodeGen/AMDGPU/set-inactive-wwm-overwrite.ll | 4
-rw-r--r--  llvm/test/CodeGen/AMDGPU/sgpr-spill-fi-skip-processing-stack-arg-dbg-value-list.mir | 53
-rw-r--r--  llvm/test/CodeGen/AMDGPU/should-not-hoist-set-inactive.ll | 2
-rw-r--r--  llvm/test/CodeGen/AMDGPU/shrink-true16.mir | 2
-rw-r--r--  llvm/test/CodeGen/AMDGPU/shrink-v-cmp-wave32-dead-vcc-lo.mir | 55
-rw-r--r--  llvm/test/CodeGen/AMDGPU/skip-if-dead.ll | 2
-rw-r--r--  llvm/test/CodeGen/AMDGPU/valu-mask-write-hazard.mir | 7
-rw-r--r--  llvm/test/CodeGen/AMDGPU/vgpr-liverange-ir.ll | 2
-rw-r--r--  llvm/test/CodeGen/AMDGPU/vgpr-liverange.ll | 8
-rw-r--r--  llvm/test/CodeGen/AMDGPU/vgpr-spill-fi-skip-processing-stack-arg-dbg-value-list.mir | 52
-rw-r--r--  llvm/test/CodeGen/AMDGPU/wave32.ll | 4
-rw-r--r--  llvm/test/CodeGen/ARM/expand-pseudos.mir | 21
-rw-r--r--  llvm/test/CodeGen/ARM/preferred-function-alignment.ll | 5
-rw-r--r--  llvm/test/CodeGen/ARM/vbsl.ll | 204
-rw-r--r--  llvm/test/CodeGen/BPF/BTF/atomics.ll | 151
-rw-r--r--  llvm/test/CodeGen/BPF/BTF/print_btf.py | 295
-rw-r--r--  llvm/test/CodeGen/BPF/atomics_mem_order_v1.ll | 385
-rw-r--r--  llvm/test/CodeGen/BPF/atomics_mem_order_v3.ll | 781
-rw-r--r--  llvm/test/CodeGen/BPF/atomics_sub64_relaxed_v1.ll | 27
-rw-r--r--  llvm/test/CodeGen/BPF/xaddd_v1.ll | 25
-rw-r--r--  llvm/test/CodeGen/DirectX/Metadata/lib-entries.ll | 37
-rw-r--r--  llvm/test/CodeGen/DirectX/Metadata/multiple-entries-cs-error.ll | 23
-rw-r--r--  llvm/test/CodeGen/DirectX/Metadata/target-profile-error.ll | 12
-rw-r--r--  llvm/test/CodeGen/DirectX/legalize-module-flags.ll | 2
-rw-r--r--  llvm/test/CodeGen/DirectX/legalize-module-flags2.ll | 2
-rw-r--r--  llvm/test/CodeGen/DirectX/strip-call-attrs.ll | 2
-rw-r--r--  llvm/test/CodeGen/DirectX/typed_ptr.ll | 2
-rw-r--r--  llvm/test/CodeGen/Generic/allow-check.ll | 2
-rw-r--r--  llvm/test/CodeGen/Hexagon/expand-condsets-impuse2.mir | 2
-rw-r--r--  llvm/test/CodeGen/Hexagon/expand-condsets-phys-reg.mir | 2
-rw-r--r--  llvm/test/CodeGen/Hexagon/expand-condsets-rm-reg.mir | 2
-rw-r--r--  llvm/test/CodeGen/MIR/Generic/machine-function-optionally-computed-properties-conflict.mir | 35
-rw-r--r--  llvm/test/CodeGen/MIR/Generic/machine-function-optionally-computed-properties.mir | 64
-rw-r--r--  llvm/test/CodeGen/MIR/NVPTX/floating-point-immediate-operands.mir | 8
-rw-r--r--  llvm/test/CodeGen/Mips/cconv/illegal-vectors.ll | 115
-rw-r--r--  llvm/test/CodeGen/Mips/llvm-ir/ashr.ll | 343
-rw-r--r--  llvm/test/CodeGen/Mips/llvm-ir/lshr.ll | 335
-rw-r--r--  llvm/test/CodeGen/Mips/llvm-ir/sdiv.ll | 135
-rw-r--r--  llvm/test/CodeGen/Mips/llvm-ir/shl.ll | 302
-rw-r--r--  llvm/test/CodeGen/Mips/llvm-ir/srem.ll | 131
-rw-r--r--  llvm/test/CodeGen/Mips/llvm-ir/two-consecutive-mult.ll | 60
-rw-r--r--  llvm/test/CodeGen/Mips/llvm-ir/two-consecutive-sdiv.ll | 133
-rw-r--r--  llvm/test/CodeGen/Mips/llvm-ir/two-consecutive-srem.ll | 133
-rw-r--r--  llvm/test/CodeGen/Mips/llvm-ir/two-consecutive-udiv.ll | 133
-rw-r--r--  llvm/test/CodeGen/Mips/llvm-ir/two-consecutive-urem.ll | 133
-rw-r--r--  llvm/test/CodeGen/Mips/llvm-ir/udiv.ll | 131
-rw-r--r--  llvm/test/CodeGen/Mips/llvm-ir/urem.ll | 143
-rw-r--r--  llvm/test/CodeGen/NVPTX/fence-sm-90.ll | 30
-rw-r--r--  llvm/test/CodeGen/NVPTX/fence.ll | 76
-rw-r--r--  llvm/test/CodeGen/NVPTX/intrin-nocapture.ll | 21
-rw-r--r--  llvm/test/CodeGen/NVPTX/load-store-sm-70.ll | 2906
-rw-r--r--  llvm/test/CodeGen/NVPTX/load-store-sm-90.ll | 1423
-rw-r--r--  llvm/test/CodeGen/NVPTX/load-store.ll | 507
-rw-r--r--  llvm/test/CodeGen/NVPTX/rotate.ll | 433
-rw-r--r--  llvm/test/CodeGen/NVPTX/rotate_64.ll | 33
-rw-r--r--  llvm/test/CodeGen/NVPTX/unreachable.ll | 26
-rw-r--r--  llvm/test/CodeGen/PowerPC/ctrloop-sh.ll | 240
-rw-r--r--  llvm/test/CodeGen/PowerPC/pr59074.ll | 83
-rw-r--r--  llvm/test/CodeGen/PowerPC/wide-scalar-shift-by-byte-multiple-legalization.ll | 1418
-rw-r--r--  llvm/test/CodeGen/PowerPC/wide-scalar-shift-legalization.ll | 702
-rw-r--r--  llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll | 42
-rw-r--r--  llvm/test/CodeGen/RISCV/shifts.ll | 366
-rw-r--r--  llvm/test/CodeGen/RISCV/wide-scalar-shift-by-byte-multiple-legalization.ll | 5989
-rw-r--r--  llvm/test/CodeGen/RISCV/wide-scalar-shift-legalization.ll | 3571
-rw-r--r--  llvm/test/CodeGen/SPARC/salvage-debug-isel.ll | 69
-rw-r--r--  llvm/test/CodeGen/SPIRV/AtomicCompareExchange.ll | 6
-rw-r--r--  llvm/test/CodeGen/SPIRV/atomicrmw.ll | 25
-rw-r--r--  llvm/test/CodeGen/SPIRV/debug-info/debug-compilation-unit.ll | 2
-rw-r--r--  llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_add/atomicrmw_faddfsub_double.ll | 7
-rw-r--r--  llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_add/atomicrmw_faddfsub_float.ll | 7
-rw-r--r--  llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_add/atomicrmw_faddfsub_half.ll | 7
-rw-r--r--  llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_min_max/atomicrmw_fminfmax_double.ll | 7
-rw-r--r--  llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_min_max/atomicrmw_fminfmax_float.ll | 7
-rw-r--r--  llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_min_max/atomicrmw_fminfmax_half.ll | 7
-rw-r--r--  llvm/test/CodeGen/SPIRV/fence.ll | 10
-rw-r--r--  llvm/test/CodeGen/SPIRV/instructions/atomic-ptr.ll | 2
-rw-r--r--  llvm/test/CodeGen/SPIRV/instructions/atomic.ll | 31
-rw-r--r--  llvm/test/CodeGen/SPIRV/instructions/atomic_acqrel.ll | 4
-rw-r--r--  llvm/test/CodeGen/SPIRV/instructions/atomic_seq.ll | 4
-rw-r--r--  llvm/test/CodeGen/SPIRV/scoped_atomicrmw.ll | 163
-rw-r--r--  llvm/test/CodeGen/Thumb2/mve-soft-float-abi.ll | 66
-rw-r--r--  llvm/test/CodeGen/X86/canonicalize-vars-f16-type.ll | 270
-rw-r--r--  llvm/test/CodeGen/X86/canonicalize-vars.ll | 672
-rw-r--r--  llvm/test/CodeGen/X86/div-rem-pair-recomposition-signed.ll | 488
-rw-r--r--  llvm/test/CodeGen/X86/div-rem-pair-recomposition-unsigned.ll | 505
-rw-r--r--  llvm/test/CodeGen/X86/extractelement-fp.ll | 10
-rw-r--r--  llvm/test/CodeGen/X86/pmulh.ll | 51
-rw-r--r--  llvm/test/CodeGen/X86/pr38539.ll | 160
-rw-r--r--  llvm/test/CodeGen/X86/pr57673.ll | 4
-rw-r--r--  llvm/test/CodeGen/X86/scheduler-backtracking.ll | 140
-rw-r--r--  llvm/test/CodeGen/X86/section-stats.ll | 2
-rw-r--r--  llvm/test/CodeGen/X86/shift-i128.ll | 657
-rw-r--r--  llvm/test/CodeGen/X86/shift-i256.ll | 418
-rw-r--r--  llvm/test/CodeGen/X86/sjlj-shadow-stack-liveness.mir | 3
-rw-r--r--  llvm/test/CodeGen/X86/vector-shuffle-combining-avx512vbmi.ll | 15
-rw-r--r--  llvm/test/CodeGen/X86/wide-scalar-shift-by-byte-multiple-legalization.ll | 23081
-rw-r--r--  llvm/test/CodeGen/X86/wide-scalar-shift-legalization.ll | 7677
-rw-r--r--  llvm/test/CodeGen/X86/widen-load-of-small-alloca-with-zero-upper-half.ll | 3560
-rw-r--r--  llvm/test/CodeGen/X86/widen-load-of-small-alloca.ll | 1645
-rw-r--r--  llvm/test/DebugInfo/Generic/debug-ranges-duplication.ll | 2
-rw-r--r--  llvm/test/DebugInfo/NVPTX/debug-info.ll | 20
-rw-r--r--  llvm/test/ExecutionEngine/JITLink/x86-64/ELF_R_X86_64_PC.s (renamed from llvm/test/ExecutionEngine/JITLink/x86-64/ELF_R_X86_64_PC8.s) | 5
-rw-r--r--  llvm/test/MC/AMDGPU/amdhsa-kd-kernarg-preload.s | 21
-rw-r--r--  llvm/test/MC/AMDGPU/flat-global.s | 5
-rw-r--r--  llvm/test/MC/AMDGPU/gfx10_flat_instructions_err.s | 268
-rw-r--r--  llvm/test/MC/AMDGPU/gfx10_unsupported.s | 3
-rw-r--r--  llvm/test/MC/AMDGPU/gfx1150_asm_sopp.s | 10
-rw-r--r--  llvm/test/MC/AMDGPU/gfx11_asm_vop3-fake16.s | 6199
-rw-r--r--  llvm/test/MC/AMDGPU/gfx11_asm_vop3.s | 8
-rw-r--r--  llvm/test/MC/AMDGPU/gfx11_asm_vop3_dpp16-fake16.s | 4695
-rw-r--r--  llvm/test/MC/AMDGPU/gfx11_asm_vop3_dpp16.s | 8
-rw-r--r--  llvm/test/MC/AMDGPU/gfx11_asm_vop3_dpp8-fake16.s | 2968
-rw-r--r--  llvm/test/MC/AMDGPU/gfx11_asm_vop3_dpp8.s | 8
-rw-r--r--  llvm/test/MC/AMDGPU/gfx11_flat_instructions_err.s | 253
-rw-r--r--  llvm/test/MC/AMDGPU/gfx11_unsupported.s | 6
-rw-r--r--  llvm/test/MC/AMDGPU/gfx12_asm_sopp.s | 9
-rw-r--r--  llvm/test/MC/AMDGPU/gfx12_asm_vop3-fake16.s | 7294
-rw-r--r--  llvm/test/MC/AMDGPU/gfx12_asm_vop3.s | 8
-rw-r--r--  llvm/test/MC/AMDGPU/gfx12_asm_vop3_dpp16-fake16.s | 5764
-rw-r--r--  llvm/test/MC/AMDGPU/gfx12_asm_vop3_dpp16.s | 8
-rw-r--r--  llvm/test/MC/AMDGPU/gfx12_asm_vop3_dpp8-fake16.s | 3814
-rw-r--r--  llvm/test/MC/AMDGPU/gfx12_asm_vop3_dpp8.s | 8
-rw-r--r--  llvm/test/MC/AMDGPU/gfx12_flat_instructions_err.s | 289
-rw-r--r--  llvm/test/MC/AMDGPU/gfx12_unsupported.s | 6
-rw-r--r--  llvm/test/MC/AMDGPU/gfx940_unsupported.s | 11
-rw-r--r--  llvm/test/MC/Disassembler/AMDGPU/decode-err.txt | 5
-rw-r--r--  llvm/test/MC/Disassembler/AMDGPU/gfx10_flat.txt | 3
-rw-r--r--  llvm/test/MC/Disassembler/AMDGPU/gfx1150_dasm_sopp.txt | 10
-rw-r--r--  llvm/test/MC/Disassembler/AMDGPU/gfx11_dasm_vop3.txt | 232
-rw-r--r--  llvm/test/MC/Disassembler/AMDGPU/gfx11_dasm_vop3_dpp16.txt | 217
-rw-r--r--  llvm/test/MC/Disassembler/AMDGPU/gfx11_dasm_vop3_dpp8.txt | 37
-rw-r--r--  llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_sopp.txt | 8
-rw-r--r--  llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_vop3.txt | 258
-rw-r--r--  llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_vop3_dpp16.txt | 221
-rw-r--r--  llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_vop3_dpp8.txt | 37
-rw-r--r--  llvm/test/MC/Disassembler/X86/apx/kmov.txt | 16
-rw-r--r--  llvm/test/MC/ELF/relocation-alias.s | 3
-rw-r--r--  llvm/test/MC/RISCV/machine-csr-names-invalid.s | 40
-rw-r--r--  llvm/test/MC/RISCV/rv64-relax-all.s | 6
-rw-r--r--  llvm/test/MC/WebAssembly/type-checker-errors.s | 22
-rw-r--r--  llvm/test/MC/X86/gotpcrelx.s | 34
-rw-r--r--  llvm/test/MC/X86/reloc-directive-elf-64.s | 3
-rw-r--r--  llvm/test/TableGen/listflatten-error.td | 6
-rw-r--r--  llvm/test/TableGen/listflatten.td | 32
-rw-r--r--  llvm/test/Transforms/AggressiveInstCombine/inline-strcmp-debugloc.ll | 56
-rw-r--r--  llvm/test/Transforms/DFAJumpThreading/dfa-jump-threading-transform.ll | 123
-rw-r--r--  llvm/test/Transforms/EliminateAvailableExternally/transform-to-local.ll | 18
-rw-r--r--  llvm/test/Transforms/InstCombine/AArch64/sve-intrinsic-insr.ll | 73
-rw-r--r--  llvm/test/Transforms/InstCombine/compare-3way.ll | 39
-rw-r--r--  llvm/test/Transforms/InstCombine/icmp-inttoptr.ll | 97
-rw-r--r--  llvm/test/Transforms/InstCombine/phi-with-multiple-unsimplifiable-values.ll | 32
-rw-r--r--  llvm/test/Transforms/InstCombine/scmp.ll | 130
-rw-r--r--  llvm/test/Transforms/InstCombine/select-select.ll | 30
-rw-r--r--  llvm/test/Transforms/InstCombine/sink_to_unreachable.ll | 9
-rw-r--r--  llvm/test/Transforms/InstCombine/ucmp.ll | 14
-rw-r--r--  llvm/test/Transforms/LoopVectorize/AArch64/scalable-call.ll | 6
-rw-r--r--  llvm/test/Transforms/LoopVectorize/AArch64/scalable-fp-ext-trunc-illegal-type.ll | 76
-rw-r--r--  llvm/test/Transforms/LoopVectorize/AArch64/veclib-function-calls.ll | 93
-rw-r--r--  llvm/test/Transforms/LoopVectorize/AArch64/vector-call-linear-args.ll | 151
-rw-r--r--  llvm/test/Transforms/LoopVectorize/float-induction.ll | 109
-rw-r--r--  llvm/test/Transforms/LoopVectorize/load-deref-pred-align.ll | 127
-rw-r--r--  llvm/test/Transforms/LoopVectorize/simple_early_exit.ll | 28
-rw-r--r--  llvm/test/Transforms/MemCpyOpt/fca2memcpy.ll | 15
-rw-r--r--  llvm/test/Transforms/SLPVectorizer/AArch64/reduce-fadd.ll | 113
-rw-r--r--  llvm/test/Transforms/SLPVectorizer/RISCV/revec-getGatherCost.ll | 42
-rw-r--r--  llvm/test/Transforms/SLPVectorizer/RISCV/select-profitability.ll | 55
-rw-r--r--  llvm/test/Transforms/SLPVectorizer/X86/reduced-value-vectorized-later.ll | 41
-rw-r--r--  llvm/test/Transforms/SLPVectorizer/X86/splat-score-adjustment.ll | 89
-rw-r--r--  llvm/test/Transforms/SLPVectorizer/alternate-cmp-swapped-pred-parent.ll | 5
-rw-r--r--  llvm/test/Transforms/SLPVectorizer/alternate-opcode-sindle-bv.ll | 5
-rw-r--r--  llvm/test/Transforms/SLPVectorizer/arith-div-undef.ll | 5
-rw-r--r--  llvm/test/Transforms/SLPVectorizer/bool-logical-op-reduction-with-poison.ll | 6
-rw-r--r--  llvm/test/Transforms/SLPVectorizer/buildvector-insert-mask-size.ll | 5
-rw-r--r--  llvm/test/Transforms/SLPVectorizer/buildvector-nodes-dependency.ll | 5
-rw-r--r--  llvm/test/Transforms/SLPVectorizer/call-arg-reduced-by-minbitwidth.ll | 5
-rw-r--r--  llvm/test/Transforms/SLPVectorizer/catchswitch.ll | 5
-rw-r--r--  llvm/test/Transforms/SLPVectorizer/crash_exceed_scheduling.ll | 5
-rw-r--r--  llvm/test/Transforms/SLPVectorizer/diamond_broadcast.ll | 5
-rw-r--r--  llvm/test/Transforms/SLPVectorizer/ext-int-reduced-not-operand.ll (renamed from llvm/test/Transforms/SLPVectorizer/X86/ext-int-reduced-not-operand.ll) | 9
-rw-r--r--  llvm/test/Transforms/SLPVectorizer/extended-vectorized-gathered-inst.ll (renamed from llvm/test/Transforms/SLPVectorizer/X86/extended-vectorized-gathered-inst.ll) | 3
-rw-r--r--  llvm/test/Transforms/SLPVectorizer/external-user-instruction-minbitwidth.ll (renamed from llvm/test/Transforms/SLPVectorizer/X86/external-user-instruction-minbitwidth.ll) | 3
-rw-r--r--  llvm/test/Transforms/SLPVectorizer/extract-many-users-buildvector.ll (renamed from llvm/test/Transforms/SLPVectorizer/X86/extract-many-users-buildvector.ll) | 3
-rw-r--r--  llvm/test/Transforms/SLPVectorizer/extractelement-insertpoint.ll (renamed from llvm/test/Transforms/SLPVectorizer/X86/extractelement-insertpoint.ll) | 3
-rw-r--r--  llvm/test/Transforms/SLPVectorizer/extractlements-gathered-first-node.ll (renamed from llvm/test/Transforms/SLPVectorizer/X86/extractlements-gathered-first-node.ll) | 3
-rw-r--r--  llvm/test/Transforms/SLPVectorizer/extracts-with-undefs.ll (renamed from llvm/test/Transforms/SLPVectorizer/X86/extracts-with-undefs.ll) | 3
-rw-r--r--  llvm/test/Transforms/SLPVectorizer/gather_extract_from_vectorbuild.ll (renamed from llvm/test/Transforms/SLPVectorizer/X86/gather_extract_from_vectorbuild.ll) | 3
-rw-r--r--  llvm/test/Transforms/SLPVectorizer/gep-with-extractelement-many-users.ll (renamed from llvm/test/Transforms/SLPVectorizer/X86/gep-with-extractelement-many-users.ll) | 3
-rw-r--r--  llvm/test/Transforms/SLPVectorizer/insert-crash-index.ll (renamed from llvm/test/Transforms/SLPVectorizer/X86/insert-crash-index.ll) | 3
-rw-r--r--  llvm/test/Transforms/SLPVectorizer/insert-element-build-vector-const-undef.ll (renamed from llvm/test/Transforms/SLPVectorizer/X86/insert-element-build-vector-const-undef.ll) | 3
-rw-r--r--  llvm/test/Transforms/SLPVectorizer/insert-element-build-vector-inseltpoison.ll (renamed from llvm/test/Transforms/SLPVectorizer/X86/insert-element-build-vector-inseltpoison.ll) | 12
-rw-r--r--  llvm/test/Transforms/SLPVectorizer/insert-element-build-vector.ll (renamed from llvm/test/Transforms/SLPVectorizer/X86/insert-element-build-vector.ll) | 12
-rw-r--r--  llvm/test/Transforms/SLPVectorizer/insert-element-multiple-uses.ll (renamed from llvm/test/Transforms/SLPVectorizer/X86/insert-element-multiple-uses.ll) | 3
-rw-r--r--  llvm/test/Transforms/SLPVectorizer/insertelement-postpone.ll (renamed from llvm/test/Transforms/SLPVectorizer/X86/insertelement-postpone.ll) | 3
-rw-r--r--  llvm/test/Transforms/SLPVectorizer/insertelement-uses-vectorized-index.ll (renamed from llvm/test/Transforms/SLPVectorizer/X86/insertelement-uses-vectorized-index.ll) | 3
-rw-r--r--  llvm/test/Transforms/SLPVectorizer/int-bitcast-minbitwidth.ll (renamed from llvm/test/Transforms/SLPVectorizer/X86/int-bitcast-minbitwidth.ll) | 3
-rw-r--r--  llvm/test/Transforms/SLPVectorizer/jumbled_store_crash.ll (renamed from llvm/test/Transforms/SLPVectorizer/X86/jumbled_store_crash.ll) | 3
-rw-r--r--  llvm/test/Transforms/SLPVectorizer/minbitwidth-multiuse-with-insertelement.ll (renamed from llvm/test/Transforms/SLPVectorizer/X86/minbitwidth-multiuse-with-insertelement.ll) | 3
-rw-r--r--  llvm/test/Transforms/SLPVectorizer/minbitwidth-node-with-multi-users.ll (renamed from llvm/test/Transforms/SLPVectorizer/X86/minbitwidth-node-with-multi-users.ll) | 3
-rw-r--r--  llvm/test/Transforms/SLPVectorizer/minbitwidth-user-not-min.ll (renamed from llvm/test/Transforms/SLPVectorizer/X86/minbitwidth-user-not-min.ll) | 3
-rw-r--r--  llvm/test/Transforms/SLPVectorizer/multi-node-vectorized-insts.ll (renamed from llvm/test/Transforms/SLPVectorizer/X86/multi-node-vectorized-insts.ll) | 3
-rw-r--r--  llvm/test/Transforms/SLPVectorizer/multi-uses-with-deps-in-first.ll (renamed from llvm/test/Transforms/SLPVectorizer/X86/multi-uses-with-deps-in-first.ll) | 4
-rw-r--r--  llvm/test/Transforms/SLPVectorizer/one-element-vector.ll (renamed from llvm/test/Transforms/SLPVectorizer/X86/one-element-vector.ll) | 3
-rw-r--r--  llvm/test/Transforms/SLPVectorizer/peek-through-shuffle.ll (renamed from llvm/test/Transforms/SLPVectorizer/X86/peek-through-shuffle.ll) | 3
-rw-r--r--  llvm/test/Transforms/SLPVectorizer/phi-node-bitwidt-op-not.ll (renamed from llvm/test/Transforms/SLPVectorizer/X86/phi-node-bitwidt-op-not.ll) | 3
-rw-r--r--  llvm/test/Transforms/SLPVectorizer/phi-undef-input.ll (renamed from llvm/test/Transforms/SLPVectorizer/X86/phi-undef-input.ll) | 3
-rw-r--r--  llvm/test/Transforms/SLPVectorizer/postponed_gathers.ll (renamed from llvm/test/Transforms/SLPVectorizer/X86/postponed_gathers.ll) | 3
-rw-r--r--  llvm/test/Transforms/SLPVectorizer/pr31599-inseltpoison.ll (renamed from llvm/test/Transforms/SLPVectorizer/X86/pr31599-inseltpoison.ll) | 3
-rw-r--r--  llvm/test/Transforms/SLPVectorizer/pr31599.ll (renamed from llvm/test/Transforms/SLPVectorizer/X86/pr31599.ll) | 3
-rw-r--r--  llvm/test/Transforms/SLPVectorizer/reduction-gather-non-scheduled-extracts.ll (renamed from llvm/test/Transforms/SLPVectorizer/X86/reduction-gather-non-scheduled-extracts.ll) | 3
-rw-r--r--  llvm/test/Transforms/SLPVectorizer/reduction-modified-values.ll (renamed from llvm/test/Transforms/SLPVectorizer/X86/reduction-modified-values.ll) | 3
-rw-r--r--  llvm/test/Transforms/SLPVectorizer/reorder-clustered-node.ll (renamed from llvm/test/Transforms/SLPVectorizer/X86/reorder-clustered-node.ll) | 3
-rw-r--r--  llvm/test/Transforms/SLPVectorizer/reordered-top-scalars.ll (renamed from llvm/test/Transforms/SLPVectorizer/X86/reordered-top-scalars.ll) | 3
-rw-r--r--  llvm/test/Transforms/SLPVectorizer/reordering-single-phi.ll (renamed from llvm/test/Transforms/SLPVectorizer/X86/reordering-single-phi.ll) | 3
-rw-r--r--  llvm/test/Transforms/SLPVectorizer/reused-buildvector-matching-vectorized-node.ll (renamed from llvm/test/Transforms/SLPVectorizer/X86/reused-buildvector-matching-vectorized-node.ll) | 3
-rw-r--r--  llvm/test/Transforms/SLPVectorizer/revec-fix-109835.ll | 70
-rw-r--r--  llvm/test/Transforms/SLPVectorizer/root-trunc-extract-reuse.ll (renamed from llvm/test/Transforms/SLPVectorizer/X86/root-trunc-extract-reuse.ll) | 3
-rw-r--r--  llvm/test/Transforms/SLPVectorizer/same-scalar-in-same-phi-extract.ll (renamed from llvm/test/Transforms/SLPVectorizer/X86/same-scalar-in-same-phi-extract.ll) | 3
-rw-r--r--  llvm/test/Transforms/SLPVectorizer/scalarazied-result.ll (renamed from llvm/test/Transforms/SLPVectorizer/X86/scalarazied-result.ll) | 3
-rw-r--r--  llvm/test/Transforms/SLPVectorizer/scalarization-overhead.ll (renamed from llvm/test/Transforms/SLPVectorizer/X86/scalarization-overhead.ll) | 3
-rw-r--r--  llvm/test/Transforms/SLPVectorizer/shrink_after_reorder2.ll (renamed from llvm/test/Transforms/SLPVectorizer/X86/shrink_after_reorder2.ll) | 3
-rw-r--r--  llvm/test/Transforms/SLPVectorizer/shuffle-multivector.ll (renamed from llvm/test/Transforms/SLPVectorizer/X86/shuffle-multivector.ll) | 3
-rw-r--r--  llvm/test/Transforms/SLPVectorizer/shufflebuilder-bug.ll (renamed from llvm/test/Transforms/SLPVectorizer/X86/shufflebuilder-bug.ll) | 3
-rw-r--r--  llvm/test/Transforms/SLPVectorizer/stores-non-ordered.ll (renamed from llvm/test/Transforms/SLPVectorizer/X86/stores-non-ordered.ll) | 3
-rw-r--r--  llvm/test/Transforms/SLPVectorizer/unknown-entries.ll (renamed from llvm/test/Transforms/SLPVectorizer/X86/unknown-entries.ll) | 5
-rw-r--r--  llvm/test/Transforms/SLPVectorizer/zext-incoming-for-neg-icmp.ll (renamed from llvm/test/Transforms/SLPVectorizer/X86/zext-incoming-for-neg-icmp.ll) | 3
-rw-r--r--  llvm/test/Transforms/SimplifyCFG/X86/hoist-loads-stores-with-cf.ll | 35
-rw-r--r--  llvm/test/Transforms/SimplifyCFG/X86/sink-common-code.ll | 61
-rw-r--r--  llvm/test/Transforms/SimplifyCFG/X86/switch_to_lookup_table.ll | 25
-rw-r--r--  llvm/test/Transforms/SimplifyCFG/speculate-derefable-load.ll | 11
-rw-r--r--  llvm/test/Transforms/VectorCombine/RISCV/shuffle-of-intrinsics.ll | 7
-rw-r--r--  llvm/test/lit.cfg.py | 8
-rw-r--r--  llvm/test/tools/UpdateTestChecks/lit.local.cfg | 4
-rw-r--r--  llvm/test/tools/UpdateTestChecks/update_mc_test_checks/Inputs/amdgpu_asm.s | 3
-rw-r--r--  llvm/test/tools/UpdateTestChecks/update_mc_test_checks/Inputs/amdgpu_asm.s.expected | 5
-rw-r--r--  llvm/test/tools/UpdateTestChecks/update_mc_test_checks/Inputs/amdgpu_asm_err.s | 3
-rw-r--r--  llvm/test/tools/UpdateTestChecks/update_mc_test_checks/Inputs/amdgpu_asm_err.s.expected | 5
-rw-r--r--  llvm/test/tools/UpdateTestChecks/update_mc_test_checks/Inputs/amdgpu_dasm.txt | 5
-rw-r--r--  llvm/test/tools/UpdateTestChecks/update_mc_test_checks/Inputs/amdgpu_dasm.txt.expected | 8
-rw-r--r--  llvm/test/tools/UpdateTestChecks/update_mc_test_checks/Inputs/amdgpu_multirun_dasm.txt | 6
-rw-r--r--  llvm/test/tools/UpdateTestChecks/update_mc_test_checks/Inputs/amdgpu_multirun_dasm.txt.expected | 10
-rw-r--r--  llvm/test/tools/UpdateTestChecks/update_mc_test_checks/amdgpu-basic.test | 11
-rw-r--r--  llvm/test/tools/UpdateTestChecks/update_mc_test_checks/lit.local.cfg | 4
-rw-r--r--  llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/if_target.ll | 11
-rw-r--r--  llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/if_target.ll.expected | 19
-rw-r--r--  llvm/test/tools/UpdateTestChecks/update_test_checks/if_target.test | 6
-rw-r--r--  llvm/test/tools/dsymutil/X86/dwarf5-many-include-directories.test | 213
-rw-r--r--  llvm/test/tools/llvm-exegesis/X86/latency/cpu-pinning-execution-mode.s | 5
-rw-r--r--  llvm/test/tools/llvm-exegesis/X86/latency/cpu-pinning.s | 5
-rw-r--r--  llvm/test/tools/llvm-readobj/COFF/arm64ec-chpe.yaml | 31
-rw-r--r--  llvm/test/tools/llvm-reduce/mir/preserve-func-info.mir | 6
-rw-r--r--  llvm/tools/gold/gold-plugin.cpp | 8
-rw-r--r--  llvm/tools/llvm-c-test/main.c | 3
-rw-r--r--  llvm/tools/llvm-ctxprof-util/llvm-ctxprof-util.cpp | 3
-rw-r--r--  llvm/tools/llvm-debuginfod-find/llvm-debuginfod-find.cpp | 39
-rw-r--r--  llvm/tools/llvm-exegesis/lib/BenchmarkRunner.cpp | 76
-rw-r--r--  llvm/tools/llvm-exegesis/lib/BenchmarkRunner.h | 6
-rw-r--r--  llvm/tools/llvm-exegesis/llvm-exegesis.cpp | 11
-rw-r--r--  llvm/tools/llvm-extract/llvm-extract.cpp | 3
-rw-r--r--  llvm/tools/llvm-ifs/llvm-ifs.cpp | 7
-rw-r--r--  llvm/tools/llvm-reduce/ReducerWorkItem.cpp | 22
-rw-r--r--  llvm/tools/llvm-reduce/TestRunner.cpp | 24
-rw-r--r--  llvm/tools/llvm-reduce/TestRunner.h | 4
-rw-r--r--  llvm/tools/llvm-reduce/deltas/ReduceDistinctMetadata.cpp | 14
-rw-r--r--  llvm/unittests/IR/IRBuilderTest.cpp | 8
-rw-r--r--  llvm/unittests/MI/LiveIntervalTest.cpp | 4
-rw-r--r--  llvm/unittests/Option/OptionMarshallingTest.cpp | 2
-rw-r--r--  llvm/unittests/SandboxIR/SandboxIRTest.cpp | 151
-rw-r--r--  llvm/unittests/Support/raw_ostream_test.cpp | 10
-rw-r--r--  llvm/unittests/Transforms/Vectorize/SandboxVectorizer/RegionTest.cpp | 104
-rw-r--r--  llvm/utils/TableGen/CMakeLists.txt | 4
-rw-r--r--  llvm/utils/TableGen/Common/CodeGenDAGPatterns.cpp | 158
-rw-r--r--  llvm/utils/TableGen/Common/CodeGenDAGPatterns.h | 84
-rw-r--r--  llvm/utils/TableGen/Common/CodeGenHwModes.cpp | 10
-rw-r--r--  llvm/utils/TableGen/Common/CodeGenInstAlias.cpp | 33
-rw-r--r--  llvm/utils/TableGen/Common/CodeGenInstAlias.h | 2
-rw-r--r--  llvm/utils/TableGen/Common/CodeGenRegisters.cpp | 19
-rw-r--r--  llvm/utils/TableGen/Common/CodeGenTarget.cpp | 57
-rw-r--r--  llvm/utils/TableGen/Common/CodeGenTarget.h | 8
-rw-r--r--  llvm/utils/TableGen/Common/GlobalISel/PatternParser.cpp | 4
-rw-r--r--  llvm/utils/TableGen/DAGISelMatcherGen.cpp | 8
-rw-r--r--  llvm/utils/TableGen/FastISelEmitter.cpp | 4
-rw-r--r--  llvm/utils/TableGen/GlobalISelEmitter.cpp | 72
-rw-r--r--  llvm/utils/TableGen/OptionParserEmitter.cpp (renamed from llvm/utils/TableGen/OptParserEmitter.cpp) | 8
-rw-r--r--  llvm/utils/TableGen/OptionRSTEmitter.cpp (renamed from llvm/utils/TableGen/OptRSTEmitter.cpp) | 10
-rw-r--r--  llvm/utils/UpdateTestChecks/common.py | 2
-rw-r--r--  llvm/utils/gn/build/BUILD.gn | 1
-rw-r--r--  llvm/utils/gn/build/toolchain/target_flags.gni | 3
-rw-r--r--  llvm/utils/gn/secondary/BUILD.gn | 4
-rw-r--r--  llvm/utils/gn/secondary/clang-tools-extra/clang-doc/tool/BUILD.gn | 2
-rw-r--r--  llvm/utils/gn/secondary/clang-tools-extra/clangd/BUILD.gn | 2
-rw-r--r--  llvm/utils/gn/secondary/clang/lib/Interpreter/BUILD.gn | 1
-rw-r--r--  llvm/utils/gn/secondary/clang/test/BUILD.gn | 4
-rw-r--r--  llvm/utils/gn/secondary/clang/unittests/InstallAPI/BUILD.gn | 2
-rw-r--r--  llvm/utils/gn/secondary/compiler-rt/lib/sanitizer_common/BUILD.gn | 2
-rw-r--r--  llvm/utils/gn/secondary/compiler-rt/test/hwasan/BUILD.gn | 2
-rw-r--r--  llvm/utils/gn/secondary/compiler-rt/test/lsan/BUILD.gn | 8
-rw-r--r--  llvm/utils/gn/secondary/libcxx/src/BUILD.gn | 6
-rw-r--r--  llvm/utils/gn/secondary/lld/unittests/AsLibAll/BUILD.gn | 2
-rw-r--r--  llvm/utils/gn/secondary/lld/unittests/BUILD.gn | 1
-rw-r--r--  llvm/utils/gn/secondary/lldb/test/BUILD.gn | 2
-rw-r--r--  llvm/utils/gn/secondary/llvm/include/llvm/TargetParser/BUILD.gn | 2
-rw-r--r--  llvm/utils/gn/secondary/llvm/lib/CodeGenTypes/BUILD.gn | 1
-rw-r--r--  llvm/utils/gn/secondary/llvm/lib/DebugInfo/BTF/BUILD.gn | 2
-rw-r--r--  llvm/utils/gn/secondary/llvm/lib/Target/AMDGPU/BUILD.gn | 1
-rw-r--r--  llvm/utils/gn/secondary/llvm/lib/Target/WebAssembly/Utils/BUILD.gn | 4
-rw-r--r--  llvm/utils/gn/secondary/llvm/tools/llc/BUILD.gn | 2
-rw-r--r--  llvm/utils/gn/secondary/llvm/tools/llvm-dwp/BUILD.gn | 2
-rw-r--r--  llvm/utils/gn/secondary/llvm/tools/llvm-libtool-darwin/BUILD.gn | 2
-rw-r--r--  llvm/utils/gn/secondary/llvm/tools/llvm-ml/BUILD.gn | 2
-rw-r--r--  llvm/utils/gn/secondary/llvm/tools/sancov/BUILD.gn | 2
-rw-r--r--  llvm/utils/gn/secondary/llvm/unittests/Transforms/Instrumentation/BUILD.gn | 2
-rw-r--r--  llvm/utils/gn/secondary/llvm/utils/TableGen/BUILD.gn | 6
-rwxr-xr-x  llvm/utils/update_mc_test_checks.py | 329
-rwxr-xr-x  llvm/utils/update_test_checks.py | 8
-rw-r--r--  mlir/include/mlir-c/BuiltinTypes.h | 10
-rw-r--r--  mlir/include/mlir/Dialect/Tensor/IR/TensorOps.td | 18
-rw-r--r--  mlir/include/mlir/Dialect/XeGPU/IR/XeGPUAttrs.td | 64
-rw-r--r--  mlir/include/mlir/Dialect/XeGPU/IR/XeGPUOps.td | 86
-rw-r--r--  mlir/include/mlir/Dialect/XeGPU/IR/XeGPUTypes.td | 73
-rw-r--r--  mlir/include/mlir/IR/Builders.h | 1
-rw-r--r--  mlir/include/mlir/IR/BuiltinTypes.h | 15
-rw-r--r--  mlir/include/mlir/IR/BuiltinTypes.td | 21
-rw-r--r--  mlir/include/mlir/IR/CommonTypeConstraints.td | 2
-rw-r--r--  mlir/include/mlir/IR/Types.h | 1
-rw-r--r--  mlir/lib/AsmParser/TokenKinds.def | 1
-rw-r--r--mlir/lib/AsmParser/TypeParser.cpp4
-rw-r--r--mlir/lib/Bindings/Python/IRTypes.cpp22
-rw-r--r--mlir/lib/CAPI/IR/BuiltinTypes.cpp12
-rw-r--r--mlir/lib/Conversion/GPUCommon/OpToFuncCallLowering.h24
-rw-r--r--mlir/lib/Conversion/GPUToNVVM/LowerGpuOpsToNVVMOps.cpp6
-rw-r--r--mlir/lib/Conversion/GPUToROCDL/LowerGpuOpsToROCDLOps.cpp11
-rw-r--r--mlir/lib/Conversion/LLVMCommon/TypeConverter.cpp2
-rw-r--r--mlir/lib/Conversion/MathToROCDL/MathToROCDL.cpp62
-rw-r--r--mlir/lib/Conversion/VectorToXeGPU/VectorToXeGPU.cpp13
-rw-r--r--mlir/lib/Dialect/Arith/Transforms/EmulateUnsupportedFloats.cpp1
-rw-r--r--mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp23
-rw-r--r--mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp105
-rw-r--r--mlir/lib/Dialect/SCF/Transforms/LoopPipelining.cpp7
-rw-r--r--mlir/lib/Dialect/Tensor/IR/TensorOps.cpp34
-rw-r--r--mlir/lib/Dialect/XeGPU/IR/XeGPUDialect.cpp46
-rw-r--r--mlir/lib/Dialect/XeGPU/IR/XeGPUOps.cpp109
-rw-r--r--mlir/lib/IR/AsmPrinter.cpp1
-rw-r--r--mlir/lib/IR/Builders.cpp4
-rw-r--r--mlir/lib/IR/BuiltinTypes.cpp2
-rw-r--r--mlir/lib/IR/MLIRContext.cpp5
-rw-r--r--mlir/lib/IR/Types.cpp1
-rw-r--r--mlir/python/mlir/_mlir_libs/_mlir/ir.pyi14
-rw-r--r--mlir/python/mlir/extras/types.py2
-rw-r--r--mlir/test/Conversion/GPUToROCDL/gpu-to-rocdl.mlir163
-rw-r--r--mlir/test/Conversion/MathToROCDL/math-to-rocdl.mlir208
-rw-r--r--mlir/test/Dialect/Linalg/vectorize-convolution.mlir65
-rw-r--r--mlir/test/Dialect/Linalg/vectorize-tensor-extract.mlir90
-rw-r--r--mlir/test/Dialect/SCF/loop-pipelining.mlir5
-rw-r--r--mlir/test/Dialect/XeGPU/XeGPUOps.mlir74
-rw-r--r--mlir/test/Dialect/XeGPU/invalid.mlir75
-rw-r--r--mlir/test/IR/attribute.mlir4
-rw-r--r--mlir/test/Target/LLVMIR/llvmir.mlir3
-rw-r--r--mlir/test/python/ir/builtin_types.py9
-rw-r--r--mlir/utils/lldb-scripts/mlirDataFormatters.py1
-rw-r--r--mlir/utils/tree-sitter-mlir/grammar.js2
-rw-r--r--offload/include/OpenMP/OMPT/Callback.h6
-rw-r--r--offload/include/OpenMP/OMPT/Interface.h12
-rw-r--r--offload/plugins-nextgen/common/CMakeLists.txt6
-rw-r--r--offload/plugins-nextgen/common/OMPT/OmptCallback.cpp75
-rw-r--r--offload/src/OpenMP/OMPT/Callback.cpp54
-rw-r--r--offload/src/exports1
-rw-r--r--polly/lib/CodeGen/RuntimeDebugBuilder.cpp4
-rw-r--r--utils/bazel/llvm-project-overlay/libc/BUILD.bazel83
-rw-r--r--utils/bazel/llvm-project-overlay/libc/libc_build_rules.bzl11
-rw-r--r--utils/bazel/llvm-project-overlay/libc/test/src/math/libc_math_test_rules.bzl3
-rw-r--r--utils/bazel/llvm-project-overlay/libc/test/src/sys/socket/BUILD.bazel2
-rw-r--r--utils/bazel/llvm-project-overlay/libc/utils/MPFRWrapper/BUILD.bazel1
-rw-r--r--utils/bazel/llvm-project-overlay/mlir/BUILD.bazel1
993 files changed, 99639 insertions, 24672 deletions
diff --git a/bolt/include/bolt/Core/BinaryContext.h b/bolt/include/bolt/Core/BinaryContext.h
index 5fb32a1..08ce892 100644
--- a/bolt/include/bolt/Core/BinaryContext.h
+++ b/bolt/include/bolt/Core/BinaryContext.h
@@ -71,14 +71,15 @@ struct SegmentInfo {
uint64_t FileOffset; /// Offset in the file.
uint64_t FileSize; /// Size in file.
uint64_t Alignment; /// Alignment of the segment.
+ bool IsExecutable; /// Is the executable bit set on the Segment?
void print(raw_ostream &OS) const {
- OS << "SegmentInfo { Address: 0x"
- << Twine::utohexstr(Address) << ", Size: 0x"
- << Twine::utohexstr(Size) << ", FileOffset: 0x"
+ OS << "SegmentInfo { Address: 0x" << Twine::utohexstr(Address)
+ << ", Size: 0x" << Twine::utohexstr(Size) << ", FileOffset: 0x"
<< Twine::utohexstr(FileOffset) << ", FileSize: 0x"
<< Twine::utohexstr(FileSize) << ", Alignment: 0x"
- << Twine::utohexstr(Alignment) << "}";
+ << Twine::utohexstr(Alignment) << ", " << (IsExecutable ? "x" : " ")
+ << "}";
};
};
diff --git a/bolt/include/bolt/Core/BinaryData.h b/bolt/include/bolt/Core/BinaryData.h
index 8a67b3e..6a773c4 100644
--- a/bolt/include/bolt/Core/BinaryData.h
+++ b/bolt/include/bolt/Core/BinaryData.h
@@ -226,7 +226,6 @@ inline raw_ostream &operator<<(raw_ostream &OS,
Sep = ",\n ";
TotalCount += AccessInfo.Count;
}
- SS.flush();
OS << TotalCount << " total counts : " << TempString;
return OS;
diff --git a/bolt/include/bolt/Core/BinaryFunction.h b/bolt/include/bolt/Core/BinaryFunction.h
index 6ebbaf9..fc0375b 100644
--- a/bolt/include/bolt/Core/BinaryFunction.h
+++ b/bolt/include/bolt/Core/BinaryFunction.h
@@ -117,7 +117,6 @@ inline raw_ostream &operator<<(raw_ostream &OS,
TotalCount += CSP.Count;
TotalMispreds += CSP.Mispreds;
}
- SS.flush();
OS << TotalCount << " (" << TotalMispreds << " misses) :" << TempString;
return OS;
diff --git a/bolt/include/bolt/Rewrite/RewriteInstance.h b/bolt/include/bolt/Rewrite/RewriteInstance.h
index 16a82d5..e5b7ad6 100644
--- a/bolt/include/bolt/Rewrite/RewriteInstance.h
+++ b/bolt/include/bolt/Rewrite/RewriteInstance.h
@@ -510,12 +510,11 @@ private:
};
/// Different types of X86-64 PLT sections.
- const PLTSectionInfo X86_64_PLTSections[4] = {
- { ".plt", 16 },
- { ".plt.got", 8 },
- { ".plt.sec", 8 },
- { nullptr, 0 }
- };
+ const PLTSectionInfo X86_64_PLTSections[5] = {{".plt", 16},
+ {".plt.got", 8},
+ {".plt.sec", 8},
+ {".iplt", 16},
+ {nullptr, 0}};
/// AArch64 PLT sections.
const PLTSectionInfo AArch64_PLTSections[4] = {
diff --git a/bolt/lib/Core/BinaryContext.cpp b/bolt/lib/Core/BinaryContext.cpp
index cd137f4..1347047 100644
--- a/bolt/lib/Core/BinaryContext.cpp
+++ b/bolt/lib/Core/BinaryContext.cpp
@@ -2021,6 +2021,9 @@ BinaryContext::getBaseAddressForMapping(uint64_t MMapAddress,
// Find a segment with a matching file offset.
for (auto &KV : SegmentMapInfo) {
const SegmentInfo &SegInfo = KV.second;
+ // Only consider executable segments.
+ if (!SegInfo.IsExecutable)
+ continue;
// FileOffset is obtained from the perf event and is equal to
// alignDown(SegInfo.FileOffset, pagesize).
// If the pagesize is not equal to SegInfo.Alignment.
diff --git a/bolt/lib/Passes/RetpolineInsertion.cpp b/bolt/lib/Passes/RetpolineInsertion.cpp
index 2808575..171177d 100644
--- a/bolt/lib/Passes/RetpolineInsertion.cpp
+++ b/bolt/lib/Passes/RetpolineInsertion.cpp
@@ -181,7 +181,6 @@ std::string createRetpolineFunctionTag(BinaryContext &BC,
if (BrInfo.isReg()) {
BC.InstPrinter->printRegName(TagOS, BrInfo.BranchReg);
TagOS << "_";
- TagOS.flush();
return Tag;
}
@@ -212,7 +211,6 @@ std::string createRetpolineFunctionTag(BinaryContext &BC,
BC.InstPrinter->printRegName(TagOS, MemRef.SegRegNum);
}
- TagOS.flush();
return Tag;
}
diff --git a/bolt/lib/Profile/DataAggregator.cpp b/bolt/lib/Profile/DataAggregator.cpp
index fcde6f5..0a63148 100644
--- a/bolt/lib/Profile/DataAggregator.cpp
+++ b/bolt/lib/Profile/DataAggregator.cpp
@@ -2043,7 +2043,8 @@ std::error_code DataAggregator::parseMMapEvents() {
// size of the mapping, but we know it should not exceed the segment
// alignment value. Hence we are performing an approximate check.
return SegInfo.Address >= MMapInfo.MMapAddress &&
- SegInfo.Address - MMapInfo.MMapAddress < SegInfo.Alignment;
+ SegInfo.Address - MMapInfo.MMapAddress < SegInfo.Alignment &&
+ SegInfo.IsExecutable;
});
if (!MatchFound) {
errs() << "PERF2BOLT-WARNING: ignoring mapping of " << NameToUse
diff --git a/bolt/lib/Rewrite/RewriteInstance.cpp b/bolt/lib/Rewrite/RewriteInstance.cpp
index adacb50d..32ec7ab 100644
--- a/bolt/lib/Rewrite/RewriteInstance.cpp
+++ b/bolt/lib/Rewrite/RewriteInstance.cpp
@@ -526,11 +526,9 @@ Error RewriteInstance::discoverStorage() {
NextAvailableOffset = std::max(NextAvailableOffset,
Phdr.p_offset + Phdr.p_filesz);
- BC->SegmentMapInfo[Phdr.p_vaddr] = SegmentInfo{Phdr.p_vaddr,
- Phdr.p_memsz,
- Phdr.p_offset,
- Phdr.p_filesz,
- Phdr.p_align};
+ BC->SegmentMapInfo[Phdr.p_vaddr] = SegmentInfo{
+ Phdr.p_vaddr, Phdr.p_memsz, Phdr.p_offset,
+ Phdr.p_filesz, Phdr.p_align, ((Phdr.p_flags & ELF::PF_X) != 0)};
if (BC->TheTriple->getArch() == llvm::Triple::x86_64 &&
Phdr.p_vaddr >= BinaryContext::KernelStartX86_64)
BC->IsLinuxKernel = true;
@@ -1533,7 +1531,7 @@ void RewriteInstance::createPLTBinaryFunction(uint64_t TargetAddress,
MCSymbol *Symbol = Rel->Symbol;
if (!Symbol) {
- if (!BC->isAArch64() || !Rel->Addend || !Rel->isIRelative())
+ if (BC->isRISCV() || !Rel->Addend || !Rel->isIRelative())
return;
// IFUNC trampoline without symbol
@@ -4247,7 +4245,6 @@ void RewriteInstance::addBoltInfoSection() {
<< "command line:";
for (int I = 0; I < Argc; ++I)
DescOS << " " << Argv[I];
- DescOS.flush();
// Encode as GNU GOLD VERSION so it is easily printable by 'readelf -n'
const std::string BoltInfo =
@@ -4270,7 +4267,6 @@ void RewriteInstance::encodeBATSection() {
raw_string_ostream DescOS(DescStr);
BAT->write(*BC, DescOS);
- DescOS.flush();
const std::string BoltInfo =
BinarySection::encodeELFNote("BOLT", DescStr, BinarySection::NT_BOLT_BAT);
diff --git a/bolt/lib/RuntimeLibs/InstrumentationRuntimeLibrary.cpp b/bolt/lib/RuntimeLibs/InstrumentationRuntimeLibrary.cpp
index 53a0c811..f3199eb 100644
--- a/bolt/lib/RuntimeLibs/InstrumentationRuntimeLibrary.cpp
+++ b/bolt/lib/RuntimeLibs/InstrumentationRuntimeLibrary.cpp
@@ -314,7 +314,6 @@ std::string InstrumentationRuntimeLibrary::buildTables(BinaryContext &BC) {
}
// Our string table lives immediately after the descriptions vector
OS << Summary->StringTable;
- OS.flush();
return TablesStr;
}
diff --git a/bolt/test/AArch64/ifunc.c b/bolt/test/AArch64/ifunc.test
index 1744976..3da42c6 100644
--- a/bolt/test/AArch64/ifunc.c
+++ b/bolt/test/AArch64/ifunc.test
@@ -1,8 +1,6 @@
-// This test checks that IFUNC trampoline is properly recognised by BOLT
-
// With -O0, the indirect call is performed through the IPLT trampoline. The
// IPLT trampoline has an IFUNC symbol.
-// RUN: %clang %cflags -nostdlib -O0 -no-pie %s -fuse-ld=lld \
+// RUN: %clang %cflags -nostdlib -O0 -no-pie %p/../Inputs/ifunc.c -fuse-ld=lld \
// RUN: -o %t.O0.exe -Wl,-q
// RUN: llvm-bolt %t.O0.exe -o %t.O0.bolt.exe \
// RUN: --print-disasm --print-only=_start | \
@@ -12,7 +10,7 @@
// A non-pie static executable doesn't generate PT_DYNAMIC; check that the
// relocation is read successfully and that the IPLT trampoline has been
// identified by BOLT.
-// RUN: %clang %cflags -nostdlib -O3 %s -fuse-ld=lld -no-pie \
+// RUN: %clang %cflags -nostdlib -O3 %p/../Inputs/ifunc.c -fuse-ld=lld -no-pie \
// RUN: -o %t.O3_nopie.exe -Wl,-q
// RUN: llvm-readelf -l %t.O3_nopie.exe | \
// RUN: FileCheck --check-prefix=NON_DYN_CHECK %s
@@ -25,7 +23,7 @@
// With -O3, the direct call is performed through the IPLT trampoline. The IPLT
// trampoline doesn't have an associated symbol. The ifunc symbol has the same
// address as the IFUNC resolver function.
-// RUN: %clang %cflags -nostdlib -O3 %s -fuse-ld=lld -fPIC -pie \
+// RUN: %clang %cflags -nostdlib -O3 %p/../Inputs/ifunc.c -fuse-ld=lld -fPIC -pie \
// RUN: -o %t.O3_pie.exe -Wl,-q
// RUN: llvm-bolt %t.O3_pie.exe -o %t.O3_pie.bolt.exe \
// RUN: --print-disasm --print-only=_start | \
@@ -35,8 +33,8 @@
// Check that IPLT trampolines located in the .plt section are normally handled
// by BOLT. The gnu-ld linker doesn't use a separate .iplt section.
-// RUN: %clang %cflags -nostdlib -O3 %s -fuse-ld=lld -fPIC -pie \
-// RUN: -T %p/Inputs/iplt.ld -o %t.iplt_O3_pie.exe -Wl,-q
+// RUN: %clang %cflags -nostdlib -O3 %p/../Inputs/ifunc.c -fuse-ld=lld -fPIC -pie \
+// RUN: -T %p/../Inputs/iplt.ld -o %t.iplt_O3_pie.exe -Wl,-q
// RUN: llvm-bolt %t.iplt_O3_pie.exe -o %t.iplt_O3_pie.bolt.exe \
// RUN: --print-disasm --print-only=_start | \
// RUN: FileCheck --check-prefix=CHECK %s
@@ -49,14 +47,3 @@
// REL_CHECK: R_AARCH64_IRELATIVE [[#%x,REL_SYMB_ADDR:]]
// REL_CHECK: [[#REL_SYMB_ADDR]] {{.*}} FUNC {{.*}} resolver_foo
-
-static void foo() {}
-static void bar() {}
-
-extern int use_foo;
-
-static void *resolver_foo(void) { return use_foo ? foo : bar; }
-
-__attribute__((ifunc("resolver_foo"))) void ifoo();
-
-void _start() { ifoo(); }
diff --git a/bolt/test/Inputs/ifunc.c b/bolt/test/Inputs/ifunc.c
new file mode 100644
index 0000000..3fa62be
--- /dev/null
+++ b/bolt/test/Inputs/ifunc.c
@@ -0,0 +1,12 @@
+// This test checks that the IFUNC trampoline is properly recognised by BOLT
+
+static void foo() {}
+static void bar() {}
+
+extern int use_foo;
+
+static void *resolver_foo(void) { return use_foo ? foo : bar; }
+
+__attribute__((ifunc("resolver_foo"))) void ifoo();
+
+void _start() { ifoo(); }
diff --git a/bolt/test/AArch64/Inputs/iplt.ld b/bolt/test/Inputs/iplt.ld
index 1e54a24..1e54a24 100644
--- a/bolt/test/AArch64/Inputs/iplt.ld
+++ b/bolt/test/Inputs/iplt.ld
diff --git a/bolt/test/X86/ifunc.test b/bolt/test/X86/ifunc.test
new file mode 100644
index 0000000..befefbe
--- /dev/null
+++ b/bolt/test/X86/ifunc.test
@@ -0,0 +1,47 @@
+// Check if BOLT can process ifunc symbols from .plt section
+// RUN: %clang %cflags -nostdlib -no-pie %p/../Inputs/ifunc.c -fuse-ld=lld \
+// RUN: -o %t.exe -Wl,-q
+// RUN: llvm-bolt %t.exe -o %t.bolt.exe \
+// RUN: --print-disasm --print-only=_start | \
+// RUN: FileCheck --check-prefix=CHECK %s
+// RUN: llvm-readelf -aW %t.bolt.exe | \
+// RUN: FileCheck --check-prefix=REL_CHECK %s
+
+// Check if BOLT can process ifunc symbols from .plt section in non-pie static
+// executable case.
+// RUN: %clang %cflags -nostdlib %p/../Inputs/ifunc.c -fuse-ld=lld -no-pie \
+// RUN: -o %t.nopie.exe -Wl,-q
+// RUN: llvm-readelf -l %t.nopie.exe | \
+// RUN: FileCheck --check-prefix=NON_DYN_CHECK %s
+// RUN: llvm-bolt %t.nopie.exe -o %t.nopie.bolt.exe \
+// RUN: --print-disasm --print-only=_start | \
+// RUN: FileCheck --check-prefix=CHECK %s
+// RUN: llvm-readelf -aW %t.nopie.bolt.exe | \
+// RUN: FileCheck --check-prefix=REL_CHECK %s
+
+// Check if BOLT can process ifunc symbols from .plt section in pie executable
+// case.
+// RUN: %clang %cflags -nostdlib %p/../Inputs/ifunc.c -fuse-ld=lld -fPIC -pie \
+// RUN: -o %t.pie.exe -Wl,-q
+// RUN: llvm-bolt %t.pie.exe -o %t.pie.bolt.exe \
+// RUN: --print-disasm --print-only=_start | \
+// RUN: FileCheck --check-prefix=CHECK %s
+// RUN: llvm-readelf -aW %t.pie.bolt.exe | \
+// RUN: FileCheck --check-prefix=REL_CHECK %s
+
+// Check that IPLT trampolines located in the .plt section are normally handled
+// by BOLT. The gnu-ld linker doesn't use a separate .iplt section.
+// RUN: %clang %cflags -nostdlib %p/../Inputs/ifunc.c -fuse-ld=lld -fPIC -pie \
+// RUN: -T %p/../Inputs/iplt.ld -o %t.iplt_pie.exe -Wl,-q
+// RUN: llvm-bolt %t.iplt_pie.exe -o %t.iplt_pie.bolt.exe \
+// RUN: --print-disasm --print-only=_start | \
+// RUN: FileCheck --check-prefix=CHECK %s
+// RUN: llvm-readelf -aW %t.iplt_pie.bolt.exe | \
+// RUN: FileCheck --check-prefix=REL_CHECK %s
+
+// NON_DYN_CHECK-NOT: DYNAMIC
+
+// CHECK: callq "resolver_foo/1@PLT"
+
+// REL_CHECK: R_X86_64_IRELATIVE [[#%x,REL_SYMB_ADDR:]]
+// REL_CHECK: [[#REL_SYMB_ADDR]] {{.*}} FUNC {{.*}} resolver_foo
diff --git a/bolt/test/X86/log.test b/bolt/test/X86/log.test
index 42109db..2c006e9 100644
--- a/bolt/test/X86/log.test
+++ b/bolt/test/X86/log.test
@@ -6,7 +6,7 @@ RUN: yaml2obj %p/Inputs/blarge.yaml &> %t.exe
RUN: llvm-bolt %t.exe -o %t.null --data %p/Inputs/blarge.fdata -v=2 \
RUN: --reorder-blocks=normal --print-finalized --log-file=%t.log 2>&1 \
RUN: | FileCheck --check-prefix=CHECK --allow-empty %s
-RUN: cat %t.log | FileCheck %s --check-prefix=CHECK-LOG
+RUN: FileCheck %s --check-prefix=CHECK-LOG --input-file %t.log
CHECK-NOT: BOLT-INFO
CHECK-NOT: BOLT-WARNING
@@ -16,4 +16,4 @@ CHECK-NOT: BOLT-ERROR
CHECK-LOG: BOLT-INFO: Target architecture
CHECK-LOG: BOLT-INFO: BOLT version
CHECK-LOG: BOLT-INFO: basic block reordering modified layout
-CHECK-LOG: Binary Function "usqrt"
+CHECK-LOG: Binary Function "main"
diff --git a/bolt/test/perf2bolt/perf_test.test b/bolt/test/perf2bolt/perf_test.test
index 44db899..7bec442 100644
--- a/bolt/test/perf2bolt/perf_test.test
+++ b/bolt/test/perf2bolt/perf_test.test
@@ -3,15 +3,12 @@
REQUIRES: system-linux, perf
RUN: %clang %S/Inputs/perf_test.c -fuse-ld=lld -Wl,--script=%S/Inputs/perf_test.lds -o %t
-RUN: perf record -e cycles:u -o %t2 -- %t
+RUN: perf record -Fmax -e cycles:u -o %t2 -- %t
RUN: perf2bolt %t -p=%t2 -o %t3 -nl -ignore-build-id 2>&1 | FileCheck %s
CHECK-NOT: PERF2BOLT-ERROR
CHECK-NOT: !! WARNING !! This high mismatch ratio indicates the input binary is probably not the same binary used during profiling collection.
RUN: %clang %S/Inputs/perf_test.c -no-pie -fuse-ld=lld -o %t4
-RUN: perf record -e cycles:u -o %t5 -- %t4
-RUN: perf2bolt %t4 -p=%t5 -o %t6 -nl -ignore-build-id 2>&1 | FileCheck %s --check-prefix=CHECK-NO-PIE
-
-CHECK-NO-PIE-NOT: PERF2BOLT-ERROR
-CHECK-NO-PIE-NOT: !! WARNING !! This high mismatch ratio indicates the input binary is probably not the same binary used during profiling collection. \ No newline at end of file
+RUN: perf record -Fmax -e cycles:u -o %t5 -- %t4
+RUN: perf2bolt %t4 -p=%t5 -o %t6 -nl -ignore-build-id 2>&1 | FileCheck %s
diff --git a/bolt/unittests/Core/BinaryContext.cpp b/bolt/unittests/Core/BinaryContext.cpp
index 6c32881..05b898d 100644
--- a/bolt/unittests/Core/BinaryContext.cpp
+++ b/bolt/unittests/Core/BinaryContext.cpp
@@ -160,13 +160,14 @@ TEST_P(BinaryContextTester, FlushPendingRelocJUMP26) {
TEST_P(BinaryContextTester, BaseAddress) {
// Check that base address calculation is correct for a binary with the
// following segment layout:
- BC->SegmentMapInfo[0] = SegmentInfo{0, 0x10e8c2b4, 0, 0x10e8c2b4, 0x1000};
+ BC->SegmentMapInfo[0] =
+ SegmentInfo{0, 0x10e8c2b4, 0, 0x10e8c2b4, 0x1000, true};
BC->SegmentMapInfo[0x10e8d2b4] =
- SegmentInfo{0x10e8d2b4, 0x3952faec, 0x10e8c2b4, 0x3952faec, 0x1000};
+ SegmentInfo{0x10e8d2b4, 0x3952faec, 0x10e8c2b4, 0x3952faec, 0x1000, true};
BC->SegmentMapInfo[0x4a3bddc0] =
- SegmentInfo{0x4a3bddc0, 0x148e828, 0x4a3bbdc0, 0x148e828, 0x1000};
+ SegmentInfo{0x4a3bddc0, 0x148e828, 0x4a3bbdc0, 0x148e828, 0x1000, true};
BC->SegmentMapInfo[0x4b84d5e8] =
- SegmentInfo{0x4b84d5e8, 0x294f830, 0x4b84a5e8, 0x3d3820, 0x1000};
+ SegmentInfo{0x4b84d5e8, 0x294f830, 0x4b84a5e8, 0x3d3820, 0x1000, true};
std::optional<uint64_t> BaseAddress =
BC->getBaseAddressForMapping(0x7f13f5556000, 0x10e8c000);
@@ -181,13 +182,13 @@ TEST_P(BinaryContextTester, BaseAddress2) {
// Check that base address calculation is correct for a binary if the
// alignments in the ELF file differ from the page size.
// The segment layout is as follows:
- BC->SegmentMapInfo[0] = SegmentInfo{0, 0x2177c, 0, 0x2177c, 0x10000};
+ BC->SegmentMapInfo[0] = SegmentInfo{0, 0x2177c, 0, 0x2177c, 0x10000, true};
BC->SegmentMapInfo[0x31860] =
- SegmentInfo{0x31860, 0x370, 0x21860, 0x370, 0x10000};
+ SegmentInfo{0x31860, 0x370, 0x21860, 0x370, 0x10000, true};
BC->SegmentMapInfo[0x41c20] =
- SegmentInfo{0x41c20, 0x1f8, 0x21c20, 0x1f8, 0x10000};
+ SegmentInfo{0x41c20, 0x1f8, 0x21c20, 0x1f8, 0x10000, true};
BC->SegmentMapInfo[0x54e18] =
- SegmentInfo{0x54e18, 0x51, 0x24e18, 0x51, 0x10000};
+ SegmentInfo{0x54e18, 0x51, 0x24e18, 0x51, 0x10000, true};
std::optional<uint64_t> BaseAddress =
BC->getBaseAddressForMapping(0xaaaaea444000, 0x21000);
@@ -197,3 +198,22 @@ TEST_P(BinaryContextTester, BaseAddress2) {
BaseAddress = BC->getBaseAddressForMapping(0xaaaaea444000, 0x11000);
ASSERT_FALSE(BaseAddress.has_value());
}
+
+TEST_P(BinaryContextTester, BaseAddressSegmentsSmallerThanAlignment) {
+ // Check that the correct segment is used to compute the base address
+ // when multiple segments are close together in the ELF file (closer
+ // than the required alignment in the process space).
+ // See https://github.com/llvm/llvm-project/issues/109384
+ BC->SegmentMapInfo[0] = SegmentInfo{0, 0x1d1c, 0, 0x1d1c, 0x10000, false};
+ BC->SegmentMapInfo[0x11d40] =
+ SegmentInfo{0x11d40, 0x11e0, 0x1d40, 0x11e0, 0x10000, true};
+ BC->SegmentMapInfo[0x22f20] =
+ SegmentInfo{0x22f20, 0x10e0, 0x2f20, 0x1f0, 0x10000, false};
+ BC->SegmentMapInfo[0x33110] =
+ SegmentInfo{0x33110, 0x89, 0x3110, 0x88, 0x10000, false};
+
+ std::optional<uint64_t> BaseAddress =
+ BC->getBaseAddressForMapping(0xaaaaaaab1000, 0x1000);
+ ASSERT_TRUE(BaseAddress.has_value());
+ ASSERT_EQ(*BaseAddress, 0xaaaaaaaa0000ULL);
+} \ No newline at end of file
diff --git a/clang-tools-extra/docs/clang-tidy/ExternalClang-TidyExamples.rst b/clang-tools-extra/docs/clang-tidy/ExternalClang-TidyExamples.rst
new file mode 100644
index 0000000..3d654e2
--- /dev/null
+++ b/clang-tools-extra/docs/clang-tidy/ExternalClang-TidyExamples.rst
@@ -0,0 +1,30 @@
+============================
+External Clang-Tidy Examples
+============================
+
+Introduction
+============
+
+This page provides examples of what people have done with :program:`clang-tidy`
+that might serve as useful guides (or starting points) for developing your own
+checks. They can also be helpful for practical necessities such as writing the
+``CMakeLists.txt`` for an out-of-tree plugin of :program:`clang-tidy` checks.
+
+If you know of (or wrote!) a tool or project using :program:`clang-tidy`, please share it
+on `the Discourse forums (Clang Frontend category)
+<https://discourse.llvm.org/c/clang/6>`_ for wider visibility and open a
+pull request on `LLVM Github`_ to have it added here. Since the primary purpose of
+this page is to provide examples that can help developers, the listed projects should
+have code available.
+
+Since :program:`clang-tidy` builds on Clang infrastructure such as the AST
+Matchers and diagnostics, `External Clang Examples`_ may also be a useful
+reference.
+
+.. _LLVM Github: https://github.com/llvm/llvm-project
+.. _External Clang Examples: https://clang.llvm.org/docs/ExternalClangExamples.html
+
+List of projects and tools
+==========================
+
+`<https://github.com/coveooss/clang-tidy-plugin-examples>`_
+ "This folder contains :program:`clang-tidy` plugins."
diff --git a/clang-tools-extra/docs/clang-tidy/index.rst b/clang-tools-extra/docs/clang-tidy/index.rst
index c8fc34c..e38141b 100644
--- a/clang-tools-extra/docs/clang-tidy/index.rst
+++ b/clang-tools-extra/docs/clang-tidy/index.rst
@@ -12,6 +12,7 @@ See also:
The list of clang-tidy checks <checks/list>
Clang-tidy IDE/Editor Integrations <Integrations>
Getting Involved <Contributing>
+ External Clang-Tidy Examples <ExternalClang-TidyExamples>
:program:`clang-tidy` is a clang-based C++ "linter" tool. Its purpose is to
provide an extensible framework for diagnosing and fixing typical programming
diff --git a/clang/CMakeLists.txt b/clang/CMakeLists.txt
index 94ce596..27e8095 100644
--- a/clang/CMakeLists.txt
+++ b/clang/CMakeLists.txt
@@ -917,7 +917,7 @@ if (CLANG_BOLT AND NOT LLVM_BUILD_INSTRUMENTED)
-data ${BOLT_FDATA}
-reorder-blocks=ext-tsp -reorder-functions=cdsort -split-functions
-split-all-cold -split-eh -dyno-stats -use-gnu-stack
- -split-strategy=cdsplit -update-debug-sections
+ -update-debug-sections
${BOLT_NO_LBR}
COMMENT "Optimizing Clang with BOLT"
USES_TERMINAL
diff --git a/clang/docs/LanguageExtensions.rst b/clang/docs/LanguageExtensions.rst
index 0c6b9b1..f4be97047 100644
--- a/clang/docs/LanguageExtensions.rst
+++ b/clang/docs/LanguageExtensions.rst
specify the starting offset to begin embedding from. The resource is treated
as being empty if the specified offset is larger than the number of bytes in
the resource. The offset will be applied *before* any ``limit`` parameters are
applied.
+
+Union and aggregate initialization in C
+=======================================
+
+In C23 (N2900), when an object is initialized from initializer ``= {}``, all
+elements of arrays, all members of structs, and the first members of unions are
+empty-initialized recursively. In addition, all padding bits are initialized to
+zero.
+
+Clang guarantees the following behaviors:
+
+* ``1:`` Clang supports the initializer ``= {}`` described above in all C
+  standards.
+
+* ``2:`` When unions are initialized from the initializer ``= {}``, bytes
+  outside the first member of the union are also initialized to zero.
+
+* ``3:`` When unions, structures, and arrays are initialized from the
+  initializer ``= { initializer-list }``, all members not explicitly
+  initialized in the initializer list are empty-initialized recursively. In
+  addition, all padding bits are initialized to zero.
+
+Currently, the above extension only applies to C source code, not C++.
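To make guarantees 2 and 3 concrete, here is a minimal C sketch (the type and
variable names are illustrative only, not part of the patch):

    union U {
      char c;        /* first member, 1 byte */
      long long ll;  /* widens the union to 8 bytes */
    };

    struct S {
      char c;        /* usually followed by padding before 'x' */
      int x, y;
    };

    void demo(void) {
      /* Guarantee 2: every byte of 'u' is zero, including the bytes that
         lie outside the first member 'c'. */
      union U u = {};

      /* Guarantee 3: 's.y' is empty-initialized to zero and the padding
         after 's.c' is zeroed, even though only 's.c' and 's.x' appear in
         the initializer list. */
      struct S s = {'a', 1};
      (void)u;
      (void)s;
    }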
diff --git a/clang/docs/OpenMPSupport.rst b/clang/docs/OpenMPSupport.rst
index 72f1385..6c7afc1 100644
--- a/clang/docs/OpenMPSupport.rst
+++ b/clang/docs/OpenMPSupport.rst
@@ -294,7 +294,7 @@ implementation.
+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
| misc | error directive | :good:`done` | D139166 |
+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| misc | scope construct | :none:`worked on` | D157933 |
+| misc | scope construct | :good:`done` | D157933, https://github.com/llvm/llvm-project/pull/109197 |
+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
| misc | routines for controlling and querying team regions | :part:`partial` | D95003 (libomp only) |
+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
diff --git a/clang/docs/ReleaseNotes.rst b/clang/docs/ReleaseNotes.rst
index da52050..5923888 100644
--- a/clang/docs/ReleaseNotes.rst
+++ b/clang/docs/ReleaseNotes.rst
@@ -43,7 +43,7 @@ code bases.
still supporting SPARC V8 CPUs need to specify ``-mcpu=v8`` with a
`config file
<https://clang.llvm.org/docs/UsersManual.html#configuration-files>`_.
-
+
- The ``clang-rename`` tool has been removed.
C/C++ Language Potentially Breaking Changes
@@ -115,7 +115,7 @@ C++ Language Changes
- Allow single element access of GCC vector/ext_vector_type object to be
constant expression. Supports the `V.xyzw` syntax and other tidbits
as seen in OpenCL. Selecting multiple elements is left as a future work.
-- Implement `CWG1815 <https://wg21.link/CWG1815>`_. Support lifetime extension
+- Implement `CWG1815 <https://wg21.link/CWG1815>`_. Support lifetime extension
of temporary created by aggregate initialization using a default member
initializer.
@@ -268,6 +268,14 @@ Attribute Changes in Clang
- Introduced a new attribute ``[[clang::coro_await_elidable_argument]]`` on function parameters
to propagate safe elide context to arguments if such function is also under a safe elide context.
+- The documentation of the ``[[clang::musttail]]`` attribute was updated to
+ note that the lifetimes of all local variables end before the call. This does
+ not change the behaviour of the compiler, as this was true for previous
+ versions.
+
+- Fix a bug where clang doesn't automatically apply the ``[[gsl::Owner]]`` or
+ ``[[gsl::Pointer]]`` to STL explicit template specialization decls. (#GH109442)
+
Improvements to Clang's diagnostics
-----------------------------------
@@ -324,6 +332,10 @@ Improvements to Clang's diagnostics
- Don't emit bogus dangling diagnostics when ``[[gsl::Owner]]`` and `[[clang::lifetimebound]]` are used together (#GH108272).
+- The ``-Wreturn-stack-address`` warning now also warns about addresses of
+ local variables passed to function calls using the ``[[clang::musttail]]``
+ attribute.
+
Improvements to Clang's time-trace
----------------------------------
@@ -440,6 +452,9 @@ Miscellaneous Clang Crashes Fixed
- Fixed ``-ast-dump`` crashes on codes involving ``concept`` with ``-ast-dump-decl-types``. (#GH94928)
+- Fixed internal assertion firing when a declaration in the implicit global
+ module is found through ADL. (#GH109879)
+
OpenACC Specific Changes
------------------------
@@ -625,6 +640,7 @@ Python Binding Changes
OpenMP Support
--------------
- Added support for 'omp assume' directive.
+- Added support for 'omp scope' directive.
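A minimal C sketch of the newly supported construct (assuming OpenMP 5.1
semantics of 'omp scope'; compile with -fopenmp, variable names are
illustrative only):

    #include <omp.h>
    #include <stdio.h>

    int main(void) {
      int sum = 0;
    #pragma omp parallel
      {
        /* 'omp scope' opens a structured region with its own data
           environment; each thread's private copy of 'sum' is combined
           by the reduction when the region ends. */
    #pragma omp scope reduction(+ : sum)
        { sum += omp_get_thread_num(); }
      }
      printf("sum = %d\n", sum);
      return 0;
    }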
Improvements
^^^^^^^^^^^^
diff --git a/clang/include/clang/Basic/AttrDocs.td b/clang/include/clang/Basic/AttrDocs.td
index 8ef151b..f23a148 100644
--- a/clang/include/clang/Basic/AttrDocs.td
+++ b/clang/include/clang/Basic/AttrDocs.td
@@ -637,6 +637,12 @@ return value must be trivially destructible. The calling convention of the
caller and callee must match, and they must not be variadic functions or have
old style K&R C function declarations.
+The lifetimes of all local variables and function parameters end immediately
+before the call to the function. This means that it is undefined behaviour to
+pass a pointer or reference to a local variable to the called function, which
+is not the case without the attribute. Clang will emit a warning in common
+cases where this happens.
+
``clang::musttail`` provides assurances that the tail call can be optimized on
all targets, not just one.
}];
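The pattern the new warning catches can be sketched in a few lines of C (the
function names are hypothetical; ``__attribute__((musttail))`` is the
spelling of the statement attribute that also works in C):

    int use(int *p);

    /* The caller's signature matches the callee's, as musttail requires. */
    int caller(int *p) {
      int local = 0;
      /* The lifetime of 'local' ends before the tail call, so passing its
         address is undefined behaviour; Clang warns here under
         -Wreturn-stack-address. */
      __attribute__((musttail)) return use(&local);
    }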
diff --git a/clang/include/clang/Basic/BuiltinsNVPTX.def b/clang/include/clang/Basic/BuiltinsNVPTX.def
index 20f038a..6fff562 100644
--- a/clang/include/clang/Basic/BuiltinsNVPTX.def
+++ b/clang/include/clang/Basic/BuiltinsNVPTX.def
@@ -599,14 +599,6 @@ TARGET_BUILTIN(__nvvm_e4m3x2_to_f16x2_rn_relu, "V2hs", "", AND(SM_89,PTX81))
TARGET_BUILTIN(__nvvm_e5m2x2_to_f16x2_rn, "V2hs", "", AND(SM_89,PTX81))
TARGET_BUILTIN(__nvvm_e5m2x2_to_f16x2_rn_relu, "V2hs", "", AND(SM_89,PTX81))
-// Bitcast
-
-BUILTIN(__nvvm_bitcast_f2i, "if", "")
-BUILTIN(__nvvm_bitcast_i2f, "fi", "")
-
-BUILTIN(__nvvm_bitcast_ll2d, "dLLi", "")
-BUILTIN(__nvvm_bitcast_d2ll, "LLid", "")
-
// FNS
TARGET_BUILTIN(__nvvm_fns, "UiUiUii", "n", PTX60)
diff --git a/clang/include/clang/Basic/CodeGenOptions.def b/clang/include/clang/Basic/CodeGenOptions.def
index b600198..2893377 100644
--- a/clang/include/clang/Basic/CodeGenOptions.def
+++ b/clang/include/clang/Basic/CodeGenOptions.def
@@ -96,6 +96,7 @@ CODEGENOPT(EmulatedTLS , 1, 0) ///< Set by default or -f[no-]emulated-tls.
ENUM_CODEGENOPT(EmbedBitcode, EmbedBitcodeKind, 2, Embed_Off)
/// Inline asm dialect, -masm=(att|intel)
ENUM_CODEGENOPT(InlineAsmDialect, InlineAsmDialectKind, 1, IAD_ATT)
+CODEGENOPT(OutputAsmVariant, 2, 3) ///< Set the asm variant for output (3: unspecified).
CODEGENOPT(ForbidGuardVariables , 1, 0) ///< Issue errors if C++ guard variables
///< are required.
CODEGENOPT(FunctionSections , 1, 0) ///< Set when -ffunction-sections is enabled.
diff --git a/clang/include/clang/Basic/DiagnosticSemaKinds.td b/clang/include/clang/Basic/DiagnosticSemaKinds.td
index ba813af..e4e04bf 100644
--- a/clang/include/clang/Basic/DiagnosticSemaKinds.td
+++ b/clang/include/clang/Basic/DiagnosticSemaKinds.td
@@ -10101,11 +10101,15 @@ def err_lifetimebound_ctor_dtor : Error<
// CHECK: returning address/reference of stack memory
def warn_ret_stack_addr_ref : Warning<
"%select{address of|reference to}0 stack memory associated with "
- "%select{local variable|parameter|compound literal}2 %1 returned">,
+ "%select{local variable|parameter|compound literal}2 %1 "
+ "%select{returned|passed to musttail function}3">,
InGroup<ReturnStackAddress>;
def warn_ret_local_temp_addr_ref : Warning<
"returning %select{address of|reference to}0 local temporary object">,
InGroup<ReturnStackAddress>;
+def warn_musttail_local_temp_addr_ref : Warning<
+ "passing %select{address of|reference to}0 local temporary object to musttail function">,
+ InGroup<ReturnStackAddress>;
def err_ret_local_temp_ref : Error<
"returning reference to local temporary object">;
def warn_ret_addr_label : Warning<
diff --git a/clang/include/clang/Driver/Options.td b/clang/include/clang/Driver/Options.td
index 376d7d4..23bd686 100644
--- a/clang/include/clang/Driver/Options.td
+++ b/clang/include/clang/Driver/Options.td
@@ -6352,8 +6352,10 @@ def mcx16 : Flag<["-"], "mcx16">, Group<m_x86_Features_Group>;
def mno_cx16 : Flag<["-"], "mno-cx16">, Group<m_x86_Features_Group>;
def menqcmd : Flag<["-"], "menqcmd">, Group<m_x86_Features_Group>;
def mno_enqcmd : Flag<["-"], "mno-enqcmd">, Group<m_x86_Features_Group>;
-def mevex512 : Flag<["-"], "mevex512">, Group<m_x86_Features_Group>;
-def mno_evex512 : Flag<["-"], "mno-evex512">, Group<m_x86_Features_Group>;
+def mevex512 : Flag<["-"], "mevex512">, Group<m_x86_Features_Group>,
+ Visibility<[ClangOption, CLOption, FlangOption]>;
+def mno_evex512 : Flag<["-"], "mno-evex512">, Group<m_x86_Features_Group>,
+ Visibility<[ClangOption, CLOption, FlangOption]>;
def mf16c : Flag<["-"], "mf16c">, Group<m_x86_Features_Group>;
def mno_f16c : Flag<["-"], "mno-f16c">, Group<m_x86_Features_Group>;
def mfma : Flag<["-"], "mfma">, Group<m_x86_Features_Group>;
@@ -7215,6 +7217,9 @@ def fuse_ctor_homing: Flag<["-"], "fuse-ctor-homing">,
def as_secure_log_file : Separate<["-"], "as-secure-log-file">,
HelpText<"Emit .secure_log_unique directives to this filename.">,
MarshallingInfoString<CodeGenOpts<"AsSecureLogFile">>;
+def output_asm_variant : Joined<["--"], "output-asm-variant=">,
+ HelpText<"Select the asm variant (integer) to use for output (3: unspecified)">,
+ MarshallingInfoInt<CodeGenOpts<"OutputAsmVariant">, "3">;
} // let Visibility = [CC1Option, CC1AsOption]
@@ -8305,8 +8310,6 @@ def filetype : Separate<["-"], "filetype">,
HelpText<"Specify the output file type ('asm', 'null', or 'obj')">;
// Transliterate Options
-def output_asm_variant : Separate<["-"], "output-asm-variant">,
- HelpText<"Select the asm variant index to use for output">;
def show_encoding : Flag<["-"], "show-encoding">,
HelpText<"Show instruction encoding information in transliterate mode">;
def show_inst : Flag<["-"], "show-inst">,
diff --git a/clang/include/clang/Frontend/ASTUnit.h b/clang/include/clang/Frontend/ASTUnit.h
index 0808448..8cefae8 100644
--- a/clang/include/clang/Frontend/ASTUnit.h
+++ b/clang/include/clang/Frontend/ASTUnit.h
@@ -692,8 +692,8 @@ public:
///
/// \returns - The initialized ASTUnit or null if the AST failed to load.
static std::unique_ptr<ASTUnit>
- LoadFromASTFile(const std::string &Filename,
- const PCHContainerReader &PCHContainerRdr, WhatToLoad ToLoad,
+ LoadFromASTFile(StringRef Filename, const PCHContainerReader &PCHContainerRdr,
+ WhatToLoad ToLoad,
IntrusiveRefCntPtr<DiagnosticsEngine> Diags,
const FileSystemOptions &FileSystemOpts,
std::shared_ptr<HeaderSearchOptions> HSOpts,
diff --git a/clang/include/clang/Frontend/MultiplexConsumer.h b/clang/include/clang/Frontend/MultiplexConsumer.h
index 3a7670d..b190750 100644
--- a/clang/include/clang/Frontend/MultiplexConsumer.h
+++ b/clang/include/clang/Frontend/MultiplexConsumer.h
@@ -53,6 +53,7 @@ class MultiplexConsumer : public SemaConsumer {
public:
// Takes ownership of the pointers in C.
MultiplexConsumer(std::vector<std::unique_ptr<ASTConsumer>> C);
+ MultiplexConsumer(std::unique_ptr<ASTConsumer> C);
~MultiplexConsumer() override;
// ASTConsumer
@@ -80,7 +81,7 @@ public:
void InitializeSema(Sema &S) override;
void ForgetSema() override;
-private:
+protected:
std::vector<std::unique_ptr<ASTConsumer>> Consumers; // Owns these.
std::unique_ptr<MultiplexASTMutationListener> MutationListener;
std::unique_ptr<MultiplexASTDeserializationListener> DeserializationListener;
diff --git a/clang/include/clang/Interpreter/Interpreter.h b/clang/include/clang/Interpreter/Interpreter.h
index 1234608..1230a3a 100644
--- a/clang/include/clang/Interpreter/Interpreter.h
+++ b/clang/include/clang/Interpreter/Interpreter.h
@@ -14,11 +14,9 @@
#ifndef LLVM_CLANG_INTERPRETER_INTERPRETER_H
#define LLVM_CLANG_INTERPRETER_INTERPRETER_H
-#include "clang/AST/Decl.h"
#include "clang/AST/GlobalDecl.h"
#include "clang/Interpreter/PartialTranslationUnit.h"
#include "clang/Interpreter/Value.h"
-#include "clang/Sema/Ownership.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ExecutionEngine/JITSymbol.h"
@@ -38,6 +36,9 @@ class ThreadSafeContext;
namespace clang {
class CompilerInstance;
+class CodeGenerator;
+class CXXRecordDecl;
+class Decl;
class IncrementalExecutor;
class IncrementalParser;
@@ -77,26 +78,26 @@ private:
llvm::StringRef CudaSDKPath;
};
-/// Generate glue code between the Interpreter's built-in runtime and user code.
-class RuntimeInterfaceBuilder {
-public:
- virtual ~RuntimeInterfaceBuilder() = default;
-
- using TransformExprFunction = ExprResult(RuntimeInterfaceBuilder *Builder,
- Expr *, ArrayRef<Expr *>);
- virtual TransformExprFunction *getPrintValueTransformer() = 0;
-};
+class IncrementalAction;
+class InProcessPrintingASTConsumer;
/// Provides top-level interfaces for incremental compilation and execution.
class Interpreter {
+ friend class Value;
+ friend InProcessPrintingASTConsumer;
+
std::unique_ptr<llvm::orc::ThreadSafeContext> TSCtx;
+ /// Long-lived, incremental parsing action.
+ std::unique_ptr<IncrementalAction> Act;
std::unique_ptr<IncrementalParser> IncrParser;
std::unique_ptr<IncrementalExecutor> IncrExecutor;
- std::unique_ptr<RuntimeInterfaceBuilder> RuntimeIB;
// An optional parser for CUDA offloading
std::unique_ptr<IncrementalParser> DeviceParser;
+ /// List containing information about each incrementally parsed piece of code.
+ std::list<PartialTranslationUnit> PTUs;
+
unsigned InitPTUSize = 0;
// This member holds the last result of the value printing. It's a class
@@ -104,15 +105,18 @@ class Interpreter {
// printing happens, it's in an invalid state.
Value LastValue;
- // Add a call to an Expr to report its result. We query the function from
- // RuntimeInterfaceBuilder once and store it as a function pointer to avoid
- // frequent virtual function calls.
- RuntimeInterfaceBuilder::TransformExprFunction *AddPrintValueCall = nullptr;
+ /// When CodeGen is created, the first llvm::Module gets cached in many
+ /// places, and we must keep it alive.
+ std::unique_ptr<llvm::Module> CachedInCodeGenModule;
+
+ /// Compiler instance performing the incremental compilation.
+ std::unique_ptr<CompilerInstance> CI;
protected:
// Derived classes can use an extended interface of the Interpreter.
- Interpreter(std::unique_ptr<CompilerInstance> CI, llvm::Error &Err,
- std::unique_ptr<llvm::orc::LLJITBuilder> JITBuilder = nullptr);
+ Interpreter(std::unique_ptr<CompilerInstance> Instance, llvm::Error &Err,
+ std::unique_ptr<llvm::orc::LLJITBuilder> JITBuilder = nullptr,
+ std::unique_ptr<clang::ASTConsumer> Consumer = nullptr);
// Create the internal IncrementalExecutor, or re-create it after calling
// ResetExecutor().
@@ -122,15 +126,8 @@ protected:
// JIT engine. In particular, it doesn't run cleanup or destructors.
void ResetExecutor();
- // Lazily construct the RuntimeInterfaceBuilder. The provided instance will be
- // used for the entire lifetime of the interpreter. The default implementation
- // targets the in-process __clang_Interpreter runtime. Override this to use a
- // custom runtime.
- virtual std::unique_ptr<RuntimeInterfaceBuilder> FindRuntimeInterface();
-
public:
virtual ~Interpreter();
-
static llvm::Expected<std::unique_ptr<Interpreter>>
create(std::unique_ptr<CompilerInstance> CI);
static llvm::Expected<std::unique_ptr<Interpreter>>
@@ -145,7 +142,6 @@ public:
llvm::Expected<PartialTranslationUnit &> Parse(llvm::StringRef Code);
llvm::Error Execute(PartialTranslationUnit &T);
llvm::Error ParseAndExecute(llvm::StringRef Code, Value *V = nullptr);
- llvm::Expected<llvm::orc::ExecutorAddr> CompileDtorCall(CXXRecordDecl *CXXRD);
/// Undo N previous incremental inputs.
llvm::Error Undo(unsigned N = 1);
@@ -167,8 +163,6 @@ public:
llvm::Expected<llvm::orc::ExecutorAddr>
getSymbolAddressFromLinkerName(llvm::StringRef LinkerName) const;
- enum InterfaceKind { NoAlloc, WithAlloc, CopyArray, NewTag };
-
const llvm::SmallVectorImpl<Expr *> &getValuePrintingInfo() const {
return ValuePrintingInfo;
}
@@ -178,7 +172,15 @@ public:
private:
size_t getEffectivePTUSize() const;
void markUserCodeStart();
+ llvm::Expected<Expr *> ExtractValueFromExpr(Expr *E);
+ llvm::Expected<llvm::orc::ExecutorAddr> CompileDtorCall(CXXRecordDecl *CXXRD);
+
+ CodeGenerator *getCodeGen() const;
+ std::unique_ptr<llvm::Module> GenModule();
+ PartialTranslationUnit &RegisterPTU(TranslationUnitDecl *TU);
+ // A cache for the compiled destructors, used for deallocation of managed
+ // clang::Values.
llvm::DenseMap<CXXRecordDecl *, llvm::orc::ExecutorAddr> Dtors;
llvm::SmallVector<Expr *, 4> ValuePrintingInfo;
diff --git a/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h b/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h
index 2f6cd48..eef7a54 100644
--- a/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h
+++ b/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h
@@ -326,14 +326,14 @@ public:
/// \param ITraits information about special handling for particular regions
/// or symbols.
[[nodiscard]] ProgramStateRef
- invalidateRegions(ArrayRef<const MemRegion *> Regions, const Expr *E,
+ invalidateRegions(ArrayRef<const MemRegion *> Regions, const Stmt *S,
unsigned BlockCount, const LocationContext *LCtx,
bool CausesPointerEscape, InvalidatedSymbols *IS = nullptr,
const CallEvent *Call = nullptr,
RegionAndSymbolInvalidationTraits *ITraits = nullptr) const;
[[nodiscard]] ProgramStateRef
- invalidateRegions(ArrayRef<SVal> Values, const Expr *E, unsigned BlockCount,
+ invalidateRegions(ArrayRef<SVal> Values, const Stmt *S, unsigned BlockCount,
const LocationContext *LCtx, bool CausesPointerEscape,
InvalidatedSymbols *IS = nullptr,
const CallEvent *Call = nullptr,
diff --git a/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h b/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h
index 6eedaf0..ec2b2b2 100644
--- a/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h
+++ b/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h
@@ -202,11 +202,9 @@ public:
const Expr *expr,
const LocationContext *LCtx,
unsigned count);
- DefinedOrUnknownSVal conjureSymbolVal(const void *symbolTag,
- const Expr *expr,
+ DefinedOrUnknownSVal conjureSymbolVal(const void *symbolTag, const Stmt *S,
const LocationContext *LCtx,
- QualType type,
- unsigned count);
+ QualType type, unsigned count);
DefinedOrUnknownSVal conjureSymbolVal(const Stmt *stmt,
const LocationContext *LCtx,
QualType type,
diff --git a/clang/include/clang/StaticAnalyzer/Core/PathSensitive/Store.h b/clang/include/clang/StaticAnalyzer/Core/PathSensitive/Store.h
index e08d5e1..332855a 100644
--- a/clang/include/clang/StaticAnalyzer/Core/PathSensitive/Store.h
+++ b/clang/include/clang/StaticAnalyzer/Core/PathSensitive/Store.h
@@ -215,7 +215,7 @@ public:
///
/// \param[in] store The initial store.
/// \param[in] Values The values to invalidate.
- /// \param[in] E The current statement being evaluated. Used to conjure
+ /// \param[in] S The current statement being evaluated. Used to conjure
/// symbols to mark the values of invalidated regions.
/// \param[in] Count The current block count. Used to conjure
/// symbols to mark the values of invalidated regions.
@@ -233,7 +233,7 @@ public:
/// even if they do not currently have bindings. Pass \c NULL if this
/// information will not be used.
virtual StoreRef invalidateRegions(
- Store store, ArrayRef<SVal> Values, const Expr *Ex, unsigned Count,
+ Store store, ArrayRef<SVal> Values, const Stmt *S, unsigned Count,
const LocationContext *LCtx, const CallEvent *Call,
InvalidatedSymbols &IS, RegionAndSymbolInvalidationTraits &ITraits,
InvalidatedRegions *TopLevelRegions, InvalidatedRegions *Invalidated) = 0;
diff --git a/clang/lib/AST/ASTContext.cpp b/clang/lib/AST/ASTContext.cpp
index 8bd5abf..fd8aa8d 100644
--- a/clang/lib/AST/ASTContext.cpp
+++ b/clang/lib/AST/ASTContext.cpp
@@ -12587,8 +12587,7 @@ void ASTContext::forEachMultiversionedFunctionVersion(
FD->getDeclContext()->getRedeclContext()->lookup(FD->getDeclName())) {
FunctionDecl *CurFD = CurDecl->getAsFunction()->getMostRecentDecl();
if (CurFD && hasSameType(CurFD->getType(), FD->getType()) &&
- !SeenDecls.contains(CurFD)) {
- SeenDecls.insert(CurFD);
+ SeenDecls.insert(CurFD).second) {
Pred(CurFD);
}
}
diff --git a/clang/lib/AST/ByteCode/Compiler.cpp b/clang/lib/AST/ByteCode/Compiler.cpp
index 68c3cdf..7859188 100644
--- a/clang/lib/AST/ByteCode/Compiler.cpp
+++ b/clang/lib/AST/ByteCode/Compiler.cpp
@@ -431,6 +431,7 @@ bool Compiler<Emitter>::VisitCastExpr(const CastExpr *CE) {
case CK_NoOp:
case CK_UserDefinedConversion:
case CK_AddressSpaceConversion:
+ case CK_CPointerToObjCPointerCast:
return this->delegate(SubExpr);
case CK_BitCast: {
@@ -1281,9 +1282,8 @@ bool Compiler<Emitter>::VisitVectorBinOp(const BinaryOperator *E) {
? BinaryOperator::getOpForCompoundAssignment(E->getOpcode())
: E->getOpcode();
- // The LHS and RHS of a comparison operator must have the same type. So we
- // just use LHS vector element type here.
PrimType ElemT = this->classifyVectorElementType(LHS->getType());
+ PrimType RHSElemT = this->classifyVectorElementType(RHS->getType());
PrimType ResultElemT = this->classifyVectorElementType(E->getType());
// Evaluate LHS and save value to LHSOffset.
@@ -1311,7 +1311,7 @@ bool Compiler<Emitter>::VisitVectorBinOp(const BinaryOperator *E) {
PrimType PromotT = classifyPrim(PromotTy);
PrimType OpT = NeedIntPromot ? PromotT : ElemT;
- auto getElem = [=](unsigned Offset, unsigned Index) {
+ auto getElem = [=](unsigned Offset, PrimType ElemT, unsigned Index) {
if (!this->emitGetLocal(PT_Ptr, Offset, E))
return false;
if (!this->emitArrayElemPop(ElemT, Index, E))
@@ -1341,9 +1341,9 @@ bool Compiler<Emitter>::VisitVectorBinOp(const BinaryOperator *E) {
}
for (unsigned I = 0; I != VecTy->getNumElements(); ++I) {
- if (!getElem(LHSOffset, I))
+ if (!getElem(LHSOffset, ElemT, I))
return false;
- if (!getElem(RHSOffset, I))
+ if (!getElem(RHSOffset, RHSElemT, I))
return false;
switch (Op) {
case BO_Add:
@@ -1371,11 +1371,11 @@ bool Compiler<Emitter>::VisitVectorBinOp(const BinaryOperator *E) {
return false;
break;
case BO_Shl:
- if (!this->emitShl(OpT, ElemT, E))
+ if (!this->emitShl(OpT, RHSElemT, E))
return false;
break;
case BO_Shr:
- if (!this->emitShr(OpT, ElemT, E))
+ if (!this->emitShr(OpT, RHSElemT, E))
return false;
break;
case BO_EQ:
@@ -3097,12 +3097,11 @@ bool Compiler<Emitter>::VisitCXXNewExpr(const CXXNewExpr *E) {
QualType ElementType = E->getAllocatedType();
std::optional<PrimType> ElemT = classify(ElementType);
unsigned PlacementArgs = E->getNumPlacementArgs();
+ const FunctionDecl *OperatorNew = E->getOperatorNew();
+ const Expr *PlacementDest = nullptr;
bool IsNoThrow = false;
- // FIXME: Better diagnostic. diag::note_constexpr_new_placement
if (PlacementArgs != 0) {
- // The only new-placement list we support is of the form (std::nothrow).
- //
// FIXME: There is no restriction on this, but it's not clear that any
// other form makes any sense. We get here for cases such as:
//
@@ -3111,27 +3110,44 @@ bool Compiler<Emitter>::VisitCXXNewExpr(const CXXNewExpr *E) {
// (which should presumably be valid only if N is a multiple of
// alignof(int), and in any case can't be deallocated unless N is
// alignof(X) and X has new-extended alignment).
- if (PlacementArgs != 1 || !E->getPlacementArg(0)->getType()->isNothrowT())
- return this->emitInvalid(E);
+ if (PlacementArgs == 1) {
+ const Expr *Arg1 = E->getPlacementArg(0);
+ if (Arg1->getType()->isNothrowT()) {
+ if (!this->discard(Arg1))
+ return false;
+ IsNoThrow = true;
+ } else {
+ // Invalid unless we have C++26 or are in a std:: function.
+ if (!this->emitInvalidNewDeleteExpr(E, E))
+ return false;
- if (!this->discard(E->getPlacementArg(0)))
- return false;
- IsNoThrow = true;
- }
+ // If we have a placement-new destination, we'll later use that instead
+ // of allocating.
+ if (OperatorNew->isReservedGlobalPlacementOperator())
+ PlacementDest = Arg1;
+ }
+ } else {
+ // Always invalid.
+ return this->emitInvalid(E);
+ }
+ } else if (!OperatorNew->isReplaceableGlobalAllocationFunction())
+ return this->emitInvalidNewDeleteExpr(E, E);
const Descriptor *Desc;
- if (ElemT) {
- if (E->isArray())
- Desc = nullptr; // We're not going to use it in this case.
- else
- Desc = P.createDescriptor(E, *ElemT, Descriptor::InlineDescMD,
- /*IsConst=*/false, /*IsTemporary=*/false,
- /*IsMutable=*/false);
- } else {
- Desc = P.createDescriptor(
- E, ElementType.getTypePtr(),
- E->isArray() ? std::nullopt : Descriptor::InlineDescMD,
- /*IsConst=*/false, /*IsTemporary=*/false, /*IsMutable=*/false, Init);
+ if (!PlacementDest) {
+ if (ElemT) {
+ if (E->isArray())
+ Desc = nullptr; // We're not going to use it in this case.
+ else
+ Desc = P.createDescriptor(E, *ElemT, Descriptor::InlineDescMD,
+ /*IsConst=*/false, /*IsTemporary=*/false,
+ /*IsMutable=*/false);
+ } else {
+ Desc = P.createDescriptor(
+ E, ElementType.getTypePtr(),
+ E->isArray() ? std::nullopt : Descriptor::InlineDescMD,
+ /*IsConst=*/false, /*IsTemporary=*/false, /*IsMutable=*/false, Init);
+ }
}
if (E->isArray()) {
@@ -3148,26 +3164,42 @@ bool Compiler<Emitter>::VisitCXXNewExpr(const CXXNewExpr *E) {
PrimType SizeT = classifyPrim(Stripped->getType());
- if (!this->visit(Stripped))
- return false;
-
- if (ElemT) {
- // N primitive elements.
- if (!this->emitAllocN(SizeT, *ElemT, E, IsNoThrow, E))
+ if (PlacementDest) {
+ if (!this->visit(PlacementDest))
+ return false;
+ if (!this->visit(Stripped))
+ return false;
+ if (!this->emitCheckNewTypeMismatchArray(SizeT, E, E))
return false;
} else {
- // N Composite elements.
- if (!this->emitAllocCN(SizeT, Desc, IsNoThrow, E))
+ if (!this->visit(Stripped))
return false;
+
+ if (ElemT) {
+ // N primitive elements.
+ if (!this->emitAllocN(SizeT, *ElemT, E, IsNoThrow, E))
+ return false;
+ } else {
+ // N Composite elements.
+ if (!this->emitAllocCN(SizeT, Desc, IsNoThrow, E))
+ return false;
+ }
}
if (Init && !this->visitInitializer(Init))
return false;
} else {
- // Allocate just one element.
- if (!this->emitAlloc(Desc, E))
- return false;
+ if (PlacementDest) {
+ if (!this->visit(PlacementDest))
+ return false;
+ if (!this->emitCheckNewTypeMismatch(E, E))
+ return false;
+ } else {
+ // Allocate just one element.
+ if (!this->emitAlloc(Desc, E))
+ return false;
+ }
if (Init) {
if (ElemT) {
@@ -3194,6 +3226,11 @@ template <class Emitter>
bool Compiler<Emitter>::VisitCXXDeleteExpr(const CXXDeleteExpr *E) {
const Expr *Arg = E->getArgument();
+ const FunctionDecl *OperatorDelete = E->getOperatorDelete();
+
+ if (!OperatorDelete->isReplaceableGlobalAllocationFunction())
+ return this->emitInvalidNewDeleteExpr(E, E);
+
// Arg must be an lvalue.
if (!this->visit(Arg))
return false;
diff --git a/clang/lib/AST/ByteCode/Interp.cpp b/clang/lib/AST/ByteCode/Interp.cpp
index 0587ffd..8b578cc 100644
--- a/clang/lib/AST/ByteCode/Interp.cpp
+++ b/clang/lib/AST/ByteCode/Interp.cpp
@@ -1286,6 +1286,93 @@ bool CallPtr(InterpState &S, CodePtr OpPC, uint32_t ArgSize,
return Call(S, OpPC, F, VarArgSize);
}
+bool CheckNewTypeMismatch(InterpState &S, CodePtr OpPC, const Expr *E,
+ std::optional<uint64_t> ArraySize) {
+ const Pointer &Ptr = S.Stk.peek<Pointer>();
+
+ if (!CheckStore(S, OpPC, Ptr))
+ return false;
+
+ if (!InvalidNewDeleteExpr(S, OpPC, E))
+ return false;
+
+ // Assume proper types in std functions.
+ if (S.Current->isStdFunction())
+ return true;
+
+ const auto *NewExpr = cast<CXXNewExpr>(E);
+ QualType StorageType = Ptr.getType();
+
+ if (isa_and_nonnull<CXXNewExpr>(Ptr.getFieldDesc()->asExpr())) {
+ // FIXME: Are there other cases where this is a problem?
+ StorageType = StorageType->getPointeeType();
+ }
+
+ const ASTContext &ASTCtx = S.getASTContext();
+ QualType AllocType;
+ if (ArraySize) {
+ AllocType = ASTCtx.getConstantArrayType(
+ NewExpr->getAllocatedType(),
+ APInt(64, static_cast<uint64_t>(*ArraySize), false), nullptr,
+ ArraySizeModifier::Normal, 0);
+ } else {
+ AllocType = NewExpr->getAllocatedType();
+ }
+
+ unsigned StorageSize = 1;
+ unsigned AllocSize = 1;
+ if (const auto *CAT = dyn_cast<ConstantArrayType>(AllocType))
+ AllocSize = CAT->getZExtSize();
+ if (const auto *CAT = dyn_cast<ConstantArrayType>(StorageType))
+ StorageSize = CAT->getZExtSize();
+
+ if (AllocSize > StorageSize ||
+ !ASTCtx.hasSimilarType(ASTCtx.getBaseElementType(AllocType),
+ ASTCtx.getBaseElementType(StorageType))) {
+ S.FFDiag(S.Current->getLocation(OpPC),
+ diag::note_constexpr_placement_new_wrong_type)
+ << StorageType << AllocType;
+ return false;
+ }
+ return true;
+}
+
+bool InvalidNewDeleteExpr(InterpState &S, CodePtr OpPC, const Expr *E) {
+ assert(E);
+ const auto &Loc = S.Current->getSource(OpPC);
+
+ if (S.getLangOpts().CPlusPlus26)
+ return true;
+
+ if (const auto *NewExpr = dyn_cast<CXXNewExpr>(E)) {
+ const FunctionDecl *OperatorNew = NewExpr->getOperatorNew();
+
+ if (!S.getLangOpts().CPlusPlus26 && NewExpr->getNumPlacementArgs() > 0) {
+ // This is allowed pre-C++26, but only in a std function.
+ if (S.Current->isStdFunction())
+ return true;
+ S.FFDiag(Loc, diag::note_constexpr_new_placement)
+ << /*C++26 feature*/ 1 << E->getSourceRange();
+ } else if (NewExpr->getNumPlacementArgs() == 1 &&
+ !OperatorNew->isReservedGlobalPlacementOperator()) {
+ S.FFDiag(Loc, diag::note_constexpr_new_placement)
+ << /*Unsupported*/ 0 << E->getSourceRange();
+ } else if (!OperatorNew->isReplaceableGlobalAllocationFunction()) {
+ S.FFDiag(Loc, diag::note_constexpr_new_non_replaceable)
+ << isa<CXXMethodDecl>(OperatorNew) << OperatorNew;
+ }
+ } else {
+ const auto *DeleteExpr = cast<CXXDeleteExpr>(E);
+ const FunctionDecl *OperatorDelete = DeleteExpr->getOperatorDelete();
+ if (!OperatorDelete->isReplaceableGlobalAllocationFunction()) {
+ S.FFDiag(Loc, diag::note_constexpr_new_non_replaceable)
+ << isa<CXXMethodDecl>(OperatorDelete) << OperatorDelete;
+ }
+ }
+
+ return false;
+}
+
bool Interpret(InterpState &S, APValue &Result) {
// The current stack frame when we started Interpret().
  // This is being used by the ops to determine whether
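// A minimal sketch of what the two helpers above accept and reject
// (hypothetical -std=c++26 test, not part of this patch; <new> provides the
// reserved global placement operator new):
#include <new>
constexpr int ok() {
  int storage = 0;
  new (&storage) int(42); // accepted: allocated and storage types are similar
  return storage;
}
constexpr int bad() {
  int storage = 0;
  new (&storage) float(1.0f); // rejected: placement new of the wrong type
  return storage;
}
static_assert(ok() == 42);    // evaluates
// static_assert(bad() == 0); // would fail: note_constexpr_placement_new_wrong_type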
diff --git a/clang/lib/AST/ByteCode/Interp.h b/clang/lib/AST/ByteCode/Interp.h
index 4aceb83..1f4c302 100644
--- a/clang/lib/AST/ByteCode/Interp.h
+++ b/clang/lib/AST/ByteCode/Interp.h
@@ -2947,6 +2947,17 @@ static inline bool IsConstantContext(InterpState &S, CodePtr OpPC) {
return true;
}
+/// Check if the initializer and storage types of a placement-new expression
+/// match.
+bool CheckNewTypeMismatch(InterpState &S, CodePtr OpPC, const Expr *E,
+ std::optional<uint64_t> ArraySize = std::nullopt);
+
+template <PrimType Name, class T = typename PrimConv<Name>::T>
+bool CheckNewTypeMismatchArray(InterpState &S, CodePtr OpPC, const Expr *E) {
+ const auto &Size = S.Stk.pop<T>();
+ return CheckNewTypeMismatch(S, OpPC, E, static_cast<uint64_t>(Size));
+}
+bool InvalidNewDeleteExpr(InterpState &S, CodePtr OpPC, const Expr *E);
//===----------------------------------------------------------------------===//
// Read opcode arguments
//===----------------------------------------------------------------------===//
diff --git a/clang/lib/AST/ByteCode/InterpBuiltin.cpp b/clang/lib/AST/ByteCode/InterpBuiltin.cpp
index 51c77b7..68710f6 100644
--- a/clang/lib/AST/ByteCode/InterpBuiltin.cpp
+++ b/clang/lib/AST/ByteCode/InterpBuiltin.cpp
@@ -1306,7 +1306,16 @@ static bool interp__builtin_operator_new(InterpState &S, CodePtr OpPC,
return false;
}
- // FIXME: CheckArraySize for NumElems?
+  // NB: The same check as the one in CheckArraySize().
+ if (NumElems.getActiveBits() >
+ ConstantArrayType::getMaxSizeBits(S.getASTContext()) ||
+ NumElems.ugt(Descriptor::MaxArrayElemBytes / ElemSize.getQuantity())) {
+ // FIXME: NoThrow check?
+ const SourceInfo &Loc = S.Current->getSource(OpPC);
+ S.FFDiag(Loc, diag::note_constexpr_new_too_large)
+ << NumElems.getZExtValue();
+ return false;
+ }
std::optional<PrimType> ElemT = S.getContext().classify(ElemType);
DynamicAllocator &Allocator = S.getAllocator();
@@ -1336,8 +1345,7 @@ static bool interp__builtin_operator_new(InterpState &S, CodePtr OpPC,
assert(!ElemT);
// Structs etc.
const Descriptor *Desc = S.P.createDescriptor(
- Call, ElemType.getTypePtr(),
- NumElems.ule(1) ? std::nullopt : Descriptor::InlineDescMD,
+ Call, ElemType.getTypePtr(), Descriptor::InlineDescMD,
/*IsConst=*/false, /*IsTemporary=*/false, /*IsMutable=*/false,
/*Init=*/nullptr);
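// A sketch of the new size guard (hypothetical test; assumes constexpr
// std::allocator support from C++20):
#include <memory>
constexpr bool too_large() {
  std::allocator<int> A;
  int *P = A.allocate(1ULL << 62); // now diagnosed: allocation too large
  A.deallocate(P, 1ULL << 62);
  return true;
}
// static_assert(too_large()); // fails with note_constexpr_new_too_large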
diff --git a/clang/lib/AST/ByteCode/InterpFrame.cpp b/clang/lib/AST/ByteCode/InterpFrame.cpp
index 28e189b..7f02464 100644
--- a/clang/lib/AST/ByteCode/InterpFrame.cpp
+++ b/clang/lib/AST/ByteCode/InterpFrame.cpp
@@ -102,14 +102,26 @@ static void print(llvm::raw_ostream &OS, const T &V, ASTContext &ASTCtx,
V.toAPValue(ASTCtx).printPretty(OS, ASTCtx, Ty);
}
+static bool shouldSkipInBacktrace(const Function *F) {
+ if (F->isBuiltin())
+ return true;
+ if (F->isLambdaStaticInvoker())
+ return true;
+
+ const FunctionDecl *FD = F->getDecl();
+ if (FD->getDeclName().getCXXOverloadedOperator() == OO_New ||
+ FD->getDeclName().getCXXOverloadedOperator() == OO_Array_New)
+ return true;
+ return false;
+}
+
void InterpFrame::describe(llvm::raw_ostream &OS) const {
// We create frames for builtin functions as well, but we can't reliably
// diagnose them. The 'in call to' diagnostics for them add no value to the
// user _and_ it doesn't generally work since the argument types don't always
// match the function prototype. Just ignore them.
// Similarly, for lambda static invokers, we would just print __invoke().
- if (const auto *F = getFunction();
- F && (F->isBuiltin() || F->isLambdaStaticInvoker()))
+ if (const auto *F = getFunction(); F && shouldSkipInBacktrace(F))
return;
const Expr *CallExpr = Caller->getExpr(getRetPC());
@@ -245,3 +257,13 @@ SourceRange InterpFrame::getRange(CodePtr PC) const {
return S.getRange(Func, PC);
}
+
+bool InterpFrame::isStdFunction() const {
+ if (!Func)
+ return false;
+ for (const DeclContext *DC = Func->getDecl(); DC; DC = DC->getParent())
+ if (DC->isStdNamespace())
+ return true;
+
+ return false;
+}
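// The namespace walk in isStdFunction generalizes to any declaration; a
// standalone sketch of the same check (illustrative helper, not in the
// patch):
static bool isInStdNamespace(const clang::Decl *D) {
  for (const clang::DeclContext *DC = D->getDeclContext(); DC;
       DC = DC->getParent())
    if (DC->isStdNamespace())
      return true;
  return false;
}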
diff --git a/clang/lib/AST/ByteCode/InterpFrame.h b/clang/lib/AST/ByteCode/InterpFrame.h
index 802777a..7cfc3ac 100644
--- a/clang/lib/AST/ByteCode/InterpFrame.h
+++ b/clang/lib/AST/ByteCode/InterpFrame.h
@@ -117,6 +117,8 @@ public:
unsigned getDepth() const { return Depth; }
+ bool isStdFunction() const;
+
void dump() const { dump(llvm::errs(), 0); }
void dump(llvm::raw_ostream &OS, unsigned Indent = 0) const;
diff --git a/clang/lib/AST/ByteCode/Opcodes.td b/clang/lib/AST/ByteCode/Opcodes.td
index e3a88c0..36191f0 100644
--- a/clang/lib/AST/ByteCode/Opcodes.td
+++ b/clang/lib/AST/ByteCode/Opcodes.td
@@ -787,4 +787,18 @@ def Free : Opcode {
let Args = [ArgBool];
}
+def CheckNewTypeMismatch : Opcode {
+ let Args = [ArgExpr];
+}
+
+def InvalidNewDeleteExpr : Opcode {
+ let Args = [ArgExpr];
+}
+
+def CheckNewTypeMismatchArray : Opcode {
+ let Types = [IntegerTypeClass];
+ let Args = [ArgExpr];
+ let HasGroup = 1;
+}
+
def IsConstantContext: Opcode;
diff --git a/clang/lib/Basic/Targets/BPF.cpp b/clang/lib/Basic/Targets/BPF.cpp
index a94ceee5..f468476 100644
--- a/clang/lib/Basic/Targets/BPF.cpp
+++ b/clang/lib/Basic/Targets/BPF.cpp
@@ -37,6 +37,8 @@ void BPFTargetInfo::getTargetDefines(const LangOptions &Opts,
}
Builder.defineMacro("__BPF_FEATURE_ADDR_SPACE_CAST");
+ Builder.defineMacro("__BPF_FEATURE_MAY_GOTO");
+ Builder.defineMacro("__BPF_FEATURE_ATOMIC_MEM_ORDERING");
if (CPU.empty())
CPU = "v3";
@@ -48,7 +50,6 @@ void BPFTargetInfo::getTargetDefines(const LangOptions &Opts,
std::string CpuVerNumStr = CPU.substr(1);
Builder.defineMacro("__BPF_CPU_VERSION__", CpuVerNumStr);
- Builder.defineMacro("__BPF_FEATURE_MAY_GOTO");
int CpuVerNum = std::stoi(CpuVerNumStr);
if (CpuVerNum >= 2)
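// Net effect (sketch): both feature macros are now defined for every BPF
// target, independent of the versioned -mcpu gate below, so a hypothetical
// probe like this works unconditionally:
#ifdef __BPF_FEATURE_ATOMIC_MEM_ORDERING
// __atomic builtins with explicit memory ordering may be used here
#endif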
diff --git a/clang/lib/Basic/Targets/RISCV.cpp b/clang/lib/Basic/Targets/RISCV.cpp
index a4925e8..b6ea444 100644
--- a/clang/lib/Basic/Targets/RISCV.cpp
+++ b/clang/lib/Basic/Targets/RISCV.cpp
@@ -486,3 +486,15 @@ bool RISCVTargetInfo::validateCpuSupports(StringRef Feature) const {
bool RISCVTargetInfo::isValidFeatureName(StringRef Name) const {
return llvm::RISCVISAInfo::isSupportedExtensionFeature(Name);
}
+
+bool RISCVTargetInfo::validateGlobalRegisterVariable(
+ StringRef RegName, unsigned RegSize, bool &HasSizeMismatch) const {
+ if (RegName == "ra" || RegName == "sp" || RegName == "gp" ||
+ RegName == "tp" || RegName.starts_with("x") || RegName.starts_with("a") ||
+ RegName.starts_with("s") || RegName.starts_with("t")) {
+ unsigned XLen = getTriple().isArch64Bit() ? 64 : 32;
+ HasSizeMismatch = RegSize != XLen;
+ return true;
+ }
+ return false;
+}
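// What the new hook enables (illustrative; exact diagnostics depend on the
// caller): global register variables pinned to an XLEN-wide RISC-V register
// now pass the target check, with width mismatches reported through
// HasSizeMismatch:
register long ThreadPtr __asm__("tp"); // long is XLEN-wide on riscv32/riscv64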
diff --git a/clang/lib/Basic/Targets/RISCV.h b/clang/lib/Basic/Targets/RISCV.h
index b808ccc8..351ef21 100644
--- a/clang/lib/Basic/Targets/RISCV.h
+++ b/clang/lib/Basic/Targets/RISCV.h
@@ -131,6 +131,9 @@ public:
bool supportsCpuInit() const override { return getTriple().isOSLinux(); }
bool validateCpuSupports(StringRef Feature) const override;
bool isValidFeatureName(StringRef Name) const override;
+
+ bool validateGlobalRegisterVariable(StringRef RegName, unsigned RegSize,
+ bool &HasSizeMismatch) const override;
};
class LLVM_LIBRARY_VISIBILITY RISCV32TargetInfo : public RISCVTargetInfo {
public:
diff --git a/clang/lib/Basic/Targets/SPIR.h b/clang/lib/Basic/Targets/SPIR.h
index 37cf9d7..8a26db7 100644
--- a/clang/lib/Basic/Targets/SPIR.h
+++ b/clang/lib/Basic/Targets/SPIR.h
@@ -335,6 +335,9 @@ public:
PointerWidth = PointerAlign = 32;
SizeType = TargetInfo::UnsignedInt;
PtrDiffType = IntPtrType = TargetInfo::SignedInt;
+ // SPIR-V has core support for atomic ops, and Int32 is always available;
+      // we take the maximum because it's possible the host supports wider types.
+ MaxAtomicInlineWidth = std::max<unsigned char>(MaxAtomicInlineWidth, 32);
resetDataLayout("e-p:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-"
"v96:128-v192:256-v256:256-v512:512-v1024:1024-G1");
}
@@ -356,6 +359,9 @@ public:
PointerWidth = PointerAlign = 64;
SizeType = TargetInfo::UnsignedLong;
PtrDiffType = IntPtrType = TargetInfo::SignedLong;
+ // SPIR-V has core support for atomic ops, and Int64 is always available;
+      // we take the maximum because it's possible the host supports wider types.
+ MaxAtomicInlineWidth = std::max<unsigned char>(MaxAtomicInlineWidth, 64);
resetDataLayout("e-i64:64-v16:16-v24:32-v32:32-v48:64-"
"v96:128-v192:256-v256:256-v512:512-v1024:1024-G1");
}
diff --git a/clang/lib/CodeGen/BackendUtil.cpp b/clang/lib/CodeGen/BackendUtil.cpp
index fa49763..916c92a 100644
--- a/clang/lib/CodeGen/BackendUtil.cpp
+++ b/clang/lib/CodeGen/BackendUtil.cpp
@@ -509,6 +509,8 @@ static bool initTargetOptions(DiagnosticsEngine &Diags,
Options.MCOptions.X86RelaxRelocations = CodeGenOpts.X86RelaxRelocations;
Options.MCOptions.CompressDebugSections =
CodeGenOpts.getCompressDebugSections();
+ if (CodeGenOpts.OutputAsmVariant != 3) // 3 (default): not specified
+ Options.MCOptions.OutputAsmVariant = CodeGenOpts.OutputAsmVariant;
Options.MCOptions.ABIName = TargetOpts.ABI;
for (const auto &Entry : HSOpts.UserEntries)
if (!Entry.IsFramework &&
diff --git a/clang/lib/CodeGen/CGAtomic.cpp b/clang/lib/CodeGen/CGAtomic.cpp
index fbe9569..a2a87e0 100644
--- a/clang/lib/CodeGen/CGAtomic.cpp
+++ b/clang/lib/CodeGen/CGAtomic.cpp
@@ -766,8 +766,19 @@ static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *Expr, Address Dest,
// LLVM atomic instructions always have synch scope. If clang atomic
// expression has no scope operand, use default LLVM synch scope.
if (!ScopeModel) {
+ llvm::SyncScope::ID SS;
+ if (CGF.getLangOpts().OpenCL)
+      // The OpenCL approach is: "The functions that do not have memory_scope
+ // argument have the same semantics as the corresponding functions with
+ // the memory_scope argument set to memory_scope_device." See ref.:
+ // https://registry.khronos.org/OpenCL/specs/3.0-unified/html/OpenCL_C.html#atomic-functions
+ SS = CGF.getTargetHooks().getLLVMSyncScopeID(CGF.getLangOpts(),
+ SyncScope::OpenCLDevice,
+ Order, CGF.getLLVMContext());
+ else
+ SS = llvm::SyncScope::System;
EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
- Order, CGF.CGM.getLLVMContext().getOrInsertSyncScopeID(""));
+ Order, SS);
return;
}
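// Net effect (illustrative): for OpenCL, an atomic builtin without an
// explicit scope argument now lowers with the target's "device" sync scope
// rather than an empty-named scope, i.e. a call such as
//   atomic_fetch_add(&c, 1);
// behaves like
//   atomic_fetch_add_explicit(&c, 1, memory_order_seq_cst,
//                             memory_scope_device);
// while non-OpenCL languages keep llvm::SyncScope::System.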
diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp
index 78b4324..9424682 100644
--- a/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/clang/lib/CodeGen/CGBuiltin.cpp
@@ -587,9 +587,10 @@ static Value *emitCallMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
// matching the argument type. It is assumed that only the first argument is
// overloaded.
template <unsigned N>
-Value *emitBuiltinWithOneOverloadedType(CodeGenFunction &CGF, const CallExpr *E,
- unsigned IntrinsicID,
- llvm::StringRef Name = "") {
+static Value *emitBuiltinWithOneOverloadedType(CodeGenFunction &CGF,
+ const CallExpr *E,
+ unsigned IntrinsicID,
+ llvm::StringRef Name = "") {
static_assert(N, "expect non-empty argument");
SmallVector<Value *, N> Args;
for (unsigned I = 0; I < N; ++I)
@@ -13649,7 +13650,7 @@ Value *CodeGenFunction::EmitBPFBuiltinExpr(unsigned BuiltinID,
else
InitValStr = std::to_string(InitVal.getZExtValue());
std::string EnumStr = Enumerator->getNameAsString() + ":" + InitValStr;
- Value *EnumStrVal = Builder.CreateGlobalStringPtr(EnumStr);
+ Value *EnumStrVal = Builder.CreateGlobalString(EnumStr);
ConstantInt *Flag = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
Value *FlagValue = ConstantInt::get(Int64Ty, Flag->getSExtValue());
@@ -18175,7 +18176,7 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
CallOps.push_back(Ops[i]);
llvm::Function *F = CGM.getIntrinsic(ID);
Value *Call = Builder.CreateCall(F, CallOps);
- return Builder.CreateAlignedStore(Call, Ops[0], MaybeAlign(64));
+ return Builder.CreateAlignedStore(Call, Ops[0], MaybeAlign());
}
case PPC::BI__builtin_ppc_compare_and_swap:
@@ -18569,7 +18570,7 @@ llvm::Value *CodeGenFunction::EmitScalarOrConstFoldImmArg(unsigned ICEArguments,
}
// Return dot product intrinsic that corresponds to the QT scalar type
-Intrinsic::ID getDotProductIntrinsic(CGHLSLRuntime &RT, QualType QT) {
+static Intrinsic::ID getDotProductIntrinsic(CGHLSLRuntime &RT, QualType QT) {
if (QT->isFloatingType())
return RT.getFDotIntrinsic();
if (QT->isSignedIntegerType())
diff --git a/clang/lib/CodeGen/CGDebugInfo.cpp b/clang/lib/CodeGen/CGDebugInfo.cpp
index 2d2c280..4782e80 100644
--- a/clang/lib/CodeGen/CGDebugInfo.cpp
+++ b/clang/lib/CodeGen/CGDebugInfo.cpp
@@ -1249,8 +1249,12 @@ llvm::DIType *CGDebugInfo::CreatePointerLikeType(llvm::dwarf::Tag Tag,
CGM.getTarget().getDWARFAddressSpace(
CGM.getTypes().getTargetAddressSpace(PointeeTy));
+ const BTFTagAttributedType *BTFAttrTy;
+ if (auto *Atomic = PointeeTy->getAs<AtomicType>())
+ BTFAttrTy = dyn_cast<BTFTagAttributedType>(Atomic->getValueType());
+ else
+ BTFAttrTy = dyn_cast<BTFTagAttributedType>(PointeeTy);
SmallVector<llvm::Metadata *, 4> Annots;
- auto *BTFAttrTy = dyn_cast<BTFTagAttributedType>(PointeeTy);
while (BTFAttrTy) {
StringRef Tag = BTFAttrTy->getAttr()->getBTFTypeTag();
if (!Tag.empty()) {
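// The case this unwrapping fixes (hypothetical declaration): a btf_type_tag
// on an _Atomic pointee no longer loses its annotation in the debug info.
#define __tag __attribute__((btf_type_tag("user")))
_Atomic(int __tag) *p; // the "user" tag now reaches the pointer's DIE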
diff --git a/clang/lib/CodeGen/CGExprAgg.cpp b/clang/lib/CodeGen/CGExprAgg.cpp
index bbfc667..43f3bcc 100644
--- a/clang/lib/CodeGen/CGExprAgg.cpp
+++ b/clang/lib/CodeGen/CGExprAgg.cpp
@@ -1698,6 +1698,17 @@ void AggExprEmitter::VisitCXXParenListOrInitListExpr(
// Prepare a 'this' for CXXDefaultInitExprs.
CodeGenFunction::FieldConstructionScope FCS(CGF, Dest.getAddress());
+ const bool ZeroInitPadding =
+ CGF.CGM.shouldZeroInitPadding() && !Dest.isZeroed();
+ const Address BaseLoc = Dest.getAddress().withElementType(CGF.Int8Ty);
+ auto DoZeroInitPadding = [&](CharUnits Offset, CharUnits Size) {
+ if (Size.isPositive()) {
+ Address Loc = CGF.Builder.CreateConstGEP(BaseLoc, Offset.getQuantity());
+ llvm::Constant *SizeVal = CGF.Builder.getInt64(Size.getQuantity());
+ CGF.Builder.CreateMemSet(Loc, CGF.Builder.getInt8(0), SizeVal, false);
+ }
+ };
+
if (record->isUnion()) {
// Only initialize one field of a union. The field itself is
// specified by the initializer list.
@@ -1722,17 +1733,37 @@ void AggExprEmitter::VisitCXXParenListOrInitListExpr(
if (NumInitElements) {
// Store the initializer into the field
EmitInitializationToLValue(InitExprs[0], FieldLoc);
+ if (ZeroInitPadding) {
+ CharUnits TotalSize =
+ Dest.getPreferredSize(CGF.getContext(), DestLV.getType());
+ CharUnits FieldSize =
+ CGF.getContext().getTypeSizeInChars(FieldLoc.getType());
+ DoZeroInitPadding(FieldSize, TotalSize - FieldSize);
+ }
} else {
// Default-initialize to null.
- EmitNullInitializationToLValue(FieldLoc);
+ if (ZeroInitPadding)
+ EmitNullInitializationToLValue(DestLV);
+ else
+ EmitNullInitializationToLValue(FieldLoc);
}
-
return;
}
// Here we iterate over the fields; this makes it simpler to both
// default-initialize fields and skip over unnamed fields.
+ const ASTRecordLayout &Layout = CGF.getContext().getASTRecordLayout(record);
+ CharUnits SizeSoFar = CharUnits::Zero();
for (const auto *field : record->fields()) {
+ if (ZeroInitPadding) {
+ unsigned FieldNo = field->getFieldIndex();
+ CharUnits Offset =
+ CGF.getContext().toCharUnitsFromBits(Layout.getFieldOffset(FieldNo));
+ DoZeroInitPadding(SizeSoFar, Offset - SizeSoFar);
+ CharUnits FieldSize =
+ CGF.getContext().getTypeSizeInChars(field->getType());
+ SizeSoFar = Offset + FieldSize;
+ }
// We're done once we hit the flexible array member.
if (field->getType()->isIncompleteArrayType())
break;
@@ -1774,6 +1805,11 @@ void AggExprEmitter::VisitCXXParenListOrInitListExpr(
}
}
}
+ if (ZeroInitPadding) {
+ CharUnits TotalSize =
+ Dest.getPreferredSize(CGF.getContext(), DestLV.getType());
+ DoZeroInitPadding(SizeSoFar, TotalSize - SizeSoFar);
+ }
}
void AggExprEmitter::VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E,
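// Observable effect (sketch; C only, per shouldZeroInitPadding()): padding
// in a partially initialized aggregate is now memset to zero, e.g. on a
// typical LP64 target:
struct S { char c; long long x; };
struct S s = { 'a' }; // the 7 padding bytes between c and x are zeroed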
diff --git a/clang/lib/CodeGen/CGExprConstant.cpp b/clang/lib/CodeGen/CGExprConstant.cpp
index dd65080..66bc064 100644
--- a/clang/lib/CodeGen/CGExprConstant.cpp
+++ b/clang/lib/CodeGen/CGExprConstant.cpp
@@ -42,6 +42,16 @@ using namespace CodeGen;
namespace {
class ConstExprEmitter;
+llvm::Constant *getPadding(const CodeGenModule &CGM, CharUnits PadSize) {
+ llvm::Type *Ty = CGM.CharTy;
+ if (PadSize > CharUnits::One())
+ Ty = llvm::ArrayType::get(Ty, PadSize.getQuantity());
+ if (CGM.shouldZeroInitPadding()) {
+ return llvm::Constant::getNullValue(Ty);
+ }
+ return llvm::UndefValue::get(Ty);
+}
+
struct ConstantAggregateBuilderUtils {
CodeGenModule &CGM;
@@ -61,10 +71,7 @@ struct ConstantAggregateBuilderUtils {
}
llvm::Constant *getPadding(CharUnits PadSize) const {
- llvm::Type *Ty = CGM.CharTy;
- if (PadSize > CharUnits::One())
- Ty = llvm::ArrayType::get(Ty, PadSize.getQuantity());
- return llvm::UndefValue::get(Ty);
+ return ::getPadding(CGM, PadSize);
}
llvm::Constant *getZeroes(CharUnits ZeroSize) const {
@@ -591,6 +598,11 @@ private:
bool Build(const InitListExpr *ILE, bool AllowOverwrite);
bool Build(const APValue &Val, const RecordDecl *RD, bool IsPrimaryBase,
const CXXRecordDecl *VTableClass, CharUnits BaseOffset);
+ bool DoZeroInitPadding(const ASTRecordLayout &Layout, unsigned FieldNo,
+ const FieldDecl &Field, bool AllowOverwrite,
+ CharUnits &FieldSize, CharUnits &SizeSoFar);
+ bool DoZeroInitPadding(const ASTRecordLayout &Layout, bool AllowOverwrite,
+ CharUnits &SizeSoFar);
llvm::Constant *Finalize(QualType Ty);
};
@@ -715,6 +727,10 @@ bool ConstStructBuilder::Build(const InitListExpr *ILE, bool AllowOverwrite) {
if (CXXRD->getNumBases())
return false;
+ const bool ZeroInitPadding = CGM.shouldZeroInitPadding();
+ CharUnits FieldSize = CharUnits::Zero();
+ CharUnits SizeSoFar = CharUnits::Zero();
+
for (FieldDecl *Field : RD->fields()) {
++FieldNo;
@@ -732,8 +748,13 @@ bool ConstStructBuilder::Build(const InitListExpr *ILE, bool AllowOverwrite) {
const Expr *Init = nullptr;
if (ElementNo < ILE->getNumInits())
Init = ILE->getInit(ElementNo++);
- if (isa_and_nonnull<NoInitExpr>(Init))
+ if (isa_and_nonnull<NoInitExpr>(Init)) {
+ if (ZeroInitPadding &&
+ !DoZeroInitPadding(Layout, FieldNo, *Field, AllowOverwrite, FieldSize,
+ SizeSoFar))
+ return false;
continue;
+ }
// Zero-sized fields are not emitted, but their initializers may still
// prevent emission of this struct as a constant.
@@ -743,6 +764,11 @@ bool ConstStructBuilder::Build(const InitListExpr *ILE, bool AllowOverwrite) {
continue;
}
+ if (ZeroInitPadding &&
+ !DoZeroInitPadding(Layout, FieldNo, *Field, AllowOverwrite, FieldSize,
+ SizeSoFar))
+ return false;
+
// When emitting a DesignatedInitUpdateExpr, a nested InitListExpr
// represents additional overwriting of our current constant value, and not
// a new constant to emit independently.
@@ -768,6 +794,10 @@ bool ConstStructBuilder::Build(const InitListExpr *ILE, bool AllowOverwrite) {
if (!EltInit)
return false;
+ if (ZeroInitPadding && FieldSize.isZero())
+ SizeSoFar += CharUnits::fromQuantity(
+ CGM.getDataLayout().getTypeAllocSize(EltInit->getType()));
+
if (!Field->isBitField()) {
// Handle non-bitfield members.
if (!AppendField(Field, Layout.getFieldOffset(FieldNo), EltInit,
@@ -785,6 +815,9 @@ bool ConstStructBuilder::Build(const InitListExpr *ILE, bool AllowOverwrite) {
}
}
+ if (ZeroInitPadding && !DoZeroInitPadding(Layout, AllowOverwrite, SizeSoFar))
+ return false;
+
return true;
}
@@ -849,6 +882,9 @@ bool ConstStructBuilder::Build(const APValue &Val, const RecordDecl *RD,
unsigned FieldNo = 0;
uint64_t OffsetBits = CGM.getContext().toBits(Offset);
+ const bool ZeroInitPadding = CGM.shouldZeroInitPadding();
+ CharUnits FieldSize = CharUnits::Zero();
+ CharUnits SizeSoFar = CharUnits::Zero();
bool AllowOverwrite = false;
for (RecordDecl::field_iterator Field = RD->field_begin(),
@@ -870,6 +906,15 @@ bool ConstStructBuilder::Build(const APValue &Val, const RecordDecl *RD,
if (!EltInit)
return false;
+ if (ZeroInitPadding) {
+ if (!DoZeroInitPadding(Layout, FieldNo, **Field, AllowOverwrite,
+ FieldSize, SizeSoFar))
+ return false;
+ if (FieldSize.isZero())
+ SizeSoFar += CharUnits::fromQuantity(
+ CGM.getDataLayout().getTypeAllocSize(EltInit->getType()));
+ }
+
if (!Field->isBitField()) {
// Handle non-bitfield members.
if (!AppendField(*Field, Layout.getFieldOffset(FieldNo) + OffsetBits,
@@ -886,7 +931,35 @@ bool ConstStructBuilder::Build(const APValue &Val, const RecordDecl *RD,
return false;
}
}
+ if (ZeroInitPadding && !DoZeroInitPadding(Layout, AllowOverwrite, SizeSoFar))
+ return false;
+
+ return true;
+}
+bool ConstStructBuilder::DoZeroInitPadding(
+ const ASTRecordLayout &Layout, unsigned FieldNo, const FieldDecl &Field,
+ bool AllowOverwrite, CharUnits &FieldSize, CharUnits &SizeSoFar) {
+ CharUnits Offset =
+ CGM.getContext().toCharUnitsFromBits(Layout.getFieldOffset(FieldNo));
+ if (SizeSoFar < Offset)
+ if (!AppendBytes(SizeSoFar, getPadding(CGM, Offset - SizeSoFar),
+ AllowOverwrite))
+ return false;
+ FieldSize = CGM.getContext().getTypeSizeInChars(Field.getType());
+ SizeSoFar = Offset + FieldSize;
+ return true;
+}
+
+bool ConstStructBuilder::DoZeroInitPadding(const ASTRecordLayout &Layout,
+ bool AllowOverwrite,
+ CharUnits &SizeSoFar) {
+ CharUnits TotalSize = Layout.getSize();
+ if (SizeSoFar < TotalSize)
+ if (!AppendBytes(SizeSoFar, getPadding(CGM, TotalSize - SizeSoFar),
+ AllowOverwrite))
+ return false;
+ SizeSoFar = TotalSize;
return true;
}
@@ -1127,12 +1200,10 @@ public:
assert(CurSize <= TotalSize && "Union size mismatch!");
if (unsigned NumPadBytes = TotalSize - CurSize) {
- llvm::Type *Ty = CGM.CharTy;
- if (NumPadBytes > 1)
- Ty = llvm::ArrayType::get(Ty, NumPadBytes);
-
- Elts.push_back(llvm::UndefValue::get(Ty));
- Types.push_back(Ty);
+ llvm::Constant *Padding =
+ getPadding(CGM, CharUnits::fromQuantity(NumPadBytes));
+ Elts.push_back(Padding);
+ Types.push_back(Padding->getType());
}
llvm::StructType *STy = llvm::StructType::get(VMContext, Types, false);
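// Constant-emission counterpart (illustrative): for C, padding in a constant
// initializer is now emitted as zeroinitializer instead of undef, e.g.
//   union U { int a; long long b; };
//   union U u = { 1 };
// yields a global whose four trailing padding bytes are zero, matching the
// runtime behavior added in CGExprAgg.cpp above.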
diff --git a/clang/lib/CodeGen/CGExprScalar.cpp b/clang/lib/CodeGen/CGExprScalar.cpp
index 82caf65..b7f5b93 100644
--- a/clang/lib/CodeGen/CGExprScalar.cpp
+++ b/clang/lib/CodeGen/CGExprScalar.cpp
@@ -1807,7 +1807,7 @@ ScalarExprEmitter::VisitSYCLUniqueStableNameExpr(SYCLUniqueStableNameExpr *E) {
ASTContext &Context = CGF.getContext();
unsigned AddrSpace =
Context.getTargetAddressSpace(CGF.CGM.GetGlobalConstantAddressSpace());
- llvm::Constant *GlobalConstStr = Builder.CreateGlobalStringPtr(
+ llvm::Constant *GlobalConstStr = Builder.CreateGlobalString(
E->ComputeName(Context), "__usn_str", AddrSpace);
llvm::Type *ExprTy = ConvertType(E->getType());
diff --git a/clang/lib/CodeGen/CGOpenMPRuntime.cpp b/clang/lib/CodeGen/CGOpenMPRuntime.cpp
index 7a94c4d..8593cb5 100644
--- a/clang/lib/CodeGen/CGOpenMPRuntime.cpp
+++ b/clang/lib/CodeGen/CGOpenMPRuntime.cpp
@@ -1490,7 +1490,7 @@ llvm::Type *CGOpenMPRuntime::getKmpc_MicroPointerTy() {
return llvm::PointerType::getUnqual(Kmpc_MicroTy);
}
-llvm::OffloadEntriesInfoManager::OMPTargetDeviceClauseKind
+static llvm::OffloadEntriesInfoManager::OMPTargetDeviceClauseKind
convertDeviceClause(const VarDecl *VD) {
std::optional<OMPDeclareTargetDeclAttr::DevTypeTy> DevTy =
OMPDeclareTargetDeclAttr::getDeviceType(VD);
@@ -1513,7 +1513,7 @@ convertDeviceClause(const VarDecl *VD) {
}
}
-llvm::OffloadEntriesInfoManager::OMPTargetGlobalVarEntryKind
+static llvm::OffloadEntriesInfoManager::OMPTargetGlobalVarEntryKind
convertCaptureClause(const VarDecl *VD) {
std::optional<OMPDeclareTargetDeclAttr::MapTypeTy> MapType =
OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
@@ -8836,7 +8836,7 @@ static ValueDecl *getDeclFromThisExpr(const Expr *E) {
/// Emit a string constant containing the names of the values mapped to the
/// offloading runtime library.
-llvm::Constant *
+static llvm::Constant *
emitMappingInformation(CodeGenFunction &CGF, llvm::OpenMPIRBuilder &OMPBuilder,
MappableExprsHandler::MappingExprInfo &MapExprs) {
@@ -9450,8 +9450,8 @@ static llvm::Value *emitDeviceID(
return DeviceID;
}
-llvm::Value *emitDynCGGroupMem(const OMPExecutableDirective &D,
- CodeGenFunction &CGF) {
+static llvm::Value *emitDynCGGroupMem(const OMPExecutableDirective &D,
+ CodeGenFunction &CGF) {
llvm::Value *DynCGroupMem = CGF.Builder.getInt32(0);
if (auto *DynMemClause = D.getSingleClause<OMPXDynCGroupMemClause>()) {
diff --git a/clang/lib/CodeGen/CGStmt.cpp b/clang/lib/CodeGen/CGStmt.cpp
index 27cf3de..623857b 100644
--- a/clang/lib/CodeGen/CGStmt.cpp
+++ b/clang/lib/CodeGen/CGStmt.cpp
@@ -815,6 +815,7 @@ void CodeGenFunction::EmitIfStmt(const IfStmt &S) {
// C99 6.8.4.1: The first substatement is executed if the expression compares
// unequal to 0. The condition must be a scalar type.
LexicalScope ConditionScope(*this, S.getCond()->getSourceRange());
+ ApplyDebugLocation DL(*this, S.getCond());
if (S.getInit())
EmitStmt(S.getInit());
diff --git a/clang/lib/CodeGen/CGStmtOpenMP.cpp b/clang/lib/CodeGen/CGStmtOpenMP.cpp
index b11df83..71a27d0 100644
--- a/clang/lib/CodeGen/CGStmtOpenMP.cpp
+++ b/clang/lib/CodeGen/CGStmtOpenMP.cpp
@@ -2734,8 +2734,8 @@ GetAlignedMapping(const OMPLoopDirective &S, CodeGenFunction &CGF) {
// Pass OMPLoopDirective (instead of OMPSimdDirective) to make this function
// available for "loop bind(thread)", which maps to "simd".
-void emitOMPSimdDirective(const OMPLoopDirective &S, CodeGenFunction &CGF,
- CodeGenModule &CGM) {
+static void emitOMPSimdDirective(const OMPLoopDirective &S,
+ CodeGenFunction &CGF, CodeGenModule &CGM) {
bool UseOMPIRBuilder =
CGM.getLangOpts().OpenMPIRBuilder && isSimdSupportedByOpenMPIRBuilder(S);
if (UseOMPIRBuilder) {
@@ -3987,8 +3987,8 @@ convertClauseKindToSchedKind(OpenMPScheduleClauseKind ScheduleClauseKind) {
// Pass OMPLoopDirective (instead of OMPForDirective) to make this function
// available for "loop bind(parallel)", which maps to "for".
-void emitOMPForDirective(const OMPLoopDirective &S, CodeGenFunction &CGF,
- CodeGenModule &CGM, bool HasCancel) {
+static void emitOMPForDirective(const OMPLoopDirective &S, CodeGenFunction &CGF,
+ CodeGenModule &CGM, bool HasCancel) {
bool HasLastprivates = false;
bool UseOMPIRBuilder = CGM.getLangOpts().OpenMPIRBuilder &&
isForSupportedByOpenMPIRBuilder(S, HasCancel);
@@ -5447,7 +5447,7 @@ void CodeGenFunction::EmitOMPTaskwaitDirective(const OMPTaskwaitDirective &S) {
CGM.getOpenMPRuntime().emitTaskwaitCall(*this, S.getBeginLoc(), Data);
}
-bool isSupportedByOpenMPIRBuilder(const OMPTaskgroupDirective &T) {
+static bool isSupportedByOpenMPIRBuilder(const OMPTaskgroupDirective &T) {
return T.clauses().empty();
}
@@ -5968,8 +5968,9 @@ void CodeGenFunction::EmitOMPDistributeLoop(const OMPLoopDirective &S,
// Pass OMPLoopDirective (instead of OMPDistributeDirective) to make this
// function available for "loop bind(teams)", which maps to "distribute".
-void emitOMPDistributeDirective(const OMPLoopDirective &S, CodeGenFunction &CGF,
- CodeGenModule &CGM) {
+static void emitOMPDistributeDirective(const OMPLoopDirective &S,
+ CodeGenFunction &CGF,
+ CodeGenModule &CGM) {
auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc());
};
diff --git a/clang/lib/CodeGen/CodeGenModule.cpp b/clang/lib/CodeGen/CodeGenModule.cpp
index 17b82b2..d53d479 100644
--- a/clang/lib/CodeGen/CodeGenModule.cpp
+++ b/clang/lib/CodeGen/CodeGenModule.cpp
@@ -784,8 +784,9 @@ getLLVMVisibility(clang::LangOptions::VisibilityFromDLLStorageClassKinds K) {
llvm_unreachable("unknown option value!");
}
-void setLLVMVisibility(llvm::GlobalValue &GV,
- std::optional<llvm::GlobalValue::VisibilityTypes> V) {
+static void
+setLLVMVisibility(llvm::GlobalValue &GV,
+ std::optional<llvm::GlobalValue::VisibilityTypes> V) {
if (!V)
return;
@@ -4224,8 +4225,8 @@ TargetMVPriority(const TargetInfo &TI,
// in the cases of CPUDispatch, this causes issues. This also makes sure we
// work with internal linkage functions, so that the same function name can be
// used with internal linkage in multiple TUs.
-llvm::GlobalValue::LinkageTypes getMultiversionLinkage(CodeGenModule &CGM,
- GlobalDecl GD) {
+static llvm::GlobalValue::LinkageTypes
+getMultiversionLinkage(CodeGenModule &CGM, GlobalDecl GD) {
const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
if (FD->getFormalLinkage() == Linkage::Internal)
return llvm::GlobalValue::InternalLinkage;
diff --git a/clang/lib/CodeGen/CodeGenModule.h b/clang/lib/CodeGen/CodeGenModule.h
index c58bb88..fcdfef0 100644
--- a/clang/lib/CodeGen/CodeGenModule.h
+++ b/clang/lib/CodeGen/CodeGenModule.h
@@ -1676,6 +1676,57 @@ public:
MustTailCallUndefinedGlobals.insert(Global);
}
+ bool shouldZeroInitPadding() const {
+    // In C23 (N3096) §6.7.10:
+    // """
+    // If any object is initialized with an empty initializer, then it is
+ // subject to default initialization:
+ // - if it is an aggregate, every member is initialized (recursively)
+ // according to these rules, and any padding is initialized to zero bits;
+ // - if it is a union, the first named member is initialized (recursively)
+ // according to these rules, and any padding is initialized to zero bits.
+ //
+ // If the aggregate or union contains elements or members that are
+ // aggregates or unions, these rules apply recursively to the subaggregates
+ // or contained unions.
+ //
+ // If there are fewer initializers in a brace-enclosed list than there are
+ // elements or members of an aggregate, or fewer characters in a string
+ // literal used to initialize an array of known size than there are elements
+ // in the array, the remainder of the aggregate is subject to default
+ // initialization.
+ // """
+ //
+ // From my understanding, the standard is ambiguous in the following two
+ // areas:
+    // 1. For a union type with an empty initializer, if the first named member
+    // is not the largest member, then the bytes that come after the first named
+    // member but before the padding are left unspecified. An example is:
+ // union U { int a; long long b;};
+ // union U u = {}; // The first 4 bytes are 0, but 4-8 bytes are left
+ // unspecified.
+ //
+    // 2. It only mentions padding for an empty initializer, but doesn't
+    // mention padding for a non-empty initializer list. And if the aggregate
+    // or union contains elements or members that are aggregates or unions,
+    // and some have non-empty initializers while others have empty
+    // initializers, the padding initialization is unclear. An example is:
+    //   struct S1 { int a; long long b; };
+    //   struct S2 { char c; struct S1 s1; };
+    //   // The values of the padding between s2.c and s2.s1.a, and between
+    //   // s2.s1.a and s2.s1.b, are unclear.
+ // struct S2 s2 = { 'c' };
+ //
+    // Here we choose to zero-initialize the remaining bytes of a union type,
+    // because projects like the Linux kernel rely on this behavior. If we
+    // don't explicitly zero-initialize them, the undef values can be
+    // optimized to return garbage data. We also choose to zero-initialize the
+    // padding of aggregates and unions, whether they are initialized by empty
+    // or non-empty initializers. This provides a consistent behavior that
+    // projects like the Linux kernel can rely on.
+ return !getLangOpts().CPlusPlus;
+ }
+
private:
bool shouldDropDLLAttribute(const Decl *D, const llvm::GlobalValue *GV) const;
diff --git a/clang/lib/CodeGen/SanitizerMetadata.cpp b/clang/lib/CodeGen/SanitizerMetadata.cpp
index afa104a..5b212a1 100644
--- a/clang/lib/CodeGen/SanitizerMetadata.cpp
+++ b/clang/lib/CodeGen/SanitizerMetadata.cpp
@@ -27,7 +27,7 @@ static bool isAsanHwasanOrMemTag(const SanitizerSet &SS) {
SanitizerKind::HWAddress | SanitizerKind::MemTag);
}
-SanitizerMask expandKernelSanitizerMasks(SanitizerMask Mask) {
+static SanitizerMask expandKernelSanitizerMasks(SanitizerMask Mask) {
if (Mask & (SanitizerKind::Address | SanitizerKind::KernelAddress))
Mask |= SanitizerKind::Address | SanitizerKind::KernelAddress;
// Note: KHWASan doesn't support globals.
diff --git a/clang/lib/CodeGen/Targets/SPIR.cpp b/clang/lib/CodeGen/Targets/SPIR.cpp
index cc52925..d5e8e4f 100644
--- a/clang/lib/CodeGen/Targets/SPIR.cpp
+++ b/clang/lib/CodeGen/Targets/SPIR.cpp
@@ -58,7 +58,36 @@ public:
SPIRVTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
: CommonSPIRTargetCodeGenInfo(std::make_unique<SPIRVABIInfo>(CGT)) {}
void setCUDAKernelCallingConvention(const FunctionType *&FT) const override;
+ llvm::SyncScope::ID getLLVMSyncScopeID(const LangOptions &LangOpts,
+ SyncScope Scope,
+ llvm::AtomicOrdering Ordering,
+ llvm::LLVMContext &Ctx) const override;
};
+
+inline StringRef mapClangSyncScopeToLLVM(SyncScope Scope) {
+ switch (Scope) {
+ case SyncScope::HIPSingleThread:
+ case SyncScope::SingleScope:
+ return "singlethread";
+ case SyncScope::HIPWavefront:
+ case SyncScope::OpenCLSubGroup:
+ case SyncScope::WavefrontScope:
+ return "subgroup";
+ case SyncScope::HIPWorkgroup:
+ case SyncScope::OpenCLWorkGroup:
+ case SyncScope::WorkgroupScope:
+ return "workgroup";
+ case SyncScope::HIPAgent:
+ case SyncScope::OpenCLDevice:
+ case SyncScope::DeviceScope:
+ return "device";
+ case SyncScope::SystemScope:
+ case SyncScope::HIPSystem:
+ case SyncScope::OpenCLAllSVMDevices:
+ return "";
+ }
+ return "";
+}
} // End anonymous namespace.
void CommonSPIRABIInfo::setCCs() {
@@ -188,6 +217,13 @@ void SPIRVTargetCodeGenInfo::setCUDAKernelCallingConvention(
}
}
+llvm::SyncScope::ID
+SPIRVTargetCodeGenInfo::getLLVMSyncScopeID(const LangOptions &, SyncScope Scope,
+ llvm::AtomicOrdering,
+ llvm::LLVMContext &Ctx) const {
+ return Ctx.getOrInsertSyncScopeID(mapClangSyncScopeToLLVM(Scope));
+}
+
/// Construct a SPIR-V target extension type for the given OpenCL image type.
static llvm::Type *getSPIRVImageType(llvm::LLVMContext &Ctx, StringRef BaseType,
StringRef OpenCLName,
diff --git a/clang/lib/CrossTU/CrossTranslationUnit.cpp b/clang/lib/CrossTU/CrossTranslationUnit.cpp
index 9864700..9faf2a8 100644
--- a/clang/lib/CrossTU/CrossTranslationUnit.cpp
+++ b/clang/lib/CrossTU/CrossTranslationUnit.cpp
@@ -566,9 +566,9 @@ CrossTranslationUnitContext::ASTLoader::loadFromDump(StringRef ASTDumpPath) {
IntrusiveRefCntPtr<DiagnosticsEngine> Diags(
new DiagnosticsEngine(DiagID, &*DiagOpts, DiagClient));
return ASTUnit::LoadFromASTFile(
- std::string(ASTDumpPath.str()),
- CI.getPCHContainerOperations()->getRawReader(), ASTUnit::LoadEverything,
- Diags, CI.getFileSystemOpts(), CI.getHeaderSearchOptsPtr());
+ ASTDumpPath, CI.getPCHContainerOperations()->getRawReader(),
+ ASTUnit::LoadEverything, Diags, CI.getFileSystemOpts(),
+ CI.getHeaderSearchOptsPtr());
}
/// Load the AST from a source-file, which is supposed to be located inside the
diff --git a/clang/lib/Frontend/ASTUnit.cpp b/clang/lib/Frontend/ASTUnit.cpp
index 84e273a..93836ec 100644
--- a/clang/lib/Frontend/ASTUnit.cpp
+++ b/clang/lib/Frontend/ASTUnit.cpp
@@ -802,7 +802,7 @@ void ASTUnit::ConfigureDiags(IntrusiveRefCntPtr<DiagnosticsEngine> Diags,
}
std::unique_ptr<ASTUnit> ASTUnit::LoadFromASTFile(
- const std::string &Filename, const PCHContainerReader &PCHContainerRdr,
+ StringRef Filename, const PCHContainerReader &PCHContainerRdr,
WhatToLoad ToLoad, IntrusiveRefCntPtr<DiagnosticsEngine> Diags,
const FileSystemOptions &FileSystemOpts,
std::shared_ptr<HeaderSearchOptions> HSOpts,
diff --git a/clang/lib/Frontend/FrontendAction.cpp b/clang/lib/Frontend/FrontendAction.cpp
index a9c45e5..81eea9c4 100644
--- a/clang/lib/Frontend/FrontendAction.cpp
+++ b/clang/lib/Frontend/FrontendAction.cpp
@@ -625,8 +625,8 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
StringRef InputFile = Input.getFile();
std::unique_ptr<ASTUnit> AST = ASTUnit::LoadFromASTFile(
- std::string(InputFile), CI.getPCHContainerReader(),
- ASTUnit::LoadPreprocessorOnly, ASTDiags, CI.getFileSystemOpts(),
+ InputFile, CI.getPCHContainerReader(), ASTUnit::LoadPreprocessorOnly,
+ ASTDiags, CI.getFileSystemOpts(),
/*HeaderSearchOptions=*/nullptr);
if (!AST)
return false;
@@ -693,9 +693,9 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
StringRef InputFile = Input.getFile();
std::unique_ptr<ASTUnit> AST = ASTUnit::LoadFromASTFile(
- std::string(InputFile), CI.getPCHContainerReader(),
- ASTUnit::LoadEverything, Diags, CI.getFileSystemOpts(),
- CI.getHeaderSearchOptsPtr(), CI.getLangOptsPtr());
+ InputFile, CI.getPCHContainerReader(), ASTUnit::LoadEverything, Diags,
+ CI.getFileSystemOpts(), CI.getHeaderSearchOptsPtr(),
+ CI.getLangOptsPtr());
if (!AST)
return false;
diff --git a/clang/lib/Frontend/MultiplexConsumer.cpp b/clang/lib/Frontend/MultiplexConsumer.cpp
index 2158d17..3fd3c9b 100644
--- a/clang/lib/Frontend/MultiplexConsumer.cpp
+++ b/clang/lib/Frontend/MultiplexConsumer.cpp
@@ -298,6 +298,13 @@ MultiplexConsumer::MultiplexConsumer(
}
}
+MultiplexConsumer::MultiplexConsumer(std::unique_ptr<ASTConsumer> C)
+ : MultiplexConsumer([](std::unique_ptr<ASTConsumer> Consumer) {
+ std::vector<std::unique_ptr<ASTConsumer>> Consumers;
+ Consumers.push_back(std::move(Consumer));
+ return Consumers;
+ }(std::move(C))) {}
+
MultiplexConsumer::~MultiplexConsumer() {}
void MultiplexConsumer::Initialize(ASTContext &Context) {
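// Usage sketch for the new single-consumer convenience constructor
// (MyConsumer is a hypothetical ASTConsumer subclass):
std::unique_ptr<ASTConsumer> C = std::make_unique<MyConsumer>();
MultiplexConsumer Multiplexer(std::move(C));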
diff --git a/clang/lib/Frontend/Rewrite/RewriteObjC.cpp b/clang/lib/Frontend/Rewrite/RewriteObjC.cpp
index 180a0125..f49ccf7 100644
--- a/clang/lib/Frontend/Rewrite/RewriteObjC.cpp
+++ b/clang/lib/Frontend/Rewrite/RewriteObjC.cpp
@@ -128,10 +128,8 @@ namespace {
SmallVector<DeclRefExpr *, 32> BlockDeclRefs;
// Block related declarations.
- SmallVector<ValueDecl *, 8> BlockByCopyDecls;
- llvm::SmallPtrSet<ValueDecl *, 8> BlockByCopyDeclsPtrSet;
- SmallVector<ValueDecl *, 8> BlockByRefDecls;
- llvm::SmallPtrSet<ValueDecl *, 8> BlockByRefDeclsPtrSet;
+ llvm::SmallSetVector<ValueDecl *, 8> BlockByCopyDecls;
+ llvm::SmallSetVector<ValueDecl *, 8> BlockByRefDecls;
llvm::DenseMap<ValueDecl *, unsigned> BlockByRefDeclNo;
llvm::SmallPtrSet<ValueDecl *, 8> ImportedBlockDecls;
llvm::SmallPtrSet<VarDecl *, 8> ImportedLocalExternalDecls;
@@ -3292,8 +3290,8 @@ std::string RewriteObjC::SynthesizeBlockFunc(BlockExpr *CE, int i,
// Create local declarations to avoid rewriting all closure decl ref exprs.
// First, emit a declaration for all "by ref" decls.
- for (SmallVectorImpl<ValueDecl *>::iterator I = BlockByRefDecls.begin(),
- E = BlockByRefDecls.end(); I != E; ++I) {
+ for (auto I = BlockByRefDecls.begin(), E = BlockByRefDecls.end(); I != E;
+ ++I) {
S += " ";
std::string Name = (*I)->getNameAsString();
std::string TypeString;
@@ -3303,8 +3301,8 @@ std::string RewriteObjC::SynthesizeBlockFunc(BlockExpr *CE, int i,
S += Name + " = __cself->" + (*I)->getNameAsString() + "; // bound by ref\n";
}
// Next, emit a declaration for all "by copy" declarations.
- for (SmallVectorImpl<ValueDecl *>::iterator I = BlockByCopyDecls.begin(),
- E = BlockByCopyDecls.end(); I != E; ++I) {
+ for (auto I = BlockByCopyDecls.begin(), E = BlockByCopyDecls.end(); I != E;
+ ++I) {
S += " ";
// Handle nested closure invocation. For example:
//
@@ -3357,7 +3355,7 @@ std::string RewriteObjC::SynthesizeBlockHelperFuncs(BlockExpr *CE, int i,
S += VD->getNameAsString();
S += ", (void*)src->";
S += VD->getNameAsString();
- if (BlockByRefDeclsPtrSet.count(VD))
+ if (BlockByRefDecls.contains(VD))
S += ", " + utostr(BLOCK_FIELD_IS_BYREF) + "/*BLOCK_FIELD_IS_BYREF*/);";
else if (VD->getType()->isBlockPointerType())
S += ", " + utostr(BLOCK_FIELD_IS_BLOCK) + "/*BLOCK_FIELD_IS_BLOCK*/);";
@@ -3374,7 +3372,7 @@ std::string RewriteObjC::SynthesizeBlockHelperFuncs(BlockExpr *CE, int i,
for (ValueDecl *VD : ImportedBlockDecls) {
S += "_Block_object_dispose((void*)src->";
S += VD->getNameAsString();
- if (BlockByRefDeclsPtrSet.count(VD))
+ if (BlockByRefDecls.contains(VD))
S += ", " + utostr(BLOCK_FIELD_IS_BYREF) + "/*BLOCK_FIELD_IS_BYREF*/);";
else if (VD->getType()->isBlockPointerType())
S += ", " + utostr(BLOCK_FIELD_IS_BLOCK) + "/*BLOCK_FIELD_IS_BLOCK*/);";
@@ -3400,8 +3398,8 @@ std::string RewriteObjC::SynthesizeBlockImpl(BlockExpr *CE, std::string Tag,
if (BlockDeclRefs.size()) {
// Output all "by copy" declarations.
- for (SmallVectorImpl<ValueDecl *>::iterator I = BlockByCopyDecls.begin(),
- E = BlockByCopyDecls.end(); I != E; ++I) {
+ for (auto I = BlockByCopyDecls.begin(), E = BlockByCopyDecls.end(); I != E;
+ ++I) {
S += " ";
std::string FieldName = (*I)->getNameAsString();
std::string ArgName = "_" + FieldName;
@@ -3429,8 +3427,8 @@ std::string RewriteObjC::SynthesizeBlockImpl(BlockExpr *CE, std::string Tag,
S += FieldName + ";\n";
}
// Output all "by ref" declarations.
- for (SmallVectorImpl<ValueDecl *>::iterator I = BlockByRefDecls.begin(),
- E = BlockByRefDecls.end(); I != E; ++I) {
+ for (auto I = BlockByRefDecls.begin(), E = BlockByRefDecls.end(); I != E;
+ ++I) {
S += " ";
std::string FieldName = (*I)->getNameAsString();
std::string ArgName = "_" + FieldName;
@@ -3448,8 +3446,8 @@ std::string RewriteObjC::SynthesizeBlockImpl(BlockExpr *CE, std::string Tag,
Constructor += ", int flags=0)";
// Initialize all "by copy" arguments.
bool firsTime = true;
- for (SmallVectorImpl<ValueDecl *>::iterator I = BlockByCopyDecls.begin(),
- E = BlockByCopyDecls.end(); I != E; ++I) {
+ for (auto I = BlockByCopyDecls.begin(), E = BlockByCopyDecls.end(); I != E;
+ ++I) {
std::string Name = (*I)->getNameAsString();
if (firsTime) {
Constructor += " : ";
@@ -3463,8 +3461,8 @@ std::string RewriteObjC::SynthesizeBlockImpl(BlockExpr *CE, std::string Tag,
Constructor += Name + "(_" + Name + ")";
}
// Initialize all "by ref" arguments.
- for (SmallVectorImpl<ValueDecl *>::iterator I = BlockByRefDecls.begin(),
- E = BlockByRefDecls.end(); I != E; ++I) {
+ for (auto I = BlockByRefDecls.begin(), E = BlockByRefDecls.end(); I != E;
+ ++I) {
std::string Name = (*I)->getNameAsString();
if (firsTime) {
Constructor += " : ";
@@ -3553,14 +3551,10 @@ void RewriteObjC::SynthesizeBlockLiterals(SourceLocation FunLocStart,
DeclRefExpr *Exp = InnerDeclRefs[count++];
ValueDecl *VD = Exp->getDecl();
BlockDeclRefs.push_back(Exp);
- if (!VD->hasAttr<BlocksAttr>() && !BlockByCopyDeclsPtrSet.count(VD)) {
- BlockByCopyDeclsPtrSet.insert(VD);
- BlockByCopyDecls.push_back(VD);
- }
- if (VD->hasAttr<BlocksAttr>() && !BlockByRefDeclsPtrSet.count(VD)) {
- BlockByRefDeclsPtrSet.insert(VD);
- BlockByRefDecls.push_back(VD);
- }
+ if (VD->hasAttr<BlocksAttr>())
+ BlockByRefDecls.insert(VD);
+ else
+ BlockByCopyDecls.insert(VD);
// imported objects in the inner blocks not used in the outer
// blocks must be copied/disposed in the outer block as well.
if (VD->hasAttr<BlocksAttr>() ||
@@ -3590,9 +3584,7 @@ void RewriteObjC::SynthesizeBlockLiterals(SourceLocation FunLocStart,
BlockDeclRefs.clear();
BlockByRefDecls.clear();
- BlockByRefDeclsPtrSet.clear();
BlockByCopyDecls.clear();
- BlockByCopyDeclsPtrSet.clear();
ImportedBlockDecls.clear();
}
if (RewriteSC) {
@@ -4314,20 +4306,12 @@ void RewriteObjC::CollectBlockDeclRefInfo(BlockExpr *Exp) {
if (BlockDeclRefs.size()) {
// Unique all "by copy" declarations.
for (unsigned i = 0; i < BlockDeclRefs.size(); i++)
- if (!BlockDeclRefs[i]->getDecl()->hasAttr<BlocksAttr>()) {
- if (!BlockByCopyDeclsPtrSet.count(BlockDeclRefs[i]->getDecl())) {
- BlockByCopyDeclsPtrSet.insert(BlockDeclRefs[i]->getDecl());
- BlockByCopyDecls.push_back(BlockDeclRefs[i]->getDecl());
- }
- }
+ if (!BlockDeclRefs[i]->getDecl()->hasAttr<BlocksAttr>())
+ BlockByCopyDecls.insert(BlockDeclRefs[i]->getDecl());
// Unique all "by ref" declarations.
for (unsigned i = 0; i < BlockDeclRefs.size(); i++)
- if (BlockDeclRefs[i]->getDecl()->hasAttr<BlocksAttr>()) {
- if (!BlockByRefDeclsPtrSet.count(BlockDeclRefs[i]->getDecl())) {
- BlockByRefDeclsPtrSet.insert(BlockDeclRefs[i]->getDecl());
- BlockByRefDecls.push_back(BlockDeclRefs[i]->getDecl());
- }
- }
+ if (BlockDeclRefs[i]->getDecl()->hasAttr<BlocksAttr>())
+ BlockByRefDecls.insert(BlockDeclRefs[i]->getDecl());
// Find any imported blocks...they will need special attention.
for (unsigned i = 0; i < BlockDeclRefs.size(); i++)
if (BlockDeclRefs[i]->getDecl()->hasAttr<BlocksAttr>() ||
@@ -4358,22 +4342,18 @@ Stmt *RewriteObjC::SynthBlockInitExpr(BlockExpr *Exp,
for (unsigned i = 0; i < InnerBlockDeclRefs.size(); i++) {
DeclRefExpr *Exp = InnerBlockDeclRefs[i];
ValueDecl *VD = Exp->getDecl();
- if (!VD->hasAttr<BlocksAttr>() && !BlockByCopyDeclsPtrSet.count(VD)) {
+ if (!VD->hasAttr<BlocksAttr>() && BlockByCopyDecls.insert(VD)) {
// We need to save the copied-in variables in nested
// blocks because it is needed at the end for some of the API
// generations. See SynthesizeBlockLiterals routine.
InnerDeclRefs.push_back(Exp);
countOfInnerDecls++;
BlockDeclRefs.push_back(Exp);
- BlockByCopyDeclsPtrSet.insert(VD);
- BlockByCopyDecls.push_back(VD);
}
- if (VD->hasAttr<BlocksAttr>() && !BlockByRefDeclsPtrSet.count(VD)) {
+ if (VD->hasAttr<BlocksAttr>() && BlockByRefDecls.insert(VD)) {
InnerDeclRefs.push_back(Exp);
countOfInnerDecls++;
BlockDeclRefs.push_back(Exp);
- BlockByRefDeclsPtrSet.insert(VD);
- BlockByRefDecls.push_back(VD);
}
}
// Find any imported blocks...they will need special attention.
@@ -4439,8 +4419,8 @@ Stmt *RewriteObjC::SynthBlockInitExpr(BlockExpr *Exp,
if (BlockDeclRefs.size()) {
Expr *Exp;
// Output all "by copy" declarations.
- for (SmallVectorImpl<ValueDecl *>::iterator I = BlockByCopyDecls.begin(),
- E = BlockByCopyDecls.end(); I != E; ++I) {
+ for (auto I = BlockByCopyDecls.begin(), E = BlockByCopyDecls.end(); I != E;
+ ++I) {
if (isObjCType((*I)->getType())) {
// FIXME: Conform to ABI ([[obj retain] autorelease]).
FD = SynthBlockInitFunctionDecl((*I)->getName());
@@ -4476,8 +4456,8 @@ Stmt *RewriteObjC::SynthBlockInitExpr(BlockExpr *Exp,
InitExprs.push_back(Exp);
}
// Output all "by ref" declarations.
- for (SmallVectorImpl<ValueDecl *>::iterator I = BlockByRefDecls.begin(),
- E = BlockByRefDecls.end(); I != E; ++I) {
+ for (auto I = BlockByRefDecls.begin(), E = BlockByRefDecls.end(); I != E;
+ ++I) {
ValueDecl *ND = (*I);
std::string Name(ND->getNameAsString());
std::string RecName;
@@ -4534,9 +4514,7 @@ Stmt *RewriteObjC::SynthBlockInitExpr(BlockExpr *Exp,
NewRep);
BlockDeclRefs.clear();
BlockByRefDecls.clear();
- BlockByRefDeclsPtrSet.clear();
BlockByCopyDecls.clear();
- BlockByCopyDeclsPtrSet.clear();
ImportedBlockDecls.clear();
return NewRep;
}
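// The refactor relies on llvm::SmallSetVector::insert returning true only
// when the element is newly added, fusing the old "check the PtrSet, then
// push_back" pattern into one call while preserving insertion order:
llvm::SmallSetVector<ValueDecl *, 8> Decls;
// ... given some ValueDecl *VD:
if (Decls.insert(VD)) {
  // first occurrence of VD; iteration later follows insertion order
}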
diff --git a/clang/lib/Interpreter/CMakeLists.txt b/clang/lib/Interpreter/CMakeLists.txt
index 6a06965..2cc7c59 100644
--- a/clang/lib/Interpreter/CMakeLists.txt
+++ b/clang/lib/Interpreter/CMakeLists.txt
@@ -22,6 +22,7 @@ add_clang_library(clangInterpreter
IncrementalExecutor.cpp
IncrementalParser.cpp
Interpreter.cpp
+ InterpreterValuePrinter.cpp
InterpreterUtils.cpp
Value.cpp
${WASM_SRC}
diff --git a/clang/lib/Interpreter/DeviceOffload.cpp b/clang/lib/Interpreter/DeviceOffload.cpp
index 07c9e30..1999d63 100644
--- a/clang/lib/Interpreter/DeviceOffload.cpp
+++ b/clang/lib/Interpreter/DeviceOffload.cpp
@@ -15,6 +15,7 @@
#include "clang/Basic/TargetOptions.h"
#include "clang/CodeGen/ModuleBuilder.h"
#include "clang/Frontend/CompilerInstance.h"
+#include "clang/Interpreter/PartialTranslationUnit.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/Module.h"
@@ -24,15 +25,17 @@
namespace clang {
IncrementalCUDADeviceParser::IncrementalCUDADeviceParser(
- Interpreter &Interp, std::unique_ptr<CompilerInstance> Instance,
- IncrementalParser &HostParser, llvm::LLVMContext &LLVMCtx,
+ std::unique_ptr<CompilerInstance> DeviceInstance,
+ CompilerInstance &HostInstance,
llvm::IntrusiveRefCntPtr<llvm::vfs::InMemoryFileSystem> FS,
- llvm::Error &Err)
- : IncrementalParser(Interp, std::move(Instance), LLVMCtx, Err),
- HostParser(HostParser), VFS(FS) {
+ llvm::Error &Err, const std::list<PartialTranslationUnit> &PTUs)
+ : IncrementalParser(*DeviceInstance, Err), PTUs(PTUs), VFS(FS),
+ CodeGenOpts(HostInstance.getCodeGenOpts()),
+ TargetOpts(HostInstance.getTargetOpts()) {
if (Err)
return;
- StringRef Arch = CI->getTargetOpts().CPU;
+ DeviceCI = std::move(DeviceInstance);
+ StringRef Arch = TargetOpts.CPU;
if (!Arch.starts_with("sm_") || Arch.substr(3).getAsInteger(10, SMVersion)) {
Err = llvm::joinErrors(std::move(Err), llvm::make_error<llvm::StringError>(
"Invalid CUDA architecture",
@@ -41,7 +44,7 @@ IncrementalCUDADeviceParser::IncrementalCUDADeviceParser(
}
}
-llvm::Expected<PartialTranslationUnit &>
+llvm::Expected<TranslationUnitDecl *>
IncrementalCUDADeviceParser::Parse(llvm::StringRef Input) {
auto PTU = IncrementalParser::Parse(Input);
if (!PTU)
@@ -62,7 +65,7 @@ IncrementalCUDADeviceParser::Parse(llvm::StringRef Input) {
llvm::StringRef(FatbinContent.data(), FatbinContent.size()),
"", false));
- HostParser.getCI()->getCodeGenOpts().CudaGpuBinaryFileName = FatbinFileName;
+ CodeGenOpts.CudaGpuBinaryFileName = FatbinFileName;
FatbinContent.clear();
@@ -80,7 +83,7 @@ llvm::Expected<llvm::StringRef> IncrementalCUDADeviceParser::GeneratePTX() {
std::error_code());
llvm::TargetOptions TO = llvm::TargetOptions();
llvm::TargetMachine *TargetMachine = Target->createTargetMachine(
- PTU.TheModule->getTargetTriple(), getCI()->getTargetOpts().CPU, "", TO,
+ PTU.TheModule->getTargetTriple(), TargetOpts.CPU, "", TO,
llvm::Reloc::Model::PIC_);
PTU.TheModule->setDataLayout(TargetMachine->createDataLayout());
diff --git a/clang/lib/Interpreter/DeviceOffload.h b/clang/lib/Interpreter/DeviceOffload.h
index ce4f218..b9a1aca 100644
--- a/clang/lib/Interpreter/DeviceOffload.h
+++ b/clang/lib/Interpreter/DeviceOffload.h
@@ -18,19 +18,24 @@
#include "llvm/Support/VirtualFileSystem.h"
namespace clang {
+struct PartialTranslationUnit;
+class CompilerInstance;
+class CodeGenOptions;
+class TargetOptions;
class IncrementalCUDADeviceParser : public IncrementalParser {
+ const std::list<PartialTranslationUnit> &PTUs;
+
public:
IncrementalCUDADeviceParser(
- Interpreter &Interp, std::unique_ptr<CompilerInstance> Instance,
- IncrementalParser &HostParser, llvm::LLVMContext &LLVMCtx,
+ std::unique_ptr<CompilerInstance> DeviceInstance,
+ CompilerInstance &HostInstance,
llvm::IntrusiveRefCntPtr<llvm::vfs::InMemoryFileSystem> VFS,
- llvm::Error &Err);
+ llvm::Error &Err, const std::list<PartialTranslationUnit> &PTUs);
- llvm::Expected<PartialTranslationUnit &>
- Parse(llvm::StringRef Input) override;
+ llvm::Expected<TranslationUnitDecl *> Parse(llvm::StringRef Input) override;
- // Generate PTX for the last PTU
+ // Generate PTX for the last PTU.
llvm::Expected<llvm::StringRef> GeneratePTX();
// Generate fatbinary contents in memory
@@ -39,11 +44,13 @@ public:
~IncrementalCUDADeviceParser();
protected:
- IncrementalParser &HostParser;
+ std::unique_ptr<CompilerInstance> DeviceCI;
int SMVersion;
llvm::SmallString<1024> PTXCode;
llvm::SmallVector<char, 1024> FatbinContent;
llvm::IntrusiveRefCntPtr<llvm::vfs::InMemoryFileSystem> VFS;
+ CodeGenOptions &CodeGenOpts; // Intentionally a reference.
+ const TargetOptions &TargetOpts;
};
} // namespace clang
diff --git a/clang/lib/Interpreter/IncrementalExecutor.cpp b/clang/lib/Interpreter/IncrementalExecutor.cpp
index 1824a5b..4d2adec 100644
--- a/clang/lib/Interpreter/IncrementalExecutor.cpp
+++ b/clang/lib/Interpreter/IncrementalExecutor.cpp
@@ -118,4 +118,4 @@ IncrementalExecutor::getSymbolAddress(llvm::StringRef Name,
return SymOrErr->getAddress();
}
-} // end namespace clang
+} // namespace clang
diff --git a/clang/lib/Interpreter/IncrementalParser.cpp b/clang/lib/Interpreter/IncrementalParser.cpp
index b7c809c..e43cea1 100644
--- a/clang/lib/Interpreter/IncrementalParser.cpp
+++ b/clang/lib/Interpreter/IncrementalParser.cpp
@@ -13,246 +13,40 @@
#include "IncrementalParser.h"
#include "clang/AST/DeclContextInternals.h"
-#include "clang/CodeGen/BackendUtil.h"
-#include "clang/CodeGen/CodeGenAction.h"
-#include "clang/CodeGen/ModuleBuilder.h"
#include "clang/Frontend/CompilerInstance.h"
-#include "clang/Frontend/FrontendAction.h"
-#include "clang/FrontendTool/Utils.h"
-#include "clang/Interpreter/Interpreter.h"
+#include "clang/Interpreter/PartialTranslationUnit.h"
#include "clang/Parse/Parser.h"
#include "clang/Sema/Sema.h"
-#include "llvm/Option/ArgList.h"
#include "llvm/Support/CrashRecoveryContext.h"
#include "llvm/Support/Error.h"
-#include "llvm/Support/Timer.h"
#include <sstream>
namespace clang {
-class IncrementalASTConsumer final : public ASTConsumer {
- Interpreter &Interp;
- std::unique_ptr<ASTConsumer> Consumer;
+// IncrementalParser::IncrementalParser() {}
-public:
- IncrementalASTConsumer(Interpreter &InterpRef, std::unique_ptr<ASTConsumer> C)
- : Interp(InterpRef), Consumer(std::move(C)) {}
-
- bool HandleTopLevelDecl(DeclGroupRef DGR) override final {
- if (DGR.isNull())
- return true;
- if (!Consumer)
- return true;
-
- for (Decl *D : DGR)
- if (auto *TSD = llvm::dyn_cast<TopLevelStmtDecl>(D);
- TSD && TSD->isSemiMissing())
- TSD->setStmt(Interp.SynthesizeExpr(cast<Expr>(TSD->getStmt())));
-
- return Consumer->HandleTopLevelDecl(DGR);
- }
- void HandleTranslationUnit(ASTContext &Ctx) override final {
- Consumer->HandleTranslationUnit(Ctx);
- }
- void HandleInlineFunctionDefinition(FunctionDecl *D) override final {
- Consumer->HandleInlineFunctionDefinition(D);
- }
- void HandleInterestingDecl(DeclGroupRef D) override final {
- Consumer->HandleInterestingDecl(D);
- }
- void HandleTagDeclDefinition(TagDecl *D) override final {
- Consumer->HandleTagDeclDefinition(D);
- }
- void HandleTagDeclRequiredDefinition(const TagDecl *D) override final {
- Consumer->HandleTagDeclRequiredDefinition(D);
- }
- void HandleCXXImplicitFunctionInstantiation(FunctionDecl *D) override final {
- Consumer->HandleCXXImplicitFunctionInstantiation(D);
- }
- void HandleTopLevelDeclInObjCContainer(DeclGroupRef D) override final {
- Consumer->HandleTopLevelDeclInObjCContainer(D);
- }
- void HandleImplicitImportDecl(ImportDecl *D) override final {
- Consumer->HandleImplicitImportDecl(D);
- }
- void CompleteTentativeDefinition(VarDecl *D) override final {
- Consumer->CompleteTentativeDefinition(D);
- }
- void CompleteExternalDeclaration(DeclaratorDecl *D) override final {
- Consumer->CompleteExternalDeclaration(D);
- }
- void AssignInheritanceModel(CXXRecordDecl *RD) override final {
- Consumer->AssignInheritanceModel(RD);
- }
- void HandleCXXStaticMemberVarInstantiation(VarDecl *D) override final {
- Consumer->HandleCXXStaticMemberVarInstantiation(D);
- }
- void HandleVTable(CXXRecordDecl *RD) override final {
- Consumer->HandleVTable(RD);
- }
- ASTMutationListener *GetASTMutationListener() override final {
- return Consumer->GetASTMutationListener();
- }
- ASTDeserializationListener *GetASTDeserializationListener() override final {
- return Consumer->GetASTDeserializationListener();
- }
- void PrintStats() override final { Consumer->PrintStats(); }
- bool shouldSkipFunctionBody(Decl *D) override final {
- return Consumer->shouldSkipFunctionBody(D);
- }
- static bool classof(const clang::ASTConsumer *) { return true; }
-};
-
-/// A custom action enabling the incremental processing functionality.
-///
-/// The usual \p FrontendAction expects one call to ExecuteAction and once it
-/// sees a call to \p EndSourceFile it deletes some of the important objects
-/// such as \p Preprocessor and \p Sema assuming no further input will come.
-///
-/// \p IncrementalAction ensures it keep its underlying action's objects alive
-/// as long as the \p IncrementalParser needs them.
-///
-class IncrementalAction : public WrapperFrontendAction {
-private:
- bool IsTerminating = false;
-
-public:
- IncrementalAction(CompilerInstance &CI, llvm::LLVMContext &LLVMCtx,
- llvm::Error &Err)
- : WrapperFrontendAction([&]() {
- llvm::ErrorAsOutParameter EAO(&Err);
- std::unique_ptr<FrontendAction> Act;
- switch (CI.getFrontendOpts().ProgramAction) {
- default:
- Err = llvm::createStringError(
- std::errc::state_not_recoverable,
- "Driver initialization failed. "
- "Incremental mode for action %d is not supported",
- CI.getFrontendOpts().ProgramAction);
- return Act;
- case frontend::ASTDump:
- [[fallthrough]];
- case frontend::ASTPrint:
- [[fallthrough]];
- case frontend::ParseSyntaxOnly:
- Act = CreateFrontendAction(CI);
- break;
- case frontend::PluginAction:
- [[fallthrough]];
- case frontend::EmitAssembly:
- [[fallthrough]];
- case frontend::EmitBC:
- [[fallthrough]];
- case frontend::EmitObj:
- [[fallthrough]];
- case frontend::PrintPreprocessedInput:
- [[fallthrough]];
- case frontend::EmitLLVMOnly:
- Act.reset(new EmitLLVMOnlyAction(&LLVMCtx));
- break;
- }
- return Act;
- }()) {}
- FrontendAction *getWrapped() const { return WrappedAction.get(); }
- TranslationUnitKind getTranslationUnitKind() override {
- return TU_Incremental;
- }
-
- void ExecuteAction() override {
- CompilerInstance &CI = getCompilerInstance();
- assert(CI.hasPreprocessor() && "No PP!");
-
- // Use a code completion consumer?
- CodeCompleteConsumer *CompletionConsumer = nullptr;
- if (CI.hasCodeCompletionConsumer())
- CompletionConsumer = &CI.getCodeCompletionConsumer();
-
- Preprocessor &PP = CI.getPreprocessor();
- PP.EnterMainSourceFile();
-
- if (!CI.hasSema())
- CI.createSema(getTranslationUnitKind(), CompletionConsumer);
- }
-
- // Do not terminate after processing the input. This allows us to keep various
- // clang objects alive and to incrementally grow the current TU.
- void EndSourceFile() override {
- // The WrappedAction can be nullptr if we issued an error in the ctor.
- if (IsTerminating && getWrapped())
- WrapperFrontendAction::EndSourceFile();
- }
-
- void FinalizeAction() {
- assert(!IsTerminating && "Already finalized!");
- IsTerminating = true;
- EndSourceFile();
- }
-};
-
-CodeGenerator *IncrementalParser::getCodeGen() const {
- FrontendAction *WrappedAct = Act->getWrapped();
- if (!WrappedAct->hasIRSupport())
- return nullptr;
- return static_cast<CodeGenAction *>(WrappedAct)->getCodeGenerator();
-}
-
-IncrementalParser::IncrementalParser() {}
-
-IncrementalParser::IncrementalParser(Interpreter &Interp,
- std::unique_ptr<CompilerInstance> Instance,
- llvm::LLVMContext &LLVMCtx,
+IncrementalParser::IncrementalParser(CompilerInstance &Instance,
llvm::Error &Err)
- : CI(std::move(Instance)) {
+ : S(Instance.getSema()) {
llvm::ErrorAsOutParameter EAO(&Err);
- Act = std::make_unique<IncrementalAction>(*CI, LLVMCtx, Err);
- if (Err)
- return;
- CI->ExecuteAction(*Act);
-
- if (getCodeGen())
- CachedInCodeGenModule = GenModule();
-
- std::unique_ptr<ASTConsumer> IncrConsumer =
- std::make_unique<IncrementalASTConsumer>(Interp, CI->takeASTConsumer());
- CI->setASTConsumer(std::move(IncrConsumer));
- Consumer = &CI->getASTConsumer();
- P.reset(
- new Parser(CI->getPreprocessor(), CI->getSema(), /*SkipBodies=*/false));
+ Consumer = &S.getASTConsumer();
+ P.reset(new Parser(S.getPreprocessor(), S, /*SkipBodies=*/false));
P->Initialize();
-
- // An initial PTU is needed as CUDA includes some headers automatically
- auto PTU = ParseOrWrapTopLevelDecl();
- if (auto E = PTU.takeError()) {
- consumeError(std::move(E)); // FIXME
- return; // PTU.takeError();
- }
-
- if (getCodeGen()) {
- PTU->TheModule = GenModule();
- assert(PTU->TheModule && "Failed to create initial PTU");
- }
}
-IncrementalParser::~IncrementalParser() {
- P.reset();
- Act->FinalizeAction();
-}
+IncrementalParser::~IncrementalParser() { P.reset(); }
-llvm::Expected<PartialTranslationUnit &>
+llvm::Expected<TranslationUnitDecl *>
IncrementalParser::ParseOrWrapTopLevelDecl() {
// Recover resources if we crash before exiting this method.
- Sema &S = CI->getSema();
llvm::CrashRecoveryContextCleanupRegistrar<Sema> CleanupSema(&S);
Sema::GlobalEagerInstantiationScope GlobalInstantiations(S, /*Enabled=*/true);
Sema::LocalEagerInstantiationScope LocalInstantiations(S);
- PTUs.emplace_back(PartialTranslationUnit());
- PartialTranslationUnit &LastPTU = PTUs.back();
// Add a new PTU.
ASTContext &C = S.getASTContext();
C.addTranslationUnitDecl();
- LastPTU.TUPart = C.getTranslationUnitDecl();
// Skip previous eof due to last incremental input.
if (P->getCurToken().is(tok::annot_repl_input_end)) {
@@ -276,11 +70,9 @@ IncrementalParser::ParseOrWrapTopLevelDecl() {
std::error_code());
}
- DiagnosticsEngine &Diags = getCI()->getDiagnostics();
+ DiagnosticsEngine &Diags = S.getDiagnostics();
if (Diags.hasErrorOccurred()) {
- PartialTranslationUnit MostRecentPTU = {C.getTranslationUnitDecl(),
- nullptr};
- CleanUpPTU(MostRecentPTU);
+ CleanUpPTU(C.getTranslationUnitDecl());
Diags.Reset(/*soft=*/true);
Diags.getClient()->clear();
@@ -299,12 +91,12 @@ IncrementalParser::ParseOrWrapTopLevelDecl() {
Consumer->HandleTranslationUnit(C);
- return LastPTU;
+ return C.getTranslationUnitDecl();
}
-llvm::Expected<PartialTranslationUnit &>
+llvm::Expected<TranslationUnitDecl *>
IncrementalParser::Parse(llvm::StringRef input) {
- Preprocessor &PP = CI->getPreprocessor();
+ Preprocessor &PP = S.getPreprocessor();
assert(PP.isIncrementalProcessingEnabled() && "Not in incremental mode!?");
std::ostringstream SourceName;
@@ -320,7 +112,7 @@ IncrementalParser::Parse(llvm::StringRef input) {
memcpy(MBStart, input.data(), InputSize);
MBStart[InputSize] = '\n';
- SourceManager &SM = CI->getSourceManager();
+ SourceManager &SM = S.getSourceManager();
// FIXME: Create SourceLocation, which will allow clang to order the overload
// candidates for example
@@ -356,37 +148,10 @@ IncrementalParser::Parse(llvm::StringRef input) {
"Lexer must be EOF when starting incremental parse!");
}
- if (std::unique_ptr<llvm::Module> M = GenModule())
- PTU->TheModule = std::move(M);
-
return PTU;
}
-std::unique_ptr<llvm::Module> IncrementalParser::GenModule() {
- static unsigned ID = 0;
- if (CodeGenerator *CG = getCodeGen()) {
- // Clang's CodeGen is designed to work with a single llvm::Module. In many
- // cases for convenience various CodeGen parts have a reference to the
- // llvm::Module (TheModule or Module) which does not change when a new
- // module is pushed. However, the execution engine wants to take ownership
-    // of the module, which does not map well to CodeGen's design. To work
-    // around this we created an empty module to make CodeGen happy. We
-    // should make sure it always stays empty.
- assert((!CachedInCodeGenModule ||
- (CachedInCodeGenModule->empty() &&
- CachedInCodeGenModule->global_empty() &&
- CachedInCodeGenModule->alias_empty() &&
- CachedInCodeGenModule->ifunc_empty())) &&
- "CodeGen wrote to a readonly module");
- std::unique_ptr<llvm::Module> M(CG->ReleaseModule());
- CG->StartModule("incr_module_" + std::to_string(ID++), M->getContext());
- return M;
- }
- return nullptr;
-}
-
-void IncrementalParser::CleanUpPTU(PartialTranslationUnit &PTU) {
- TranslationUnitDecl *MostRecentTU = PTU.TUPart;
+void IncrementalParser::CleanUpPTU(TranslationUnitDecl *MostRecentTU) {
if (StoredDeclsMap *Map = MostRecentTU->getPrimaryContext()->getLookupPtr()) {
for (auto &&[Key, List] : *Map) {
DeclContextLookupResult R = List.getLookupResult();
@@ -415,13 +180,8 @@ void IncrementalParser::CleanUpPTU(PartialTranslationUnit &PTU) {
// Check if we need to clean up the IdResolver chain.
if (ND->getDeclName().getFETokenInfo() && !D->getLangOpts().ObjC &&
!D->getLangOpts().CPlusPlus)
- getCI()->getSema().IdResolver.RemoveDecl(ND);
+ S.IdResolver.RemoveDecl(ND);
}
}
-llvm::StringRef IncrementalParser::GetMangledName(GlobalDecl GD) const {
- CodeGenerator *CG = getCodeGen();
- assert(CG);
- return CG->GetMangledName(GD);
-}
} // end namespace clang
diff --git a/clang/lib/Interpreter/IncrementalParser.h b/clang/lib/Interpreter/IncrementalParser.h
index f63bce5..4fdde74 100644
--- a/clang/lib/Interpreter/IncrementalParser.h
+++ b/clang/lib/Interpreter/IncrementalParser.h
@@ -13,37 +13,27 @@
#ifndef LLVM_CLANG_LIB_INTERPRETER_INCREMENTALPARSER_H
#define LLVM_CLANG_LIB_INTERPRETER_INCREMENTALPARSER_H
-#include "clang/AST/GlobalDecl.h"
-#include "clang/Interpreter/PartialTranslationUnit.h"
-
-#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Error.h"
#include <list>
#include <memory>
-namespace llvm {
-class LLVMContext;
-class Module;
-} // namespace llvm
namespace clang {
class ASTConsumer;
class CodeGenerator;
class CompilerInstance;
-class IncrementalAction;
-class Interpreter;
class Parser;
+class Sema;
+class TranslationUnitDecl;
+
/// Provides support for incremental compilation. Keeps track of the state
/// changes between subsequent incremental inputs.
///
class IncrementalParser {
protected:
- /// Long-lived, incremental parsing action.
- std::unique_ptr<IncrementalAction> Act;
-
- /// Compiler instance performing the incremental compilation.
- std::unique_ptr<CompilerInstance> CI;
+ /// The Sema performing the incremental compilation.
+ Sema &S;
/// Parser.
std::unique_ptr<Parser> P;
@@ -54,42 +44,21 @@ protected:
/// Counts the number of direct user input lines that have been parsed.
unsigned InputCount = 0;
-  /// List containing all information about every incrementally parsed piece
- /// of code.
- std::list<PartialTranslationUnit> PTUs;
-
- /// When CodeGen is created the first llvm::Module gets cached in many places
- /// and we must keep it alive.
- std::unique_ptr<llvm::Module> CachedInCodeGenModule;
-
-  IncrementalParser();
public:
- IncrementalParser(Interpreter &Interp,
- std::unique_ptr<CompilerInstance> Instance,
- llvm::LLVMContext &LLVMCtx, llvm::Error &Err);
+ IncrementalParser(CompilerInstance &Instance, llvm::Error &Err);
virtual ~IncrementalParser();
- CompilerInstance *getCI() { return CI.get(); }
- CodeGenerator *getCodeGen() const;
-
/// Parses incremental input by creating an in-memory file.
-  ///\returns a \c PartialTranslationUnit which holds information about the
- /// \c TranslationUnitDecl and \c llvm::Module corresponding to the input.
- virtual llvm::Expected<PartialTranslationUnit &> Parse(llvm::StringRef Input);
-
- /// Uses the CodeGenModule mangled name cache and avoids recomputing.
- ///\returns the mangled name of a \c GD.
- llvm::StringRef GetMangledName(GlobalDecl GD) const;
-
- void CleanUpPTU(PartialTranslationUnit &PTU);
-
- std::list<PartialTranslationUnit> &getPTUs() { return PTUs; }
+  ///\returns the \c TranslationUnitDecl corresponding to the input.
+ virtual llvm::Expected<TranslationUnitDecl *> Parse(llvm::StringRef Input);
- std::unique_ptr<llvm::Module> GenModule();
+ void CleanUpPTU(TranslationUnitDecl *MostRecentTU);
private:
- llvm::Expected<PartialTranslationUnit &> ParseOrWrapTopLevelDecl();
+ llvm::Expected<TranslationUnitDecl *> ParseOrWrapTopLevelDecl();
};
} // end namespace clang
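
For orientation, the refactor splits ownership: the Interpreter now owns the
CompilerInstance, the IncrementalAction, the PTU list, and the CodeGen state,
while IncrementalParser merely borrows Sema. A minimal sketch of driving the
slimmed-down parser (IncrementalParser is an internal class, not public API,
and this assumes a CompilerInstance whose IncrementalAction has already
executed so Sema exists; parseOne is an illustrative name):

    #include "IncrementalParser.h"
    #include "clang/Frontend/CompilerInstance.h"

    llvm::Expected<clang::TranslationUnitDecl *>
    parseOne(clang::CompilerInstance &CI, llvm::StringRef Code) {
      llvm::Error Err = llvm::Error::success();
      clang::IncrementalParser IP(CI, Err); // borrows CI.getSema()
      if (Err)
        return std::move(Err);
      return IP.Parse(Code); // now yields a TranslationUnitDecl*, not a PTU
    }
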
diff --git a/clang/lib/Interpreter/Interpreter.cpp b/clang/lib/Interpreter/Interpreter.cpp
index 7209a332..bc96da8 100644
--- a/clang/lib/Interpreter/Interpreter.cpp
+++ b/clang/lib/Interpreter/Interpreter.cpp
@@ -19,6 +19,7 @@
#include "Wasm.h"
#endif // __EMSCRIPTEN__
+#include "clang/AST/ASTConsumer.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Mangle.h"
#include "clang/AST/TypeVisitor.h"
@@ -33,7 +34,10 @@
#include "clang/Driver/Options.h"
#include "clang/Driver/Tool.h"
#include "clang/Frontend/CompilerInstance.h"
+#include "clang/Frontend/FrontendAction.h"
+#include "clang/Frontend/MultiplexConsumer.h"
#include "clang/Frontend/TextDiagnosticBuffer.h"
+#include "clang/FrontendTool/Utils.h"
#include "clang/Interpreter/Interpreter.h"
#include "clang/Interpreter/Value.h"
#include "clang/Lex/PreprocessorOptions.h"
@@ -47,10 +51,7 @@
#include "llvm/Support/raw_ostream.h"
#include "llvm/TargetParser/Host.h"
-#include <cstdarg>
-
using namespace clang;
-
// FIXME: Figure out how to unify with namespace init_convenience from
// tools/clang-import-test/clang-import-test.cpp
namespace {
@@ -138,6 +139,8 @@ CreateCI(const llvm::opt::ArgStringList &Argv) {
} // anonymous namespace
+namespace clang {
+
llvm::Expected<std::unique_ptr<CompilerInstance>>
IncrementalCompilerBuilder::create(std::string TT,
std::vector<const char *> &ClangArgv) {
@@ -241,28 +244,173 @@ IncrementalCompilerBuilder::CreateCudaHost() {
return IncrementalCompilerBuilder::createCuda(false);
}
-Interpreter::Interpreter(std::unique_ptr<CompilerInstance> CI,
+class InProcessPrintingASTConsumer final : public MultiplexConsumer {
+ Interpreter &Interp;
+
+public:
+ InProcessPrintingASTConsumer(std::unique_ptr<ASTConsumer> C, Interpreter &I)
+ : MultiplexConsumer(std::move(C)), Interp(I) {}
+ bool HandleTopLevelDecl(DeclGroupRef DGR) override final {
+ if (DGR.isNull())
+ return true;
+
+ for (Decl *D : DGR)
+ if (auto *TLSD = llvm::dyn_cast<TopLevelStmtDecl>(D))
+        if (TLSD->isSemiMissing()) {
+ auto ExprOrErr =
+ Interp.ExtractValueFromExpr(cast<Expr>(TLSD->getStmt()));
+ if (llvm::Error E = ExprOrErr.takeError()) {
+ llvm::logAllUnhandledErrors(std::move(E), llvm::errs(),
+ "Value printing failed: ");
+ return false; // abort parsing
+ }
+ TLSD->setStmt(*ExprOrErr);
+ }
+
+ return MultiplexConsumer::HandleTopLevelDecl(DGR);
+ }
+};
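
This consumer is what turns a bare expression into printed output: input with
no trailing ';' is parsed into a TopLevelStmtDecl marked "semi-missing", which
the consumer rewrites via ExtractValueFromExpr. A hedged sketch of triggering
that path through the public API (demoValuePrinting is an illustrative name):

    #include "clang/Frontend/CompilerInstance.h"
    #include "clang/Interpreter/Interpreter.h"
    #include "llvm/Support/Error.h"

    llvm::Error demoValuePrinting() {
      clang::IncrementalCompilerBuilder CB;
      auto CI = CB.CreateCpp();
      if (!CI)
        return CI.takeError();
      auto Interp = clang::Interpreter::create(std::move(*CI));
      if (!Interp)
        return Interp.takeError();
      if (llvm::Error E = (*Interp)->ParseAndExecute("int x = 42;"))
        return E;
      // No trailing ';': parsed as a semi-missing TopLevelStmtDecl, printed.
      return (*Interp)->ParseAndExecute("x");
    }
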
+
+/// A custom action enabling the incremental processing functionality.
+///
+/// The usual \p FrontendAction expects one call to ExecuteAction and, once it
+/// sees a call to \p EndSourceFile, it deletes some of the important objects
+/// such as \p Preprocessor and \p Sema, assuming no further input will come.
+///
+/// \p IncrementalAction ensures it keeps its underlying action's objects alive
+/// as long as the \p IncrementalParser needs them.
+///
+class IncrementalAction : public WrapperFrontendAction {
+private:
+ bool IsTerminating = false;
+ Interpreter &Interp;
+ std::unique_ptr<ASTConsumer> Consumer;
+
+public:
+ IncrementalAction(CompilerInstance &CI, llvm::LLVMContext &LLVMCtx,
+ llvm::Error &Err, Interpreter &I,
+ std::unique_ptr<ASTConsumer> Consumer = nullptr)
+ : WrapperFrontendAction([&]() {
+ llvm::ErrorAsOutParameter EAO(&Err);
+ std::unique_ptr<FrontendAction> Act;
+ switch (CI.getFrontendOpts().ProgramAction) {
+ default:
+ Err = llvm::createStringError(
+ std::errc::state_not_recoverable,
+ "Driver initialization failed. "
+ "Incremental mode for action %d is not supported",
+ CI.getFrontendOpts().ProgramAction);
+ return Act;
+ case frontend::ASTDump:
+ case frontend::ASTPrint:
+ case frontend::ParseSyntaxOnly:
+ Act = CreateFrontendAction(CI);
+ break;
+ case frontend::PluginAction:
+ case frontend::EmitAssembly:
+ case frontend::EmitBC:
+ case frontend::EmitObj:
+ case frontend::PrintPreprocessedInput:
+ case frontend::EmitLLVMOnly:
+ Act.reset(new EmitLLVMOnlyAction(&LLVMCtx));
+ break;
+ }
+ return Act;
+ }()),
+ Interp(I), Consumer(std::move(Consumer)) {}
+ FrontendAction *getWrapped() const { return WrappedAction.get(); }
+ TranslationUnitKind getTranslationUnitKind() override {
+ return TU_Incremental;
+ }
+
+ std::unique_ptr<ASTConsumer> CreateASTConsumer(CompilerInstance &CI,
+ StringRef InFile) override {
+ std::unique_ptr<ASTConsumer> C =
+ WrapperFrontendAction::CreateASTConsumer(CI, InFile);
+
+ if (Consumer) {
+ std::vector<std::unique_ptr<ASTConsumer>> Cs;
+ Cs.push_back(std::move(Consumer));
+ Cs.push_back(std::move(C));
+ return std::make_unique<MultiplexConsumer>(std::move(Cs));
+ }
+
+ return std::make_unique<InProcessPrintingASTConsumer>(std::move(C), Interp);
+ }
+
+ void ExecuteAction() override {
+ CompilerInstance &CI = getCompilerInstance();
+ assert(CI.hasPreprocessor() && "No PP!");
+
+ // Use a code completion consumer?
+ CodeCompleteConsumer *CompletionConsumer = nullptr;
+ if (CI.hasCodeCompletionConsumer())
+ CompletionConsumer = &CI.getCodeCompletionConsumer();
+
+ Preprocessor &PP = CI.getPreprocessor();
+ PP.EnterMainSourceFile();
+
+ if (!CI.hasSema())
+ CI.createSema(getTranslationUnitKind(), CompletionConsumer);
+ }
+
+ // Do not terminate after processing the input. This allows us to keep various
+ // clang objects alive and to incrementally grow the current TU.
+ void EndSourceFile() override {
+ // The WrappedAction can be nullptr if we issued an error in the ctor.
+ if (IsTerminating && getWrapped())
+ WrapperFrontendAction::EndSourceFile();
+ }
+
+ void FinalizeAction() {
+ assert(!IsTerminating && "Already finalized!");
+ IsTerminating = true;
+ EndSourceFile();
+ }
+};
+
+Interpreter::Interpreter(std::unique_ptr<CompilerInstance> Instance,
llvm::Error &ErrOut,
- std::unique_ptr<llvm::orc::LLJITBuilder> JITBuilder)
+ std::unique_ptr<llvm::orc::LLJITBuilder> JITBuilder,
+ std::unique_ptr<clang::ASTConsumer> Consumer)
: JITBuilder(std::move(JITBuilder)) {
+ CI = std::move(Instance);
llvm::ErrorAsOutParameter EAO(&ErrOut);
auto LLVMCtx = std::make_unique<llvm::LLVMContext>();
TSCtx = std::make_unique<llvm::orc::ThreadSafeContext>(std::move(LLVMCtx));
- IncrParser = std::make_unique<IncrementalParser>(
- *this, std::move(CI), *TSCtx->getContext(), ErrOut);
+
+ Act = std::make_unique<IncrementalAction>(*CI, *TSCtx->getContext(), ErrOut,
+ *this, std::move(Consumer));
if (ErrOut)
return;
+ CI->ExecuteAction(*Act);
- // Not all frontends support code-generation, e.g. ast-dump actions don't
- if (IncrParser->getCodeGen()) {
+ ASTContext &C = CI->getASTContext();
+
+ IncrParser = std::make_unique<IncrementalParser>(*CI, ErrOut);
+
+ if (ErrOut)
+ return;
+
+ if (getCodeGen()) {
+ CachedInCodeGenModule = GenModule();
if (llvm::Error Err = CreateExecutor()) {
ErrOut = joinErrors(std::move(ErrOut), std::move(Err));
return;
}
+ }
+
+  // The initial PTU is filled automatically by `-include` or by CUDA includes.
+ RegisterPTU(C.getTranslationUnitDecl());
+ // Prepare the IncrParser for input.
+ llvm::cantFail(Parse(""));
+
+ // Not all frontends support code-generation, e.g. ast-dump actions don't
+ if (getCodeGen()) {
// Process the PTUs that came from initialization. For example -include will
// give us a header that's processed at initialization of the preprocessor.
- for (PartialTranslationUnit &PTU : IncrParser->getPTUs())
+ for (PartialTranslationUnit &PTU : PTUs)
if (llvm::Error Err = Execute(PTU)) {
ErrOut = joinErrors(std::move(ErrOut), std::move(Err));
return;
@@ -271,6 +419,8 @@ Interpreter::Interpreter(std::unique_ptr<CompilerInstance> CI,
}
Interpreter::~Interpreter() {
+ IncrParser.reset();
+ Act->FinalizeAction();
if (IncrExecutor) {
if (llvm::Error Err = IncrExecutor->cleanUp())
llvm::report_fatal_error(
@@ -342,8 +492,8 @@ Interpreter::createWithCUDA(std::unique_ptr<CompilerInstance> CI,
llvm::Error Err = llvm::Error::success();
auto DeviceParser = std::make_unique<IncrementalCUDADeviceParser>(
- **Interp, std::move(DCI), *(*Interp)->IncrParser.get(),
- *(*Interp)->TSCtx->getContext(), IMVFS, Err);
+ std::move(DCI), *(*Interp)->getCompilerInstance(), IMVFS, Err,
+ (*Interp)->PTUs);
if (Err)
return std::move(Err);
@@ -353,12 +503,10 @@ Interpreter::createWithCUDA(std::unique_ptr<CompilerInstance> CI,
}
const CompilerInstance *Interpreter::getCompilerInstance() const {
- return IncrParser->getCI();
+ return CI.get();
}
-CompilerInstance *Interpreter::getCompilerInstance() {
- return IncrParser->getCI();
-}
+CompilerInstance *Interpreter::getCompilerInstance() { return CI.get(); }
llvm::Expected<llvm::orc::LLJIT &> Interpreter::getExecutionEngine() {
if (!IncrExecutor) {
@@ -379,22 +527,32 @@ const ASTContext &Interpreter::getASTContext() const {
void Interpreter::markUserCodeStart() {
assert(!InitPTUSize && "We only do this once");
- InitPTUSize = IncrParser->getPTUs().size();
+ InitPTUSize = PTUs.size();
}
size_t Interpreter::getEffectivePTUSize() const {
- std::list<PartialTranslationUnit> &PTUs = IncrParser->getPTUs();
assert(PTUs.size() >= InitPTUSize && "empty PTU list?");
return PTUs.size() - InitPTUSize;
}
+PartialTranslationUnit &Interpreter::RegisterPTU(TranslationUnitDecl *TU) {
+ PTUs.emplace_back(PartialTranslationUnit());
+ PartialTranslationUnit &LastPTU = PTUs.back();
+ LastPTU.TUPart = TU;
+
+ if (std::unique_ptr<llvm::Module> M = GenModule())
+ LastPTU.TheModule = std::move(M);
+
+ return LastPTU;
+}
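
For reference, a PTU couples the AST part of one increment with the
llvm::Module generated for it; this is the (abridged) shape declared in
clang/Interpreter/PartialTranslationUnit.h:

    namespace clang {
    struct PartialTranslationUnit {
      TranslationUnitDecl *TUPart;
      // Null when the wrapped action does not support IR generation.
      std::unique_ptr<llvm::Module> TheModule;
    };
    } // namespace clang
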
+
llvm::Expected<PartialTranslationUnit &>
Interpreter::Parse(llvm::StringRef Code) {
- // If we have a device parser, parse it first.
- // The generated code will be included in the host compilation
+ // If we have a device parser, parse it first. The generated code will be
+  // included in the host compilation.
if (DeviceParser) {
- auto DevicePTU = DeviceParser->Parse(Code);
- if (auto E = DevicePTU.takeError())
+ llvm::Expected<TranslationUnitDecl *> DeviceTU = DeviceParser->Parse(Code);
+ if (auto E = DeviceTU.takeError())
return std::move(E);
}
@@ -402,7 +560,12 @@ Interpreter::Parse(llvm::StringRef Code) {
// printing could cause it.
getCompilerInstance()->getDiagnostics().setSeverity(
clang::diag::warn_unused_expr, diag::Severity::Ignored, SourceLocation());
- return IncrParser->Parse(Code);
+
+ llvm::Expected<TranslationUnitDecl *> TuOrErr = IncrParser->Parse(Code);
+ if (!TuOrErr)
+ return TuOrErr.takeError();
+
+ return RegisterPTU(*TuOrErr);
}
static llvm::Expected<llvm::orc::JITTargetMachineBuilder>
@@ -420,7 +583,7 @@ llvm::Error Interpreter::CreateExecutor() {
return llvm::make_error<llvm::StringError>("Operation failed. "
"Execution engine exists",
std::error_code());
- if (!IncrParser->getCodeGen())
+ if (!getCodeGen())
return llvm::make_error<llvm::StringError>("Operation failed. "
"No code generator available",
std::error_code());
@@ -492,7 +655,7 @@ Interpreter::getSymbolAddress(GlobalDecl GD) const {
return llvm::make_error<llvm::StringError>("Operation failed. "
"No execution engine",
std::error_code());
- llvm::StringRef MangledName = IncrParser->GetMangledName(GD);
+ llvm::StringRef MangledName = getCodeGen()->GetMangledName(GD);
return getSymbolAddress(MangledName);
}
@@ -518,7 +681,6 @@ Interpreter::getSymbolAddressFromLinkerName(llvm::StringRef Name) const {
llvm::Error Interpreter::Undo(unsigned N) {
- std::list<PartialTranslationUnit> &PTUs = IncrParser->getPTUs();
if (N > getEffectivePTUSize())
return llvm::make_error<llvm::StringError>("Operation failed. "
"Too many undos",
@@ -529,7 +691,7 @@ llvm::Error Interpreter::Undo(unsigned N) {
return Err;
}
- IncrParser->CleanUpPTU(PTUs.back());
+ IncrParser->CleanUpPTU(PTUs.back().TUPart);
PTUs.pop_back();
}
return llvm::Error::success();
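
Since every successful Parse appends exactly one PTU, undo is a bounded pop
over that list. A hedged usage sketch (defineThenUndo is an illustrative
name):

    llvm::Error defineThenUndo(clang::Interpreter &Interp) {
      if (llvm::Error E = Interp.ParseAndExecute("int i = 0;"))
        return E;
      // Removes the PTU for the last input, including its JIT'd module.
      return Interp.Undo(1);
    }
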
@@ -551,416 +713,32 @@ llvm::Error Interpreter::LoadDynamicLibrary(const char *name) {
return llvm::Error::success();
}
-llvm::Expected<llvm::orc::ExecutorAddr>
-Interpreter::CompileDtorCall(CXXRecordDecl *CXXRD) {
- assert(CXXRD && "Cannot compile a destructor for a nullptr");
- if (auto Dtor = Dtors.find(CXXRD); Dtor != Dtors.end())
- return Dtor->getSecond();
-
- if (CXXRD->hasIrrelevantDestructor())
- return llvm::orc::ExecutorAddr{};
-
- CXXDestructorDecl *DtorRD =
- getCompilerInstance()->getSema().LookupDestructor(CXXRD);
-
- llvm::StringRef Name =
- IncrParser->GetMangledName(GlobalDecl(DtorRD, Dtor_Base));
- auto AddrOrErr = getSymbolAddress(Name);
- if (!AddrOrErr)
- return AddrOrErr.takeError();
-
- Dtors[CXXRD] = *AddrOrErr;
- return AddrOrErr;
-}
-
-static constexpr llvm::StringRef MagicRuntimeInterface[] = {
- "__clang_Interpreter_SetValueNoAlloc",
- "__clang_Interpreter_SetValueWithAlloc",
- "__clang_Interpreter_SetValueCopyArr", "__ci_newtag"};
-
-static std::unique_ptr<RuntimeInterfaceBuilder>
-createInProcessRuntimeInterfaceBuilder(Interpreter &Interp, ASTContext &Ctx,
- Sema &S);
-
-std::unique_ptr<RuntimeInterfaceBuilder> Interpreter::FindRuntimeInterface() {
- if (llvm::all_of(ValuePrintingInfo, [](Expr *E) { return E != nullptr; }))
- return nullptr;
-
- Sema &S = getCompilerInstance()->getSema();
- ASTContext &Ctx = S.getASTContext();
-
- auto LookupInterface = [&](Expr *&Interface, llvm::StringRef Name) {
- LookupResult R(S, &Ctx.Idents.get(Name), SourceLocation(),
- Sema::LookupOrdinaryName,
- RedeclarationKind::ForVisibleRedeclaration);
- S.LookupQualifiedName(R, Ctx.getTranslationUnitDecl());
- if (R.empty())
- return false;
-
- CXXScopeSpec CSS;
- Interface = S.BuildDeclarationNameExpr(CSS, R, /*ADL=*/false).get();
- return true;
- };
-
- if (!LookupInterface(ValuePrintingInfo[NoAlloc],
- MagicRuntimeInterface[NoAlloc]))
- return nullptr;
- if (Ctx.getLangOpts().CPlusPlus) {
- if (!LookupInterface(ValuePrintingInfo[WithAlloc],
- MagicRuntimeInterface[WithAlloc]))
- return nullptr;
- if (!LookupInterface(ValuePrintingInfo[CopyArray],
- MagicRuntimeInterface[CopyArray]))
- return nullptr;
- if (!LookupInterface(ValuePrintingInfo[NewTag],
- MagicRuntimeInterface[NewTag]))
- return nullptr;
- }
-
- return createInProcessRuntimeInterfaceBuilder(*this, Ctx, S);
-}
-
-namespace {
-
-class InterfaceKindVisitor
- : public TypeVisitor<InterfaceKindVisitor, Interpreter::InterfaceKind> {
- friend class InProcessRuntimeInterfaceBuilder;
-
- ASTContext &Ctx;
- Sema &S;
- Expr *E;
- llvm::SmallVector<Expr *, 3> Args;
-
-public:
- InterfaceKindVisitor(ASTContext &Ctx, Sema &S, Expr *E)
- : Ctx(Ctx), S(S), E(E) {}
-
- Interpreter::InterfaceKind VisitRecordType(const RecordType *Ty) {
- return Interpreter::InterfaceKind::WithAlloc;
- }
-
- Interpreter::InterfaceKind
- VisitMemberPointerType(const MemberPointerType *Ty) {
- return Interpreter::InterfaceKind::WithAlloc;
- }
-
- Interpreter::InterfaceKind
- VisitConstantArrayType(const ConstantArrayType *Ty) {
- return Interpreter::InterfaceKind::CopyArray;
- }
-
- Interpreter::InterfaceKind
- VisitFunctionProtoType(const FunctionProtoType *Ty) {
- HandlePtrType(Ty);
- return Interpreter::InterfaceKind::NoAlloc;
- }
-
- Interpreter::InterfaceKind VisitPointerType(const PointerType *Ty) {
- HandlePtrType(Ty);
- return Interpreter::InterfaceKind::NoAlloc;
- }
-
- Interpreter::InterfaceKind VisitReferenceType(const ReferenceType *Ty) {
- ExprResult AddrOfE = S.CreateBuiltinUnaryOp(SourceLocation(), UO_AddrOf, E);
- assert(!AddrOfE.isInvalid() && "Can not create unary expression");
- Args.push_back(AddrOfE.get());
- return Interpreter::InterfaceKind::NoAlloc;
- }
-
- Interpreter::InterfaceKind VisitBuiltinType(const BuiltinType *Ty) {
- if (Ty->isNullPtrType())
- Args.push_back(E);
- else if (Ty->isFloatingType())
- Args.push_back(E);
- else if (Ty->isIntegralOrEnumerationType())
- HandleIntegralOrEnumType(Ty);
- else if (Ty->isVoidType()) {
- // Do we need to still run `E`?
- }
-
- return Interpreter::InterfaceKind::NoAlloc;
- }
-
- Interpreter::InterfaceKind VisitEnumType(const EnumType *Ty) {
- HandleIntegralOrEnumType(Ty);
- return Interpreter::InterfaceKind::NoAlloc;
- }
-
-private:
- // Force cast these types to the uint that fits the register size. That way we
- // reduce the number of overloads of `__clang_Interpreter_SetValueNoAlloc`.
- void HandleIntegralOrEnumType(const Type *Ty) {
- uint64_t PtrBits = Ctx.getTypeSize(Ctx.VoidPtrTy);
- QualType UIntTy = Ctx.getBitIntType(/*Unsigned=*/true, PtrBits);
- TypeSourceInfo *TSI = Ctx.getTrivialTypeSourceInfo(UIntTy);
- ExprResult CastedExpr =
- S.BuildCStyleCastExpr(SourceLocation(), TSI, SourceLocation(), E);
- assert(!CastedExpr.isInvalid() && "Cannot create cstyle cast expr");
- Args.push_back(CastedExpr.get());
- }
-
- void HandlePtrType(const Type *Ty) {
- TypeSourceInfo *TSI = Ctx.getTrivialTypeSourceInfo(Ctx.VoidPtrTy);
- ExprResult CastedExpr =
- S.BuildCStyleCastExpr(SourceLocation(), TSI, SourceLocation(), E);
- assert(!CastedExpr.isInvalid() && "Can not create cstyle cast expression");
- Args.push_back(CastedExpr.get());
- }
-};
-
-class InProcessRuntimeInterfaceBuilder : public RuntimeInterfaceBuilder {
- Interpreter &Interp;
- ASTContext &Ctx;
- Sema &S;
-
-public:
- InProcessRuntimeInterfaceBuilder(Interpreter &Interp, ASTContext &C, Sema &S)
- : Interp(Interp), Ctx(C), S(S) {}
-
- TransformExprFunction *getPrintValueTransformer() override {
- return &transformForValuePrinting;
- }
-
-private:
- static ExprResult transformForValuePrinting(RuntimeInterfaceBuilder *Builder,
- Expr *E,
- ArrayRef<Expr *> FixedArgs) {
- auto *B = static_cast<InProcessRuntimeInterfaceBuilder *>(Builder);
-
- // Get rid of ExprWithCleanups.
- if (auto *EWC = llvm::dyn_cast_if_present<ExprWithCleanups>(E))
- E = EWC->getSubExpr();
-
- InterfaceKindVisitor Visitor(B->Ctx, B->S, E);
-
- // The Interpreter* parameter and the out parameter `OutVal`.
- for (Expr *E : FixedArgs)
- Visitor.Args.push_back(E);
-
- QualType Ty = E->getType();
- QualType DesugaredTy = Ty.getDesugaredType(B->Ctx);
-
- // For lvalue struct, we treat it as a reference.
- if (DesugaredTy->isRecordType() && E->isLValue()) {
- DesugaredTy = B->Ctx.getLValueReferenceType(DesugaredTy);
- Ty = B->Ctx.getLValueReferenceType(Ty);
- }
-
- Expr *TypeArg = CStyleCastPtrExpr(B->S, B->Ctx.VoidPtrTy,
- (uintptr_t)Ty.getAsOpaquePtr());
- // The QualType parameter `OpaqueType`, represented as `void*`.
- Visitor.Args.push_back(TypeArg);
-
- // We push the last parameter based on the type of the Expr. Note we need
-    // special care for rvalue structs.
- Interpreter::InterfaceKind Kind = Visitor.Visit(&*DesugaredTy);
- switch (Kind) {
- case Interpreter::InterfaceKind::WithAlloc:
- case Interpreter::InterfaceKind::CopyArray: {
- // __clang_Interpreter_SetValueWithAlloc.
- ExprResult AllocCall = B->S.ActOnCallExpr(
- /*Scope=*/nullptr,
- B->Interp
- .getValuePrintingInfo()[Interpreter::InterfaceKind::WithAlloc],
- E->getBeginLoc(), Visitor.Args, E->getEndLoc());
- assert(!AllocCall.isInvalid() && "Can't create runtime interface call!");
-
- TypeSourceInfo *TSI =
- B->Ctx.getTrivialTypeSourceInfo(Ty, SourceLocation());
-
- // Force CodeGen to emit destructor.
- if (auto *RD = Ty->getAsCXXRecordDecl()) {
- auto *Dtor = B->S.LookupDestructor(RD);
- Dtor->addAttr(UsedAttr::CreateImplicit(B->Ctx));
- B->Interp.getCompilerInstance()->getASTConsumer().HandleTopLevelDecl(
- DeclGroupRef(Dtor));
- }
-
- // __clang_Interpreter_SetValueCopyArr.
- if (Kind == Interpreter::InterfaceKind::CopyArray) {
- const auto *ConstantArrTy =
- cast<ConstantArrayType>(DesugaredTy.getTypePtr());
- size_t ArrSize = B->Ctx.getConstantArrayElementCount(ConstantArrTy);
- Expr *ArrSizeExpr = IntegerLiteralExpr(B->Ctx, ArrSize);
- Expr *Args[] = {E, AllocCall.get(), ArrSizeExpr};
- return B->S.ActOnCallExpr(
- /*Scope *=*/nullptr,
- B->Interp
- .getValuePrintingInfo()[Interpreter::InterfaceKind::CopyArray],
- SourceLocation(), Args, SourceLocation());
- }
- Expr *Args[] = {
- AllocCall.get(),
- B->Interp.getValuePrintingInfo()[Interpreter::InterfaceKind::NewTag]};
- ExprResult CXXNewCall = B->S.BuildCXXNew(
- E->getSourceRange(),
- /*UseGlobal=*/true, /*PlacementLParen=*/SourceLocation(), Args,
- /*PlacementRParen=*/SourceLocation(),
- /*TypeIdParens=*/SourceRange(), TSI->getType(), TSI, std::nullopt,
- E->getSourceRange(), E);
-
- assert(!CXXNewCall.isInvalid() &&
- "Can't create runtime placement new call!");
-
- return B->S.ActOnFinishFullExpr(CXXNewCall.get(),
- /*DiscardedValue=*/false);
- }
- // __clang_Interpreter_SetValueNoAlloc.
- case Interpreter::InterfaceKind::NoAlloc: {
- return B->S.ActOnCallExpr(
- /*Scope=*/nullptr,
- B->Interp.getValuePrintingInfo()[Interpreter::InterfaceKind::NoAlloc],
- E->getBeginLoc(), Visitor.Args, E->getEndLoc());
- }
- default:
- llvm_unreachable("Unhandled Interpreter::InterfaceKind");
- }
- }
-};
-} // namespace
-
-static std::unique_ptr<RuntimeInterfaceBuilder>
-createInProcessRuntimeInterfaceBuilder(Interpreter &Interp, ASTContext &Ctx,
- Sema &S) {
- return std::make_unique<InProcessRuntimeInterfaceBuilder>(Interp, Ctx, S);
-}
-
-// This synthesizes a call expression to a special
-// function that is responsible for generating the Value.
-// In general, we transform:
-// clang-repl> x
-// To:
-// // 1. If x is a built-in type like int, float.
-// __clang_Interpreter_SetValueNoAlloc(ThisInterp, OpaqueValue, xQualType, x);
-// // 2. If x is a struct, and an lvalue.
-//   __clang_Interpreter_SetValueNoAlloc(ThisInterp, OpaqueValue, xQualType,
-//   &x);
-// // 3. If x is a struct, but an rvalue.
-// new (__clang_Interpreter_SetValueWithAlloc(ThisInterp, OpaqueValue,
-// xQualType)) (x);
-
-Expr *Interpreter::SynthesizeExpr(Expr *E) {
- Sema &S = getCompilerInstance()->getSema();
- ASTContext &Ctx = S.getASTContext();
-
- if (!RuntimeIB) {
- RuntimeIB = FindRuntimeInterface();
- AddPrintValueCall = RuntimeIB->getPrintValueTransformer();
+std::unique_ptr<llvm::Module> Interpreter::GenModule() {
+ static unsigned ID = 0;
+ if (CodeGenerator *CG = getCodeGen()) {
+    // Clang's CodeGen is designed to work with a single llvm::Module. In many
+    // cases, for convenience, various CodeGen parts keep a reference to the
+    // llvm::Module (TheModule or Module), which does not change when a new
+    // module is pushed. However, the execution engine wants to take ownership
+    // of the module, which does not map well to CodeGen's design. To work
+    // around this we create an empty module to make CodeGen happy, and we
+    // must make sure it always stays empty.
+ assert((!CachedInCodeGenModule || (CachedInCodeGenModule->empty() &&
+ CachedInCodeGenModule->global_empty() &&
+ CachedInCodeGenModule->alias_empty() &&
+ CachedInCodeGenModule->ifunc_empty())) &&
+ "CodeGen wrote to a readonly module");
+ std::unique_ptr<llvm::Module> M(CG->ReleaseModule());
+ CG->StartModule("incr_module_" + std::to_string(ID++), M->getContext());
+ return M;
}
-
- assert(AddPrintValueCall &&
- "We don't have a runtime interface for pretty print!");
-
- // Create parameter `ThisInterp`.
- auto *ThisInterp = CStyleCastPtrExpr(S, Ctx.VoidPtrTy, (uintptr_t)this);
-
- // Create parameter `OutVal`.
- auto *OutValue = CStyleCastPtrExpr(S, Ctx.VoidPtrTy, (uintptr_t)&LastValue);
-
- // Build `__clang_Interpreter_SetValue*` call.
- ExprResult Result =
- AddPrintValueCall(RuntimeIB.get(), E, {ThisInterp, OutValue});
-
- // It could fail, like printing an array type in C. (not supported)
- if (Result.isInvalid())
- return E;
- return Result.get();
+ return nullptr;
}
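
Each increment therefore yields a fresh module named incr_module_N whose
ownership leaves CodeGen. A minimal sketch of the hand-off to ORC that this
design enables (assuming the usual LLJIT/ThreadSafeContext setup; addIncrement
is an illustrative name):

    #include "llvm/ExecutionEngine/Orc/LLJIT.h"
    #include "llvm/ExecutionEngine/Orc/ThreadSafeModule.h"

    llvm::Error addIncrement(llvm::orc::LLJIT &JIT,
                             std::unique_ptr<llvm::Module> M,
                             llvm::orc::ThreadSafeContext &TSCtx) {
      // Ownership of "incr_module_N" transfers to the JIT, which is exactly
      // why CodeGen must be restarted on a fresh, empty module each time.
      return JIT.addIRModule(llvm::orc::ThreadSafeModule(std::move(M), TSCtx));
    }
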
-// Temporary rvalue structs that need special care.
-REPL_EXTERNAL_VISIBILITY void *
-__clang_Interpreter_SetValueWithAlloc(void *This, void *OutVal,
- void *OpaqueType) {
- Value &VRef = *(Value *)OutVal;
- VRef = Value(static_cast<Interpreter *>(This), OpaqueType);
- return VRef.getPtr();
-}
-
-extern "C" void REPL_EXTERNAL_VISIBILITY __clang_Interpreter_SetValueNoAlloc(
- void *This, void *OutVal, void *OpaqueType, ...) {
- Value &VRef = *(Value *)OutVal;
- Interpreter *I = static_cast<Interpreter *>(This);
- VRef = Value(I, OpaqueType);
- if (VRef.isVoid())
- return;
-
- va_list args;
- va_start(args, /*last named param*/ OpaqueType);
-
- QualType QT = VRef.getType();
- if (VRef.getKind() == Value::K_PtrOrObj) {
- VRef.setPtr(va_arg(args, void *));
- } else {
- if (const auto *ET = QT->getAs<EnumType>())
- QT = ET->getDecl()->getIntegerType();
- switch (QT->castAs<BuiltinType>()->getKind()) {
- default:
- llvm_unreachable("unknown type kind!");
- break;
- // Types shorter than int are resolved as int, else va_arg has UB.
- case BuiltinType::Bool:
- VRef.setBool(va_arg(args, int));
- break;
- case BuiltinType::Char_S:
- VRef.setChar_S(va_arg(args, int));
- break;
- case BuiltinType::SChar:
- VRef.setSChar(va_arg(args, int));
- break;
- case BuiltinType::Char_U:
- VRef.setChar_U(va_arg(args, unsigned));
- break;
- case BuiltinType::UChar:
- VRef.setUChar(va_arg(args, unsigned));
- break;
- case BuiltinType::Short:
- VRef.setShort(va_arg(args, int));
- break;
- case BuiltinType::UShort:
- VRef.setUShort(va_arg(args, unsigned));
- break;
- case BuiltinType::Int:
- VRef.setInt(va_arg(args, int));
- break;
- case BuiltinType::UInt:
- VRef.setUInt(va_arg(args, unsigned));
- break;
- case BuiltinType::Long:
- VRef.setLong(va_arg(args, long));
- break;
- case BuiltinType::ULong:
- VRef.setULong(va_arg(args, unsigned long));
- break;
- case BuiltinType::LongLong:
- VRef.setLongLong(va_arg(args, long long));
- break;
- case BuiltinType::ULongLong:
- VRef.setULongLong(va_arg(args, unsigned long long));
- break;
- // Types shorter than double are resolved as double, else va_arg has UB.
- case BuiltinType::Float:
- VRef.setFloat(va_arg(args, double));
- break;
- case BuiltinType::Double:
- VRef.setDouble(va_arg(args, double));
- break;
- case BuiltinType::LongDouble:
- VRef.setLongDouble(va_arg(args, long double));
- break;
- // See REPL_BUILTIN_TYPES.
- }
- }
- va_end(args);
-}
-
-// A trampoline to work around the fact that operator placement new cannot
-// really be forward declared due to libc++ and libstdc++ declaration mismatch.
-// FIXME: __clang_Interpreter_NewTag is an ODR violation because we get the
-// same definition in the interpreter runtime. We should move it into a
-// runtime header which gets included by the interpreter and here.
-struct __clang_Interpreter_NewTag {};
-REPL_EXTERNAL_VISIBILITY void *
-operator new(size_t __sz, void *__p, __clang_Interpreter_NewTag) noexcept {
- // Just forward to the standard operator placement new.
- return operator new(__sz, __p);
+CodeGenerator *Interpreter::getCodeGen() const {
+ FrontendAction *WrappedAct = Act->getWrapped();
+ if (!WrappedAct->hasIRSupport())
+ return nullptr;
+ return static_cast<CodeGenAction *>(WrappedAct)->getCodeGenerator();
}
+} // namespace clang
diff --git a/clang/lib/Interpreter/InterpreterValuePrinter.cpp b/clang/lib/Interpreter/InterpreterValuePrinter.cpp
new file mode 100644
index 0000000..3e3fbfd
--- /dev/null
+++ b/clang/lib/Interpreter/InterpreterValuePrinter.cpp
@@ -0,0 +1,400 @@
+//===--- InterpreterValuePrinter.cpp - Value printing utils -----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements routines for in-process value printing in clang-repl.
+//
+//===----------------------------------------------------------------------===//
+
+#include "IncrementalParser.h"
+#include "InterpreterUtils.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/PrettyPrinter.h"
+#include "clang/AST/Type.h"
+#include "clang/Frontend/CompilerInstance.h"
+#include "clang/Interpreter/Interpreter.h"
+#include "clang/Interpreter/Value.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Sema/Lookup.h"
+#include "clang/Sema/Sema.h"
+
+#include "llvm/Support/Error.h"
+#include "llvm/Support/raw_ostream.h"
+
+#include <cassert>
+#include <string>
+
+#include <cstdarg>
+
+namespace clang {
+
+llvm::Expected<llvm::orc::ExecutorAddr>
+Interpreter::CompileDtorCall(CXXRecordDecl *CXXRD) {
+ assert(CXXRD && "Cannot compile a destructor for a nullptr");
+ if (auto Dtor = Dtors.find(CXXRD); Dtor != Dtors.end())
+ return Dtor->getSecond();
+
+ if (CXXRD->hasIrrelevantDestructor())
+ return llvm::orc::ExecutorAddr{};
+
+ CXXDestructorDecl *DtorRD =
+ getCompilerInstance()->getSema().LookupDestructor(CXXRD);
+
+ llvm::StringRef Name =
+ getCodeGen()->GetMangledName(GlobalDecl(DtorRD, Dtor_Base));
+ auto AddrOrErr = getSymbolAddress(Name);
+ if (!AddrOrErr)
+ return AddrOrErr.takeError();
+
+ Dtors[CXXRD] = *AddrOrErr;
+ return AddrOrErr;
+}
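
The destructor path above is just one client of the mangled-name lookup; the
same route resolves any symbol the interpreter has JIT'd. A hedged sketch (the
mangled name is illustrative and assumes foo() was previously defined in the
session; lookupFn is an illustrative name):

    llvm::Expected<void (*)()> lookupFn(const clang::Interpreter &Interp) {
      auto Addr = Interp.getSymbolAddress("_Z3foov"); // mangled "foo()"
      if (!Addr)
        return Addr.takeError();
      // ExecutorAddr converts back to a callable host pointer in-process.
      return Addr->toPtr<void (*)()>();
    }
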
+
+enum InterfaceKind { NoAlloc, WithAlloc, CopyArray, NewTag };
+
+class InterfaceKindVisitor
+ : public TypeVisitor<InterfaceKindVisitor, InterfaceKind> {
+
+ Sema &S;
+ Expr *E;
+ llvm::SmallVectorImpl<Expr *> &Args;
+
+public:
+ InterfaceKindVisitor(Sema &S, Expr *E, llvm::SmallVectorImpl<Expr *> &Args)
+ : S(S), E(E), Args(Args) {}
+
+ InterfaceKind computeInterfaceKind(QualType Ty) {
+ return Visit(Ty.getTypePtr());
+ }
+
+ InterfaceKind VisitRecordType(const RecordType *Ty) {
+ return InterfaceKind::WithAlloc;
+ }
+
+ InterfaceKind VisitMemberPointerType(const MemberPointerType *Ty) {
+ return InterfaceKind::WithAlloc;
+ }
+
+ InterfaceKind VisitConstantArrayType(const ConstantArrayType *Ty) {
+ return InterfaceKind::CopyArray;
+ }
+
+ InterfaceKind VisitFunctionProtoType(const FunctionProtoType *Ty) {
+ HandlePtrType(Ty);
+ return InterfaceKind::NoAlloc;
+ }
+
+ InterfaceKind VisitPointerType(const PointerType *Ty) {
+ HandlePtrType(Ty);
+ return InterfaceKind::NoAlloc;
+ }
+
+ InterfaceKind VisitReferenceType(const ReferenceType *Ty) {
+ ExprResult AddrOfE = S.CreateBuiltinUnaryOp(SourceLocation(), UO_AddrOf, E);
+ assert(!AddrOfE.isInvalid() && "Can not create unary expression");
+ Args.push_back(AddrOfE.get());
+ return InterfaceKind::NoAlloc;
+ }
+
+ InterfaceKind VisitBuiltinType(const BuiltinType *Ty) {
+ if (Ty->isNullPtrType())
+ Args.push_back(E);
+ else if (Ty->isFloatingType())
+ Args.push_back(E);
+ else if (Ty->isIntegralOrEnumerationType())
+ HandleIntegralOrEnumType(Ty);
+ else if (Ty->isVoidType()) {
+      // Do we still need to run `E`?
+ }
+
+ return InterfaceKind::NoAlloc;
+ }
+
+ InterfaceKind VisitEnumType(const EnumType *Ty) {
+ HandleIntegralOrEnumType(Ty);
+ return InterfaceKind::NoAlloc;
+ }
+
+private:
+ // Force cast these types to the uint that fits the register size. That way we
+ // reduce the number of overloads of `__clang_Interpreter_SetValueNoAlloc`.
+ void HandleIntegralOrEnumType(const Type *Ty) {
+ ASTContext &Ctx = S.getASTContext();
+ uint64_t PtrBits = Ctx.getTypeSize(Ctx.VoidPtrTy);
+ QualType UIntTy = Ctx.getBitIntType(/*Unsigned=*/true, PtrBits);
+ TypeSourceInfo *TSI = Ctx.getTrivialTypeSourceInfo(UIntTy);
+ ExprResult CastedExpr =
+ S.BuildCStyleCastExpr(SourceLocation(), TSI, SourceLocation(), E);
+ assert(!CastedExpr.isInvalid() && "Cannot create cstyle cast expr");
+ Args.push_back(CastedExpr.get());
+ }
+
+ void HandlePtrType(const Type *Ty) {
+ ASTContext &Ctx = S.getASTContext();
+ TypeSourceInfo *TSI = Ctx.getTrivialTypeSourceInfo(Ctx.VoidPtrTy);
+ ExprResult CastedExpr =
+ S.BuildCStyleCastExpr(SourceLocation(), TSI, SourceLocation(), E);
+ assert(!CastedExpr.isInvalid() && "Can not create cstyle cast expression");
+ Args.push_back(CastedExpr.get());
+ }
+};
+
+// This synthesizes a call expression to a special
+// function that is responsible for generating the Value.
+// In general, we transform:
+// clang-repl> x
+// To:
+// // 1. If x is a built-in type like int, float.
+// __clang_Interpreter_SetValueNoAlloc(ThisInterp, OpaqueValue, xQualType, x);
+//   // 2. If x is a struct, and an lvalue.
+//   __clang_Interpreter_SetValueNoAlloc(ThisInterp, OpaqueValue, xQualType,
+//   &x);
+//   // 3. If x is a struct, but an rvalue.
+// new (__clang_Interpreter_SetValueWithAlloc(ThisInterp, OpaqueValue,
+// xQualType)) (x);
+llvm::Expected<Expr *> Interpreter::ExtractValueFromExpr(Expr *E) {
+ Sema &S = getCompilerInstance()->getSema();
+ ASTContext &Ctx = S.getASTContext();
+
+ // Find the value printing builtins.
+ if (!ValuePrintingInfo[0]) {
+ assert(llvm::all_of(ValuePrintingInfo, [](Expr *E) { return !E; }));
+
+ auto LookupInterface = [&](Expr *&Interface,
+ llvm::StringRef Name) -> llvm::Error {
+ LookupResult R(S, &Ctx.Idents.get(Name), SourceLocation(),
+ Sema::LookupOrdinaryName,
+ RedeclarationKind::ForVisibleRedeclaration);
+ S.LookupQualifiedName(R, Ctx.getTranslationUnitDecl());
+ if (R.empty())
+ return llvm::make_error<llvm::StringError>(
+ Name + " not found!", llvm::inconvertibleErrorCode());
+
+ CXXScopeSpec CSS;
+ Interface = S.BuildDeclarationNameExpr(CSS, R, /*ADL=*/false).get();
+ return llvm::Error::success();
+ };
+ static constexpr llvm::StringRef Builtin[] = {
+ "__clang_Interpreter_SetValueNoAlloc",
+ "__clang_Interpreter_SetValueWithAlloc",
+ "__clang_Interpreter_SetValueCopyArr", "__ci_newtag"};
+ if (llvm::Error Err =
+ LookupInterface(ValuePrintingInfo[NoAlloc], Builtin[NoAlloc]))
+ return std::move(Err);
+
+ if (Ctx.getLangOpts().CPlusPlus) {
+ if (llvm::Error Err =
+ LookupInterface(ValuePrintingInfo[WithAlloc], Builtin[WithAlloc]))
+ return std::move(Err);
+ if (llvm::Error Err =
+ LookupInterface(ValuePrintingInfo[CopyArray], Builtin[CopyArray]))
+ return std::move(Err);
+ if (llvm::Error Err =
+ LookupInterface(ValuePrintingInfo[NewTag], Builtin[NewTag]))
+ return std::move(Err);
+ }
+ }
+
+ llvm::SmallVector<Expr *, 4> AdjustedArgs;
+ // Create parameter `ThisInterp`.
+ AdjustedArgs.push_back(CStyleCastPtrExpr(S, Ctx.VoidPtrTy, (uintptr_t)this));
+
+ // Create parameter `OutVal`.
+ AdjustedArgs.push_back(
+ CStyleCastPtrExpr(S, Ctx.VoidPtrTy, (uintptr_t)&LastValue));
+
+ // Build `__clang_Interpreter_SetValue*` call.
+
+ // Get rid of ExprWithCleanups.
+ if (auto *EWC = llvm::dyn_cast_if_present<ExprWithCleanups>(E))
+ E = EWC->getSubExpr();
+
+ QualType Ty = E->getType();
+ QualType DesugaredTy = Ty.getDesugaredType(Ctx);
+
+ // For lvalue struct, we treat it as a reference.
+ if (DesugaredTy->isRecordType() && E->isLValue()) {
+ DesugaredTy = Ctx.getLValueReferenceType(DesugaredTy);
+ Ty = Ctx.getLValueReferenceType(Ty);
+ }
+
+ Expr *TypeArg =
+ CStyleCastPtrExpr(S, Ctx.VoidPtrTy, (uintptr_t)Ty.getAsOpaquePtr());
+ // The QualType parameter `OpaqueType`, represented as `void*`.
+ AdjustedArgs.push_back(TypeArg);
+
+ // We push the last parameter based on the type of the Expr. Note we need
+  // special care for rvalue structs.
+ InterfaceKindVisitor V(S, E, AdjustedArgs);
+ Scope *Scope = nullptr;
+ ExprResult SetValueE;
+ InterfaceKind Kind = V.computeInterfaceKind(DesugaredTy);
+ switch (Kind) {
+ case InterfaceKind::WithAlloc:
+    [[fallthrough]];
+ case InterfaceKind::CopyArray: {
+ // __clang_Interpreter_SetValueWithAlloc.
+ ExprResult AllocCall =
+ S.ActOnCallExpr(Scope, ValuePrintingInfo[InterfaceKind::WithAlloc],
+ E->getBeginLoc(), AdjustedArgs, E->getEndLoc());
+ assert(!AllocCall.isInvalid() && "Can't create runtime interface call!");
+
+ TypeSourceInfo *TSI = Ctx.getTrivialTypeSourceInfo(Ty, SourceLocation());
+
+ // Force CodeGen to emit destructor.
+ if (auto *RD = Ty->getAsCXXRecordDecl()) {
+ auto *Dtor = S.LookupDestructor(RD);
+ Dtor->addAttr(UsedAttr::CreateImplicit(Ctx));
+ getCompilerInstance()->getASTConsumer().HandleTopLevelDecl(
+ DeclGroupRef(Dtor));
+ }
+
+ // __clang_Interpreter_SetValueCopyArr.
+ if (Kind == InterfaceKind::CopyArray) {
+ const auto *ConstantArrTy =
+ cast<ConstantArrayType>(DesugaredTy.getTypePtr());
+ size_t ArrSize = Ctx.getConstantArrayElementCount(ConstantArrTy);
+ Expr *ArrSizeExpr = IntegerLiteralExpr(Ctx, ArrSize);
+ Expr *Args[] = {E, AllocCall.get(), ArrSizeExpr};
+ SetValueE =
+ S.ActOnCallExpr(Scope, ValuePrintingInfo[InterfaceKind::CopyArray],
+ SourceLocation(), Args, SourceLocation());
+ }
+ Expr *Args[] = {AllocCall.get(), ValuePrintingInfo[InterfaceKind::NewTag]};
+ ExprResult CXXNewCall = S.BuildCXXNew(
+ E->getSourceRange(),
+ /*UseGlobal=*/true, /*PlacementLParen=*/SourceLocation(), Args,
+ /*PlacementRParen=*/SourceLocation(),
+ /*TypeIdParens=*/SourceRange(), TSI->getType(), TSI, std::nullopt,
+ E->getSourceRange(), E);
+
+ assert(!CXXNewCall.isInvalid() &&
+ "Can't create runtime placement new call!");
+
+ SetValueE = S.ActOnFinishFullExpr(CXXNewCall.get(),
+ /*DiscardedValue=*/false);
+ break;
+ }
+ // __clang_Interpreter_SetValueNoAlloc.
+ case InterfaceKind::NoAlloc: {
+ SetValueE =
+ S.ActOnCallExpr(Scope, ValuePrintingInfo[InterfaceKind::NoAlloc],
+ E->getBeginLoc(), AdjustedArgs, E->getEndLoc());
+ break;
+ }
+ default:
+ llvm_unreachable("Unhandled InterfaceKind");
+ }
+
+ // It could fail, like printing an array type in C. (not supported)
+ if (SetValueE.isInvalid())
+ return E;
+
+ return SetValueE.get();
+}
+
+} // namespace clang
+
+using namespace clang;
+
+// Temporary rvalue structs that need special care.
+REPL_EXTERNAL_VISIBILITY void *
+__clang_Interpreter_SetValueWithAlloc(void *This, void *OutVal,
+ void *OpaqueType) {
+ Value &VRef = *(Value *)OutVal;
+ VRef = Value(static_cast<Interpreter *>(This), OpaqueType);
+ return VRef.getPtr();
+}
+
+extern "C" void REPL_EXTERNAL_VISIBILITY __clang_Interpreter_SetValueNoAlloc(
+ void *This, void *OutVal, void *OpaqueType, ...) {
+ Value &VRef = *(Value *)OutVal;
+ Interpreter *I = static_cast<Interpreter *>(This);
+ VRef = Value(I, OpaqueType);
+ if (VRef.isVoid())
+ return;
+
+ va_list args;
+ va_start(args, /*last named param*/ OpaqueType);
+
+ QualType QT = VRef.getType();
+ if (VRef.getKind() == Value::K_PtrOrObj) {
+ VRef.setPtr(va_arg(args, void *));
+ } else {
+ if (const auto *ET = QT->getAs<EnumType>())
+ QT = ET->getDecl()->getIntegerType();
+ switch (QT->castAs<BuiltinType>()->getKind()) {
+ default:
+ llvm_unreachable("unknown type kind!");
+ break;
+ // Types shorter than int are resolved as int, else va_arg has UB.
+ case BuiltinType::Bool:
+ VRef.setBool(va_arg(args, int));
+ break;
+ case BuiltinType::Char_S:
+ VRef.setChar_S(va_arg(args, int));
+ break;
+ case BuiltinType::SChar:
+ VRef.setSChar(va_arg(args, int));
+ break;
+ case BuiltinType::Char_U:
+ VRef.setChar_U(va_arg(args, unsigned));
+ break;
+ case BuiltinType::UChar:
+ VRef.setUChar(va_arg(args, unsigned));
+ break;
+ case BuiltinType::Short:
+ VRef.setShort(va_arg(args, int));
+ break;
+ case BuiltinType::UShort:
+ VRef.setUShort(va_arg(args, unsigned));
+ break;
+ case BuiltinType::Int:
+ VRef.setInt(va_arg(args, int));
+ break;
+ case BuiltinType::UInt:
+ VRef.setUInt(va_arg(args, unsigned));
+ break;
+ case BuiltinType::Long:
+ VRef.setLong(va_arg(args, long));
+ break;
+ case BuiltinType::ULong:
+ VRef.setULong(va_arg(args, unsigned long));
+ break;
+ case BuiltinType::LongLong:
+ VRef.setLongLong(va_arg(args, long long));
+ break;
+ case BuiltinType::ULongLong:
+ VRef.setULongLong(va_arg(args, unsigned long long));
+ break;
+ // Types shorter than double are resolved as double, else va_arg has UB.
+ case BuiltinType::Float:
+ VRef.setFloat(va_arg(args, double));
+ break;
+ case BuiltinType::Double:
+ VRef.setDouble(va_arg(args, double));
+ break;
+ case BuiltinType::LongDouble:
+ VRef.setLongDouble(va_arg(args, long double));
+ break;
+ // See REPL_BUILTIN_TYPES.
+ }
+ }
+ va_end(args);
+}
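
The per-type switch above is dictated by C's default argument promotions: in a
variadic call, integral types narrower than int arrive as int and float
arrives as double, so va_arg must name the promoted type or the read is
undefined. A self-contained illustration:

    #include <cstdarg>
    #include <cstdio>

    static void take(int n, ...) {
      va_list ap;
      va_start(ap, n);
      int c = va_arg(ap, int);       // passed as char, promoted to int
      double f = va_arg(ap, double); // passed as float, promoted to double
      std::printf("%c %f\n", (char)c, f);
      va_end(ap);
    }

    int main() { take(2, 'x', 1.5f); }
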
+
+// A trampoline to work around the fact that operator placement new cannot
+// really be forward declared due to libc++ and libstdc++ declaration mismatch.
+// FIXME: __clang_Interpreter_NewTag is an ODR violation because we get the
+// same definition in the interpreter runtime. We should move it into a
+// runtime header which gets included by the interpreter and here.
+struct __clang_Interpreter_NewTag {};
+REPL_EXTERNAL_VISIBILITY void *
+operator new(size_t __sz, void *__p, __clang_Interpreter_NewTag) noexcept {
+ // Just forward to the standard operator placement new.
+ return operator new(__sz, __p);
+}
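
The tag makes the synthesized placement new unambiguous without
forward-declaring the standard placement operator new. A standalone sketch of
the same trampoline pattern (Tag and S are illustrative stand-ins for the
interpreter's types):

    #include <cstddef>
    #include <new>

    struct Tag {}; // stand-in for __clang_Interpreter_NewTag
    void *operator new(std::size_t sz, void *p, Tag) noexcept {
      return operator new(sz, p); // forward to standard placement new
    }

    struct S { int v; S(int v) : v(v) {} };

    int main() {
      alignas(S) unsigned char buf[sizeof(S)];
      S *s = new (buf, Tag{}) S(42); // routes through the trampoline
      s->~S();
    }
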
diff --git a/clang/lib/Sema/CheckExprLifetime.cpp b/clang/lib/Sema/CheckExprLifetime.cpp
index c98fbca..e9e39c1 100644
--- a/clang/lib/Sema/CheckExprLifetime.cpp
+++ b/clang/lib/Sema/CheckExprLifetime.cpp
@@ -34,6 +34,10 @@ enum LifetimeKind {
LK_Return,
/// The lifetime of a temporary bound to this entity ends too soon, because
+  /// the entity is passed to a musttail function call.
+ LK_MustTail,
+
+ /// The lifetime of a temporary bound to this entity ends too soon, because
/// the entity is the result of a statement expression.
LK_StmtExprResult,
@@ -1150,6 +1154,7 @@ static void checkExprLifetimeImpl(Sema &SemaRef,
break;
case LK_Return:
+ case LK_MustTail:
case LK_StmtExprResult:
if (auto *DRE = dyn_cast<DeclRefExpr>(L)) {
// We can't determine if the local variable outlives the statement
@@ -1158,7 +1163,8 @@ static void checkExprLifetimeImpl(Sema &SemaRef,
return false;
SemaRef.Diag(DiagLoc, diag::warn_ret_stack_addr_ref)
<< InitEntity->getType()->isReferenceType() << DRE->getDecl()
- << isa<ParmVarDecl>(DRE->getDecl()) << DiagRange;
+ << isa<ParmVarDecl>(DRE->getDecl()) << (LK == LK_MustTail)
+ << DiagRange;
} else if (isa<BlockExpr>(L)) {
SemaRef.Diag(DiagLoc, diag::err_ret_local_block) << DiagRange;
} else if (isa<AddrLabelExpr>(L)) {
@@ -1170,7 +1176,7 @@ static void checkExprLifetimeImpl(Sema &SemaRef,
} else if (auto *CLE = dyn_cast<CompoundLiteralExpr>(L)) {
SemaRef.Diag(DiagLoc, diag::warn_ret_stack_addr_ref)
<< InitEntity->getType()->isReferenceType() << CLE->getInitializer()
- << 2 << DiagRange;
+ << 2 << (LK == LK_MustTail) << DiagRange;
} else {
// P2748R5: Disallow Binding a Returned Glvalue to a Temporary.
// [stmt.return]/p6: In a function whose return type is a reference,
@@ -1181,6 +1187,9 @@ static void checkExprLifetimeImpl(Sema &SemaRef,
InitEntity->getType()->isReferenceType())
SemaRef.Diag(DiagLoc, diag::err_ret_local_temp_ref)
<< InitEntity->getType()->isReferenceType() << DiagRange;
+ else if (LK == LK_MustTail)
+ SemaRef.Diag(DiagLoc, diag::warn_musttail_local_temp_addr_ref)
+ << InitEntity->getType()->isReferenceType() << DiagRange;
else
SemaRef.Diag(DiagLoc, diag::warn_ret_local_temp_addr_ref)
<< InitEntity->getType()->isReferenceType() << DiagRange;
@@ -1265,6 +1274,12 @@ void checkExprLifetime(Sema &SemaRef, const InitializedEntity &Entity,
/*AEntity*/ nullptr, Init);
}
+void checkExprLifetimeMustTailArg(Sema &SemaRef,
+ const InitializedEntity &Entity, Expr *Init) {
+ checkExprLifetimeImpl(SemaRef, &Entity, nullptr, LK_MustTail,
+ /*AEntity*/ nullptr, Init);
+}
+
void checkExprLifetime(Sema &SemaRef, const AssignedEntity &Entity,
Expr *Init) {
bool EnableDanglingPointerAssignment = !SemaRef.getDiagnostics().isIgnored(
diff --git a/clang/lib/Sema/CheckExprLifetime.h b/clang/lib/Sema/CheckExprLifetime.h
index 8c8d080..903f312 100644
--- a/clang/lib/Sema/CheckExprLifetime.h
+++ b/clang/lib/Sema/CheckExprLifetime.h
@@ -35,6 +35,12 @@ void checkExprLifetime(Sema &SemaRef, const InitializedEntity &Entity,
/// sufficient for assigning to the entity.
void checkExprLifetime(Sema &SemaRef, const AssignedEntity &Entity, Expr *Init);
+/// Check that the lifetime of the given expr (and its subobjects) is
+/// sufficient, assuming that it is passed as an argument to a musttail
+/// function.
+void checkExprLifetimeMustTailArg(Sema &SemaRef,
+ const InitializedEntity &Entity, Expr *Init);
+
} // namespace clang::sema
#endif // LLVM_CLANG_SEMA_CHECK_EXPR_LIFETIME_H
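
The new LK_MustTail path targets arguments whose backing storage lives in the
caller's frame, which a musttail call replaces before the callee runs. A
hedged example of the kind of code this is meant to flag (signatures kept
identical, as musttail requires):

    int callee(const int &r);
    int caller(const int &r) {
      // The temporary materialized for '5' sits in caller's frame and is
      // gone by the time callee executes in that reused frame.
      [[clang::musttail]] return callee(5);
    }
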
diff --git a/clang/lib/Sema/SemaDecl.cpp b/clang/lib/Sema/SemaDecl.cpp
index 31bf50a..1bf0e80 100644
--- a/clang/lib/Sema/SemaDecl.cpp
+++ b/clang/lib/Sema/SemaDecl.cpp
@@ -7342,7 +7342,7 @@ static void copyAttrFromTypedefToDecl(Sema &S, Decl *D, const TypedefType *TT) {
// This function emits a warning and a corresponding note based on the
// ReadOnlyPlacementAttr attribute. The warning checks that all global variable
// declarations of an annotated type must be const qualified.
-void emitReadOnlyPlacementAttrWarning(Sema &S, const VarDecl *VD) {
+static void emitReadOnlyPlacementAttrWarning(Sema &S, const VarDecl *VD) {
QualType VarType = VD->getType().getCanonicalType();
// Ignore local declarations (for now) and those with const qualification.
@@ -9762,11 +9762,11 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
if (getLangOpts().CPlusPlus) {
// The rules for implicit inlines changed in C++20 for methods and friends
// with an in-class definition (when such a definition is not attached to
- // the global module). User-specified 'inline' overrides this (set when
- // the function decl is created above).
+ // the global module). This does not affect declarations that are already
+ // inline (whether explicitly or implicitly by being declared constexpr,
+ // consteval, etc).
// FIXME: We need a better way to separate C++ standard and clang modules.
bool ImplicitInlineCXX20 = !getLangOpts().CPlusPlusModules ||
- NewFD->isConstexpr() || NewFD->isConsteval() ||
!NewFD->getOwningModule() ||
NewFD->isFromGlobalModule() ||
NewFD->getOwningModule()->isHeaderLikeModule();
@@ -9774,14 +9774,14 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
bool isVirtual = D.getDeclSpec().isVirtualSpecified();
bool hasExplicit = D.getDeclSpec().hasExplicitSpecifier();
isFriend = D.getDeclSpec().isFriendSpecified();
- if (isFriend && !isInline && D.isFunctionDefinition()) {
+ if (ImplicitInlineCXX20 && isFriend && D.isFunctionDefinition()) {
// Pre-C++20 [class.friend]p5
// A function can be defined in a friend declaration of a
// class . . . . Such a function is implicitly inline.
// Post C++20 [class.friend]p7
// Such a function is implicitly an inline function if it is attached
// to the global module.
- NewFD->setImplicitlyInline(ImplicitInlineCXX20);
+ NewFD->setImplicitlyInline();
}
// If this is a method defined in an __interface, and is not a constructor
@@ -10083,15 +10083,15 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
break;
}
- if (isa<CXXMethodDecl>(NewFD) && DC == CurContext &&
- D.isFunctionDefinition() && !isInline) {
+ if (ImplicitInlineCXX20 && isa<CXXMethodDecl>(NewFD) && DC == CurContext &&
+ D.isFunctionDefinition()) {
// Pre C++20 [class.mfct]p2:
// A member function may be defined (8.4) in its class definition, in
// which case it is an inline member function (7.1.2)
// Post C++20 [class.mfct]p1:
// If a member function is attached to the global module and is defined
// in its class definition, it is inline.
- NewFD->setImplicitlyInline(ImplicitInlineCXX20);
+ NewFD->setImplicitlyInline();
}
if (!isFriend && SC != SC_None) {
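
The SemaDecl changes make ImplicitInlineCXX20 gate the setImplicitlyInline()
calls themselves, since constexpr/consteval functions are already inline by
definition. A hedged illustration of the distinction being encoded:

    export module M; // C++20 named module

    export struct S {
      int f() { return 1; }           // attached to M: not implicitly inline
      constexpr int g() { return 2; } // constexpr: inline regardless
    };
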
diff --git a/clang/lib/Sema/SemaHLSL.cpp b/clang/lib/Sema/SemaHLSL.cpp
index 03b7c2e..ebe7618 100644
--- a/clang/lib/Sema/SemaHLSL.cpp
+++ b/clang/lib/Sema/SemaHLSL.cpp
@@ -805,7 +805,7 @@ static CXXRecordDecl *getRecordDeclFromVarDecl(VarDecl *VD) {
return TheRecordDecl;
}
-const HLSLAttributedResourceType *
+static const HLSLAttributedResourceType *
findAttributedResourceTypeOnField(VarDecl *VD) {
assert(VD != nullptr && "expected VarDecl");
if (RecordDecl *RD = getRecordDeclFromVarDecl(VD)) {
@@ -1417,7 +1417,7 @@ void SemaHLSL::DiagnoseAvailabilityViolations(TranslationUnitDecl *TU) {
}
// Helper function for CheckHLSLBuiltinFunctionCall
-bool CheckVectorElementCallArgs(Sema *S, CallExpr *TheCall) {
+static bool CheckVectorElementCallArgs(Sema *S, CallExpr *TheCall) {
assert(TheCall->getNumArgs() > 1);
ExprResult A = TheCall->getArg(0);
@@ -1467,7 +1467,7 @@ bool CheckVectorElementCallArgs(Sema *S, CallExpr *TheCall) {
return true;
}
-bool CheckArgsTypesAreCorrect(
+static bool CheckArgsTypesAreCorrect(
Sema *S, CallExpr *TheCall, QualType ExpectedType,
llvm::function_ref<bool(clang::QualType PassedType)> Check) {
for (unsigned i = 0; i < TheCall->getNumArgs(); ++i) {
@@ -1485,7 +1485,7 @@ bool CheckArgsTypesAreCorrect(
return false;
}
-bool CheckAllArgsHaveFloatRepresentation(Sema *S, CallExpr *TheCall) {
+static bool CheckAllArgsHaveFloatRepresentation(Sema *S, CallExpr *TheCall) {
auto checkAllFloatTypes = [](clang::QualType PassedType) -> bool {
return !PassedType->hasFloatingRepresentation();
};
@@ -1493,7 +1493,7 @@ bool CheckAllArgsHaveFloatRepresentation(Sema *S, CallExpr *TheCall) {
checkAllFloatTypes);
}
-bool CheckFloatOrHalfRepresentations(Sema *S, CallExpr *TheCall) {
+static bool CheckFloatOrHalfRepresentations(Sema *S, CallExpr *TheCall) {
auto checkFloatorHalf = [](clang::QualType PassedType) -> bool {
clang::QualType BaseType =
PassedType->isVectorType()
@@ -1505,7 +1505,7 @@ bool CheckFloatOrHalfRepresentations(Sema *S, CallExpr *TheCall) {
checkFloatorHalf);
}
-bool CheckNoDoubleVectors(Sema *S, CallExpr *TheCall) {
+static bool CheckNoDoubleVectors(Sema *S, CallExpr *TheCall) {
auto checkDoubleVector = [](clang::QualType PassedType) -> bool {
if (const auto *VecTy = PassedType->getAs<VectorType>())
return VecTy->getElementType()->isDoubleType();
@@ -1514,7 +1514,7 @@ bool CheckNoDoubleVectors(Sema *S, CallExpr *TheCall) {
return CheckArgsTypesAreCorrect(S, TheCall, S->Context.FloatTy,
checkDoubleVector);
}
-bool CheckFloatingOrSignedIntRepresentation(Sema *S, CallExpr *TheCall) {
+static bool CheckFloatingOrSignedIntRepresentation(Sema *S, CallExpr *TheCall) {
auto checkAllSignedTypes = [](clang::QualType PassedType) -> bool {
return !PassedType->hasSignedIntegerRepresentation() &&
!PassedType->hasFloatingRepresentation();
@@ -1523,7 +1523,7 @@ bool CheckFloatingOrSignedIntRepresentation(Sema *S, CallExpr *TheCall) {
checkAllSignedTypes);
}
-bool CheckUnsignedIntRepresentation(Sema *S, CallExpr *TheCall) {
+static bool CheckUnsignedIntRepresentation(Sema *S, CallExpr *TheCall) {
auto checkAllUnsignedTypes = [](clang::QualType PassedType) -> bool {
return !PassedType->hasUnsignedIntegerRepresentation();
};
@@ -1531,8 +1531,8 @@ bool CheckUnsignedIntRepresentation(Sema *S, CallExpr *TheCall) {
checkAllUnsignedTypes);
}
-void SetElementTypeAsReturnType(Sema *S, CallExpr *TheCall,
- QualType ReturnType) {
+static void SetElementTypeAsReturnType(Sema *S, CallExpr *TheCall,
+ QualType ReturnType) {
auto *VecTyA = TheCall->getArg(0)->getType()->getAs<VectorType>();
if (VecTyA)
ReturnType = S->Context.getVectorType(ReturnType, VecTyA->getNumElements(),
diff --git a/clang/lib/Sema/SemaLookup.cpp b/clang/lib/Sema/SemaLookup.cpp
index ed5d44a..f3f6247 100644
--- a/clang/lib/Sema/SemaLookup.cpp
+++ b/clang/lib/Sema/SemaLookup.cpp
@@ -3850,8 +3850,9 @@ void Sema::ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc,
// exports are only valid in module purview and outside of any
// PMF (although a PMF should not even be present in a module
// with an import).
- assert(FM && FM->isNamedModule() && !FM->isPrivateModule() &&
- "bad export context");
+ assert(FM &&
+ (FM->isNamedModule() || FM->isImplicitGlobalModule()) &&
+ !FM->isPrivateModule() && "bad export context");
// .. are attached to a named module M, do not appear in the
// translation unit containing the point of the lookup..
if (D->isInAnotherModuleUnit() &&
diff --git a/clang/lib/Sema/SemaStmt.cpp b/clang/lib/Sema/SemaStmt.cpp
index 9664287..9e235a4 100644
--- a/clang/lib/Sema/SemaStmt.cpp
+++ b/clang/lib/Sema/SemaStmt.cpp
@@ -10,6 +10,7 @@
//
//===----------------------------------------------------------------------===//
+#include "CheckExprLifetime.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTDiagnostic.h"
#include "clang/AST/ASTLambda.h"
@@ -889,6 +890,15 @@ bool Sema::checkMustTailAttr(const Stmt *St, const Attr &MTA) {
return false;
}
+ // The lifetimes of locals and incoming function parameters must end before
+ // the call, since the caller's stack frame is gone by then, so diagnose any
+ // pointers or references to them passed into the musttail call.
+ for (auto ArgExpr : CE->arguments()) {
+ InitializedEntity Entity = InitializedEntity::InitializeParameter(
+ Context, ArgExpr->getType(), false);
+ checkExprLifetimeMustTailArg(*this, Entity, const_cast<Expr *>(ArgExpr));
+ }
+
return true;
}
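A hedged sketch of the kind of code the new check is meant to diagnose; the function names are illustrative, not from the patch:

    int callee(int *p);

    int caller(int *unused) {
      int local = 0;
      // 'local' dies with caller's frame, which a musttail call replaces,
      // so passing its address into the call would leave a dangling pointer:
      [[clang::musttail]] return callee(&local);
    }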
diff --git a/clang/lib/Sema/SemaTemplate.cpp b/clang/lib/Sema/SemaTemplate.cpp
index 92274cd..99423b0 100644
--- a/clang/lib/Sema/SemaTemplate.cpp
+++ b/clang/lib/Sema/SemaTemplate.cpp
@@ -8631,6 +8631,7 @@ DeclResult Sema::ActOnClassTemplateSpecialization(
return SkipBody->Previous;
Specialization->setInvalidDecl(Invalid);
+ inferGslOwnerPointerAttribute(Specialization);
return Specialization;
}
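The one-line addition is small but user-visible: the inference that marks well-known owner/pointer types now also runs for class template specializations, so lifetime diagnostics see them the same way as the primary template. A hedged sketch of the kind of code this presumably affects (simplified, assumed definitions):

    namespace std {
      template <typename T>
      class vector { /* inferred [[gsl::Owner]] */ };

      // With the added call, this specialization is also inferred as an
      // Owner, so -Wdangling-gsl style warnings apply to it as well.
      template <>
      class vector<bool> { /* ... */ };
    }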
diff --git a/clang/lib/StaticAnalyzer/Checkers/StackAddrEscapeChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/StackAddrEscapeChecker.cpp
index d8c5294..a76639b 100644
--- a/clang/lib/StaticAnalyzer/Checkers/StackAddrEscapeChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/StackAddrEscapeChecker.cpp
@@ -337,6 +337,10 @@ static std::optional<std::string> printReferrer(const MemRegion *Referrer) {
// warn_bind_ref_member_to_parameter or
// warn_init_ptr_member_to_parameter_addr
return std::nullopt;
+ } else if (isa<AllocaRegion>(Referrer)) {
+ // Skip alloca() regions; they indicate advanced memory management
+ // and a higher likelihood of CSA false positives.
+ return std::nullopt;
} else {
assert(false && "Unexpected referrer region type.");
return std::nullopt;
diff --git a/clang/lib/StaticAnalyzer/Core/ProgramState.cpp b/clang/lib/StaticAnalyzer/Core/ProgramState.cpp
index e6d3399..0be2709 100644
--- a/clang/lib/StaticAnalyzer/Core/ProgramState.cpp
+++ b/clang/lib/StaticAnalyzer/Core/ProgramState.cpp
@@ -147,30 +147,24 @@ ProgramState::bindDefaultZero(SVal loc, const LocationContext *LCtx) const {
typedef ArrayRef<const MemRegion *> RegionList;
typedef ArrayRef<SVal> ValueList;
-ProgramStateRef
-ProgramState::invalidateRegions(RegionList Regions,
- const Expr *E, unsigned Count,
- const LocationContext *LCtx,
- bool CausedByPointerEscape,
- InvalidatedSymbols *IS,
- const CallEvent *Call,
- RegionAndSymbolInvalidationTraits *ITraits) const {
+ProgramStateRef ProgramState::invalidateRegions(
+ RegionList Regions, const Stmt *S, unsigned Count,
+ const LocationContext *LCtx, bool CausedByPointerEscape,
+ InvalidatedSymbols *IS, const CallEvent *Call,
+ RegionAndSymbolInvalidationTraits *ITraits) const {
SmallVector<SVal, 8> Values;
for (const MemRegion *Reg : Regions)
Values.push_back(loc::MemRegionVal(Reg));
- return invalidateRegions(Values, E, Count, LCtx, CausedByPointerEscape, IS,
+ return invalidateRegions(Values, S, Count, LCtx, CausedByPointerEscape, IS,
Call, ITraits);
}
-ProgramStateRef
-ProgramState::invalidateRegions(ValueList Values,
- const Expr *E, unsigned Count,
- const LocationContext *LCtx,
- bool CausedByPointerEscape,
- InvalidatedSymbols *IS,
- const CallEvent *Call,
- RegionAndSymbolInvalidationTraits *ITraits) const {
+ProgramStateRef ProgramState::invalidateRegions(
+ ValueList Values, const Stmt *S, unsigned Count,
+ const LocationContext *LCtx, bool CausedByPointerEscape,
+ InvalidatedSymbols *IS, const CallEvent *Call,
+ RegionAndSymbolInvalidationTraits *ITraits) const {
ProgramStateManager &Mgr = getStateManager();
ExprEngine &Eng = Mgr.getOwningEngine();
@@ -186,7 +180,7 @@ ProgramState::invalidateRegions(ValueList Values,
StoreManager::InvalidatedRegions TopLevelInvalidated;
StoreManager::InvalidatedRegions Invalidated;
const StoreRef &NewStore = Mgr.StoreMgr->invalidateRegions(
- getStore(), Values, E, Count, LCtx, Call, *IS, *ITraits,
+ getStore(), Values, S, Count, LCtx, Call, *IS, *ITraits,
&TopLevelInvalidated, &Invalidated);
ProgramStateRef NewState = makeWithStore(NewStore);
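The widening from const Expr * to const Stmt * propagates through the store and symbol managers below; the practical payoff is that region invalidation (and the symbols it conjures) can be keyed on statements that are not expressions. A rough sketch of a call site under that assumption, with checker boilerplate elided:

    // Hypothetical checker snippet: invalidate regions at an inline-asm
    // statement, which is a Stmt but not an Expr.
    void handleAsm(const GCCAsmStmt *AS, ArrayRef<const MemRegion *> Regions,
                   CheckerContext &C) {
      ProgramStateRef State = C.getState();
      State = State->invalidateRegions(Regions, AS, C.blockCount(),
                                       C.getLocationContext(),
                                       /*CausedByPointerEscape=*/true);
      C.addTransition(State);
    }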
diff --git a/clang/lib/StaticAnalyzer/Core/RegionStore.cpp b/clang/lib/StaticAnalyzer/Core/RegionStore.cpp
index c257a87..674099d 100644
--- a/clang/lib/StaticAnalyzer/Core/RegionStore.cpp
+++ b/clang/lib/StaticAnalyzer/Core/RegionStore.cpp
@@ -405,19 +405,15 @@ public:
//===-------------------------------------------------------------------===//
// Binding values to regions.
//===-------------------------------------------------------------------===//
- RegionBindingsRef invalidateGlobalRegion(MemRegion::Kind K,
- const Expr *Ex,
+ RegionBindingsRef invalidateGlobalRegion(MemRegion::Kind K, const Stmt *S,
unsigned Count,
const LocationContext *LCtx,
RegionBindingsRef B,
InvalidatedRegions *Invalidated);
- StoreRef invalidateRegions(Store store,
- ArrayRef<SVal> Values,
- const Expr *E, unsigned Count,
- const LocationContext *LCtx,
- const CallEvent *Call,
- InvalidatedSymbols &IS,
+ StoreRef invalidateRegions(Store store, ArrayRef<SVal> Values, const Stmt *S,
+ unsigned Count, const LocationContext *LCtx,
+ const CallEvent *Call, InvalidatedSymbols &IS,
RegionAndSymbolInvalidationTraits &ITraits,
InvalidatedRegions *Invalidated,
InvalidatedRegions *InvalidatedTopLevel) override;
@@ -975,7 +971,7 @@ RegionStoreManager::removeSubRegionBindings(RegionBindingsConstRef B,
namespace {
class InvalidateRegionsWorker : public ClusterAnalysis<InvalidateRegionsWorker>
{
- const Expr *Ex;
+ const Stmt *S;
unsigned Count;
const LocationContext *LCtx;
InvalidatedSymbols &IS;
@@ -983,18 +979,15 @@ class InvalidateRegionsWorker : public ClusterAnalysis<InvalidateRegionsWorker>
StoreManager::InvalidatedRegions *Regions;
GlobalsFilterKind GlobalsFilter;
public:
- InvalidateRegionsWorker(RegionStoreManager &rm,
- ProgramStateManager &stateMgr,
- RegionBindingsRef b,
- const Expr *ex, unsigned count,
- const LocationContext *lctx,
- InvalidatedSymbols &is,
+ InvalidateRegionsWorker(RegionStoreManager &rm, ProgramStateManager &stateMgr,
+ RegionBindingsRef b, const Stmt *S, unsigned count,
+ const LocationContext *lctx, InvalidatedSymbols &is,
RegionAndSymbolInvalidationTraits &ITraitsIn,
StoreManager::InvalidatedRegions *r,
GlobalsFilterKind GFK)
- : ClusterAnalysis<InvalidateRegionsWorker>(rm, stateMgr, b),
- Ex(ex), Count(count), LCtx(lctx), IS(is), ITraits(ITraitsIn), Regions(r),
- GlobalsFilter(GFK) {}
+ : ClusterAnalysis<InvalidateRegionsWorker>(rm, stateMgr, b), S(S),
+ Count(count), LCtx(lctx), IS(is), ITraits(ITraitsIn), Regions(r),
+ GlobalsFilter(GFK) {}
void VisitCluster(const MemRegion *baseR, const ClusterBindings *C);
void VisitBinding(SVal V);
@@ -1127,7 +1120,7 @@ void InvalidateRegionsWorker::VisitCluster(const MemRegion *baseR,
// Invalidate the region by setting its default value to
// conjured symbol. The type of the symbol is irrelevant.
DefinedOrUnknownSVal V =
- svalBuilder.conjureSymbolVal(baseR, Ex, LCtx, Ctx.IntTy, Count);
+ svalBuilder.conjureSymbolVal(baseR, S, LCtx, Ctx.IntTy, Count);
B = B.addBinding(baseR, BindingKey::Default, V);
return;
}
@@ -1148,8 +1141,8 @@ void InvalidateRegionsWorker::VisitCluster(const MemRegion *baseR,
if (T->isRecordType()) {
// Invalidate the region by setting its default value to
// conjured symbol. The type of the symbol is irrelevant.
- DefinedOrUnknownSVal V = svalBuilder.conjureSymbolVal(baseR, Ex, LCtx,
- Ctx.IntTy, Count);
+ DefinedOrUnknownSVal V =
+ svalBuilder.conjureSymbolVal(baseR, S, LCtx, Ctx.IntTy, Count);
B = B.addBinding(baseR, BindingKey::Default, V);
return;
}
@@ -1216,15 +1209,14 @@ void InvalidateRegionsWorker::VisitCluster(const MemRegion *baseR,
}
conjure_default:
// Set the default value of the array to conjured symbol.
- DefinedOrUnknownSVal V =
- svalBuilder.conjureSymbolVal(baseR, Ex, LCtx,
- AT->getElementType(), Count);
- B = B.addBinding(baseR, BindingKey::Default, V);
- return;
+ DefinedOrUnknownSVal V = svalBuilder.conjureSymbolVal(
+ baseR, S, LCtx, AT->getElementType(), Count);
+ B = B.addBinding(baseR, BindingKey::Default, V);
+ return;
}
- DefinedOrUnknownSVal V = svalBuilder.conjureSymbolVal(baseR, Ex, LCtx,
- T,Count);
+ DefinedOrUnknownSVal V =
+ svalBuilder.conjureSymbolVal(baseR, S, LCtx, T, Count);
assert(SymbolManager::canSymbolicate(T) || V.isUnknown());
B = B.addBinding(baseR, BindingKey::Direct, V);
}
@@ -1252,19 +1244,16 @@ bool InvalidateRegionsWorker::includeEntireMemorySpace(const MemRegion *Base) {
RegionAndSymbolInvalidationTraits::TK_EntireMemSpace);
}
-RegionBindingsRef
-RegionStoreManager::invalidateGlobalRegion(MemRegion::Kind K,
- const Expr *Ex,
- unsigned Count,
- const LocationContext *LCtx,
- RegionBindingsRef B,
- InvalidatedRegions *Invalidated) {
+RegionBindingsRef RegionStoreManager::invalidateGlobalRegion(
+ MemRegion::Kind K, const Stmt *S, unsigned Count,
+ const LocationContext *LCtx, RegionBindingsRef B,
+ InvalidatedRegions *Invalidated) {
// Bind the globals memory space to a new symbol that we will use to derive
// the bindings for all globals.
const GlobalsSpaceRegion *GS = MRMgr.getGlobalsRegion(K);
- SVal V = svalBuilder.conjureSymbolVal(/* symbolTag = */ (const void*) GS, Ex, LCtx,
- /* type does not matter */ Ctx.IntTy,
- Count);
+ SVal V =
+ svalBuilder.conjureSymbolVal(/* symbolTag = */ (const void *)GS, S, LCtx,
+ /* type does not matter */ Ctx.IntTy, Count);
B = B.removeBinding(GS)
.addBinding(BindingKey::Make(GS, BindingKey::Default), V);
@@ -1298,16 +1287,11 @@ void RegionStoreManager::populateWorkList(InvalidateRegionsWorker &W,
}
}
-StoreRef
-RegionStoreManager::invalidateRegions(Store store,
- ArrayRef<SVal> Values,
- const Expr *Ex, unsigned Count,
- const LocationContext *LCtx,
- const CallEvent *Call,
- InvalidatedSymbols &IS,
- RegionAndSymbolInvalidationTraits &ITraits,
- InvalidatedRegions *TopLevelRegions,
- InvalidatedRegions *Invalidated) {
+StoreRef RegionStoreManager::invalidateRegions(
+ Store store, ArrayRef<SVal> Values, const Stmt *S, unsigned Count,
+ const LocationContext *LCtx, const CallEvent *Call, InvalidatedSymbols &IS,
+ RegionAndSymbolInvalidationTraits &ITraits,
+ InvalidatedRegions *TopLevelRegions, InvalidatedRegions *Invalidated) {
GlobalsFilterKind GlobalsFilter;
if (Call) {
if (Call->isInSystemHeader())
@@ -1319,7 +1303,7 @@ RegionStoreManager::invalidateRegions(Store store,
}
RegionBindingsRef B = getRegionBindings(store);
- InvalidateRegionsWorker W(*this, StateMgr, B, Ex, Count, LCtx, IS, ITraits,
+ InvalidateRegionsWorker W(*this, StateMgr, B, S, Count, LCtx, IS, ITraits,
Invalidated, GlobalsFilter);
// Scan the bindings and generate the clusters.
@@ -1339,12 +1323,12 @@ RegionStoreManager::invalidateRegions(Store store,
// TODO: This could possibly be more precise with modules.
switch (GlobalsFilter) {
case GFK_All:
- B = invalidateGlobalRegion(MemRegion::GlobalInternalSpaceRegionKind,
- Ex, Count, LCtx, B, Invalidated);
+ B = invalidateGlobalRegion(MemRegion::GlobalInternalSpaceRegionKind, S,
+ Count, LCtx, B, Invalidated);
[[fallthrough]];
case GFK_SystemOnly:
- B = invalidateGlobalRegion(MemRegion::GlobalSystemSpaceRegionKind,
- Ex, Count, LCtx, B, Invalidated);
+ B = invalidateGlobalRegion(MemRegion::GlobalSystemSpaceRegionKind, S, Count,
+ LCtx, B, Invalidated);
[[fallthrough]];
case GFK_None:
break;
diff --git a/clang/lib/StaticAnalyzer/Core/SValBuilder.cpp b/clang/lib/StaticAnalyzer/Core/SValBuilder.cpp
index 7eca057..cb5fcba 100644
--- a/clang/lib/StaticAnalyzer/Core/SValBuilder.cpp
+++ b/clang/lib/StaticAnalyzer/Core/SValBuilder.cpp
@@ -174,7 +174,7 @@ DefinedOrUnknownSVal SValBuilder::conjureSymbolVal(const void *SymbolTag,
}
DefinedOrUnknownSVal SValBuilder::conjureSymbolVal(const void *symbolTag,
- const Expr *expr,
+ const Stmt *St,
const LocationContext *LCtx,
QualType type,
unsigned count) {
@@ -184,7 +184,7 @@ DefinedOrUnknownSVal SValBuilder::conjureSymbolVal(const void *symbolTag,
if (!SymbolManager::canSymbolicate(type))
return UnknownVal();
- SymbolRef sym = SymMgr.conjureSymbol(expr, LCtx, type, count, symbolTag);
+ SymbolRef sym = SymMgr.conjureSymbol(St, LCtx, type, count, symbolTag);
if (Loc::isLocType(type))
return loc::MemRegionVal(MemMgr.getSymbolicRegion(sym));
diff --git a/clang/test/AST/ByteCode/codegen.m b/clang/test/AST/ByteCode/codegen.m
index 08c3d9a..6139596 100644
--- a/clang/test/AST/ByteCode/codegen.m
+++ b/clang/test/AST/ByteCode/codegen.m
@@ -10,3 +10,8 @@
//
// CHECK-NEXT: @.str = {{.*}}constant [13 x i8] c"Hello World!\00", section "__TEXT,__cstring,cstring_literals", align 1
id a = @"Hello World!";
+
+extern void OBJC_CLASS_$_f;
+Class c = (Class)&OBJC_CLASS_$_f;
+// CHECK: @c ={{.*}} global ptr @"OBJC_CLASS_$_f"
+// CHECK: @"OBJC_CLASS_$_f" ={{.*}} global %struct._class_t
diff --git a/clang/test/AST/ByteCode/cxx20.cpp b/clang/test/AST/ByteCode/cxx20.cpp
index 9bbc3db..68e212f 100644
--- a/clang/test/AST/ByteCode/cxx20.cpp
+++ b/clang/test/AST/ByteCode/cxx20.cpp
@@ -596,8 +596,6 @@ namespace ImplicitFunction {
// both-note {{in call to 'callMe()'}}
}
-/// FIXME: Unfortunately, the similar tests in test/SemaCXX/{compare-cxx2a.cpp use member pointers,
-/// which we don't support yet.
namespace std {
class strong_ordering {
public:
diff --git a/clang/test/AST/ByteCode/new-delete.cpp b/clang/test/AST/ByteCode/new-delete.cpp
index 2ba1286b..8c9d5d9 100644
--- a/clang/test/AST/ByteCode/new-delete.cpp
+++ b/clang/test/AST/ByteCode/new-delete.cpp
@@ -241,12 +241,10 @@ namespace std {
-/// FIXME: The new interpreter produces the wrong diagnostic.
namespace PlacementNew {
constexpr int foo() { // both-error {{never produces a constant expression}}
char c[sizeof(int)];
- new (c) int{12}; // ref-note {{this placement new expression is not supported in constant expressions before C++2c}} \
- // expected-note {{subexpression not valid in a constant expression}}
+ new (c) int{12}; // both-note {{this placement new expression is not supported in constant expressions before C++2c}}
return 0;
}
}
@@ -305,31 +303,28 @@ namespace placement_new_delete {
}
static_assert(ok());
- /// FIXME: Diagnosting placement new.
constexpr bool bad(int which) {
switch (which) {
case 0:
- delete new (placement_new_arg{}) int; // ref-note {{this placement new expression is not supported in constant expressions}} \
- // expected-note {{subexpression not valid in a constant expression}}
+ delete new (placement_new_arg{}) int; // both-note {{this placement new expression is not supported in constant expressions}}
break;
case 1:
- delete new ClassSpecificNew; // ref-note {{call to class-specific 'operator new'}}
+ delete new ClassSpecificNew; // both-note {{call to class-specific 'operator new'}}
break;
case 2:
- delete new ClassSpecificDelete; // ref-note {{call to class-specific 'operator delete'}}
+ delete new ClassSpecificDelete; // both-note {{call to class-specific 'operator delete'}}
break;
case 3:
- delete new DestroyingDelete; // ref-note {{call to class-specific 'operator delete'}}
+ delete new DestroyingDelete; // both-note {{call to class-specific 'operator delete'}}
break;
case 4:
// FIXME: This technically follows the standard's rules, but it seems
// unreasonable to expect implementations to support this.
- delete new (std::align_val_t{64}) Overaligned; // ref-note {{this placement new expression is not supported in constant expressions}} \
- // expected-note {{subexpression not valid in a constant expression}}
+ delete new (std::align_val_t{64}) Overaligned; // both-note {{this placement new expression is not supported in constant expressions}}
break;
}
@@ -337,9 +332,9 @@ namespace placement_new_delete {
}
static_assert(bad(0)); // both-error {{constant expression}} \
// both-note {{in call}}
- static_assert(bad(1)); // ref-error {{constant expression}} ref-note {{in call}}
- static_assert(bad(2)); // ref-error {{constant expression}} ref-note {{in call}}
- static_assert(bad(3)); // ref-error {{constant expression}} ref-note {{in call}}
+ static_assert(bad(1)); // both-error {{constant expression}} both-note {{in call}}
+ static_assert(bad(2)); // both-error {{constant expression}} both-note {{in call}}
+ static_assert(bad(3)); // both-error {{constant expression}} both-note {{in call}}
static_assert(bad(4)); // both-error {{constant expression}} \
// both-note {{in call}}
}
@@ -586,19 +581,23 @@ constexpr void use_after_free_2() { // both-error {{never produces a constant ex
p->f(); // both-note {{member call on heap allocated object that has been deleted}}
}
-
/// std::allocator definition
namespace std {
using size_t = decltype(sizeof(0));
template<typename T> struct allocator {
constexpr T *allocate(size_t N) {
- return (T*)__builtin_operator_new(sizeof(T) * N); // both-note 2{{allocation performed here}}
+ return (T*)__builtin_operator_new(sizeof(T) * N); // both-note 2{{allocation performed here}} \
+ // #alloc
}
constexpr void deallocate(void *p) {
__builtin_operator_delete(p); // both-note 2{{std::allocator<...>::deallocate' used to delete pointer to object allocated with 'new'}} \
// both-note {{used to delete a null pointer}}
}
};
+ template<typename T, typename ...Args>
+ constexpr void construct_at(void *p, Args &&...args) { // #construct
+ new (p) T((Args&&)args...);
+ }
}
/// Specialization for float, using operator new/delete.
@@ -731,6 +730,52 @@ namespace Limits {
return n;
}
static_assert(dynarray<char>(5, 0) == 'f');
+
+
+#if __LP64__
+ template <typename T>
+ struct S {
+ constexpr S(unsigned long long N)
+ : data(nullptr) {
+ data = alloc.allocate(N); // both-note {{in call to 'this->alloc.allocate(18446744073709551615)}}
+ }
+ constexpr T operator[](std::size_t i) const {
+ return data[i];
+ }
+
+ constexpr ~S() {
+ alloc.deallocate(data);
+ }
+ std::allocator<T> alloc;
+ T* data;
+ };
+
+ constexpr std::size_t s = S<std::size_t>(~0UL)[42]; // both-error {{constexpr variable 's' must be initialized by a constant expression}} \
+ // both-note@#alloc {{cannot allocate array; evaluated array bound 2305843009213693951 is too large}} \
+ // both-note {{in call to}}
+#endif
+}
+
+/// Just test that we reject placement-new expressions before C++2c.
+/// Tests for successful expressions are in placement-new.cpp.
+namespace Placement {
+ consteval auto ok1() { // both-error {{never produces a constant expression}}
+ bool b;
+ new (&b) bool(true); // both-note 2{{this placement new expression is not supported in constant expressions before C++2c}}
+ return b;
+ }
+ static_assert(ok1()); // both-error {{not an integral constant expression}} \
+ // both-note {{in call to}}
+
+ /// Placement-new should be supported before C++26 when used inside std functions.
+ constexpr int ok2() {
+ int *I = new int;
+ std::construct_at<int>(I);
+ int r = *I;
+ delete I;
+ return r;
+ }
+ static_assert(ok2() == 0);
}
#else
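Taken together, the new Placement tests pin down the pre-C++2c rule: a bare placement new-expression is rejected during constant evaluation, while the same construction is accepted when routed through a member of namespace std such as construct_at. A condensed sketch of the contrast, reusing the std::construct_at stub defined in the test above:

    constexpr int demo() {
      int *p = new int;
      // new (p) int(7);          // rejected in constant evaluation pre-C++2c
      std::construct_at<int>(p);  // accepted: placement new inside std
      int r = *p;
      delete p;
      return r;
    }
    static_assert(demo() == 0);   // construct_at<int> value-initializes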
diff --git a/clang/test/AST/ByteCode/placement-new.cpp b/clang/test/AST/ByteCode/placement-new.cpp
new file mode 100644
index 0000000..7a562ad
--- /dev/null
+++ b/clang/test/AST/ByteCode/placement-new.cpp
@@ -0,0 +1,263 @@
+// RUN: %clang_cc1 -std=c++2c -fcxx-exceptions -fexperimental-new-constant-interpreter -verify=expected,both %s
+// RUN: %clang_cc1 -std=c++2c -fcxx-exceptions -verify=ref,both %s
+
+namespace std {
+ using size_t = decltype(sizeof(0));
+ template<typename T> struct allocator {
+ constexpr T *allocate(size_t N) {
+ return (T*)operator new(sizeof(T) * N);
+ }
+ constexpr void deallocate(void *p) {
+ operator delete(p);
+ }
+ };
+ template<typename T, typename ...Args>
+ constexpr void construct_at(void *p, Args &&...args) {
+ new (p) T((Args&&)args...); // both-note {{in call to}}
+ }
+}
+
+void *operator new(std::size_t, void *p) { return p; }
+void *operator new[](std::size_t, void *p) { return p; }
+
+
+consteval auto ok1() {
+ bool b;
+ new (&b) bool(true);
+ return b;
+}
+static_assert(ok1());
+
+consteval auto ok2() {
+ int b;
+ new (&b) int(12);
+ return b;
+}
+static_assert(ok2() == 12);
+
+
+consteval auto ok3() {
+ float b;
+ new (&b) float(12.0);
+ return b;
+}
+static_assert(ok3() == 12.0);
+
+
+consteval auto ok4() {
+ _BitInt(11) b;
+ new (&b) _BitInt(11)(37);
+ return b;
+}
+static_assert(ok4() == 37);
+
+/// FIXME: Broken in both interpreters.
+#if 0
+consteval int ok5() {
+ int i;
+ new (&i) int[1]{1}; // expected-note {{assignment to dereferenced one-past-the-end pointer}}
+ return i;
+}
+static_assert(ok5() == 1); // expected-error {{not an integral constant expression}} \
+ // expected-note {{in call to}}
+#endif
+
+/// FIXME: Crashes the current interpreter.
+#if 0
+consteval int ok6() {
+ int i[2];
+ new (&i) int(100);
+ return i[0];
+}
+static_assert(ok6() == 100);
+#endif
+
+consteval int ok6() {
+ int i[2];
+ new (i) int(100);
+ new (i + 1) int(200);
+ return i[0] + i[1];
+}
+static_assert(ok6() == 300);
+
+
+consteval auto fail1() {
+ int b;
+ new (&b) float(1.0); // both-note {{placement new would change type of storage from 'int' to 'float'}}
+ return b;
+}
+static_assert(fail1() == 0); // both-error {{not an integral constant expression}} \
+ // both-note {{in call to}}
+
+consteval int fail2() {
+ int i;
+ new (static_cast<void*>(&i)) float(0); // both-note {{placement new would change type of storage from 'int' to 'float'}}
+ return 0;
+}
+static_assert(fail2() == 0); // both-error {{not an integral constant expression}} \
+ // both-note {{in call to}}
+
+consteval int indeterminate() {
+ int *indeterminate;
+ new (indeterminate) int(0); // both-note {{read of uninitialized object is not allowed in a constant expression}}
+ return 0;
+}
+static_assert(indeterminate() == 0); // both-error {{not an integral constant expression}} \
+ // both-note {{in call to}}
+
+consteval int array1() {
+ int i[2];
+ new (&i) int[]{1,2};
+ return i[0] + i[1];
+}
+static_assert(array1() == 3);
+
+consteval int array2() {
+ int i[2];
+ new (static_cast<void*>(&i)) int[]{1,2};
+ return i[0] + i[1];
+}
+static_assert(array2() == 3);
+
+consteval int array3() {
+ int i[1];
+ new (&i) int[2]; // both-note {{placement new would change type of storage from 'int[1]' to 'int[2]'}}
+ return 0;
+}
+static_assert(array3() == 0); // both-error {{not an integral constant expression}} \
+ // both-note {{in call to}}
+
+consteval int array4() {
+ int i[2];
+ new (&i) int[]{12};
+ return i[0];
+}
+static_assert(array4() == 12);
+
+constexpr int *intptr() {
+ return new int;
+}
+constexpr bool yay() {
+ int *ptr = new (intptr()) int(42);
+ bool ret = *ptr == 42;
+ delete ptr;
+ return ret;
+}
+static_assert(yay());
+
+
+constexpr bool blah() {
+ int *ptr = new (intptr()) int[3]{ 1, 2, 3 }; // both-note {{placement new would change type of storage from 'int' to 'int[3]'}}
+ bool ret = ptr[0] == 1 && ptr[1] == 2 && ptr[2] == 3;
+ delete [] ptr;
+ return ret;
+}
+static_assert(blah()); // both-error {{not an integral constant expression}} \
+ // both-note {{in call to 'blah()'}}
+
+
+constexpr int *get_indeterminate() {
+ int *evil;
+ return evil; // both-note {{read of uninitialized object is not allowed in a constant expression}}
+}
+
+constexpr bool bleh() {
+ int *ptr = new (get_indeterminate()) int; // both-note {{in call to 'get_indeterminate()'}}
+ return true;
+}
+static_assert(bleh()); // both-error {{not an integral constant expression}} \
+ // both-note {{in call to 'bleh()'}}
+
+namespace records {
+ class S {
+ public:
+ float f;
+ };
+
+ constexpr bool record1() {
+ S s(13);
+ new (&s) S(42);
+ return s.f == 42;
+ }
+ static_assert(record1());
+
+ S GlobalS;
+ constexpr bool record2() {
+ new (&GlobalS) S(42); // both-note {{a constant expression cannot modify an object that is visible outside that expression}}
+ return GlobalS.f == 42;
+ }
+ static_assert(record2()); // both-error {{not an integral constant expression}} \
+ // both-note {{in call to}}
+
+
+ constexpr bool record3() {
+ S ss[3];
+
+ new (&ss) S[]{{1}, {2}, {3}};
+
+ return ss[0].f == 1 && ss[1].f == 2 && ss[2].f == 3;
+ }
+ static_assert(record3());
+
+ struct F {
+ float f;
+ };
+ struct R {
+ F f;
+ int a;
+ };
+ constexpr bool record4() {
+ R r;
+ new (&r.f) F{42.0};
+ new (&r.a) int(12);
+
+ return r.f.f == 42.0 && r.a == 12;
+ }
+ static_assert(record4());
+
+ /// Destructor is NOT called.
+ struct A {
+ bool b;
+ constexpr ~A() { if (b) throw; }
+ };
+
+ constexpr int foo() {
+ A a;
+ new (&a) A(true);
+ new (&a) A(false);
+ return 0;
+ }
+ static_assert(foo() == 0);
+}
+
+namespace ConstructAt {
+ struct S {
+ int a = 10;
+ float b = 1.0;
+ };
+
+ constexpr bool ok1() {
+ S s;
+
+ std::construct_at<S>(&s);
+ return s.a == 10 && s.b == 1.0;
+ }
+ static_assert(ok1());
+
+ struct S2 {
+ constexpr S2() {
+ (void)(1/0); // both-note {{division by zero}} \
+ // both-warning {{division by zero is undefined}}
+ }
+ };
+
+ constexpr bool ctorFail() {
+ S2 *s = std::allocator<S2>().allocate(1);
+ std::construct_at<S2>(s); // both-note {{in call to}}
+
+ return true;
+ }
+ static_assert(ctorFail()); // both-error {{not an integral constant expression}} \
+ // both-note {{in call to 'ctorFail()'}}
+
+}
diff --git a/clang/test/Analysis/stack-addr-ps.cpp b/clang/test/Analysis/stack-addr-ps.cpp
index 35f38fb..73e9dbe 100644
--- a/clang/test/Analysis/stack-addr-ps.cpp
+++ b/clang/test/Analysis/stack-addr-ps.cpp
@@ -1,10 +1,23 @@
-// RUN: %clang_analyze_cc1 -analyzer-checker=core,debug.ExprInspection -verify %s -Wno-undefined-bool-conversion
+// RUN: %clang_analyze_cc1 \
+// RUN: -analyzer-checker=core,debug.ExprInspection \
+// RUN: -verify %s \
+// RUN: -Wno-undefined-bool-conversion
+// RUN: %clang_analyze_cc1 \
+// RUN: -analyzer-checker=core,debug.ExprInspection,unix.Malloc \
+// RUN: -verify %s \
+// RUN: -Wno-undefined-bool-conversion
+// unix.Malloc is necessary to model __builtin_alloca, whose AllocaRegion
+// could trigger an "unexpected region" assertion in StackAddrEscapeChecker.
typedef __INTPTR_TYPE__ intptr_t;
template <typename T>
void clang_analyzer_dump(T x);
+using size_t = decltype(sizeof(int));
+void * malloc(size_t size);
+void free(void*);
+
const int& g() {
int s;
return s; // expected-warning{{Address of stack memory associated with local variable 's' returned}} expected-warning{{reference to stack memory associated with local variable 's' returned}}
@@ -846,3 +859,21 @@ void top(char **p) {
foo(); // no-warning FIXME: p binding is reclaimed before the function end
}
} // namespace early_reclaim_dead_limitation
+
+namespace alloca_region_pointer {
+void callee(char **pptr) {
+ char local;
+ *pptr = &local;
+} // no crash
+
+void top_alloca_no_crash_fn() {
+ char **pptr = (char**)__builtin_alloca(sizeof(char*));
+ callee(pptr);
+}
+
+void top_malloc_no_crash_fn() {
+ char **pptr = (char**)malloc(sizeof(char*));
+ callee(pptr);
+ free(pptr);
+}
+} // namespace alloca_region_pointer
diff --git a/clang/test/CXX/class/class.friend/p7-cxx20.cpp b/clang/test/CXX/class/class.friend/p7-cxx20.cpp
index 8843d55..d034fa4 100644
--- a/clang/test/CXX/class/class.friend/p7-cxx20.cpp
+++ b/clang/test/CXX/class/class.friend/p7-cxx20.cpp
@@ -46,14 +46,51 @@ module;
export module M;
class Z {
- friend void z(){};
+ friend void z1(){};
};
+
+class Inline {
+ friend inline void z2(){};
+};
+
+class Constexpr {
+ friend constexpr void z3(){};
+};
+
+class Consteval {
+ friend consteval void z4(){};
+};
+
+extern "C++" class GlobalModule {
+ friend void z5(){};
+};
+
// CHECK-MOD: |-CXXRecordDecl {{.*}} <.{{/|\\\\?}}header.h:2:1, line:4:1> line:2:7 in M.<global> hidden class A definition
// CHECK-MOD: | |-CXXRecordDecl {{.*}} <col:1, col:7> col:7 in M.<global> hidden implicit class A
// CHECK-MOD-NEXT: | `-FriendDecl {{.*}} <line:3:3, col:19> col:15 in M.<global>
// CHECK-MOD-NEXT: | `-FunctionDecl {{.*}} parent {{.*}} <col:3, col:19> col:15 in M.<global> hidden friend_undeclared a 'void ()' implicit-inline
-// CHECK-MOD: `-CXXRecordDecl {{.*}} <module.cpp:6:1, line:8:1> line:6:7 in M hidden class Z{{( ReachableWhenImported)?}} definition
-// CHECK-MOD: |-CXXRecordDecl {{.*}} <col:1, col:7> col:7 in M hidden implicit class Z{{( ReachableWhenImported)?}}
-// CHECK-MOD-NEXT: `-FriendDecl {{.*}} <line:7:3, col:19> col:15 in M{{( ReachableWhenImported)?}}
-// CHECK-MOD-NEXT: `-FunctionDecl {{.*}} parent {{.*}} <col:3, col:19> col:15 in M hidden friend_undeclared z 'void ()'{{( ReachableWhenImported)?}}
+// CHECK-MOD: |-CXXRecordDecl {{.*}} <module.cpp:6:1, line:8:1> line:6:7 in M hidden class Z{{( ReachableWhenImported)?}} definition
+// CHECK-MOD: | |-CXXRecordDecl {{.*}} <col:1, col:7> col:7 in M hidden implicit class Z{{( ReachableWhenImported)?}}
+// CHECK-MOD-NEXT: | `-FriendDecl {{.*}} <line:7:3, col:20> col:15 in M{{( ReachableWhenImported)?}}
+// CHECK-MOD-NEXT: | `-FunctionDecl {{.*}} parent {{.*}} <col:3, col:20> col:15 in M hidden friend_undeclared z1 'void ()'{{( ReachableWhenImported)?}}
+
+// CHECK-MOD: |-CXXRecordDecl {{.*}} <line:10:1, line:12:1> line:10:7 in M hidden class Inline{{( ReachableWhenImported)?}} definition
+// CHECK-MOD: | |-CXXRecordDecl {{.*}} <col:1, col:7> col:7 in M hidden implicit class Inline{{( ReachableWhenImported)?}}
+// CHECK-MOD-NEXT: | `-FriendDecl {{.*}} <line:11:3, col:27> col:22 in M{{( ReachableWhenImported)?}}
+// CHECK-MOD-NEXT: | `-FunctionDecl {{.*}} parent {{.*}} <col:3, col:27> col:22 in M hidden friend_undeclared z2 'void ()'{{( ReachableWhenImported)?}} inline
+
+// CHECK-MOD: |-CXXRecordDecl {{.*}} <line:14:1, line:16:1> line:14:7 in M hidden class Constexpr{{( ReachableWhenImported)?}} definition
+// CHECK-MOD: | |-CXXRecordDecl {{.*}} <col:1, col:7> col:7 in M hidden implicit class Constexpr{{( ReachableWhenImported)?}}
+// CHECK-MOD-NEXT: | `-FriendDecl {{.*}} <line:15:3, col:30> col:25 in M{{( ReachableWhenImported)?}}
+// CHECK-MOD-NEXT: | `-FunctionDecl {{.*}} parent {{.*}} <col:3, col:30> col:25 in M hidden constexpr friend_undeclared z3 'void ()'{{( ReachableWhenImported)?}} implicit-inline
+
+// CHECK-MOD: |-CXXRecordDecl {{.*}} <line:18:1, line:20:1> line:18:7 in M hidden class Consteval{{( ReachableWhenImported)?}} definition
+// CHECK-MOD: | |-CXXRecordDecl {{.*}} <col:1, col:7> col:7 in M hidden implicit class Consteval{{( ReachableWhenImported)?}}
+// CHECK-MOD-NEXT: | `-FriendDecl {{.*}} <line:19:3, col:30> col:25 in M{{( ReachableWhenImported)?}}
+// CHECK-MOD-NEXT: | `-FunctionDecl {{.*}} parent {{.*}} <col:3, col:30> col:25 in M hidden consteval friend_undeclared z4 'void ()'{{( ReachableWhenImported)?}} implicit-inline
+
+// CHECK-MOD: `-CXXRecordDecl {{.*}} <col:14, line:24:1> line:22:20 in M.<implicit global> hidden class GlobalModule{{( ReachableWhenImported)?}} definition
+// CHECK-MOD: |-CXXRecordDecl {{.*}} <col:14, col:20> col:20 in M.<implicit global> hidden implicit class GlobalModule{{( ReachableWhenImported)?}}
+// CHECK-MOD-NEXT: `-FriendDecl {{.*}} <line:23:3, col:20> col:15 in M.<implicit global>{{( ReachableWhenImported)?}}
+// CHECK-MOD-NEXT: `-FunctionDecl {{.*}} parent {{.*}} <col:3, col:20> col:15 in M.<implicit global> hidden friend_undeclared z5 'void ()'{{( ReachableWhenImported)?}} implicit-inline
diff --git a/clang/test/CXX/class/class.mfct/p1-cxx20.cpp b/clang/test/CXX/class/class.mfct/p1-cxx20.cpp
index 5b24668..ce6e58e 100644
--- a/clang/test/CXX/class/class.mfct/p1-cxx20.cpp
+++ b/clang/test/CXX/class/class.mfct/p1-cxx20.cpp
@@ -48,10 +48,42 @@ class Z {
void z(){};
};
+class Inline {
+ inline void z(){};
+};
+
+class Constexpr {
+ constexpr void z(){};
+};
+
+class Consteval {
+ consteval void z(){};
+};
+
+extern "C++" class GlobalModule {
+ void z(){};
+};
+
// CHECK-MOD: |-CXXRecordDecl {{.*}} <.{{/|\\\\?}}header.h:2:1, line:4:1> line:2:7 in M.<global> hidden class A definition
// CHECK-MOD: | |-CXXRecordDecl {{.*}} <col:1, col:7> col:7 in M.<global> hidden implicit class A
// CHECK-MOD-NEXT: | `-CXXMethodDecl {{.*}} <line:3:3, col:12> col:8 in M.<global> hidden a 'void ()' implicit-inline
-// CHECK-MOD: `-CXXRecordDecl {{.*}} <module.cpp:6:1, line:8:1> line:6:7 in M hidden class Z{{( ReachableWhenImported)?}} definition
-// CHECK-MOD: |-CXXRecordDecl {{.*}} <col:1, col:7> col:7 in M hidden implicit class Z{{( ReachableWhenImported)?}}
-// CHECK-MOD-NEXT: `-CXXMethodDecl {{.*}} <line:7:3, col:12> col:8 in M hidden z 'void ()'{{( ReachableWhenImported)?}}
+// CHECK-MOD: |-CXXRecordDecl {{.*}} <module.cpp:6:1, line:8:1> line:6:7 in M hidden class Z{{( ReachableWhenImported)?}} definition
+// CHECK-MOD: | |-CXXRecordDecl {{.*}} <col:1, col:7> col:7 in M hidden implicit class Z{{( ReachableWhenImported)?}}
+// CHECK-MOD-NEXT: | `-CXXMethodDecl {{.*}} <line:7:3, col:12> col:8 in M hidden z 'void ()'{{( ReachableWhenImported)?}}
+
+// CHECK-MOD: |-CXXRecordDecl {{.*}} <line:10:1, line:12:1> line:10:7 in M hidden class Inline{{( ReachableWhenImported)?}} definition
+// CHECK-MOD: | |-CXXRecordDecl {{.*}} <col:1, col:7> col:7 in M hidden implicit class Inline{{( ReachableWhenImported)?}}
+// CHECK-MOD-NEXT: | `-CXXMethodDecl {{.*}} <line:11:3, col:19> col:15 in M hidden z 'void ()'{{( ReachableWhenImported)?}} inline
+
+// CHECK-MOD: |-CXXRecordDecl {{.*}} <line:14:1, line:16:1> line:14:7 in M hidden class Constexpr{{( ReachableWhenImported)?}} definition
+// CHECK-MOD: | |-CXXRecordDecl {{.*}} <col:1, col:7> col:7 in M hidden implicit class Constexpr{{( ReachableWhenImported)?}}
+// CHECK-MOD-NEXT: | `-CXXMethodDecl {{.*}} <line:15:3, col:22> col:18 in M hidden constexpr z 'void ()'{{( ReachableWhenImported)?}} implicit-inline
+
+// CHECK-MOD: |-CXXRecordDecl {{.*}} <line:18:1, line:20:1> line:18:7 in M hidden class Consteval{{( ReachableWhenImported)?}} definition
+// CHECK-MOD: | |-CXXRecordDecl {{.*}} <col:1, col:7> col:7 in M hidden implicit class Consteval{{( ReachableWhenImported)?}}
+// CHECK-MOD-NEXT: | `-CXXMethodDecl {{.*}} <line:19:3, col:22> col:18 in M hidden consteval z 'void ()'{{( ReachableWhenImported)?}} implicit-inline
+
+// CHECK-MOD: `-CXXRecordDecl {{.*}} <col:14, line:24:1> line:22:20 in M.<implicit global> hidden class GlobalModule{{( ReachableWhenImported)?}} definition
+// CHECK-MOD: |-CXXRecordDecl {{.*}} <col:14, col:20> col:20 in M.<implicit global> hidden implicit class GlobalModule{{( ReachableWhenImported)?}}
+// CHECK-MOD-NEXT: `-CXXMethodDecl {{.*}} <line:23:3, col:12> col:8 in M.<implicit global> hidden z 'void ()'{{( ReachableWhenImported)?}} implicit-inline
diff --git a/clang/test/CodeGen/2008-07-22-bitfield-init-after-zero-len-array.c b/clang/test/CodeGen/2008-07-22-bitfield-init-after-zero-len-array.c
index b72d689..b639734 100644
--- a/clang/test/CodeGen/2008-07-22-bitfield-init-after-zero-len-array.c
+++ b/clang/test/CodeGen/2008-07-22-bitfield-init-after-zero-len-array.c
@@ -8,4 +8,4 @@ struct et7 {
52,
};
-// CHECK: @yv7 ={{.*}} global %struct.et7 { [0 x float] zeroinitializer, i8 52 }
+// CHECK: @yv7 ={{.*}} global { [0 x float], i8, [3 x i8] } { [0 x float] zeroinitializer, i8 52, [3 x i8] zeroinitializer }
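This and the following CodeGen test updates all reflect one underlying change: padding in constant aggregate initializers is now emitted as zeroinitializer rather than undef, which can also force a literal struct type whose padding is spelled out. A minimal hypothetical case:

    union U { int i; double d; };
    union U u = { 1 };
    // before: @u = global { i32, [4 x i8] } { i32 1, [4 x i8] undef }
    // after:  @u = global { i32, [4 x i8] } { i32 1, [4 x i8] zeroinitializer }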
diff --git a/clang/test/CodeGen/2008-08-07-AlignPadding1.c b/clang/test/CodeGen/2008-08-07-AlignPadding1.c
index 17e88ce..d69cbc2 100644
--- a/clang/test/CodeGen/2008-08-07-AlignPadding1.c
+++ b/clang/test/CodeGen/2008-08-07-AlignPadding1.c
@@ -20,9 +20,9 @@ struct gc_generation {
#define GEN_HEAD(n) (&generations[n].head)
-// The idea is that there are 6 undefs in this structure initializer to cover
+// The idea is that there are 6 zeroinitializers in this structure initializer to cover
// the padding between elements.
-// CHECK: @generations ={{.*}} global [3 x %struct.gc_generation] [%struct.gc_generation { %union._gc_head { %struct.anon { ptr @generations, ptr @generations, i64 0 }, [8 x i8] undef }, i32 700, i32 0, [8 x i8] undef }, %struct.gc_generation { %union._gc_head { %struct.anon { ptr getelementptr (i8, ptr @generations, i64 48), ptr getelementptr (i8, ptr @generations, i64 48), i64 0 }, [8 x i8] undef }, i32 10, i32 0, [8 x i8] undef }, %struct.gc_generation { %union._gc_head { %struct.anon { ptr getelementptr (i8, ptr @generations, i64 96), ptr getelementptr (i8, ptr @generations, i64 96), i64 0 }, [8 x i8] undef }, i32 10, i32 0, [8 x i8] undef }]
+// CHECK: @generations ={{.*}} global [3 x %struct.gc_generation] [%struct.gc_generation { %union._gc_head { %struct.anon { ptr @generations, ptr @generations, i64 0 }, [8 x i8] zeroinitializer }, i32 700, i32 0, [8 x i8] zeroinitializer }, %struct.gc_generation { %union._gc_head { %struct.anon { ptr getelementptr (i8, ptr @generations, i64 48), ptr getelementptr (i8, ptr @generations, i64 48), i64 0 }, [8 x i8] zeroinitializer }, i32 10, i32 0, [8 x i8] zeroinitializer }, %struct.gc_generation { %union._gc_head { %struct.anon { ptr getelementptr (i8, ptr @generations, i64 96), ptr getelementptr (i8, ptr @generations, i64 96), i64 0 }, [8 x i8] zeroinitializer }, i32 10, i32 0, [8 x i8] zeroinitializer }]
/* linked lists of container objects */
struct gc_generation generations[3] = {
/* PyGC_Head, threshold, count */
diff --git a/clang/test/CodeGen/2009-06-14-anonymous-union-init.c b/clang/test/CodeGen/2009-06-14-anonymous-union-init.c
index 13f6357..a4375d7 100644
--- a/clang/test/CodeGen/2009-06-14-anonymous-union-init.c
+++ b/clang/test/CodeGen/2009-06-14-anonymous-union-init.c
@@ -7,7 +7,7 @@ struct sysfs_dirent {
};
struct sysfs_dirent sysfs_root = { {}, 16877 };
-// CHECK: @sysfs_root = {{.*}}global %struct.sysfs_dirent { %union.anon zeroinitializer, i16 16877 }
+// CHECK: @sysfs_root = {{.*}}global { %union.anon, i16, [2 x i8] } { %union.anon zeroinitializer, i16 16877, [2 x i8] zeroinitializer }
struct Foo {
union { struct empty {} x; };
@@ -16,4 +16,4 @@ struct Foo {
struct Foo foo = { {}, 16877 };
// EMPTY: @foo = {{.*}}global %struct.Foo { i16 16877 }
-// EMPTY-MSVC: @foo = {{.*}}global %struct.Foo { [4 x i8] undef, i16 16877 }
+// EMPTY-MSVC: @foo = {{.*}}global %struct.Foo { [4 x i8] zeroinitializer, i16 16877 }
diff --git a/clang/test/CodeGen/64bit-swiftcall.c b/clang/test/CodeGen/64bit-swiftcall.c
index 7af65cc..7f8aa02 100644
--- a/clang/test/CodeGen/64bit-swiftcall.c
+++ b/clang/test/CodeGen/64bit-swiftcall.c
@@ -14,8 +14,6 @@
// CHECK-DAG: %struct.atomic_padded = type { { %struct.packed, [7 x i8] } }
// CHECK-DAG: %struct.packed = type <{ i64, i8 }>
-//
-// CHECK: [[STRUCT2_RESULT:@.*]] = private {{.*}} constant [[STRUCT2_TYPE:%.*]] { i32 0, i8 0, i8 undef, i8 0, i32 0, i32 0 }
/*****************************************************************************/
/****************************** PARAMETER ABIS *******************************/
@@ -162,8 +160,8 @@ typedef struct {
} struct_2;
TEST(struct_2);
// CHECK-LABEL: define{{.*}} swiftcc { i64, i64 } @return_struct_2() {{.*}}{
-// CHECK: [[RET:%.*]] = alloca [[STRUCT2_TYPE]], align 4
-// CHECK: call void @llvm.memcpy{{.*}}({{.*}}[[RET]], {{.*}}[[STRUCT2_RESULT]]
+// CHECK: [[RET:%.*]] = alloca [[STRUCT2:%.*]], align 4
+// CHECK: call void @llvm.memset
// CHECK: [[GEP0:%.*]] = getelementptr inbounds nuw { i64, i64 }, ptr [[RET]], i32 0, i32 0
// CHECK: [[T0:%.*]] = load i64, ptr [[GEP0]], align 4
// CHECK: [[GEP1:%.*]] = getelementptr inbounds nuw { i64, i64 }, ptr [[RET]], i32 0, i32 1
@@ -173,7 +171,7 @@ TEST(struct_2);
// CHECK: ret { i64, i64 } [[R1]]
// CHECK: }
// CHECK-LABEL: define{{.*}} swiftcc void @take_struct_2(i64 %0, i64 %1) {{.*}}{
-// CHECK: [[V:%.*]] = alloca [[STRUCT:%.*]], align 4
+// CHECK: [[V:%.*]] = alloca [[STRUCT2]], align 4
// CHECK: [[GEP0:%.*]] = getelementptr inbounds nuw { i64, i64 }, ptr [[V]], i32 0, i32 0
// CHECK: store i64 %0, ptr [[GEP0]], align 4
// CHECK: [[GEP1:%.*]] = getelementptr inbounds nuw { i64, i64 }, ptr [[V]], i32 0, i32 1
@@ -181,7 +179,7 @@ TEST(struct_2);
// CHECK: ret void
// CHECK: }
// CHECK-LABEL: define{{.*}} void @test_struct_2() {{.*}} {
-// CHECK: [[TMP:%.*]] = alloca [[STRUCT2_TYPE]], align 4
+// CHECK: [[TMP:%.*]] = alloca [[STRUCT2]], align 4
// CHECK: [[CALL:%.*]] = call swiftcc { i64, i64 } @return_struct_2()
// CHECK: [[GEP:%.*]] = getelementptr inbounds nuw {{.*}} [[TMP]], i32 0, i32 0
// CHECK: [[T0:%.*]] = extractvalue { i64, i64 } [[CALL]], 0
@@ -254,7 +252,7 @@ typedef union {
TEST(union_het_fp)
// CHECK-LABEL: define{{.*}} swiftcc i64 @return_union_het_fp()
// CHECK: [[RET:%.*]] = alloca [[UNION:%.*]], align 8
-// CHECK: call void @llvm.memcpy{{.*}}(ptr align 8 [[RET]]
+// CHECK: call void @llvm.memset{{.*}}(ptr align 8 [[RET]]
// CHECK: [[GEP:%.*]] = getelementptr inbounds nuw { i64 }, ptr [[RET]], i32 0, i32 0
// CHECK: [[R0:%.*]] = load i64, ptr [[GEP]], align 8
// CHECK: ret i64 [[R0]]
diff --git a/clang/test/CodeGen/PowerPC/builtins-ppc-build-pair-mma.c b/clang/test/CodeGen/PowerPC/builtins-ppc-build-pair-mma.c
index 8a2bc93..cdbfdd6 100644
--- a/clang/test/CodeGen/PowerPC/builtins-ppc-build-pair-mma.c
+++ b/clang/test/CodeGen/PowerPC/builtins-ppc-build-pair-mma.c
@@ -99,7 +99,7 @@ void test1(unsigned char *vqp, unsigned char *vpp, vector unsigned char vc1, vec
// CHECK-LE-NOOPT-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr [[VC1_ADDR]], align 16
// CHECK-LE-NOOPT-NEXT: [[TMP5:%.*]] = load <16 x i8>, ptr [[VC2_ADDR]], align 16
// CHECK-LE-NOOPT-NEXT: [[TMP6:%.*]] = call <256 x i1> @llvm.ppc.vsx.assemble.pair(<16 x i8> [[TMP5]], <16 x i8> [[TMP4]])
-// CHECK-LE-NOOPT-NEXT: store <256 x i1> [[TMP6]], ptr [[RES]], align 64
+// CHECK-LE-NOOPT-NEXT: store <256 x i1> [[TMP6]], ptr [[RES]], align 32
// CHECK-LE-NOOPT-NEXT: [[TMP7:%.*]] = load <256 x i1>, ptr [[RES]], align 32
// CHECK-LE-NOOPT-NEXT: [[TMP8:%.*]] = load ptr, ptr [[RESP_ADDR]], align 8
// CHECK-LE-NOOPT-NEXT: store <256 x i1> [[TMP7]], ptr [[TMP8]], align 32
diff --git a/clang/test/CodeGen/PowerPC/builtins-ppc-pair-mma-types.c b/clang/test/CodeGen/PowerPC/builtins-ppc-pair-mma-types.c
index 39c0409..b18bb3a 100644
--- a/clang/test/CodeGen/PowerPC/builtins-ppc-pair-mma-types.c
+++ b/clang/test/CodeGen/PowerPC/builtins-ppc-pair-mma-types.c
@@ -85,11 +85,11 @@ void testVQLocal(int *ptr, vector unsigned char vc) {
// CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr [[VC_ADDR]], align 16
// CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr [[VC_ADDR]], align 16
// CHECK-NEXT: [[TMP5:%.*]] = call <256 x i1> @llvm.ppc.vsx.assemble.pair(<16 x i8> [[TMP3]], <16 x i8> [[TMP4]])
-// CHECK-NEXT: store <256 x i1> [[TMP5]], ptr [[VP2]], align 64
+// CHECK-NEXT: store <256 x i1> [[TMP5]], ptr [[VP2]], align 32
// CHECK-NEXT: [[TMP6:%.*]] = load <16 x i8>, ptr [[VC_ADDR]], align 16
// CHECK-NEXT: [[TMP7:%.*]] = load <16 x i8>, ptr [[VC_ADDR]], align 16
// CHECK-NEXT: [[TMP8:%.*]] = call <256 x i1> @llvm.ppc.vsx.assemble.pair(<16 x i8> [[TMP7]], <16 x i8> [[TMP6]])
-// CHECK-NEXT: store <256 x i1> [[TMP8]], ptr [[VP2]], align 64
+// CHECK-NEXT: store <256 x i1> [[TMP8]], ptr [[VP2]], align 32
// CHECK-NEXT: [[TMP9:%.*]] = load <256 x i1>, ptr [[VP3]], align 32
// CHECK-NEXT: [[TMP10:%.*]] = load <16 x i8>, ptr [[VC_ADDR]], align 16
// CHECK-NEXT: [[TMP11:%.*]] = call <512 x i1> @llvm.ppc.mma.xvf64ger(<256 x i1> [[TMP9]], <16 x i8> [[TMP10]])
@@ -118,11 +118,11 @@ void testVQLocal(int *ptr, vector unsigned char vc) {
// CHECK-BE-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr [[VC_ADDR]], align 16
// CHECK-BE-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr [[VC_ADDR]], align 16
// CHECK-BE-NEXT: [[TMP5:%.*]] = call <256 x i1> @llvm.ppc.vsx.assemble.pair(<16 x i8> [[TMP3]], <16 x i8> [[TMP4]])
-// CHECK-BE-NEXT: store <256 x i1> [[TMP5]], ptr [[VP2]], align 64
+// CHECK-BE-NEXT: store <256 x i1> [[TMP5]], ptr [[VP2]], align 32
// CHECK-BE-NEXT: [[TMP6:%.*]] = load <16 x i8>, ptr [[VC_ADDR]], align 16
// CHECK-BE-NEXT: [[TMP7:%.*]] = load <16 x i8>, ptr [[VC_ADDR]], align 16
// CHECK-BE-NEXT: [[TMP8:%.*]] = call <256 x i1> @llvm.ppc.vsx.assemble.pair(<16 x i8> [[TMP6]], <16 x i8> [[TMP7]])
-// CHECK-BE-NEXT: store <256 x i1> [[TMP8]], ptr [[VP2]], align 64
+// CHECK-BE-NEXT: store <256 x i1> [[TMP8]], ptr [[VP2]], align 32
// CHECK-BE-NEXT: [[TMP9:%.*]] = load <256 x i1>, ptr [[VP3]], align 32
// CHECK-BE-NEXT: [[TMP10:%.*]] = load <16 x i8>, ptr [[VC_ADDR]], align 16
// CHECK-BE-NEXT: [[TMP11:%.*]] = call <512 x i1> @llvm.ppc.mma.xvf64ger(<256 x i1> [[TMP9]], <16 x i8> [[TMP10]])
diff --git a/clang/test/CodeGen/arm-swiftcall.c b/clang/test/CodeGen/arm-swiftcall.c
index ec0e386..677b878 100644
--- a/clang/test/CodeGen/arm-swiftcall.c
+++ b/clang/test/CodeGen/arm-swiftcall.c
@@ -172,7 +172,7 @@ typedef struct {
TEST(struct_2);
// CHECK-LABEL: define{{.*}} @return_struct_2()
// CHECK: [[RET:%.*]] = alloca [[REC:%.*]], align 4
-// CHECK: @llvm.memcpy
+// CHECK: @llvm.memset
// CHECK: [[T0:%.*]] = getelementptr inbounds nuw [[AGG:{ i32, i32, float, float }]], ptr [[RET]], i32 0, i32 0
// CHECK: [[FIRST:%.*]] = load i32, ptr [[T0]], align 4
// CHECK: [[T0:%.*]] = getelementptr inbounds nuw [[AGG]], ptr [[RET]], i32 0, i32 1
@@ -274,7 +274,7 @@ typedef union {
TEST(union_het_fp)
// CHECK-LABEL: define{{.*}} @return_union_het_fp()
// CHECK: [[RET:%.*]] = alloca [[REC:%.*]], align {{(4|8)}}
-// CHECK: @llvm.memcpy
+// CHECK: @llvm.memset
// CHECK: [[T0:%.*]] = getelementptr inbounds nuw [[AGG:{ i32, i32 }]], ptr [[RET]], i32 0, i32 0
// CHECK: [[FIRST:%.*]] = load i32, ptr [[T0]], align {{(4|8)}}
// CHECK: [[T0:%.*]] = getelementptr inbounds nuw [[AGG]], ptr [[RET]], i32 0, i32 1
diff --git a/clang/test/CodeGen/bpf-attr-type-tag-atomic.c b/clang/test/CodeGen/bpf-attr-type-tag-atomic.c
new file mode 100644
index 0000000..a10a45d
--- /dev/null
+++ b/clang/test/CodeGen/bpf-attr-type-tag-atomic.c
@@ -0,0 +1,16 @@
+// REQUIRES: bpf-registered-target
+// RUN: %clang_cc1 -triple bpf -emit-llvm -disable-llvm-passes -debug-info-kind=limited %s -o - | FileCheck %s
+
+#define __tag1 __attribute__((btf_type_tag("tag1")))
+int _Atomic __tag1 *g1;
+volatile int _Atomic __tag1 *g2;
+
+// CHECK: distinct !DIGlobalVariable(name: "g1", scope: ![[#]], file: ![[#]], line: [[#]], type: ![[PTR1:[0-9]+]]
+// CHECK: distinct !DIGlobalVariable(name: "g2", scope: ![[#]], file: ![[#]], line: [[#]], type: ![[PTR2:[0-9]+]]
+// CHECK: ![[PTR2]] = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: ![[BASE2:[0-9]+]], size: [[#]], annotations: ![[ANNOT:[0-9]+]])
+// CHECK: ![[BASE2]] = !DIDerivedType(tag: DW_TAG_volatile_type, baseType: ![[BASE1:[0-9]+]])
+// CHECK: ![[BASE1]] = !DIDerivedType(tag: DW_TAG_atomic_type, baseType: ![[BASIC:[0-9]+]])
+// CHECK: ![[BASIC]] = !DIBasicType(name: "int", size: [[#]], encoding: DW_ATE_signed)
+// CHECK: ![[ANNOT]] = !{![[ENTRY:[0-9]+]]}
+// CHECK: ![[ENTRY]] = !{!"btf_type_tag", !"tag1"}
+// CHECK: ![[PTR1]] = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: ![[BASE1]], size: [[#]], annotations: ![[ANNOT]])
diff --git a/clang/test/CodeGen/const-init.c b/clang/test/CodeGen/const-init.c
index ad3e955..fc973cb 100644
--- a/clang/test/CodeGen/const-init.c
+++ b/clang/test/CodeGen/const-init.c
@@ -170,7 +170,7 @@ void g30(void) {
int : 1;
int x;
} a = {};
- // CHECK: @g30.a = internal global %struct.anon.1 <{ i8 undef, i32 0 }>, align 1
+ // CHECK: @g30.a = internal global %struct.anon.1 zeroinitializer, align 1
#pragma pack()
}
@@ -182,7 +182,7 @@ void g31(void) {
short z;
} a = {23122, -12312731, -312};
#pragma pack()
- // CHECK: @g31.a = internal global %struct.anon.2 { i16 23122, i32 -12312731, i16 -312 }, align 4
+ // CHECK: @g31.a = internal global { i16, [2 x i8], i32, i16, [2 x i8] } { i16 23122, [2 x i8] zeroinitializer, i32 -12312731, i16 -312, [2 x i8] zeroinitializer }, align 4
}
// Clang should evaluate this in constant context, so floating point mode should
diff --git a/clang/test/CodeGen/decl.c b/clang/test/CodeGen/decl.c
index a63846b3..9744678 100644
--- a/clang/test/CodeGen/decl.c
+++ b/clang/test/CodeGen/decl.c
@@ -2,10 +2,10 @@
// CHECK: @test1.x = internal constant [12 x i32] [i32 1
// CHECK: @__const.test2.x = private unnamed_addr constant [13 x i32] [i32 1,
-// CHECK: @test5w = {{(dso_local )?}}global { i32, [4 x i8] } { i32 2, [4 x i8] undef }
+// CHECK: @test5w = {{(dso_local )?}}global { i32, [4 x i8] } { i32 2, [4 x i8] zeroinitializer }
// CHECK: @test5y = {{(dso_local )?}}global { double } { double 7.300000e+0{{[0]*}}1 }
-// CHECK: @__const.test6.x = private unnamed_addr constant %struct.SelectDest { i8 1, i8 2, i32 3, i32 0 }
+// CHECK: @__const.test6.x = private unnamed_addr constant { i8, i8, [2 x i8], i32, i32 } { i8 1, i8 2, [2 x i8] zeroinitializer, i32 3, i32 0 }
// CHECK: @test7 = {{(dso_local )?}}global [2 x %struct.test7s] [%struct.test7s { i32 1, i32 2 }, %struct.test7s { i32 4, i32 0 }]
diff --git a/clang/test/CodeGen/designated-initializers.c b/clang/test/CodeGen/designated-initializers.c
index 620b1b9..ac7860d 100644
--- a/clang/test/CodeGen/designated-initializers.c
+++ b/clang/test/CodeGen/designated-initializers.c
@@ -8,7 +8,7 @@ struct foo {
// CHECK: @u ={{.*}} global %union.anon zeroinitializer
union { int i; float f; } u = { };
-// CHECK: @u2 ={{.*}} global { i32, [4 x i8] } { i32 0, [4 x i8] undef }
+// CHECK: @u2 ={{.*}} global { i32, [4 x i8] } zeroinitializer
union { int i; double f; } u2 = { };
// CHECK: @u3 ={{.*}} global %union.anon.1 zeroinitializer
@@ -62,22 +62,22 @@ struct overwrite_string_struct2 {
char L[6];
int M;
} overwrite_string2[] = { { { "foo" }, 1 }, [0].L[2] = 'x'};
-// CHECK: [6 x i8] c"fox\00\00\00", i32 1
+// CHECK: [6 x i8] c"fox\00\00\00", [2 x i8] zeroinitializer, i32 1
struct overwrite_string_struct3 {
char L[3];
int M;
} overwrite_string3[] = { { { "foo" }, 1 }, [0].L[2] = 'x'};
-// CHECK: [3 x i8] c"fox", i32 1
+// CHECK: [3 x i8] c"fox", i8 0, i32 1
struct overwrite_string_struct4 {
char L[3];
int M;
} overwrite_string4[] = { { { "foobar" }, 1 }, [0].L[2] = 'x'};
-// CHECK: [3 x i8] c"fox", i32 1
+// CHECK: [3 x i8] c"fox", i8 0, i32 1
struct overwrite_string_struct5 {
char L[6];
int M;
} overwrite_string5[] = { { { "foo" }, 1 }, [0].L[4] = 'y'};
-// CHECK: [6 x i8] c"foo\00y\00", i32 1
+// CHECK: [6 x i8] c"foo\00y\00", [2 x i8] zeroinitializer, i32 1
// CHECK: @u1 = {{.*}} { i32 65535 }
@@ -138,7 +138,7 @@ union_16644_t union_16644_instance_4[2] =
[1].b[1] = 4
};
-// CHECK: @lab ={{.*}} global { [4 x i8], i32 } { [4 x i8] undef, i32 123 }
+// CHECK: @lab ={{.*}} global { [4 x i8], i32 } { [4 x i8] zeroinitializer, i32 123 }
struct leading_anon_bitfield { int : 32; int n; } lab = { .n = 123 };
struct Base {
diff --git a/clang/test/CodeGen/ext-int.c b/clang/test/CodeGen/ext-int.c
index e3d609a..aebacd6 100644
--- a/clang/test/CodeGen/ext-int.c
+++ b/clang/test/CodeGen/ext-int.c
@@ -16,7 +16,7 @@
unsigned _BitInt(1) GlobSize1 = 0;
// CHECK: @GlobSize1 = {{.*}}global i8 0
-// CHECK64: @__const.foo.A = private unnamed_addr constant { i32, [4 x i8], <{ i8, [23 x i8] }> } { i32 1, [4 x i8] undef, <{ i8, [23 x i8] }> <{ i8 -86, [23 x i8] zeroinitializer }> }, align 8
+// CHECK64: @__const.foo.A = private unnamed_addr constant { i32, [4 x i8], <{ i8, [23 x i8] }> } { i32 1, [4 x i8] zeroinitializer, <{ i8, [23 x i8] }> <{ i8 -86, [23 x i8] zeroinitializer }> }, align 8
// @BigGlob = global [40 x i8] c"\FF\FF\FF\FF\FF\FF\FF\FF\FF\FF\FF\FF\FF\FF\FF\FF\FF\FF\FF\FF\FF\FF\FF\FF\FF\FF\FF\FF\FF\FF\FF\FF\FF\FF\FF\FF\FF\FF\FF\FF", align 8
// CHECK64: @f.p = internal global <{ i8, i8, [22 x i8] }> <{ i8 16, i8 39, [22 x i8] zeroinitializer }>, align 8
@@ -91,8 +91,8 @@ int foo(int a) {
// CHECK64: %B2 = getelementptr inbounds nuw %struct.S1, ptr %B, i32 0, i32 2
// WIN32: %B2 = getelementptr inbounds nuw %struct.S1, ptr %B, i32 0, i32 2
// LIN32: %B2 = getelementptr inbounds nuw %struct.S1, ptr %B, i32 0, i32 1
- // CHECK: %0 = load i32, ptr %a.addr, align 4
- // CHECK: %conv = sext i32 %0 to i129
+ // CHECK: %[[V1:.+]] = load i32, ptr %a.addr, align 4
+ // CHECK: %conv = sext i32 %[[V1]] to i129
// CHECK64: storedv = sext i129 %conv to i192
// WIN32: storedv = sext i129 %conv to i192
// LIN32: storedv = sext i129 %conv to i160
@@ -102,12 +102,12 @@ int foo(int a) {
// CHECK64: %B3 = getelementptr inbounds nuw %struct.S1, ptr %A, i32 0, i32 2
// WIN32: %B3 = getelementptr inbounds nuw %struct.S1, ptr %A, i32 0, i32 2
// LIN32: %B3 = getelementptr inbounds nuw %struct.S1, ptr %A, i32 0, i32 1
- // CHECK64: %1 = load i192, ptr %B3, align 8
- // WIN32: %1 = load i192, ptr %B3, align 8
- // LIN32: %1 = load i160, ptr %B3, align 4
- // CHECK64: %loadedv = trunc i192 %1 to i129
- // WIN32: %loadedv = trunc i192 %1 to i129
- // LIN32: %loadedv = trunc i160 %1 to i129
+ // CHECK64: %[[V2:.+]] = load i192, ptr %B3, align 8
+ // WIN32: %[[V2:.+]] = load i192, ptr %B3, align 8
+ // LIN32: %[[V2:.+]] = load i160, ptr %B3, align 4
+ // CHECK64: %loadedv = trunc i192 %[[V2]] to i129
+ // WIN32: %loadedv = trunc i192 %[[V2]] to i129
+ // LIN32: %loadedv = trunc i160 %[[V2]] to i129
// CHECK: %conv4 = trunc i129 %loadedv to i32
struct S1 A = {1, 170};
struct S1 B = {1, a};
diff --git a/clang/test/CodeGen/flexible-array-init.c b/clang/test/CodeGen/flexible-array-init.c
index 15a30c1..17b520f 100644
--- a/clang/test/CodeGen/flexible-array-init.c
+++ b/clang/test/CodeGen/flexible-array-init.c
@@ -14,11 +14,11 @@ struct { int y[]; } b1 = { { 14, 16 } };
// sizeof(c) == 8, so this global should be at least 8 bytes.
struct { int x; char c; char y[]; } c = { 1, 2, { 13, 15 } };
-// CHECK: @c ={{.*}} global { i32, i8, [2 x i8] } { i32 1, i8 2, [2 x i8] c"\0D\0F" }
+// CHECK: @c ={{.*}} global { i32, i8, [2 x i8], i8 } { i32 1, i8 2, [2 x i8] c"\0D\0F", i8 0 }
// sizeof(d) == 8, so this global should be at least 8 bytes.
struct __attribute((packed, aligned(4))) { char a; int x; char z[]; } d = { 1, 2, { 13, 15 } };
-// CHECK: @d ={{.*}} <{ i8, i32, [2 x i8], i8 }> <{ i8 1, i32 2, [2 x i8] c"\0D\0F", i8 undef }>,
+// CHECK: @d ={{.*}} <{ i8, i32, [2 x i8], i8 }> <{ i8 1, i32 2, [2 x i8] c"\0D\0F", i8 0 }>,
// This global needs 9 bytes to hold all the flexible array members.
struct __attribute((packed, aligned(4))) { char a; int x; char z[]; } e = { 1, 2, { 13, 15, 17, 19 } };
@@ -55,21 +55,21 @@ struct { int a; union { int b; short x[]; }; int c; int d; } hf = {1, 2, {}, 3};
// First member is the potential flexible array; initialization requires braces.
struct { int a; union { short x; int b; }; int c; int d; } i = {1, 2, {}, 3};
-// CHECK: @i = global { i32, { i16, [2 x i8] }, i32, i32 } { i32 1, { i16, [2 x i8] } { i16 2, [2 x i8] undef }, i32 0, i32 3 }
+// CHECK: @i = global { i32, { i16, [2 x i8] }, i32, i32 } { i32 1, { i16, [2 x i8] } { i16 2, [2 x i8] zeroinitializer }, i32 0, i32 3 }
struct { int a; union { short x[0]; int b; }; int c; int d; } i0 = {1, {}, 2, 3};
-// CHECK: @i0 = global { i32, { [0 x i16], [4 x i8] }, i32, i32 } { i32 1, { [0 x i16], [4 x i8] } { [0 x i16] zeroinitializer, [4 x i8] undef }, i32 2, i32 3 }
+// CHECK: @i0 = global { i32, { [0 x i16], [4 x i8] }, i32, i32 } { i32 1, { [0 x i16], [4 x i8] } zeroinitializer, i32 2, i32 3 }
struct { int a; union { short x[1]; int b; }; int c; int d; } i1 = {1, {2}, {}, 3};
-// CHECK: @i1 = global { i32, { [1 x i16], [2 x i8] }, i32, i32 } { i32 1, { [1 x i16], [2 x i8] } { [1 x i16] [i16 2], [2 x i8] undef }, i32 0, i32 3 }
+// CHECK: @i1 = global { i32, { [1 x i16], [2 x i8] }, i32, i32 } { i32 1, { [1 x i16], [2 x i8] } { [1 x i16] [i16 2], [2 x i8] zeroinitializer }, i32 0, i32 3 }
struct { int a; union { short x[]; int b; }; int c; int d; } i_f = {4, {}, {}, 6};
-// CHECK: @i_f = global { i32, { [0 x i16], [4 x i8] }, i32, i32 } { i32 4, { [0 x i16], [4 x i8] } { [0 x i16] zeroinitializer, [4 x i8] undef }, i32 0, i32 6 }
+// CHECK: @i_f = global { i32, { [0 x i16], [4 x i8] }, i32, i32 } { i32 4, { [0 x i16], [4 x i8] } zeroinitializer, i32 0, i32 6 }
// Named initializers; order doesn't matter.
struct { int a; union { int b; short x; }; int c; int d; } hn = {.a = 1, .x = 2, .c = 3};
-// CHECK: @hn = global { i32, { i16, [2 x i8] }, i32, i32 } { i32 1, { i16, [2 x i8] } { i16 2, [2 x i8] undef }, i32 3, i32 0 }
+// CHECK: @hn = global { i32, { i16, [2 x i8] }, i32, i32 } { i32 1, { i16, [2 x i8] } { i16 2, [2 x i8] zeroinitializer }, i32 3, i32 0 }
struct { int a; union { int b; short x[0]; }; int c; int d; } hn0 = {.a = 1, .x = {2}, .c = 3};
-// CHECK: @hn0 = global { i32, { [0 x i16], [4 x i8] }, i32, i32 } { i32 1, { [0 x i16], [4 x i8] } { [0 x i16] zeroinitializer, [4 x i8] undef }, i32 3, i32 0 }
+// CHECK: @hn0 = global { i32, { [0 x i16], [4 x i8] }, i32, i32 } { i32 1, { [0 x i16], [4 x i8] } zeroinitializer, i32 3, i32 0 }
struct { int a; union { int b; short x[1]; }; int c; int d; } hn1 = {.a = 1, .x = {2}, .c = 3};
-// CHECK: @hn1 = global { i32, { [1 x i16], [2 x i8] }, i32, i32 } { i32 1, { [1 x i16], [2 x i8] } { [1 x i16] [i16 2], [2 x i8] undef }, i32 3, i32 0 }
+// CHECK: @hn1 = global { i32, { [1 x i16], [2 x i8] }, i32, i32 } { i32 1, { [1 x i16], [2 x i8] } { [1 x i16] [i16 2], [2 x i8] zeroinitializer }, i32 3, i32 0 }
struct { char a[]; } empty_struct = {};
// CHECK: @empty_struct ={{.*}} global %struct.anon{{.*}} zeroinitializer, align 1
@@ -96,10 +96,10 @@ union { char a[]; } only_in_union0 = {0};
// CHECK: @only_in_union0 = global { [1 x i8] } zeroinitializer, align 1
union { char a[]; int b; } first_in_union = {};
-// CHECK: @first_in_union = global { [0 x i8], [4 x i8] } { [0 x i8] zeroinitializer, [4 x i8] undef }, align 4
+// CHECK: @first_in_union = global { [0 x i8], [4 x i8] } zeroinitializer, align 4
union { char a[]; int b; } first_in_union0 = {0};
-// CHECK: @first_in_union0 = global { [1 x i8], [3 x i8] } { [1 x i8] zeroinitializer, [3 x i8] undef }, align 4
+// CHECK: @first_in_union0 = global { [1 x i8], [3 x i8] } zeroinitializer, align 4
union { char a[]; int b; } first_in_union123 = { {1, 2, 3} };
-// CHECK: @first_in_union123 = global { [3 x i8], i8 } { [3 x i8] c"\01\02\03", i8 undef }, align 4
+// CHECK: @first_in_union123 = global { [3 x i8], i8 } { [3 x i8] c"\01\02\03", i8 0 }, align 4
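To restate the pattern just exercised (hedged sketch; `FirstInUnion`/`fiu` are hypothetical): when a flexible array member heads a union, any union bytes the initializer does not cover are padding, and this change emits them as explicit zero bytes instead of `undef`:

/* Sketch: three initialized bytes plus one byte of padding to reach
   the union's 4-byte size; the pad is now i8 0 rather than i8 undef. */
union FirstInUnion { char a[]; int b; };
union FirstInUnion fiu = { {1, 2, 3} };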
diff --git a/clang/test/CodeGen/global-init.c b/clang/test/CodeGen/global-init.c
index 7f1d675..b156466 100644
--- a/clang/test/CodeGen/global-init.c
+++ b/clang/test/CodeGen/global-init.c
@@ -33,7 +33,7 @@ struct ManyFields {
int f;
};
-// CHECK: global %struct.ManyFields { i32 1, i32 2, i32 0, i8 0, i32 0, i32 0 }
+// CHECK: global { i32, i32, i32, i8, [3 x i8], i32, i32 } { i32 1, i32 2, i32 0, i8 0, [3 x i8] zeroinitializer, i32 0, i32 0 }
struct ManyFields FewInits = {1, 2};
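For orientation, a hedged sketch of why the check switches from the named `%struct.ManyFields` type to a literal struct type (`ManyFields2`/`few` are hypothetical stand-ins with the same layout): the partial initializer zeroes the remaining members per C semantics, and the three alignment bytes after the `char` now appear as an explicit zeroed pad field:

/* Sketch: c, e and f default to zero; the [3 x i8] after `d` is
   emitted as zeroinitializer, which forces a literal struct type. */
struct ManyFields2 { int a, b, c; char d; int e, f; };
struct ManyFields2 few = {1, 2};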
diff --git a/clang/test/CodeGen/init.c b/clang/test/CodeGen/init.c
index cbf615b..27f427d 100644
--- a/clang/test/CodeGen/init.c
+++ b/clang/test/CodeGen/init.c
@@ -187,25 +187,6 @@ void nonzeroMemsetf64(void) {
// CHECK: call void @llvm.memset.p0.i32(ptr {{.*}}, i8 68, i32 56, i1 false)
}
-void nonzeroPaddedUnionMemset(void) {
- union U { char c; int i; };
- union U arr[9] = { 0xF0, 0xF0, 0xF0, 0xF0, 0xF0, 0xF0, 0xF0, 0xF0, 0xF0, };
- // CHECK-LABEL: @nonzeroPaddedUnionMemset(
- // CHECK-NOT: store
- // CHECK-NOT: memcpy
- // CHECK: call void @llvm.memset.p0.i32(ptr {{.*}}, i8 -16, i32 36, i1 false)
-}
-
-void nonzeroNestedMemset(void) {
- union U { char c; int i; };
- struct S { union U u; short i; };
- struct S arr[5] = { { {0xF0}, 0xF0F0 }, { {0xF0}, 0xF0F0 }, { {0xF0}, 0xF0F0 }, { {0xF0}, 0xF0F0 }, { {0xF0}, 0xF0F0 }, };
- // CHECK-LABEL: @nonzeroNestedMemset(
- // CHECK-NOT: store
- // CHECK-NOT: memcpy
- // CHECK: call void @llvm.memset.p0.i32(ptr {{.*}}, i8 -16, i32 40, i1 false)
-}
-
// PR9257
struct test11S {
int A[10];
diff --git a/clang/test/CodeGen/inline-asm-output-variant.c b/clang/test/CodeGen/inline-asm-output-variant.c
new file mode 100644
index 0000000..376a876
--- /dev/null
+++ b/clang/test/CodeGen/inline-asm-output-variant.c
@@ -0,0 +1,26 @@
+// REQUIRES: x86-registered-target
+/// AT&T input
+// RUN: %clang_cc1 -triple x86_64 -S --output-asm-variant=0 %s -o - | FileCheck --check-prefix=ATT %s
+// RUN: %clang_cc1 -triple x86_64 -S --output-asm-variant=1 %s -o - | FileCheck --check-prefix=INTEL %s
+
+/// Intel input
+// RUN: %clang_cc1 -triple x86_64 -S -D INTEL -mllvm -x86-asm-syntax=intel -inline-asm=intel %s -o - | FileCheck --check-prefix=INTEL %s
+// RUN: %clang_cc1 -triple x86_64 -S -D INTEL -mllvm -x86-asm-syntax=intel -inline-asm=intel --output-asm-variant=1 %s -o - | FileCheck --check-prefix=INTEL %s
+
+// ATT: movl $1, %eax
+// ATT: movl $2, %eax
+
+// INTEL: mov eax, 1
+// INTEL: mov eax, 2
+
+#ifdef INTEL
+asm("mov eax, 1");
+void foo() {
+ asm("mov eax, 2");
+}
+#else
+asm("mov $1, %eax");
+void foo() {
+ asm("mov $2, %eax");
+}
+#endif
diff --git a/clang/test/CodeGen/linux-kernel-struct-union-initializer.c b/clang/test/CodeGen/linux-kernel-struct-union-initializer.c
new file mode 100644
index 0000000..dc68cc0
--- /dev/null
+++ b/clang/test/CodeGen/linux-kernel-struct-union-initializer.c
@@ -0,0 +1,267 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --check-globals all --version 5
+// RUN: %clang_cc1 -triple x86_64-unknown-unknown -std=gnu11 -verify -emit-llvm %s -o - | FileCheck %s
+// expected-no-diagnostics
+
+union U1 {
+ int x;
+ char y[16];
+};
+
+struct S1 {
+ int x;
+ union U1 y;
+};
+
+union U2 {
+ int x;
+ char y[16];
+} __attribute__((__aligned__(32)));
+
+struct S2 {
+ int x;
+ long long y;
+ char z[8];
+} __attribute__((__aligned__(32)));
+
+union U1 global_u1 = {};
+
+union U1 global_u2 = {3};
+
+union U1 global_u2_from_cast = (union U1)3;
+
+struct S1 global_s1 = {};
+
+struct S1 global_s2 = {
+ .x = 3,
+};
+
+struct S1 global_s3 = {.x = 3, .y = {.x = 6}};
+
+const union U1 global_const_u1 = {4};
+struct S1 global_s3_from_const_u1 = {.y = global_const_u1};
+
+union U2 global_u3 = {};
+
+struct S2 global_s4 = {};
+
+struct S2 global_s5 = {.x = 1};
+
+
+// Test empty initializer for union.
+//.
+// CHECK: @global_u1 = global %union.U1 zeroinitializer, align 4
+// CHECK: @global_u2 = global %union.U1 { i32 3, [12 x i8] zeroinitializer }, align 4
+// CHECK: @global_u2_from_cast = global { i32, [12 x i8] } { i32 3, [12 x i8] zeroinitializer }, align 4
+// CHECK: @global_s1 = global %struct.S1 zeroinitializer, align 4
+// CHECK: @global_s2 = global %struct.S1 { i32 3, %union.U1 zeroinitializer }, align 4
+// CHECK: @global_s3 = global %struct.S1 { i32 3, %union.U1 { i32 6, [12 x i8] zeroinitializer } }, align 4
+// CHECK: @global_const_u1 = constant %union.U1 { i32 4, [12 x i8] zeroinitializer }, align 4
+// CHECK: @global_s3_from_const_u1 = global %struct.S1 { i32 0, %union.U1 { i32 4, [12 x i8] zeroinitializer } }, align 4
+// CHECK: @global_u3 = global %union.U2 zeroinitializer, align 32
+// CHECK: @global_s4 = global { i32, [4 x i8], i64, [8 x i8], [8 x i8] } zeroinitializer, align 32
+// CHECK: @global_s5 = global { i32, [4 x i8], i64, [8 x i8], [8 x i8] } { i32 1, [4 x i8] zeroinitializer, i64 0, [8 x i8] zeroinitializer, [8 x i8] zeroinitializer }, align 32
+// CHECK: @test2.a = internal global %union.U1 zeroinitializer, align 4
+// CHECK: @__const.test3.a = private unnamed_addr constant %union.U1 { i32 3, [12 x i8] zeroinitializer }, align 4
+// CHECK: @test4.a = internal global %union.U1 { i32 3, [12 x i8] zeroinitializer }, align 4
+// CHECK: @test6.s = internal global %struct.S1 zeroinitializer, align 4
+// CHECK: @__const.test7.s = private unnamed_addr constant %struct.S1 { i32 3, %union.U1 zeroinitializer }, align 4
+// CHECK: @test8.s = internal global %struct.S1 { i32 3, %union.U1 zeroinitializer }, align 4
+// CHECK: @__const.test9.s = private unnamed_addr constant %struct.S1 { i32 3, %union.U1 { i32 6, [12 x i8] zeroinitializer } }, align 4
+// CHECK: @test10.s = internal global %struct.S1 { i32 3, %union.U1 { i32 6, [12 x i8] zeroinitializer } }, align 4
+// CHECK: @test12.a = internal global %union.U2 zeroinitializer, align 32
+// CHECK: @test14.s = internal global { i32, [4 x i8], i64, [8 x i8], [8 x i8] } zeroinitializer, align 32
+// CHECK: @__const.test15.s = private unnamed_addr constant { i32, [4 x i8], i64, [8 x i8], [8 x i8] } { i32 1, [4 x i8] zeroinitializer, i64 0, [8 x i8] zeroinitializer, [8 x i8] zeroinitializer }, align 32
+// CHECK: @test16.s = internal global { i32, [4 x i8], i64, [8 x i8], [8 x i8] } { i32 1, [4 x i8] zeroinitializer, i64 0, [8 x i8] zeroinitializer, [8 x i8] zeroinitializer }, align 32
+//.
+// CHECK-LABEL: define dso_local void @test1(
+// CHECK-SAME: ) #[[ATTR0:[0-9]+]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[A:%.*]] = alloca [[UNION_U1:%.*]], align 4
+// CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 4 [[A]], i8 0, i64 16, i1 false)
+// CHECK-NEXT: ret void
+//
+void test1() {
+ union U1 a = {};
+}
+
+// Test empty initializer for union. Use static variable.
+// CHECK-LABEL: define dso_local void @test2(
+// CHECK-SAME: ) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: ret void
+//
+void test2() {
+ static union U1 a = {};
+}
+
+// Test only initializing a small field for union.
+// CHECK-LABEL: define dso_local void @test3(
+// CHECK-SAME: ) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[A:%.*]] = alloca [[UNION_U1:%.*]], align 4
+// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[A]], ptr align 4 @__const.test3.a, i64 16, i1 false)
+// CHECK-NEXT: ret void
+//
+void test3() {
+ union U1 a = {3};
+}
+
+// Test only initializing a small field for union. Use static variable.
+// CHECK-LABEL: define dso_local void @test4(
+// CHECK-SAME: ) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: ret void
+//
+void test4() {
+ static union U1 a = {3};
+}
+
+// Test union in struct. Use empty initializer for the struct.
+// CHECK-LABEL: define dso_local void @test5(
+// CHECK-SAME: ) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 4
+// CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 4 [[S]], i8 0, i64 20, i1 false)
+// CHECK-NEXT: ret void
+//
+void test5() {
+ struct S1 s = {};
+}
+
+// Test union in struct. Use empty initializer for the struct. Use static variable.
+// CHECK-LABEL: define dso_local void @test6(
+// CHECK-SAME: ) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: ret void
+//
+void test6() {
+ static struct S1 s = {};
+}
+
+// Test union in struct. Initialize other fields of the struct.
+// CHECK-LABEL: define dso_local void @test7(
+// CHECK-SAME: ) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 4
+// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[S]], ptr align 4 @__const.test7.s, i64 20, i1 false)
+// CHECK-NEXT: ret void
+//
+void test7() {
+ struct S1 s = {
+ .x = 3,
+ };
+}
+
+// Test union in struct. Initialize other fields of the struct. Use static variable.
+// CHECK-LABEL: define dso_local void @test8(
+// CHECK-SAME: ) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: ret void
+//
+void test8() {
+ static struct S1 s = {
+ .x = 3,
+ };
+}
+
+// Test union in struct. Initialize a small field for union.
+// CHECK-LABEL: define dso_local void @test9(
+// CHECK-SAME: ) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 4
+// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[S]], ptr align 4 @__const.test9.s, i64 20, i1 false)
+// CHECK-NEXT: ret void
+//
+void test9() {
+ struct S1 s = {.x = 3,
+ .y = {
+ .x = 6,
+ }};
+}
+
+// Test union in struct. Initialize a small field for union. Use static variable.
+// CHECK-LABEL: define dso_local void @test10(
+// CHECK-SAME: ) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: ret void
+//
+void test10() {
+ static struct S1 s = {.x = 3,
+ .y = {
+ .x = 6,
+ }};
+}
+
+// Test empty initializer for union with padding.
+// CHECK-LABEL: define dso_local void @test11(
+// CHECK-SAME: ) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[A:%.*]] = alloca [[UNION_U2:%.*]], align 32
+// CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 32 [[A]], i8 0, i64 32, i1 false)
+// CHECK-NEXT: ret void
+//
+void test11() {
+ union U2 a = {};
+}
+
+// Test empty initializer for union with padding. Use static variable.
+// CHECK-LABEL: define dso_local void @test12(
+// CHECK-SAME: ) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: ret void
+//
+void test12() {
+ static union U2 a = {};
+}
+
+// Test empty initializer for struct with padding.
+// CHECK-LABEL: define dso_local void @test13(
+// CHECK-SAME: ) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[S:%.*]] = alloca [[STRUCT_S2:%.*]], align 32
+// CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 32 [[S]], i8 0, i64 32, i1 false)
+// CHECK-NEXT: ret void
+//
+void test13() {
+ struct S2 s = {};
+}
+
+// Test empty initializer for struct with padding. Use static variable.
+// CHECK-LABEL: define dso_local void @test14(
+// CHECK-SAME: ) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: ret void
+//
+void test14() {
+ static struct S2 s = {};
+}
+
+// Test partial initialization for struct with padding.
+// CHECK-LABEL: define dso_local void @test15(
+// CHECK-SAME: ) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[S:%.*]] = alloca [[STRUCT_S2:%.*]], align 32
+// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 32 [[S]], ptr align 32 @__const.test15.s, i64 32, i1 false)
+// CHECK-NEXT: ret void
+//
+void test15() {
+ struct S2 s = {.x = 1};
+}
+
+// Test partial initialization for struct with padding. Use static variable.
+// CHECK-LABEL: define dso_local void @test16(
+// CHECK-SAME: ) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: ret void
+//
+void test16() {
+ static struct S2 s = {.x = 1};
+}
+//.
+// CHECK: attributes #[[ATTR0]] = { noinline nounwind optnone "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+cx8,+mmx,+sse,+sse2,+x87" }
+// CHECK: attributes #[[ATTR1:[0-9]+]] = { nocallback nofree nounwind willreturn memory(argmem: write) }
+// CHECK: attributes #[[ATTR2:[0-9]+]] = { nocallback nofree nounwind willreturn memory(argmem: readwrite) }
+//.
+// CHECK: [[META0:![0-9]+]] = !{i32 1, !"wchar_size", i32 4}
+// CHECK: [[META1:![0-9]+]] = !{!"{{.*}}clang version {{.*}}"}
+//.
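A hedged summary of what the sixteen tests above pin down (`U1x`/`locals_vs_statics` are hypothetical): automatic objects with an empty `{}` lower to an `llvm.memset` over the whole object (padding included), partial initializers to a `memcpy` from a private constant, and statics fold entirely into the global initializer, leaving an empty function body:

/* Sketch, under -std=gnu11 as in the RUN line. */
union U1x { int x; char y[16]; };

void locals_vs_statics(void) {
  union U1x a = {};         /* memset(&a, 0, 16) at runtime */
  static union U1x b = {};  /* internal global %union.U1x zeroinitializer; no code */
}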
diff --git a/clang/test/CodeGen/linux-kernel-struct-union-initializer2.c b/clang/test/CodeGen/linux-kernel-struct-union-initializer2.c
new file mode 100644
index 0000000..0a1ad3a
--- /dev/null
+++ b/clang/test/CodeGen/linux-kernel-struct-union-initializer2.c
@@ -0,0 +1,140 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// RUN: %clang_cc1 -triple x86_64-unknown-unknown -std=gnu11 -verify -emit-llvm %s -o - | FileCheck %s
+// expected-no-diagnostics
+
+union U1 {
+ int x;
+ char y[5];
+};
+
+struct S1 {
+ int x;
+ long long y;
+};
+
+struct S2 {
+ unsigned char b1 : 3; // 1st 3 bits (in 1st byte) are b1
+ unsigned char : 2; // next 2 bits (in 1st byte) are blocked out as unused
+ unsigned char b2 : 6; // 6 bits for b2 - doesn't fit into the 1st byte => starts a 2nd
+ unsigned char b3 : 2; // 2 bits for b3 - next (and final) bits in the 2nd byte
+ int i;
+};
+
+struct S3 {
+ int x;
+} __attribute__((__aligned__(8)));
+
+struct S4 {
+ int a;
+ union U1 b;
+};
+
+// Test non-const initializer for union with padding.
+// CHECK-LABEL: define dso_local void @test1(
+// CHECK-SAME: i32 noundef [[X:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[X_ADDR:%.*]] = alloca i32, align 4
+// CHECK-NEXT: [[A:%.*]] = alloca [[UNION_U1:%.*]], align 4
+// CHECK-NEXT: store i32 [[X]], ptr [[X_ADDR]], align 4
+// CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[X_ADDR]], align 4
+// CHECK-NEXT: store i32 [[TMP0]], ptr [[A]], align 4
+// CHECK-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[A]], i64 4
+// CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 4 [[TMP1]], i8 0, i64 4, i1 false)
+// CHECK-NEXT: ret void
+//
+void test1(int x) {
+ union U1 a = {x};
+}
+
+// Test non-const initializer for struct with padding.
+// CHECK-LABEL: define dso_local void @test2(
+// CHECK-SAME: i64 noundef [[Y:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[Y_ADDR:%.*]] = alloca i64, align 8
+// CHECK-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 8
+// CHECK-NEXT: store i64 [[Y]], ptr [[Y_ADDR]], align 8
+// CHECK-NEXT: [[X:%.*]] = getelementptr inbounds nuw [[STRUCT_S1]], ptr [[S]], i32 0, i32 0
+// CHECK-NEXT: store i32 0, ptr [[X]], align 8
+// CHECK-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[S]], i64 4
+// CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 4 [[TMP0]], i8 0, i64 4, i1 false)
+// CHECK-NEXT: [[Y1:%.*]] = getelementptr inbounds nuw [[STRUCT_S1]], ptr [[S]], i32 0, i32 1
+// CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr [[Y_ADDR]], align 8
+// CHECK-NEXT: store i64 [[TMP1]], ptr [[Y1]], align 8
+// CHECK-NEXT: ret void
+//
+void test2(long long y) {
+ struct S1 s = {.y = y};
+}
+
+// Test non-const initializer for struct with padding and bit fields.
+// CHECK-LABEL: define dso_local void @test3(
+// CHECK-SAME: i8 noundef zeroext [[B:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[B_ADDR:%.*]] = alloca i8, align 1
+// CHECK-NEXT: [[S:%.*]] = alloca [[STRUCT_S2:%.*]], align 4
+// CHECK-NEXT: store i8 [[B]], ptr [[B_ADDR]], align 1
+// CHECK-NEXT: [[TMP0:%.*]] = load i8, ptr [[B_ADDR]], align 1
+// CHECK-NEXT: [[TMP1:%.*]] = zext i8 [[TMP0]] to i16
+// CHECK-NEXT: [[BF_LOAD:%.*]] = load i16, ptr [[S]], align 4
+// CHECK-NEXT: [[BF_VALUE:%.*]] = and i16 [[TMP1]], 7
+// CHECK-NEXT: [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD]], -8
+// CHECK-NEXT: [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], [[BF_VALUE]]
+// CHECK-NEXT: store i16 [[BF_SET]], ptr [[S]], align 4
+// CHECK-NEXT: [[BF_LOAD1:%.*]] = load i16, ptr [[S]], align 4
+// CHECK-NEXT: [[BF_CLEAR2:%.*]] = and i16 [[BF_LOAD1]], -16129
+// CHECK-NEXT: [[BF_SET3:%.*]] = or i16 [[BF_CLEAR2]], 0
+// CHECK-NEXT: store i16 [[BF_SET3]], ptr [[S]], align 4
+// CHECK-NEXT: [[BF_LOAD4:%.*]] = load i16, ptr [[S]], align 4
+// CHECK-NEXT: [[BF_CLEAR5:%.*]] = and i16 [[BF_LOAD4]], 16383
+// CHECK-NEXT: [[BF_SET6:%.*]] = or i16 [[BF_CLEAR5]], 0
+// CHECK-NEXT: store i16 [[BF_SET6]], ptr [[S]], align 4
+// CHECK-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[S]], i64 2
+// CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 2 [[TMP2]], i8 0, i64 2, i1 false)
+// CHECK-NEXT: [[I:%.*]] = getelementptr inbounds nuw [[STRUCT_S2]], ptr [[S]], i32 0, i32 1
+// CHECK-NEXT: store i32 0, ptr [[I]], align 4
+// CHECK-NEXT: ret void
+//
+void test3(unsigned char b) {
+ struct S2 s = {.b1 = b};
+}
+
+// Test non-const initializer for struct with padding at the end of the struct.
+// CHECK-LABEL: define dso_local void @test4(
+// CHECK-SAME: i32 noundef [[X:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[X_ADDR:%.*]] = alloca i32, align 4
+// CHECK-NEXT: [[S:%.*]] = alloca [[STRUCT_S3:%.*]], align 8
+// CHECK-NEXT: store i32 [[X]], ptr [[X_ADDR]], align 4
+// CHECK-NEXT: [[X1:%.*]] = getelementptr inbounds nuw [[STRUCT_S3]], ptr [[S]], i32 0, i32 0
+// CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[X_ADDR]], align 4
+// CHECK-NEXT: store i32 [[TMP0]], ptr [[X1]], align 8
+// CHECK-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[S]], i64 4
+// CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 4 [[TMP1]], i8 0, i64 4, i1 false)
+// CHECK-NEXT: ret void
+//
+void test4(int x) {
+ struct S3 s = {x};
+}
+
+// Test non-const initializer for union in struct.
+// CHECK-LABEL: define dso_local void @test5(
+// CHECK-SAME: i32 noundef [[A:%.*]], i32 noundef [[B:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
+// CHECK-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4
+// CHECK-NEXT: [[S:%.*]] = alloca [[STRUCT_S4:%.*]], align 4
+// CHECK-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
+// CHECK-NEXT: store i32 [[B]], ptr [[B_ADDR]], align 4
+// CHECK-NEXT: [[A1:%.*]] = getelementptr inbounds nuw [[STRUCT_S4]], ptr [[S]], i32 0, i32 0
+// CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
+// CHECK-NEXT: store i32 [[TMP0]], ptr [[A1]], align 4
+// CHECK-NEXT: [[B2:%.*]] = getelementptr inbounds nuw [[STRUCT_S4]], ptr [[S]], i32 0, i32 1
+// CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[B_ADDR]], align 4
+// CHECK-NEXT: store i32 [[TMP1]], ptr [[B2]], align 4
+// CHECK-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[B2]], i64 4
+// CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 4 [[TMP2]], i8 0, i64 4, i1 false)
+// CHECK-NEXT: ret void
+//
+void test5(int a, int b) {
+ struct S4 s = {a, {b}};
+}
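The companion file's point, again as a hedged sketch (`Pad8`/`init_runtime` are hypothetical): when the initializer is not a compile-time constant, fields are stored one by one, so the padding a constant image would have covered is zeroed with a targeted `llvm.memset` over just the pad bytes:

/* Sketch: store x into the first 4 bytes, then memset the remaining
   4 bytes of the 8-byte union so its padding is deterministically zero. */
union Pad8 { int x; char y[5]; };

void init_runtime(int x) {
  union Pad8 a = { x };  /* store i32; memset((char *)&a + 4, 0, 4) */
}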
diff --git a/clang/test/CodeGen/mingw-long-double.c b/clang/test/CodeGen/mingw-long-double.c
index 4be9752..0fc8f015 100644
--- a/clang/test/CodeGen/mingw-long-double.c
+++ b/clang/test/CodeGen/mingw-long-double.c
@@ -11,12 +11,9 @@ struct {
char c;
long double ldb;
} agggregate_LD = {};
-// GNU32: %struct.anon = type { i8, x86_fp80 }
-// GNU32: @agggregate_LD = dso_local global %struct.anon zeroinitializer, align 4
-// GNU64: %struct.anon = type { i8, x86_fp80 }
-// GNU64: @agggregate_LD = dso_local global %struct.anon zeroinitializer, align 16
-// MSC64: %struct.anon = type { i8, double }
-// MSC64: @agggregate_LD = dso_local global %struct.anon zeroinitializer, align 8
+// GNU32: @agggregate_LD = dso_local global { i8, [3 x i8], x86_fp80 } zeroinitializer, align 4
+// GNU64: @agggregate_LD = dso_local global { i8, [15 x i8], x86_fp80 } zeroinitializer, align 16
+// MSC64: @agggregate_LD = dso_local global { i8, [7 x i8], double } zeroinitializer, align 8
long double dataLD = 1.0L;
// GNU32: @dataLD = dso_local global x86_fp80 0xK3FFF8000000000000000, align 4
diff --git a/clang/test/CodeGen/mms-bitfields.c b/clang/test/CodeGen/mms-bitfields.c
index 49c5c1c..2ccce32 100644
--- a/clang/test/CodeGen/mms-bitfields.c
+++ b/clang/test/CodeGen/mms-bitfields.c
@@ -61,5 +61,5 @@ union HEADER {
struct Inner variable = { 1,0,1, 21 };
union HEADER hdr = {{1,2,3,4}};
-// CHECK: @variable ={{.*}} global { i8, [3 x i8], i8, i8, i8, i8 } { i8 5, [3 x i8] undef, i8 21, i8 0, i8 0, i8 0 }, align 1
-// CHECK: @hdr ={{.*}} global { { i8, i8, [2 x i8], i8, i8, i8, i8, i8, [3 x i8] } } { { i8, i8, [2 x i8], i8, i8, i8, i8, i8, [3 x i8] } { i8 8, i8 0, [2 x i8] undef, i8 2, i8 0, i8 0, i8 3, i8 4, [3 x i8] undef } }, align 1
+// CHECK: @variable ={{.*}} global { i8, [3 x i8], i8, i8, i8, i8 } { i8 5, [3 x i8] zeroinitializer, i8 21, i8 0, i8 0, i8 0 }, align 1
+// CHECK: @hdr ={{.*}} global { { i8, i8, [2 x i8], i8, i8, i8, i8, i8, [3 x i8] } } { { i8, i8, [2 x i8], i8, i8, i8, i8, i8, [3 x i8] } { i8 8, i8 0, [2 x i8] zeroinitializer, i8 2, i8 0, i8 0, i8 3, i8 4, [3 x i8] zeroinitializer } }, align 1
diff --git a/clang/test/CodeGen/scoped-atomic-ops.c b/clang/test/CodeGen/scoped-atomic-ops.c
index b003204..cf98812 100644
--- a/clang/test/CodeGen/scoped-atomic-ops.c
+++ b/clang/test/CodeGen/scoped-atomic-ops.c
@@ -1,12 +1,21 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
// RUN: %clang_cc1 %s -emit-llvm -o - -triple=amdgcn-amd-amdhsa -ffreestanding \
-// RUN: -fvisibility=hidden | FileCheck %s
+// RUN: -fvisibility=hidden | FileCheck --check-prefix=AMDGCN %s
+// RUN: %clang_cc1 %s -emit-llvm -o - -triple=spirv64-unknown-unknown -ffreestanding \
+// RUN: -fvisibility=hidden | FileCheck --check-prefix=SPIRV %s
-// CHECK-LABEL: define hidden i32 @fi1a(
-// CHECK: [[TMP0:%.*]] = load atomic i32, ptr [[PTR0:.+]] syncscope("one-as") monotonic, align 4
-// CHECK: [[TMP1:%.*]] = load atomic i32, ptr [[PTR1:.+]] syncscope("agent-one-as") monotonic, align 4
-// CHECK: [[TMP2:%.*]] = load atomic i32, ptr [[PTR2:.+]] syncscope("workgroup-one-as") monotonic, align 4
-// CHECK: [[TMP3:%.*]] = load atomic i32, ptr [[PTR3:.+]] syncscope("wavefront-one-as") monotonic, align 4
-// CHECK: [[TMP4:%.*]] = load atomic i32, ptr [[PTR4:.+]] syncscope("singlethread-one-as") monotonic, align 4
+// AMDGCN-LABEL: define hidden i32 @fi1a(
+// AMDGCN: [[TMP0:%.*]] = load atomic i32, ptr [[PTR0:.+]] syncscope("one-as") monotonic, align 4
+// AMDGCN: [[TMP1:%.*]] = load atomic i32, ptr [[PTR1:.+]] syncscope("agent-one-as") monotonic, align 4
+// AMDGCN: [[TMP2:%.*]] = load atomic i32, ptr [[PTR2:.+]] syncscope("workgroup-one-as") monotonic, align 4
+// AMDGCN: [[TMP3:%.*]] = load atomic i32, ptr [[PTR3:.+]] syncscope("wavefront-one-as") monotonic, align 4
+// AMDGCN: [[TMP4:%.*]] = load atomic i32, ptr [[PTR4:.+]] syncscope("singlethread-one-as") monotonic, align 4
+// SPIRV: define hidden spir_func i32 @fi1a(
+// SPIRV: [[TMP0:%.*]] = load atomic i32, ptr [[PTR0:.+]] monotonic, align 4
+// SPIRV: [[TMP1:%.*]] = load atomic i32, ptr [[PTR1:.+]] syncscope("device") monotonic, align 4
+// SPIRV: [[TMP2:%.*]] = load atomic i32, ptr [[PTR2:.+]] syncscope("workgroup") monotonic, align 4
+// SPIRV: [[TMP3:%.*]] = load atomic i32, ptr [[PTR3:.+]] syncscope("subgroup") monotonic, align 4
+// SPIRV: [[TMP4:%.*]] = load atomic i32, ptr [[PTR4:.+]] syncscope("singlethread") monotonic, align 4
int fi1a(int *i) {
int v;
__scoped_atomic_load(i, &v, __ATOMIC_RELAXED, __MEMORY_SCOPE_SYSTEM);
@@ -17,13 +26,18 @@ int fi1a(int *i) {
return v;
}
-// CHECK-LABEL: define hidden i32 @fi1b(
-// CHECK: [[TMP0:%.*]] = load atomic i32, ptr [[PTR0:%.+]] syncscope("one-as") monotonic, align 4
-// CHECK: [[TMP1:%.*]] = load atomic i32, ptr [[PTR1:%.+]] syncscope("agent-one-as") monotonic, align 4
-// CHECK: [[TMP2:%.*]] = load atomic i32, ptr [[PTR2:%.+]] syncscope("workgroup-one-as") monotonic, align 4
-// CHECK: [[TMP3:%.*]] = load atomic i32, ptr [[PTR3:%.+]] syncscope("wavefront-one-as") monotonic, align 4
-// CHECK: [[TMP4:%.*]] = load atomic i32, ptr [[PTR4:%.+]] syncscope("singlethread-one-as") monotonic, align 4
-//
+// AMDGCN-LABEL: define hidden i32 @fi1b(
+// AMDGCN: [[TMP0:%.*]] = load atomic i32, ptr [[PTR0:%.+]] syncscope("one-as") monotonic, align 4
+// AMDGCN: [[TMP1:%.*]] = load atomic i32, ptr [[PTR1:%.+]] syncscope("agent-one-as") monotonic, align 4
+// AMDGCN: [[TMP2:%.*]] = load atomic i32, ptr [[PTR2:%.+]] syncscope("workgroup-one-as") monotonic, align 4
+// AMDGCN: [[TMP3:%.*]] = load atomic i32, ptr [[PTR3:%.+]] syncscope("wavefront-one-as") monotonic, align 4
+// AMDGCN: [[TMP4:%.*]] = load atomic i32, ptr [[PTR4:%.+]] syncscope("singlethread-one-as") monotonic, align 4
+// SPIRV-LABEL: define hidden spir_func i32 @fi1b(
+// SPIRV: [[TMP0:%.*]] = load atomic i32, ptr [[PTR0:%.+]] monotonic, align 4
+// SPIRV: [[TMP1:%.*]] = load atomic i32, ptr [[PTR1:%.+]] syncscope("device") monotonic, align 4
+// SPIRV: [[TMP2:%.*]] = load atomic i32, ptr [[PTR2:%.+]] syncscope("workgroup") monotonic, align 4
+// SPIRV: [[TMP3:%.*]] = load atomic i32, ptr [[PTR3:%.+]] syncscope("subgroup") monotonic, align 4
+// SPIRV: [[TMP4:%.*]] = load atomic i32, ptr [[PTR4:%.+]] syncscope("singlethread") monotonic, align 4
int fi1b(int *i) {
*i = __scoped_atomic_load_n(i, __ATOMIC_RELAXED, __MEMORY_SCOPE_SYSTEM);
*i = __scoped_atomic_load_n(i, __ATOMIC_RELAXED, __MEMORY_SCOPE_DEVICE);
@@ -33,13 +47,18 @@ int fi1b(int *i) {
return *i;
}
-// CHECK-LABEL: define hidden void @fi2a(
-// CHECK: store atomic i32 [[TMP0:%.+]], ptr [[PTR0:%.+]] syncscope("one-as") monotonic, align 4
-// CHECK: store atomic i32 [[TMP1:%.+]], ptr [[PTR1:%.+]] syncscope("agent-one-as") monotonic, align 4
-// CHECK: store atomic i32 [[TMP2:%.+]], ptr [[PTR2:%.+]] syncscope("workgroup-one-as") monotonic, align 4
-// CHECK: store atomic i32 [[TMP3:%.+]], ptr [[PTR3:%.+]] syncscope("wavefront-one-as") monotonic, align 4
-// CHECK: store atomic i32 [[TMP4:%.+]], ptr [[PTR4:%.+]] syncscope("singlethread-one-as") monotonic, align 4
-//
+// AMDGCN-LABEL: define hidden void @fi2a(
+// AMDGCN: store atomic i32 [[TMP0:%.+]], ptr [[PTR0:%.+]] syncscope("one-as") monotonic, align 4
+// AMDGCN: store atomic i32 [[TMP1:%.+]], ptr [[PTR1:%.+]] syncscope("agent-one-as") monotonic, align 4
+// AMDGCN: store atomic i32 [[TMP2:%.+]], ptr [[PTR2:%.+]] syncscope("workgroup-one-as") monotonic, align 4
+// AMDGCN: store atomic i32 [[TMP3:%.+]], ptr [[PTR3:%.+]] syncscope("wavefront-one-as") monotonic, align 4
+// AMDGCN: store atomic i32 [[TMP4:%.+]], ptr [[PTR4:%.+]] syncscope("singlethread-one-as") monotonic, align 4
+// SPIRV-LABEL: define hidden spir_func void @fi2a(
+// SPIRV: store atomic i32 [[TMP0:%.+]], ptr [[PTR0:%.+]] monotonic, align 4
+// SPIRV: store atomic i32 [[TMP1:%.+]], ptr [[PTR1:%.+]] syncscope("device") monotonic, align 4
+// SPIRV: store atomic i32 [[TMP2:%.+]], ptr [[PTR2:%.+]] syncscope("workgroup") monotonic, align 4
+// SPIRV: store atomic i32 [[TMP3:%.+]], ptr [[PTR3:%.+]] syncscope("subgroup") monotonic, align 4
+// SPIRV: store atomic i32 [[TMP4:%.+]], ptr [[PTR4:%.+]] syncscope("singlethread") monotonic, align 4
void fi2a(int *i) {
int v = 1;
__scoped_atomic_store(i, &v, __ATOMIC_RELAXED, __MEMORY_SCOPE_SYSTEM);
@@ -49,12 +68,18 @@ void fi2a(int *i) {
__scoped_atomic_store(i, &v, __ATOMIC_RELAXED, __MEMORY_SCOPE_SINGLE);
}
-// CHECK-LABEL: define hidden void @fi2b(
-// CHECK: store atomic i32 [[TMP0:%.+]], ptr [[PTR0:%.+]] syncscope("one-as") monotonic, align 4
-// CHECK: store atomic i32 [[TMP1:%.+]], ptr [[PTR1:%.+]] syncscope("agent-one-as") monotonic, align 4
-// CHECK: store atomic i32 [[TMP2:%.+]], ptr [[PTR2:%.+]] syncscope("workgroup-one-as") monotonic, align 4
-// CHECK: store atomic i32 [[TMP3:%.+]], ptr [[PTR3:%.+]] syncscope("wavefront-one-as") monotonic, align 4
-// CHECK: store atomic i32 [[TMP4:%.+]], ptr [[PTR4:%.+]] syncscope("singlethread-one-as") monotonic, align 4
+// AMDGCN-LABEL: define hidden void @fi2b(
+// AMDGCN: store atomic i32 [[TMP0:%.+]], ptr [[PTR0:%.+]] syncscope("one-as") monotonic, align 4
+// AMDGCN: store atomic i32 [[TMP1:%.+]], ptr [[PTR1:%.+]] syncscope("agent-one-as") monotonic, align 4
+// AMDGCN: store atomic i32 [[TMP2:%.+]], ptr [[PTR2:%.+]] syncscope("workgroup-one-as") monotonic, align 4
+// AMDGCN: store atomic i32 [[TMP3:%.+]], ptr [[PTR3:%.+]] syncscope("wavefront-one-as") monotonic, align 4
+// AMDGCN: store atomic i32 [[TMP4:%.+]], ptr [[PTR4:%.+]] syncscope("singlethread-one-as") monotonic, align 4
+// SPIRV-LABEL: define hidden spir_func void @fi2b(
+// SPIRV: store atomic i32 [[TMP0:%.+]], ptr [[PTR0:%.+]] monotonic, align 4
+// SPIRV: store atomic i32 [[TMP1:%.+]], ptr [[PTR1:%.+]] syncscope("device") monotonic, align 4
+// SPIRV: store atomic i32 [[TMP2:%.+]], ptr [[PTR2:%.+]] syncscope("workgroup") monotonic, align 4
+// SPIRV: store atomic i32 [[TMP3:%.+]], ptr [[PTR3:%.+]] syncscope("subgroup") monotonic, align 4
+// SPIRV: store atomic i32 [[TMP4:%.+]], ptr [[PTR4:%.+]] syncscope("singlethread") monotonic, align 4
void fi2b(int *i) {
__scoped_atomic_store_n(i, 1, __ATOMIC_RELAXED, __MEMORY_SCOPE_SYSTEM);
__scoped_atomic_store_n(i, 1, __ATOMIC_RELAXED, __MEMORY_SCOPE_DEVICE);
@@ -63,15 +88,24 @@ void fi2b(int *i) {
__scoped_atomic_store_n(i, 1, __ATOMIC_RELAXED, __MEMORY_SCOPE_SINGLE);
}
-// CHECK-LABEL: define hidden void @fi3a(
-// CHECK: [[TMP0:%.*]] = atomicrmw add ptr [[PTR0:%.+]], i32 [[VAL0:.+]] syncscope("one-as") monotonic, align 4
-// CHECK: [[TMP1:%.*]] = atomicrmw sub ptr [[PTR1:%.+]], i32 [[VAL1:.+]] syncscope("one-as") monotonic, align 4
-// CHECK: [[TMP2:%.*]] = atomicrmw and ptr [[PTR2:%.+]], i32 [[VAL2:.+]] syncscope("one-as") monotonic, align 4
-// CHECK: [[TMP3:%.*]] = atomicrmw or ptr [[PTR3:%.+]], i32 [[VAL3:.+]] syncscope("one-as") monotonic, align 4
-// CHECK: [[TMP4:%.*]] = atomicrmw xor ptr [[PTR4:%.+]], i32 [[VAL4:.+]] syncscope("one-as") monotonic, align 4
-// CHECK: [[TMP5:%.*]] = atomicrmw nand ptr [[PTR5:%.+]], i32 [[VAL5:.+]] syncscope("one-as") monotonic, align 4
-// CHECK: [[TMP6:%.*]] = atomicrmw min ptr [[PTR6:%.+]], i32 [[VAL6:.+]] syncscope("one-as") monotonic, align 4
-// CHECK: [[TMP7:%.*]] = atomicrmw max ptr [[PTR7:%.+]], i32 [[VAL7:.+]] syncscope("one-as") monotonic, align 4
+// AMDGCN-LABEL: define hidden void @fi3a(
+// AMDGCN: [[TMP0:%.*]] = atomicrmw add ptr [[PTR0:%.+]], i32 [[VAL0:.+]] syncscope("one-as") monotonic, align 4
+// AMDGCN: [[TMP1:%.*]] = atomicrmw sub ptr [[PTR1:%.+]], i32 [[VAL1:.+]] syncscope("one-as") monotonic, align 4
+// AMDGCN: [[TMP2:%.*]] = atomicrmw and ptr [[PTR2:%.+]], i32 [[VAL2:.+]] syncscope("one-as") monotonic, align 4
+// AMDGCN: [[TMP3:%.*]] = atomicrmw or ptr [[PTR3:%.+]], i32 [[VAL3:.+]] syncscope("one-as") monotonic, align 4
+// AMDGCN: [[TMP4:%.*]] = atomicrmw xor ptr [[PTR4:%.+]], i32 [[VAL4:.+]] syncscope("one-as") monotonic, align 4
+// AMDGCN: [[TMP5:%.*]] = atomicrmw nand ptr [[PTR5:%.+]], i32 [[VAL5:.+]] syncscope("one-as") monotonic, align 4
+// AMDGCN: [[TMP6:%.*]] = atomicrmw min ptr [[PTR6:%.+]], i32 [[VAL6:.+]] syncscope("one-as") monotonic, align 4
+// AMDGCN: [[TMP7:%.*]] = atomicrmw max ptr [[PTR7:%.+]], i32 [[VAL7:.+]] syncscope("one-as") monotonic, align 4
+// SPIRV-LABEL: define hidden spir_func void @fi3a(
+// SPIRV: [[TMP0:%.*]] = atomicrmw add ptr [[PTR0:%.+]], i32 [[VAL0:.+]] monotonic, align 4
+// SPIRV: [[TMP1:%.*]] = atomicrmw sub ptr [[PTR1:%.+]], i32 [[VAL1:.+]] monotonic, align 4
+// SPIRV: [[TMP2:%.*]] = atomicrmw and ptr [[PTR2:%.+]], i32 [[VAL2:.+]] monotonic, align 4
+// SPIRV: [[TMP3:%.*]] = atomicrmw or ptr [[PTR3:%.+]], i32 [[VAL3:.+]] monotonic, align 4
+// SPIRV: [[TMP4:%.*]] = atomicrmw xor ptr [[PTR4:%.+]], i32 [[VAL4:.+]] monotonic, align 4
+// SPIRV: [[TMP5:%.*]] = atomicrmw nand ptr [[PTR5:%.+]], i32 [[VAL5:.+]] monotonic, align 4
+// SPIRV: [[TMP6:%.*]] = atomicrmw min ptr [[PTR6:%.+]], i32 [[VAL6:.+]] monotonic, align 4
+// SPIRV: [[TMP7:%.*]] = atomicrmw max ptr [[PTR7:%.+]], i32 [[VAL7:.+]] monotonic, align 4
void fi3a(int *a, int *b, int *c, int *d, int *e, int *f, int *g, int *h) {
*a = __scoped_atomic_fetch_add(a, 1, __ATOMIC_RELAXED, __MEMORY_SCOPE_SYSTEM);
*b = __scoped_atomic_fetch_sub(b, 1, __ATOMIC_RELAXED, __MEMORY_SCOPE_SYSTEM);
@@ -83,15 +117,24 @@ void fi3a(int *a, int *b, int *c, int *d, int *e, int *f, int *g, int *h) {
*h = __scoped_atomic_fetch_max(h, 1, __ATOMIC_RELAXED, __MEMORY_SCOPE_SYSTEM);
}
-// CHECK-LABEL: define hidden void @fi3b(
-// CHECK: [[TMP0:%.*]] = atomicrmw add ptr [[PTR0:%.+]], i32 [[VAL0:.+]] syncscope("agent-one-as") monotonic, align 4
-// CHECK: [[TMP1:%.*]] = atomicrmw sub ptr [[PTR1:%.+]], i32 [[VAL1:.+]] syncscope("agent-one-as") monotonic, align 4
-// CHECK: [[TMP2:%.*]] = atomicrmw and ptr [[PTR2:%.+]], i32 [[VAL2:.+]] syncscope("agent-one-as") monotonic, align 4
-// CHECK: [[TMP3:%.*]] = atomicrmw or ptr [[PTR3:%.+]], i32 [[VAL3:.+]] syncscope("agent-one-as") monotonic, align 4
-// CHECK: [[TMP4:%.*]] = atomicrmw xor ptr [[PTR4:%.+]], i32 [[VAL4:.+]] syncscope("agent-one-as") monotonic, align 4
-// CHECK: [[TMP5:%.*]] = atomicrmw nand ptr [[PTR5:%.+]], i32 [[VAL5:.+]] syncscope("agent-one-as") monotonic, align 4
-// CHECK: [[TMP6:%.*]] = atomicrmw min ptr [[PTR6:%.+]], i32 [[VAL6:.+]] syncscope("agent-one-as") monotonic, align 4
-// CHECK: [[TMP7:%.*]] = atomicrmw max ptr [[PTR7:%.+]], i32 [[VAL7:.+]] syncscope("agent-one-as") monotonic, align 4
+// AMDGCN-LABEL: define hidden void @fi3b(
+// AMDGCN: [[TMP0:%.*]] = atomicrmw add ptr [[PTR0:%.+]], i32 [[VAL0:.+]] syncscope("agent-one-as") monotonic, align 4
+// AMDGCN: [[TMP1:%.*]] = atomicrmw sub ptr [[PTR1:%.+]], i32 [[VAL1:.+]] syncscope("agent-one-as") monotonic, align 4
+// AMDGCN: [[TMP2:%.*]] = atomicrmw and ptr [[PTR2:%.+]], i32 [[VAL2:.+]] syncscope("agent-one-as") monotonic, align 4
+// AMDGCN: [[TMP3:%.*]] = atomicrmw or ptr [[PTR3:%.+]], i32 [[VAL3:.+]] syncscope("agent-one-as") monotonic, align 4
+// AMDGCN: [[TMP4:%.*]] = atomicrmw xor ptr [[PTR4:%.+]], i32 [[VAL4:.+]] syncscope("agent-one-as") monotonic, align 4
+// AMDGCN: [[TMP5:%.*]] = atomicrmw nand ptr [[PTR5:%.+]], i32 [[VAL5:.+]] syncscope("agent-one-as") monotonic, align 4
+// AMDGCN: [[TMP6:%.*]] = atomicrmw min ptr [[PTR6:%.+]], i32 [[VAL6:.+]] syncscope("agent-one-as") monotonic, align 4
+// AMDGCN: [[TMP7:%.*]] = atomicrmw max ptr [[PTR7:%.+]], i32 [[VAL7:.+]] syncscope("agent-one-as") monotonic, align 4
+// SPIRV-LABEL: define hidden spir_func void @fi3b(
+// SPIRV: [[TMP0:%.*]] = atomicrmw add ptr [[PTR0:%.+]], i32 [[VAL0:.+]] syncscope("device") monotonic, align 4
+// SPIRV: [[TMP1:%.*]] = atomicrmw sub ptr [[PTR1:%.+]], i32 [[VAL1:.+]] syncscope("device") monotonic, align 4
+// SPIRV: [[TMP2:%.*]] = atomicrmw and ptr [[PTR2:%.+]], i32 [[VAL2:.+]] syncscope("device") monotonic, align 4
+// SPIRV: [[TMP3:%.*]] = atomicrmw or ptr [[PTR3:%.+]], i32 [[VAL3:.+]] syncscope("device") monotonic, align 4
+// SPIRV: [[TMP4:%.*]] = atomicrmw xor ptr [[PTR4:%.+]], i32 [[VAL4:.+]] syncscope("device") monotonic, align 4
+// SPIRV: [[TMP5:%.*]] = atomicrmw nand ptr [[PTR5:%.+]], i32 [[VAL5:.+]] syncscope("device") monotonic, align 4
+// SPIRV: [[TMP6:%.*]] = atomicrmw min ptr [[PTR6:%.+]], i32 [[VAL6:.+]] syncscope("device") monotonic, align 4
+// SPIRV: [[TMP7:%.*]] = atomicrmw max ptr [[PTR7:%.+]], i32 [[VAL7:.+]] syncscope("device") monotonic, align 4
void fi3b(int *a, int *b, int *c, int *d, int *e, int *f, int *g, int *h) {
*a = __scoped_atomic_fetch_add(a, 1, __ATOMIC_RELAXED, __MEMORY_SCOPE_DEVICE);
*b = __scoped_atomic_fetch_sub(b, 1, __ATOMIC_RELAXED, __MEMORY_SCOPE_DEVICE);
@@ -103,15 +146,24 @@ void fi3b(int *a, int *b, int *c, int *d, int *e, int *f, int *g, int *h) {
*h = __scoped_atomic_fetch_max(h, 1, __ATOMIC_RELAXED, __MEMORY_SCOPE_DEVICE);
}
-// CHECK-LABEL: define hidden void @fi3c(
-// CHECK: [[TMP0:%.*]] = atomicrmw add ptr [[PTR0:%.+]], i32 [[VAL0:.+]] syncscope("workgroup-one-as") monotonic, align 4
-// CHECK: [[TMP1:%.*]] = atomicrmw sub ptr [[PTR1:%.+]], i32 [[VAL1:.+]] syncscope("workgroup-one-as") monotonic, align 4
-// CHECK: [[TMP2:%.*]] = atomicrmw and ptr [[PTR2:%.+]], i32 [[VAL2:.+]] syncscope("workgroup-one-as") monotonic, align 4
-// CHECK: [[TMP3:%.*]] = atomicrmw or ptr [[PTR3:%.+]], i32 [[VAL3:.+]] syncscope("workgroup-one-as") monotonic, align 4
-// CHECK: [[TMP4:%.*]] = atomicrmw xor ptr [[PTR4:%.+]], i32 [[VAL4:.+]] syncscope("workgroup-one-as") monotonic, align 4
-// CHECK: [[TMP5:%.*]] = atomicrmw nand ptr [[PTR5:%.+]], i32 [[VAL5:.+]] syncscope("workgroup-one-as") monotonic, align 4
-// CHECK: [[TMP6:%.*]] = atomicrmw min ptr [[PTR6:%.+]], i32 [[VAL6:.+]] syncscope("workgroup-one-as") monotonic, align 4
-// CHECK: [[TMP7:%.*]] = atomicrmw max ptr [[PTR7:%.+]], i32 [[VAL7:.+]] syncscope("workgroup-one-as") monotonic, align 4
+// AMDGCN-LABEL: define hidden void @fi3c(
+// AMDGCN: [[TMP0:%.*]] = atomicrmw add ptr [[PTR0:%.+]], i32 [[VAL0:.+]] syncscope("workgroup-one-as") monotonic, align 4
+// AMDGCN: [[TMP1:%.*]] = atomicrmw sub ptr [[PTR1:%.+]], i32 [[VAL1:.+]] syncscope("workgroup-one-as") monotonic, align 4
+// AMDGCN: [[TMP2:%.*]] = atomicrmw and ptr [[PTR2:%.+]], i32 [[VAL2:.+]] syncscope("workgroup-one-as") monotonic, align 4
+// AMDGCN: [[TMP3:%.*]] = atomicrmw or ptr [[PTR3:%.+]], i32 [[VAL3:.+]] syncscope("workgroup-one-as") monotonic, align 4
+// AMDGCN: [[TMP4:%.*]] = atomicrmw xor ptr [[PTR4:%.+]], i32 [[VAL4:.+]] syncscope("workgroup-one-as") monotonic, align 4
+// AMDGCN: [[TMP5:%.*]] = atomicrmw nand ptr [[PTR5:%.+]], i32 [[VAL5:.+]] syncscope("workgroup-one-as") monotonic, align 4
+// AMDGCN: [[TMP6:%.*]] = atomicrmw min ptr [[PTR6:%.+]], i32 [[VAL6:.+]] syncscope("workgroup-one-as") monotonic, align 4
+// AMDGCN: [[TMP7:%.*]] = atomicrmw max ptr [[PTR7:%.+]], i32 [[VAL7:.+]] syncscope("workgroup-one-as") monotonic, align 4
+// SPIRV-LABEL: define hidden spir_func void @fi3c(
+// SPIRV: [[TMP0:%.*]] = atomicrmw add ptr [[PTR0:%.+]], i32 [[VAL0:.+]] syncscope("workgroup") monotonic, align 4
+// SPIRV: [[TMP1:%.*]] = atomicrmw sub ptr [[PTR1:%.+]], i32 [[VAL1:.+]] syncscope("workgroup") monotonic, align 4
+// SPIRV: [[TMP2:%.*]] = atomicrmw and ptr [[PTR2:%.+]], i32 [[VAL2:.+]] syncscope("workgroup") monotonic, align 4
+// SPIRV: [[TMP3:%.*]] = atomicrmw or ptr [[PTR3:%.+]], i32 [[VAL3:.+]] syncscope("workgroup") monotonic, align 4
+// SPIRV: [[TMP4:%.*]] = atomicrmw xor ptr [[PTR4:%.+]], i32 [[VAL4:.+]] syncscope("workgroup") monotonic, align 4
+// SPIRV: [[TMP5:%.*]] = atomicrmw nand ptr [[PTR5:%.+]], i32 [[VAL5:.+]] syncscope("workgroup") monotonic, align 4
+// SPIRV: [[TMP6:%.*]] = atomicrmw min ptr [[PTR6:%.+]], i32 [[VAL6:.+]] syncscope("workgroup") monotonic, align 4
+// SPIRV: [[TMP7:%.*]] = atomicrmw max ptr [[PTR7:%.+]], i32 [[VAL7:.+]] syncscope("workgroup") monotonic, align 4
void fi3c(int *a, int *b, int *c, int *d, int *e, int *f, int *g, int *h) {
*a = __scoped_atomic_fetch_add(a, 1, __ATOMIC_RELAXED, __MEMORY_SCOPE_WRKGRP);
*b = __scoped_atomic_fetch_sub(b, 1, __ATOMIC_RELAXED, __MEMORY_SCOPE_WRKGRP);
@@ -123,15 +175,24 @@ void fi3c(int *a, int *b, int *c, int *d, int *e, int *f, int *g, int *h) {
*h = __scoped_atomic_fetch_max(h, 1, __ATOMIC_RELAXED, __MEMORY_SCOPE_WRKGRP);
}
-// CHECK-LABEL: define hidden void @fi3d(
-// CHECK: [[TMP0:%.*]] = atomicrmw add ptr [[PTR0:%.+]], i32 [[VAL0:.+]] syncscope("wavefront-one-as") monotonic, align 4
-// CHECK: [[TMP1:%.*]] = atomicrmw sub ptr [[PTR1:%.+]], i32 [[VAL1:.+]] syncscope("wavefront-one-as") monotonic, align 4
-// CHECK: [[TMP2:%.*]] = atomicrmw and ptr [[PTR2:%.+]], i32 [[VAL2:.+]] syncscope("wavefront-one-as") monotonic, align 4
-// CHECK: [[TMP3:%.*]] = atomicrmw or ptr [[PTR3:%.+]], i32 [[VAL3:.+]] syncscope("wavefront-one-as") monotonic, align 4
-// CHECK: [[TMP4:%.*]] = atomicrmw xor ptr [[PTR4:%.+]], i32 [[VAL4:.+]] syncscope("wavefront-one-as") monotonic, align 4
-// CHECK: [[TMP5:%.*]] = atomicrmw nand ptr [[PTR5:%.+]], i32 [[VAL5:.+]] syncscope("wavefront-one-as") monotonic, align 4
-// CHECK: [[TMP6:%.*]] = atomicrmw min ptr [[PTR6:%.+]], i32 [[VAL6:.+]] syncscope("wavefront-one-as") monotonic, align 4
-// CHECK: [[TMP7:%.*]] = atomicrmw max ptr [[PTR7:%.+]], i32 [[VAL7:.+]] syncscope("wavefront-one-as") monotonic, align 4
+// AMDGCN-LABEL: define hidden void @fi3d(
+// AMDGCN: [[TMP0:%.*]] = atomicrmw add ptr [[PTR0:%.+]], i32 [[VAL0:.+]] syncscope("wavefront-one-as") monotonic, align 4
+// AMDGCN: [[TMP1:%.*]] = atomicrmw sub ptr [[PTR1:%.+]], i32 [[VAL1:.+]] syncscope("wavefront-one-as") monotonic, align 4
+// AMDGCN: [[TMP2:%.*]] = atomicrmw and ptr [[PTR2:%.+]], i32 [[VAL2:.+]] syncscope("wavefront-one-as") monotonic, align 4
+// AMDGCN: [[TMP3:%.*]] = atomicrmw or ptr [[PTR3:%.+]], i32 [[VAL3:.+]] syncscope("wavefront-one-as") monotonic, align 4
+// AMDGCN: [[TMP4:%.*]] = atomicrmw xor ptr [[PTR4:%.+]], i32 [[VAL4:.+]] syncscope("wavefront-one-as") monotonic, align 4
+// AMDGCN: [[TMP5:%.*]] = atomicrmw nand ptr [[PTR5:%.+]], i32 [[VAL5:.+]] syncscope("wavefront-one-as") monotonic, align 4
+// AMDGCN: [[TMP6:%.*]] = atomicrmw min ptr [[PTR6:%.+]], i32 [[VAL6:.+]] syncscope("wavefront-one-as") monotonic, align 4
+// AMDGCN: [[TMP7:%.*]] = atomicrmw max ptr [[PTR7:%.+]], i32 [[VAL7:.+]] syncscope("wavefront-one-as") monotonic, align 4
+// SPIRV-LABEL: define hidden spir_func void @fi3d(
+// SPIRV: [[TMP0:%.*]] = atomicrmw add ptr [[PTR0:%.+]], i32 [[VAL0:.+]] syncscope("subgroup") monotonic, align 4
+// SPIRV: [[TMP1:%.*]] = atomicrmw sub ptr [[PTR1:%.+]], i32 [[VAL1:.+]] syncscope("subgroup") monotonic, align 4
+// SPIRV: [[TMP2:%.*]] = atomicrmw and ptr [[PTR2:%.+]], i32 [[VAL2:.+]] syncscope("subgroup") monotonic, align 4
+// SPIRV: [[TMP3:%.*]] = atomicrmw or ptr [[PTR3:%.+]], i32 [[VAL3:.+]] syncscope("subgroup") monotonic, align 4
+// SPIRV: [[TMP4:%.*]] = atomicrmw xor ptr [[PTR4:%.+]], i32 [[VAL4:.+]] syncscope("subgroup") monotonic, align 4
+// SPIRV: [[TMP5:%.*]] = atomicrmw nand ptr [[PTR5:%.+]], i32 [[VAL5:.+]] syncscope("subgroup") monotonic, align 4
+// SPIRV: [[TMP6:%.*]] = atomicrmw min ptr [[PTR6:%.+]], i32 [[VAL6:.+]] syncscope("subgroup") monotonic, align 4
+// SPIRV: [[TMP7:%.*]] = atomicrmw max ptr [[PTR7:%.+]], i32 [[VAL7:.+]] syncscope("subgroup") monotonic, align 4
void fi3d(int *a, int *b, int *c, int *d, int *e, int *f, int *g, int *h) {
*a = __scoped_atomic_fetch_add(a, 1, __ATOMIC_RELAXED, __MEMORY_SCOPE_WVFRNT);
*b = __scoped_atomic_fetch_sub(b, 1, __ATOMIC_RELAXED, __MEMORY_SCOPE_WVFRNT);
@@ -143,15 +204,24 @@ void fi3d(int *a, int *b, int *c, int *d, int *e, int *f, int *g, int *h) {
*h = __scoped_atomic_fetch_max(h, 1, __ATOMIC_RELAXED, __MEMORY_SCOPE_WVFRNT);
}
-// CHECK-LABEL: define hidden void @fi3e(
-// CHECK: [[TMP0:%.*]] = atomicrmw add ptr [[PTR0:%.+]], i32 [[VAL0:.+]] syncscope("singlethread-one-as") monotonic, align 4
-// CHECK: [[TMP1:%.*]] = atomicrmw sub ptr [[PTR1:%.+]], i32 [[VAL1:.+]] syncscope("singlethread-one-as") monotonic, align 4
-// CHECK: [[TMP2:%.*]] = atomicrmw and ptr [[PTR2:%.+]], i32 [[VAL2:.+]] syncscope("singlethread-one-as") monotonic, align 4
-// CHECK: [[TMP3:%.*]] = atomicrmw or ptr [[PTR3:%.+]], i32 [[VAL3:.+]] syncscope("singlethread-one-as") monotonic, align 4
-// CHECK: [[TMP4:%.*]] = atomicrmw xor ptr [[PTR4:%.+]], i32 [[VAL4:.+]] syncscope("singlethread-one-as") monotonic, align 4
-// CHECK: [[TMP5:%.*]] = atomicrmw nand ptr [[PTR5:%.+]], i32 [[VAL5:.+]] syncscope("singlethread-one-as") monotonic, align 4
-// CHECK: [[TMP6:%.*]] = atomicrmw min ptr [[PTR6:%.+]], i32 [[VAL6:.+]] syncscope("singlethread-one-as") monotonic, align 4
-// CHECK: [[TMP7:%.*]] = atomicrmw max ptr [[PTR7:%.+]], i32 [[VAL7:.+]] syncscope("singlethread-one-as") monotonic, align 4
+// AMDGCN-LABEL: define hidden void @fi3e(
+// AMDGCN: [[TMP0:%.*]] = atomicrmw add ptr [[PTR0:%.+]], i32 [[VAL0:.+]] syncscope("singlethread-one-as") monotonic, align 4
+// AMDGCN: [[TMP1:%.*]] = atomicrmw sub ptr [[PTR1:%.+]], i32 [[VAL1:.+]] syncscope("singlethread-one-as") monotonic, align 4
+// AMDGCN: [[TMP2:%.*]] = atomicrmw and ptr [[PTR2:%.+]], i32 [[VAL2:.+]] syncscope("singlethread-one-as") monotonic, align 4
+// AMDGCN: [[TMP3:%.*]] = atomicrmw or ptr [[PTR3:%.+]], i32 [[VAL3:.+]] syncscope("singlethread-one-as") monotonic, align 4
+// AMDGCN: [[TMP4:%.*]] = atomicrmw xor ptr [[PTR4:%.+]], i32 [[VAL4:.+]] syncscope("singlethread-one-as") monotonic, align 4
+// AMDGCN: [[TMP5:%.*]] = atomicrmw nand ptr [[PTR5:%.+]], i32 [[VAL5:.+]] syncscope("singlethread-one-as") monotonic, align 4
+// AMDGCN: [[TMP6:%.*]] = atomicrmw min ptr [[PTR6:%.+]], i32 [[VAL6:.+]] syncscope("singlethread-one-as") monotonic, align 4
+// AMDGCN: [[TMP7:%.*]] = atomicrmw max ptr [[PTR7:%.+]], i32 [[VAL7:.+]] syncscope("singlethread-one-as") monotonic, align 4
+// SPIRV-LABEL: define hidden spir_func void @fi3e(
+// SPIRV: [[TMP0:%.*]] = atomicrmw add ptr [[PTR0:%.+]], i32 [[VAL0:.+]] syncscope("singlethread") monotonic, align 4
+// SPIRV: [[TMP1:%.*]] = atomicrmw sub ptr [[PTR1:%.+]], i32 [[VAL1:.+]] syncscope("singlethread") monotonic, align 4
+// SPIRV: [[TMP2:%.*]] = atomicrmw and ptr [[PTR2:%.+]], i32 [[VAL2:.+]] syncscope("singlethread") monotonic, align 4
+// SPIRV: [[TMP3:%.*]] = atomicrmw or ptr [[PTR3:%.+]], i32 [[VAL3:.+]] syncscope("singlethread") monotonic, align 4
+// SPIRV: [[TMP4:%.*]] = atomicrmw xor ptr [[PTR4:%.+]], i32 [[VAL4:.+]] syncscope("singlethread") monotonic, align 4
+// SPIRV: [[TMP5:%.*]] = atomicrmw nand ptr [[PTR5:%.+]], i32 [[VAL5:.+]] syncscope("singlethread") monotonic, align 4
+// SPIRV: [[TMP6:%.*]] = atomicrmw min ptr [[PTR6:%.+]], i32 [[VAL6:.+]] syncscope("singlethread") monotonic, align 4
+// SPIRV: [[TMP7:%.*]] = atomicrmw max ptr [[PTR7:%.+]], i32 [[VAL7:.+]] syncscope("singlethread") monotonic, align 4
void fi3e(int *a, int *b, int *c, int *d, int *e, int *f, int *g, int *h) {
*a = __scoped_atomic_fetch_add(a, 1, __ATOMIC_RELAXED, __MEMORY_SCOPE_SINGLE);
*b = __scoped_atomic_fetch_sub(b, 1, __ATOMIC_RELAXED, __MEMORY_SCOPE_SINGLE);
@@ -163,8 +233,10 @@ void fi3e(int *a, int *b, int *c, int *d, int *e, int *f, int *g, int *h) {
*h = __scoped_atomic_fetch_max(h, 1, __ATOMIC_RELAXED, __MEMORY_SCOPE_SINGLE);
}
-// CHECK-LABEL: define hidden zeroext i1 @fi4a(
-// CHECK: [[TMP0:%.*]] = cmpxchg ptr [[PTR0:%.+]], i32 [[VAL0:.+]], i32 [[VAL1:.+]] syncscope("one-as") acquire acquire, align 4
+// AMDGCN-LABEL: define hidden zeroext i1 @fi4a(
+// AMDGCN-DAG: [[TMP0:%.*]] = cmpxchg ptr [[PTR0:%.+]], i32 [[VAL0:.+]], i32 [[VAL1:.+]] syncscope("one-as") acquire acquire, align 4
+// SPIRV-LABEL: define hidden spir_func zeroext i1 @fi4a(
+// SPIRV-DAG: [[TMP0:%.*]] = cmpxchg ptr [[PTR0:%.+]], i32 [[VAL0:.+]], i32 [[VAL1:.+]] acquire acquire, align 4
_Bool fi4a(int *i) {
int cmp = 0;
int desired = 1;
@@ -173,8 +245,10 @@ _Bool fi4a(int *i) {
__MEMORY_SCOPE_SYSTEM);
}
-// CHECK-LABEL: define hidden zeroext i1 @fi4b(
-// CHECK: [[TMP0:%.*]] = cmpxchg ptr [[PTR0:%.+]], i32 [[VAL0:.+]], i32 [[VAL1:.+]] syncscope("agent-one-as") acquire acquire, align 4
+// AMDGCN-LABEL: define hidden zeroext i1 @fi4b(
+// AMDGCN-DAG: [[TMP0:%.*]] = cmpxchg ptr [[PTR0:%.+]], i32 [[VAL0:.+]], i32 [[VAL1:.+]] syncscope("agent-one-as") acquire acquire, align 4
+// SPIRV-LABEL: define hidden spir_func zeroext i1 @fi4b(
+// SPIRV-DAG: [[TMP0:%.*]] = cmpxchg ptr [[PTR0:%.+]], i32 [[VAL0:.+]], i32 [[VAL1:.+]] syncscope("device") acquire acquire, align 4
_Bool fi4b(int *i) {
int cmp = 0;
int desired = 1;
@@ -183,8 +257,10 @@ _Bool fi4b(int *i) {
__MEMORY_SCOPE_DEVICE);
}
-// CHECK-LABEL: define hidden zeroext i1 @fi4c(
-// CHECK: [[TMP0:%.*]] = cmpxchg ptr [[PTR0:%.+]], i32 [[VAL0:.+]], i32 [[VAL1:.+]] syncscope("workgroup-one-as") acquire acquire, align 4
+// AMDGCN-LABEL: define hidden zeroext i1 @fi4c(
+// AMDGCN: [[TMP0:%.*]] = cmpxchg ptr [[PTR0:%.+]], i32 [[VAL0:.+]], i32 [[VAL1:.+]] syncscope("workgroup-one-as") acquire acquire, align 4
+// SPIRV-LABEL: define hidden spir_func zeroext i1 @fi4c(
+// SPIRV: [[TMP0:%.*]] = cmpxchg ptr [[PTR0:%.+]], i32 [[VAL0:.+]], i32 [[VAL1:.+]] syncscope("workgroup") acquire acquire, align 4
_Bool fi4c(int *i) {
int cmp = 0;
int desired = 1;
@@ -193,8 +269,10 @@ _Bool fi4c(int *i) {
__MEMORY_SCOPE_WRKGRP);
}
-// CHECK-LABEL: define hidden zeroext i1 @fi4d(
-// CHECK: [[TMP0:%.*]] = cmpxchg ptr [[PTR0:%.+]], i32 [[VAL0:.+]], i32 [[VAL1:.+]] syncscope("wavefront-one-as") acquire acquire, align 4
+// AMDGCN-LABEL: define hidden zeroext i1 @fi4d(
+// AMDGCN: [[TMP0:%.*]] = cmpxchg ptr [[PTR0:%.+]], i32 [[VAL0:.+]], i32 [[VAL1:.+]] syncscope("wavefront-one-as") acquire acquire, align 4
+// SPIRV-LABEL: define hidden spir_func zeroext i1 @fi4d(
+// SPIRV: [[TMP0:%.*]] = cmpxchg ptr [[PTR0:%.+]], i32 [[VAL0:.+]], i32 [[VAL1:.+]] syncscope("subgroup") acquire acquire, align 4
_Bool fi4d(int *i) {
int cmp = 0;
int desired = 1;
@@ -203,8 +281,10 @@ _Bool fi4d(int *i) {
__MEMORY_SCOPE_WVFRNT);
}
-// CHECK-LABEL: define hidden zeroext i1 @fi4e(
-// CHECK: [[TMP0:%.*]] = cmpxchg ptr [[PTR0:%.+]], i32 [[VAL0:.+]], i32 [[VAL1:.+]] syncscope("singlethread-one-as") acquire acquire, align 4
+// AMDGCN-LABEL: define hidden zeroext i1 @fi4e(
+// AMDGCN: [[TMP0:%.*]] = cmpxchg ptr [[PTR0:%.+]], i32 [[VAL0:.+]], i32 [[VAL1:.+]] syncscope("singlethread-one-as") acquire acquire, align 4
+// SPIRV-LABEL: define hidden spir_func zeroext i1 @fi4e(
+// SPIRV: [[TMP0:%.*]] = cmpxchg ptr [[PTR0:%.+]], i32 [[VAL0:.+]], i32 [[VAL1:.+]] syncscope("singlethread") acquire acquire, align 4
_Bool fi4e(int *i) {
int cmp = 0;
int desired = 1;
@@ -213,8 +293,10 @@ _Bool fi4e(int *i) {
__MEMORY_SCOPE_SINGLE);
}
-// CHECK-LABEL: define hidden zeroext i1 @fi5a(
-// CHECK: [[TMP0:%.*]] = cmpxchg weak ptr [[PTR0:%.+]], i32 [[VAL0:.+]], i32 [[VAL1:.+]] syncscope("one-as") acquire acquire, align 4
+// AMDGCN-LABEL: define hidden zeroext i1 @fi5a(
+// AMDGCN: [[TMP0:%.*]] = cmpxchg weak ptr [[PTR0:%.+]], i32 [[VAL0:.+]], i32 [[VAL1:.+]] syncscope("one-as") acquire acquire, align 4
+// SPIRV-LABEL: define hidden spir_func zeroext i1 @fi5a(
+// SPIRV: [[TMP0:%.*]] = cmpxchg weak ptr [[PTR0:%.+]], i32 [[VAL0:.+]], i32 [[VAL1:.+]] acquire acquire, align 4
_Bool fi5a(int *i) {
int cmp = 0;
return __scoped_atomic_compare_exchange_n(i, &cmp, 1, 1, __ATOMIC_ACQUIRE,
@@ -222,8 +304,10 @@ _Bool fi5a(int *i) {
__MEMORY_SCOPE_SYSTEM);
}
-// CHECK-LABEL: define hidden zeroext i1 @fi5b(
-// CHECK: [[TMP0:%.*]] = cmpxchg weak ptr [[PTR0:%.+]], i32 [[VAL0:.+]], i32 [[VAL1:.+]] syncscope("agent-one-as") acquire acquire, align 4
+// AMDGCN-LABEL: define hidden zeroext i1 @fi5b(
+// AMDGCN: [[TMP0:%.*]] = cmpxchg weak ptr [[PTR0:%.+]], i32 [[VAL0:.+]], i32 [[VAL1:.+]] syncscope("agent-one-as") acquire acquire, align 4
+// SPIRV-LABEL: define hidden spir_func zeroext i1 @fi5b(
+// SPIRV: [[TMP0:%.*]] = cmpxchg weak ptr [[PTR0:%.+]], i32 [[VAL0:.+]], i32 [[VAL1:.+]] syncscope("device") acquire acquire, align 4
_Bool fi5b(int *i) {
int cmp = 0;
return __scoped_atomic_compare_exchange_n(i, &cmp, 1, 1, __ATOMIC_ACQUIRE,
@@ -231,101 +315,127 @@ _Bool fi5b(int *i) {
__MEMORY_SCOPE_DEVICE);
}
-// CHECK-LABEL: define hidden zeroext i1 @fi5c(
-// CHECK: [[TMP0:%.*]] = cmpxchg weak ptr [[PTR0:%.+]], i32 [[VAL0:.+]], i32 [[VAL1:.+]] syncscope("workgroup-one-as") acquire acquire, align 4
+// AMDGCN-LABEL: define hidden zeroext i1 @fi5c(
+// AMDGCN: [[TMP0:%.*]] = cmpxchg weak ptr [[PTR0:%.+]], i32 [[VAL0:.+]], i32 [[VAL1:.+]] syncscope("workgroup-one-as") acquire acquire, align 4
+// SPIRV-LABEL: define hidden spir_func zeroext i1 @fi5c(
+// SPIRV: [[TMP0:%.*]] = cmpxchg weak ptr [[PTR0:%.+]], i32 [[VAL0:.+]], i32 [[VAL1:.+]] syncscope("workgroup") acquire acquire, align 4
_Bool fi5c(int *i) {
int cmp = 0;
return __scoped_atomic_compare_exchange_n(
i, &cmp, 1, 1, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE, __MEMORY_SCOPE_WRKGRP);
}
-// CHECK-LABEL: define hidden zeroext i1 @fi5d(
-// CHECK: [[TMP0:%.*]] = cmpxchg weak ptr [[PTR0:%.+]], i32 [[VAL0:.+]], i32 [[VAL1:.+]] syncscope("wavefront-one-as") acquire acquire, align 4
+// AMDGCN-LABEL: define hidden zeroext i1 @fi5d(
+// AMDGCN: [[TMP0:%.*]] = cmpxchg weak ptr [[PTR0:%.+]], i32 [[VAL0:.+]], i32 [[VAL1:.+]] syncscope("wavefront-one-as") acquire acquire, align 4
+// SPIRV-LABEL: define hidden spir_func zeroext i1 @fi5d(
+// SPIRV: [[TMP0:%.*]] = cmpxchg weak ptr [[PTR0:%.+]], i32 [[VAL0:.+]], i32 [[VAL1:.+]] syncscope("subgroup") acquire acquire, align 4
_Bool fi5d(int *i) {
int cmp = 0;
return __scoped_atomic_compare_exchange_n(
i, &cmp, 1, 1, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE, __MEMORY_SCOPE_WVFRNT);
}
-// CHECK-LABEL: define hidden zeroext i1 @fi5e(
-// CHECK: [[TMP0:%.*]] = cmpxchg weak ptr [[PTR0:%.+]], i32 [[VAL0:.+]], i32 [[VAL1:.+]] syncscope("singlethread-one-as") acquire acquire, align 4
+// AMDGCN-LABEL: define hidden zeroext i1 @fi5e(
+// AMDGCN: [[TMP0:%.*]] = cmpxchg weak ptr [[PTR0:%.+]], i32 [[VAL0:.+]], i32 [[VAL1:.+]] syncscope("singlethread-one-as") acquire acquire, align 4
+// SPIRV-LABEL: define hidden spir_func zeroext i1 @fi5e(
+// SPIRV: [[TMP0:%.*]] = cmpxchg weak ptr [[PTR0:%.+]], i32 [[VAL0:.+]], i32 [[VAL1:.+]] syncscope("singlethread") acquire acquire, align 4
_Bool fi5e(int *i) {
int cmp = 0;
return __scoped_atomic_compare_exchange_n(
i, &cmp, 1, 1, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE, __MEMORY_SCOPE_SINGLE);
}
-// CHECK-LABEL: define hidden i32 @fi6a(
-// CHECK: [[TMP0:%.*]] = atomicrmw xchg ptr [[PTR0:%.+]], i32 [[VAL0:.+]] syncscope("one-as") monotonic, align 4
+// AMDGCN-LABEL: define hidden i32 @fi6a(
+// AMDGCN: [[TMP0:%.*]] = atomicrmw xchg ptr [[PTR0:%.+]], i32 [[VAL0:.+]] syncscope("one-as") monotonic, align 4
+// SPIRV-LABEL: define hidden spir_func i32 @fi6a(
+// SPIRV: [[TMP0:%.*]] = atomicrmw xchg ptr [[PTR0:%.+]], i32 [[VAL0:.+]] monotonic, align 4
int fi6a(int *c, int *d) {
int ret;
__scoped_atomic_exchange(c, d, &ret, __ATOMIC_RELAXED, __MEMORY_SCOPE_SYSTEM);
return ret;
}
-// CHECK-LABEL: define hidden i32 @fi6b(
-// CHECK: [[TMP0:%.*]] = atomicrmw xchg ptr [[PTR0:%.+]], i32 [[VAL0:.+]] syncscope("agent-one-as") monotonic, align 4
+// AMDGCN-LABEL: define hidden i32 @fi6b(
+// AMDGCN: [[TMP0:%.*]] = atomicrmw xchg ptr [[PTR0:%.+]], i32 [[VAL0:.+]] syncscope("agent-one-as") monotonic, align 4
+// SPIRV-LABEL: define hidden spir_func i32 @fi6b(
+// SPIRV: [[TMP0:%.*]] = atomicrmw xchg ptr [[PTR0:%.+]], i32 [[VAL0:.+]] syncscope("device") monotonic, align 4
int fi6b(int *c, int *d) {
int ret;
__scoped_atomic_exchange(c, d, &ret, __ATOMIC_RELAXED, __MEMORY_SCOPE_DEVICE);
return ret;
}
-// CHECK-LABEL: define hidden i32 @fi6c(
-// CHECK: [[TMP0:%.*]] = atomicrmw xchg ptr [[PTR0:%.+]], i32 [[VAL0:.+]] syncscope("workgroup-one-as") monotonic, align 4
+// AMDGCN-LABEL: define hidden i32 @fi6c(
+// AMDGCN: [[TMP0:%.*]] = atomicrmw xchg ptr [[PTR0:%.+]], i32 [[VAL0:.+]] syncscope("workgroup-one-as") monotonic, align 4
+// SPIRV-LABEL: define hidden spir_func i32 @fi6c(
+// SPIRV: [[TMP0:%.*]] = atomicrmw xchg ptr [[PTR0:%.+]], i32 [[VAL0:.+]] syncscope("workgroup") monotonic, align 4
int fi6c(int *c, int *d) {
int ret;
__scoped_atomic_exchange(c, d, &ret, __ATOMIC_RELAXED, __MEMORY_SCOPE_WRKGRP);
return ret;
}
-// CHECK-LABEL: define hidden i32 @fi6d(
-// CHECK: [[TMP0:%.*]] = atomicrmw xchg ptr [[PTR0:%.+]], i32 [[VAL0:.+]] syncscope("wavefront-one-as") monotonic, align 4
+// AMDGCN-LABEL: define hidden i32 @fi6d(
+// AMDGCN: [[TMP0:%.*]] = atomicrmw xchg ptr [[PTR0:%.+]], i32 [[VAL0:.+]] syncscope("wavefront-one-as") monotonic, align 4
+// SPIRV-LABEL: define hidden spir_func i32 @fi6d(
+// SPIRV: [[TMP0:%.*]] = atomicrmw xchg ptr [[PTR0:%.+]], i32 [[VAL0:.+]] syncscope("subgroup") monotonic, align 4
int fi6d(int *c, int *d) {
int ret;
__scoped_atomic_exchange(c, d, &ret, __ATOMIC_RELAXED, __MEMORY_SCOPE_WVFRNT);
return ret;
}
-// CHECK-LABEL: define hidden i32 @fi6e(
-// CHECK: [[TMP0:%.*]] = atomicrmw xchg ptr [[PTR0:%.+]], i32 [[VAL0:.+]] syncscope("singlethread-one-as") monotonic, align 4
+// AMDGCN-LABEL: define hidden i32 @fi6e(
+// AMDGCN: [[TMP0:%.*]] = atomicrmw xchg ptr [[PTR0:%.+]], i32 [[VAL0:.+]] syncscope("singlethread-one-as") monotonic, align 4
+// SPIRV-LABEL: define hidden spir_func i32 @fi6e(
+// SPIRV: [[TMP0:%.*]] = atomicrmw xchg ptr [[PTR0:%.+]], i32 [[VAL0:.+]] syncscope("singlethread") monotonic, align 4
int fi6e(int *c, int *d) {
int ret;
__scoped_atomic_exchange(c, d, &ret, __ATOMIC_RELAXED, __MEMORY_SCOPE_SINGLE);
return ret;
}
-// CHECK-LABEL: define hidden zeroext i1 @fi7a(
-// CHECK: [[TMP0:%.*]] = atomicrmw xchg ptr [[PTR0:%.+]], i8 [[VAL0:.+]] syncscope("one-as") monotonic, align 1
+// AMDGCN-LABEL: define hidden zeroext i1 @fi7a(
+// AMDGCN: [[TMP0:%.*]] = atomicrmw xchg ptr [[PTR0:%.+]], i8 [[VAL0:.+]] syncscope("one-as") monotonic, align 1
+// SPIRV-LABEL: define hidden spir_func zeroext i1 @fi7a(
+// SPIRV: [[TMP0:%.*]] = atomicrmw xchg ptr [[PTR0:%.+]], i8 [[VAL0:.+]] monotonic, align 1
_Bool fi7a(_Bool *c) {
return __scoped_atomic_exchange_n(c, 1, __ATOMIC_RELAXED,
__MEMORY_SCOPE_SYSTEM);
}
-// CHECK-LABEL: define hidden zeroext i1 @fi7b(
-// CHECK: [[TMP0:%.*]] = atomicrmw xchg ptr [[PTR0:%.+]], i8 [[VAL0:.+]] syncscope("agent-one-as") monotonic, align 1
+// AMDGCN-LABEL: define hidden zeroext i1 @fi7b(
+// AMDGCN: [[TMP0:%.*]] = atomicrmw xchg ptr [[PTR0:%.+]], i8 [[VAL0:.+]] syncscope("agent-one-as") monotonic, align 1
+// SPIRV-LABEL: define hidden spir_func zeroext i1 @fi7b(
+// SPIRV: [[TMP0:%.*]] = atomicrmw xchg ptr [[PTR0:%.+]], i8 [[VAL0:.+]] syncscope("device") monotonic, align 1
_Bool fi7b(_Bool *c) {
return __scoped_atomic_exchange_n(c, 1, __ATOMIC_RELAXED,
__MEMORY_SCOPE_DEVICE);
}
-// CHECK-LABEL: define hidden zeroext i1 @fi7c(
-// CHECK: [[TMP0:%.*]] = atomicrmw xchg ptr [[PTR0:%.+]], i8 [[VAL0:.+]] syncscope("workgroup-one-as") monotonic, align 1
+// AMDGCN-LABEL: define hidden zeroext i1 @fi7c(
+// AMDGCN: [[TMP0:%.*]] = atomicrmw xchg ptr [[PTR0:%.+]], i8 [[VAL0:.+]] syncscope("workgroup-one-as") monotonic, align 1
+// SPIRV-LABEL: define hidden spir_func zeroext i1 @fi7c(
+// SPIRV: [[TMP0:%.*]] = atomicrmw xchg ptr [[PTR0:%.+]], i8 [[VAL0:.+]] syncscope("workgroup") monotonic, align 1
_Bool fi7c(_Bool *c) {
return __scoped_atomic_exchange_n(c, 1, __ATOMIC_RELAXED,
__MEMORY_SCOPE_WRKGRP);
}
-// CHECK-LABEL: define hidden zeroext i1 @fi7d(
-// CHECK: [[TMP0:%.*]] = atomicrmw xchg ptr [[PTR0:%.+]], i8 [[VAL0:.+]] syncscope("wavefront-one-as") monotonic, align 1
+// AMDGCN-LABEL: define hidden zeroext i1 @fi7d(
+// AMDGCN: [[TMP0:%.*]] = atomicrmw xchg ptr [[PTR0:%.+]], i8 [[VAL0:.+]] syncscope("wavefront-one-as") monotonic, align 1
+// SPIRV-LABEL: define hidden spir_func zeroext i1 @fi7d(
+// SPIRV: [[TMP0:%.*]] = atomicrmw xchg ptr [[PTR0:%.+]], i8 [[VAL0:.+]] syncscope("subgroup") monotonic, align 1
_Bool fi7d(_Bool *c) {
return __scoped_atomic_exchange_n(c, 1, __ATOMIC_RELAXED,
__MEMORY_SCOPE_WVFRNT);
}
-// CHECK-LABEL: define hidden zeroext i1 @fi7e(
-// CHECK: [[TMP0:%.*]] = atomicrmw xchg ptr [[PTR0:%.+]], i8 [[VAL0:.+]] syncscope("singlethread-one-as") monotonic, align 1
+// AMDGCN-LABEL: define hidden zeroext i1 @fi7e(
+// AMDGCN: [[TMP0:%.*]] = atomicrmw xchg ptr [[PTR0:%.+]], i8 [[VAL0:.+]] syncscope("singlethread-one-as") monotonic, align 1
+// SPIRV-LABEL: define hidden spir_func zeroext i1 @fi7e(
+// SPIRV: [[TMP0:%.*]] = atomicrmw xchg ptr [[PTR0:%.+]], i8 [[VAL0:.+]] syncscope("singlethread") monotonic, align 1
_Bool fi7e(_Bool *c) {
- return __scoped_atomic_exchange_n(c, 1, __ATOMIC_RELAXED,
+ return __scoped_atomic_exchange_n(c, 1, __ATOMIC_RELAXED,
__MEMORY_SCOPE_SINGLE);
}
diff --git a/clang/test/CodeGen/union-init2.c b/clang/test/CodeGen/union-init2.c
index 048ff00..ee35e78 100644
--- a/clang/test/CodeGen/union-init2.c
+++ b/clang/test/CodeGen/union-init2.c
@@ -2,11 +2,11 @@
// RUN: %clang_cc1 -x c++ %s -emit-llvm -triple x86_64-linux-gnu -o - | FileCheck %s --check-prefixes=CHECK-CXX
// Make sure we generate something sane instead of a ptrtoint
-// CHECK: @r, [4 x i8] undef
+// CHECK: @r, [4 x i8] zeroinitializer
union x {long long b;union x* a;} r = {.a = &r};
-// CHECK: global { [3 x i8], [5 x i8] } { [3 x i8] zeroinitializer, [5 x i8] undef }
+// CHECK: global { [3 x i8], [5 x i8] } zeroinitializer
union z {
char a[3];
long long b;
diff --git a/clang/test/CodeGen/windows-swiftcall.c b/clang/test/CodeGen/windows-swiftcall.c
index bc7832d..41569c2 100644
--- a/clang/test/CodeGen/windows-swiftcall.c
+++ b/clang/test/CodeGen/windows-swiftcall.c
@@ -5,8 +5,6 @@
#define ERROR __attribute__((swift_error_result))
#define CONTEXT __attribute__((swift_context))
-// CHECK: [[STRUCT2_RESULT:@.*]] = private {{.*}} constant [[STRUCT2_TYPE:%.*]] { i32 0, i8 0, i8 undef, i8 0, i32 0, i32 0 }
-
/*****************************************************************************/
/****************************** PARAMETER ABIS *******************************/
/*****************************************************************************/
@@ -142,8 +140,8 @@ typedef struct {
} struct_2;
TEST(struct_2);
// CHECK-LABEL: define dso_local swiftcc { i64, i64 } @return_struct_2() {{.*}}{
-// CHECK: [[RET:%.*]] = alloca [[STRUCT2_TYPE]], align 4
-// CHECK: call void @llvm.memcpy{{.*}}({{.*}}[[RET]], {{.*}}[[STRUCT2_RESULT]]
+// CHECK: [[RET:%.*]] = alloca [[STRUCT2:%.*]], align 4
+// CHECK: call void @llvm.memset
// CHECK: [[GEP0:%.*]] = getelementptr inbounds nuw { i64, i64 }, ptr [[RET]], i32 0, i32 0
// CHECK: [[T0:%.*]] = load i64, ptr [[GEP0]], align 4
// CHECK: [[GEP1:%.*]] = getelementptr inbounds nuw { i64, i64 }, ptr [[RET]], i32 0, i32 1
@@ -153,7 +151,7 @@ TEST(struct_2);
// CHECK: ret { i64, i64 } [[R1]]
// CHECK: }
// CHECK-LABEL: define dso_local swiftcc void @take_struct_2(i64 %0, i64 %1) {{.*}}{
-// CHECK: [[V:%.*]] = alloca [[STRUCT:%.*]], align 4
+// CHECK: [[V:%.*]] = alloca [[STRUCT2]], align 4
// CHECK: [[GEP0:%.*]] = getelementptr inbounds nuw { i64, i64 }, ptr [[V]], i32 0, i32 0
// CHECK: store i64 %0, ptr [[GEP0]], align 4
// CHECK: [[GEP1:%.*]] = getelementptr inbounds nuw { i64, i64 }, ptr [[V]], i32 0, i32 1
@@ -161,7 +159,7 @@ TEST(struct_2);
// CHECK: ret void
// CHECK: }
// CHECK-LABEL: define dso_local void @test_struct_2() {{.*}} {
-// CHECK: [[TMP:%.*]] = alloca [[STRUCT2_TYPE]], align 4
+// CHECK: [[TMP:%.*]] = alloca [[STRUCT2]], align 4
// CHECK: [[CALL:%.*]] = call swiftcc { i64, i64 } @return_struct_2()
// CHECK: [[GEP:%.*]] = getelementptr inbounds nuw {{.*}} [[TMP]], i32 0, i32 0
// CHECK: [[T0:%.*]] = extractvalue { i64, i64 } [[CALL]], 0
@@ -234,7 +232,7 @@ typedef union {
TEST(union_het_fp)
// CHECK-LABEL: define dso_local swiftcc i64 @return_union_het_fp()
// CHECK: [[RET:%.*]] = alloca [[UNION:%.*]], align 8
-// CHECK: call void @llvm.memcpy{{.*}}(ptr align {{[0-9]+}} [[RET]]
+// CHECK: call void @llvm.memset{{.*}}(ptr align {{[0-9]+}} [[RET]]
// CHECK: [[GEP:%.*]] = getelementptr inbounds nuw { i64 }, ptr [[RET]], i32 0, i32 0
// CHECK: [[R0:%.*]] = load i64, ptr [[GEP]], align 8
// CHECK: ret i64 [[R0]]
diff --git a/clang/test/CodeGenCXX/debug-info-line-if-2.cpp b/clang/test/CodeGenCXX/debug-info-line-if-2.cpp
new file mode 100644
index 0000000..8ab96a7
--- /dev/null
+++ b/clang/test/CodeGenCXX/debug-info-line-if-2.cpp
@@ -0,0 +1,45 @@
+// RUN: %clang_cc1 -debug-info-kind=limited -gno-column-info -triple=x86_64-pc-linux -emit-llvm %s -o - | FileCheck %s
+
+// The important thing is that the compare and the conditional branch have
+// locs with the same scope (the lexical block for the 'if'). By turning off
+// column info, they end up with the same !dbg record, which halves the number
+// of checks to verify the scope.
+
+int c = 2;
+
+int f() {
+#line 100
+ if (int a = 5; a > c)
+ return 1;
+ return 0;
+}
+// CHECK-LABEL: define {{.*}} @_Z1fv()
+// CHECK: = icmp {{.*}} !dbg [[F_CMP:![0-9]+]]
+// CHECK-NEXT: br i1 {{.*}} !dbg [[F_CMP]]
+
+int g() {
+#line 200
+ if (int a = f())
+ return 2;
+ return 3;
+}
+// CHECK-LABEL: define {{.*}} @_Z1gv()
+// CHECK: = icmp {{.*}} !dbg [[G_CMP:![0-9]+]]
+// CHECK-NEXT: br i1 {{.*}} !dbg [[G_CMP]]
+
+int h() {
+#line 300
+ if (c > 3)
+ return 4;
+ return 5;
+}
+// CHECK-LABEL: define {{.*}} @_Z1hv()
+// CHECK: = icmp {{.*}} !dbg [[H_CMP:![0-9]+]]
+// CHECK-NEXT: br i1 {{.*}} !dbg [[H_CMP]]
+
+// CHECK-DAG: [[F_CMP]] = !DILocation(line: 100, scope: [[F_SCOPE:![0-9]+]]
+// CHECK-DAG: [[F_SCOPE]] = distinct !DILexicalBlock({{.*}} line: 100)
+// CHECK-DAG: [[G_CMP]] = !DILocation(line: 200, scope: [[G_SCOPE:![0-9]+]]
+// CHECK-DAG: [[G_SCOPE]] = distinct !DILexicalBlock({{.*}} line: 200)
+// CHECK-DAG: [[H_CMP]] = !DILocation(line: 300, scope: [[H_SCOPE:![0-9]+]]
+// CHECK-DAG: [[H_SCOPE]] = distinct !DILexicalBlock({{.*}} line: 300)
diff --git a/clang/test/CodeGenObjC/designated-initializers.m b/clang/test/CodeGenObjC/designated-initializers.m
index a67f82e..ce58f6c 100644
--- a/clang/test/CodeGenObjC/designated-initializers.m
+++ b/clang/test/CodeGenObjC/designated-initializers.m
@@ -4,4 +4,4 @@ struct overwrite_string_struct {
char L[3];
int M;
} overwrite_string[] = { { { @encode(void**) }, 1 }, [0].L[1] = 'x'};
-// CHECK: [3 x i8] c"^xv", i32 1
+// CHECK: [3 x i8] c"^xv", i8 0, i32 1
diff --git a/clang/test/CodeGenOpenCL/atomic-builtins-default-to-device-scope.cl b/clang/test/CodeGenOpenCL/atomic-builtins-default-to-device-scope.cl
new file mode 100644
index 0000000..5af2d807
--- /dev/null
+++ b/clang/test/CodeGenOpenCL/atomic-builtins-default-to-device-scope.cl
@@ -0,0 +1,235 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// RUN: %clang_cc1 %s -cl-std=CL2.0 -emit-llvm -O3 -o - -triple=amdgcn-amd-amdhsa \
+// RUN: | FileCheck %s --check-prefix=AMDGCN
+// RUN: %clang_cc1 %s -cl-std=CL2.0 -emit-llvm -O3 -o - -triple=spirv64-unknown-unknown \
+// RUN: | FileCheck %s --check-prefix=SPIRV
+
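+// Non-scoped __atomic builtins default to device-wide scope on these targets:
+// syncscope("agent") on AMDGCN and syncscope("device") on SPIR-V, as the
+// checks below verify.
+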
+// AMDGCN-LABEL: define dso_local i32 @load(
+// AMDGCN-SAME: ptr nocapture noundef readonly [[P:%.*]]) local_unnamed_addr #[[ATTR0:[0-9]+]] {
+// AMDGCN-NEXT: [[ENTRY:.*:]]
+// AMDGCN-NEXT: [[TMP0:%.*]] = load atomic i32, ptr [[P]] syncscope("agent") seq_cst, align 4
+// AMDGCN-NEXT: ret i32 [[TMP0]]
+//
+// SPIRV-LABEL: define spir_func i32 @load(
+// SPIRV-SAME: ptr addrspace(4) nocapture noundef readonly [[P:%.*]]) local_unnamed_addr #[[ATTR0:[0-9]+]] {
+// SPIRV-NEXT: [[ENTRY:.*:]]
+// SPIRV-NEXT: [[TMP0:%.*]] = load atomic i32, ptr addrspace(4) [[P]] syncscope("device") seq_cst, align 4
+// SPIRV-NEXT: ret i32 [[TMP0]]
+//
+int load(int *p) { return __atomic_load_n(p, __ATOMIC_SEQ_CST); }
+// AMDGCN-LABEL: define dso_local void @store(
+// AMDGCN-SAME: ptr nocapture noundef writeonly [[P:%.*]], i32 noundef [[X:%.*]]) local_unnamed_addr #[[ATTR0]] {
+// AMDGCN-NEXT: [[ENTRY:.*:]]
+// AMDGCN-NEXT: store atomic i32 [[X]], ptr [[P]] syncscope("agent") seq_cst, align 4
+// AMDGCN-NEXT: ret void
+//
+// SPIRV-LABEL: define spir_func void @store(
+// SPIRV-SAME: ptr addrspace(4) nocapture noundef writeonly [[P:%.*]], i32 noundef [[X:%.*]]) local_unnamed_addr #[[ATTR0]] {
+// SPIRV-NEXT: [[ENTRY:.*:]]
+// SPIRV-NEXT: store atomic i32 [[X]], ptr addrspace(4) [[P]] syncscope("device") seq_cst, align 4
+// SPIRV-NEXT: ret void
+//
+void store(int *p, int x) { return __atomic_store_n(p, x, __ATOMIC_SEQ_CST); }
+// AMDGCN-LABEL: define dso_local i32 @add(
+// AMDGCN-SAME: ptr nocapture noundef [[P:%.*]], i32 noundef [[X:%.*]]) local_unnamed_addr #[[ATTR0]] {
+// AMDGCN-NEXT: [[ENTRY:.*:]]
+// AMDGCN-NEXT: [[TMP0:%.*]] = atomicrmw add ptr [[P]], i32 [[X]] syncscope("agent") seq_cst, align 4
+// AMDGCN-NEXT: ret i32 [[TMP0]]
+//
+// SPIRV-LABEL: define spir_func i32 @add(
+// SPIRV-SAME: ptr addrspace(4) nocapture noundef [[P:%.*]], i32 noundef [[X:%.*]]) local_unnamed_addr #[[ATTR0]] {
+// SPIRV-NEXT: [[ENTRY:.*:]]
+// SPIRV-NEXT: [[TMP0:%.*]] = atomicrmw add ptr addrspace(4) [[P]], i32 [[X]] syncscope("device") seq_cst, align 4
+// SPIRV-NEXT: ret i32 [[TMP0]]
+//
+int add(int *p, int x) { return __atomic_fetch_add(p, x, __ATOMIC_SEQ_CST); }
+// AMDGCN-LABEL: define dso_local float @fadd(
+// AMDGCN-SAME: ptr nocapture noundef [[P:%.*]], float noundef [[X:%.*]]) local_unnamed_addr #[[ATTR0]] {
+// AMDGCN-NEXT: [[ENTRY:.*:]]
+// AMDGCN-NEXT: [[TMP0:%.*]] = atomicrmw fadd ptr [[P]], float [[X]] syncscope("agent") seq_cst, align 4
+// AMDGCN-NEXT: ret float [[TMP0]]
+//
+// SPIRV-LABEL: define spir_func float @fadd(
+// SPIRV-SAME: ptr addrspace(4) nocapture noundef [[P:%.*]], float noundef [[X:%.*]]) local_unnamed_addr #[[ATTR0]] {
+// SPIRV-NEXT: [[ENTRY:.*:]]
+// SPIRV-NEXT: [[TMP0:%.*]] = atomicrmw fadd ptr addrspace(4) [[P]], float [[X]] syncscope("device") seq_cst, align 4
+// SPIRV-NEXT: ret float [[TMP0]]
+//
+float fadd(float *p, float x) { return __atomic_fetch_add(p, x, __ATOMIC_SEQ_CST); }
+// AMDGCN-LABEL: define dso_local i32 @sub(
+// AMDGCN-SAME: ptr nocapture noundef [[P:%.*]], i32 noundef [[X:%.*]]) local_unnamed_addr #[[ATTR0]] {
+// AMDGCN-NEXT: [[ENTRY:.*:]]
+// AMDGCN-NEXT: [[TMP0:%.*]] = atomicrmw sub ptr [[P]], i32 [[X]] syncscope("agent") seq_cst, align 4
+// AMDGCN-NEXT: ret i32 [[TMP0]]
+//
+// SPIRV-LABEL: define spir_func i32 @sub(
+// SPIRV-SAME: ptr addrspace(4) nocapture noundef [[P:%.*]], i32 noundef [[X:%.*]]) local_unnamed_addr #[[ATTR0]] {
+// SPIRV-NEXT: [[ENTRY:.*:]]
+// SPIRV-NEXT: [[TMP0:%.*]] = atomicrmw sub ptr addrspace(4) [[P]], i32 [[X]] syncscope("device") seq_cst, align 4
+// SPIRV-NEXT: ret i32 [[TMP0]]
+//
+int sub(int *p, int x) { return __atomic_fetch_sub(p, x, __ATOMIC_SEQ_CST); }
+// AMDGCN-LABEL: define dso_local float @fsub(
+// AMDGCN-SAME: ptr nocapture noundef [[P:%.*]], float noundef [[X:%.*]]) local_unnamed_addr #[[ATTR0]] {
+// AMDGCN-NEXT: [[ENTRY:.*:]]
+// AMDGCN-NEXT: [[TMP0:%.*]] = atomicrmw fsub ptr [[P]], float [[X]] syncscope("agent") seq_cst, align 4
+// AMDGCN-NEXT: ret float [[TMP0]]
+//
+// SPIRV-LABEL: define spir_func float @fsub(
+// SPIRV-SAME: ptr addrspace(4) nocapture noundef [[P:%.*]], float noundef [[X:%.*]]) local_unnamed_addr #[[ATTR0]] {
+// SPIRV-NEXT: [[ENTRY:.*:]]
+// SPIRV-NEXT: [[TMP0:%.*]] = atomicrmw fsub ptr addrspace(4) [[P]], float [[X]] syncscope("device") seq_cst, align 4
+// SPIRV-NEXT: ret float [[TMP0]]
+//
+float fsub(float *p, float x) { return __atomic_fetch_sub(p, x, __ATOMIC_SEQ_CST); }
+// AMDGCN-LABEL: define dso_local i32 @and(
+// AMDGCN-SAME: ptr nocapture noundef [[P:%.*]], i32 noundef [[X:%.*]]) local_unnamed_addr #[[ATTR0]] {
+// AMDGCN-NEXT: [[ENTRY:.*:]]
+// AMDGCN-NEXT: [[TMP0:%.*]] = atomicrmw and ptr [[P]], i32 [[X]] syncscope("agent") seq_cst, align 4
+// AMDGCN-NEXT: ret i32 [[TMP0]]
+//
+// SPIRV-LABEL: define spir_func i32 @and(
+// SPIRV-SAME: ptr addrspace(4) nocapture noundef [[P:%.*]], i32 noundef [[X:%.*]]) local_unnamed_addr #[[ATTR0]] {
+// SPIRV-NEXT: [[ENTRY:.*:]]
+// SPIRV-NEXT: [[TMP0:%.*]] = atomicrmw and ptr addrspace(4) [[P]], i32 [[X]] syncscope("device") seq_cst, align 4
+// SPIRV-NEXT: ret i32 [[TMP0]]
+//
+int and(int *p, int x) { return __atomic_fetch_and(p, x, __ATOMIC_SEQ_CST); }
+// AMDGCN-LABEL: define dso_local i32 @nand(
+// AMDGCN-SAME: ptr nocapture noundef [[P:%.*]], i32 noundef [[X:%.*]]) local_unnamed_addr #[[ATTR0]] {
+// AMDGCN-NEXT: [[ENTRY:.*:]]
+// AMDGCN-NEXT: [[TMP0:%.*]] = atomicrmw nand ptr [[P]], i32 [[X]] syncscope("agent") seq_cst, align 4
+// AMDGCN-NEXT: ret i32 [[TMP0]]
+//
+// SPIRV-LABEL: define spir_func i32 @nand(
+// SPIRV-SAME: ptr addrspace(4) nocapture noundef [[P:%.*]], i32 noundef [[X:%.*]]) local_unnamed_addr #[[ATTR0]] {
+// SPIRV-NEXT: [[ENTRY:.*:]]
+// SPIRV-NEXT: [[TMP0:%.*]] = atomicrmw nand ptr addrspace(4) [[P]], i32 [[X]] syncscope("device") seq_cst, align 4
+// SPIRV-NEXT: ret i32 [[TMP0]]
+//
+int nand(int *p, int x) { return __atomic_fetch_nand(p, x, __ATOMIC_SEQ_CST); }
+// AMDGCN-LABEL: define dso_local i32 @or(
+// AMDGCN-SAME: ptr nocapture noundef [[P:%.*]], i32 noundef [[X:%.*]]) local_unnamed_addr #[[ATTR0]] {
+// AMDGCN-NEXT: [[ENTRY:.*:]]
+// AMDGCN-NEXT: [[TMP0:%.*]] = atomicrmw or ptr [[P]], i32 [[X]] syncscope("agent") seq_cst, align 4
+// AMDGCN-NEXT: ret i32 [[TMP0]]
+//
+// SPIRV-LABEL: define spir_func i32 @or(
+// SPIRV-SAME: ptr addrspace(4) nocapture noundef [[P:%.*]], i32 noundef [[X:%.*]]) local_unnamed_addr #[[ATTR0]] {
+// SPIRV-NEXT: [[ENTRY:.*:]]
+// SPIRV-NEXT: [[TMP0:%.*]] = atomicrmw or ptr addrspace(4) [[P]], i32 [[X]] syncscope("device") seq_cst, align 4
+// SPIRV-NEXT: ret i32 [[TMP0]]
+//
+int or(int *p, int x) { return __atomic_fetch_or(p, x, __ATOMIC_SEQ_CST); }
+// AMDGCN-LABEL: define dso_local i32 @xor(
+// AMDGCN-SAME: ptr nocapture noundef [[P:%.*]], i32 noundef [[X:%.*]]) local_unnamed_addr #[[ATTR0]] {
+// AMDGCN-NEXT: [[ENTRY:.*:]]
+// AMDGCN-NEXT: [[TMP0:%.*]] = atomicrmw xor ptr [[P]], i32 [[X]] syncscope("agent") seq_cst, align 4
+// AMDGCN-NEXT: ret i32 [[TMP0]]
+//
+// SPIRV-LABEL: define spir_func i32 @xor(
+// SPIRV-SAME: ptr addrspace(4) nocapture noundef [[P:%.*]], i32 noundef [[X:%.*]]) local_unnamed_addr #[[ATTR0]] {
+// SPIRV-NEXT: [[ENTRY:.*:]]
+// SPIRV-NEXT: [[TMP0:%.*]] = atomicrmw xor ptr addrspace(4) [[P]], i32 [[X]] syncscope("device") seq_cst, align 4
+// SPIRV-NEXT: ret i32 [[TMP0]]
+//
+int xor(int *p, int x) { return __atomic_fetch_xor(p, x, __ATOMIC_SEQ_CST); }
+// AMDGCN-LABEL: define dso_local i32 @min(
+// AMDGCN-SAME: ptr nocapture noundef [[P:%.*]], i32 noundef [[X:%.*]]) local_unnamed_addr #[[ATTR0]] {
+// AMDGCN-NEXT: [[ENTRY:.*:]]
+// AMDGCN-NEXT: [[TMP0:%.*]] = atomicrmw min ptr [[P]], i32 [[X]] syncscope("agent") seq_cst, align 4
+// AMDGCN-NEXT: ret i32 [[TMP0]]
+//
+// SPIRV-LABEL: define spir_func i32 @min(
+// SPIRV-SAME: ptr addrspace(4) nocapture noundef [[P:%.*]], i32 noundef [[X:%.*]]) local_unnamed_addr #[[ATTR0]] {
+// SPIRV-NEXT: [[ENTRY:.*:]]
+// SPIRV-NEXT: [[TMP0:%.*]] = atomicrmw min ptr addrspace(4) [[P]], i32 [[X]] syncscope("device") seq_cst, align 4
+// SPIRV-NEXT: ret i32 [[TMP0]]
+//
+int min(int *p, int x) { return __atomic_fetch_min(p, x, __ATOMIC_SEQ_CST); }
+// AMDGCN-LABEL: define dso_local float @fmin(
+// AMDGCN-SAME: ptr nocapture noundef [[P:%.*]], float noundef [[X:%.*]]) local_unnamed_addr #[[ATTR0]] {
+// AMDGCN-NEXT: [[ENTRY:.*:]]
+// AMDGCN-NEXT: [[TMP0:%.*]] = atomicrmw fmin ptr [[P]], float [[X]] syncscope("agent") seq_cst, align 4
+// AMDGCN-NEXT: ret float [[TMP0]]
+//
+// SPIRV-LABEL: define spir_func float @fmin(
+// SPIRV-SAME: ptr addrspace(4) nocapture noundef [[P:%.*]], float noundef [[X:%.*]]) local_unnamed_addr #[[ATTR0]] {
+// SPIRV-NEXT: [[ENTRY:.*:]]
+// SPIRV-NEXT: [[TMP0:%.*]] = atomicrmw fmin ptr addrspace(4) [[P]], float [[X]] syncscope("device") seq_cst, align 4
+// SPIRV-NEXT: ret float [[TMP0]]
+//
+float fmin(float *p, float x) { return __atomic_fetch_min(p, x, __ATOMIC_SEQ_CST); }
+// AMDGCN-LABEL: define dso_local i32 @max(
+// AMDGCN-SAME: ptr nocapture noundef [[P:%.*]], i32 noundef [[X:%.*]]) local_unnamed_addr #[[ATTR0]] {
+// AMDGCN-NEXT: [[ENTRY:.*:]]
+// AMDGCN-NEXT: [[TMP0:%.*]] = atomicrmw max ptr [[P]], i32 [[X]] syncscope("agent") seq_cst, align 4
+// AMDGCN-NEXT: ret i32 [[TMP0]]
+//
+// SPIRV-LABEL: define spir_func i32 @max(
+// SPIRV-SAME: ptr addrspace(4) nocapture noundef [[P:%.*]], i32 noundef [[X:%.*]]) local_unnamed_addr #[[ATTR0]] {
+// SPIRV-NEXT: [[ENTRY:.*:]]
+// SPIRV-NEXT: [[TMP0:%.*]] = atomicrmw max ptr addrspace(4) [[P]], i32 [[X]] syncscope("device") seq_cst, align 4
+// SPIRV-NEXT: ret i32 [[TMP0]]
+//
+int max(int *p, int x) { return __atomic_fetch_max(p, x, __ATOMIC_SEQ_CST); }
+// AMDGCN-LABEL: define dso_local float @fmax(
+// AMDGCN-SAME: ptr nocapture noundef [[P:%.*]], float noundef [[X:%.*]]) local_unnamed_addr #[[ATTR0]] {
+// AMDGCN-NEXT: [[ENTRY:.*:]]
+// AMDGCN-NEXT: [[TMP0:%.*]] = atomicrmw fmax ptr [[P]], float [[X]] syncscope("agent") seq_cst, align 4
+// AMDGCN-NEXT: ret float [[TMP0]]
+//
+// SPIRV-LABEL: define spir_func float @fmax(
+// SPIRV-SAME: ptr addrspace(4) nocapture noundef [[P:%.*]], float noundef [[X:%.*]]) local_unnamed_addr #[[ATTR0]] {
+// SPIRV-NEXT: [[ENTRY:.*:]]
+// SPIRV-NEXT: [[TMP0:%.*]] = atomicrmw fmax ptr addrspace(4) [[P]], float [[X]] syncscope("device") seq_cst, align 4
+// SPIRV-NEXT: ret float [[TMP0]]
+//
+float fmax(float *p, float x) { return __atomic_fetch_max(p, x, __ATOMIC_SEQ_CST); }
+// AMDGCN-LABEL: define dso_local i32 @xchg(
+// AMDGCN-SAME: ptr nocapture noundef [[P:%.*]], i32 noundef [[X:%.*]]) local_unnamed_addr #[[ATTR0]] {
+// AMDGCN-NEXT: [[ENTRY:.*:]]
+// AMDGCN-NEXT: [[TMP0:%.*]] = atomicrmw xchg ptr [[P]], i32 [[X]] syncscope("agent") seq_cst, align 4
+// AMDGCN-NEXT: ret i32 [[TMP0]]
+//
+// SPIRV-LABEL: define spir_func i32 @xchg(
+// SPIRV-SAME: ptr addrspace(4) nocapture noundef [[P:%.*]], i32 noundef [[X:%.*]]) local_unnamed_addr #[[ATTR0]] {
+// SPIRV-NEXT: [[ENTRY:.*:]]
+// SPIRV-NEXT: [[TMP0:%.*]] = atomicrmw xchg ptr addrspace(4) [[P]], i32 [[X]] syncscope("device") seq_cst, align 4
+// SPIRV-NEXT: ret i32 [[TMP0]]
+//
+int xchg(int *p, int x) { return __atomic_exchange_n(p, x, __ATOMIC_SEQ_CST); }
+// AMDGCN-LABEL: define dso_local range(i32 0, 2) i32 @cmpxchg(
+// AMDGCN-SAME: ptr nocapture noundef [[P:%.*]], i32 noundef [[X:%.*]], i32 noundef [[Y:%.*]]) local_unnamed_addr #[[ATTR0]] {
+// AMDGCN-NEXT: [[ENTRY:.*:]]
+// AMDGCN-NEXT: [[TMP0:%.*]] = cmpxchg ptr [[P]], i32 [[X]], i32 [[Y]] syncscope("agent") seq_cst seq_cst, align 4
+// AMDGCN-NEXT: [[TMP1:%.*]] = extractvalue { i32, i1 } [[TMP0]], 1
+// AMDGCN-NEXT: [[CONV:%.*]] = zext i1 [[TMP1]] to i32
+// AMDGCN-NEXT: ret i32 [[CONV]]
+//
+// SPIRV-LABEL: define spir_func range(i32 0, 2) i32 @cmpxchg(
+// SPIRV-SAME: ptr addrspace(4) nocapture noundef [[P:%.*]], i32 noundef [[X:%.*]], i32 noundef [[Y:%.*]]) local_unnamed_addr #[[ATTR0]] {
+// SPIRV-NEXT: [[ENTRY:.*:]]
+// SPIRV-NEXT: [[TMP0:%.*]] = cmpxchg ptr addrspace(4) [[P]], i32 [[X]], i32 [[Y]] syncscope("device") seq_cst seq_cst, align 4
+// SPIRV-NEXT: [[TMP1:%.*]] = extractvalue { i32, i1 } [[TMP0]], 1
+// SPIRV-NEXT: [[CONV:%.*]] = zext i1 [[TMP1]] to i32
+// SPIRV-NEXT: ret i32 [[CONV]]
+//
+int cmpxchg(int *p, int x, int y) { return __atomic_compare_exchange(p, &x, &y, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); }
+// AMDGCN-LABEL: define dso_local range(i32 0, 2) i32 @cmpxchg_weak(
+// AMDGCN-SAME: ptr nocapture noundef [[P:%.*]], i32 noundef [[X:%.*]], i32 noundef [[Y:%.*]]) local_unnamed_addr #[[ATTR0]] {
+// AMDGCN-NEXT: [[ENTRY:.*:]]
+// AMDGCN-NEXT: [[TMP0:%.*]] = cmpxchg weak ptr [[P]], i32 [[X]], i32 [[Y]] syncscope("agent") seq_cst seq_cst, align 4
+// AMDGCN-NEXT: [[TMP1:%.*]] = extractvalue { i32, i1 } [[TMP0]], 1
+// AMDGCN-NEXT: [[CONV:%.*]] = zext i1 [[TMP1]] to i32
+// AMDGCN-NEXT: ret i32 [[CONV]]
+//
+// SPIRV-LABEL: define spir_func range(i32 0, 2) i32 @cmpxchg_weak(
+// SPIRV-SAME: ptr addrspace(4) nocapture noundef [[P:%.*]], i32 noundef [[X:%.*]], i32 noundef [[Y:%.*]]) local_unnamed_addr #[[ATTR0]] {
+// SPIRV-NEXT: [[ENTRY:.*:]]
+// SPIRV-NEXT: [[TMP0:%.*]] = cmpxchg weak ptr addrspace(4) [[P]], i32 [[X]], i32 [[Y]] syncscope("device") seq_cst seq_cst, align 4
+// SPIRV-NEXT: [[TMP1:%.*]] = extractvalue { i32, i1 } [[TMP0]], 1
+// SPIRV-NEXT: [[CONV:%.*]] = zext i1 [[TMP1]] to i32
+// SPIRV-NEXT: ret i32 [[CONV]]
+//
+int cmpxchg_weak(int *p, int x, int y) { return __atomic_compare_exchange(p, &x, &y, 1, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); }
diff --git a/clang/test/CodeGenOpenCL/builtins-amdgcn.cl b/clang/test/CodeGenOpenCL/builtins-amdgcn.cl
index 6a6d5b1..9274c80 100644
--- a/clang/test/CodeGenOpenCL/builtins-amdgcn.cl
+++ b/clang/test/CodeGenOpenCL/builtins-amdgcn.cl
@@ -638,11 +638,7 @@ void test_get_workgroup_size(int d, global int *out)
// CHECK-LABEL: @test_get_grid_size(
// CHECK: {{.*}}call align 4 dereferenceable(64){{.*}} ptr addrspace(4) @llvm.amdgcn.dispatch.ptr()
-// CHECK: getelementptr inbounds i8, ptr addrspace(4) %{{.*}}, i64 12
-// CHECK: load i32, ptr addrspace(4) %{{.*}}, align 4, !invariant.load
-// CHECK: getelementptr inbounds i8, ptr addrspace(4) %{{.*}}, i64 16
-// CHECK: load i32, ptr addrspace(4) %{{.*}}, align 4, !invariant.load
-// CHECK: getelementptr inbounds i8, ptr addrspace(4) %{{.*}}, i64 20
+// CHECK: getelementptr inbounds i8, ptr addrspace(4) %{{.*}}, i64 %.sink
// CHECK: load i32, ptr addrspace(4) %{{.*}}, align 4, !invariant.load
void test_get_grid_size(int d, global int *out)
{
diff --git a/clang/test/Driver/relax.s b/clang/test/Driver/relax.s
index 154d4db..b4a696a3 100644
--- a/clang/test/Driver/relax.s
+++ b/clang/test/Driver/relax.s
@@ -8,5 +8,7 @@
// RUN: llvm-readobj -r %t | FileCheck --check-prefix=REL %s
// REL: R_X86_64_REX_GOTPCRELX foo
+// REL: R_X86_64_REX2_GOTPCRELX foo
movq foo@GOTPCREL(%rip), %rax
+ movq foo@GOTPCREL(%rip), %r16
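+// Accessing %r16 requires the APX REX2 prefix, which is why the assembler
+// emits the separate R_X86_64_REX2_GOTPCRELX relocation checked above.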
diff --git a/clang/test/Misc/cc1as-output-asm-variant.c b/clang/test/Misc/cc1as-output-asm-variant.c
new file mode 100644
index 0000000..c287c62
--- /dev/null
+++ b/clang/test/Misc/cc1as-output-asm-variant.c
@@ -0,0 +1,8 @@
+// REQUIRES: x86-registered-target
+// RUN: %clang -cc1as -triple x86_64 %s -o - | FileCheck %s --check-prefix=ATT
+// RUN: %clang -cc1as -triple x86_64 %s --output-asm-variant=1 -o - | FileCheck %s --check-prefix=INTEL
+
+// ATT: movl $1, %eax
+// INTEL: mov eax, 1
+
+mov $1, %eax
diff --git a/clang/test/Modules/GH109879-1.cpp b/clang/test/Modules/GH109879-1.cpp
new file mode 100644
index 0000000..72cfb11
--- /dev/null
+++ b/clang/test/Modules/GH109879-1.cpp
@@ -0,0 +1,25 @@
+// RUN: rm -rf %t
+// RUN: mkdir -p %t
+// RUN: split-file %s %t
+//
+// RUN: %clang_cc1 -std=c++20 %t/A.cppm -emit-module-interface -o %t/A.pcm
+// RUN: %clang_cc1 -std=c++20 %t/B.cppm -fprebuilt-module-path=%t -emit-module-interface -o %t/B.pcm
+// RUN: %clang_cc1 -fsyntax-only -std=c++20 -fprebuilt-module-path=%t -verify %t/C.cpp
+
+//--- A.cppm
+export module A;
+export extern "C" void foo(struct Bar);
+
+//--- B.cppm
+module;
+import A;
+export module B;
+
+//--- C.cpp
+import B;
+struct Bar {};
+void test() {
+ foo(Bar());
+ // expected-error@-1 {{declaration of 'foo' must be imported}}
+ // expected-note@A.cppm:2 {{declaration here is not visible}}
+}
diff --git a/clang/test/Modules/GH109879-2.cpp b/clang/test/Modules/GH109879-2.cpp
new file mode 100644
index 0000000..ccec578
--- /dev/null
+++ b/clang/test/Modules/GH109879-2.cpp
@@ -0,0 +1,29 @@
+// RUN: rm -rf %t
+// RUN: mkdir -p %t
+// RUN: split-file %s %t
+//
+// RUN: %clang_cc1 -std=c++20 %t/A.cppm -emit-module-interface -o %t/A.pcm
+// RUN: %clang_cc1 -std=c++20 %t/B.cppm -fprebuilt-module-path=%t -emit-module-interface -o %t/B.pcm
+// RUN: %clang_cc1 -fsyntax-only -std=c++20 -fprebuilt-module-path=%t -verify %t/C.cpp
+
+//--- foo.h
+struct Bar {};
+extern "C" void foo(struct Bar);
+
+//--- A.cppm
+module;
+#include "foo.h"
+export module A;
+export extern "C" using ::foo;
+//--- B.cppm
+module;
+import A;
+export module B;
+
+//--- C.cpp
+// expected-no-diagnostics
+import B;
+#include "foo.h"
+void test() {
+ foo(Bar());
+}
diff --git a/clang/test/Preprocessor/bpf-predefined-macros.c b/clang/test/Preprocessor/bpf-predefined-macros.c
index 360b933..8c2143f 100644
--- a/clang/test/Preprocessor/bpf-predefined-macros.c
+++ b/clang/test/Preprocessor/bpf-predefined-macros.c
@@ -64,6 +64,9 @@ int s;
#ifdef __BPF_FEATURE_ADDR_SPACE_CAST
int t;
#endif
+#ifdef __BPF_FEATURE_MAY_GOTO
+int u;
+#endif
// CHECK: int b;
// CHECK: int c;
@@ -98,6 +101,11 @@ int t;
// CPU_V3: int t;
// CPU_V4: int t;
+// CPU_V1: int u;
+// CPU_V2: int u;
+// CPU_V3: int u;
+// CPU_V4: int u;
+
// CPU_GENERIC: int g;
// CPU_PROBE: int f;
diff --git a/clang/test/Sema/riscv-asm.c b/clang/test/Sema/riscv-asm.c
index 82664c0..69ba3be 100644
--- a/clang/test/Sema/riscv-asm.c
+++ b/clang/test/Sema/riscv-asm.c
@@ -1,8 +1,6 @@
// RUN: %clang_cc1 %s -triple riscv32 -verify -fsyntax-only
// RUN: %clang_cc1 %s -triple riscv64 -verify -fsyntax-only
-// expected-no-diagnostics
-
void i (void) {
asm volatile ("" ::: "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7");
asm volatile ("" ::: "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15");
@@ -26,3 +24,18 @@ void f (void) {
asm volatile ("" ::: "fa6", "fa7", "fs2", "fs3", "fs4", "fs5", "fs6", "fs7");
asm volatile ("" ::: "fs8", "fs9", "fs10", "fs11", "ft8", "ft9", "ft10", "ft11");
}
+
+register char i1 __asm__ ("x1"); // expected-error {{size of register 'x1' does not match variable size}}
+#if __riscv_xlen == 32
+register long long ll2 __asm__ ("x2"); // expected-error {{size of register 'x2' does not match variable size}}
+register int i2 __asm__ ("x3");
+#endif
+register long l3 __asm__ ("x4");
+register long ra __asm__ ("ra");
+register long sp __asm__ ("sp");
+register int *gp __asm__ ("gp");
+register char *tp __asm__ ("tp");
+register long a7 __asm__ ("a7");
+register long s11 __asm__ ("s11");
+register long t5 __asm__ ("t5");
+register long* f1 __asm__ ("f1"); // expected-error {{register 'f1' unsuitable for global register variables on this target}}
diff --git a/clang/test/Sema/scoped-atomic-ops.c b/clang/test/Sema/scoped-atomic-ops.c
index 59e638c..33044aa 100644
--- a/clang/test/Sema/scoped-atomic-ops.c
+++ b/clang/test/Sema/scoped-atomic-ops.c
@@ -1,5 +1,6 @@
// RUN: %clang_cc1 -x c -triple=amdgcn-amd-amdhsa -verify -fsyntax-only %s
// RUN: %clang_cc1 -x c -triple=x86_64-pc-linux-gnu -verify -fsyntax-only %s
+// RUN: %clang_cc1 -x c -triple=spirv64-unknown-unknown -verify -fsyntax-only %s
int fi1a(int *i) {
int v;
diff --git a/clang/test/SemaCXX/attr-gsl-owner-pointer-std.cpp b/clang/test/SemaCXX/attr-gsl-owner-pointer-std.cpp
index 352e1e4..8fb4cc76 100644
--- a/clang/test/SemaCXX/attr-gsl-owner-pointer-std.cpp
+++ b/clang/test/SemaCXX/attr-gsl-owner-pointer-std.cpp
@@ -27,6 +27,11 @@ public:
static_assert(sizeof(vector<int>), ""); // Force instantiation.
static_assert(sizeof(vector<int>::iterator), ""); // Force instantiation.
+template <>
+class vector<bool> {};
+// CHECK: ClassTemplateSpecializationDecl {{.*}} vector
+// CHECK: OwnerAttr {{.*}}
+
// If std::container::iterator is a using declaration, attributes are inferred
// for the underlying class.
template <typename T>
@@ -173,6 +178,18 @@ class reference_wrapper;
class some_unknown_type;
// CHECK: CXXRecordDecl {{.*}} some_unknown_type
+using size_t = unsigned;
+inline constexpr size_t dynamic_extent = -1;
+template <typename _Tp, size_t _Extent = dynamic_extent>
+class span;
+// CHECK: CXXRecordDecl {{.*}} span
+// CHECK: PointerAttr {{.*}}
+
+
+template <typename _Tp>
+struct span<_Tp, dynamic_extent> {};
+// CHECK: ClassTemplatePartialSpecializationDecl {{.*}} span
+// CHECK: PointerAttr {{.*}}
} // namespace std
namespace user {
diff --git a/clang/test/SemaCXX/attr-musttail.cpp b/clang/test/SemaCXX/attr-musttail.cpp
index 561184e7..12cfd89 100644
--- a/clang/test/SemaCXX/attr-musttail.cpp
+++ b/clang/test/SemaCXX/attr-musttail.cpp
@@ -267,3 +267,34 @@ namespace ns {}
void TestCallNonValue() {
[[clang::musttail]] return ns; // expected-error {{unexpected namespace name 'ns': expected expression}}
}
+
+// Test diagnostics for lifetimes of local variables, which end earlier for a
+// musttail call than for a normal one.
+
+void TakesIntAndPtr(int, int *);
+void PassAddressOfLocal(int a, int *b) {
+ int c;
+ [[clang::musttail]] return TakesIntAndPtr(0, &c); // expected-warning {{address of stack memory associated with local variable 'c' passed to musttail function}}
+}
+void PassAddressOfParam(int a, int *b) {
+ [[clang::musttail]] return TakesIntAndPtr(0, &a); // expected-warning {{address of stack memory associated with parameter 'a' passed to musttail function}}
+}
+void PassValues(int a, int *b) {
+ [[clang::musttail]] return TakesIntAndPtr(a, b);
+}
+
+void TakesIntAndRef(int, const int &);
+void PassRefOfLocal(int a, const int &b) {
+ int c;
+ [[clang::musttail]] return TakesIntAndRef(0, c); // expected-warning {{address of stack memory associated with local variable 'c' passed to musttail function}}
+}
+void PassRefOfParam(int a, const int &b) {
+ [[clang::musttail]] return TakesIntAndRef(0, a); // expected-warning {{address of stack memory associated with parameter 'a' passed to musttail function}}
+}
+int ReturnInt();
+void PassRefOfTemporary(int a, const int &b) {
+ [[clang::musttail]] return TakesIntAndRef(0, ReturnInt()); // expected-warning {{passing address of local temporary object to musttail function}}
+}
+void PassValuesRef(int a, const int &b) {
+ [[clang::musttail]] return TakesIntAndRef(a, b);
+}
diff --git a/clang/tools/CMakeLists.txt b/clang/tools/CMakeLists.txt
index 9a35127..88e2941 100644
--- a/clang/tools/CMakeLists.txt
+++ b/clang/tools/CMakeLists.txt
@@ -22,7 +22,7 @@ add_clang_subdirectory(c-index-test)
add_clang_subdirectory(clang-refactor)
# For MinGW we only enable shared library if LLVM_LINK_LLVM_DYLIB=ON.
# Without that option resulting library is too close to 2^16 DLL exports limit.
-if(UNIX OR (MINGW AND LLVM_LINK_LLVM_DYLIB))
+if(UNIX OR (MSVC AND LLVM_BUILD_LLVM_DYLIB_VIS) OR (MINGW AND LLVM_LINK_LLVM_DYLIB))
add_clang_subdirectory(clang-shlib)
endif()
diff --git a/clang/tools/c-index-test/core_main.cpp b/clang/tools/c-index-test/core_main.cpp
index 003b1ba..c43bff2 100644
--- a/clang/tools/c-index-test/core_main.cpp
+++ b/clang/tools/c-index-test/core_main.cpp
@@ -274,12 +274,12 @@ static bool printSourceSymbolsFromModule(StringRef modulePath,
IntrusiveRefCntPtr<DiagnosticsEngine> Diags =
CompilerInstance::createDiagnostics(new DiagnosticOptions());
- std::unique_ptr<ASTUnit> AU = ASTUnit::LoadFromASTFile(
- std::string(modulePath), *pchRdr, ASTUnit::LoadASTOnly, Diags,
- FileSystemOpts, HSOpts, /*LangOpts=*/nullptr,
- /*OnlyLocalDecls=*/true, CaptureDiagsKind::None,
- /*AllowASTWithCompilerErrors=*/true,
- /*UserFilesAreVolatile=*/false);
+ std::unique_ptr<ASTUnit> AU =
+ ASTUnit::LoadFromASTFile(modulePath, *pchRdr, ASTUnit::LoadASTOnly, Diags,
+ FileSystemOpts, HSOpts, /*LangOpts=*/nullptr,
+ /*OnlyLocalDecls=*/true, CaptureDiagsKind::None,
+ /*AllowASTWithCompilerErrors=*/true,
+ /*UserFilesAreVolatile=*/false);
if (!AU) {
errs() << "failed to create TU for: " << modulePath << '\n';
return true;
diff --git a/clang/tools/clang-extdef-mapping/ClangExtDefMapGen.cpp b/clang/tools/clang-extdef-mapping/ClangExtDefMapGen.cpp
index c048f33..3a2c32c 100644
--- a/clang/tools/clang-extdef-mapping/ClangExtDefMapGen.cpp
+++ b/clang/tools/clang-extdef-mapping/ClangExtDefMapGen.cpp
@@ -155,7 +155,7 @@ static bool HandleAST(StringRef AstPath) {
IntrusiveRefCntPtr<DiagnosticsEngine> DiagEngine = GetDiagnosticsEngine();
std::unique_ptr<ASTUnit> Unit = ASTUnit::LoadFromASTFile(
- AstPath.str(), CI->getPCHContainerOperations()->getRawReader(),
+ AstPath, CI->getPCHContainerOperations()->getRawReader(),
ASTUnit::LoadASTOnly, DiagEngine, CI->getFileSystemOpts(),
CI->getHeaderSearchOptsPtr());
diff --git a/clang/unittests/Frontend/ASTUnitTest.cpp b/clang/unittests/Frontend/ASTUnitTest.cpp
index 30d2731..19b5d9b 100644
--- a/clang/unittests/Frontend/ASTUnitTest.cpp
+++ b/clang/unittests/Frontend/ASTUnitTest.cpp
@@ -92,8 +92,8 @@ TEST_F(ASTUnitTest, SaveLoadPreservesLangOptionsInPrintingPolicy) {
auto HSOpts = std::make_shared<HeaderSearchOptions>();
std::unique_ptr<ASTUnit> AU = ASTUnit::LoadFromASTFile(
- std::string(ASTFileName.str()), PCHContainerOps->getRawReader(),
- ASTUnit::LoadEverything, Diags, FileSystemOptions(), HSOpts);
+ ASTFileName, PCHContainerOps->getRawReader(), ASTUnit::LoadEverything,
+ Diags, FileSystemOptions(), HSOpts);
if (!AU)
FAIL() << "failed to load ASTUnit";
diff --git a/clang/unittests/Interpreter/CodeCompletionTest.cpp b/clang/unittests/Interpreter/CodeCompletionTest.cpp
index 72fcce7..23cfc46 100644
--- a/clang/unittests/Interpreter/CodeCompletionTest.cpp
+++ b/clang/unittests/Interpreter/CodeCompletionTest.cpp
@@ -26,7 +26,7 @@ auto CB = clang::IncrementalCompilerBuilder();
class CodeCompletionTest : public InterpreterTestBase {
public:
- std::unique_ptr<Interpreter> Interp;
+ std::unique_ptr<clang::Interpreter> Interp;
void SetUp() override {
if (!HostSupportsJIT())
diff --git a/clang/unittests/Interpreter/InterpreterExtensionsTest.cpp b/clang/unittests/Interpreter/InterpreterExtensionsTest.cpp
index 5f1f29c..29af464 100644
--- a/clang/unittests/Interpreter/InterpreterExtensionsTest.cpp
+++ b/clang/unittests/Interpreter/InterpreterExtensionsTest.cpp
@@ -65,41 +65,13 @@ public:
}
};
-class RecordRuntimeIBMetrics : public Interpreter {
- struct NoopRuntimeInterfaceBuilder : public RuntimeInterfaceBuilder {
- NoopRuntimeInterfaceBuilder(Sema &S) : S(S) {}
-
- TransformExprFunction *getPrintValueTransformer() override {
- TransformerQueries += 1;
- return &noop;
- }
-
- static ExprResult noop(RuntimeInterfaceBuilder *Builder, Expr *E,
- ArrayRef<Expr *> FixedArgs) {
- auto *B = static_cast<NoopRuntimeInterfaceBuilder *>(Builder);
- B->TransformedExprs += 1;
- return B->S.ActOnFinishFullExpr(E, /*DiscardedValue=*/false);
- }
-
- Sema &S;
- size_t TransformedExprs = 0;
- size_t TransformerQueries = 0;
- };
-
-public:
- // Inherit with using wouldn't make it public
- RecordRuntimeIBMetrics(std::unique_ptr<CompilerInstance> CI, llvm::Error &Err)
- : Interpreter(std::move(CI), Err) {}
-
- std::unique_ptr<RuntimeInterfaceBuilder> FindRuntimeInterface() override {
- assert(RuntimeIBPtr == nullptr && "We create the builder only once");
- Sema &S = getCompilerInstance()->getSema();
- auto RuntimeIB = std::make_unique<NoopRuntimeInterfaceBuilder>(S);
- RuntimeIBPtr = RuntimeIB.get();
- return RuntimeIB;
- }
-
- NoopRuntimeInterfaceBuilder *RuntimeIBPtr = nullptr;
+struct OutOfProcInterpreter : public Interpreter {
+ OutOfProcInterpreter(
+ std::unique_ptr<CompilerInstance> CI, llvm::Error &ErrOut,
+ std::unique_ptr<clang::ASTConsumer> Consumer,
+ std::unique_ptr<llvm::orc::LLJITBuilder> JITBuilder = nullptr)
+ : Interpreter(std::move(CI), ErrOut, std::move(JITBuilder),
+ std::move(Consumer)) {}
};
TEST_F(InterpreterExtensionsTest, FindRuntimeInterface) {
@@ -108,13 +80,23 @@ TEST_F(InterpreterExtensionsTest, FindRuntimeInterface) {
clang::IncrementalCompilerBuilder CB;
llvm::Error ErrOut = llvm::Error::success();
- RecordRuntimeIBMetrics Interp(cantFail(CB.CreateCpp()), ErrOut);
+ auto CI = cantFail(CB.CreateCpp());
+ // Do not attach the default consumer which is specialized for in-process.
+ class NoopConsumer : public ASTConsumer {};
+ std::unique_ptr<ASTConsumer> C = std::make_unique<NoopConsumer>();
+ OutOfProcInterpreter I(std::move(CI), ErrOut, std::move(C),
+ /*JITBuilder=*/nullptr);
cantFail(std::move(ErrOut));
- cantFail(Interp.Parse("int a = 1; a"));
- cantFail(Interp.Parse("int b = 2; b"));
- cantFail(Interp.Parse("int c = 3; c"));
- EXPECT_EQ(3U, Interp.RuntimeIBPtr->TransformedExprs);
- EXPECT_EQ(1U, Interp.RuntimeIBPtr->TransformerQueries);
+ cantFail(I.Parse("int a = 1; a"));
+ cantFail(I.Parse("int b = 2; b"));
+ cantFail(I.Parse("int c = 3; c"));
+
+ // Make sure no clang::Value logic is attached by the Interpreter.
+ Value V1;
+ llvm::cantFail(I.ParseAndExecute("int x = 42;"));
+ llvm::cantFail(I.ParseAndExecute("x", &V1));
+ EXPECT_FALSE(V1.isValid());
+ EXPECT_FALSE(V1.hasValue());
}
class CustomJBInterpreter : public Interpreter {
diff --git a/cmake/Modules/CMakePolicy.cmake b/cmake/Modules/CMakePolicy.cmake
index b696266..665af01 100644
--- a/cmake/Modules/CMakePolicy.cmake
+++ b/cmake/Modules/CMakePolicy.cmake
@@ -29,3 +29,9 @@ endif()
if(POLICY CMP0144)
cmake_policy(SET CMP0144 NEW)
endif()
+
+# CMP0147: Visual Studio Generators build custom commands in parallel.
+# New in CMake 3.27: https://cmake.org/cmake/help/latest/policy/CMP0147.html
+if(POLICY CMP0147)
+ cmake_policy(SET CMP0147 NEW)
+endif()
diff --git a/compiler-rt/lib/builtins/fp_lib.h b/compiler-rt/lib/builtins/fp_lib.h
index 0289cfd..fae5849 100644
--- a/compiler-rt/lib/builtins/fp_lib.h
+++ b/compiler-rt/lib/builtins/fp_lib.h
@@ -171,8 +171,11 @@ static __inline void wideMultiply(rep_t a, rep_t b, rep_t *hi, rep_t *lo) {
(sum2 & Word_FullMask) + ((sum3 << 32) & Word_HiMask);
*lo = r0 + (r1 << 64);
+ // The addition above can overflow, in which case `*lo` will be less than
+ // `r0`. Carry any overflow into `hi`.
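+ // (For example, in 8-bit arithmetic 0xF0 + 0x20 wraps to 0x10, and
+ // 0x10 < 0xF0 detects the wrap.)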
+ const bool carry = *lo < r0;
*hi = (r1 >> 64) + (sum1 >> 96) + (sum2 >> 64) + (sum3 >> 32) + sum4 +
- (sum5 << 32) + (sum6 << 64);
+ (sum5 << 32) + (sum6 << 64) + carry;
}
#undef Word_1
#undef Word_2
diff --git a/compiler-rt/lib/fuzzer/FuzzerUtilWindows.cpp b/compiler-rt/lib/fuzzer/FuzzerUtilWindows.cpp
index e0210aa..37aecae 100644
--- a/compiler-rt/lib/fuzzer/FuzzerUtilWindows.cpp
+++ b/compiler-rt/lib/fuzzer/FuzzerUtilWindows.cpp
@@ -239,6 +239,10 @@ size_t PageSize() {
}
void SetThreadName(std::thread &thread, const std::string &name) {
+#if defined(_LIBCPP_HAS_THREAD_API_PTHREAD) || \
+ defined(_GLIBCXX_GCC_GTHR_POSIX_H)
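+ // With a pthread-based std::thread (e.g. some MinGW toolchains),
+ // native_handle() is a pthread_t rather than a Windows HANDLE, so set the
+ // name through the pthreads API directly.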
+ (void)pthread_setname_np(thread.native_handle(), name.c_str());
+#else
typedef HRESULT(WINAPI * proc)(HANDLE, PCWSTR);
HMODULE kbase = GetModuleHandleA("KernelBase.dll");
proc ThreadNameProc = reinterpret_cast<proc>(
@@ -253,6 +257,7 @@ void SetThreadName(std::thread &thread, const std::string &name) {
}
}
}
+#endif
}
} // namespace fuzzer
diff --git a/compiler-rt/lib/nsan/nsan.cpp b/compiler-rt/lib/nsan/nsan.cpp
index 4679bcd..1c06340 100644
--- a/compiler-rt/lib/nsan/nsan.cpp
+++ b/compiler-rt/lib/nsan/nsan.cpp
@@ -25,7 +25,7 @@
// on the runtime configuration. The middle part indicates the type of
// the application value, the suffix (f,d,l) indicates the type of the
// shadow, and depends on the instrumentation configuration.
-// * __nsan_fcmp_fail_* emits a warning for an fcmp instruction whose
+// * __nsan_fcmp_fail_* emits a warning for a fcmp instruction whose
// corresponding shadow fcmp result differs.
//
//===----------------------------------------------------------------------===//
@@ -682,7 +682,7 @@ void fCmpFailFT(const FT Lhs, const FT Rhs, ShadowFT LhsShadow,
if (flags().enable_warning_stats)
nsan_stats->AddWarning(CheckTypeT::kFcmp, pc, bp, 0.0);
- if (flags().disable_warnings)
+ if (flags().disable_warnings || !flags().check_cmp)
return;
// FIXME: ideally we would print the shadow value as FP128. Right now because
diff --git a/compiler-rt/lib/nsan/nsan_flags.inc b/compiler-rt/lib/nsan/nsan_flags.inc
index 7c9e579..7609732 100644
--- a/compiler-rt/lib/nsan/nsan_flags.inc
+++ b/compiler-rt/lib/nsan/nsan_flags.inc
@@ -49,4 +49,7 @@ NSAN_FLAG(bool, enable_loadtracking_stats, false,
NSAN_FLAG(bool, poison_in_free, true, "")
NSAN_FLAG(bool, print_stats_on_exit, false, "If true, print stats on exit.")
NSAN_FLAG(bool, check_nan, false,
- "If true, check the floating-point number is nan") \ No newline at end of file
+ "If true, check the floating-point number is nan")
+NSAN_FLAG(bool, check_cmp, true,
+ "If true, emit a warning for a fcmp instruction whose "
+ "corresponding shadow fcmp result differs.")
diff --git a/compiler-rt/lib/rtsan/rtsan.cpp b/compiler-rt/lib/rtsan/rtsan.cpp
index 2afdf3c..1e10069 100644
--- a/compiler-rt/lib/rtsan/rtsan.cpp
+++ b/compiler-rt/lib/rtsan/rtsan.cpp
@@ -22,30 +22,47 @@
using namespace __rtsan;
using namespace __sanitizer;
+namespace {
+enum class InitializationState : u8 {
+ Uninitialized,
+ Initializing,
+ Initialized,
+};
+} // namespace
+
static StaticSpinMutex rtsan_inited_mutex;
-static atomic_uint8_t rtsan_initialized = {0};
+static atomic_uint8_t rtsan_initialized = {
+ static_cast<u8>(InitializationState::Uninitialized)};
+
+static void SetInitializationState(InitializationState state) {
+ atomic_store(&rtsan_initialized, static_cast<u8>(state),
+ memory_order_release);
+}
-static void SetInitialized() {
- atomic_store(&rtsan_initialized, 1, memory_order_release);
+static InitializationState GetInitializationState() {
+ return static_cast<InitializationState>(
+ atomic_load(&rtsan_initialized, memory_order_acquire));
}
-static auto PrintDiagnosticsAndDieAction(DiagnosticsInfo info) {
+static auto OnViolationAction(DiagnosticsInfo info) {
return [info]() {
__rtsan::PrintDiagnostics(info);
- Die();
+ if (flags().halt_on_error)
+ Die();
};
}
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE void __rtsan_init() {
- CHECK(!__rtsan_is_initialized());
+ CHECK(GetInitializationState() == InitializationState::Uninitialized);
+ SetInitializationState(InitializationState::Initializing);
SanitizerToolName = "RealtimeSanitizer";
InitializeFlags();
InitializeInterceptors();
- SetInitialized();
+ SetInitializationState(InitializationState::Initialized);
}
SANITIZER_INTERFACE_ATTRIBUTE void __rtsan_ensure_initialized() {
@@ -62,7 +79,7 @@ SANITIZER_INTERFACE_ATTRIBUTE void __rtsan_ensure_initialized() {
}
SANITIZER_INTERFACE_ATTRIBUTE bool __rtsan_is_initialized() {
- return atomic_load(&rtsan_initialized, memory_order_acquire) == 1;
+ return GetInitializationState() == InitializationState::Initialized;
}
SANITIZER_INTERFACE_ATTRIBUTE void __rtsan_realtime_enter() {
@@ -83,20 +100,24 @@ SANITIZER_INTERFACE_ATTRIBUTE void __rtsan_enable() {
SANITIZER_INTERFACE_ATTRIBUTE void
__rtsan_notify_intercepted_call(const char *func_name) {
+ // While initializing, we need all intercepted functions to behave normally
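+ // (e.g. InitializeFlags() and InitializeInterceptors() may themselves end
+ // up in intercepted functions before the tool is fully set up).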
+ if (GetInitializationState() == InitializationState::Initializing)
+ return;
+
__rtsan_ensure_initialized();
GET_CALLER_PC_BP;
- ExpectNotRealtime(
- GetContextForThisThread(),
- PrintDiagnosticsAndDieAction({InterceptedCallInfo{func_name}, pc, bp}));
+ ExpectNotRealtime(GetContextForThisThread(),
+ OnViolationAction({DiagnosticsInfoType::InterceptedCall,
+ func_name, pc, bp}));
}
SANITIZER_INTERFACE_ATTRIBUTE void
__rtsan_notify_blocking_call(const char *func_name) {
__rtsan_ensure_initialized();
GET_CALLER_PC_BP;
- ExpectNotRealtime(
- GetContextForThisThread(),
- PrintDiagnosticsAndDieAction({BlockingCallInfo{func_name}, pc, bp}));
+ ExpectNotRealtime(GetContextForThisThread(),
+ OnViolationAction({DiagnosticsInfoType::BlockingCall,
+ func_name, pc, bp}));
}
} // extern "C"
diff --git a/compiler-rt/lib/rtsan/rtsan_diagnostics.cpp b/compiler-rt/lib/rtsan/rtsan_diagnostics.cpp
index ac13b07..f82001f 100644
--- a/compiler-rt/lib/rtsan/rtsan_diagnostics.cpp
+++ b/compiler-rt/lib/rtsan/rtsan_diagnostics.cpp
@@ -37,12 +37,6 @@ public:
const char *FunctionName() const { return Green(); }
const char *Reason() const { return Blue(); }
};
-
-template <class... Ts> struct Overloaded : Ts... {
- using Ts::operator()...;
-};
-// TODO: Remove below when c++20
-template <class... Ts> Overloaded(Ts...) -> Overloaded<Ts...>;
} // namespace
static void PrintStackTrace(uptr pc, uptr bp) {
@@ -53,35 +47,39 @@ static void PrintStackTrace(uptr pc, uptr bp) {
}
static void PrintError(const Decorator &decorator,
- const DiagnosticsCallerInfo &info) {
- const char *violation_type = std::visit(
- Overloaded{
- [](const InterceptedCallInfo &) { return "unsafe-library-call"; },
- [](const BlockingCallInfo &) { return "blocking-call"; }},
- info);
+ const DiagnosticsInfo &info) {
+ const auto ErrorTypeStr = [&info]() -> const char * {
+ switch (info.type) {
+ case DiagnosticsInfoType::InterceptedCall:
+ return "unsafe-library-call";
+ case DiagnosticsInfoType::BlockingCall:
+ return "blocking-call";
+ }
+ return "(unknown error)";
+ };
Printf("%s", decorator.Error());
- Report("ERROR: RealtimeSanitizer: %s\n", violation_type);
+ Report("ERROR: RealtimeSanitizer: %s\n", ErrorTypeStr());
}
static void PrintReason(const Decorator &decorator,
- const DiagnosticsCallerInfo &info) {
+ const DiagnosticsInfo &info) {
Printf("%s", decorator.Reason());
- std::visit(
- Overloaded{[decorator](const InterceptedCallInfo &call) {
- Printf("Intercepted call to real-time unsafe function "
- "`%s%s%s` in real-time context!",
- decorator.FunctionName(),
- call.intercepted_function_name_, decorator.Reason());
- },
- [decorator](const BlockingCallInfo &arg) {
- Printf("Call to blocking function "
- "`%s%s%s` in real-time context!",
- decorator.FunctionName(), arg.blocking_function_name_,
- decorator.Reason());
- }},
- info);
+ switch (info.type) {
+ case DiagnosticsInfoType::InterceptedCall: {
+ Printf("Intercepted call to real-time unsafe function "
+ "`%s%s%s` in real-time context!",
+ decorator.FunctionName(), info.func_name, decorator.Reason());
+ break;
+ }
+ case DiagnosticsInfoType::BlockingCall: {
+ Printf("Call to blocking function "
+ "`%s%s%s` in real-time context!",
+ decorator.FunctionName(), info.func_name, decorator.Reason());
+ break;
+ }
+ }
Printf("\n");
}
@@ -90,8 +88,8 @@ void __rtsan::PrintDiagnostics(const DiagnosticsInfo &info) {
ScopedErrorReportLock l;
Decorator d;
- PrintError(d, info.call_info);
- PrintReason(d, info.call_info);
+ PrintError(d, info);
+ PrintReason(d, info);
Printf("%s", d.Default());
PrintStackTrace(info.pc, info.bp);
}
diff --git a/compiler-rt/lib/rtsan/rtsan_diagnostics.h b/compiler-rt/lib/rtsan/rtsan_diagnostics.h
index 8aec512..f8a6b8a 100644
--- a/compiler-rt/lib/rtsan/rtsan_diagnostics.h
+++ b/compiler-rt/lib/rtsan/rtsan_diagnostics.h
@@ -15,25 +15,16 @@
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
-#include <variant>
-
namespace __rtsan {
-struct InterceptedCallInfo {
- const char *intercepted_function_name_;
-};
-
-struct BlockingCallInfo {
-public:
- const char *blocking_function_name_;
+enum class DiagnosticsInfoType {
+ InterceptedCall,
+ BlockingCall,
};
-using DiagnosticsCallerInfo =
- std::variant<InterceptedCallInfo, BlockingCallInfo>;
-
struct DiagnosticsInfo {
- DiagnosticsCallerInfo call_info;
-
+ DiagnosticsInfoType type;
+ const char *func_name;
__sanitizer::uptr pc;
__sanitizer::uptr bp;
};
diff --git a/compiler-rt/lib/rtsan/rtsan_flags.inc b/compiler-rt/lib/rtsan/rtsan_flags.inc
index 93b0294..25d62cf 100644
--- a/compiler-rt/lib/rtsan/rtsan_flags.inc
+++ b/compiler-rt/lib/rtsan/rtsan_flags.inc
@@ -16,5 +16,4 @@
// RTSAN_FLAG(Type, Name, DefaultValue, Description)
// See COMMON_FLAG in sanitizer_flags.inc for more details.
-// Example flag, until we get a real one
-// RTSAN_FLAG(bool, halt_on_error, true, "If true, halt the program on error")
+RTSAN_FLAG(bool, halt_on_error, true, "Exit after first reported error.")
diff --git a/compiler-rt/lib/rtsan/rtsan_interceptors.cpp b/compiler-rt/lib/rtsan/rtsan_interceptors.cpp
index d186d1a..9cc7214 100644
--- a/compiler-rt/lib/rtsan/rtsan_interceptors.cpp
+++ b/compiler-rt/lib/rtsan/rtsan_interceptors.cpp
@@ -64,13 +64,15 @@ INTERCEPTOR(int, open, const char *path, int oflag, ...) {
// O_NONBLOCK
__rtsan_notify_intercepted_call("open");
- va_list args;
- va_start(args, oflag);
- const mode_t mode = va_arg(args, int);
- va_end(args);
+ if (OpenReadsVaArgs(oflag)) {
+ va_list args;
+ va_start(args, oflag);
+ const mode_t mode = va_arg(args, int);
+ va_end(args);
+ return REAL(open)(path, oflag, mode);
+ }
- const int result = REAL(open)(path, oflag, mode);
- return result;
+ return REAL(open)(path, oflag);
}
#if SANITIZER_INTERCEPT_OPEN64
@@ -79,13 +81,15 @@ INTERCEPTOR(int, open64, const char *path, int oflag, ...) {
// O_NONBLOCK
__rtsan_notify_intercepted_call("open64");
- va_list args;
- va_start(args, oflag);
- const mode_t mode = va_arg(args, int);
- va_end(args);
+ if (OpenReadsVaArgs(oflag)) {
+ va_list args;
+ va_start(args, oflag);
+ const mode_t mode = va_arg(args, int);
+ va_end(args);
+ return REAL(open64)(path, oflag, mode);
+ }
- const int result = REAL(open64)(path, oflag, mode);
- return result;
+ return REAL(open64)(path, oflag);
}
#define RTSAN_MAYBE_INTERCEPT_OPEN64 INTERCEPT_FUNCTION(open64)
#else
@@ -97,13 +101,15 @@ INTERCEPTOR(int, openat, int fd, const char *path, int oflag, ...) {
// O_NONBLOCK
__rtsan_notify_intercepted_call("openat");
- va_list args;
- va_start(args, oflag);
- mode_t mode = va_arg(args, int);
- va_end(args);
+ if (OpenReadsVaArgs(oflag)) {
+ va_list args;
+ va_start(args, oflag);
+ const mode_t mode = va_arg(args, int);
+ va_end(args);
+ return REAL(openat)(fd, path, oflag, mode);
+ }
- const int result = REAL(openat)(fd, path, oflag, mode);
- return result;
+ return REAL(openat)(fd, path, oflag);
}
#if SANITIZER_INTERCEPT_OPENAT64
@@ -112,13 +118,15 @@ INTERCEPTOR(int, openat64, int fd, const char *path, int oflag, ...) {
// O_NONBLOCK
__rtsan_notify_intercepted_call("openat64");
- va_list args;
- va_start(args, oflag);
- mode_t mode = va_arg(args, int);
- va_end(args);
+ if (OpenReadsVaArgs(oflag)) {
+ va_list args;
+ va_start(args, oflag);
+ const mode_t mode = va_arg(args, int);
+ va_end(args);
+ return REAL(openat64)(fd, path, oflag, mode);
+ }
- const int result = REAL(openat64)(fd, path, oflag, mode);
- return result;
+ return REAL(openat64)(fd, path, oflag);
}
#define RTSAN_MAYBE_INTERCEPT_OPENAT64 INTERCEPT_FUNCTION(openat64)
#else
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors.inc b/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors.inc
index e09a4a8..b382e7a 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors.inc
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors.inc
@@ -127,6 +127,39 @@ extern const short *_toupper_tab_;
extern const short *_tolower_tab_;
#endif
+#if SANITIZER_LINUX && SANITIZER_SPARC32
+// On 32-bit Linux/sparc64, double and long double are identical and glibc
+// uses a __nldbl_ (no long double) prefix for various stdio functions.
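+// For example, glibc redirects a call to printf to __nldbl_printf on this
+// target, so the interceptors must be registered under the aliased names.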
+# define __isoc23_fscanf __nldbl___isoc23_fscanf
+# define __isoc23_scanf __nldbl___isoc23_scanf
+# define __isoc23_sscanf __nldbl___isoc23_sscanf
+# define __isoc23_vfscanf __nldbl___isoc23_vfscanf
+# define __isoc23_vscanf __nldbl___isoc23_vscanf
+# define __isoc23_vsscanf __nldbl___isoc23_vsscanf
+# define __isoc99_fscanf __nldbl___isoc99_fscanf
+# define __isoc99_scanf __nldbl___isoc99_scanf
+# define __isoc99_sscanf __nldbl___isoc99_sscanf
+# define __isoc99_vfscanf __nldbl___isoc99_vfscanf
+# define __isoc99_vscanf __nldbl___isoc99_vscanf
+# define __isoc99_vsscanf __nldbl___isoc99_vsscanf
+# define asprintf __nldbl_asprintf
+# define fprintf __nldbl_fprintf
+# define fscanf __nldbl_fscanf
+# define printf __nldbl_printf
+# define scanf __nldbl_scanf
+# define snprintf __nldbl_snprintf
+# define sprintf __nldbl_sprintf
+# define sscanf __nldbl_sscanf
+# define vasprintf __nldbl_vasprintf
+# define vfprintf __nldbl_vfprintf
+# define vfscanf __nldbl_vfscanf
+# define vprintf __nldbl_vprintf
+# define vscanf __nldbl_vscanf
+# define vsnprintf __nldbl_vsnprintf
+# define vsprintf __nldbl_vsprintf
+# define vsscanf __nldbl_vsscanf
+#endif
+
#if SANITIZER_MUSL && \
(defined(__i386__) || defined(__arm__) || SANITIZER_MIPS32 || SANITIZER_PPC32)
// musl 1.2.0 on existing 32-bit architectures uses new symbol names for the
@@ -1256,6 +1289,11 @@ INTERCEPTOR(int, prctl, int option, unsigned long arg2, unsigned long arg3,
static const int PR_SCHED_CORE = 62;
static const int PR_SCHED_CORE_GET = 0;
static const int PR_GET_PDEATHSIG = 2;
+
+# if !SANITIZER_ANDROID
+ static const int PR_SET_SECCOMP = 22;
+ static const int SECCOMP_MODE_FILTER = 2;
+# endif
if (option == PR_SET_VMA && arg2 == 0UL) {
char *name = (char *)arg5;
COMMON_INTERCEPTOR_READ_RANGE(ctx, name, internal_strlen(name) + 1);
@@ -1274,6 +1312,11 @@ INTERCEPTOR(int, prctl, int option, unsigned long arg2, unsigned long arg3,
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, (u64 *)(arg5), sizeof(u64));
} else if (res != -1 && option == PR_GET_PDEATHSIG) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, (u64 *)(arg2), sizeof(int));
+# if !SANITIZER_ANDROID
+ } else if (res != -1 && option == PR_SET_SECCOMP &&
+ arg2 == SECCOMP_MODE_FILTER) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, (u64 *)(arg3), struct_sock_fprog_sz);
+# endif
}
return res;
}
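
On the __nldbl_ block above: in the 32-bit SPARC ABI, long double has the
same representation as double, and glibc exports the no-long-double stdio
entry points under __nldbl_-prefixed names. Redefining the plain names
before the INTERCEPTOR macros expand makes the interceptors bind to the
symbols glibc actually exports there. A compilable sketch of the effect
(__nldbl_printf is a glibc-internal name; illustration only):

  #include <cstdio>

  #define printf __nldbl_printf

  // After the #define, this declares __nldbl_printf rather than printf,
  // which is how the interceptor ends up targeting the right symbol.
  extern "C" int printf(const char *fmt, ...);
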
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_linux.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_linux.cpp
index 1c637d1..d421d11 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_linux.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_linux.cpp
@@ -107,7 +107,9 @@ extern struct ps_strings *__ps_strings;
# endif // SANITIZER_NETBSD
# if SANITIZER_SOLARIS
+# include <stddef.h>
# include <stdlib.h>
+# include <sys/frame.h>
# include <thread.h>
# define environ _environ
# endif
@@ -725,6 +727,11 @@ static void GetArgsAndEnv(char ***argv, char ***envp) {
# if !SANITIZER_GO
if (&__libc_stack_end) {
uptr *stack_end = (uptr *)__libc_stack_end;
+      // Linux/sparc needs an adjustment, cf. glibc
+      // sysdeps/sparc/sparc{32,64}/dl-machine.h (DL_STACK_END).
+# if SANITIZER_LINUX && defined(__sparc__)
+ stack_end = &stack_end[16];
+# endif
// Normally argc can be obtained from *stack_end, however, on ARM glibc's
// _start clobbers it:
// https://sourceware.org/git/?p=glibc.git;a=blob;f=sysdeps/arm/start.S;hb=refs/heads/release/2.31/master#l75
@@ -2617,7 +2624,19 @@ static void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) {
# if SANITIZER_SOLARIS
ucontext_t *ucontext = (ucontext_t *)context;
*pc = ucontext->uc_mcontext.gregs[REG_PC];
- *sp = ucontext->uc_mcontext.gregs[REG_O6] + STACK_BIAS;
+ *sp = ucontext->uc_mcontext.gregs[REG_SP] + STACK_BIAS;
+  // Use a non-faulting load to avoid a SEGV when dereferencing sp after a
+  // stack overflow.  This requires a SPARC V9 CPU.  Cannot use #ASI_PNF
+  // here: it is only supported since clang-19.
+# if defined(__sparcv9)
+ asm("ldxa [%[fp]] 0x82, %[bp]"
+# else
+ asm("lduwa [%[fp]] 0x82, %[bp]"
+# endif
+ : [bp] "=r"(*bp)
+ : [fp] "r"(&((struct frame *)*sp)->fr_savfp));
+ if (*bp)
+ *bp += STACK_BIAS;
# else
// Historical BSDism here.
struct sigcontext *scontext = (struct sigcontext *)context;
@@ -2628,8 +2647,8 @@ static void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) {
*pc = scontext->si_regs.pc;
*sp = scontext->si_regs.u_regs[14];
# endif
-# endif
*bp = (uptr)((uhwptr *)*sp)[14] + STACK_BIAS;
+# endif
# elif defined(__mips__)
ucontext_t *ucontext = (ucontext_t *)context;
*pc = ucontext->uc_mcontext.pc;
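
On the inline asm above: 0x82 is the SPARC V9 primary no-fault ASI
(ASI_PNF), so the load yields 0 for an unmapped address instead of
raising SIGSEGV; that is why *bp is rebiased only when non-zero. The
idiom in isolation (SPARC V9 only, mirroring the patch's 64-bit form):

  #include <cstdint>

  // Non-faulting load: returns *addr if the page is mapped, 0 otherwise.
  static inline uint64_t load_nofault(const uint64_t *addr) {
    uint64_t val;
    asm("ldxa [%[p]] 0x82, %[v]" : [v] "=r"(val) : [p] "r"(addr));
    return val;
  }
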
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.cpp
index 6d61d27..5eeb2a8 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.cpp
@@ -117,15 +117,16 @@ typedef struct user_fpregs elf_fpregset_t;
#if SANITIZER_LINUX
#if SANITIZER_GLIBC
#include <fstab.h>
-#include <net/if_ppp.h>
-#include <netax25/ax25.h>
-#include <netipx/ipx.h>
-#include <netrom/netrom.h>
-#include <obstack.h>
-#if HAVE_RPC_XDR_H
-# include <rpc/xdr.h>
-#endif
-#include <scsi/scsi.h>
+# include <linux/filter.h>
+# include <net/if_ppp.h>
+# include <netax25/ax25.h>
+# include <netipx/ipx.h>
+# include <netrom/netrom.h>
+# include <obstack.h>
+# if HAVE_RPC_XDR_H
+# include <rpc/xdr.h>
+# endif
+# include <scsi/scsi.h>
#else
#include <linux/if_ppp.h>
#include <linux/kd.h>
@@ -531,9 +532,10 @@ unsigned struct_ElfW_Phdr_sz = sizeof(Elf_Phdr);
unsigned struct_audio_buf_info_sz = sizeof(struct audio_buf_info);
unsigned struct_ppp_stats_sz = sizeof(struct ppp_stats);
-#endif // SANITIZER_GLIBC
+ unsigned struct_sock_fprog_sz = sizeof(struct sock_fprog);
+# endif // SANITIZER_GLIBC
-#if !SANITIZER_ANDROID && !SANITIZER_APPLE
+# if !SANITIZER_ANDROID && !SANITIZER_APPLE
unsigned struct_sioc_sg_req_sz = sizeof(struct sioc_sg_req);
unsigned struct_sioc_vif_req_sz = sizeof(struct sioc_vif_req);
#endif
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.h b/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.h
index 34bfef1..ca03841 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.h
@@ -1050,7 +1050,8 @@ extern unsigned struct_serial_struct_sz;
extern unsigned struct_sockaddr_ax25_sz;
extern unsigned struct_unimapdesc_sz;
extern unsigned struct_unimapinit_sz;
-#endif // SANITIZER_LINUX && !SANITIZER_ANDROID
+extern unsigned struct_sock_fprog_sz;
+# endif // SANITIZER_LINUX && !SANITIZER_ANDROID
extern const unsigned long __sanitizer_bufsiz;
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_posix.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_posix.cpp
index 7d7d575..69af646 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_posix.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_posix.cpp
@@ -353,7 +353,15 @@ bool ShouldMockFailureToOpen(const char *path) {
internal_strncmp(path, "/proc/", 6) == 0;
}
-#if SANITIZER_LINUX && !SANITIZER_ANDROID && !SANITIZER_GO
+bool OpenReadsVaArgs(int oflag) {
+# ifdef O_TMPFILE
+ return (oflag & (O_CREAT | O_TMPFILE)) != 0;
+# else
+ return (oflag & O_CREAT) != 0;
+# endif
+}
+
+# if SANITIZER_LINUX && !SANITIZER_ANDROID && !SANITIZER_GO
int GetNamedMappingFd(const char *name, uptr size, int *flags) {
if (!common_flags()->decorate_proc_maps || !name)
return -1;
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_posix.h b/compiler-rt/lib/sanitizer_common/sanitizer_posix.h
index d0954f7..1f0795c 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_posix.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_posix.h
@@ -108,6 +108,7 @@ bool IsStateDetached(int state);
fd_t ReserveStandardFds(fd_t fd);
bool ShouldMockFailureToOpen(const char *path);
+bool OpenReadsVaArgs(int oflag);
// Create a non-file mapping with a given /proc/self/maps name.
uptr MmapNamed(void *addr, uptr length, int prot, int flags, const char *name);
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_report.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_report.cpp
index ffbaf14..351e00d 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_report.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_report.cpp
@@ -227,12 +227,15 @@ static void ReportStackOverflowImpl(const SignalContext &sig, u32 tid,
SanitizerToolName, kDescription, (void *)sig.addr, (void *)sig.pc,
(void *)sig.bp, (void *)sig.sp, tid);
Printf("%s", d.Default());
- InternalMmapVector<BufferedStackTrace> stack_buffer(1);
- BufferedStackTrace *stack = stack_buffer.data();
- stack->Reset();
- unwind(sig, unwind_context, stack);
- stack->Print();
- ReportErrorSummary(kDescription, stack);
+ // Avoid SEGVs in the unwinder when bp couldn't be determined.
+ if (sig.bp) {
+ InternalMmapVector<BufferedStackTrace> stack_buffer(1);
+ BufferedStackTrace *stack = stack_buffer.data();
+ stack->Reset();
+ unwind(sig, unwind_context, stack);
+ stack->Print();
+ ReportErrorSummary(kDescription, stack);
+ }
}
static void ReportDeadlySignalImpl(const SignalContext &sig, u32 tid,
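
This guard pairs with the SPARC change above: a frame pointer that read
back as 0 means the save area was unmapped, and unwinding through it
would SEGV inside the report path. The shape of the check, reduced to
its essentials (sketch):

  #include <cstdint>

  void report_stack_overflow(uintptr_t bp) {
    // The header (pc/sp/addr) is printed unconditionally...
    if (bp != 0) {
      // ...but the unwinder only runs when a frame pointer was actually
      // recovered.
    }
  }
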
diff --git a/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp b/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp
index 53c876f..423d97e 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp
@@ -1680,13 +1680,23 @@ TSAN_INTERCEPTOR(int, fstat64, int fd, void *buf) {
#endif
TSAN_INTERCEPTOR(int, open, const char *name, int oflag, ...) {
- va_list ap;
- va_start(ap, oflag);
- mode_t mode = va_arg(ap, int);
- va_end(ap);
+ mode_t mode = 0;
+ if (OpenReadsVaArgs(oflag)) {
+ va_list ap;
+ va_start(ap, oflag);
+ mode = va_arg(ap, int);
+ va_end(ap);
+ }
+
SCOPED_TSAN_INTERCEPTOR(open, name, oflag, mode);
READ_STRING(thr, pc, name, 0);
- int fd = REAL(open)(name, oflag, mode);
+
+ int fd;
+ if (OpenReadsVaArgs(oflag))
+ fd = REAL(open)(name, oflag, mode);
+ else
+ fd = REAL(open)(name, oflag);
+
if (fd >= 0)
FdFileCreate(thr, pc, fd);
return fd;
diff --git a/compiler-rt/lib/ubsan/ubsan_handlers.cpp b/compiler-rt/lib/ubsan/ubsan_handlers.cpp
index 27d0165..9dbe8e6 100644
--- a/compiler-rt/lib/ubsan/ubsan_handlers.cpp
+++ b/compiler-rt/lib/ubsan/ubsan_handlers.cpp
@@ -634,12 +634,12 @@ static void handleInvalidBuiltin(InvalidBuiltinData *Data, ReportOptions Opts) {
ScopedReport R(Opts, Loc, ET);
Diag(Loc, DL_Error, ET,
- "passing zero to %0, which is not a valid argument")
- << ((Data->Kind == BCK_CTZPassedZero) ? "ctz()" : "clz()");
+ "passing zero to __builtin_%0(), which is not a valid argument")
+ << ((Data->Kind == BCK_CTZPassedZero) ? "ctz" : "clz");
}
void __ubsan::__ubsan_handle_invalid_builtin(InvalidBuiltinData *Data) {
- GET_REPORT_OPTIONS(true);
+ GET_REPORT_OPTIONS(false);
handleInvalidBuiltin(Data, Opts);
}
void __ubsan::__ubsan_handle_invalid_builtin_abort(InvalidBuiltinData *Data) {
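
A quick way to exercise both changes: the message now spells out the
builtin name, and GET_REPORT_OPTIONS(false) makes the non-abort handler
recoverable, matching -fsanitize-recover=builtin. Assuming a clang built
from this tree:

  // Build with -fsanitize=builtin; add -fno-sanitize-recover=builtin to
  // get the aborting variant instead.
  int main(int argc, char **) {
    int zero = argc - argc;      // defeat constant folding
    return __builtin_ctz(zero);  // runtime error: passing zero to
                                 // __builtin_ctz(), which is not a valid
                                 // argument
  }
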
diff --git a/compiler-rt/test/asan/TestCases/Linux/preinstalled_signal.cpp b/compiler-rt/test/asan/TestCases/Linux/preinstalled_signal.cpp
index 71929fd..dd31693 100644
--- a/compiler-rt/test/asan/TestCases/Linux/preinstalled_signal.cpp
+++ b/compiler-rt/test/asan/TestCases/Linux/preinstalled_signal.cpp
@@ -17,6 +17,9 @@
// This way of setting LD_PRELOAD does not work with Android test runner.
// REQUIRES: !android
+// Issue #109573: Cannot use syscall(__NR_rt_sigaction) on Linux/sparc64.
+// XFAIL: target={{sparc.*-.*-linux.*}}
+
#include <assert.h>
#include <signal.h>
#include <stdio.h>
diff --git a/compiler-rt/test/asan/TestCases/Posix/coverage-fork.cpp b/compiler-rt/test/asan/TestCases/Posix/coverage-fork.cpp
index fec9ba0..a876847 100644
--- a/compiler-rt/test/asan/TestCases/Posix/coverage-fork.cpp
+++ b/compiler-rt/test/asan/TestCases/Posix/coverage-fork.cpp
@@ -26,11 +26,14 @@ void baz() { printf("baz\n"); }
int main(int argc, char **argv) {
pid_t child_pid = fork();
+ char buf[100];
if (child_pid == 0) {
- fprintf(stderr, "Child PID: %d\n", getpid());
+ snprintf(buf, sizeof(buf), "Child PID: %ld\n", (long)getpid());
+ write(2, buf, strlen(buf));
baz();
} else {
- fprintf(stderr, "Parent PID: %d\n", getpid());
+ snprintf(buf, sizeof(buf), "Parent PID: %ld\n", (long)getpid());
+ write(2, buf, strlen(buf));
foo();
bar();
diff --git a/compiler-rt/test/asan/lit.cfg.py b/compiler-rt/test/asan/lit.cfg.py
index 05ed7e8..dac3ef0 100644
--- a/compiler-rt/test/asan/lit.cfg.py
+++ b/compiler-rt/test/asan/lit.cfg.py
@@ -153,12 +153,16 @@ if config.asan_dynamic:
if platform.system() == "Windows":
# MSVC-specific tests might also use the clang-cl.exe driver.
if target_is_msvc:
- clang_cl_cxxflags = [
- "-Wno-deprecated-declarations",
- "-WX",
- "-D_HAS_EXCEPTIONS=0",
- "-Zi",
- ] + target_cflags
+ clang_cl_cxxflags = (
+ [
+ "-WX",
+ "-D_HAS_EXCEPTIONS=0",
+ ]
+ + config.debug_info_flags
+ + target_cflags
+ )
+ if config.compiler_id != "MSVC":
+ clang_cl_cxxflags = ["-Wno-deprecated-declarations"] + clang_cl_cxxflags
clang_cl_asan_cxxflags = ["-fsanitize=address"] + clang_cl_cxxflags
if config.asan_dynamic:
clang_cl_asan_cxxflags.append("-MD")
@@ -286,6 +290,12 @@ if config.host_os == "Windows":
[config.compiler_rt_libdir, os.environ.get("PATH", "")]
)
+# msvc needs to be instructed where the compiler-rt libraries are
+if config.compiler_id == "MSVC":
+ config.environment["LIB"] = os.path.pathsep.join(
+ [config.compiler_rt_libdir, config.environment.get("LIB", "")]
+ )
+
# Default test suffixes.
config.suffixes = [".c", ".cpp"]
diff --git a/compiler-rt/test/builtins/Unit/multf3_test.c b/compiler-rt/test/builtins/Unit/multf3_test.c
index 543b5589..0e56155 100644
--- a/compiler-rt/test/builtins/Unit/multf3_test.c
+++ b/compiler-rt/test/builtins/Unit/multf3_test.c
@@ -77,6 +77,12 @@ int main()
UINT64_C(0x0),
UINT64_C(0x0)))
return 1;
+  // Test the carry between lo and hi in the widening multiply.
+ if (test__multf3(0x0.7fffffffffffffffffffffffffffp-16382L,
+ 0x1.7fffffffffffffffffffffffffffp+1L,
+ UINT64_C(0x00017fffffffffff),
+ UINT64_C(0xfffffffffffffffc)))
+ return 1;
#else
printf("skipped\n");
diff --git a/compiler-rt/test/nsan/fcmp.cpp b/compiler-rt/test/nsan/fcmp.cpp
new file mode 100644
index 0000000..e9ec84e
--- /dev/null
+++ b/compiler-rt/test/nsan/fcmp.cpp
@@ -0,0 +1,28 @@
+// RUN: %clangxx_nsan -O2 -g %s -o %t
+// RUN: env NSAN_OPTIONS=check_cmp=true,halt_on_error=0 %run %t 2>&1 | FileCheck %s -check-prefix=CMP_ENABLE
+// RUN: env NSAN_OPTIONS=check_cmp=false,halt_on_error=0 %run %t 2>&1 | FileCheck %s -check-prefix=CMP_DISABLE
+
+#include <cmath>
+#include <cstdio>
+
+// In double precision, 0.6/0.2 is slightly below 3, so the comparison starts
+// failing once the threshold drops below an error bound that depends on the
+// precision of the computation.
+__attribute__((noinline)) // To check call stack reporting.
+bool DoCmp(double a, double b, double c, double threshold) {
+ return c - a / b < threshold;
+ // CMP_ENABLE: WARNING: NumericalStabilitySanitizer: floating-point comparison results depend on precision
+ // CMP_ENABLE: double {{ *}}precision dec (native): {{.*}}<{{.*}}
+ // CMP_ENABLE: __float128{{ *}}precision dec (shadow): {{.*}}<{{.*}}
+ // CMP_ENABLE: {{#0 .*in DoCmp}}
+}
+
+int main() {
+ double threshold = 1.0;
+ for (int i = 0; i < 60; ++i) {
+ threshold /= 2;
+ // CMP_DISABLE: value at threshold {{.*}}
+ printf("value at threshold %.20f: %i\n", threshold,
+ DoCmp(0.6, 0.2, 3.0, threshold));
+ }
+ return 0;
+}
diff --git a/compiler-rt/test/profile/Posix/instrprof-dlopen-norpath.test b/compiler-rt/test/profile/Posix/instrprof-dlopen-norpath.test
index ba42433..0d75018 100644
--- a/compiler-rt/test/profile/Posix/instrprof-dlopen-norpath.test
+++ b/compiler-rt/test/profile/Posix/instrprof-dlopen-norpath.test
@@ -1,8 +1,8 @@
RUN: rm -rf %t && split-file %s %t && cd %t
-RUN: %clang_pgogen -fPIC foo.c -c -Xclang -fprofile-instrument-path="default_foo_%m.profraw"
-RUN: %clang_pgogen -fPIC foo2.c -c -Xclang -fprofile-instrument-path="default_foo2_%m.profraw"
-RUN: %clang_pgogen -shared foo.o -o shr_foo.o %if target={{.*aix.*}} %{ -bcdtors:mbr %}
-RUN: %clang_pgogen -shared foo2.o -o shr_foo2.o
+RUN: %clang_pgogen -fprofile-update=atomic -fPIC foo.c -c -Xclang -fprofile-instrument-path="default_foo_%m.profraw"
+RUN: %clang_pgogen -fprofile-update=atomic -fPIC foo2.c -c -Xclang -fprofile-instrument-path="default_foo2_%m.profraw"
+RUN: %clang_pgogen -fprofile-update=atomic -shared foo.o -o shr_foo.o %if target={{.*aix.*}} %{ -bcdtors:mbr %}
+RUN: %clang_pgogen -fprofile-update=atomic -shared foo2.o -o shr_foo2.o
RUN: %clang_pgogen common.c -c
diff --git a/compiler-rt/test/rtsan/halt_on_error.cpp b/compiler-rt/test/rtsan/halt_on_error.cpp
new file mode 100644
index 0000000..c2ebdf3
--- /dev/null
+++ b/compiler-rt/test/rtsan/halt_on_error.cpp
@@ -0,0 +1,26 @@
+// RUN: %clangxx -fsanitize=realtime %s -o %t
+// RUN: %env_rtsan_opts="halt_on_error=true" not %run %t 2>&1 | FileCheck %s
+// RUN: %env_rtsan_opts="halt_on_error=false" %run %t 2>&1 | FileCheck %s --check-prefixes=CHECK-NO-HALT,CHECK
+// UNSUPPORTED: ios
+
+// Intent: ensure that with halt_on_error=false the program does not exit on
+// the first violation.
+
+#include <stdlib.h>
+
+void *MallocViolation() { return malloc(10); }
+
+void FreeViolation(void *Ptr) { free(Ptr); }
+
+void process() [[clang::nonblocking]] {
+ void *Ptr = MallocViolation();
+ FreeViolation(Ptr);
+}
+
+int main() {
+ process();
+ return 0;
+ // CHECK: ==ERROR: RealtimeSanitizer
+ // CHECK-NEXT: {{.*`malloc`.*}}
+ // CHECK-NO-HALT: ==ERROR: RealtimeSanitizer
+ // CHECK-NO-HALT-NEXT: {{.*`free`.*}}
+}
diff --git a/compiler-rt/test/sanitizer_common/TestCases/Linux/prctl.cpp b/compiler-rt/test/sanitizer_common/TestCases/Linux/prctl.cpp
index cbff02d..dab1d1b 100644
--- a/compiler-rt/test/sanitizer_common/TestCases/Linux/prctl.cpp
+++ b/compiler-rt/test/sanitizer_common/TestCases/Linux/prctl.cpp
@@ -4,6 +4,8 @@
#include <assert.h>
#include <errno.h>
+#include <linux/filter.h>
+#include <linux/seccomp.h>
#include <stdint.h>
#include <string.h>
#include <sys/mman.h>
@@ -78,5 +80,13 @@ int main() {
}
}
+ sock_filter f[] = {{.code = (BPF_LD | BPF_W | BPF_ABS),
+ .k = (uint32_t)(SKF_AD_OFF | SKF_AD_CPU)},
+ {.code = (BPF_RET | BPF_A), .k = 0}};
+ sock_fprog pr = {.len = 2, .filter = f};
+
+ res = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &pr);
+ assert(res == -1);
+
return 0;
}
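
The assert expects failure: without CAP_SYS_ADMIN, the kernel rejects
PR_SET_SECCOMP unless no_new_privs was set first (see seccomp(2)), so the
test drives the interceptor without actually installing the filter. For
reference, the success path would look roughly like this (sketch):

  #include <linux/filter.h>
  #include <linux/seccomp.h>
  #include <sys/prctl.h>

  int install_filter(struct sock_fprog *pr) {
    // Required for unprivileged callers before SECCOMP_MODE_FILTER.
    if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0) == -1)
      return -1;
    return prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, pr);
  }
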
diff --git a/compiler-rt/test/ubsan/TestCases/Integer/suppressions-builtin.cpp b/compiler-rt/test/ubsan/TestCases/Integer/suppressions-builtin.cpp
new file mode 100644
index 0000000..60377c4
--- /dev/null
+++ b/compiler-rt/test/ubsan/TestCases/Integer/suppressions-builtin.cpp
@@ -0,0 +1,18 @@
+// RUN: %clangxx -fsanitize=builtin -g0 %s -o %t
+
+// Suppression by symbol name requires the compiler-rt runtime to be able to
+// symbolize stack addresses.
+// REQUIRES: can-symbolize
+// UNSUPPORTED: android
+
+// RUN: echo "invalid-builtin-use:do_ctz" > %t.func-supp
+// RUN: %env_ubsan_opts=halt_on_error=1:suppressions='"%t.func-supp"' %run %t
+
+#include <stdint.h>
+
+extern "C" void do_ctz(int n) { __builtin_ctz(0); }
+
+int main() {
+ do_ctz(0);
+ return 0;
+}
diff --git a/compiler-rt/test/ubsan/TestCases/Misc/builtins.cpp b/compiler-rt/test/ubsan/TestCases/Misc/builtins.cpp
index f8f564c..a635f7f 100644
--- a/compiler-rt/test/ubsan/TestCases/Misc/builtins.cpp
+++ b/compiler-rt/test/ubsan/TestCases/Misc/builtins.cpp
@@ -6,25 +6,25 @@
// RUN: not %run %t.abort 2>&1 | FileCheck %s --check-prefix=ABORT
void check_ctz(int n) {
- // ABORT: builtins.cpp:[[@LINE+2]]:17: runtime error: passing zero to ctz(), which is not a valid argument
- // RECOVER: builtins.cpp:[[@LINE+1]]:17: runtime error: passing zero to ctz(), which is not a valid argument
+ // ABORT: builtins.cpp:[[@LINE+2]]:17: runtime error: passing zero to __builtin_ctz(), which is not a valid argument
+ // RECOVER: builtins.cpp:[[@LINE+1]]:17: runtime error: passing zero to __builtin_ctz(), which is not a valid argument
__builtin_ctz(n);
- // RECOVER: builtins.cpp:[[@LINE+1]]:18: runtime error: passing zero to ctz(), which is not a valid argument
+ // RECOVER: builtins.cpp:[[@LINE+1]]:18: runtime error: passing zero to __builtin_ctz(), which is not a valid argument
__builtin_ctzl(n);
- // RECOVER: builtins.cpp:[[@LINE+1]]:19: runtime error: passing zero to ctz(), which is not a valid argument
+ // RECOVER: builtins.cpp:[[@LINE+1]]:19: runtime error: passing zero to __builtin_ctz(), which is not a valid argument
__builtin_ctzll(n);
}
void check_clz(int n) {
- // RECOVER: builtins.cpp:[[@LINE+1]]:17: runtime error: passing zero to clz(), which is not a valid argument
+ // RECOVER: builtins.cpp:[[@LINE+1]]:17: runtime error: passing zero to __builtin_clz(), which is not a valid argument
__builtin_clz(n);
- // RECOVER: builtins.cpp:[[@LINE+1]]:18: runtime error: passing zero to clz(), which is not a valid argument
+ // RECOVER: builtins.cpp:[[@LINE+1]]:18: runtime error: passing zero to __builtin_clz(), which is not a valid argument
__builtin_clzl(n);
- // RECOVER: builtins.cpp:[[@LINE+1]]:19: runtime error: passing zero to clz(), which is not a valid argument
+ // RECOVER: builtins.cpp:[[@LINE+1]]:19: runtime error: passing zero to __builtin_clz(), which is not a valid argument
__builtin_clzll(n);
}
diff --git a/flang/include/flang/Optimizer/Transforms/Passes.h b/flang/include/flang/Optimizer/Transforms/Passes.h
index fcfb867..3b2af3a 100644
--- a/flang/include/flang/Optimizer/Transforms/Passes.h
+++ b/flang/include/flang/Optimizer/Transforms/Passes.h
@@ -39,6 +39,7 @@ namespace fir {
#define GEN_PASS_DECL_ASSUMEDRANKOPCONVERSION
#define GEN_PASS_DECL_CHARACTERCONVERSION
#define GEN_PASS_DECL_CFGCONVERSION
+#define GEN_PASS_DECL_CUFADDCONSTRUCTOR
#define GEN_PASS_DECL_CUFIMPLICITDEVICEGLOBAL
#define GEN_PASS_DECL_CUFOPCONVERSION
#define GEN_PASS_DECL_EXTERNALNAMECONVERSION
diff --git a/flang/include/flang/Optimizer/Transforms/Passes.td b/flang/include/flang/Optimizer/Transforms/Passes.td
index ab98591..bf75123 100644
--- a/flang/include/flang/Optimizer/Transforms/Passes.td
+++ b/flang/include/flang/Optimizer/Transforms/Passes.td
@@ -436,4 +436,11 @@ def CufImplicitDeviceGlobal :
];
}
+def CUFAddConstructor : Pass<"cuf-add-constructor", "mlir::ModuleOp"> {
+ let summary = "Add constructor to register CUDA Fortran allocators";
+ let dependentDialects = [
+ "mlir::func::FuncDialect"
+ ];
+}
+
#endif // FLANG_OPTIMIZER_TRANSFORMS_PASSES
diff --git a/flang/lib/Optimizer/Transforms/CMakeLists.txt b/flang/lib/Optimizer/Transforms/CMakeLists.txt
index b68e3d6..5e1a029 100644
--- a/flang/lib/Optimizer/Transforms/CMakeLists.txt
+++ b/flang/lib/Optimizer/Transforms/CMakeLists.txt
@@ -9,6 +9,7 @@ add_flang_library(FIRTransforms
CompilerGeneratedNames.cpp
ConstantArgumentGlobalisation.cpp
ControlFlowConverter.cpp
+ CUFAddConstructor.cpp
CufImplicitDeviceGlobal.cpp
CufOpConversion.cpp
ArrayValueCopy.cpp
diff --git a/flang/lib/Optimizer/Transforms/CUFAddConstructor.cpp b/flang/lib/Optimizer/Transforms/CUFAddConstructor.cpp
new file mode 100644
index 0000000..48620fb
--- /dev/null
+++ b/flang/lib/Optimizer/Transforms/CUFAddConstructor.cpp
@@ -0,0 +1,75 @@
+//===-- CUFAddConstructor.cpp ---------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "flang/Optimizer/Builder/FIRBuilder.h"
+#include "flang/Optimizer/Dialect/CUF/CUFOps.h"
+#include "flang/Optimizer/Dialect/FIRAttr.h"
+#include "flang/Optimizer/Dialect/FIRDialect.h"
+#include "flang/Optimizer/Dialect/FIROpsSupport.h"
+#include "flang/Runtime/entry-names.h"
+#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
+#include "mlir/Pass/Pass.h"
+#include "llvm/ADT/SmallVector.h"
+
+namespace fir {
+#define GEN_PASS_DEF_CUFADDCONSTRUCTOR
+#include "flang/Optimizer/Transforms/Passes.h.inc"
+} // namespace fir
+
+namespace {
+
+static constexpr llvm::StringRef cudaFortranCtorName{
+ "__cudaFortranConstructor"};
+
+struct CUFAddConstructor
+ : public fir::impl::CUFAddConstructorBase<CUFAddConstructor> {
+
+ void runOnOperation() override {
+ mlir::ModuleOp mod = getOperation();
+ mlir::OpBuilder builder{mod.getBodyRegion()};
+ builder.setInsertionPointToEnd(mod.getBody());
+ mlir::Location loc = mod.getLoc();
+ auto *ctx = mod.getContext();
+ auto voidTy = mlir::LLVM::LLVMVoidType::get(ctx);
+ auto funcTy =
+ mlir::LLVM::LLVMFunctionType::get(voidTy, {}, /*isVarArg=*/false);
+
+ // Symbol reference to CUFRegisterAllocator.
+ builder.setInsertionPointToEnd(mod.getBody());
+ auto registerFuncOp = builder.create<mlir::LLVM::LLVMFuncOp>(
+ loc, RTNAME_STRING(CUFRegisterAllocator), funcTy);
+ registerFuncOp.setVisibility(mlir::SymbolTable::Visibility::Private);
+ auto cufRegisterAllocatorRef = mlir::SymbolRefAttr::get(
+ mod.getContext(), RTNAME_STRING(CUFRegisterAllocator));
+ builder.setInsertionPointToEnd(mod.getBody());
+
+    // Create the constructor function that calls CUFRegisterAllocator.
+ builder.setInsertionPointToEnd(mod.getBody());
+ auto func = builder.create<mlir::LLVM::LLVMFuncOp>(loc, cudaFortranCtorName,
+ funcTy);
+ func.setLinkage(mlir::LLVM::Linkage::Internal);
+ builder.setInsertionPointToStart(func.addEntryBlock(builder));
+ builder.create<mlir::LLVM::CallOp>(loc, funcTy, cufRegisterAllocatorRef);
+ builder.create<mlir::LLVM::ReturnOp>(loc, mlir::ValueRange{});
+
+ // Create the llvm.global_ctor with the function.
+    // TODO: We might want a utility that retrieves the global ctor if it was
+    // already created and appends new functions to it.
+ builder.setInsertionPointToEnd(mod.getBody());
+ llvm::SmallVector<mlir::Attribute> funcs;
+ funcs.push_back(
+ mlir::FlatSymbolRefAttr::get(mod.getContext(), func.getSymName()));
+ llvm::SmallVector<int> priorities;
+ priorities.push_back(0);
+ builder.create<mlir::LLVM::GlobalCtorsOp>(
+ mod.getLoc(), builder.getArrayAttr(funcs),
+ builder.getI32ArrayAttr(priorities));
+ }
+};
+
+} // end anonymous namespace
diff --git a/flang/test/Driver/target-cpu-features.f90 b/flang/test/Driver/target-cpu-features.f90
index 1c77d4a..e3eb849 100644
--- a/flang/test/Driver/target-cpu-features.f90
+++ b/flang/test/Driver/target-cpu-features.f90
@@ -23,6 +23,12 @@
! RUN: %flang --target=x86_64-linux-gnu -mno-apx-features=ccmp -c %s -### 2>&1 \
! RUN: | FileCheck %s -check-prefix=CHECK-NO-APX
+! RUN: %flang --target=x86_64-linux-gnu -mevex512 -c %s -### 2>&1 \
+! RUN: | FileCheck %s -check-prefix=CHECK-EVEX512
+
+! RUN: %flang --target=x86_64-linux-gnu -mno-evex512 -c %s -### 2>&1 \
+! RUN: | FileCheck %s -check-prefix=CHECK-NO-EVEX512
+
! RUN: %flang --target=x86_64h-linux-gnu -c %s -### 2>&1 \
! RUN: | FileCheck %s -check-prefix=CHECK-X86_64H
@@ -63,6 +69,12 @@
! CHECK-NO-APX: "-fc1" "-triple" "x86_64-unknown-linux-gnu"
! CHECK-NO-APX-SAME: "-target-feature" "-ccmp"
+! CHECK-EVEX512: "-fc1" "-triple" "x86_64-unknown-linux-gnu"
+! CHECK-EVEX512-SAME: "-target-feature" "+evex512"
+
+! CHECK-NO-EVEX512: "-fc1" "-triple" "x86_64-unknown-linux-gnu"
+! CHECK-NO-EVEX512-SAME: "-target-feature" "-evex512"
+
! CHECK-X86_64H: "-fc1" "-triple" "x86_64h-unknown-linux-gnu"
! CHECK-X86_64H-SAME: "-target-cpu" "x86-64" "-target-feature" "-rdrnd" "-target-feature" "-aes" "-target-feature" "-pclmul" "-target-feature" "-rtm" "-target-feature" "-fsgsbase"
diff --git a/flang/test/Fir/CUDA/cuda-constructor.f90 b/flang/test/Fir/CUDA/cuda-constructor.f90
new file mode 100644
index 0000000..d02350b
--- /dev/null
+++ b/flang/test/Fir/CUDA/cuda-constructor.f90
@@ -0,0 +1,12 @@
+! RUN: bbc -fcuda -emit-hlfir %s -o - | fir-opt --cuf-add-constructor | FileCheck %s
+
+program main
+ real, device :: ahost(10)
+end
+
+! CHECK: llvm.func @_FortranACUFRegisterAllocator() attributes {sym_visibility = "private"}
+! CHECK-LABEL: llvm.func internal @__cudaFortranConstructor() {
+! CHECK: llvm.call @_FortranACUFRegisterAllocator() : () -> ()
+! CHECK: llvm.return
+! CHECK: }
+! CHECK: llvm.mlir.global_ctors {ctors = [@__cudaFortranConstructor], priorities = [0 : i32]}
diff --git a/libc/cmake/modules/CheckCompilerFeatures.cmake b/libc/cmake/modules/CheckCompilerFeatures.cmake
index 63145fe..862c7ec 100644
--- a/libc/cmake/modules/CheckCompilerFeatures.cmake
+++ b/libc/cmake/modules/CheckCompilerFeatures.cmake
@@ -10,6 +10,7 @@ set(
"builtin_round"
"builtin_roundeven"
"float16"
+ "float16_conversion"
"float128"
"fixed_point"
)
@@ -61,15 +62,21 @@ foreach(feature IN LISTS ALL_COMPILER_FEATURES)
set(link_options "")
if(${feature} STREQUAL "fixed_point")
list(APPEND compile_options "-ffixed-point")
- elseif(${feature} MATCHES "^builtin_")
+ elseif(${feature} MATCHES "^builtin_" OR
+ ${feature} STREQUAL "float16_conversion")
set(compile_options ${LIBC_COMPILE_OPTIONS_DEFAULT})
set(link_options -nostdlib)
- # The compiler might handle calls to rounding builtins by generating calls
- # to the respective libc math functions, in which case we cannot use these
+ # The compiler might handle calls to math builtins by generating calls to
+ # the respective libc math functions, in which case we cannot use these
# builtins in our implementations of these functions. We check that this is
# not the case by trying to link an executable, since linking would fail due
# to unresolved references with -nostdlib if calls to libc functions were
# generated.
+ #
+ # We also had issues with soft-float float16 conversion functions using both
+ # compiler-rt and libgcc, so we also check whether we can convert from and
+ # to float16 without calls to compiler runtime functions by trying to link
+ # an executable with -nostdlib.
set(CMAKE_TRY_COMPILE_TARGET_TYPE EXECUTABLE)
endif()
@@ -97,6 +104,8 @@ foreach(feature IN LISTS ALL_COMPILER_FEATURES)
list(APPEND AVAILABLE_COMPILER_FEATURES ${feature})
if(${feature} STREQUAL "float16")
set(LIBC_TYPES_HAS_FLOAT16 TRUE)
+ elseif(${feature} STREQUAL "float16_conversion")
+ add_compile_definitions(__LIBC_USE_FLOAT16_CONVERSION)
elseif(${feature} STREQUAL "float128")
set(LIBC_TYPES_HAS_FLOAT128 TRUE)
elseif(${feature} STREQUAL "fixed_point")
@@ -115,6 +124,10 @@ foreach(feature IN LISTS ALL_COMPILER_FEATURES)
endif()
endforeach()
+set(CMAKE_TRY_COMPILE_TARGET_TYPE STATIC_LIBRARY)
+set(compile_options ${LIBC_COMPILE_OPTIONS_DEFAULT})
+set(link_options "")
+
message(STATUS "Compiler features available: ${AVAILABLE_COMPILER_FEATURES}")
### Compiler Feature Detection ###
diff --git a/libc/cmake/modules/compiler_features/check_float16_conversion.cpp b/libc/cmake/modules/compiler_features/check_float16_conversion.cpp
new file mode 100644
index 0000000..09ac8e9
--- /dev/null
+++ b/libc/cmake/modules/compiler_features/check_float16_conversion.cpp
@@ -0,0 +1,30 @@
+#include "include/llvm-libc-macros/float16-macros.h"
+#include "include/llvm-libc-types/float128.h"
+
+#ifndef LIBC_TYPES_HAS_FLOAT16
+#error unsupported
+#endif
+
+_Float16 cvt_from_float(float x) { return static_cast<_Float16>(x); }
+
+_Float16 cvt_from_double(double x) { return static_cast<_Float16>(x); }
+
+_Float16 cvt_from_long_double(long double x) {
+ return static_cast<_Float16>(x);
+}
+
+#ifdef LIBC_TYPES_HAS_FLOAT128
+_Float16 cvt_from_float128(float128 x) { return static_cast<_Float16>(x); }
+#endif
+
+float cvt_to_float(_Float16 x) { return x; }
+
+double cvt_to_double(_Float16 x) { return x; }
+
+long double cvt_to_long_double(_Float16 x) { return x; }
+
+#ifdef LIBC_TYPES_HAS_FLOAT128
+float128 cvt_to_float128(_Float16 x) { return x; }
+#endif
+
+extern "C" void _start() {}
diff --git a/libc/config/gpu/entrypoints.txt b/libc/config/gpu/entrypoints.txt
index 706f603..b4cfe47 100644
--- a/libc/config/gpu/entrypoints.txt
+++ b/libc/config/gpu/entrypoints.txt
@@ -191,6 +191,7 @@ set(TARGET_LIBC_ENTRYPOINTS
libc.src.stdlib.at_quick_exit
libc.src.stdlib.quick_exit
libc.src.stdlib.getenv
+ libc.src.stdlib.system
# TODO: Implement these correctly
libc.src.stdlib.aligned_alloc
@@ -239,6 +240,7 @@ set(TARGET_LIBC_ENTRYPOINTS
libc.src.stdio.putchar
libc.src.stdio.puts
libc.src.stdio.remove
+ libc.src.stdio.rename
libc.src.stdio.stderr
libc.src.stdio.stdin
libc.src.stdio.stdout
diff --git a/libc/config/gpu/headers.txt b/libc/config/gpu/headers.txt
index fc952c4..adbd014 100644
--- a/libc/config/gpu/headers.txt
+++ b/libc/config/gpu/headers.txt
@@ -2,6 +2,7 @@ set(TARGET_PUBLIC_HEADERS
libc.include.assert
libc.include.ctype
libc.include.string
+ libc.include.strings
libc.include.signal
libc.include.float
libc.include.stdint
diff --git a/libc/docs/gpu/support.rst b/libc/docs/gpu/support.rst
index c8b1052..9c151a5 100644
--- a/libc/docs/gpu/support.rst
+++ b/libc/docs/gpu/support.rst
@@ -47,7 +47,6 @@ Function Name Available RPC Required
bcmp |check|
bcopy |check|
bzero |check|
-index |check|
memccpy |check|
memchr |check|
memcmp |check|
@@ -57,11 +56,8 @@ memmove |check|
mempcpy |check|
memrchr |check|
memset |check|
-rindex |check|
stpcpy |check|
stpncpy |check|
-strcasecmp |check|
-strcasestr |check|
strcat |check|
strchr |check|
strchrnul |check|
@@ -74,7 +70,6 @@ strerror |check|
strlcat |check|
strlcpy |check|
strlen |check|
-strncasecmp |check|
strncat |check|
strncmp |check|
strncpy |check|
@@ -90,6 +85,21 @@ strtok_r |check|
strxfrm |check|
============= ========= ============
+strings.h
+---------
+
+============= ========= ============
+Function Name Available RPC Required
+============= ========= ============
+bcmp |check|
+bcopy |check|
+bzero |check|
+strcasecmp |check|
+strcasestr |check|
+index |check|
+rindex |check|
+============= ========= ============
+
stdbit.h
--------
@@ -176,6 +186,7 @@ atol |check|
atoll |check|
exit |check| |check|
abort |check| |check|
+system |check| |check|
labs |check|
llabs |check|
div |check|
@@ -229,6 +240,7 @@ fputs |check| |check|
fputc |check| |check|
fwrite |check| |check|
remove |check| |check|
+rename |check| |check|
putc |check| |check|
printf |check| |check|
vprintf |check| |check|
@@ -239,8 +251,8 @@ snprintf |check|
vsprintf |check|
vsnprintf |check|
sscanf |check|
-scanf |check|
-fscanf |check|
+scanf |check| |check|
+fscanf |check| |check|
putchar |check| |check|
fclose |check| |check|
fopen |check| |check|
diff --git a/libc/include/llvm-libc-macros/math-function-macros.h b/libc/include/llvm-libc-macros/math-function-macros.h
index f8cd9d8..68f9ff9 100644
--- a/libc/include/llvm-libc-macros/math-function-macros.h
+++ b/libc/include/llvm-libc-macros/math-function-macros.h
@@ -19,5 +19,6 @@
#define fpclassify(x) \
__builtin_fpclassify(FP_NAN, FP_INFINITE, FP_NORMAL, FP_SUBNORMAL, FP_ZERO, x)
#define isnormal(x) __builtin_isnormal(x)
+#define issubnormal(x) (fpclassify(x) == FP_SUBNORMAL)
#endif // LLVM_LIBC_MACROS_MATH_FUNCTION_MACROS_H
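
Usage sketch for the new macro (assuming this llvm-libc header is the one
in effect; issubnormal() is not in standard math.h):

  #include <math.h>

  int main() {
    double sub = 0x1p-1030;  // below DBL_MIN (0x1p-1022), hence subnormal
    return (issubnormal(sub) && !issubnormal(1.0) && !issubnormal(0.0))
               ? 0
               : 1;
  }
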
diff --git a/libc/include/llvm-libc-types/rpc_opcodes_t.h b/libc/include/llvm-libc-types/rpc_opcodes_t.h
index 45050e8..1a6c0cd 100644
--- a/libc/include/llvm-libc-types/rpc_opcodes_t.h
+++ b/libc/include/llvm-libc-types/rpc_opcodes_t.h
@@ -38,6 +38,8 @@ typedef enum {
RPC_PRINTF_TO_STDERR_PACKED,
RPC_PRINTF_TO_STREAM_PACKED,
RPC_REMOVE,
+ RPC_RENAME,
+ RPC_SYSTEM,
RPC_LAST = 0xFFFF,
} rpc_opcode_t;
diff --git a/libc/newhdrgen/yaml/stdlib.yaml b/libc/newhdrgen/yaml/stdlib.yaml
index 5da49b8..c6c95e4 100644
--- a/libc/newhdrgen/yaml/stdlib.yaml
+++ b/libc/newhdrgen/yaml/stdlib.yaml
@@ -333,3 +333,9 @@ functions:
- type: char **__restrict
- type: int
- type: locale_t
+ - name: system
+ standards:
+ - stdc
+ return_type: int
+ arguments:
+ - type: const char *
diff --git a/libc/spec/stdc.td b/libc/spec/stdc.td
index c7b697d..7caf543 100644
--- a/libc/spec/stdc.td
+++ b/libc/spec/stdc.td
@@ -1340,6 +1340,8 @@ def StdC : StandardSpec<"stdc"> {
FunctionSpec<"atexit", RetValSpec<IntType>, [ArgSpec<AtexitHandlerT>]>,
FunctionSpec<"exit", RetValSpec<NoReturn>, [ArgSpec<IntType>]>,
FunctionSpec<"quick_exit", RetValSpec<NoReturn>, [ArgSpec<IntType>]>,
+
+ FunctionSpec<"system", RetValSpec<IntType>, [ArgSpec<ConstCharPtr>]>,
]
>;
diff --git a/libc/src/__support/FPUtil/CMakeLists.txt b/libc/src/__support/FPUtil/CMakeLists.txt
index ea1e0e8..522b4af 100644
--- a/libc/src/__support/FPUtil/CMakeLists.txt
+++ b/libc/src/__support/FPUtil/CMakeLists.txt
@@ -92,11 +92,14 @@ add_header_library(
HDRS
except_value_utils.h
DEPENDS
+ .cast
.fp_bits
.fenv_impl
.rounding_mode
libc.src.__support.CPP.optional
libc.src.__support.macros.optimization
+ libc.src.__support.macros.properties.cpu_features
+ libc.src.__support.macros.properties.types
)
@@ -175,9 +178,13 @@ add_header_library(
.fenv_impl
.fp_bits
.multiply_add
+ .rounding_mode
+ libc.hdr.errno_macros
+ libc.hdr.fenv_macros
libc.src.__support.CPP.type_traits
libc.src.__support.big_int
libc.src.__support.macros.optimization
+ libc.src.__support.macros.properties.types
)
add_header_library(
@@ -217,18 +224,32 @@ add_header_library(
HDRS
ManipulationFunctions.h
DEPENDS
+ .cast
+ .dyadic_float
.fenv_impl
.fp_bits
- .dyadic_float
.nearest_integer_operations
.normal_float
libc.hdr.math_macros
+ libc.src.errno.errno
+ libc.src.__support.common
libc.src.__support.CPP.bit
libc.src.__support.CPP.limits
libc.src.__support.CPP.type_traits
- libc.src.__support.common
libc.src.__support.macros.optimization
- libc.src.errno.errno
+)
+
+add_header_library(
+ cast
+ HDRS
+ cast.h
+ DEPENDS
+ .dyadic_float
+ .fp_bits
+ libc.hdr.fenv_macros
+ libc.src.__support.CPP.algorithm
+ libc.src.__support.CPP.type_traits
+ libc.src.__support.macros.properties.types
)
add_subdirectory(generic)
diff --git a/libc/src/__support/FPUtil/ManipulationFunctions.h b/libc/src/__support/FPUtil/ManipulationFunctions.h
index a14f355..66bfe2a 100644
--- a/libc/src/__support/FPUtil/ManipulationFunctions.h
+++ b/libc/src/__support/FPUtil/ManipulationFunctions.h
@@ -12,6 +12,7 @@
#include "FPBits.h"
#include "NearestIntegerOperations.h"
#include "NormalFloat.h"
+#include "cast.h"
#include "dyadic_float.h"
#include "rounding_mode.h"
@@ -192,7 +193,8 @@ ldexp(T x, U exp) {
// For all other values, NormalFloat to T conversion handles it the right way.
DyadicFloat<FPBits<T>::STORAGE_LEN> normal(bits.get_val());
normal.exponent += static_cast<int>(exp);
- return static_cast<T>(normal);
+ // TODO: Add tests for exceptions.
+ return normal.template as<T, /*ShouldRaiseExceptions=*/true>();
}
template <typename T, typename U,
@@ -207,17 +209,17 @@ LIBC_INLINE T nextafter(T from, U to) {
FPBits<U> to_bits(to);
if (to_bits.is_nan())
- return static_cast<T>(to);
+ return cast<T>(to);
  // NOTE: This would work only if `U` has a greater or equal precision than
  // `T`. Otherwise `from` could lose its precision and the following statement
  // could incorrectly evaluate to `true`.
- if (static_cast<U>(from) == to)
- return static_cast<T>(to);
+ if (cast<U>(from) == to)
+ return cast<T>(to);
using StorageType = typename FPBits<T>::StorageType;
if (from != T(0)) {
- if ((static_cast<U>(from) < to) == (from > T(0))) {
+ if ((cast<U>(from) < to) == (from > T(0))) {
from_bits = FPBits<T>(StorageType(from_bits.uintval() + 1));
} else {
from_bits = FPBits<T>(StorageType(from_bits.uintval() - 1));
diff --git a/libc/src/__support/FPUtil/cast.h b/libc/src/__support/FPUtil/cast.h
new file mode 100644
index 0000000..126f385
--- /dev/null
+++ b/libc/src/__support/FPUtil/cast.h
@@ -0,0 +1,65 @@
+//===-- Conversion between floating-point types -----------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC___SUPPORT_FPUTIL_CAST_H
+#define LLVM_LIBC_SRC___SUPPORT_FPUTIL_CAST_H
+
+#include "FPBits.h"
+#include "dyadic_float.h"
+#include "hdr/fenv_macros.h"
+#include "src/__support/CPP/algorithm.h"
+#include "src/__support/CPP/type_traits.h"
+#include "src/__support/macros/properties/types.h"
+
+namespace LIBC_NAMESPACE::fputil {
+
+template <typename OutType, typename InType>
+LIBC_INLINE constexpr cpp::enable_if_t<cpp::is_floating_point_v<OutType> &&
+ cpp::is_floating_point_v<InType>,
+ OutType>
+cast(InType x) {
+#if defined(LIBC_TYPES_HAS_FLOAT16) && !defined(__LIBC_USE_FLOAT16_CONVERSION)
+ if constexpr (cpp::is_same_v<OutType, float16> ||
+ cpp::is_same_v<InType, float16>) {
+ using InFPBits = FPBits<InType>;
+ using InStorageType = typename InFPBits::StorageType;
+ using OutFPBits = FPBits<OutType>;
+ using OutStorageType = typename OutFPBits::StorageType;
+
+ InFPBits x_bits(x);
+
+ if (x_bits.is_nan()) {
+ if (x_bits.is_signaling_nan()) {
+ raise_except_if_required(FE_INVALID);
+ return OutFPBits::quiet_nan().get_val();
+ }
+
+ InStorageType x_mant = x_bits.get_mantissa();
+ if (InFPBits::FRACTION_LEN > OutFPBits::FRACTION_LEN)
+ x_mant >>= InFPBits::FRACTION_LEN - OutFPBits::FRACTION_LEN;
+ return OutFPBits::quiet_nan(x_bits.sign(),
+ static_cast<OutStorageType>(x_mant))
+ .get_val();
+ }
+
+ if (x_bits.is_inf())
+ return OutFPBits::inf(x_bits.sign()).get_val();
+
+ constexpr size_t MAX_FRACTION_LEN =
+ cpp::max(OutFPBits::FRACTION_LEN, InFPBits::FRACTION_LEN);
+ DyadicFloat<cpp::bit_ceil(MAX_FRACTION_LEN)> xd(x);
+ return xd.template as<OutType, /*ShouldSignalExceptions=*/true>();
+ }
+#endif
+
+ return static_cast<OutType>(x);
+}
+
+} // namespace LIBC_NAMESPACE::fputil
+
+#endif // LLVM_LIBC_SRC___SUPPORT_FPUTIL_CAST_H
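
How call sites are expected to use this (the math/generic hunks below do
exactly that): route float16 conversions through fputil::cast so sNaN
quieting, overflow, and inexact raise the right exceptions even when
__LIBC_USE_FLOAT16_CONVERSION is unavailable. Sketch, with in-tree names
assumed:

  #include "src/__support/FPUtil/cast.h"

  // Compute in float, then convert once with correct exception behavior.
  float16 halved(float x) {
    return LIBC_NAMESPACE::fputil::cast<float16>(x * 0.5f);
  }
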
diff --git a/libc/src/__support/FPUtil/dyadic_float.h b/libc/src/__support/FPUtil/dyadic_float.h
index 86346a4..165ffc7 100644
--- a/libc/src/__support/FPUtil/dyadic_float.h
+++ b/libc/src/__support/FPUtil/dyadic_float.h
@@ -11,11 +11,15 @@
#include "FEnvImpl.h"
#include "FPBits.h"
+#include "hdr/errno_macros.h"
+#include "hdr/fenv_macros.h"
#include "multiply_add.h"
+#include "rounding_mode.h"
#include "src/__support/CPP/type_traits.h"
#include "src/__support/big_int.h"
#include "src/__support/macros/config.h"
#include "src/__support/macros/optimization.h" // LIBC_UNLIKELY
+#include "src/__support/macros/properties/types.h"
#include <stddef.h>
@@ -97,13 +101,120 @@ template <size_t Bits> struct DyadicFloat {
return exponent + (Bits - 1);
}
- // Assume that it is already normalized.
- // Output is rounded correctly with respect to the current rounding mode.
+#ifdef LIBC_TYPES_HAS_FLOAT16
+ template <typename T, bool ShouldSignalExceptions>
+ LIBC_INLINE constexpr cpp::enable_if_t<
+ cpp::is_floating_point_v<T> && (FPBits<T>::FRACTION_LEN < Bits), T>
+ generic_as() const {
+ using FPBits = FPBits<float16>;
+ using StorageType = typename FPBits::StorageType;
+
+ constexpr int EXTRA_FRACTION_LEN = Bits - 1 - FPBits::FRACTION_LEN;
+
+ if (mantissa == 0)
+ return FPBits::zero(sign).get_val();
+
+ int unbiased_exp = get_unbiased_exponent();
+
+ if (unbiased_exp + FPBits::EXP_BIAS >= FPBits::MAX_BIASED_EXPONENT) {
+ if constexpr (ShouldSignalExceptions) {
+ set_errno_if_required(ERANGE);
+ raise_except_if_required(FE_OVERFLOW | FE_INEXACT);
+ }
+
+ switch (quick_get_round()) {
+ case FE_TONEAREST:
+ return FPBits::inf(sign).get_val();
+ case FE_TOWARDZERO:
+ return FPBits::max_normal(sign).get_val();
+ case FE_DOWNWARD:
+ if (sign.is_pos())
+ return FPBits::max_normal(Sign::POS).get_val();
+ return FPBits::inf(Sign::NEG).get_val();
+ case FE_UPWARD:
+ if (sign.is_neg())
+ return FPBits::max_normal(Sign::NEG).get_val();
+ return FPBits::inf(Sign::POS).get_val();
+ default:
+ __builtin_unreachable();
+ }
+ }
+
+ StorageType out_biased_exp = 0;
+ StorageType out_mantissa = 0;
+ bool round = false;
+ bool sticky = false;
+ bool underflow = false;
+
+ if (unbiased_exp < -FPBits::EXP_BIAS - FPBits::FRACTION_LEN) {
+ sticky = true;
+ underflow = true;
+ } else if (unbiased_exp == -FPBits::EXP_BIAS - FPBits::FRACTION_LEN) {
+ round = true;
+ MantissaType sticky_mask = (MantissaType(1) << (Bits - 1)) - 1;
+ sticky = (mantissa & sticky_mask) != 0;
+ } else {
+ int extra_fraction_len = EXTRA_FRACTION_LEN;
+
+ if (unbiased_exp < 1 - FPBits::EXP_BIAS) {
+ underflow = true;
+ extra_fraction_len += 1 - FPBits::EXP_BIAS - unbiased_exp;
+ } else {
+ out_biased_exp =
+ static_cast<StorageType>(unbiased_exp + FPBits::EXP_BIAS);
+ }
+
+ MantissaType round_mask = MantissaType(1) << (extra_fraction_len - 1);
+ round = (mantissa & round_mask) != 0;
+ MantissaType sticky_mask = round_mask - 1;
+ sticky = (mantissa & sticky_mask) != 0;
+
+ out_mantissa = static_cast<StorageType>(mantissa >> extra_fraction_len);
+ }
+
+ bool lsb = (out_mantissa & 1) != 0;
+
+ StorageType result =
+ FPBits::create_value(sign, out_biased_exp, out_mantissa).uintval();
+
+ switch (quick_get_round()) {
+ case FE_TONEAREST:
+ if (round && (lsb || sticky))
+ ++result;
+ break;
+ case FE_DOWNWARD:
+ if (sign.is_neg() && (round || sticky))
+ ++result;
+ break;
+ case FE_UPWARD:
+ if (sign.is_pos() && (round || sticky))
+ ++result;
+ break;
+ default:
+ break;
+ }
+
+ if (ShouldSignalExceptions && (round || sticky)) {
+ int excepts = FE_INEXACT;
+ if (FPBits(result).is_inf()) {
+ set_errno_if_required(ERANGE);
+ excepts |= FE_OVERFLOW;
+ } else if (underflow) {
+ set_errno_if_required(ERANGE);
+ excepts |= FE_UNDERFLOW;
+ }
+ raise_except_if_required(excepts);
+ }
+
+ return FPBits(result).get_val();
+ }
+#endif // LIBC_TYPES_HAS_FLOAT16
+
template <typename T, bool ShouldSignalExceptions,
typename = cpp::enable_if_t<cpp::is_floating_point_v<T> &&
(FPBits<T>::FRACTION_LEN < Bits),
void>>
- LIBC_INLINE constexpr T as() const {
+ LIBC_INLINE constexpr T fast_as() const {
if (LIBC_UNLIKELY(mantissa.is_zero()))
return FPBits<T>::zero(sign).get_val();
@@ -224,6 +335,20 @@ template <size_t Bits> struct DyadicFloat {
return r;
}
+ // Assume that it is already normalized.
+ // Output is rounded correctly with respect to the current rounding mode.
+ template <typename T, bool ShouldSignalExceptions,
+ typename = cpp::enable_if_t<cpp::is_floating_point_v<T> &&
+ (FPBits<T>::FRACTION_LEN < Bits),
+ void>>
+ LIBC_INLINE constexpr T as() const {
+#if defined(LIBC_TYPES_HAS_FLOAT16) && !defined(__LIBC_USE_FLOAT16_CONVERSION)
+ if constexpr (cpp::is_same_v<T, float16>)
+ return generic_as<T, ShouldSignalExceptions>();
+#endif
+ return fast_as<T, ShouldSignalExceptions>();
+ }
+
template <typename T,
typename = cpp::enable_if_t<cpp::is_floating_point_v<T> &&
(FPBits<T>::FRACTION_LEN < Bits),
diff --git a/libc/src/__support/FPUtil/except_value_utils.h b/libc/src/__support/FPUtil/except_value_utils.h
index b9f54aa..f8e4e92 100644
--- a/libc/src/__support/FPUtil/except_value_utils.h
+++ b/libc/src/__support/FPUtil/except_value_utils.h
@@ -11,10 +11,13 @@
#include "FEnvImpl.h"
#include "FPBits.h"
+#include "cast.h"
#include "rounding_mode.h"
#include "src/__support/CPP/optional.h"
#include "src/__support/macros/config.h"
#include "src/__support/macros/optimization.h" // LIBC_UNLIKELY
+#include "src/__support/macros/properties/cpu_features.h"
+#include "src/__support/macros/properties/types.h"
namespace LIBC_NAMESPACE_DECL {
@@ -113,6 +116,21 @@ template <typename T> LIBC_INLINE T round_result_slightly_up(T value_rn) {
return tmp;
}
+#if defined(LIBC_TYPES_HAS_FLOAT16) && \
+ !defined(LIBC_TARGET_CPU_HAS_FAST_FLOAT16_OPS)
+template <> LIBC_INLINE float16 round_result_slightly_down(float16 value_rn) {
+ volatile float tmp = value_rn;
+ tmp -= FPBits<float16>::min_normal().get_val();
+ return cast<float16>(tmp);
+}
+
+template <> LIBC_INLINE float16 round_result_slightly_up(float16 value_rn) {
+ volatile float tmp = value_rn;
+ tmp += FPBits<float16>::min_normal().get_val();
+ return cast<float16>(tmp);
+}
+#endif
+
} // namespace fputil
} // namespace LIBC_NAMESPACE_DECL
diff --git a/libc/src/__support/FPUtil/generic/CMakeLists.txt b/libc/src/__support/FPUtil/generic/CMakeLists.txt
index 43096aa..60434d6 100644
--- a/libc/src/__support/FPUtil/generic/CMakeLists.txt
+++ b/libc/src/__support/FPUtil/generic/CMakeLists.txt
@@ -8,6 +8,7 @@ add_header_library(
libc.src.__support.common
libc.src.__support.CPP.bit
libc.src.__support.CPP.type_traits
+ libc.src.__support.FPUtil.cast
libc.src.__support.FPUtil.dyadic_float
libc.src.__support.FPUtil.fenv_impl
libc.src.__support.FPUtil.fp_bits
@@ -21,16 +22,17 @@ add_header_library(
FMA.h
DEPENDS
libc.hdr.fenv_macros
+ libc.src.__support.big_int
libc.src.__support.common
libc.src.__support.CPP.bit
libc.src.__support.CPP.limits
libc.src.__support.CPP.type_traits
libc.src.__support.FPUtil.basic_operations
+ libc.src.__support.FPUtil.cast
libc.src.__support.FPUtil.dyadic_float
libc.src.__support.FPUtil.fenv_impl
libc.src.__support.FPUtil.fp_bits
libc.src.__support.FPUtil.rounding_mode
- libc.src.__support.big_int
libc.src.__support.macros.optimization
libc.src.__support.uint128
)
@@ -60,9 +62,10 @@ add_header_library(
libc.src.__support.CPP.bit
libc.src.__support.CPP.type_traits
libc.src.__support.FPUtil.basic_operations
+ libc.src.__support.FPUtil.cast
+ libc.src.__support.FPUtil.dyadic_float
libc.src.__support.FPUtil.fenv_impl
libc.src.__support.FPUtil.fp_bits
- libc.src.__support.FPUtil.dyadic_float
libc.src.__support.FPUtil.rounding_mode
libc.src.__support.macros.attributes
libc.src.__support.macros.optimization
diff --git a/libc/src/__support/FPUtil/generic/FMA.h b/libc/src/__support/FPUtil/generic/FMA.h
index e5683c8f..bec312e 100644
--- a/libc/src/__support/FPUtil/generic/FMA.h
+++ b/libc/src/__support/FPUtil/generic/FMA.h
@@ -14,6 +14,7 @@
#include "src/__support/CPP/type_traits.h"
#include "src/__support/FPUtil/BasicOperations.h"
#include "src/__support/FPUtil/FPBits.h"
+#include "src/__support/FPUtil/cast.h"
#include "src/__support/FPUtil/dyadic_float.h"
#include "src/__support/FPUtil/rounding_mode.h"
#include "src/__support/big_int.h"
@@ -157,7 +158,7 @@ fma(InType x, InType y, InType z) {
}
if (LIBC_UNLIKELY(x == 0 || y == 0 || z == 0))
- return static_cast<OutType>(x * y + z);
+ return cast<OutType>(x * y + z);
int x_exp = 0;
int y_exp = 0;
@@ -198,7 +199,7 @@ fma(InType x, InType y, InType z) {
if (LIBC_UNLIKELY(x_exp == InFPBits::MAX_BIASED_EXPONENT ||
y_exp == InFPBits::MAX_BIASED_EXPONENT ||
z_exp == InFPBits::MAX_BIASED_EXPONENT))
- return static_cast<OutType>(x * y + z);
+ return cast<OutType>(x * y + z);
// Extract mantissa and append hidden leading bits.
InStorageType x_mant = x_bits.get_explicit_mantissa();
diff --git a/libc/src/__support/FPUtil/generic/add_sub.h b/libc/src/__support/FPUtil/generic/add_sub.h
index 850db3f..6bc9dcd 100644
--- a/libc/src/__support/FPUtil/generic/add_sub.h
+++ b/libc/src/__support/FPUtil/generic/add_sub.h
@@ -17,6 +17,7 @@
#include "src/__support/FPUtil/BasicOperations.h"
#include "src/__support/FPUtil/FEnvImpl.h"
#include "src/__support/FPUtil/FPBits.h"
+#include "src/__support/FPUtil/cast.h"
#include "src/__support/FPUtil/dyadic_float.h"
#include "src/__support/FPUtil/rounding_mode.h"
#include "src/__support/macros/attributes.h"
@@ -106,14 +107,14 @@ add_or_sub(InType x, InType y) {
volatile InType tmp = y;
if constexpr (IsSub)
tmp = -tmp;
- return static_cast<OutType>(tmp);
+ return cast<OutType>(tmp);
}
if (y_bits.is_zero()) {
volatile InType tmp = y;
if constexpr (IsSub)
tmp = -tmp;
- return static_cast<OutType>(tmp);
+ return cast<OutType>(tmp);
}
}
diff --git a/libc/src/__support/FPUtil/generic/sqrt.h b/libc/src/__support/FPUtil/generic/sqrt.h
index 4502cc0..01af4bb 100644
--- a/libc/src/__support/FPUtil/generic/sqrt.h
+++ b/libc/src/__support/FPUtil/generic/sqrt.h
@@ -14,6 +14,7 @@
#include "src/__support/CPP/type_traits.h"
#include "src/__support/FPUtil/FEnvImpl.h"
#include "src/__support/FPUtil/FPBits.h"
+#include "src/__support/FPUtil/cast.h"
#include "src/__support/FPUtil/dyadic_float.h"
#include "src/__support/common.h"
#include "src/__support/macros/config.h"
@@ -96,7 +97,7 @@ sqrt(InType x) {
// sqrt(-0) = -0
// sqrt(NaN) = NaN
// sqrt(-NaN) = -NaN
- return static_cast<OutType>(x);
+ return cast<OutType>(x);
} else if (bits.is_neg()) {
// sqrt(-Inf) = NaN
// sqrt(-x) = NaN
diff --git a/libc/src/math/generic/CMakeLists.txt b/libc/src/math/generic/CMakeLists.txt
index 5a1ee3b..d0676d0 100644
--- a/libc/src/math/generic/CMakeLists.txt
+++ b/libc/src/math/generic/CMakeLists.txt
@@ -109,9 +109,10 @@ add_entrypoint_object(
COMPILE_OPTIONS
-O3
DEPENDS
- libc.src.__support.macros.properties.types
+ libc.src.__support.FPUtil.cast
libc.src.__support.FPUtil.nearest_integer_operations
libc.src.__support.macros.properties.cpu_features
+ libc.src.__support.macros.properties.types
FLAGS
ROUND_OPT
)
@@ -672,9 +673,10 @@ add_entrypoint_object(
COMPILE_OPTIONS
-O3
DEPENDS
- libc.src.__support.macros.properties.types
+ libc.src.__support.FPUtil.cast
libc.src.__support.FPUtil.nearest_integer_operations
libc.src.__support.macros.properties.cpu_features
+ libc.src.__support.macros.properties.types
FLAGS
ROUND_OPT
)
@@ -741,9 +743,10 @@ add_entrypoint_object(
COMPILE_OPTIONS
-O3
DEPENDS
- libc.src.__support.macros.properties.types
+ libc.src.__support.FPUtil.cast
libc.src.__support.FPUtil.nearest_integer_operations
libc.src.__support.macros.properties.cpu_features
+ libc.src.__support.macros.properties.types
FLAGS
ROUND_OPT
)
@@ -810,9 +813,10 @@ add_entrypoint_object(
COMPILE_OPTIONS
-O3
DEPENDS
- libc.src.__support.macros.properties.types
+ libc.src.__support.FPUtil.cast
libc.src.__support.FPUtil.nearest_integer_operations
libc.src.__support.macros.properties.cpu_features
+ libc.src.__support.macros.properties.types
FLAGS
ROUND_OPT
)
@@ -881,6 +885,7 @@ add_entrypoint_object(
DEPENDS
libc.src.__support.macros.properties.types
libc.src.__support.FPUtil.nearest_integer_operations
+ libc.src.__support.FPUtil.cast
libc.src.__support.macros.properties.cpu_features
FLAGS
ROUND_OPT
@@ -1072,9 +1077,10 @@ add_entrypoint_object(
COMPILE_OPTIONS
-O3
DEPENDS
- libc.src.__support.macros.properties.types
+ libc.src.__support.FPUtil.cast
libc.src.__support.FPUtil.nearest_integer_operations
libc.src.__support.macros.properties.cpu_features
+ libc.src.__support.macros.properties.types
FLAGS
ROUND_OPT
)
@@ -1362,12 +1368,15 @@ add_entrypoint_object(
.expxf16
libc.hdr.errno_macros
libc.hdr.fenv_macros
+ libc.src.__support.CPP.array
+ libc.src.__support.FPUtil.cast
libc.src.__support.FPUtil.except_value_utils
libc.src.__support.FPUtil.fenv_impl
libc.src.__support.FPUtil.fp_bits
+ libc.src.__support.FPUtil.multiply_add
+ libc.src.__support.FPUtil.nearest_integer
libc.src.__support.FPUtil.polyeval
libc.src.__support.FPUtil.rounding_mode
- libc.src.__support.macros.attributes
libc.src.__support.macros.optimization
COMPILE_OPTIONS
-O3
@@ -1442,6 +1451,7 @@ add_entrypoint_object(
libc.hdr.errno_macros
libc.hdr.fenv_macros
libc.src.__support.CPP.array
+ libc.src.__support.FPUtil.cast
libc.src.__support.FPUtil.except_value_utils
libc.src.__support.FPUtil.fenv_impl
libc.src.__support.FPUtil.fp_bits
@@ -1545,6 +1555,7 @@ add_entrypoint_object(
libc.hdr.errno_macros
libc.hdr.fenv_macros
libc.src.__support.CPP.array
+ libc.src.__support.FPUtil.cast
libc.src.__support.FPUtil.except_value_utils
libc.src.__support.FPUtil.fenv_impl
libc.src.__support.FPUtil.fp_bits
@@ -1617,6 +1628,7 @@ add_entrypoint_object(
.expxf16
libc.hdr.errno_macros
libc.hdr.fenv_macros
+ libc.src.__support.FPUtil.cast
libc.src.__support.FPUtil.except_value_utils
libc.src.__support.FPUtil.fenv_impl
libc.src.__support.FPUtil.fp_bits
diff --git a/libc/src/math/generic/ceilf16.cpp b/libc/src/math/generic/ceilf16.cpp
index 8af31c6..9d89efc 100644
--- a/libc/src/math/generic/ceilf16.cpp
+++ b/libc/src/math/generic/ceilf16.cpp
@@ -8,6 +8,7 @@
#include "src/math/ceilf16.h"
#include "src/__support/FPUtil/NearestIntegerOperations.h"
+#include "src/__support/FPUtil/cast.h"
#include "src/__support/common.h"
#include "src/__support/macros/config.h"
#include "src/__support/macros/properties/cpu_features.h"
@@ -17,7 +18,7 @@ namespace LIBC_NAMESPACE_DECL {
LLVM_LIBC_FUNCTION(float16, ceilf16, (float16 x)) {
#if defined(__LIBC_USE_BUILTIN_CEIL_FLOOR_RINT_TRUNC) && \
defined(LIBC_TARGET_CPU_HAS_FAST_FLOAT16_OPS)
- return static_cast<float16>(__builtin_ceilf(x));
+ return fputil::cast<float16>(__builtin_ceilf(x));
#else
return fputil::ceil(x);
#endif
diff --git a/libc/src/math/generic/exp10f16.cpp b/libc/src/math/generic/exp10f16.cpp
index 9959f745..1c5966c 100644
--- a/libc/src/math/generic/exp10f16.cpp
+++ b/libc/src/math/generic/exp10f16.cpp
@@ -14,6 +14,7 @@
#include "src/__support/FPUtil/FEnvImpl.h"
#include "src/__support/FPUtil/FPBits.h"
#include "src/__support/FPUtil/PolyEval.h"
+#include "src/__support/FPUtil/cast.h"
#include "src/__support/FPUtil/except_value_utils.h"
#include "src/__support/FPUtil/multiply_add.h"
#include "src/__support/FPUtil/nearest_integer.h"
@@ -118,13 +119,13 @@ LLVM_LIBC_FUNCTION(float16, exp10f16, (float16 x)) {
if (LIBC_UNLIKELY((x_u & ~(0x3c00U | 0x4000U | 0x4200U | 0x4400U)) == 0)) {
switch (x_u) {
case 0x3c00U: // x = 1.0f16
- return static_cast<float16>(10.0);
+ return fputil::cast<float16>(10.0);
case 0x4000U: // x = 2.0f16
- return static_cast<float16>(100.0);
+ return fputil::cast<float16>(100.0);
case 0x4200U: // x = 3.0f16
- return static_cast<float16>(1'000.0);
+ return fputil::cast<float16>(1'000.0);
case 0x4400U: // x = 4.0f16
- return static_cast<float16>(10'000.0);
+ return fputil::cast<float16>(10'000.0);
}
}
@@ -164,7 +165,7 @@ LLVM_LIBC_FUNCTION(float16, exp10f16, (float16 x)) {
// > 1 + x * P;
float exp10_lo = fputil::polyeval(lo, 0x1p+0f, 0x1.26bb14p+1f, 0x1.53526p+1f,
0x1.04b434p+1f, 0x1.2bcf9ep+0f);
- return static_cast<float16>(exp2_hi_mid * exp10_lo);
+ return fputil::cast<float16>(exp2_hi_mid * exp10_lo);
}
} // namespace LIBC_NAMESPACE_DECL
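
fputil::polyeval, which appears throughout these kernels, evaluates the minimax polynomials generated by the Sollya commands quoted in the comments, with the constant term first: polyeval(x, c0, c1, ..., cn) = c0 + c1*x + ... + cn*x^n, computed by Horner's scheme. A minimal sketch of that calling convention (an assumption inferred from the call sites above, not the FPUtil implementation):

// Horner-scheme sketch: polyeval_sketch(x, c0, c1, c2) evaluates
// c0 + c1*x + c2*x^2 with n multiplies and n adds.
template <typename T> T polyeval_sketch(T, T c0) { return c0; }

template <typename T, typename... Ts>
T polyeval_sketch(T x, T c0, Ts... rest) {
  return c0 + x * polyeval_sketch(x, rest...);
}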
diff --git a/libc/src/math/generic/exp2f16.cpp b/libc/src/math/generic/exp2f16.cpp
index 66b7956..3c43102 100644
--- a/libc/src/math/generic/exp2f16.cpp
+++ b/libc/src/math/generic/exp2f16.cpp
@@ -14,6 +14,7 @@
#include "src/__support/FPUtil/FEnvImpl.h"
#include "src/__support/FPUtil/FPBits.h"
#include "src/__support/FPUtil/PolyEval.h"
+#include "src/__support/FPUtil/cast.h"
#include "src/__support/FPUtil/except_value_utils.h"
#include "src/__support/FPUtil/multiply_add.h"
#include "src/__support/FPUtil/nearest_integer.h"
@@ -121,7 +122,7 @@ LLVM_LIBC_FUNCTION(float16, exp2f16, (float16 x)) {
// > 1 + x * P;
float exp2_lo = fputil::polyeval(lo, 0x1p+0f, 0x1.62e43p-1f, 0x1.ec0aa6p-3f,
0x1.c6b4a6p-5f);
- return static_cast<float16>(exp2_hi_mid * exp2_lo);
+ return fputil::cast<float16>(exp2_hi_mid * exp2_lo);
}
} // namespace LIBC_NAMESPACE_DECL
diff --git a/libc/src/math/generic/expf16.cpp b/libc/src/math/generic/expf16.cpp
index 7ffdbd5..0548ef3 100644
--- a/libc/src/math/generic/expf16.cpp
+++ b/libc/src/math/generic/expf16.cpp
@@ -13,6 +13,7 @@
#include "src/__support/FPUtil/FEnvImpl.h"
#include "src/__support/FPUtil/FPBits.h"
#include "src/__support/FPUtil/PolyEval.h"
+#include "src/__support/FPUtil/cast.h"
#include "src/__support/FPUtil/except_value_utils.h"
#include "src/__support/FPUtil/rounding_mode.h"
#include "src/__support/common.h"
@@ -103,7 +104,7 @@ LLVM_LIBC_FUNCTION(float16, expf16, (float16 x)) {
// > display = hexadecimal;
// > P = fpminimax(expm1(x)/x, 2, [|SG...|], [-2^-5, 2^-5]);
// > 1 + x * P;
- return static_cast<float16>(
+ return fputil::cast<float16>(
fputil::polyeval(xf, 0x1p+0f, 0x1p+0f, 0x1.0004p-1f, 0x1.555778p-3f));
}
}
@@ -113,7 +114,7 @@ LLVM_LIBC_FUNCTION(float16, expf16, (float16 x)) {
// exp(x) = exp(hi + mid) * exp(lo)
auto [exp_hi_mid, exp_lo] = exp_range_reduction(x);
- return static_cast<float16>(exp_hi_mid * exp_lo);
+ return fputil::cast<float16>(exp_hi_mid * exp_lo);
}
} // namespace LIBC_NAMESPACE_DECL
diff --git a/libc/src/math/generic/expm1f16.cpp b/libc/src/math/generic/expm1f16.cpp
index 0facdc5..4ce0efd 100644
--- a/libc/src/math/generic/expm1f16.cpp
+++ b/libc/src/math/generic/expm1f16.cpp
@@ -13,6 +13,7 @@
#include "src/__support/FPUtil/FEnvImpl.h"
#include "src/__support/FPUtil/FPBits.h"
#include "src/__support/FPUtil/PolyEval.h"
+#include "src/__support/FPUtil/cast.h"
#include "src/__support/FPUtil/except_value_utils.h"
#include "src/__support/FPUtil/multiply_add.h"
#include "src/__support/FPUtil/rounding_mode.h"
@@ -99,7 +100,7 @@ LLVM_LIBC_FUNCTION(float16, expm1f16, (float16 x)) {
FPBits::one(Sign::NEG).get_val());
// When x <= -0x1.0ap+3, round(expm1(x), HP, RN) = -0x1.ffcp-1.
return fputil::round_result_slightly_down(
- static_cast<float16>(-0x1.ffcp-1));
+ fputil::cast<float16>(-0x1.ffcp-1));
}
// When 0 < |x| <= 2^(-3).
@@ -114,7 +115,7 @@ LLVM_LIBC_FUNCTION(float16, expm1f16, (float16 x)) {
// > display = hexadecimal;
// > P = fpminimax(expm1(x)/x, 4, [|SG...|], [-2^-3, 2^-3]);
// > x * P;
- return static_cast<float16>(
+ return fputil::cast<float16>(
xf * fputil::polyeval(xf, 0x1p+0f, 0x1.fffff8p-2f, 0x1.555556p-3f,
0x1.55905ep-5f, 0x1.1124c2p-7f));
}
@@ -126,7 +127,7 @@ LLVM_LIBC_FUNCTION(float16, expm1f16, (float16 x)) {
// exp(x) = exp(hi + mid) * exp(lo)
auto [exp_hi_mid, exp_lo] = exp_range_reduction(x);
// expm1(x) = exp(hi + mid) * exp(lo) - 1
- return static_cast<float16>(fputil::multiply_add(exp_hi_mid, exp_lo, -1.0f));
+ return fputil::cast<float16>(fputil::multiply_add(exp_hi_mid, exp_lo, -1.0f));
}
} // namespace LIBC_NAMESPACE_DECL
diff --git a/libc/src/math/generic/floorf16.cpp b/libc/src/math/generic/floorf16.cpp
index 3092048..361b227 100644
--- a/libc/src/math/generic/floorf16.cpp
+++ b/libc/src/math/generic/floorf16.cpp
@@ -8,6 +8,7 @@
#include "src/math/floorf16.h"
#include "src/__support/FPUtil/NearestIntegerOperations.h"
+#include "src/__support/FPUtil/cast.h"
#include "src/__support/common.h"
#include "src/__support/macros/config.h"
#include "src/__support/macros/properties/cpu_features.h"
@@ -17,7 +18,7 @@ namespace LIBC_NAMESPACE_DECL {
LLVM_LIBC_FUNCTION(float16, floorf16, (float16 x)) {
#if defined(__LIBC_USE_BUILTIN_CEIL_FLOOR_RINT_TRUNC) && \
defined(LIBC_TARGET_CPU_HAS_FAST_FLOAT16_OPS)
- return static_cast<float16>(__builtin_floorf(x));
+ return fputil::cast<float16>(__builtin_floorf(x));
#else
return fputil::floor(x);
#endif
diff --git a/libc/src/math/generic/rintf16.cpp b/libc/src/math/generic/rintf16.cpp
index 3a53dd2..aefdcbe 100644
--- a/libc/src/math/generic/rintf16.cpp
+++ b/libc/src/math/generic/rintf16.cpp
@@ -8,6 +8,7 @@
#include "src/math/rintf16.h"
#include "src/__support/FPUtil/NearestIntegerOperations.h"
+#include "src/__support/FPUtil/cast.h"
#include "src/__support/common.h"
#include "src/__support/macros/config.h"
#include "src/__support/macros/properties/cpu_features.h"
@@ -17,7 +18,7 @@ namespace LIBC_NAMESPACE_DECL {
LLVM_LIBC_FUNCTION(float16, rintf16, (float16 x)) {
#if defined(__LIBC_USE_BUILTIN_CEIL_FLOOR_RINT_TRUNC) && \
defined(LIBC_TARGET_CPU_HAS_FAST_FLOAT16_OPS)
- return static_cast<float16>(__builtin_rintf(x));
+ return fputil::cast<float16>(__builtin_rintf(x));
#else
return fputil::round_using_current_rounding_mode(x);
#endif
diff --git a/libc/src/math/generic/roundevenf16.cpp b/libc/src/math/generic/roundevenf16.cpp
index c3dbd77..fdcd968b 100644
--- a/libc/src/math/generic/roundevenf16.cpp
+++ b/libc/src/math/generic/roundevenf16.cpp
@@ -8,6 +8,7 @@
#include "src/math/roundevenf16.h"
#include "src/__support/FPUtil/NearestIntegerOperations.h"
+#include "src/__support/FPUtil/cast.h"
#include "src/__support/common.h"
#include "src/__support/macros/config.h"
#include "src/__support/macros/properties/cpu_features.h"
@@ -17,7 +18,7 @@ namespace LIBC_NAMESPACE_DECL {
LLVM_LIBC_FUNCTION(float16, roundevenf16, (float16 x)) {
#if defined(__LIBC_USE_BUILTIN_ROUNDEVEN) && \
defined(LIBC_TARGET_CPU_HAS_FAST_FLOAT16_OPS)
- return static_cast<float16>(__builtin_roundevenf(x));
+ return fputil::cast<float16>(__builtin_roundevenf(x));
#else
return fputil::round_using_specific_rounding_mode(x, FP_INT_TONEAREST);
#endif
diff --git a/libc/src/math/generic/roundf16.cpp b/libc/src/math/generic/roundf16.cpp
index a5e2b44..9adfb52 100644
--- a/libc/src/math/generic/roundf16.cpp
+++ b/libc/src/math/generic/roundf16.cpp
@@ -8,6 +8,7 @@
#include "src/math/roundf16.h"
#include "src/__support/FPUtil/NearestIntegerOperations.h"
+#include "src/__support/FPUtil/cast.h"
#include "src/__support/common.h"
#include "src/__support/macros/config.h"
#include "src/__support/macros/properties/cpu_features.h"
@@ -17,7 +18,7 @@ namespace LIBC_NAMESPACE_DECL {
LLVM_LIBC_FUNCTION(float16, roundf16, (float16 x)) {
#if defined(__LIBC_USE_BUILTIN_ROUND) && \
defined(LIBC_TARGET_CPU_HAS_FAST_FLOAT16_OPS)
- return static_cast<float16>(__builtin_roundf(x));
+ return fputil::cast<float16>(__builtin_roundf(x));
#else
return fputil::round(x);
#endif
diff --git a/libc/src/math/generic/truncf16.cpp b/libc/src/math/generic/truncf16.cpp
index 31b1214..4d37e65 100644
--- a/libc/src/math/generic/truncf16.cpp
+++ b/libc/src/math/generic/truncf16.cpp
@@ -8,6 +8,7 @@
#include "src/math/truncf16.h"
#include "src/__support/FPUtil/NearestIntegerOperations.h"
+#include "src/__support/FPUtil/cast.h"
#include "src/__support/common.h"
#include "src/__support/macros/config.h"
#include "src/__support/macros/properties/cpu_features.h"
@@ -17,7 +18,7 @@ namespace LIBC_NAMESPACE_DECL {
LLVM_LIBC_FUNCTION(float16, truncf16, (float16 x)) {
#if defined(__LIBC_USE_BUILTIN_CEIL_FLOOR_RINT_TRUNC) && \
defined(LIBC_TARGET_CPU_HAS_FAST_FLOAT16_OPS)
- return static_cast<float16>(__builtin_truncf(x));
+ return fputil::cast<float16>(__builtin_truncf(x));
#else
return fputil::trunc(x);
#endif
diff --git a/libc/src/stdio/gpu/CMakeLists.txt b/libc/src/stdio/gpu/CMakeLists.txt
index 86470b8..9cac42e 100644
--- a/libc/src/stdio/gpu/CMakeLists.txt
+++ b/libc/src/stdio/gpu/CMakeLists.txt
@@ -295,6 +295,17 @@ add_entrypoint_object(
)
add_entrypoint_object(
+ rename
+ SRCS
+ rename.cpp
+ HDRS
+ ../rename.h
+ DEPENDS
+ libc.hdr.types.FILE
+ .gpu_file
+)
+
+add_entrypoint_object(
stdin
SRCS
stdin.cpp
diff --git a/libc/src/stdio/gpu/rename.cpp b/libc/src/stdio/gpu/rename.cpp
new file mode 100644
index 0000000..1087228
--- /dev/null
+++ b/libc/src/stdio/gpu/rename.cpp
@@ -0,0 +1,30 @@
+//===-- GPU Implementation of rename --------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/stdio/rename.h"
+#include "src/__support/CPP/string_view.h"
+#include "src/__support/macros/config.h"
+#include "src/stdio/gpu/file.h"
+
+#include "hdr/types/FILE.h"
+
+namespace LIBC_NAMESPACE_DECL {
+
+LLVM_LIBC_FUNCTION(int, rename, (const char *oldpath, const char *newpath)) {
+ int ret;
+ rpc::Client::Port port = rpc::client.open<RPC_RENAME>();
+ port.send_n(oldpath, internal::string_length(oldpath) + 1);
+ port.send_n(newpath, internal::string_length(newpath) + 1);
+ port.recv(
+ [&](rpc::Buffer *buffer) { ret = static_cast<int>(buffer->data[0]); });
+ port.close();
+
+ return ret;
+}
+
+} // namespace LIBC_NAMESPACE_DECL
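
This follows the GPU RPC shape shared by the stdio entrypoints in this directory: open a typed port, stream each NUL-terminated argument to the server with send_n, read the host's return value out of the reply buffer, and close the port. Hypothetical caller-side usage, assuming the host-side RPC server services RPC_RENAME by invoking the host's rename and replying with its result:

// Hypothetical GPU-side usage of the new entrypoint.
#include "src/stdio/rename.h"

void rename_example() {
  int rc = LIBC_NAMESPACE::rename("/tmp/old.log", "/tmp/new.log");
  (void)rc; // 0 on success, -1 on failure, mirroring POSIX rename()
}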
diff --git a/libc/src/stdlib/CMakeLists.txt b/libc/src/stdlib/CMakeLists.txt
index 7fc68cb..1b5b2cb 100644
--- a/libc/src/stdlib/CMakeLists.txt
+++ b/libc/src/stdlib/CMakeLists.txt
@@ -621,3 +621,10 @@ add_entrypoint_object(
DEPENDS
.${LIBC_TARGET_OS}.abort
)
+
+add_entrypoint_object(
+ system
+ ALIAS
+ DEPENDS
+ .${LIBC_TARGET_OS}.system
+)
diff --git a/libc/src/stdlib/gpu/CMakeLists.txt b/libc/src/stdlib/gpu/CMakeLists.txt
index 073f815..3c0588a2 100644
--- a/libc/src/stdlib/gpu/CMakeLists.txt
+++ b/libc/src/stdlib/gpu/CMakeLists.txt
@@ -61,5 +61,16 @@ add_entrypoint_object(
../abort.h
DEPENDS
libc.include.stdlib
- libc.src.__support.GPU.allocator
+ libc.src.__support.RPC.rpc_client
+)
+
+add_entrypoint_object(
+ system
+ SRCS
+ system.cpp
+ HDRS
+ ../system.h
+ DEPENDS
+ libc.include.stdlib
+ libc.src.__support.RPC.rpc_client
)
diff --git a/libc/src/stdlib/gpu/system.cpp b/libc/src/stdlib/gpu/system.cpp
new file mode 100644
index 0000000..acf3a8c
--- /dev/null
+++ b/libc/src/stdlib/gpu/system.cpp
@@ -0,0 +1,29 @@
+//===-- GPU implementation of system --------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/__support/RPC/rpc_client.h"
+#include "src/__support/common.h"
+#include "src/__support/macros/config.h"
+#include "src/string/string_utils.h"
+
+#include "src/stdlib/system.h"
+
+namespace LIBC_NAMESPACE_DECL {
+
+LLVM_LIBC_FUNCTION(int, system, (const char *command)) {
+ int ret;
+ rpc::Client::Port port = rpc::client.open<RPC_SYSTEM>();
+ port.send_n(command, internal::string_length(command) + 1);
+ port.recv(
+ [&](rpc::Buffer *buffer) { ret = static_cast<int>(buffer->data[0]); });
+ port.close();
+
+ return ret;
+}
+
+} // namespace LIBC_NAMESPACE_DECL
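
system uses the identical open/send_n/recv/close sequence as rename above; only the opcode and the number of transmitted strings differ. Hypothetical usage, assuming the host server runs the command through the host's system() and replies with its exit status:

// Hypothetical GPU-side usage of the new entrypoint.
#include "src/stdlib/system.h"

void system_example() {
  int status = LIBC_NAMESPACE::system("echo hello from the host");
  (void)status; // whatever status the host-side system() reported
}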
diff --git a/libc/src/stdlib/system.h b/libc/src/stdlib/system.h
new file mode 100644
index 0000000..3358ca7
--- /dev/null
+++ b/libc/src/stdlib/system.h
@@ -0,0 +1,20 @@
+//===-- Implementation header for system ------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_STDLIB_SYSTEM_H
+#define LLVM_LIBC_SRC_STDLIB_SYSTEM_H
+
+#include "src/__support/macros/config.h"
+
+namespace LIBC_NAMESPACE_DECL {
+
+int system(const char *command);
+
+} // namespace LIBC_NAMESPACE_DECL
+
+#endif // LLVM_LIBC_SRC_STDLIB_SYSTEM_H
diff --git a/libc/src/sys/socket/linux/CMakeLists.txt b/libc/src/sys/socket/linux/CMakeLists.txt
index f21679b..e1226aa 100644
--- a/libc/src/sys/socket/linux/CMakeLists.txt
+++ b/libc/src/sys/socket/linux/CMakeLists.txt
@@ -33,6 +33,7 @@ add_entrypoint_object(
DEPENDS
libc.include.sys_syscall
libc.include.sys_socket
+ libc.src.__support.macros.sanitizer
libc.src.__support.OSUtil.osutil
libc.src.errno.errno
)
@@ -87,6 +88,7 @@ add_entrypoint_object(
libc.include.sys_syscall
libc.hdr.types.struct_sockaddr
libc.hdr.types.socklen_t
+ libc.src.__support.macros.sanitizer
libc.src.__support.OSUtil.osutil
libc.src.errno.errno
)
@@ -101,6 +103,7 @@ add_entrypoint_object(
libc.include.sys_syscall
libc.hdr.types.struct_sockaddr
libc.hdr.types.socklen_t
+ libc.src.__support.macros.sanitizer
libc.src.__support.OSUtil.osutil
libc.src.errno.errno
)
@@ -114,6 +117,7 @@ add_entrypoint_object(
DEPENDS
libc.include.sys_syscall
libc.hdr.types.struct_msghdr
+ libc.src.__support.macros.sanitizer
libc.src.__support.OSUtil.osutil
libc.src.errno.errno
)
diff --git a/libc/src/sys/socket/linux/recv.cpp b/libc/src/sys/socket/linux/recv.cpp
index 96acf44..55a766a 100644
--- a/libc/src/sys/socket/linux/recv.cpp
+++ b/libc/src/sys/socket/linux/recv.cpp
@@ -13,6 +13,7 @@
#include "hdr/types/struct_sockaddr.h"
#include "src/__support/OSUtil/syscall.h" // For internal syscall function.
#include "src/__support/common.h"
+#include "src/__support/macros/sanitizer.h"
#include "src/errno/libc_errno.h"
#include <linux/net.h> // For SYS_SOCKET socketcall number.
#include <sys/syscall.h> // For syscall numbers.
@@ -41,6 +42,9 @@ LLVM_LIBC_FUNCTION(ssize_t, recv,
libc_errno = static_cast<int>(-ret);
return -1;
}
+
+ MSAN_UNPOISON(buf, ret);
+
return ret;
}
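
The MSAN_UNPOISON calls added in this and the following socket hunks close a MemorySanitizer blind spot: MSan cannot observe the kernel writing into buf across the syscall boundary, so the bytes the kernel filled would otherwise remain flagged as uninitialized. A hedged sketch of how such a macro is commonly defined (the real definition lives in src/__support/macros/sanitizer.h and may differ):

// Sketch: expands to nothing unless built under MemorySanitizer, in which
// case it marks the byte range as initialized. __msan_unpoison is the
// documented interface from <sanitizer/msan_interface.h>.
#if defined(__has_feature)
#if __has_feature(memory_sanitizer)
#include <sanitizer/msan_interface.h>
#define MSAN_UNPOISON(ptr, size) __msan_unpoison(ptr, size)
#endif
#endif

#ifndef MSAN_UNPOISON
#define MSAN_UNPOISON(ptr, size) // not an MSan build: no-op
#endif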
diff --git a/libc/src/sys/socket/linux/recvfrom.cpp b/libc/src/sys/socket/linux/recvfrom.cpp
index 17489a9..990e58d 100644
--- a/libc/src/sys/socket/linux/recvfrom.cpp
+++ b/libc/src/sys/socket/linux/recvfrom.cpp
@@ -13,6 +13,7 @@
#include "hdr/types/struct_sockaddr.h"
#include "src/__support/OSUtil/syscall.h" // For internal syscall function.
#include "src/__support/common.h"
+#include "src/__support/macros/sanitizer.h"
#include "src/errno/libc_errno.h"
#include <linux/net.h> // For SYS_SOCKET socketcall number.
#include <sys/syscall.h> // For syscall numbers.
@@ -43,6 +44,9 @@ LLVM_LIBC_FUNCTION(ssize_t, recvfrom,
libc_errno = static_cast<int>(-ret);
return -1;
}
+
+ MSAN_UNPOISON(buf, ret);
+
return ret;
}
diff --git a/libc/src/sys/socket/linux/recvmsg.cpp b/libc/src/sys/socket/linux/recvmsg.cpp
index 60045d6..f44e580 100644
--- a/libc/src/sys/socket/linux/recvmsg.cpp
+++ b/libc/src/sys/socket/linux/recvmsg.cpp
@@ -12,6 +12,7 @@
#include "hdr/types/struct_msghdr.h"
#include "src/__support/OSUtil/syscall.h" // For internal syscall function.
#include "src/__support/common.h"
+#include "src/__support/macros/sanitizer.h"
#include "src/errno/libc_errno.h"
#include <linux/net.h> // For SYS_SOCKET socketcall number.
#include <sys/syscall.h> // For syscall numbers.
@@ -36,6 +37,14 @@ LLVM_LIBC_FUNCTION(ssize_t, recvmsg,
libc_errno = static_cast<int>(-ret);
return -1;
}
+
+ // Unpoison the msghdr, as well as all its components.
+ MSAN_UNPOISON(msg->msg_name, msg->msg_namelen);
+ for (size_t i = 0; i < msg->msg_iovlen; ++i) {
+    MSAN_UNPOISON(msg->msg_iov[i].iov_base, msg->msg_iov[i].iov_len);
+ }
+ MSAN_UNPOISON(msg->msg_control, msg->msg_controllen);
+
return ret;
}
diff --git a/libc/src/sys/socket/linux/socketpair.cpp b/libc/src/sys/socket/linux/socketpair.cpp
index d459a74..60612ac 100644
--- a/libc/src/sys/socket/linux/socketpair.cpp
+++ b/libc/src/sys/socket/linux/socketpair.cpp
@@ -10,10 +10,9 @@
#include "src/__support/OSUtil/syscall.h" // For internal syscall function.
#include "src/__support/common.h"
-
#include "src/__support/macros/config.h"
+#include "src/__support/macros/sanitizer.h"
#include "src/errno/libc_errno.h"
-
#include <linux/net.h> // For SYS_SOCKET socketcall number.
#include <sys/syscall.h> // For syscall numbers.
@@ -37,6 +36,9 @@ LLVM_LIBC_FUNCTION(int, socketpair,
libc_errno = -ret;
return -1;
}
+
+ MSAN_UNPOISON(sv, sizeof(int) * 2);
+
return ret;
}
diff --git a/libc/test/include/CMakeLists.txt b/libc/test/include/CMakeLists.txt
index e500d27..12692ee 100644
--- a/libc/test/include/CMakeLists.txt
+++ b/libc/test/include/CMakeLists.txt
@@ -82,6 +82,36 @@ add_libc_test(
)
add_libc_test(
+ issubnormal_test
+ SUITE
+ libc_include_tests
+ SRCS
+ issubnormal_test.cpp
+ DEPENDS
+ libc.include.llvm-libc-macros.math_function_macros
+)
+
+add_libc_test(
+ issubnormalf_test
+ SUITE
+ libc_include_tests
+ SRCS
+ issubnormalf_test.cpp
+ DEPENDS
+ libc.include.llvm-libc-macros.math_function_macros
+)
+
+add_libc_test(
+ issubnormall_test
+ SUITE
+ libc_include_tests
+ SRCS
+ issubnormall_test.cpp
+ DEPENDS
+ libc.include.llvm-libc-macros.math_function_macros
+)
+
+add_libc_test(
isnormal_test
SUITE
libc_include_tests
@@ -367,6 +397,21 @@ add_libc_test(
)
add_libc_test(
+ issubnormal_c_test
+ C_TEST
+ UNIT_TEST_ONLY
+ SUITE
+ libc_include_tests
+ SRCS
+ issubnormal_test.c
+ COMPILE_OPTIONS
+ -Wall
+ -Werror
+ DEPENDS
+ libc.include.llvm-libc-macros.math_function_macros
+)
+
+add_libc_test(
fpclassify_c_test
C_TEST
UNIT_TEST_ONLY
diff --git a/libc/test/include/IsSubnormalTest.h b/libc/test/include/IsSubnormalTest.h
new file mode 100644
index 0000000..f26d6d2
--- /dev/null
+++ b/libc/test/include/IsSubnormalTest.h
@@ -0,0 +1,49 @@
+//===-- Utility class to test the issubnormal macro ------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_TEST_INCLUDE_MATH_ISSUBNORMAL_H
+#define LLVM_LIBC_TEST_INCLUDE_MATH_ISSUBNORMAL_H
+
+#include "test/UnitTest/FPMatcher.h"
+#include "test/UnitTest/Test.h"
+
+#include "include/llvm-libc-macros/math-function-macros.h"
+
+template <typename T>
+class IsSubnormalTest : public LIBC_NAMESPACE::testing::Test {
+ DECLARE_SPECIAL_CONSTANTS(T)
+
+public:
+ typedef bool (*IsSubnormalFunc)(T);
+
+ void testSpecialNumbers(IsSubnormalFunc func) {
+ EXPECT_FALSE(func(aNaN));
+ EXPECT_FALSE(func(neg_aNaN));
+ EXPECT_FALSE(func(sNaN));
+ EXPECT_FALSE(func(neg_sNaN));
+ EXPECT_FALSE(func(inf));
+ EXPECT_FALSE(func(neg_inf));
+ EXPECT_FALSE(func(min_normal));
+ EXPECT_FALSE(func(max_normal));
+ EXPECT_FALSE(func(neg_max_normal));
+ EXPECT_TRUE(func(min_denormal));
+ EXPECT_TRUE(func(neg_min_denormal));
+ EXPECT_TRUE(func(max_denormal));
+ EXPECT_FALSE(func(zero));
+ EXPECT_FALSE(func(neg_zero));
+ }
+};
+
+#define LIST_ISSUBNORMAL_TESTS(T, func) \
+ using LlvmLibcIsSubnormalTest = IsSubnormalTest<T>; \
+ TEST_F(LlvmLibcIsSubnormalTest, SpecialNumbers) { \
+ auto issubnormal_func = [](T x) { return func(x); }; \
+ testSpecialNumbers(issubnormal_func); \
+ }
+
+#endif // LLVM_LIBC_TEST_INCLUDE_MATH_ISSUBNORMAL_H
diff --git a/libc/test/include/issubnormal_test.c b/libc/test/include/issubnormal_test.c
new file mode 100644
index 0000000..8a45443
--- /dev/null
+++ b/libc/test/include/issubnormal_test.c
@@ -0,0 +1,24 @@
+//===-- Unittests for issubnormal macro -----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+#include "include/llvm-libc-macros/math-function-macros.h"
+
+#include <assert.h>
+
+// check if macro is defined
+#ifndef issubnormal
+#error "issubnormal macro is not defined"
+#else
+int main(void) {
+ assert(issubnormal(1.819f) == 0);
+ assert(issubnormal(-1.726) == 0);
+ assert(issubnormal(1.426L) == 0);
+ assert(issubnormal(1e-308) == 1);
+ assert(issubnormal(-1e-308) == 1);
+ return 0;
+}
+#endif
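
The assertions above pin down the intended semantics: issubnormal is true exactly for nonzero finite values whose magnitude lies below the smallest normal number, for float, double, and long double arguments alike. A definition consistent with those expectations, written in terms of C99 fpclassify (a sketch; the macro shipped in math-function-macros.h may be spelled differently):

// Sketch: classification-based issubnormal that the tests above would
// accept. 1e-308 is below DBL_MIN (~2.2250738585072014e-308), hence
// subnormal as a double.
#include <cassert>
#include <cmath>

#define ISSUBNORMAL_SKETCH(x) (std::fpclassify(x) == FP_SUBNORMAL)

int main() {
  assert(ISSUBNORMAL_SKETCH(1.819f) == 0);
  assert(ISSUBNORMAL_SKETCH(1e-308) == 1);
  return 0;
}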
diff --git a/libc/test/include/issubnormal_test.cpp b/libc/test/include/issubnormal_test.cpp
new file mode 100644
index 0000000..ff57a1f
--- /dev/null
+++ b/libc/test/include/issubnormal_test.cpp
@@ -0,0 +1,12 @@
+//===-- Unittest for issubnormal[d] macro ---------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "IsSubnormalTest.h"
+#include "include/llvm-libc-macros/math-function-macros.h"
+
+LIST_ISSUBNORMAL_TESTS(double, issubnormal)
diff --git a/libc/test/include/issubnormalf_test.cpp b/libc/test/include/issubnormalf_test.cpp
new file mode 100644
index 0000000..7ffa07e
--- /dev/null
+++ b/libc/test/include/issubnormalf_test.cpp
@@ -0,0 +1,12 @@
+//===-- Unittest for issubnormal[f] macro ---------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "IsSubnormalTest.h"
+#include "include/llvm-libc-macros/math-function-macros.h"
+
+LIST_ISSUBNORMAL_TESTS(float, issubnormal)
diff --git a/libc/test/include/issubnormall_test.cpp b/libc/test/include/issubnormall_test.cpp
new file mode 100644
index 0000000..4546e2d
--- /dev/null
+++ b/libc/test/include/issubnormall_test.cpp
@@ -0,0 +1,12 @@
+//===-- Unittest for issubnormal[l] macro ---------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "IsSubnormalTest.h"
+#include "include/llvm-libc-macros/math-function-macros.h"
+
+LIST_ISSUBNORMAL_TESTS(long double, issubnormal)
diff --git a/libc/test/src/math/smoke/AddTest.h b/libc/test/src/math/smoke/AddTest.h
index 88c2067..f06a086 100644
--- a/libc/test/src/math/smoke/AddTest.h
+++ b/libc/test/src/math/smoke/AddTest.h
@@ -35,22 +35,22 @@ public:
using AddFunc = OutType (*)(InType, InType);
void test_special_numbers(AddFunc func) {
- EXPECT_FP_IS_NAN(func(aNaN, aNaN));
- EXPECT_FP_IS_NAN_WITH_EXCEPTION(func(sNaN, sNaN), FE_INVALID);
+ EXPECT_FP_IS_NAN(func(in.aNaN, in.aNaN));
+ EXPECT_FP_IS_NAN_WITH_EXCEPTION(func(in.sNaN, in.sNaN), FE_INVALID);
InType qnan_42 = InFPBits::quiet_nan(Sign::POS, 0x42).get_val();
- EXPECT_FP_IS_NAN(func(qnan_42, zero));
- EXPECT_FP_IS_NAN(func(zero, qnan_42));
+ EXPECT_FP_IS_NAN(func(qnan_42, in.zero));
+ EXPECT_FP_IS_NAN(func(in.zero, qnan_42));
- EXPECT_FP_EQ(inf, func(inf, zero));
- EXPECT_FP_EQ(neg_inf, func(neg_inf, zero));
- EXPECT_FP_EQ(inf, func(inf, neg_zero));
- EXPECT_FP_EQ(neg_inf, func(neg_inf, neg_zero));
+ EXPECT_FP_EQ(inf, func(in.inf, in.zero));
+ EXPECT_FP_EQ(neg_inf, func(in.neg_inf, in.zero));
+ EXPECT_FP_EQ(inf, func(in.inf, in.neg_zero));
+ EXPECT_FP_EQ(neg_inf, func(in.neg_inf, in.neg_zero));
}
void test_invalid_operations(AddFunc func) {
- EXPECT_FP_IS_NAN_WITH_EXCEPTION(func(inf, neg_inf), FE_INVALID);
- EXPECT_FP_IS_NAN_WITH_EXCEPTION(func(neg_inf, inf), FE_INVALID);
+ EXPECT_FP_IS_NAN_WITH_EXCEPTION(func(in.inf, in.neg_inf), FE_INVALID);
+ EXPECT_FP_IS_NAN_WITH_EXCEPTION(func(in.neg_inf, in.inf), FE_INVALID);
}
void test_range_errors(AddFunc func) {
@@ -58,10 +58,11 @@ public:
using namespace LIBC_NAMESPACE::fputil::testing;
if (ForceRoundingMode r(RoundingMode::Nearest); r.success) {
- EXPECT_FP_EQ_WITH_EXCEPTION(inf, func(max_normal, max_normal),
+ EXPECT_FP_EQ_WITH_EXCEPTION(inf, func(in.max_normal, in.max_normal),
FE_OVERFLOW | FE_INEXACT);
EXPECT_MATH_ERRNO(ERANGE);
- EXPECT_FP_EQ_WITH_EXCEPTION(-inf, func(neg_max_normal, neg_max_normal),
+ EXPECT_FP_EQ_WITH_EXCEPTION(-inf,
+ func(in.neg_max_normal, in.neg_max_normal),
FE_OVERFLOW | FE_INEXACT);
EXPECT_MATH_ERRNO(ERANGE);
@@ -75,10 +76,11 @@ public:
}
if (ForceRoundingMode r(RoundingMode::TowardZero); r.success) {
- EXPECT_FP_EQ_WITH_EXCEPTION(max_normal, func(max_normal, max_normal),
+ EXPECT_FP_EQ_WITH_EXCEPTION(max_normal,
+ func(in.max_normal, in.max_normal),
FE_OVERFLOW | FE_INEXACT);
EXPECT_FP_EQ_WITH_EXCEPTION(neg_max_normal,
- func(neg_max_normal, neg_max_normal),
+ func(in.neg_max_normal, in.neg_max_normal),
FE_OVERFLOW | FE_INEXACT);
EXPECT_FP_EQ_WITH_EXCEPTION(zero, func(in.min_denormal, in.min_denormal),
@@ -91,9 +93,11 @@ public:
}
if (ForceRoundingMode r(RoundingMode::Downward); r.success) {
- EXPECT_FP_EQ_WITH_EXCEPTION(max_normal, func(max_normal, max_normal),
+ EXPECT_FP_EQ_WITH_EXCEPTION(max_normal,
+ func(in.max_normal, in.max_normal),
FE_OVERFLOW | FE_INEXACT);
- EXPECT_FP_EQ_WITH_EXCEPTION(-inf, func(neg_max_normal, neg_max_normal),
+ EXPECT_FP_EQ_WITH_EXCEPTION(-inf,
+ func(in.neg_max_normal, in.neg_max_normal),
FE_OVERFLOW | FE_INEXACT);
EXPECT_MATH_ERRNO(ERANGE);
@@ -107,11 +111,11 @@ public:
}
if (ForceRoundingMode r(RoundingMode::Upward); r.success) {
- EXPECT_FP_EQ_WITH_EXCEPTION(inf, func(max_normal, max_normal),
+ EXPECT_FP_EQ_WITH_EXCEPTION(inf, func(in.max_normal, in.max_normal),
FE_OVERFLOW | FE_INEXACT);
EXPECT_MATH_ERRNO(ERANGE);
EXPECT_FP_EQ_WITH_EXCEPTION(neg_max_normal,
- func(neg_max_normal, neg_max_normal),
+ func(in.neg_max_normal, in.neg_max_normal),
FE_OVERFLOW | FE_INEXACT);
EXPECT_FP_EQ_WITH_EXCEPTION(min_denormal,
@@ -127,7 +131,7 @@ public:
}
void test_inexact_results(AddFunc func) {
- func(InType(1.0), min_denormal);
+ func(InType(1.0), in.min_denormal);
EXPECT_FP_EXCEPTION(FE_INEXACT);
}
};
diff --git a/libc/test/src/math/smoke/CMakeLists.txt b/libc/test/src/math/smoke/CMakeLists.txt
index 47e1692..9f9203c4 100644
--- a/libc/test/src/math/smoke/CMakeLists.txt
+++ b/libc/test/src/math/smoke/CMakeLists.txt
@@ -401,6 +401,7 @@ add_fp_unittest(
FmaTest.h
DEPENDS
libc.src.math.dfmal
+ libc.src.__support.macros.properties.types
)
add_fp_unittest(
@@ -413,6 +414,7 @@ add_fp_unittest(
FmaTest.h
DEPENDS
libc.src.math.dfmaf128
+ libc.src.__support.macros.properties.types
)
add_fp_unittest(
@@ -1062,6 +1064,7 @@ add_fp_unittest(
libc.hdr.fenv_macros
libc.src.errno.errno
libc.src.math.expf16
+ libc.src.__support.FPUtil.cast
)
add_fp_unittest(
@@ -1098,6 +1101,7 @@ add_fp_unittest(
libc.hdr.fenv_macros
libc.src.errno.errno
libc.src.math.exp2f16
+ libc.src.__support.FPUtil.cast
)
add_fp_unittest(
@@ -1145,6 +1149,7 @@ add_fp_unittest(
libc.hdr.fenv_macros
libc.src.errno.errno
libc.src.math.exp10f16
+ libc.src.__support.FPUtil.cast
)
add_fp_unittest(
@@ -3317,6 +3322,7 @@ add_fp_unittest(
FmaTest.h
DEPENDS
libc.src.math.fmaf
+ libc.src.__support.macros.properties.types
FLAGS
FMA_OPT__ONLY
)
@@ -3331,6 +3337,7 @@ add_fp_unittest(
FmaTest.h
DEPENDS
libc.src.math.fma
+ libc.src.__support.macros.properties.types
)
add_fp_unittest(
@@ -3368,6 +3375,7 @@ add_fp_unittest(
libc.hdr.fenv_macros
libc.src.errno.errno
libc.src.math.expm1f16
+ libc.src.__support.FPUtil.cast
)
add_fp_unittest(
@@ -4352,6 +4360,7 @@ add_fp_unittest(
FmaTest.h
DEPENDS
libc.src.math.f16fma
+ libc.src.__support.macros.properties.types
)
add_fp_unittest(
@@ -4364,6 +4373,7 @@ add_fp_unittest(
FmaTest.h
DEPENDS
libc.src.math.f16fmaf
+ libc.src.__support.macros.properties.types
)
add_fp_unittest(
@@ -4376,6 +4386,7 @@ add_fp_unittest(
FmaTest.h
DEPENDS
libc.src.math.f16fmal
+ libc.src.__support.macros.properties.types
)
add_fp_unittest(
@@ -4388,6 +4399,7 @@ add_fp_unittest(
FmaTest.h
DEPENDS
libc.src.math.f16fmaf128
+ libc.src.__support.macros.properties.types
)
add_fp_unittest(
@@ -4490,6 +4502,7 @@ add_fp_unittest(
FmaTest.h
DEPENDS
libc.src.math.ffma
+ libc.src.__support.macros.properties.types
)
add_fp_unittest(
@@ -4502,6 +4515,7 @@ add_fp_unittest(
FmaTest.h
DEPENDS
libc.src.math.ffmal
+ libc.src.__support.macros.properties.types
)
add_fp_unittest(
@@ -4514,6 +4528,7 @@ add_fp_unittest(
FmaTest.h
DEPENDS
libc.src.math.ffmaf128
+ libc.src.__support.macros.properties.types
)
add_fp_unittest(
diff --git a/libc/test/src/math/smoke/DivTest.h b/libc/test/src/math/smoke/DivTest.h
index 6661796..60e7a8a 100644
--- a/libc/test/src/math/smoke/DivTest.h
+++ b/libc/test/src/math/smoke/DivTest.h
@@ -28,45 +28,47 @@ class DivTest : public LIBC_NAMESPACE::testing::FEnvSafeTest {
using InFPBits = typename InConstants::FPBits;
using InStorageType = typename InConstants::StorageType;
+ InConstants in;
+
public:
using DivFunc = OutType (*)(InType, InType);
void test_special_numbers(DivFunc func) {
- EXPECT_FP_IS_NAN(func(aNaN, aNaN));
- EXPECT_FP_IS_NAN_WITH_EXCEPTION(func(sNaN, sNaN), FE_INVALID);
+ EXPECT_FP_IS_NAN(func(in.aNaN, in.aNaN));
+ EXPECT_FP_IS_NAN_WITH_EXCEPTION(func(in.sNaN, in.sNaN), FE_INVALID);
InType qnan_42 = InFPBits::quiet_nan(Sign::POS, 0x42).get_val();
- EXPECT_FP_IS_NAN(func(qnan_42, zero));
- EXPECT_FP_IS_NAN(func(zero, qnan_42));
+ EXPECT_FP_IS_NAN(func(qnan_42, in.zero));
+ EXPECT_FP_IS_NAN(func(in.zero, qnan_42));
- EXPECT_FP_EQ(inf, func(inf, zero));
- EXPECT_FP_EQ(neg_inf, func(neg_inf, zero));
- EXPECT_FP_EQ(neg_inf, func(inf, neg_zero));
- EXPECT_FP_EQ(inf, func(neg_inf, neg_zero));
+ EXPECT_FP_EQ(inf, func(in.inf, in.zero));
+ EXPECT_FP_EQ(neg_inf, func(in.neg_inf, in.zero));
+ EXPECT_FP_EQ(neg_inf, func(in.inf, in.neg_zero));
+ EXPECT_FP_EQ(inf, func(in.neg_inf, in.neg_zero));
}
void test_division_by_zero(DivFunc func) {
- EXPECT_FP_EQ_WITH_EXCEPTION(inf, func(InType(1.0), zero), FE_DIVBYZERO);
- EXPECT_FP_EQ_WITH_EXCEPTION(neg_inf, func(InType(-1.0), zero),
+ EXPECT_FP_EQ_WITH_EXCEPTION(inf, func(InType(1.0), in.zero), FE_DIVBYZERO);
+ EXPECT_FP_EQ_WITH_EXCEPTION(neg_inf, func(InType(-1.0), in.zero),
FE_DIVBYZERO);
- EXPECT_FP_EQ_WITH_EXCEPTION(neg_inf, func(InType(1.0), neg_zero),
+ EXPECT_FP_EQ_WITH_EXCEPTION(neg_inf, func(InType(1.0), in.neg_zero),
FE_DIVBYZERO);
- EXPECT_FP_EQ_WITH_EXCEPTION(inf, func(InType(1.0), zero), FE_DIVBYZERO);
+    EXPECT_FP_EQ_WITH_EXCEPTION(inf, func(InType(-1.0), in.neg_zero), FE_DIVBYZERO);
}
void test_invalid_operations(DivFunc func) {
- EXPECT_FP_IS_NAN_WITH_EXCEPTION(func(zero, zero), FE_INVALID);
- EXPECT_FP_IS_NAN_WITH_EXCEPTION(func(neg_zero, zero), FE_INVALID);
- EXPECT_FP_IS_NAN_WITH_EXCEPTION(func(zero, neg_zero), FE_INVALID);
- EXPECT_FP_IS_NAN_WITH_EXCEPTION(func(neg_zero, neg_zero), FE_INVALID);
+ EXPECT_FP_IS_NAN_WITH_EXCEPTION(func(in.zero, in.zero), FE_INVALID);
+ EXPECT_FP_IS_NAN_WITH_EXCEPTION(func(in.neg_zero, in.zero), FE_INVALID);
+ EXPECT_FP_IS_NAN_WITH_EXCEPTION(func(in.zero, in.neg_zero), FE_INVALID);
+ EXPECT_FP_IS_NAN_WITH_EXCEPTION(func(in.neg_zero, in.neg_zero), FE_INVALID);
- EXPECT_FP_IS_NAN_WITH_EXCEPTION(func(inf, inf), FE_INVALID);
+ EXPECT_FP_IS_NAN_WITH_EXCEPTION(func(in.inf, in.inf), FE_INVALID);
EXPECT_MATH_ERRNO(EDOM);
- EXPECT_FP_IS_NAN_WITH_EXCEPTION(func(neg_inf, inf), FE_INVALID);
+ EXPECT_FP_IS_NAN_WITH_EXCEPTION(func(in.neg_inf, in.inf), FE_INVALID);
EXPECT_MATH_ERRNO(EDOM);
- EXPECT_FP_IS_NAN_WITH_EXCEPTION(func(inf, neg_inf), FE_INVALID);
+ EXPECT_FP_IS_NAN_WITH_EXCEPTION(func(in.inf, in.neg_inf), FE_INVALID);
EXPECT_MATH_ERRNO(EDOM);
- EXPECT_FP_IS_NAN_WITH_EXCEPTION(func(neg_inf, neg_inf), FE_INVALID);
+ EXPECT_FP_IS_NAN_WITH_EXCEPTION(func(in.neg_inf, in.neg_inf), FE_INVALID);
EXPECT_MATH_ERRNO(EDOM);
}
@@ -74,64 +76,72 @@ public:
using namespace LIBC_NAMESPACE::fputil::testing;
if (ForceRoundingMode r(RoundingMode::Nearest); r.success) {
- EXPECT_FP_EQ_WITH_EXCEPTION(inf, func(max_normal, min_normal),
+ EXPECT_FP_EQ_WITH_EXCEPTION(inf, func(in.max_normal, in.min_normal),
FE_OVERFLOW | FE_INEXACT);
EXPECT_MATH_ERRNO(ERANGE);
- EXPECT_FP_EQ_WITH_EXCEPTION(-inf, func(neg_max_normal, min_denormal),
+ EXPECT_FP_EQ_WITH_EXCEPTION(-inf,
+ func(in.neg_max_normal, in.min_denormal),
FE_OVERFLOW | FE_INEXACT);
EXPECT_MATH_ERRNO(ERANGE);
- EXPECT_FP_EQ_WITH_EXCEPTION(zero, func(min_denormal, max_normal),
+ EXPECT_FP_EQ_WITH_EXCEPTION(zero, func(in.min_denormal, in.max_normal),
FE_UNDERFLOW | FE_INEXACT);
EXPECT_MATH_ERRNO(ERANGE);
- EXPECT_FP_EQ_WITH_EXCEPTION(neg_zero, func(neg_min_denormal, max_normal),
+ EXPECT_FP_EQ_WITH_EXCEPTION(neg_zero,
+ func(in.neg_min_denormal, in.max_normal),
FE_UNDERFLOW | FE_INEXACT);
EXPECT_MATH_ERRNO(ERANGE);
}
if (ForceRoundingMode r(RoundingMode::TowardZero); r.success) {
- EXPECT_FP_EQ_WITH_EXCEPTION(max_normal, func(max_normal, min_normal),
+ EXPECT_FP_EQ_WITH_EXCEPTION(max_normal,
+ func(in.max_normal, in.min_normal),
FE_OVERFLOW | FE_INEXACT);
EXPECT_FP_EQ_WITH_EXCEPTION(neg_max_normal,
- func(neg_max_normal, min_denormal),
+ func(in.neg_max_normal, in.min_denormal),
FE_OVERFLOW | FE_INEXACT);
- EXPECT_FP_EQ_WITH_EXCEPTION(zero, func(min_denormal, max_normal),
+ EXPECT_FP_EQ_WITH_EXCEPTION(zero, func(in.min_denormal, in.max_normal),
FE_UNDERFLOW | FE_INEXACT);
EXPECT_MATH_ERRNO(ERANGE);
- EXPECT_FP_EQ_WITH_EXCEPTION(neg_zero, func(neg_min_denormal, max_normal),
+ EXPECT_FP_EQ_WITH_EXCEPTION(neg_zero,
+ func(in.neg_min_denormal, in.max_normal),
FE_UNDERFLOW | FE_INEXACT);
EXPECT_MATH_ERRNO(ERANGE);
}
if (ForceRoundingMode r(RoundingMode::Downward); r.success) {
- EXPECT_FP_EQ_WITH_EXCEPTION(max_normal, func(max_normal, min_normal),
+ EXPECT_FP_EQ_WITH_EXCEPTION(max_normal,
+ func(in.max_normal, in.min_normal),
FE_OVERFLOW | FE_INEXACT);
- EXPECT_FP_EQ_WITH_EXCEPTION(-inf, func(neg_max_normal, min_denormal),
+ EXPECT_FP_EQ_WITH_EXCEPTION(-inf,
+ func(in.neg_max_normal, in.min_denormal),
FE_OVERFLOW | FE_INEXACT);
EXPECT_MATH_ERRNO(ERANGE);
- EXPECT_FP_EQ_WITH_EXCEPTION(zero, func(min_denormal, max_normal),
+ EXPECT_FP_EQ_WITH_EXCEPTION(zero, func(in.min_denormal, in.max_normal),
FE_UNDERFLOW | FE_INEXACT);
EXPECT_MATH_ERRNO(ERANGE);
EXPECT_FP_EQ_WITH_EXCEPTION(neg_min_denormal,
- func(neg_min_denormal, max_normal),
+ func(in.neg_min_denormal, in.max_normal),
FE_UNDERFLOW | FE_INEXACT);
EXPECT_MATH_ERRNO(ERANGE);
}
if (ForceRoundingMode r(RoundingMode::Upward); r.success) {
- EXPECT_FP_EQ_WITH_EXCEPTION(inf, func(max_normal, min_normal),
+ EXPECT_FP_EQ_WITH_EXCEPTION(inf, func(in.max_normal, in.min_normal),
FE_OVERFLOW | FE_INEXACT);
EXPECT_MATH_ERRNO(ERANGE);
EXPECT_FP_EQ_WITH_EXCEPTION(neg_max_normal,
- func(neg_max_normal, min_denormal),
+ func(in.neg_max_normal, in.min_denormal),
FE_OVERFLOW | FE_INEXACT);
- EXPECT_FP_EQ_WITH_EXCEPTION(min_denormal, func(min_denormal, max_normal),
+ EXPECT_FP_EQ_WITH_EXCEPTION(min_denormal,
+ func(in.min_denormal, in.max_normal),
FE_UNDERFLOW | FE_INEXACT);
EXPECT_MATH_ERRNO(ERANGE);
- EXPECT_FP_EQ_WITH_EXCEPTION(neg_zero, func(neg_min_denormal, max_normal),
+ EXPECT_FP_EQ_WITH_EXCEPTION(neg_zero,
+ func(in.neg_min_denormal, in.max_normal),
FE_UNDERFLOW | FE_INEXACT);
EXPECT_MATH_ERRNO(ERANGE);
}
diff --git a/libc/test/src/math/smoke/FModTest.h b/libc/test/src/math/smoke/FModTest.h
index 0a4227d..ad9688f 100644
--- a/libc/test/src/math/smoke/FModTest.h
+++ b/libc/test/src/math/smoke/FModTest.h
@@ -108,61 +108,61 @@ public:
TEST_SPECIAL(T(3.0), neg_inf, T(3.0), false, 0);
TEST_SPECIAL(zero, aNaN, aNaN, false, 0);
- TEST_SPECIAL(zero, -aNaN, aNaN, false, 0);
+ TEST_SPECIAL(zero, neg_aNaN, aNaN, false, 0);
TEST_SPECIAL(neg_zero, aNaN, aNaN, false, 0);
- TEST_SPECIAL(neg_zero, -aNaN, aNaN, false, 0);
+ TEST_SPECIAL(neg_zero, neg_aNaN, aNaN, false, 0);
TEST_SPECIAL(T(1.0), aNaN, aNaN, false, 0);
- TEST_SPECIAL(T(1.0), -aNaN, aNaN, false, 0);
+ TEST_SPECIAL(T(1.0), neg_aNaN, aNaN, false, 0);
TEST_SPECIAL(inf, aNaN, aNaN, false, 0);
- TEST_SPECIAL(inf, -aNaN, aNaN, false, 0);
+ TEST_SPECIAL(inf, neg_aNaN, aNaN, false, 0);
TEST_SPECIAL(neg_inf, aNaN, aNaN, false, 0);
- TEST_SPECIAL(neg_inf, -aNaN, aNaN, false, 0);
+ TEST_SPECIAL(neg_inf, neg_aNaN, aNaN, false, 0);
TEST_SPECIAL(zero, sNaN, aNaN, false, FE_INVALID);
- TEST_SPECIAL(zero, -sNaN, aNaN, false, FE_INVALID);
+ TEST_SPECIAL(zero, neg_sNaN, aNaN, false, FE_INVALID);
TEST_SPECIAL(neg_zero, sNaN, aNaN, false, FE_INVALID);
- TEST_SPECIAL(neg_zero, -sNaN, aNaN, false, FE_INVALID);
+ TEST_SPECIAL(neg_zero, neg_sNaN, aNaN, false, FE_INVALID);
TEST_SPECIAL(T(1.0), sNaN, aNaN, false, FE_INVALID);
- TEST_SPECIAL(T(1.0), -sNaN, aNaN, false, FE_INVALID);
+ TEST_SPECIAL(T(1.0), neg_sNaN, aNaN, false, FE_INVALID);
TEST_SPECIAL(inf, sNaN, aNaN, false, FE_INVALID);
- TEST_SPECIAL(inf, -sNaN, aNaN, false, FE_INVALID);
+ TEST_SPECIAL(inf, neg_sNaN, aNaN, false, FE_INVALID);
TEST_SPECIAL(neg_inf, sNaN, aNaN, false, FE_INVALID);
- TEST_SPECIAL(neg_inf, -sNaN, aNaN, false, FE_INVALID);
+ TEST_SPECIAL(neg_inf, neg_sNaN, aNaN, false, FE_INVALID);
TEST_SPECIAL(aNaN, zero, aNaN, false, 0);
- TEST_SPECIAL(-aNaN, zero, aNaN, false, 0);
+ TEST_SPECIAL(neg_aNaN, zero, aNaN, false, 0);
TEST_SPECIAL(aNaN, neg_zero, aNaN, false, 0);
- TEST_SPECIAL(-aNaN, neg_zero, aNaN, false, 0);
+ TEST_SPECIAL(neg_aNaN, neg_zero, aNaN, false, 0);
TEST_SPECIAL(aNaN, T(1.0), aNaN, false, 0);
- TEST_SPECIAL(-aNaN, T(1.0), aNaN, false, 0);
+ TEST_SPECIAL(neg_aNaN, T(1.0), aNaN, false, 0);
TEST_SPECIAL(aNaN, inf, aNaN, false, 0);
- TEST_SPECIAL(-aNaN, inf, aNaN, false, 0);
+ TEST_SPECIAL(neg_aNaN, inf, aNaN, false, 0);
TEST_SPECIAL(aNaN, neg_inf, aNaN, false, 0);
- TEST_SPECIAL(-aNaN, neg_inf, aNaN, false, 0);
+ TEST_SPECIAL(neg_aNaN, neg_inf, aNaN, false, 0);
TEST_SPECIAL(sNaN, zero, aNaN, false, FE_INVALID);
- TEST_SPECIAL(-sNaN, zero, aNaN, false, FE_INVALID);
+ TEST_SPECIAL(neg_sNaN, zero, aNaN, false, FE_INVALID);
TEST_SPECIAL(sNaN, neg_zero, aNaN, false, FE_INVALID);
- TEST_SPECIAL(-sNaN, neg_zero, aNaN, false, FE_INVALID);
+ TEST_SPECIAL(neg_sNaN, neg_zero, aNaN, false, FE_INVALID);
TEST_SPECIAL(sNaN, T(1.0), aNaN, false, FE_INVALID);
- TEST_SPECIAL(-sNaN, T(1.0), aNaN, false, FE_INVALID);
+ TEST_SPECIAL(neg_sNaN, T(1.0), aNaN, false, FE_INVALID);
TEST_SPECIAL(sNaN, inf, aNaN, false, FE_INVALID);
- TEST_SPECIAL(-sNaN, inf, aNaN, false, FE_INVALID);
+ TEST_SPECIAL(neg_sNaN, inf, aNaN, false, FE_INVALID);
TEST_SPECIAL(sNaN, neg_inf, aNaN, false, FE_INVALID);
- TEST_SPECIAL(-sNaN, neg_inf, aNaN, false, FE_INVALID);
+ TEST_SPECIAL(neg_sNaN, neg_inf, aNaN, false, FE_INVALID);
TEST_SPECIAL(aNaN, aNaN, aNaN, false, 0);
- TEST_SPECIAL(aNaN, -aNaN, aNaN, false, 0);
- TEST_SPECIAL(-aNaN, aNaN, aNaN, false, 0);
- TEST_SPECIAL(-aNaN, -aNaN, aNaN, false, 0);
+ TEST_SPECIAL(aNaN, neg_aNaN, aNaN, false, 0);
+ TEST_SPECIAL(neg_aNaN, aNaN, aNaN, false, 0);
+ TEST_SPECIAL(neg_aNaN, neg_aNaN, aNaN, false, 0);
TEST_SPECIAL(aNaN, sNaN, aNaN, false, FE_INVALID);
- TEST_SPECIAL(aNaN, -sNaN, aNaN, false, FE_INVALID);
- TEST_SPECIAL(-aNaN, sNaN, aNaN, false, FE_INVALID);
- TEST_SPECIAL(-aNaN, -sNaN, aNaN, false, FE_INVALID);
+ TEST_SPECIAL(aNaN, neg_sNaN, aNaN, false, FE_INVALID);
+ TEST_SPECIAL(neg_aNaN, sNaN, aNaN, false, FE_INVALID);
+ TEST_SPECIAL(neg_aNaN, neg_sNaN, aNaN, false, FE_INVALID);
TEST_SPECIAL(sNaN, aNaN, aNaN, false, FE_INVALID);
- TEST_SPECIAL(sNaN, -aNaN, aNaN, false, FE_INVALID);
- TEST_SPECIAL(-sNaN, aNaN, aNaN, false, FE_INVALID);
- TEST_SPECIAL(-sNaN, -aNaN, aNaN, false, FE_INVALID);
+ TEST_SPECIAL(sNaN, neg_aNaN, aNaN, false, FE_INVALID);
+ TEST_SPECIAL(neg_sNaN, aNaN, aNaN, false, FE_INVALID);
+ TEST_SPECIAL(neg_sNaN, neg_aNaN, aNaN, false, FE_INVALID);
TEST_SPECIAL(sNaN, sNaN, aNaN, false, FE_INVALID);
- TEST_SPECIAL(sNaN, -sNaN, aNaN, false, FE_INVALID);
- TEST_SPECIAL(-sNaN, sNaN, aNaN, false, FE_INVALID);
- TEST_SPECIAL(-sNaN, -sNaN, aNaN, false, FE_INVALID);
+ TEST_SPECIAL(sNaN, neg_sNaN, aNaN, false, FE_INVALID);
+ TEST_SPECIAL(neg_sNaN, sNaN, aNaN, false, FE_INVALID);
+ TEST_SPECIAL(neg_sNaN, neg_sNaN, aNaN, false, FE_INVALID);
TEST_SPECIAL(T(6.5), T(2.25), T(2.0), false, 0);
TEST_SPECIAL(T(-6.5), T(2.25), T(-2.0), false, 0);
diff --git a/libc/test/src/math/smoke/FmaTest.h b/libc/test/src/math/smoke/FmaTest.h
index bf6d06d..4109342 100644
--- a/libc/test/src/math/smoke/FmaTest.h
+++ b/libc/test/src/math/smoke/FmaTest.h
@@ -9,6 +9,9 @@
#ifndef LLVM_LIBC_TEST_SRC_MATH_FMATEST_H
#define LLVM_LIBC_TEST_SRC_MATH_FMATEST_H
+#include "src/__support/CPP/type_traits.h"
+#include "src/__support/FPUtil/cast.h"
+#include "src/__support/macros/properties/types.h"
#include "test/UnitTest/FEnvSafeTest.h"
#include "test/UnitTest/FPMatcher.h"
#include "test/UnitTest/Test.h"
@@ -37,6 +40,11 @@ class FmaTestTemplate : public LIBC_NAMESPACE::testing::FEnvSafeTest {
OutConstants out;
InConstants in;
+ const InType in_out_min_normal =
+ LIBC_NAMESPACE::fputil::cast<InType>(out.min_normal);
+ const InType in_out_min_denormal =
+ LIBC_NAMESPACE::fputil::cast<InType>(out.min_denormal);
+
public:
using FmaFunc = OutType (*)(InType, InType, InType);
@@ -52,7 +60,7 @@ public:
// Test underflow rounding up.
EXPECT_FP_EQ(OutFPBits(OutStorageType(2)).get_val(),
- func(OutType(0.5), out.min_denormal, out.min_denormal));
+ func(InType(0.5), in_out_min_denormal, in_out_min_denormal));
if constexpr (sizeof(OutType) < sizeof(InType)) {
EXPECT_FP_EQ(out.zero,
@@ -63,8 +71,9 @@ public:
OutType v = OutFPBits(static_cast<OutStorageType>(OUT_MIN_NORMAL_U +
OutStorageType(1)))
.get_val();
- EXPECT_FP_EQ(v, func(OutType(1) / OutType(OUT_MIN_NORMAL_U << 1), v,
- out.min_normal));
+ EXPECT_FP_EQ(v, func(InType(1) / InType(OUT_MIN_NORMAL_U << 1),
+ LIBC_NAMESPACE::fputil::cast<InType>(v),
+ in_out_min_normal));
if constexpr (sizeof(OutType) < sizeof(InType)) {
InFPBits tmp = InFPBits::one();
@@ -74,12 +83,21 @@ public:
InType v = InFPBits(static_cast<InStorageType>(IN_MIN_NORMAL_U +
InStorageType(1)))
.get_val();
- EXPECT_FP_EQ(out.min_normal, func(reciprocal_value, v, out.min_normal));
+ EXPECT_FP_EQ(out.min_normal,
+ func(reciprocal_value, v, in_out_min_normal));
}
// Test overflow.
OutType z = out.max_normal;
- EXPECT_FP_EQ_ALL_ROUNDING(OutType(0.75) * z, func(InType(1.75), z, -z));
+ InType in_z = LIBC_NAMESPACE::fputil::cast<InType>(out.max_normal);
+#if defined(LIBC_TYPES_HAS_FLOAT16) && !defined(__LIBC_USE_FLOAT16_CONVERSION)
+ // Rounding modes other than the default might not be usable with float16.
+ if constexpr (LIBC_NAMESPACE::cpp::is_same_v<OutType, float16>)
+ EXPECT_FP_EQ(OutType(0.75) * z, func(InType(1.75), in_z, -in_z));
+ else
+#endif
+ EXPECT_FP_EQ_ALL_ROUNDING(OutType(0.75) * z,
+ func(InType(1.75), in_z, -in_z));
// Exact cancellation.
EXPECT_FP_EQ_ROUNDING_NEAREST(
diff --git a/libc/test/src/math/smoke/ModfTest.h b/libc/test/src/math/smoke/ModfTest.h
index 6226e5d..24cfb115 100644
--- a/libc/test/src/math/smoke/ModfTest.h
+++ b/libc/test/src/math/smoke/ModfTest.h
@@ -97,7 +97,7 @@ public:
T integral;
T frac = func(x, &integral);
- ASSERT_TRUE(LIBC_NAMESPACE::fputil::abs(frac) < 1.0l);
+ ASSERT_TRUE(LIBC_NAMESPACE::fputil::abs(frac) < T(1.0));
ASSERT_TRUE(LIBC_NAMESPACE::fputil::trunc(x) == integral);
ASSERT_TRUE(integral + frac == x);
}
diff --git a/libc/test/src/math/smoke/MulTest.h b/libc/test/src/math/smoke/MulTest.h
index 0c847e3..c409122 100644
--- a/libc/test/src/math/smoke/MulTest.h
+++ b/libc/test/src/math/smoke/MulTest.h
@@ -34,22 +34,22 @@ public:
using MulFunc = OutType (*)(InType, InType);
void test_special_numbers(MulFunc func) {
- EXPECT_FP_IS_NAN(func(aNaN, aNaN));
- EXPECT_FP_IS_NAN_WITH_EXCEPTION(func(sNaN, sNaN), FE_INVALID);
+ EXPECT_FP_IS_NAN(func(in.aNaN, in.aNaN));
+ EXPECT_FP_IS_NAN_WITH_EXCEPTION(func(in.sNaN, in.sNaN), FE_INVALID);
InType qnan_42 = InFPBits::quiet_nan(Sign::POS, 0x42).get_val();
- EXPECT_FP_IS_NAN(func(qnan_42, zero));
- EXPECT_FP_IS_NAN(func(zero, qnan_42));
+ EXPECT_FP_IS_NAN(func(qnan_42, in.zero));
+ EXPECT_FP_IS_NAN(func(in.zero, qnan_42));
- EXPECT_FP_EQ(inf, func(inf, InType(1.0)));
- EXPECT_FP_EQ(neg_inf, func(neg_inf, InType(1.0)));
- EXPECT_FP_EQ(neg_inf, func(inf, InType(-1.0)));
- EXPECT_FP_EQ(inf, func(neg_inf, InType(-1.0)));
+ EXPECT_FP_EQ(inf, func(in.inf, InType(1.0)));
+ EXPECT_FP_EQ(neg_inf, func(in.neg_inf, InType(1.0)));
+ EXPECT_FP_EQ(neg_inf, func(in.inf, InType(-1.0)));
+ EXPECT_FP_EQ(inf, func(in.neg_inf, InType(-1.0)));
- EXPECT_FP_EQ_ALL_ROUNDING(zero, func(zero, zero));
- EXPECT_FP_EQ_ALL_ROUNDING(zero, func(neg_zero, neg_zero));
- EXPECT_FP_EQ_ALL_ROUNDING(neg_zero, func(zero, neg_zero));
- EXPECT_FP_EQ_ALL_ROUNDING(neg_zero, func(neg_zero, zero));
+ EXPECT_FP_EQ_ALL_ROUNDING(zero, func(in.zero, in.zero));
+ EXPECT_FP_EQ_ALL_ROUNDING(zero, func(in.neg_zero, in.neg_zero));
+ EXPECT_FP_EQ_ALL_ROUNDING(neg_zero, func(in.zero, in.neg_zero));
+ EXPECT_FP_EQ_ALL_ROUNDING(neg_zero, func(in.neg_zero, in.zero));
EXPECT_FP_EQ_ALL_ROUNDING(OutType(1.0), func(1.0, 1.0));
EXPECT_FP_EQ_ALL_ROUNDING(OutType(15.0), func(3.0, 5.0));
@@ -58,20 +58,21 @@ public:
}
void test_invalid_operations(MulFunc func) {
- EXPECT_FP_IS_NAN_WITH_EXCEPTION(func(inf, zero), FE_INVALID);
- EXPECT_FP_IS_NAN_WITH_EXCEPTION(func(inf, neg_zero), FE_INVALID);
- EXPECT_FP_IS_NAN_WITH_EXCEPTION(func(neg_inf, zero), FE_INVALID);
- EXPECT_FP_IS_NAN_WITH_EXCEPTION(func(neg_inf, neg_zero), FE_INVALID);
+ EXPECT_FP_IS_NAN_WITH_EXCEPTION(func(in.inf, in.zero), FE_INVALID);
+ EXPECT_FP_IS_NAN_WITH_EXCEPTION(func(in.inf, in.neg_zero), FE_INVALID);
+ EXPECT_FP_IS_NAN_WITH_EXCEPTION(func(in.neg_inf, in.zero), FE_INVALID);
+ EXPECT_FP_IS_NAN_WITH_EXCEPTION(func(in.neg_inf, in.neg_zero), FE_INVALID);
}
void test_range_errors(MulFunc func) {
using namespace LIBC_NAMESPACE::fputil::testing;
if (ForceRoundingMode r(RoundingMode::Nearest); r.success) {
- EXPECT_FP_EQ_WITH_EXCEPTION(inf, func(max_normal, max_normal),
+ EXPECT_FP_EQ_WITH_EXCEPTION(inf, func(in.max_normal, in.max_normal),
FE_OVERFLOW | FE_INEXACT);
EXPECT_MATH_ERRNO(ERANGE);
- EXPECT_FP_EQ_WITH_EXCEPTION(neg_inf, func(neg_max_normal, max_normal),
+ EXPECT_FP_EQ_WITH_EXCEPTION(neg_inf,
+ func(in.neg_max_normal, in.max_normal),
FE_OVERFLOW | FE_INEXACT);
EXPECT_MATH_ERRNO(ERANGE);
@@ -85,10 +86,11 @@ public:
}
if (ForceRoundingMode r(RoundingMode::TowardZero); r.success) {
- EXPECT_FP_EQ_WITH_EXCEPTION(max_normal, func(max_normal, max_normal),
+ EXPECT_FP_EQ_WITH_EXCEPTION(max_normal,
+ func(in.max_normal, in.max_normal),
FE_OVERFLOW | FE_INEXACT);
EXPECT_FP_EQ_WITH_EXCEPTION(neg_max_normal,
- func(neg_max_normal, max_normal),
+ func(in.neg_max_normal, in.max_normal),
FE_OVERFLOW | FE_INEXACT);
EXPECT_FP_EQ_WITH_EXCEPTION(zero, func(in.min_denormal, in.min_denormal),
@@ -101,9 +103,11 @@ public:
}
if (ForceRoundingMode r(RoundingMode::Downward); r.success) {
- EXPECT_FP_EQ_WITH_EXCEPTION(max_normal, func(max_normal, max_normal),
+ EXPECT_FP_EQ_WITH_EXCEPTION(max_normal,
+ func(in.max_normal, in.max_normal),
FE_OVERFLOW | FE_INEXACT);
- EXPECT_FP_EQ_WITH_EXCEPTION(neg_inf, func(neg_max_normal, max_normal),
+ EXPECT_FP_EQ_WITH_EXCEPTION(neg_inf,
+ func(in.neg_max_normal, in.max_normal),
FE_OVERFLOW | FE_INEXACT);
EXPECT_MATH_ERRNO(ERANGE);
@@ -117,11 +121,11 @@ public:
}
if (ForceRoundingMode r(RoundingMode::Upward); r.success) {
- EXPECT_FP_EQ_WITH_EXCEPTION(inf, func(max_normal, max_normal),
+ EXPECT_FP_EQ_WITH_EXCEPTION(inf, func(in.max_normal, in.max_normal),
FE_OVERFLOW | FE_INEXACT);
EXPECT_MATH_ERRNO(ERANGE);
EXPECT_FP_EQ_WITH_EXCEPTION(neg_max_normal,
- func(neg_max_normal, max_normal),
+ func(in.neg_max_normal, in.max_normal),
FE_OVERFLOW | FE_INEXACT);
EXPECT_FP_EQ_WITH_EXCEPTION(min_denormal,
diff --git a/libc/test/src/math/smoke/NextTowardTest.h b/libc/test/src/math/smoke/NextTowardTest.h
index 5992273..61528f7 100644
--- a/libc/test/src/math/smoke/NextTowardTest.h
+++ b/libc/test/src/math/smoke/NextTowardTest.h
@@ -43,6 +43,8 @@ class NextTowardTestTemplate : public LIBC_NAMESPACE::testing::FEnvSafeTest {
const T neg_zero = FPBits::zero(Sign::NEG).get_val();
const T nan = FPBits::quiet_nan().get_val();
+ const long double to_inf = ToFPBits::inf(Sign::POS).get_val();
+ const long double to_neg_inf = ToFPBits::inf(Sign::NEG).get_val();
const long double to_zero = ToFPBits::zero().get_val();
const long double to_neg_zero = ToFPBits::zero(Sign::NEG).get_val();
const long double to_nan = ToFPBits::quiet_nan().get_val();
@@ -134,7 +136,7 @@ public:
expected = LIBC_NAMESPACE::cpp::bit_cast<T>(expected_bits);
ASSERT_FP_EQ_WITH_UNDERFLOW(result, expected);
- result = func(x, inf);
+ result = func(x, to_inf);
expected_bits = min_normal + 1;
expected = LIBC_NAMESPACE::cpp::bit_cast<T>(expected_bits);
ASSERT_FP_EQ(result, expected);
@@ -145,7 +147,7 @@ public:
expected = LIBC_NAMESPACE::cpp::bit_cast<T>(expected_bits);
ASSERT_FP_EQ_WITH_UNDERFLOW(result, expected);
- result = func(x, -inf);
+ result = func(x, to_neg_inf);
expected_bits = FPBits::SIGN_MASK + min_normal + 1;
expected = LIBC_NAMESPACE::cpp::bit_cast<T>(expected_bits);
ASSERT_FP_EQ(result, expected);
@@ -156,14 +158,14 @@ public:
expected_bits = max_normal - 1;
expected = LIBC_NAMESPACE::cpp::bit_cast<T>(expected_bits);
ASSERT_FP_EQ(result, expected);
- ASSERT_FP_EQ_WITH_OVERFLOW(func(x, inf), inf);
+ ASSERT_FP_EQ_WITH_OVERFLOW(func(x, to_inf), inf);
x = -x;
result = func(x, 0);
expected_bits = FPBits::SIGN_MASK + max_normal - 1;
expected = LIBC_NAMESPACE::cpp::bit_cast<T>(expected_bits);
ASSERT_FP_EQ(result, expected);
- ASSERT_FP_EQ_WITH_OVERFLOW(func(x, -inf), -inf);
+ ASSERT_FP_EQ_WITH_OVERFLOW(func(x, to_neg_inf), neg_inf);
// 'from' is infinity.
x = inf;
@@ -171,14 +173,14 @@ public:
expected_bits = max_normal;
expected = LIBC_NAMESPACE::cpp::bit_cast<T>(expected_bits);
ASSERT_FP_EQ(result, expected);
- ASSERT_FP_EQ(func(x, inf), inf);
+ ASSERT_FP_EQ(func(x, to_inf), inf);
x = neg_inf;
result = func(x, 0);
expected_bits = FPBits::SIGN_MASK + max_normal;
expected = LIBC_NAMESPACE::cpp::bit_cast<T>(expected_bits);
ASSERT_FP_EQ(result, expected);
- ASSERT_FP_EQ(func(x, neg_inf), neg_inf);
+ ASSERT_FP_EQ(func(x, to_neg_inf), neg_inf);
// 'from' is a power of 2.
x = T(32.0);
diff --git a/libc/test/src/math/smoke/SqrtTest.h b/libc/test/src/math/smoke/SqrtTest.h
index ce9f2f8..b5eaee2 100644
--- a/libc/test/src/math/smoke/SqrtTest.h
+++ b/libc/test/src/math/smoke/SqrtTest.h
@@ -15,15 +15,21 @@ class SqrtTest : public LIBC_NAMESPACE::testing::FEnvSafeTest {
DECLARE_SPECIAL_CONSTANTS(OutType)
+ struct InConstants {
+ DECLARE_SPECIAL_CONSTANTS(InType)
+ };
+
+ InConstants in;
+
public:
typedef OutType (*SqrtFunc)(InType);
void test_special_numbers(SqrtFunc func) {
- ASSERT_FP_EQ(aNaN, func(aNaN));
- ASSERT_FP_EQ(inf, func(inf));
- ASSERT_FP_EQ(aNaN, func(neg_inf));
- ASSERT_FP_EQ(zero, func(zero));
- ASSERT_FP_EQ(neg_zero, func(neg_zero));
+ ASSERT_FP_EQ(aNaN, func(in.aNaN));
+ ASSERT_FP_EQ(inf, func(in.inf));
+ ASSERT_FP_EQ(aNaN, func(in.neg_inf));
+ ASSERT_FP_EQ(zero, func(in.zero));
+ ASSERT_FP_EQ(neg_zero, func(in.neg_zero));
ASSERT_FP_EQ(aNaN, func(InType(-1.0)));
ASSERT_FP_EQ(OutType(1.0), func(InType(1.0)));
ASSERT_FP_EQ(OutType(2.0), func(InType(4.0)));
diff --git a/libc/test/src/math/smoke/SubTest.h b/libc/test/src/math/smoke/SubTest.h
index 99c4b6c..8793b9f 100644
--- a/libc/test/src/math/smoke/SubTest.h
+++ b/libc/test/src/math/smoke/SubTest.h
@@ -34,22 +34,22 @@ public:
using SubFunc = OutType (*)(InType, InType);
void test_special_numbers(SubFunc func) {
- EXPECT_FP_IS_NAN(func(aNaN, aNaN));
- EXPECT_FP_IS_NAN_WITH_EXCEPTION(func(sNaN, sNaN), FE_INVALID);
+ EXPECT_FP_IS_NAN(func(in.aNaN, in.aNaN));
+ EXPECT_FP_IS_NAN_WITH_EXCEPTION(func(in.sNaN, in.sNaN), FE_INVALID);
InType qnan_42 = InFPBits::quiet_nan(Sign::POS, 0x42).get_val();
- EXPECT_FP_IS_NAN(func(qnan_42, zero));
- EXPECT_FP_IS_NAN(func(zero, qnan_42));
+ EXPECT_FP_IS_NAN(func(qnan_42, in.zero));
+ EXPECT_FP_IS_NAN(func(in.zero, qnan_42));
- EXPECT_FP_EQ(inf, func(inf, zero));
- EXPECT_FP_EQ(neg_inf, func(neg_inf, zero));
- EXPECT_FP_EQ(inf, func(inf, neg_zero));
- EXPECT_FP_EQ(neg_inf, func(neg_inf, neg_zero));
+ EXPECT_FP_EQ(inf, func(in.inf, in.zero));
+ EXPECT_FP_EQ(neg_inf, func(in.neg_inf, in.zero));
+ EXPECT_FP_EQ(inf, func(in.inf, in.neg_zero));
+ EXPECT_FP_EQ(neg_inf, func(in.neg_inf, in.neg_zero));
}
void test_invalid_operations(SubFunc func) {
- EXPECT_FP_IS_NAN_WITH_EXCEPTION(func(inf, inf), FE_INVALID);
- EXPECT_FP_IS_NAN_WITH_EXCEPTION(func(neg_inf, neg_inf), FE_INVALID);
+ EXPECT_FP_IS_NAN_WITH_EXCEPTION(func(in.inf, in.inf), FE_INVALID);
+ EXPECT_FP_IS_NAN_WITH_EXCEPTION(func(in.neg_inf, in.neg_inf), FE_INVALID);
}
void test_range_errors(SubFunc func) {
@@ -57,10 +57,10 @@ public:
using namespace LIBC_NAMESPACE::fputil::testing;
if (ForceRoundingMode r(RoundingMode::Nearest); r.success) {
- EXPECT_FP_EQ_WITH_EXCEPTION(inf, func(max_normal, neg_max_normal),
+ EXPECT_FP_EQ_WITH_EXCEPTION(inf, func(in.max_normal, in.neg_max_normal),
FE_OVERFLOW | FE_INEXACT);
EXPECT_MATH_ERRNO(ERANGE);
- EXPECT_FP_EQ_WITH_EXCEPTION(-inf, func(neg_max_normal, max_normal),
+ EXPECT_FP_EQ_WITH_EXCEPTION(-inf, func(in.neg_max_normal, in.max_normal),
FE_OVERFLOW | FE_INEXACT);
EXPECT_MATH_ERRNO(ERANGE);
@@ -75,10 +75,11 @@ public:
}
if (ForceRoundingMode r(RoundingMode::TowardZero); r.success) {
- EXPECT_FP_EQ_WITH_EXCEPTION(max_normal, func(max_normal, neg_max_normal),
+ EXPECT_FP_EQ_WITH_EXCEPTION(max_normal,
+ func(in.max_normal, in.neg_max_normal),
FE_OVERFLOW | FE_INEXACT);
EXPECT_FP_EQ_WITH_EXCEPTION(neg_max_normal,
- func(neg_max_normal, max_normal),
+ func(in.neg_max_normal, in.max_normal),
FE_OVERFLOW | FE_INEXACT);
EXPECT_FP_EQ_WITH_EXCEPTION(zero,
@@ -92,9 +93,10 @@ public:
}
if (ForceRoundingMode r(RoundingMode::Downward); r.success) {
- EXPECT_FP_EQ_WITH_EXCEPTION(max_normal, func(max_normal, neg_max_normal),
+ EXPECT_FP_EQ_WITH_EXCEPTION(max_normal,
+ func(in.max_normal, in.neg_max_normal),
FE_OVERFLOW | FE_INEXACT);
- EXPECT_FP_EQ_WITH_EXCEPTION(-inf, func(neg_max_normal, max_normal),
+ EXPECT_FP_EQ_WITH_EXCEPTION(-inf, func(in.neg_max_normal, in.max_normal),
FE_OVERFLOW | FE_INEXACT);
EXPECT_MATH_ERRNO(ERANGE);
@@ -109,11 +111,11 @@ public:
}
if (ForceRoundingMode r(RoundingMode::Upward); r.success) {
- EXPECT_FP_EQ_WITH_EXCEPTION(inf, func(max_normal, neg_max_normal),
+ EXPECT_FP_EQ_WITH_EXCEPTION(inf, func(in.max_normal, in.neg_max_normal),
FE_OVERFLOW | FE_INEXACT);
EXPECT_MATH_ERRNO(ERANGE);
EXPECT_FP_EQ_WITH_EXCEPTION(neg_max_normal,
- func(neg_max_normal, max_normal),
+ func(in.neg_max_normal, in.max_normal),
FE_OVERFLOW | FE_INEXACT);
EXPECT_FP_EQ_WITH_EXCEPTION(min_denormal,
@@ -129,7 +131,7 @@ public:
}
void test_inexact_results(SubFunc func) {
- func(InType(1.0), min_denormal);
+ func(InType(1.0), in.min_denormal);
EXPECT_FP_EXCEPTION(FE_INEXACT);
}
};
diff --git a/libc/test/src/math/smoke/exp10f16_test.cpp b/libc/test/src/math/smoke/exp10f16_test.cpp
index 006dfafa..1c4ef2a 100644
--- a/libc/test/src/math/smoke/exp10f16_test.cpp
+++ b/libc/test/src/math/smoke/exp10f16_test.cpp
@@ -7,6 +7,7 @@
//===----------------------------------------------------------------------===//
#include "hdr/fenv_macros.h"
+#include "src/__support/FPUtil/cast.h"
#include "src/errno/libc_errno.h"
#include "src/math/exp10f16.h"
#include "test/UnitTest/FPMatcher.h"
@@ -26,15 +27,14 @@ TEST_F(LlvmLibcExp10f16Test, SpecialNumbers) {
EXPECT_FP_EQ_ALL_ROUNDING(inf, LIBC_NAMESPACE::exp10f16(inf));
EXPECT_MATH_ERRNO(0);
- EXPECT_FP_EQ_ALL_ROUNDING(static_cast<float16>(zero),
- LIBC_NAMESPACE::exp10f16(neg_inf));
+ EXPECT_FP_EQ_ALL_ROUNDING(zero, LIBC_NAMESPACE::exp10f16(neg_inf));
EXPECT_MATH_ERRNO(0);
- EXPECT_FP_EQ_ALL_ROUNDING(static_cast<float16>(1.0f),
+ EXPECT_FP_EQ_ALL_ROUNDING(LIBC_NAMESPACE::fputil::cast<float16>(1.0f),
LIBC_NAMESPACE::exp10f16(zero));
EXPECT_MATH_ERRNO(0);
- EXPECT_FP_EQ_ALL_ROUNDING(static_cast<float16>(1.0f),
+ EXPECT_FP_EQ_ALL_ROUNDING(LIBC_NAMESPACE::fputil::cast<float16>(1.0f),
LIBC_NAMESPACE::exp10f16(neg_zero));
EXPECT_MATH_ERRNO(0);
}
@@ -47,7 +47,8 @@ TEST_F(LlvmLibcExp10f16Test, Overflow) {
EXPECT_MATH_ERRNO(ERANGE);
EXPECT_FP_EQ_WITH_EXCEPTION(
- inf, LIBC_NAMESPACE::exp10f16(static_cast<float16>(5.0)), FE_OVERFLOW);
+ inf, LIBC_NAMESPACE::exp10f16(LIBC_NAMESPACE::fputil::cast<float16>(5.0)),
+ FE_OVERFLOW);
EXPECT_MATH_ERRNO(ERANGE);
}
@@ -59,7 +60,8 @@ TEST_F(LlvmLibcExp10f16Test, Underflow) {
EXPECT_MATH_ERRNO(ERANGE);
EXPECT_FP_EQ_WITH_EXCEPTION(
- zero, LIBC_NAMESPACE::exp10f16(static_cast<float16>(-8.0)),
+ zero,
+ LIBC_NAMESPACE::exp10f16(LIBC_NAMESPACE::fputil::cast<float16>(-8.0)),
FE_UNDERFLOW | FE_INEXACT);
EXPECT_MATH_ERRNO(ERANGE);
}
diff --git a/libc/test/src/math/smoke/exp2f16_test.cpp b/libc/test/src/math/smoke/exp2f16_test.cpp
index cd87e61..f69b33a 100644
--- a/libc/test/src/math/smoke/exp2f16_test.cpp
+++ b/libc/test/src/math/smoke/exp2f16_test.cpp
@@ -7,6 +7,7 @@
//===----------------------------------------------------------------------===//
#include "hdr/fenv_macros.h"
+#include "src/__support/FPUtil/cast.h"
#include "src/errno/libc_errno.h"
#include "src/math/exp2f16.h"
#include "test/UnitTest/FPMatcher.h"
@@ -26,15 +27,14 @@ TEST_F(LlvmLibcExp2f16Test, SpecialNumbers) {
EXPECT_FP_EQ_ALL_ROUNDING(inf, LIBC_NAMESPACE::exp2f16(inf));
EXPECT_MATH_ERRNO(0);
- EXPECT_FP_EQ_ALL_ROUNDING(static_cast<float16>(zero),
- LIBC_NAMESPACE::exp2f16(neg_inf));
+ EXPECT_FP_EQ_ALL_ROUNDING(zero, LIBC_NAMESPACE::exp2f16(neg_inf));
EXPECT_MATH_ERRNO(0);
- EXPECT_FP_EQ_ALL_ROUNDING(static_cast<float16>(1.0f),
+ EXPECT_FP_EQ_ALL_ROUNDING(LIBC_NAMESPACE::fputil::cast<float16>(1.0f),
LIBC_NAMESPACE::exp2f16(zero));
EXPECT_MATH_ERRNO(0);
- EXPECT_FP_EQ_ALL_ROUNDING(static_cast<float16>(1.0f),
+ EXPECT_FP_EQ_ALL_ROUNDING(LIBC_NAMESPACE::fputil::cast<float16>(1.0f),
LIBC_NAMESPACE::exp2f16(neg_zero));
EXPECT_MATH_ERRNO(0);
}
@@ -47,7 +47,8 @@ TEST_F(LlvmLibcExp2f16Test, Overflow) {
EXPECT_MATH_ERRNO(ERANGE);
EXPECT_FP_EQ_WITH_EXCEPTION(
- inf, LIBC_NAMESPACE::exp2f16(static_cast<float16>(16.0)), FE_OVERFLOW);
+ inf, LIBC_NAMESPACE::exp2f16(LIBC_NAMESPACE::fputil::cast<float16>(16.0)),
+ FE_OVERFLOW);
EXPECT_MATH_ERRNO(ERANGE);
}
@@ -59,7 +60,8 @@ TEST_F(LlvmLibcExp2f16Test, Underflow) {
EXPECT_MATH_ERRNO(ERANGE);
EXPECT_FP_EQ_WITH_EXCEPTION(
- zero, LIBC_NAMESPACE::exp2f16(static_cast<float16>(-25.0)),
+ zero,
+ LIBC_NAMESPACE::exp2f16(LIBC_NAMESPACE::fputil::cast<float16>(-25.0)),
FE_UNDERFLOW | FE_INEXACT);
EXPECT_MATH_ERRNO(ERANGE);
}
diff --git a/libc/test/src/math/smoke/expf16_test.cpp b/libc/test/src/math/smoke/expf16_test.cpp
index 969870f..ab745a3 100644
--- a/libc/test/src/math/smoke/expf16_test.cpp
+++ b/libc/test/src/math/smoke/expf16_test.cpp
@@ -8,6 +8,7 @@
#include "hdr/errno_macros.h"
#include "hdr/fenv_macros.h"
+#include "src/__support/FPUtil/cast.h"
#include "src/errno/libc_errno.h"
#include "src/math/expf16.h"
#include "test/UnitTest/FPMatcher.h"
@@ -27,15 +28,14 @@ TEST_F(LlvmLibcExpf16Test, SpecialNumbers) {
EXPECT_FP_EQ_ALL_ROUNDING(inf, LIBC_NAMESPACE::expf16(inf));
EXPECT_MATH_ERRNO(0);
- EXPECT_FP_EQ_ALL_ROUNDING(static_cast<float16>(zero),
- LIBC_NAMESPACE::expf16(neg_inf));
+ EXPECT_FP_EQ_ALL_ROUNDING(zero, LIBC_NAMESPACE::expf16(neg_inf));
EXPECT_MATH_ERRNO(0);
- EXPECT_FP_EQ_ALL_ROUNDING(static_cast<float16>(1.0f),
+ EXPECT_FP_EQ_ALL_ROUNDING(LIBC_NAMESPACE::fputil::cast<float16>(1.0f),
LIBC_NAMESPACE::expf16(zero));
EXPECT_MATH_ERRNO(0);
- EXPECT_FP_EQ_ALL_ROUNDING(static_cast<float16>(1.0f),
+ EXPECT_FP_EQ_ALL_ROUNDING(LIBC_NAMESPACE::fputil::cast<float16>(1.0f),
LIBC_NAMESPACE::expf16(neg_zero));
EXPECT_MATH_ERRNO(0);
}
@@ -48,7 +48,8 @@ TEST_F(LlvmLibcExpf16Test, Overflow) {
EXPECT_MATH_ERRNO(ERANGE);
EXPECT_FP_EQ_WITH_EXCEPTION(
- inf, LIBC_NAMESPACE::expf16(static_cast<float16>(12.0)), FE_OVERFLOW);
+ inf, LIBC_NAMESPACE::expf16(LIBC_NAMESPACE::fputil::cast<float16>(12.0)),
+ FE_OVERFLOW);
EXPECT_MATH_ERRNO(ERANGE);
}
@@ -60,7 +61,8 @@ TEST_F(LlvmLibcExpf16Test, Underflow) {
EXPECT_MATH_ERRNO(ERANGE);
EXPECT_FP_EQ_WITH_EXCEPTION(
- zero, LIBC_NAMESPACE::expf16(static_cast<float16>(-18.0)),
+ zero,
+ LIBC_NAMESPACE::expf16(LIBC_NAMESPACE::fputil::cast<float16>(-18.0)),
FE_UNDERFLOW | FE_INEXACT);
EXPECT_MATH_ERRNO(ERANGE);
}
diff --git a/libc/test/src/math/smoke/expm1f16_test.cpp b/libc/test/src/math/smoke/expm1f16_test.cpp
index 3bdbaad..f297c5d 100644
--- a/libc/test/src/math/smoke/expm1f16_test.cpp
+++ b/libc/test/src/math/smoke/expm1f16_test.cpp
@@ -8,6 +8,7 @@
#include "hdr/errno_macros.h"
#include "hdr/fenv_macros.h"
+#include "src/__support/FPUtil/cast.h"
#include "src/errno/libc_errno.h"
#include "src/math/expm1f16.h"
#include "test/UnitTest/FPMatcher.h"
@@ -27,7 +28,7 @@ TEST_F(LlvmLibcExpm1f16Test, SpecialNumbers) {
EXPECT_FP_EQ_ALL_ROUNDING(inf, LIBC_NAMESPACE::expm1f16(inf));
EXPECT_MATH_ERRNO(0);
- EXPECT_FP_EQ_ALL_ROUNDING(static_cast<float16>(-1.0),
+ EXPECT_FP_EQ_ALL_ROUNDING(LIBC_NAMESPACE::fputil::cast<float16>(-1.0),
LIBC_NAMESPACE::expm1f16(neg_inf));
EXPECT_MATH_ERRNO(0);
@@ -46,7 +47,7 @@ TEST_F(LlvmLibcExpm1f16Test, Overflow) {
EXPECT_MATH_ERRNO(ERANGE);
// round(16 * log(2), HP, RN);
- float16 x = static_cast<float16>(0x1.63p+3);
+ float16 x = LIBC_NAMESPACE::fputil::cast<float16>(0x1.63p+3);
EXPECT_FP_EQ_WITH_EXCEPTION_ROUNDING_NEAREST(inf, LIBC_NAMESPACE::expm1f16(x),
FE_OVERFLOW | FE_INEXACT);
@@ -68,41 +69,44 @@ TEST_F(LlvmLibcExpm1f16Test, Overflow) {
TEST_F(LlvmLibcExpm1f16Test, ResultNearNegOne) {
LIBC_NAMESPACE::libc_errno = 0;
- EXPECT_FP_EQ_WITH_EXCEPTION(static_cast<float16>(-1.0),
+ EXPECT_FP_EQ_WITH_EXCEPTION(LIBC_NAMESPACE::fputil::cast<float16>(-1.0),
LIBC_NAMESPACE::expm1f16(neg_max_normal),
FE_INEXACT);
// round(-11 * log(2), HP, RN);
- float16 x = static_cast<float16>(-0x1.e8p+2);
+ float16 x = LIBC_NAMESPACE::fputil::cast<float16>(-0x1.e8p+2);
EXPECT_FP_EQ_WITH_EXCEPTION_ROUNDING_NEAREST(
- static_cast<float16>(-0x1.ffcp-1), LIBC_NAMESPACE::expm1f16(x),
- FE_INEXACT);
+ LIBC_NAMESPACE::fputil::cast<float16>(-0x1.ffcp-1),
+ LIBC_NAMESPACE::expm1f16(x), FE_INEXACT);
- EXPECT_FP_EQ_WITH_EXCEPTION_ROUNDING_UPWARD(static_cast<float16>(-0x1.ffcp-1),
- LIBC_NAMESPACE::expm1f16(x),
- FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION_ROUNDING_UPWARD(
+ LIBC_NAMESPACE::fputil::cast<float16>(-0x1.ffcp-1),
+ LIBC_NAMESPACE::expm1f16(x), FE_INEXACT);
EXPECT_FP_EQ_WITH_EXCEPTION_ROUNDING_DOWNWARD(
- static_cast<float16>(-1.0), LIBC_NAMESPACE::expm1f16(x), FE_INEXACT);
+ LIBC_NAMESPACE::fputil::cast<float16>(-1.0), LIBC_NAMESPACE::expm1f16(x),
+ FE_INEXACT);
EXPECT_FP_EQ_WITH_EXCEPTION_ROUNDING_TOWARD_ZERO(
- static_cast<float16>(-0x1.ffcp-1), LIBC_NAMESPACE::expm1f16(x),
- FE_INEXACT);
+ LIBC_NAMESPACE::fputil::cast<float16>(-0x1.ffcp-1),
+ LIBC_NAMESPACE::expm1f16(x), FE_INEXACT);
- x = static_cast<float16>(-0x1.0a4p+3);
+ x = LIBC_NAMESPACE::fputil::cast<float16>(-0x1.0a4p+3);
EXPECT_FP_EQ_WITH_EXCEPTION_ROUNDING_NEAREST(
- static_cast<float16>(-1.0), LIBC_NAMESPACE::expm1f16(x), FE_INEXACT);
+ LIBC_NAMESPACE::fputil::cast<float16>(-1.0), LIBC_NAMESPACE::expm1f16(x),
+ FE_INEXACT);
- EXPECT_FP_EQ_WITH_EXCEPTION_ROUNDING_UPWARD(static_cast<float16>(-0x1.ffcp-1),
- LIBC_NAMESPACE::expm1f16(x),
- FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION_ROUNDING_UPWARD(
+ LIBC_NAMESPACE::fputil::cast<float16>(-0x1.ffcp-1),
+ LIBC_NAMESPACE::expm1f16(x), FE_INEXACT);
EXPECT_FP_EQ_WITH_EXCEPTION_ROUNDING_DOWNWARD(
- static_cast<float16>(-1.0), LIBC_NAMESPACE::expm1f16(x), FE_INEXACT);
+ LIBC_NAMESPACE::fputil::cast<float16>(-1.0), LIBC_NAMESPACE::expm1f16(x),
+ FE_INEXACT);
EXPECT_FP_EQ_WITH_EXCEPTION_ROUNDING_TOWARD_ZERO(
- static_cast<float16>(-0x1.ffcp-1), LIBC_NAMESPACE::expm1f16(x),
- FE_INEXACT);
+ LIBC_NAMESPACE::fputil::cast<float16>(-0x1.ffcp-1),
+ LIBC_NAMESPACE::expm1f16(x), FE_INEXACT);
}
diff --git a/libc/utils/MPFRWrapper/CMakeLists.txt b/libc/utils/MPFRWrapper/CMakeLists.txt
index 941d3cf..0101c9f 100644
--- a/libc/utils/MPFRWrapper/CMakeLists.txt
+++ b/libc/utils/MPFRWrapper/CMakeLists.txt
@@ -14,6 +14,7 @@ if(LIBC_TESTS_CAN_USE_MPFR)
libc.src.__support.CPP.stringstream
libc.src.__support.CPP.string_view
libc.src.__support.CPP.type_traits
+ libc.src.__support.FPUtil.cast
libc.src.__support.FPUtil.fp_bits
libc.src.__support.FPUtil.fpbits_str
LibcTest.unit
diff --git a/libc/utils/MPFRWrapper/MPFRUtils.cpp b/libc/utils/MPFRWrapper/MPFRUtils.cpp
index 7ce6a70..27ff1f7 100644
--- a/libc/utils/MPFRWrapper/MPFRUtils.cpp
+++ b/libc/utils/MPFRWrapper/MPFRUtils.cpp
@@ -13,6 +13,7 @@
#include "src/__support/CPP/string_view.h"
#include "src/__support/CPP/stringstream.h"
#include "src/__support/FPUtil/FPBits.h"
+#include "src/__support/FPUtil/cast.h"
#include "src/__support/FPUtil/fpbits_str.h"
#include "src/__support/macros/config.h"
#include "src/__support/macros/properties/types.h"
@@ -683,7 +684,7 @@ template <> long double MPFRNumber::as<long double>() const {
template <> float16 MPFRNumber::as<float16>() const {
// TODO: Either prove that this cast won't cause double-rounding errors, or
// find a better way to get a float16.
- return static_cast<float16>(mpfr_get_d(value, mpfr_rounding));
+ return fputil::cast<float16>(mpfr_get_d(value, mpfr_rounding));
}
#endif
diff --git a/libc/utils/gpu/server/rpc_server.cpp b/libc/utils/gpu/server/rpc_server.cpp
index 0d4d1ad..aa65dfe 100644
--- a/libc/utils/gpu/server/rpc_server.cpp
+++ b/libc/utils/gpu/server/rpc_server.cpp
@@ -392,6 +392,35 @@ rpc_status_t handle_server_impl(
});
break;
}
+ case RPC_RENAME: {
+ uint64_t oldsizes[lane_size] = {0};
+ uint64_t newsizes[lane_size] = {0};
+ void *oldpath[lane_size] = {nullptr};
+ void *newpath[lane_size] = {nullptr};
+ port->recv_n(oldpath, oldsizes,
+ [&](uint64_t size) { return new char[size]; });
+ port->recv_n(newpath, newsizes,
+ [&](uint64_t size) { return new char[size]; });
+ port->send([&](rpc::Buffer *buffer, uint32_t id) {
+ buffer->data[0] = static_cast<uint64_t>(
+ rename(reinterpret_cast<const char *>(oldpath[id]),
+ reinterpret_cast<const char *>(newpath[id])));
+ delete[] reinterpret_cast<uint8_t *>(oldpath[id]);
+ delete[] reinterpret_cast<uint8_t *>(newpath[id]);
+ });
+ break;
+ }
+ case RPC_SYSTEM: {
+ uint64_t sizes[lane_size] = {0};
+ void *args[lane_size] = {nullptr};
+ port->recv_n(args, sizes, [&](uint64_t size) { return new char[size]; });
+ port->send([&](rpc::Buffer *buffer, uint32_t id) {
+ buffer->data[0] = static_cast<uint64_t>(
+ system(reinterpret_cast<const char *>(args[id])));
+ delete[] reinterpret_cast<uint8_t *>(args[id]);
+ });
+ break;
+ }
case RPC_NOOP: {
port->recv([](rpc::Buffer *) {});
break;
diff --git a/libcxx/docs/Status/Cxx23Issues.csv b/libcxx/docs/Status/Cxx23Issues.csv
index da4cce6..1c8bb05 100644
--- a/libcxx/docs/Status/Cxx23Issues.csv
+++ b/libcxx/docs/Status/Cxx23Issues.csv
@@ -281,7 +281,7 @@
"`LWG3631 <https://wg21.link/LWG3631>`__","``basic_format_arg(T&&)`` should use ``remove_cvref_t<T>`` throughout","2023-02 (Issaquah)","|Complete|","17.0",""
"`LWG3645 <https://wg21.link/LWG3645>`__","``resize_and_overwrite`` is overspecified to call its callback with lvalues","2023-02 (Issaquah)","|Complete|","14.0",""
"`LWG3655 <https://wg21.link/LWG3655>`__","The ``INVOKE`` operation and union types","2023-02 (Issaquah)","|Complete|","18.0",""
-"`LWG3723 <https://wg21.link/LWG3723>`__","``priority_queue::push_range`` needs to ``append_range``","2023-02 (Issaquah)","","",""
+"`LWG3723 <https://wg21.link/LWG3723>`__","``priority_queue::push_range`` needs to ``append_range``","2023-02 (Issaquah)","|Complete|","17.0",""
"`LWG3734 <https://wg21.link/LWG3734>`__","Inconsistency in ``inout_ptr`` and ``out_ptr`` for empty case","2023-02 (Issaquah)","|Complete|","19.0",""
"`LWG3772 <https://wg21.link/LWG3772>`__","``repeat_view``'s ``piecewise`` constructor is missing Postconditions","2023-02 (Issaquah)","|Complete|","17.0",""
"`LWG3786 <https://wg21.link/LWG3786>`__","Flat maps' deduction guide needs to default ``Allocator`` to be useful","2023-02 (Issaquah)","","",""
diff --git a/libcxx/include/__memory/unique_temporary_buffer.h b/libcxx/include/__memory/unique_temporary_buffer.h
index b9e2a47..001254e 100644
--- a/libcxx/include/__memory/unique_temporary_buffer.h
+++ b/libcxx/include/__memory/unique_temporary_buffer.h
@@ -47,7 +47,7 @@ template <class _Tp>
using __unique_temporary_buffer = unique_ptr<_Tp, __temporary_buffer_deleter<_Tp> >;
template <class _Tp>
-inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX23 __unique_temporary_buffer<_Tp>
+inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_NO_CFI _LIBCPP_CONSTEXPR_SINCE_CXX23 __unique_temporary_buffer<_Tp>
__allocate_unique_temporary_buffer(ptrdiff_t __count) {
using __deleter_type = __temporary_buffer_deleter<_Tp>;
using __unique_buffer_type = __unique_temporary_buffer<_Tp>;
diff --git a/libcxx/utils/ci/docker-compose.yml b/libcxx/utils/ci/docker-compose.yml
index 795e0dc..c32e016 100644
--- a/libcxx/utils/ci/docker-compose.yml
+++ b/libcxx/utils/ci/docker-compose.yml
@@ -21,7 +21,7 @@ services:
dockerfile: Dockerfile
target: actions-builder
args:
- BASE_IMAGE: ghcr.io/actions/actions-runner:2.317.0
+ BASE_IMAGE: ghcr.io/actions/actions-runner:2.319.1
<<: *compiler_versions
android-buildkite-builder:
image: ghcr.io/libcxx/android-buildkite-builder:${TAG:-latest}
diff --git a/libcxxabi/src/demangle/ItaniumDemangle.h b/libcxxabi/src/demangle/ItaniumDemangle.h
index 6b3f228..723bdfe 100644
--- a/libcxxabi/src/demangle/ItaniumDemangle.h
+++ b/libcxxabi/src/demangle/ItaniumDemangle.h
@@ -4450,6 +4450,7 @@ Node *AbstractManglingParser<Derived, Alloc>::parseType() {
// parse them, take the second production.
if (TryToParseTemplateArgs && look() == 'I') {
+ Subs.push_back(Result);
Node *TA = getDerived().parseTemplateArgs();
if (TA == nullptr)
return nullptr;
diff --git a/libcxxabi/test/test_demangle.pass.cpp b/libcxxabi/test/test_demangle.pass.cpp
index c8d4ca8..17786a3 100644
--- a/libcxxabi/test/test_demangle.pass.cpp
+++ b/libcxxabi/test/test_demangle.pass.cpp
@@ -30024,6 +30024,9 @@ const char* cases[][2] =
// See https://github.com/itanium-cxx-abi/cxx-abi/issues/165.
{"_ZN1C1fIiEEvDTtlNS_UlT_TL0__E_EEE", "void C::f<int>(decltype(C::'lambda'(int, auto){}))"},
+ // See https://github.com/llvm/llvm-project/issues/108009.
+ {"_ZN3FooIiE6methodIb3BarEEvT0_IT_ES3_IiE", "void Foo<int>::method<bool, Bar>(Bar<bool>, Bar<int>)"},
+
// C++20 class type non-type template parameters:
{"_Z1fIXtl1BLPi0ELi1EEEEvv", "void f<B{(int*)0, 1}>()"},
{"_Z1fIXtl1BLPi32EEEEvv", "void f<B{(int*)32}>()"},
diff --git a/lld/COFF/Driver.cpp b/lld/COFF/Driver.cpp
index 1b94f10..5a6a4a6 100644
--- a/lld/COFF/Driver.cpp
+++ b/lld/COFF/Driver.cpp
@@ -37,6 +37,7 @@
#include "llvm/Support/BinaryStreamReader.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
+#include "llvm/Support/GlobPattern.h"
#include "llvm/Support/LEB128.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Parallel.h"
@@ -113,9 +114,8 @@ static std::string getOutputPath(StringRef path, bool isDll, bool isDriver) {
// Returns true if S matches /crtend.?\.o$/.
static bool isCrtend(StringRef s) {
- if (!s.ends_with(".o"))
+ if (!s.consume_back(".o"))
return false;
- s = s.drop_back(2);
if (s.ends_with("crtend"))
return true;
return !s.empty() && s.drop_back().ends_with("crtend");
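
The change folds the ends_with(".o")/drop_back(2) pair into StringRef::consume_back, which tests for the suffix and strips it in one step. A minimal sketch of the API's contract:

#include "llvm/ADT/StringRef.h"
#include <cassert>

static bool stripsObjectSuffix() {
  llvm::StringRef s = "crtend.o";
  // consume_back returns true and drops the suffix only when it matches;
  // otherwise it leaves the string untouched and returns false.
  bool stripped = s.consume_back(".o");
  assert(stripped && s == "crtend");
  return stripped;
}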
@@ -705,6 +705,24 @@ Symbol *LinkerDriver::addUndefined(StringRef name) {
return b;
}
+void LinkerDriver::addUndefinedGlob(StringRef arg) {
+ Expected<GlobPattern> pat = GlobPattern::create(arg);
+ if (!pat) {
+ error("/includeglob: " + toString(pat.takeError()));
+ return;
+ }
+
+ SmallVector<Symbol *, 0> syms;
+ ctx.symtab.forEachSymbol([&syms, &pat](Symbol *sym) {
+ if (pat->match(sym->getName())) {
+ syms.push_back(sym);
+ }
+ });
+
+ for (Symbol *sym : syms)
+ addUndefined(sym->getName());
+}
+
StringRef LinkerDriver::mangleMaybe(Symbol *s) {
// If the plain symbol name has already been resolved, do nothing.
Undefined *unmangled = dyn_cast<Undefined>(s);
@@ -2525,6 +2543,10 @@ void LinkerDriver::linkerMain(ArrayRef<const char *> argsArr) {
} while (run());
}
+ // Handle /includeglob
+ for (StringRef pat : args::getStrings(args, OPT_incl_glob))
+ addUndefinedGlob(pat);
+
// Create wrapped symbols for -wrap option.
std::vector<WrappedSymbol> wrapped = addWrappedSymbols(ctx, args);
// Load more object files that might be needed for wrapped symbols.
diff --git a/lld/COFF/Driver.h b/lld/COFF/Driver.h
index 0c195a7..58a2ed2 100644
--- a/lld/COFF/Driver.h
+++ b/lld/COFF/Driver.h
@@ -172,6 +172,8 @@ private:
Symbol *addUndefined(StringRef sym);
+ void addUndefinedGlob(StringRef arg);
+
StringRef mangleMaybe(Symbol *s);
// Windows specific -- "main" is not the only main function in Windows.
diff --git a/lld/COFF/Options.td b/lld/COFF/Options.td
index 4bc4d7c..7ceb824 100644
--- a/lld/COFF/Options.td
+++ b/lld/COFF/Options.td
@@ -310,6 +310,9 @@ defm build_id: B<
"Generate build ID (always on when generating PDB)",
"Do not Generate build ID">;
+def incl_glob : Joined<["/", "-", "/?", "-?"], "includeglob:">,
+ HelpText<"Force symbol to be added to symbol table as undefined one using a glob pattern">;
+
// Flags for debugging
def lldmap : F<"lldmap">;
def lldmap_file : P_priv<"lldmap">;
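
Taken together, these hunks wire /includeglob: from the option table through addUndefinedGlob, which expands the glob against the current symbol table and forces every match undefined (so matching archive members get pulled in). A small sketch of the llvm::GlobPattern usage underneath, with an illustrative pattern; real error reporting goes through the driver's error():

#include "llvm/Support/Error.h"
#include "llvm/Support/GlobPattern.h"

static bool matchesGlob(llvm::StringRef pat, llvm::StringRef name) {
  llvm::Expected<llvm::GlobPattern> p = llvm::GlobPattern::create(pat);
  if (!p) {
    llvm::consumeError(p.takeError()); // the driver reports this instead
    return false;
  }
  return p->match(name); // e.g. "__prof*" matches "__profd_main"
}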
diff --git a/lld/COFF/Writer.cpp b/lld/COFF/Writer.cpp
index c276545..7cf723a8 100644
--- a/lld/COFF/Writer.cpp
+++ b/lld/COFF/Writer.cpp
@@ -472,7 +472,7 @@ bool Writer::createThunks(OutputSection *os, int margin) {
// Recheck Chunks.size() each iteration, since we can insert more
// elements into it.
for (size_t i = 0; i != os->chunks.size(); ++i) {
- SectionChunk *sc = dyn_cast_or_null<SectionChunk>(os->chunks[i]);
+ SectionChunk *sc = dyn_cast<SectionChunk>(os->chunks[i]);
if (!sc)
continue;
MachineTypes machine = sc->getMachine();
@@ -606,7 +606,7 @@ void Writer::createECCodeMap() {
// Verify that all relocations are in range, with no extra margin requirements.
bool Writer::verifyRanges(const std::vector<Chunk *> chunks) {
for (Chunk *c : chunks) {
- SectionChunk *sc = dyn_cast_or_null<SectionChunk>(c);
+ SectionChunk *sc = dyn_cast<SectionChunk>(c);
if (!sc)
continue;
MachineTypes machine = sc->getMachine();
@@ -872,8 +872,8 @@ bool Writer::fixGnuImportChunks() {
if (!pSec->chunks.empty())
hasIdata = true;
llvm::stable_sort(pSec->chunks, [&](Chunk *s, Chunk *t) {
- SectionChunk *sc1 = dyn_cast_or_null<SectionChunk>(s);
- SectionChunk *sc2 = dyn_cast_or_null<SectionChunk>(t);
+ SectionChunk *sc1 = dyn_cast<SectionChunk>(s);
+ SectionChunk *sc2 = dyn_cast<SectionChunk>(t);
if (!sc1 || !sc2) {
      // If SC1 is non-null (and SC2 is null), order them ascending. If SC2
      // or both are null, S is not less than T.
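
These call sites switch dyn_cast_or_null to dyn_cast: chunks stored in an OutputSection are never null, so the null-tolerant variant only masked that invariant. A self-contained sketch of the distinction, with Base/Derived as hypothetical stand-ins for Chunk/SectionChunk:

#include "llvm/Support/Casting.h"

// Hypothetical stand-ins with LLVM-style RTTI (classof).
struct Base {
  enum Kind { BaseKind, DerivedKind } kind;
  Base(Kind k) : kind(k) {}
};
struct Derived : Base {
  Derived() : Base(DerivedKind) {}
  static bool classof(const Base *b) { return b->kind == DerivedKind; }
};

void example(Base *nonNull, Base *maybeNull) {
  // dyn_cast: operand must be non-null; a nullptr result means the type
  // did not match, exactly what the Writer loops want to check.
  if (auto *d = llvm::dyn_cast<Derived>(nonNull))
    (void)d;
  // dyn_cast_or_null: additionally tolerates a null operand, which these
  // call sites no longer need since os->chunks holds no nulls.
  if (auto *d = llvm::dyn_cast_or_null<Derived>(maybeNull))
    (void)d;
}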
diff --git a/lld/Common/DriverDispatcher.cpp b/lld/Common/DriverDispatcher.cpp
index f5c8bcd..fe18c32 100644
--- a/lld/Common/DriverDispatcher.cpp
+++ b/lld/Common/DriverDispatcher.cpp
@@ -113,8 +113,7 @@ parseFlavorWithoutMinGW(llvm::SmallVectorImpl<const char *> &argsV) {
// Deduct the flavor from argv[0].
StringRef arg0 = path::filename(argsV[0]);
- if (arg0.ends_with_insensitive(".exe"))
- arg0 = arg0.drop_back(4);
+ arg0.consume_back_insensitive(".exe");
Flavor f = parseProgname(arg0);
if (f == Invalid) {
err("lld is a generic driver.\n"
diff --git a/lld/ELF/Arch/ARM.cpp b/lld/ELF/Arch/ARM.cpp
index 3484e66..1bbd2e1 100644
--- a/lld/ELF/Arch/ARM.cpp
+++ b/lld/ELF/Arch/ARM.cpp
@@ -1215,7 +1215,7 @@ template <class ELFT> void ObjFile<ELFT>::importCmseSymbols() {
continue;
}
- if (symtab.cmseImportLib.count(sym->getName())) {
+ if (ctx.symtab->cmseImportLib.count(sym->getName())) {
error("CMSE symbol '" + sym->getName() +
"' is multiply defined in import library '" + toString(this) + "'");
continue;
@@ -1227,7 +1227,7 @@ template <class ELFT> void ObjFile<ELFT>::importCmseSymbols() {
Twine(ACLESESYM_SIZE) + " bytes");
}
- symtab.cmseImportLib[sym->getName()] = sym;
+ ctx.symtab->cmseImportLib[sym->getName()] = sym;
}
}
@@ -1263,9 +1263,9 @@ static std::string checkCmseSymAttributes(Symbol *acleSeSym, Symbol *sym) {
void elf::processArmCmseSymbols() {
if (!ctx.arg.cmseImplib)
return;
- // Only symbols with external linkage end up in symtab, so no need to do
+ // Only symbols with external linkage end up in ctx.symtab, so no need to do
// linkage checks. Only check symbol type.
- for (Symbol *acleSeSym : symtab.getSymbols()) {
+ for (Symbol *acleSeSym : ctx.symtab->getSymbols()) {
if (!acleSeSym->getName().starts_with(ACLESESYM_PREFIX))
continue;
// If input object build attributes do not support CMSE, error and disable
@@ -1279,7 +1279,7 @@ void elf::processArmCmseSymbols() {
// Try to find the associated symbol definition.
// Symbol must have external linkage.
StringRef name = acleSeSym->getName().substr(std::strlen(ACLESESYM_PREFIX));
- Symbol *sym = symtab.find(name);
+ Symbol *sym = ctx.symtab->find(name);
if (!sym) {
error(toString(acleSeSym->file) + ": cmse special symbol '" +
acleSeSym->getName() +
@@ -1295,7 +1295,7 @@ void elf::processArmCmseSymbols() {
}
// <sym> may be redefined later in the link in .gnu.sgstubs
- symtab.cmseSymMap[name] = {acleSeSym, sym};
+ ctx.symtab->cmseSymMap[name] = {acleSeSym, sym};
}
// If this is an Arm CMSE secure app, replace references to entry symbol <sym>
@@ -1304,8 +1304,8 @@ void elf::processArmCmseSymbols() {
MutableArrayRef<Symbol *> syms = file->getMutableSymbols();
for (size_t i = 0, e = syms.size(); i != e; ++i) {
StringRef symName = syms[i]->getName();
- if (symtab.cmseSymMap.count(symName))
- syms[i] = symtab.cmseSymMap[symName].acleSeSym;
+ if (ctx.symtab->cmseSymMap.count(symName))
+ syms[i] = ctx.symtab->cmseSymMap[symName].acleSeSym;
}
});
}
@@ -1332,26 +1332,26 @@ ArmCmseSGSection::ArmCmseSGSection()
/*alignment=*/32, ".gnu.sgstubs") {
entsize = ACLESESYM_SIZE;
// The range of addresses used in the CMSE import library should be fixed.
- for (auto &[_, sym] : symtab.cmseImportLib) {
+ for (auto &[_, sym] : ctx.symtab->cmseImportLib) {
if (impLibMaxAddr <= sym->value)
impLibMaxAddr = sym->value + sym->size;
}
- if (symtab.cmseSymMap.empty())
+ if (ctx.symtab->cmseSymMap.empty())
return;
addMappingSymbol();
- for (auto &[_, entryFunc] : symtab.cmseSymMap)
+ for (auto &[_, entryFunc] : ctx.symtab->cmseSymMap)
addSGVeneer(cast<Defined>(entryFunc.acleSeSym),
cast<Defined>(entryFunc.sym));
- for (auto &[_, sym] : symtab.cmseImportLib) {
- if (!symtab.inCMSEOutImpLib.count(sym->getName()))
+ for (auto &[_, sym] : ctx.symtab->cmseImportLib) {
+ if (!ctx.symtab->inCMSEOutImpLib.count(sym->getName()))
warn("entry function '" + sym->getName() +
"' from CMSE import library is not present in secure application");
}
- if (!symtab.cmseImportLib.empty() && ctx.arg.cmseOutputLib.empty()) {
- for (auto &[_, entryFunc] : symtab.cmseSymMap) {
+ if (!ctx.symtab->cmseImportLib.empty() && ctx.arg.cmseOutputLib.empty()) {
+ for (auto &[_, entryFunc] : ctx.symtab->cmseSymMap) {
Symbol *sym = entryFunc.sym;
- if (!symtab.inCMSEOutImpLib.count(sym->getName()))
+ if (!ctx.symtab->inCMSEOutImpLib.count(sym->getName()))
warn("new entry function '" + sym->getName() +
"' introduced but no output import library specified");
}
@@ -1360,8 +1360,8 @@ ArmCmseSGSection::ArmCmseSGSection()
void ArmCmseSGSection::addSGVeneer(Symbol *acleSeSym, Symbol *sym) {
entries.emplace_back(acleSeSym, sym);
- if (symtab.cmseImportLib.count(sym->getName()))
- symtab.inCMSEOutImpLib[sym->getName()] = true;
+ if (ctx.symtab->cmseImportLib.count(sym->getName()))
+ ctx.symtab->inCMSEOutImpLib[sym->getName()] = true;
  // Symbol addresses are different, so there is nothing to do.
if (acleSeSym->file != sym->file ||
cast<Defined>(*acleSeSym).value != cast<Defined>(*sym).value)
@@ -1369,8 +1369,8 @@ void ArmCmseSGSection::addSGVeneer(Symbol *acleSeSym, Symbol *sym) {
  // Only secure symbols whose values equal those of their non-secure
  // counterparts need to be in the .gnu.sgstubs section.
ArmCmseSGVeneer *ss = nullptr;
- if (symtab.cmseImportLib.count(sym->getName())) {
- Defined *impSym = symtab.cmseImportLib[sym->getName()];
+ if (ctx.symtab->cmseImportLib.count(sym->getName())) {
+ Defined *impSym = ctx.symtab->cmseImportLib[sym->getName()];
ss = make<ArmCmseSGVeneer>(sym, acleSeSym, impSym->value);
} else {
ss = make<ArmCmseSGVeneer>(sym, acleSeSym);
@@ -1451,12 +1451,12 @@ template <typename ELFT> void elf::writeARMCmseImportLib() {
osIsPairs.emplace_back(make<OutputSection>(impSymTab->name, 0, 0), impSymTab);
osIsPairs.emplace_back(make<OutputSection>(shstrtab->name, 0, 0), shstrtab);
- std::sort(symtab.cmseSymMap.begin(), symtab.cmseSymMap.end(),
+ std::sort(ctx.symtab->cmseSymMap.begin(), ctx.symtab->cmseSymMap.end(),
[](const auto &a, const auto &b) -> bool {
return a.second.sym->getVA() < b.second.sym->getVA();
});
// Copy the secure gateway entry symbols to the import library symbol table.
- for (auto &p : symtab.cmseSymMap) {
+ for (auto &p : ctx.symtab->cmseSymMap) {
Defined *d = cast<Defined>(p.second.sym);
impSymTab->addSymbol(makeDefined(
ctx.internalFile, d->getName(), d->computeBinding(),
diff --git a/lld/ELF/Arch/PPC64.cpp b/lld/ELF/Arch/PPC64.cpp
index 803cc54..fdf3d07b 100644
--- a/lld/ELF/Arch/PPC64.cpp
+++ b/lld/ELF/Arch/PPC64.cpp
@@ -251,7 +251,7 @@ void elf::writePrefixedInstruction(uint8_t *loc, uint64_t insn) {
static bool addOptional(StringRef name, uint64_t value,
std::vector<Defined *> &defined) {
- Symbol *sym = symtab.find(name);
+ Symbol *sym = ctx.symtab->find(name);
if (!sym || sym->isDefined())
return false;
sym->resolve(Defined{ctx.internalFile, StringRef(), STB_GLOBAL, STV_HIDDEN,
diff --git a/lld/ELF/Config.h b/lld/ELF/Config.h
index 875463d..80a45bc 100644
--- a/lld/ELF/Config.h
+++ b/lld/ELF/Config.h
@@ -44,6 +44,7 @@ class InputSectionBase;
class EhInputSection;
class Defined;
class Symbol;
+class SymbolTable;
class BitcodeCompiler;
class OutputSection;
class LinkerScript;
@@ -600,6 +601,7 @@ struct Ctx {
Defined *tlsModuleBase;
};
ElfSym sym;
+ std::unique_ptr<SymbolTable> symtab;
SmallVector<std::unique_ptr<MemoryBuffer>> memoryBuffers;
SmallVector<ELFFileBase *, 0> objectFiles;
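
This is the core of the patch's lld/ELF refactor: the file-scope SymbolTable global becomes state owned by Ctx through a unique_ptr, recreated in Ctx::reset() and link() below rather than reassigned in the cleanup callback. The ownership pattern in miniature, with hypothetical names:

// Sketch of the global-to-context migration pattern (names hypothetical).
#include <memory>

struct Table { /* symbol storage */ };

struct Context {
  std::unique_ptr<Table> symtab;
  void reset() {
    // Old scheme: a file-scope `Table table;` reset via `table = Table();`.
    // New scheme: the context owns the table and recreates it per link.
    symtab = std::make_unique<Table>();
  }
};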
diff --git a/lld/ELF/Driver.cpp b/lld/ELF/Driver.cpp
index 14188f9..343fc49 100644
--- a/lld/ELF/Driver.cpp
+++ b/lld/ELF/Driver.cpp
@@ -109,6 +109,7 @@ void Ctx::reset() {
in.reset();
sym = ElfSym{};
+ symtab = std::make_unique<SymbolTable>();
memoryBuffers.clear();
objectFiles.clear();
@@ -155,7 +156,6 @@ bool link(ArrayRef<const char *> args, llvm::raw_ostream &stdoutOS,
context->e.cleanupCallback = []() {
elf::ctx.reset();
elf::ctx.partitions.emplace_back();
- symtab = SymbolTable();
SharedFile::vernauxNum = 0;
};
@@ -167,6 +167,7 @@ bool link(ArrayRef<const char *> args, llvm::raw_ostream &stdoutOS,
LinkerScript script(ctx);
ctx.script = &script;
ctx.symAux.emplace_back();
+ ctx.symtab = std::make_unique<SymbolTable>();
ctx.partitions.clear();
ctx.partitions.emplace_back();
@@ -866,8 +867,7 @@ static StripPolicy getStrip(opt::InputArgList &args) {
static uint64_t parseSectionAddress(StringRef s, opt::InputArgList &args,
const opt::Arg &arg) {
uint64_t va = 0;
- if (s.starts_with("0x"))
- s = s.drop_front(2);
+ s.consume_front("0x");
if (!to_integer(s, va, 16))
error("invalid argument: " + arg.getAsString(args));
return va;
@@ -2195,7 +2195,7 @@ static void handleUndefinedGlob(Ctx &ctx, StringRef arg) {
// Calling sym->extract() in the loop is not safe because it may add new
// symbols to the symbol table, invalidating the current iterator.
SmallVector<Symbol *, 0> syms;
- for (Symbol *sym : symtab.getSymbols())
+ for (Symbol *sym : ctx.symtab->getSymbols())
if (!sym->isPlaceholder() && pat->match(sym->getName()))
syms.push_back(sym);
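
The comment at the top of this hunk is the reason for the two-phase loop: extracting a symbol can insert new symbols and invalidate the iterator, so matches are collected first and mutated afterwards. The same pattern in miniature, with a hypothetical container of names:

#include <string>
#include <vector>

// Sketch of the collect-then-mutate pattern used by handleUndefinedGlob:
// never grow a container while iterating it.
void extractMatches(std::vector<std::string> &table,
                    const std::string &needle) {
  std::vector<std::string> matches;
  for (const std::string &s : table) // phase 1: read-only scan
    if (s.find(needle) != std::string::npos)
      matches.push_back(s);
  for (const std::string &s : matches) // phase 2: safe to grow table
    table.push_back(s + "$extracted");
}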
@@ -2204,7 +2204,7 @@ static void handleUndefinedGlob(Ctx &ctx, StringRef arg) {
}
static void handleLibcall(Ctx &ctx, StringRef name) {
- Symbol *sym = symtab.find(name);
+ Symbol *sym = ctx.symtab->find(name);
if (sym && sym->isLazy() && isa<BitcodeFile>(sym->file)) {
if (!ctx.arg.whyExtract.empty())
ctx.whyExtractRecords.emplace_back("<libcall>", sym->file, *sym);
@@ -2391,7 +2391,7 @@ template <class ELFT>
static void findKeepUniqueSections(Ctx &ctx, opt::InputArgList &args) {
for (auto *arg : args.filtered(OPT_keep_unique)) {
StringRef name = arg->getValue();
- auto *d = dyn_cast_or_null<Defined>(symtab.find(name));
+ auto *d = dyn_cast_or_null<Defined>(ctx.symtab->find(name));
if (!d || !d->section) {
warn("could not find symbol " + name + " to keep unique");
continue;
@@ -2406,7 +2406,7 @@ static void findKeepUniqueSections(Ctx &ctx, opt::InputArgList &args) {
// Symbols in the dynsym could be address-significant in other executables
// or DSOs, so we conservatively mark them as address-significant.
- for (Symbol *sym : symtab.getSymbols())
+ for (Symbol *sym : ctx.symtab->getSymbols())
if (sym->includeInDynsym())
markAddrsig(sym);
@@ -2575,24 +2575,24 @@ static std::vector<WrappedSymbol> addWrappedSymbols(opt::InputArgList &args) {
if (!seen.insert(name).second)
continue;
- Symbol *sym = symtab.find(name);
+ Symbol *sym = ctx.symtab->find(name);
if (!sym)
continue;
- Symbol *wrap =
- symtab.addUnusedUndefined(saver().save("__wrap_" + name), sym->binding);
+ Symbol *wrap = ctx.symtab->addUnusedUndefined(
+ saver().save("__wrap_" + name), sym->binding);
// If __real_ is referenced, pull in the symbol if it is lazy. Do this after
// processing __wrap_ as that may have referenced __real_.
StringRef realName = saver().save("__real_" + name);
- if (Symbol *real = symtab.find(realName)) {
- symtab.addUnusedUndefined(name, sym->binding);
+ if (Symbol *real = ctx.symtab->find(realName)) {
+ ctx.symtab->addUnusedUndefined(name, sym->binding);
// Update sym's binding, which will replace real's later in
// SymbolTable::wrap.
sym->binding = real->binding;
}
- Symbol *real = symtab.addUnusedUndefined(realName);
+ Symbol *real = ctx.symtab->addUnusedUndefined(realName);
v.push_back({sym, real, wrap});
// We want to tell LTO not to inline symbols to be overwritten
@@ -2627,7 +2627,7 @@ static void combineVersionedSymbol(Symbol &sym,
//
// * There is a definition of foo@v1 and foo@@v1.
// * There is a definition of foo@v1 and foo.
- Defined *sym2 = dyn_cast_or_null<Defined>(symtab.find(sym.getName()));
+ Defined *sym2 = dyn_cast_or_null<Defined>(ctx.symtab->find(sym.getName()));
if (!sym2)
return;
const char *suffix2 = sym2->getVersionSuffix();
@@ -2682,7 +2682,7 @@ static void redirectSymbols(Ctx &ctx, ArrayRef<WrappedSymbol> wrapped) {
// symbols with a non-default version (foo@v1) and check whether it should be
// combined with foo or foo@@v1.
if (ctx.arg.versionDefinitions.size() > 2)
- for (Symbol *sym : symtab.getSymbols())
+ for (Symbol *sym : ctx.symtab->getSymbols())
if (sym->hasVersionSuffix)
combineVersionedSymbol(*sym, map);
@@ -2698,7 +2698,7 @@ static void redirectSymbols(Ctx &ctx, ArrayRef<WrappedSymbol> wrapped) {
// Update pointers in the symbol table.
for (const WrappedSymbol &w : wrapped)
- symtab.wrap(w.sym, w.real, w.wrap);
+ ctx.symtab->wrap(w.sym, w.real, w.wrap);
}
static void reportMissingFeature(StringRef config, const Twine &report) {
@@ -2726,7 +2726,7 @@ static void checkAndReportMissingFeature(StringRef config, uint32_t features,
// For AArch64 PAuth-enabled object files, the core info of all of them must
// match. Missing info for some object files with matching info for remaining
// ones can be allowed (see -z pauth-report).
-static void readSecurityNotes() {
+static void readSecurityNotes(Ctx &ctx) {
if (ctx.arg.emachine != EM_386 && ctx.arg.emachine != EM_X86_64 &&
ctx.arg.emachine != EM_AARCH64)
return;
@@ -2862,14 +2862,14 @@ template <class ELFT> void LinkerDriver::link(opt::InputArgList &args) {
// Handle --trace-symbol.
for (auto *arg : args.filtered(OPT_trace_symbol))
- symtab.insert(arg->getValue())->traced = true;
+ ctx.symtab->insert(arg->getValue())->traced = true;
ctx.internalFile = createInternalFile("<internal>");
// Handle -u/--undefined before input files. If both a.a and b.so define foo,
// -u foo a.a b.so will extract a.a.
for (StringRef name : ctx.arg.undefined)
- symtab.addUnusedUndefined(name)->referenced = true;
+ ctx.symtab->addUnusedUndefined(name)->referenced = true;
parseFiles(files, armCmseImpLib);
@@ -2877,7 +2877,7 @@ template <class ELFT> void LinkerDriver::link(opt::InputArgList &args) {
ctx.arg.hasDynSymTab = !ctx.sharedFiles.empty() || ctx.arg.isPic;
// If an entry symbol is in a static archive, pull out that file now.
- if (Symbol *sym = symtab.find(ctx.arg.entry))
+ if (Symbol *sym = ctx.symtab->find(ctx.arg.entry))
handleUndefined(ctx, sym, "--entry");
// Handle the `--undefined-glob <pattern>` options.
@@ -2891,13 +2891,13 @@ template <class ELFT> void LinkerDriver::link(opt::InputArgList &args) {
// Prevent LTO from removing any definition referenced by -u.
for (StringRef name : ctx.arg.undefined)
- if (Defined *sym = dyn_cast_or_null<Defined>(symtab.find(name)))
+ if (Defined *sym = dyn_cast_or_null<Defined>(ctx.symtab->find(name)))
sym->isUsedInRegularObj = true;
// Mark -init and -fini symbols so that the LTO doesn't eliminate them.
- if (Symbol *sym = dyn_cast_or_null<Defined>(symtab.find(ctx.arg.init)))
+ if (Symbol *sym = dyn_cast_or_null<Defined>(ctx.symtab->find(ctx.arg.init)))
sym->isUsedInRegularObj = true;
- if (Symbol *sym = dyn_cast_or_null<Defined>(symtab.find(ctx.arg.fini)))
+ if (Symbol *sym = dyn_cast_or_null<Defined>(ctx.symtab->find(ctx.arg.fini)))
sym->isUsedInRegularObj = true;
// If any of our inputs are bitcode files, the LTO code generator may create
@@ -2969,7 +2969,7 @@ template <class ELFT> void LinkerDriver::link(opt::InputArgList &args) {
// We need to create some reserved symbols such as _end. Create them.
if (!ctx.arg.relocatable)
- addReservedSymbols();
+ addReservedSymbols(ctx);
// Apply version scripts.
//
@@ -2978,7 +2978,7 @@ template <class ELFT> void LinkerDriver::link(opt::InputArgList &args) {
// name "foo@ver1") rather do harm, so we don't call this if -r is given.
if (!ctx.arg.relocatable) {
llvm::TimeTraceScope timeScope("Process symbol versions");
- symtab.scanVersionScript();
+ ctx.symtab->scanVersionScript();
}
// Skip the normal linked output if some LTO options are specified.
@@ -3113,7 +3113,7 @@ template <class ELFT> void LinkerDriver::link(opt::InputArgList &args) {
// Read .note.gnu.property sections from input object files which
// contain a hint to tweak linker's and loader's behaviors.
- readSecurityNotes();
+ readSecurityNotes(ctx);
// The Target instance handles target-specific stuff, such as applying
// relocations or writing a PLT section. It also contains target-dependent
@@ -3147,7 +3147,7 @@ template <class ELFT> void LinkerDriver::link(opt::InputArgList &args) {
// Make copies of any input sections that need to be copied into each
// partition.
- copySectionsIntoPartitions();
+ copySectionsIntoPartitions(ctx);
if (canHaveMemtagGlobals()) {
llvm::TimeTraceScope timeScope("Process memory tagged symbols");
diff --git a/lld/ELF/ICF.cpp b/lld/ELF/ICF.cpp
index 9caff0b..3f4f479 100644
--- a/lld/ELF/ICF.cpp
+++ b/lld/ELF/ICF.cpp
@@ -468,7 +468,7 @@ template <class ELFT> void ICF<ELFT>::run() {
// cannot be merged with the later computeIsPreemptible() pass which is used
// by scanRelocations().
if (ctx.arg.hasDynSymTab)
- for (Symbol *sym : symtab.getSymbols())
+ for (Symbol *sym : ctx.symtab->getSymbols())
sym->isPreemptible = computeIsPreemptible(*sym);
// Two text sections may have identical content and relocations but different
@@ -568,7 +568,7 @@ template <class ELFT> void ICF<ELFT>::run() {
d->folded = true;
}
};
- for (Symbol *sym : symtab.getSymbols())
+ for (Symbol *sym : ctx.symtab->getSymbols())
fold(sym);
parallelForEach(ctx.objectFiles, [&](ELFFileBase *file) {
for (Symbol *sym : file->getLocalSymbols())
diff --git a/lld/ELF/InputFiles.cpp b/lld/ELF/InputFiles.cpp
index 07a32a5..8dc6811 100644
--- a/lld/ELF/InputFiles.cpp
+++ b/lld/ELF/InputFiles.cpp
@@ -667,10 +667,10 @@ template <class ELFT> void ObjFile<ELFT>::parse(bool ignoreComdats) {
if (flag && flag != GRP_COMDAT)
fatal(toString(this) + ": unsupported SHT_GROUP format");
- bool keepGroup =
- (flag & GRP_COMDAT) == 0 || ignoreComdats ||
- symtab.comdatGroups.try_emplace(CachedHashStringRef(signature), this)
- .second;
+ bool keepGroup = (flag & GRP_COMDAT) == 0 || ignoreComdats ||
+ ctx.symtab->comdatGroups
+ .try_emplace(CachedHashStringRef(signature), this)
+ .second;
if (keepGroup) {
if (!ctx.arg.resolveGroups)
this->sections[i] = createInputSection(
@@ -817,8 +817,8 @@ void ObjFile<ELFT>::initializeSections(bool ignoreComdats,
ArrayRef<Elf_Word> entries =
cantFail(obj.template getSectionContentsAsArray<Elf_Word>(sec));
if ((entries[0] & GRP_COMDAT) == 0 || ignoreComdats ||
- symtab.comdatGroups.find(CachedHashStringRef(signature))->second ==
- this)
+ ctx.symtab->comdatGroups.find(CachedHashStringRef(signature))
+ ->second == this)
selectedGroups.push_back(entries);
break;
}
@@ -1130,7 +1130,8 @@ void ObjFile<ELFT>::initializeSymbols(const object::ELFFile<ELFT> &obj) {
// Some entries have been filled by LazyObjFile.
for (size_t i = firstGlobal, end = eSyms.size(); i != end; ++i)
if (!symbols[i])
- symbols[i] = symtab.insert(CHECK(eSyms[i].getName(stringTable), this));
+ symbols[i] =
+ ctx.symtab->insert(CHECK(eSyms[i].getName(stringTable), this));
// Perform symbol resolution on non-local symbols.
SmallVector<unsigned, 32> undefineds;
@@ -1508,7 +1509,7 @@ template <class ELFT> void SharedFile::parse() {
DenseMap<CachedHashStringRef, SharedFile *>::iterator it;
bool wasInserted;
std::tie(it, wasInserted) =
- symtab.soNames.try_emplace(CachedHashStringRef(soName), this);
+ ctx.symtab->soNames.try_emplace(CachedHashStringRef(soName), this);
// If a DSO appears more than once on the command line with and without
// --as-needed, --no-as-needed takes precedence over --as-needed because a
@@ -1574,7 +1575,7 @@ template <class ELFT> void SharedFile::parse() {
name = saver().save(
(name + "@" + verName).toStringRef(versionedNameBuffer));
}
- Symbol *s = symtab.addSymbol(
+ Symbol *s = ctx.symtab->addSymbol(
Undefined{this, name, sym.getBinding(), sym.st_other, sym.getType()});
s->exportDynamic = true;
if (sym.getBinding() != STB_WEAK &&
@@ -1598,7 +1599,7 @@ template <class ELFT> void SharedFile::parse() {
uint32_t alignment = getAlignment<ELFT>(sections, sym);
if (ver == idx) {
- auto *s = symtab.addSymbol(
+ auto *s = ctx.symtab->addSymbol(
SharedSymbol{*this, name, sym.getBinding(), sym.st_other,
sym.getType(), sym.st_value, sym.st_size, alignment});
s->dsoDefined = true;
@@ -1616,7 +1617,7 @@ template <class ELFT> void SharedFile::parse() {
reinterpret_cast<const Elf_Verdef *>(verdefs[idx])->getAux()->vda_name;
versionedNameBuffer.clear();
name = (name + "@" + verName).toStringRef(versionedNameBuffer);
- auto *s = symtab.addSymbol(
+ auto *s = ctx.symtab->addSymbol(
SharedSymbol{*this, saver().save(name), sym.getBinding(), sym.st_other,
sym.getType(), sym.st_value, sym.st_size, alignment});
s->dsoDefined = true;
@@ -1751,7 +1752,7 @@ createBitcodeSymbol(Symbol *&sym, const std::vector<bool> &keptComdats,
// this way LTO can reference the same string saver's copy rather than
// keeping copies of its own.
objSym.Name = uniqueSaver().save(objSym.getName());
- sym = symtab.insert(objSym.getName());
+ sym = ctx.symtab->insert(objSym.getName());
}
int c = objSym.getComdatIndex();
@@ -1778,7 +1779,7 @@ void BitcodeFile::parse() {
for (std::pair<StringRef, Comdat::SelectionKind> s : obj->getComdatTable()) {
keptComdats.push_back(
s.second == Comdat::NoDeduplicate ||
- symtab.comdatGroups.try_emplace(CachedHashStringRef(s.first), this)
+ ctx.symtab->comdatGroups.try_emplace(CachedHashStringRef(s.first), this)
.second);
}
@@ -1810,7 +1811,7 @@ void BitcodeFile::parseLazy() {
// keeping copies of its own.
irSym.Name = uniqueSaver().save(irSym.getName());
if (!irSym.isUndefined()) {
- auto *sym = symtab.insert(irSym.getName());
+ auto *sym = ctx.symtab->insert(irSym.getName());
sym->resolve(LazySymbol{*this});
symbols[i] = sym;
}
@@ -1847,15 +1848,15 @@ void BinaryFile::parse() {
llvm::StringSaver &saver = lld::saver();
- symtab.addAndCheckDuplicate(Defined{this, saver.save(s + "_start"),
- STB_GLOBAL, STV_DEFAULT, STT_OBJECT, 0, 0,
- section});
- symtab.addAndCheckDuplicate(Defined{this, saver.save(s + "_end"), STB_GLOBAL,
- STV_DEFAULT, STT_OBJECT, data.size(), 0,
- section});
- symtab.addAndCheckDuplicate(Defined{this, saver.save(s + "_size"), STB_GLOBAL,
- STV_DEFAULT, STT_OBJECT, data.size(), 0,
- nullptr});
+ ctx.symtab->addAndCheckDuplicate(Defined{this, saver.save(s + "_start"),
+ STB_GLOBAL, STV_DEFAULT, STT_OBJECT,
+ 0, 0, section});
+ ctx.symtab->addAndCheckDuplicate(Defined{this, saver.save(s + "_end"),
+ STB_GLOBAL, STV_DEFAULT, STT_OBJECT,
+ data.size(), 0, section});
+ ctx.symtab->addAndCheckDuplicate(Defined{this, saver.save(s + "_size"),
+ STB_GLOBAL, STV_DEFAULT, STT_OBJECT,
+ data.size(), 0, nullptr});
}
InputFile *elf::createInternalFile(StringRef name) {
@@ -1902,7 +1903,7 @@ template <class ELFT> void ObjFile<ELFT>::parseLazy() {
for (size_t i = firstGlobal, end = eSyms.size(); i != end; ++i) {
if (eSyms[i].st_shndx == SHN_UNDEF)
continue;
- symbols[i] = symtab.insert(CHECK(eSyms[i].getName(stringTable), this));
+ symbols[i] = ctx.symtab->insert(CHECK(eSyms[i].getName(stringTable), this));
symbols[i]->resolve(LazySymbol{*this});
if (!lazy)
break;
diff --git a/lld/ELF/InputSection.cpp b/lld/ELF/InputSection.cpp
index e9985bb..54a214e 100644
--- a/lld/ELF/InputSection.cpp
+++ b/lld/ELF/InputSection.cpp
@@ -1129,7 +1129,7 @@ static void switchMorestackCallsToMorestackNonSplit(
// If the target adjusted a function's prologue, all calls to
// __morestack inside that function should be switched to
// __morestack_non_split.
- Symbol *moreStackNonSplit = symtab.find("__morestack_non_split");
+ Symbol *moreStackNonSplit = ctx.symtab->find("__morestack_non_split");
if (!moreStackNonSplit) {
error("mixing split-stack objects requires a definition of "
"__morestack_non_split");
diff --git a/lld/ELF/LTO.cpp b/lld/ELF/LTO.cpp
index 1dca824..56668a3 100644
--- a/lld/ELF/LTO.cpp
+++ b/lld/ELF/LTO.cpp
@@ -200,7 +200,7 @@ BitcodeCompiler::BitcodeCompiler(Ctx &ctx) : ctx(ctx) {
// Initialize usedStartStop.
if (ctx.bitcodeFiles.empty())
return;
- for (Symbol *sym : symtab.getSymbols()) {
+ for (Symbol *sym : ctx.symtab->getSymbols()) {
if (sym->isPlaceholder())
continue;
StringRef s = sym->getName();
diff --git a/lld/ELF/LinkerScript.cpp b/lld/ELF/LinkerScript.cpp
index 55e5f23..1ff3336 100644
--- a/lld/ELF/LinkerScript.cpp
+++ b/lld/ELF/LinkerScript.cpp
@@ -230,7 +230,7 @@ void LinkerScript::addSymbol(SymbolAssignment *cmd) {
Defined newSym(createInternalFile(cmd->location), cmd->name, STB_GLOBAL,
visibility, value.type, symValue, 0, sec);
- Symbol *sym = symtab.insert(cmd->name);
+ Symbol *sym = ctx.symtab->insert(cmd->name);
sym->mergeProperties(newSym);
newSym.overwrite(*sym);
sym->isUsedInRegularObj = true;
@@ -249,7 +249,7 @@ static void declareSymbol(SymbolAssignment *cmd) {
// If the symbol is already defined, its order is 0 (with absence indicating
// 0); otherwise it's assigned the order of the SymbolAssignment.
- Symbol *sym = symtab.insert(cmd->name);
+ Symbol *sym = ctx.symtab->insert(cmd->name);
if (!sym->isDefined())
ctx.scriptSymOrder.insert({sym, cmd->symOrder});
@@ -1682,7 +1682,7 @@ ExprValue LinkerScript::getSymbolValue(StringRef name, const Twine &loc) {
return 0;
}
- if (Symbol *sym = symtab.find(name)) {
+ if (Symbol *sym = ctx.symtab->find(name)) {
if (auto *ds = dyn_cast<Defined>(sym)) {
ExprValue v{ds->section, false, ds->value, loc};
// Retain the original st_type, so that the alias will get the same
@@ -1781,8 +1781,8 @@ void LinkerScript::checkFinalScriptConditions() const {
void LinkerScript::addScriptReferencedSymbolsToSymTable() {
// Some symbols (such as __ehdr_start) are defined lazily only when there
// are undefined symbols for them, so we add these to trigger that logic.
- auto reference = [](StringRef name) {
- Symbol *sym = symtab.addUnusedUndefined(name);
+ auto reference = [&ctx = ctx](StringRef name) {
+ Symbol *sym = ctx.symtab->addUnusedUndefined(name);
sym->isUsedInRegularObj = true;
sym->referenced = true;
};
@@ -1811,6 +1811,6 @@ void LinkerScript::addScriptReferencedSymbolsToSymTable() {
}
bool LinkerScript::shouldAddProvideSym(StringRef symName) {
- Symbol *sym = symtab.find(symName);
+ Symbol *sym = elf::ctx.symtab->find(symName);
return sym && !sym->isDefined() && !sym->isCommon();
}
diff --git a/lld/ELF/MarkLive.cpp b/lld/ELF/MarkLive.cpp
index 60e62c0..b9a4e39 100644
--- a/lld/ELF/MarkLive.cpp
+++ b/lld/ELF/MarkLive.cpp
@@ -219,7 +219,7 @@ template <class ELFT> void MarkLive<ELFT>::run() {
// Preserve externally-visible symbols if the symbols defined by this
  // file can interpose other ELF files' symbols at runtime.
- for (Symbol *sym : symtab.getSymbols())
+ for (Symbol *sym : ctx.symtab->getSymbols())
if (sym->includeInDynsym() && sym->partition == partition)
markSymbol(sym);
@@ -229,16 +229,16 @@ template <class ELFT> void MarkLive<ELFT>::run() {
return;
}
- markSymbol(symtab.find(ctx.arg.entry));
- markSymbol(symtab.find(ctx.arg.init));
- markSymbol(symtab.find(ctx.arg.fini));
+ markSymbol(ctx.symtab->find(ctx.arg.entry));
+ markSymbol(ctx.symtab->find(ctx.arg.init));
+ markSymbol(ctx.symtab->find(ctx.arg.fini));
for (StringRef s : ctx.arg.undefined)
- markSymbol(symtab.find(s));
+ markSymbol(ctx.symtab->find(s));
for (StringRef s : ctx.script->referencedSymbols)
- markSymbol(symtab.find(s));
- for (auto [symName, _] : symtab.cmseSymMap) {
- markSymbol(symtab.cmseSymMap[symName].sym);
- markSymbol(symtab.cmseSymMap[symName].acleSeSym);
+ markSymbol(ctx.symtab->find(s));
+ for (auto [symName, _] : ctx.symtab->cmseSymMap) {
+ markSymbol(ctx.symtab->cmseSymMap[symName].sym);
+ markSymbol(ctx.symtab->cmseSymMap[symName].acleSeSym);
}
// Mark .eh_frame sections as live because there are usually no relocations
@@ -350,8 +350,8 @@ template <class ELFT> void MarkLive<ELFT>::moveToMain() {
for (InputSectionBase *sec : ctx.inputSections) {
if (!sec->isLive() || !isValidCIdentifier(sec->name))
continue;
- if (symtab.find(("__start_" + sec->name).str()) ||
- symtab.find(("__stop_" + sec->name).str()))
+ if (ctx.symtab->find(("__start_" + sec->name).str()) ||
+ ctx.symtab->find(("__stop_" + sec->name).str()))
enqueue(sec, 0);
}
@@ -366,7 +366,7 @@ template <class ELFT> void elf::markLive() {
// If --gc-sections is not given, retain all input sections.
if (!ctx.arg.gcSections) {
// If a DSO defines a symbol referenced in a regular object, it is needed.
- for (Symbol *sym : symtab.getSymbols())
+ for (Symbol *sym : ctx.symtab->getSymbols())
if (auto *s = dyn_cast<SharedSymbol>(sym))
if (s->isUsedInRegularObj && !s->isWeak())
cast<SharedFile>(s->file)->isNeeded = true;
diff --git a/lld/ELF/Relocations.cpp b/lld/ELF/Relocations.cpp
index 3cc6515..e0181f0 100644
--- a/lld/ELF/Relocations.cpp
+++ b/lld/ELF/Relocations.cpp
@@ -295,7 +295,7 @@ static SmallSet<SharedSymbol *, 4> getSymbolsAt(SharedSymbol &ss) {
s.getType() == STT_TLS || s.st_value != ss.value)
continue;
StringRef name = check(s.getName(file.getStringTable()));
- Symbol *sym = symtab.find(name);
+ Symbol *sym = ctx.symtab->find(name);
if (auto *alias = dyn_cast_or_null<SharedSymbol>(sym))
ret.insert(alias);
}
@@ -545,7 +545,7 @@ static std::string maybeReportDiscarded(Undefined &sym) {
// If the discarded section is a COMDAT.
StringRef signature = file->getShtGroupSignature(objSections, elfSec);
if (const InputFile *prevailing =
- symtab.comdatGroups.lookup(CachedHashStringRef(signature))) {
+ ctx.symtab->comdatGroups.lookup(CachedHashStringRef(signature))) {
msg += "\n>>> section group signature: " + signature.str() +
"\n>>> prevailing definition is in " + toString(prevailing);
if (sym.nonPrevailing) {
@@ -618,7 +618,7 @@ static const Symbol *getAlternativeSpelling(const Undefined &sym,
return s;
// If in the symbol table and not undefined.
- if (const Symbol *s = symtab.find(newName))
+ if (const Symbol *s = ctx.symtab->find(newName))
if (!s->isUndefined())
return s;
@@ -667,7 +667,7 @@ static const Symbol *getAlternativeSpelling(const Undefined &sym,
for (auto &it : map)
if (name.equals_insensitive(it.first))
return it.second;
- for (Symbol *sym : symtab.getSymbols())
+ for (Symbol *sym : ctx.symtab->getSymbols())
if (!sym->isUndefined() && name.equals_insensitive(sym->getName()))
return sym;
@@ -693,7 +693,7 @@ static const Symbol *getAlternativeSpelling(const Undefined &sym,
break;
}
if (!s)
- for (Symbol *sym : symtab.getSymbols())
+ for (Symbol *sym : ctx.symtab->getSymbols())
if (canSuggestExternCForCXX(name, sym->getName())) {
s = sym;
break;
@@ -1870,7 +1870,7 @@ void elf::postScanRelocations() {
}
assert(ctx.symAux.size() == 1);
- for (Symbol *sym : symtab.getSymbols())
+ for (Symbol *sym : ctx.symtab->getSymbols())
fn(*sym);
// Local symbols may need the aforementioned non-preemptible ifunc and GOT
@@ -2384,7 +2384,7 @@ bool elf::hexagonNeedsTLSSymbol(ArrayRef<OutputSection *> outputSections) {
}
void elf::hexagonTLSSymbolUpdate(ArrayRef<OutputSection *> outputSections) {
- Symbol *sym = symtab.find("__tls_get_addr");
+ Symbol *sym = ctx.symtab->find("__tls_get_addr");
if (!sym)
return;
bool needEntry = true;
diff --git a/lld/ELF/ScriptParser.cpp b/lld/ELF/ScriptParser.cpp
index 12cd905..b16b2e5 100644
--- a/lld/ELF/ScriptParser.cpp
+++ b/lld/ELF/ScriptParser.cpp
@@ -1581,7 +1581,7 @@ Expr ScriptParser::readPrimary() {
// script, it must happen before this DEFINED.
auto order = ctx.scriptSymOrderCounter++;
return [=, &ctx = this->ctx] {
- Symbol *s = symtab.find(name);
+ Symbol *s = ctx.symtab->find(name);
return s && s->isDefined() && ctx.scriptSymOrder.lookup(s) < order ? 1
: 0;
};
diff --git a/lld/ELF/SymbolTable.cpp b/lld/ELF/SymbolTable.cpp
index 4a4b4d3..74fa66e 100644
--- a/lld/ELF/SymbolTable.cpp
+++ b/lld/ELF/SymbolTable.cpp
@@ -29,8 +29,6 @@ using namespace llvm::ELF;
using namespace lld;
using namespace lld::elf;
-SymbolTable elf::symtab;
-
void SymbolTable::wrap(Symbol *sym, Symbol *real, Symbol *wrap) {
// Redirect __real_foo to the original foo and foo to the original __wrap_foo.
int &idx1 = symMap[CachedHashStringRef(sym->getName())];
diff --git a/lld/ELF/SymbolTable.h b/lld/ELF/SymbolTable.h
index 269f7f2..c0bc735 100644
--- a/lld/ELF/SymbolTable.h
+++ b/lld/ELF/SymbolTable.h
@@ -104,8 +104,6 @@ private:
std::optional<llvm::StringMap<SmallVector<Symbol *, 0>>> demangledSyms;
};
-LLVM_LIBRARY_VISIBILITY extern SymbolTable symtab;
-
} // namespace lld::elf
#endif
diff --git a/lld/ELF/SyntheticSections.cpp b/lld/ELF/SyntheticSections.cpp
index a736b5d..ce31c37 100644
--- a/lld/ELF/SyntheticSections.cpp
+++ b/lld/ELF/SyntheticSections.cpp
@@ -1509,10 +1509,10 @@ DynamicSection<ELFT>::computeContents() {
addInt(DT_FINI_ARRAYSZ, ctx.out.finiArray->size);
}
- if (Symbol *b = symtab.find(ctx.arg.init))
+ if (Symbol *b = ctx.symtab->find(ctx.arg.init))
if (b->isDefined())
addInt(DT_INIT, b->getVA());
- if (Symbol *b = symtab.find(ctx.arg.fini))
+ if (Symbol *b = ctx.symtab->find(ctx.arg.fini))
if (b->isDefined())
addInt(DT_FINI, b->getVA());
}
@@ -1692,9 +1692,9 @@ void RelocationBaseSection::finalizeContents() {
}
}
-void DynamicReloc::computeRaw(SymbolTableBaseSection *symtab) {
+void DynamicReloc::computeRaw(SymbolTableBaseSection *symt) {
r_offset = getOffset();
- r_sym = getSymIndex(symtab);
+ r_sym = getSymIndex(symt);
addend = computeAddend();
kind = AddendOnly; // Catch errors
}
@@ -2327,8 +2327,9 @@ SymtabShndxSection::SymtabShndxSection()
void SymtabShndxSection::writeTo(uint8_t *buf) {
// We write an array of 32 bit values, where each value has 1:1 association
- // with an entry in .symtab. If the corresponding entry contains SHN_XINDEX,
- // we need to write actual index, otherwise, we must write SHN_UNDEF(0).
+  // with an entry in ctx.in.symTab. If the corresponding entry contains
+  // SHN_XINDEX, we need to write the actual index; otherwise, we must write
+  // SHN_UNDEF(0).
buf += 4; // Ignore .symtab[0] entry.
for (const SymbolTableEntry &entry : ctx.in.symTab->getSymbols()) {
if (!getCommonSec(entry.sym) && getSymSectionIndex(entry.sym) == SHN_XINDEX)
@@ -4640,7 +4641,7 @@ static OutputSection *findSection(StringRef name) {
static Defined *addOptionalRegular(StringRef name, SectionBase *sec,
uint64_t val, uint8_t stOther = STV_HIDDEN) {
- Symbol *s = symtab.find(name);
+ Symbol *s = ctx.symtab->find(name);
if (!s || s->isDefined() || s->isCommon())
return nullptr;
diff --git a/lld/ELF/SyntheticSections.h b/lld/ELF/SyntheticSections.h
index 6d0634e..34654a2 100644
--- a/lld/ELF/SyntheticSections.h
+++ b/lld/ELF/SyntheticSections.h
@@ -459,7 +459,7 @@ public:
/// address/the address of the corresponding GOT entry/etc.
int64_t computeAddend() const;
- void computeRaw(SymbolTableBaseSection *symtab);
+ void computeRaw(SymbolTableBaseSection *symt);
Symbol *sym;
const OutputSection *outputSec = nullptr;
diff --git a/lld/ELF/Writer.cpp b/lld/ELF/Writer.cpp
index 49a319c..ce7cbc2 100644
--- a/lld/ELF/Writer.cpp
+++ b/lld/ELF/Writer.cpp
@@ -96,7 +96,7 @@ template <class ELFT> void elf::writeResult(Ctx &ctx) {
Writer<ELFT>(ctx).run();
}
-static void removeEmptyPTLoad(SmallVector<PhdrEntry *, 0> &phdrs) {
+static void removeEmptyPTLoad(Ctx &ctx, SmallVector<PhdrEntry *, 0> &phdrs) {
auto it = std::stable_partition(
phdrs.begin(), phdrs.end(), [&](const PhdrEntry *p) {
if (p->p_type != PT_LOAD)
@@ -116,7 +116,7 @@ static void removeEmptyPTLoad(SmallVector<PhdrEntry *, 0> &phdrs) {
phdrs.erase(it, phdrs.end());
}
-void elf::copySectionsIntoPartitions() {
+void elf::copySectionsIntoPartitions(Ctx &ctx) {
SmallVector<InputSectionBase *, 0> newSections;
const size_t ehSize = ctx.ehInputSections.size();
for (unsigned part = 2; part != ctx.partitions.size() + 1; ++part) {
@@ -139,9 +139,9 @@ void elf::copySectionsIntoPartitions() {
newSections.end());
}
-static Defined *addOptionalRegular(StringRef name, SectionBase *sec,
+static Defined *addOptionalRegular(Ctx &ctx, StringRef name, SectionBase *sec,
uint64_t val, uint8_t stOther = STV_HIDDEN) {
- Symbol *s = symtab.find(name);
+ Symbol *s = ctx.symtab->find(name);
if (!s || s->isDefined() || s->isCommon())
return nullptr;
@@ -154,12 +154,12 @@ static Defined *addOptionalRegular(StringRef name, SectionBase *sec,
// The linker is expected to define some symbols depending on
// the linking result. This function defines such symbols.
-void elf::addReservedSymbols() {
+void elf::addReservedSymbols(Ctx &ctx) {
if (ctx.arg.emachine == EM_MIPS) {
- auto addAbsolute = [](StringRef name) {
+ auto addAbsolute = [&](StringRef name) {
Symbol *sym =
- symtab.addSymbol(Defined{ctx.internalFile, name, STB_GLOBAL,
- STV_HIDDEN, STT_NOTYPE, 0, 0, nullptr});
+ ctx.symtab->addSymbol(Defined{ctx.internalFile, name, STB_GLOBAL,
+ STV_HIDDEN, STT_NOTYPE, 0, 0, nullptr});
sym->isUsedInRegularObj = true;
return cast<Defined>(sym);
};
@@ -172,19 +172,19 @@ void elf::addReservedSymbols() {
    // On the MIPS O32 ABI, _gp_disp is a magic symbol that designates the
    // offset between the start of a function and the 'gp' pointer into the GOT.
- if (symtab.find("_gp_disp"))
+ if (ctx.symtab->find("_gp_disp"))
ctx.sym.mipsGpDisp = addAbsolute("_gp_disp");
// The __gnu_local_gp is a magic symbol equal to the current value of 'gp'
// pointer. This symbol is used in the code generated by .cpload pseudo-op
// in case of using -mno-shared option.
// https://sourceware.org/ml/binutils/2004-12/msg00094.html
- if (symtab.find("__gnu_local_gp"))
+ if (ctx.symtab->find("__gnu_local_gp"))
ctx.sym.mipsLocalGp = addAbsolute("__gnu_local_gp");
} else if (ctx.arg.emachine == EM_PPC) {
    // glibc *crt1.o has an undefined reference to _SDA_BASE_. Since we don't
// support Small Data Area, define it arbitrarily as 0.
- addOptionalRegular("_SDA_BASE_", nullptr, 0, STV_HIDDEN);
+ addOptionalRegular(ctx, "_SDA_BASE_", nullptr, 0, STV_HIDDEN);
} else if (ctx.arg.emachine == EM_PPC64) {
addPPC64SaveRestore();
}
@@ -200,7 +200,7 @@ void elf::addReservedSymbols() {
StringRef gotSymName =
(ctx.arg.emachine == EM_PPC64) ? ".TOC." : "_GLOBAL_OFFSET_TABLE_";
- if (Symbol *s = symtab.find(gotSymName)) {
+ if (Symbol *s = ctx.symtab->find(gotSymName)) {
if (s->isDefined()) {
error(toString(s->file) + " cannot redefine linker defined symbol '" +
gotSymName + "'");
@@ -220,23 +220,24 @@ void elf::addReservedSymbols() {
// this symbol unconditionally even when using a linker script, which
  // differs from the behavior of the GNU linker, which only defines this
  // symbol if the ELF headers are in the memory-mapped segment.
- addOptionalRegular("__ehdr_start", ctx.out.elfHeader, 0, STV_HIDDEN);
+ addOptionalRegular(ctx, "__ehdr_start", ctx.out.elfHeader, 0, STV_HIDDEN);
// __executable_start is not documented, but the expectation of at
// least the Android libc is that it points to the ELF header.
- addOptionalRegular("__executable_start", ctx.out.elfHeader, 0, STV_HIDDEN);
+ addOptionalRegular(ctx, "__executable_start", ctx.out.elfHeader, 0,
+ STV_HIDDEN);
// __dso_handle symbol is passed to cxa_finalize as a marker to identify
// each DSO. The address of the symbol doesn't matter as long as they are
// different in different DSOs, so we chose the start address of the DSO.
- addOptionalRegular("__dso_handle", ctx.out.elfHeader, 0, STV_HIDDEN);
+ addOptionalRegular(ctx, "__dso_handle", ctx.out.elfHeader, 0, STV_HIDDEN);
  // If the linker script does the layout, we do not need to create any standard symbols.
if (ctx.script->hasSectionsCommand)
return;
- auto add = [](StringRef s, int64_t pos) {
- return addOptionalRegular(s, ctx.out.elfHeader, pos, STV_DEFAULT);
+ auto add = [&](StringRef s, int64_t pos) {
+ return addOptionalRegular(ctx, s, ctx.out.elfHeader, pos, STV_DEFAULT);
};
ctx.sym.bss = add("__bss_start", 0);
@@ -270,10 +271,10 @@ static void demoteDefined(Defined &sym, DenseMap<SectionBase *, size_t> &map) {
//
// In addition, demote symbols defined in discarded sections, so that
// references to /DISCARD/ discarded symbols will lead to errors.
-static void demoteSymbolsAndComputeIsPreemptible() {
+static void demoteSymbolsAndComputeIsPreemptible(Ctx &ctx) {
llvm::TimeTraceScope timeScope("Demote symbols");
DenseMap<InputFile *, DenseMap<SectionBase *, size_t>> sectionIndexMap;
- for (Symbol *sym : symtab.getSymbols()) {
+ for (Symbol *sym : ctx.symtab->getSymbols()) {
if (auto *d = dyn_cast<Defined>(sym)) {
if (d->section && !d->section->isLive())
demoteDefined(*d, sectionIndexMap[d->file]);
@@ -322,7 +323,7 @@ template <class ELFT> void Writer<ELFT>::run() {
// 0 sized region. This has to be done late since only after assignAddresses
// we know the size of the sections.
for (Partition &part : ctx.partitions)
- removeEmptyPTLoad(part.phdrs);
+ removeEmptyPTLoad(ctx, part.phdrs);
if (!ctx.arg.oFormatBinary)
assignFileOffsets();
@@ -391,7 +392,7 @@ static void markUsedLocalSymbolsImpl(ObjFile<ELFT> *file,
// The function ensures that the "used" field of local symbols reflects the fact
// that the symbol is used in a relocation from a live section.
-template <class ELFT> static void markUsedLocalSymbols() {
+template <class ELFT> static void markUsedLocalSymbols(Ctx &ctx) {
// With --gc-sections, the field is already filled.
// See MarkLive<ELFT>::resolveReloc().
if (ctx.arg.gcSections)
@@ -419,7 +420,7 @@ template <class ELFT> static void markUsedLocalSymbols() {
}
}
-static bool shouldKeepInSymtab(const Defined &sym) {
+static bool shouldKeepInSymtab(Ctx &ctx, const Defined &sym) {
if (sym.isSection())
return false;
@@ -474,7 +475,7 @@ bool lld::elf::includeInSymtab(const Symbol &b) {
// - demote symbols defined relative to /DISCARD/ discarded input sections so
// that relocations referencing them will lead to errors.
// - copy eligible symbols to .symTab
-static void demoteAndCopyLocalSymbols() {
+static void demoteAndCopyLocalSymbols(Ctx &ctx) {
llvm::TimeTraceScope timeScope("Add local symbols");
for (ELFFileBase *file : ctx.objectFiles) {
DenseMap<SectionBase *, size_t> sectionIndexMap;
@@ -486,7 +487,8 @@ static void demoteAndCopyLocalSymbols() {
if (dr->section && !dr->section->isLive())
demoteDefined(*dr, sectionIndexMap);
- else if (ctx.in.symTab && includeInSymtab(*b) && shouldKeepInSymtab(*dr))
+ else if (ctx.in.symTab && includeInSymtab(*b) &&
+ shouldKeepInSymtab(ctx, *dr))
ctx.in.symTab->addSymbol(b);
}
}
@@ -811,10 +813,10 @@ template <class ELFT> void Writer<ELFT>::addRelIpltSymbols() {
// .rela.dyn will be present in the output.
std::string name = ctx.arg.isRela ? "__rela_iplt_start" : "__rel_iplt_start";
ctx.sym.relaIpltStart =
- addOptionalRegular(name, ctx.out.elfHeader, 0, STV_HIDDEN);
+ addOptionalRegular(ctx, name, ctx.out.elfHeader, 0, STV_HIDDEN);
name.replace(name.size() - 5, 5, "end");
ctx.sym.relaIpltEnd =
- addOptionalRegular(name, ctx.out.elfHeader, 0, STV_HIDDEN);
+ addOptionalRegular(ctx, name, ctx.out.elfHeader, 0, STV_HIDDEN);
}
// This function generates assignments for predefined symbols (e.g. _end or
@@ -1106,7 +1108,7 @@ static DenseMap<const InputSectionBase *, int> buildSectionOrder() {
// We want both global and local symbols. We get the global ones from the
// symbol table and iterate the object files for the local ones.
- for (Symbol *sym : symtab.getSymbols())
+ for (Symbol *sym : ctx.symtab->getSymbols())
addSym(*sym);
for (ELFFileBase *file : ctx.objectFiles)
@@ -1661,7 +1663,7 @@ template <class ELFT> void Writer<ELFT>::optimizeBasicBlockJumps() {
// To deal with the above problem, this function is called after
// scanRelocations is called to remove synthetic sections that turn
// out to be empty.
-static void removeUnusedSyntheticSections() {
+static void removeUnusedSyntheticSections(Ctx &ctx) {
// All input synthetic sections that can be empty are placed after
// all regular ones. Reverse iterate to find the first synthetic section
// after a non-synthetic one which will be our starting point.
@@ -1724,7 +1726,7 @@ template <class ELFT> void Writer<ELFT>::finalizeSections() {
// Even the author of gold doesn't remember why gold behaves that way.
// https://sourceware.org/ml/binutils/2002-03/msg00360.html
if (ctx.mainPart->dynamic->parent) {
- Symbol *s = symtab.addSymbol(Defined{
+ Symbol *s = ctx.symtab->addSymbol(Defined{
ctx.internalFile, "_DYNAMIC", STB_WEAK, STV_HIDDEN, STT_NOTYPE,
/*value=*/0, /*size=*/0, ctx.mainPart->dynamic.get()});
s->isUsedInRegularObj = true;
@@ -1740,12 +1742,12 @@ template <class ELFT> void Writer<ELFT>::finalizeSections() {
if (ctx.arg.emachine == EM_RISCV) {
if (!ctx.arg.shared) {
OutputSection *sec = findSection(".sdata");
- addOptionalRegular("__global_pointer$", sec ? sec : ctx.out.elfHeader,
- 0x800, STV_DEFAULT);
+ addOptionalRegular(ctx, "__global_pointer$",
+ sec ? sec : ctx.out.elfHeader, 0x800, STV_DEFAULT);
// Set riscvGlobalPointer to be used by the optional global pointer
// relaxation.
if (ctx.arg.relaxGP) {
- Symbol *s = symtab.find("__global_pointer$");
+ Symbol *s = ctx.symtab->find("__global_pointer$");
if (s && s->isDefined())
ctx.sym.riscvGlobalPointer = cast<Defined>(s);
}
@@ -1764,7 +1766,7 @@ template <class ELFT> void Writer<ELFT>::finalizeSections() {
// 2) is special cased in @tpoff computation. To satisfy 1), we define it
// as an absolute symbol of zero. This is different from GNU linkers which
// define _TLS_MODULE_BASE_ relative to the first TLS section.
- Symbol *s = symtab.find("_TLS_MODULE_BASE_");
+ Symbol *s = ctx.symtab->find("_TLS_MODULE_BASE_");
if (s && s->isUndefined()) {
s->resolve(Defined{ctx.internalFile, StringRef(), STB_GLOBAL,
STV_HIDDEN, STT_TLS, /*value=*/0, 0,
@@ -1783,11 +1785,11 @@ template <class ELFT> void Writer<ELFT>::finalizeSections() {
}
}
- demoteSymbolsAndComputeIsPreemptible();
+ demoteSymbolsAndComputeIsPreemptible(ctx);
if (ctx.arg.copyRelocs && ctx.arg.discard != DiscardPolicy::None)
- markUsedLocalSymbols<ELFT>();
- demoteAndCopyLocalSymbols();
+ markUsedLocalSymbols<ELFT>(ctx);
+ demoteAndCopyLocalSymbols(ctx);
if (ctx.arg.copyRelocs)
addSectionSymbols();
@@ -1832,7 +1834,7 @@ template <class ELFT> void Writer<ELFT>::finalizeSections() {
for (SharedFile *file : ctx.sharedFiles) {
bool allNeededIsKnown =
llvm::all_of(file->dtNeeded, [&](StringRef needed) {
- return symtab.soNames.count(CachedHashStringRef(needed));
+ return ctx.symtab->soNames.count(CachedHashStringRef(needed));
});
if (!allNeededIsKnown)
continue;
@@ -1857,7 +1859,7 @@ template <class ELFT> void Writer<ELFT>::finalizeSections() {
llvm::TimeTraceScope timeScope("Add symbols to symtabs");
// Now that we have defined all possible global symbols including linker-
// synthesized ones. Visit all symbols to give the finishing touches.
- for (Symbol *sym : symtab.getSymbols()) {
+ for (Symbol *sym : ctx.symtab->getSymbols()) {
if (!sym->isUsedInRegularObj || !includeInSymtab(*sym))
continue;
if (!ctx.arg.relocatable)
@@ -1890,7 +1892,7 @@ template <class ELFT> void Writer<ELFT>::finalizeSections() {
if (ctx.in.mipsGot)
ctx.in.mipsGot->build();
- removeUnusedSyntheticSections();
+ removeUnusedSyntheticSections(ctx);
ctx.script->diagnoseOrphanHandling();
ctx.script->diagnoseMissingSGSectionAddress();
@@ -1922,8 +1924,8 @@ template <class ELFT> void Writer<ELFT>::finalizeSections() {
if (ctx.arg.emachine == EM_HEXAGON &&
hexagonNeedsTLSSymbol(ctx.outputSections)) {
Symbol *sym =
- symtab.addSymbol(Undefined{ctx.internalFile, "__tls_get_addr",
- STB_GLOBAL, STV_DEFAULT, STT_NOTYPE});
+ ctx.symtab->addSymbol(Undefined{ctx.internalFile, "__tls_get_addr",
+ STB_GLOBAL, STV_DEFAULT, STT_NOTYPE});
sym->isPreemptible = true;
ctx.partitions[0].dynSymTab->addSymbol(sym);
}
@@ -2111,13 +2113,13 @@ template <class ELFT> void Writer<ELFT>::addStartEndSymbols() {
// correct.
auto define = [=](StringRef start, StringRef end, OutputSection *os) {
if (os) {
- Defined *startSym = addOptionalRegular(start, os, 0);
- Defined *stopSym = addOptionalRegular(end, os, -1);
+ Defined *startSym = addOptionalRegular(ctx, start, os, 0);
+ Defined *stopSym = addOptionalRegular(ctx, end, os, -1);
if (startSym || stopSym)
os->usedInExpression = true;
} else {
- addOptionalRegular(start, ctx.out.elfHeader, 0);
- addOptionalRegular(end, ctx.out.elfHeader, 0);
+ addOptionalRegular(ctx, start, ctx.out.elfHeader, 0);
+ addOptionalRegular(ctx, end, ctx.out.elfHeader, 0);
}
};
@@ -2141,10 +2143,11 @@ void Writer<ELFT>::addStartStopSymbols(OutputSection &osec) {
StringRef s = osec.name;
if (!isValidCIdentifier(s))
return;
- Defined *startSym = addOptionalRegular(saver().save("__start_" + s), &osec, 0,
- ctx.arg.zStartStopVisibility);
- Defined *stopSym = addOptionalRegular(saver().save("__stop_" + s), &osec, -1,
- ctx.arg.zStartStopVisibility);
+ Defined *startSym =
+ addOptionalRegular(ctx, saver().save("__start_" + s), &osec, 0,
+ ctx.arg.zStartStopVisibility);
+ Defined *stopSym = addOptionalRegular(ctx, saver().save("__stop_" + s), &osec,
+ -1, ctx.arg.zStartStopVisibility);
if (startSym || stopSym)
osec.usedInExpression = true;
}
@@ -2162,7 +2165,7 @@ static bool needsPtLoad(OutputSection *sec) {
}
// Adjust phdr flags according to certain options.
-static uint64_t computeFlags(uint64_t flags) {
+static uint64_t computeFlags(Ctx &ctx, uint64_t flags) {
if (ctx.arg.omagic)
return PF_R | PF_W | PF_X;
if (ctx.arg.executeOnly && (flags & PF_X))
@@ -2184,7 +2187,7 @@ SmallVector<PhdrEntry *, 0> Writer<ELFT>::createPhdrs(Partition &part) {
bool isMain = partNo == 1;
// Add the first PT_LOAD segment for regular output sections.
- uint64_t flags = computeFlags(PF_R);
+ uint64_t flags = computeFlags(ctx, PF_R);
PhdrEntry *load = nullptr;
// nmagic or omagic output does not have PT_PHDR, PT_INTERP, or the readonly
@@ -2247,7 +2250,7 @@ SmallVector<PhdrEntry *, 0> Writer<ELFT>::createPhdrs(Partition &part) {
// partitions.
if (sec->partition != partNo) {
if (isMain && sec->partition == 255)
- addHdr(PT_LOAD, computeFlags(sec->getPhdrFlags()))->add(sec);
+ addHdr(PT_LOAD, computeFlags(ctx, sec->getPhdrFlags()))->add(sec);
continue;
}
@@ -2267,7 +2270,7 @@ SmallVector<PhdrEntry *, 0> Writer<ELFT>::createPhdrs(Partition &part) {
// supposed-to-be-NOBITS section to the output file. (However, we cannot do
// so when hasSectionsCommand, since we cannot introduce the extra alignment
// needed to create a new LOAD)
- uint64_t newFlags = computeFlags(sec->getPhdrFlags());
+ uint64_t newFlags = computeFlags(ctx, sec->getPhdrFlags());
// When --no-rosegment is specified, RO and RX sections are compatible.
uint32_t incompatible = flags ^ newFlags;
if (ctx.arg.singleRoRx && !(newFlags & PF_W))
@@ -2701,7 +2704,7 @@ template <class ELFT> void Writer<ELFT>::checkSections() {
// 5. the address 0.
static uint64_t getEntryAddr() {
// Case 1, 2 or 3
- if (Symbol *b = symtab.find(ctx.arg.entry))
+ if (Symbol *b = ctx.symtab->find(ctx.arg.entry))
return b->getVA();
// Case 4
diff --git a/lld/ELF/Writer.h b/lld/ELF/Writer.h
index 7644b62..bf4783c 100644
--- a/lld/ELF/Writer.h
+++ b/lld/ELF/Writer.h
@@ -16,7 +16,7 @@
namespace lld::elf {
class InputFile;
class OutputSection;
-void copySectionsIntoPartitions();
+void copySectionsIntoPartitions(Ctx &ctx);
template <class ELFT> void writeResult(Ctx &ctx);
// This describes a program header entry.
@@ -44,7 +44,7 @@ struct PhdrEntry {
uint64_t lmaOffset = 0;
};
-void addReservedSymbols();
+void addReservedSymbols(Ctx &ctx);
bool includeInSymtab(const Symbol &b);
unsigned getSectionRank(OutputSection &osec);
diff --git a/lld/MachO/InputSection.cpp b/lld/MachO/InputSection.cpp
index 64c5849..c1b3297 100644
--- a/lld/MachO/InputSection.cpp
+++ b/lld/MachO/InputSection.cpp
@@ -167,8 +167,7 @@ std::string InputSection::getSourceLocation(uint64_t off) const {
// Symbols are generally prefixed with an underscore, which is not included
// in the debug information.
StringRef symName = sym->getName();
- if (!symName.empty() && symName[0] == '_')
- symName = symName.substr(1);
+ symName.consume_front("_");
if (std::optional<std::pair<std::string, unsigned>> fileLine =
dwarf->getVariableLoc(symName))
diff --git a/lld/docs/ReleaseNotes.rst b/lld/docs/ReleaseNotes.rst
index 6d09de1..6e04377 100644
--- a/lld/docs/ReleaseNotes.rst
+++ b/lld/docs/ReleaseNotes.rst
@@ -41,6 +41,7 @@ Breaking changes
COFF Improvements
-----------------
+* ``/includeglob`` has been implemented to match the behavior of ``--undefined-glob`` available for ELF.
MinGW Improvements
------------------
diff --git a/lld/test/COFF/Inputs/include1d.yaml b/lld/test/COFF/Inputs/include1d.yaml
new file mode 100644
index 0000000..d315cc8
--- /dev/null
+++ b/lld/test/COFF/Inputs/include1d.yaml
@@ -0,0 +1,29 @@
+--- !COFF
+header:
+ Machine: IMAGE_FILE_MACHINE_AMD64
+ Characteristics: []
+sections:
+ - Name: .text
+ Characteristics: [ IMAGE_SCN_CNT_CODE, IMAGE_SCN_MEM_EXECUTE, IMAGE_SCN_MEM_READ ]
+ Alignment: 4
+ SectionData: B800000000506800000000680000000050E80000000050E800000000
+symbols:
+ - Name: .text
+ Value: 0
+ SectionNumber: 1
+ SimpleType: IMAGE_SYM_TYPE_NULL
+ ComplexType: IMAGE_SYM_DTYPE_NULL
+ StorageClass: IMAGE_SYM_CLASS_STATIC
+ SectionDefinition:
+ Length: 28
+ NumberOfRelocations: 4
+ NumberOfLinenumbers: 0
+ CheckSum: 0
+ Number: 0
+ - Name: baz
+ Value: 0
+ SectionNumber: 1
+ SimpleType: IMAGE_SYM_TYPE_NULL
+ ComplexType: IMAGE_SYM_DTYPE_NULL
+ StorageClass: IMAGE_SYM_CLASS_EXTERNAL
+...
diff --git a/lld/test/COFF/include.test b/lld/test/COFF/include.test
index 8879ee5..2a8a8fe 100644
--- a/lld/test/COFF/include.test
+++ b/lld/test/COFF/include.test
@@ -9,10 +9,18 @@
# RUN: echo dummy >> %t.log
# RUN: FileCheck -check-prefix=CHECK2 %s < %t.log
+# RUN: lld-link /out:%t.exe /entry:main %t.obj /verbose /includeglob:"glob_*" >& %t.log
+# RUN: echo dummy >> %t.log
+# RUN: FileCheck -check-prefix=CHECK3 %s < %t.log
+
# CHECK1: Discarded unused
+# CHECK1: Discarded glob_match1
+# CHECK1: Discarded glob_match2
# CHECK1-NOT: Discarded used
# CHECK2-NOT: Discarded unused
# CHECK2-NOT: Discarded used
+# CHECK3-NOT: Discarded glob_match1
+# CHECK3-NOT: Discarded glob_match2
--- !COFF
header:
@@ -31,6 +39,14 @@ sections:
Characteristics: [ IMAGE_SCN_CNT_CODE, IMAGE_SCN_LNK_COMDAT, IMAGE_SCN_MEM_EXECUTE, IMAGE_SCN_MEM_READ ]
Alignment: 4
SectionData: B82A000000C3
+ - Name: '.text$mn'
+ Characteristics: [ IMAGE_SCN_CNT_CODE, IMAGE_SCN_LNK_COMDAT, IMAGE_SCN_MEM_EXECUTE, IMAGE_SCN_MEM_READ ]
+ Alignment: 4
+ SectionData: B82A000000C3
+ - Name: '.text$mn'
+ Characteristics: [ IMAGE_SCN_CNT_CODE, IMAGE_SCN_LNK_COMDAT, IMAGE_SCN_MEM_EXECUTE, IMAGE_SCN_MEM_READ ]
+ Alignment: 4
+ SectionData: B82A000000C3
- Name: .drectve
Characteristics: [ IMAGE_SCN_LNK_INFO, IMAGE_SCN_LNK_REMOVE ]
Alignment: 1
@@ -75,6 +91,32 @@ symbols:
CheckSum: 0
Number: 0
Selection: IMAGE_COMDAT_SELECT_ANY
+ - Name: '.text$mn'
+ Value: 0
+ SectionNumber: 4
+ SimpleType: IMAGE_SYM_TYPE_NULL
+ ComplexType: IMAGE_SYM_DTYPE_NULL
+ StorageClass: IMAGE_SYM_CLASS_STATIC
+ SectionDefinition:
+ Length: 6
+ NumberOfRelocations: 0
+ NumberOfLinenumbers: 0
+ CheckSum: 0
+ Number: 0
+ Selection: IMAGE_COMDAT_SELECT_ANY
+ - Name: '.text$mn'
+ Value: 0
+ SectionNumber: 5
+ SimpleType: IMAGE_SYM_TYPE_NULL
+ ComplexType: IMAGE_SYM_DTYPE_NULL
+ StorageClass: IMAGE_SYM_CLASS_STATIC
+ SectionDefinition:
+ Length: 6
+ NumberOfRelocations: 0
+ NumberOfLinenumbers: 0
+ CheckSum: 0
+ Number: 0
+ Selection: IMAGE_COMDAT_SELECT_ANY
- Name: main
Value: 0
SectionNumber: 1
@@ -93,4 +135,16 @@ symbols:
SimpleType: IMAGE_SYM_TYPE_NULL
ComplexType: IMAGE_SYM_DTYPE_FUNCTION
StorageClass: IMAGE_SYM_CLASS_EXTERNAL
+ - Name: glob_match1
+ Value: 0
+ SectionNumber: 4
+ SimpleType: IMAGE_SYM_TYPE_NULL
+ ComplexType: IMAGE_SYM_DTYPE_FUNCTION
+ StorageClass: IMAGE_SYM_CLASS_EXTERNAL
+ - Name: glob_match2
+ Value: 0
+ SectionNumber: 5
+ SimpleType: IMAGE_SYM_TYPE_NULL
+ ComplexType: IMAGE_SYM_DTYPE_FUNCTION
+ StorageClass: IMAGE_SYM_CLASS_EXTERNAL
...
diff --git a/lld/test/COFF/include2.test b/lld/test/COFF/include2.test
index 557de47..4796512 100644
--- a/lld/test/COFF/include2.test
+++ b/lld/test/COFF/include2.test
@@ -1,14 +1,20 @@
# RUN: yaml2obj %p/Inputs/include1a.yaml -o %t1.obj
# RUN: yaml2obj %p/Inputs/include1b.yaml -o %t2.obj
# RUN: yaml2obj %p/Inputs/include1c.yaml -o %t3.obj
-# RUN: rm -f %t2.lib %t3.lib
+# RUN: yaml2obj %p/Inputs/include1d.yaml -o %t4.obj
+# RUN: rm -f %t2.lib %t3.lib %t4.lib
# RUN: llvm-ar cru %t2.lib %t2.obj
# RUN: llvm-ar cru %t3.lib %t3.obj
-# RUN: lld-link /out:%t.exe /entry:main %t1.obj %t2.lib %t3.lib /verbose >& %t.log
+# RUN: llvm-ar cru %t4.lib %t4.obj
+# RUN: lld-link /out:%t.exe /entry:main %t1.obj %t2.lib %t3.lib %t4.lib /verbose >& %t.log
# RUN: FileCheck %s < %t.log
+# RUN: lld-link /out:%t.exe /entry:main %t1.obj %t2.lib %t3.lib %t4.lib /includeglob:baz /verbose >& %t.glob.log
+# RUN: FileCheck -check-prefix=GLOB %s < %t.glob.log
CHECK: include2.test.tmp1.obj
CHECK: include2.test.tmp2.lib
CHECK: include2.test.tmp2.lib(include2.test.tmp2.obj) for foo
CHECK: include2.test.tmp3.lib
CHECK: include2.test.tmp3.lib(include2.test.tmp3.obj) for bar
+CHECK-NOT: include2.test.tmp4.lib(include2.test.tmp4.obj) for baz
+GLOB: include2.test.tmp4.lib(include2.test.tmp4.obj) for baz
diff --git a/lld/test/wasm/unsupported-pic-relocations.s b/lld/test/wasm/unsupported-pic-relocations.s
index ea32e84..2f85afa 100644
--- a/lld/test/wasm/unsupported-pic-relocations.s
+++ b/lld/test/wasm/unsupported-pic-relocations.s
@@ -15,6 +15,10 @@
# RUN: not wasm-ld --experimental-pic -shared %t.o -o /dev/null --unresolved-symbols=import-dynamic 2>&1 | \
# RUN: FileCheck %s
+## These errors should not be reported under -r/--relocatable (i.e. when
+## generating an object file)
+# RUN: wasm-ld --experimental-pic -r %t.o -o /dev/null
+
.functype external_func () -> ()
use_undefined_function:
@@ -23,7 +27,7 @@ use_undefined_function:
# CHECK: error: {{.*}}.o: relocation R_WASM_TABLE_INDEX_REL_SLEB is not supported against an undefined symbol `external_func`
drop
end_function
-
+
use_undefined_data:
.functype use_undefined_data () -> ()
i32.const external_data@MBREL
diff --git a/lld/test/wasm/unsupported-pic-relocations64.s b/lld/test/wasm/unsupported-pic-relocations64.s
index db9707b..df885b8 100644
--- a/lld/test/wasm/unsupported-pic-relocations64.s
+++ b/lld/test/wasm/unsupported-pic-relocations64.s
@@ -15,6 +15,10 @@
# RUN: not wasm-ld -mwasm64 --experimental-pic -shared %t.o -o /dev/null --unresolved-symbols=import-dynamic 2>&1 | \
# RUN: FileCheck %s
+## These errors should not be reported under -r/--relocatable (i.e. when
+## generating an object file)
+# RUN: wasm-ld -mwasm64 --experimental-pic -r %t.o -o /dev/null
+
.functype external_func () -> ()
use_undefined_function:
@@ -23,7 +27,7 @@ use_undefined_function:
# CHECK: error: {{.*}}.o: relocation R_WASM_TABLE_INDEX_REL_SLEB64 is not supported against an undefined symbol `external_func`
drop
end_function
-
+
use_undefined_data:
.functype use_undefined_data () -> ()
i64.const external_data@MBREL
diff --git a/lld/wasm/Relocations.cpp b/lld/wasm/Relocations.cpp
index 2dbfe33..45ad327 100644
--- a/lld/wasm/Relocations.cpp
+++ b/lld/wasm/Relocations.cpp
@@ -173,7 +173,7 @@ void scanRelocations(InputChunk *chunk) {
}
}
- if (sym->isUndefined()) {
+ if (!config->relocatable && sym->isUndefined()) {
switch (reloc.Type) {
case R_WASM_TABLE_INDEX_REL_SLEB:
case R_WASM_TABLE_INDEX_REL_SLEB64:
@@ -187,11 +187,11 @@ void scanRelocations(InputChunk *chunk) {
toString(*sym) + "`");
break;
}
- }
- if (sym->isUndefined() && !config->relocatable && !sym->isWeak()) {
- // Report undefined symbols
- reportUndefined(file, sym);
+ if (!sym->isWeak()) {
+ // Report undefined symbols
+ reportUndefined(file, sym);
+ }
}
}
}
diff --git a/lldb/bindings/python/python-wrapper.swig b/lldb/bindings/python/python-wrapper.swig
index 961fb2d..b72a462 100644
--- a/lldb/bindings/python/python-wrapper.swig
+++ b/lldb/bindings/python/python-wrapper.swig
@@ -667,6 +667,79 @@ lldb_private::python::SWIGBridge::LLDBSwigPythonGetRepeatCommandForScriptedComma
return result.Str().GetString().str();
}
+StructuredData::DictionarySP
+lldb_private::python::SWIGBridge::LLDBSwigPythonHandleArgumentCompletionForScriptedCommand(PyObject *implementor,
+ std::vector<llvm::StringRef> &args_vec, size_t args_pos, size_t pos_in_arg) {
+
+ PyErr_Cleaner py_err_cleaner(true);
+
+ PythonObject self(PyRefType::Borrowed, implementor);
+ auto pfunc = self.ResolveName<PythonCallable>("handle_argument_completion");
+ // If this isn't implemented, return an empty dict to signal falling back to default completion:
+ if (!pfunc.IsAllocated())
+ return {};
+
+ PythonList args_list(PyInitialValue::Empty);
+ for (auto elem : args_vec)
+ args_list.AppendItem(PythonString(elem));
+
+ PythonObject result = pfunc(args_list, PythonInteger(args_pos), PythonInteger(pos_in_arg));
+ // Returning None means do the ordinary completion
+ if (result.IsNone())
+ return {};
+
+ // Convert the return dictionary to a DictionarySP.
+ StructuredData::ObjectSP result_obj_sp = result.CreateStructuredObject();
+ if (!result_obj_sp)
+ return {};
+
+ StructuredData::DictionarySP dict_sp(new StructuredData::Dictionary(result_obj_sp));
+ if (dict_sp->GetType() == lldb::eStructuredDataTypeInvalid)
+ return {};
+ return dict_sp;
+}
+
+StructuredData::DictionarySP
+lldb_private::python::SWIGBridge::LLDBSwigPythonHandleOptionArgumentCompletionForScriptedCommand(PyObject *implementor,
+ llvm::StringRef &long_option, size_t pos_in_arg) {
+
+ PyErr_Cleaner py_err_cleaner(true);
+
+ PythonObject self(PyRefType::Borrowed, implementor);
+ auto pfunc = self.ResolveName<PythonCallable>("handle_option_argument_completion");
+ // If this isn't implemented, return an empty dict to signal falling back to default completion:
+ if (!pfunc.IsAllocated())
+ return {};
+
+ PythonObject result = pfunc(PythonString(long_option), PythonInteger(pos_in_arg));
+ // Returning None means do the ordinary completion
+ if (result.IsNone())
+ return {};
+
+ // Returning a boolean:
+ // True means the completion was handled, but there were no completions
+ // False means the completion was not handled; again, do the ordinary completion:
+ if (result.GetObjectType() == PyObjectType::Boolean) {
+ if (!result.IsTrue())
+ return {};
+ // Make up a completion dictionary with the right element:
+ StructuredData::DictionarySP dict_sp(new StructuredData::Dictionary());
+ dict_sp->AddBooleanItem("no-completion", true);
+ return dict_sp;
+ }
+
+ // Convert the return dictionary to a DictionarySP.
+ StructuredData::ObjectSP result_obj_sp = result.CreateStructuredObject();
+ if (!result_obj_sp)
+ return {};
+
+ StructuredData::DictionarySP dict_sp(new StructuredData::Dictionary(result_obj_sp));
+ if (dict_sp->GetType() == lldb::eStructuredDataTypeInvalid)
+ return {};
+ return dict_sp;
+}
+
#include "lldb/Interpreter/CommandReturnObject.h"
bool lldb_private::python::SWIGBridge::LLDBSwigPythonCallParsedCommandObject(
diff --git a/lldb/docs/index.rst b/lldb/docs/index.rst
index d9b8e58..dd44a84 100644
--- a/lldb/docs/index.rst
+++ b/lldb/docs/index.rst
@@ -163,6 +163,7 @@ interesting areas to contribute to lldb.
resources/caveats
resources/projects
resources/lldbdap
+ resources/addinglanguagesupport
Public C++ API <https://lldb.llvm.org/cpp_reference/namespacelldb.html>
Private C++ API <https://lldb.llvm.org/cpp_reference/index.html>
diff --git a/lldb/docs/resources/addinglanguagesupport.md b/lldb/docs/resources/addinglanguagesupport.md
new file mode 100644
index 0000000..2878904
--- /dev/null
+++ b/lldb/docs/resources/addinglanguagesupport.md
@@ -0,0 +1,95 @@
+# Adding Programming Language Support
+
+LLDB has been architected to make it straightforward to add support for a
+programming language. Only a small enum in core LLDB needs to be modified to
+make LLDB aware of a new programming language. Everything else can be supplied
+in derived classes that need not even be present in the core LLDB repository.
+This makes it convenient for developers adding language support in downstream
+repositories since it practically eliminates the potential for merge conflicts.
+
+The basic steps are:
+* Add the language to the `LanguageType` enum.
+* Add a `TypeSystem` for the language.
+* Add expression evaluation support.
+
+Additionally, you may want to create a `Language` and `LanguageRuntime` plugin
+for your language, which enables support for advanced features like dynamic
+typing and data formatting.
+
+## Add the Language to the LanguageType enum
+
+The `LanguageType` enum
+(see [lldb-enumerations.h](https://github.com/llvm/llvm-project/blob/main/lldb/include/lldb/lldb-enumerations.h))
+contains a list of every language known to LLDB. It is the one place in core
+LLDB where support for a language must live, and thus the one place that must
+merge cleanly with upstream LLDB if you are developing your language support
+in a separate branch. When
+adding support for a language previously unknown to LLDB, start by adding an
+enumeration entry to `LanguageType`.
+
+## Add a TypeSystem for the Language
+
+Both [Module](https://github.com/llvm/llvm-project/blob/main/lldb/include/lldb/Core/Module.h)
+and [Target](https://github.com/llvm/llvm-project/blob/main/lldb/include/lldb/Target/Target.h)
+support the retrieval of a `TypeSystem` instance via `GetTypeSystemForLanguage()`.
+For `Module`, this method is directly on the `Module` instance. For `Target`,
+this is retrieved indirectly via the `TypeSystemMap` for the `Target` instance.
+
+The `TypeSystem` instance returned by the `Target` is expected to be capable of
+evaluating expressions, while the `TypeSystem` instance returned by the `Module`
+is not. If you want to support expression evaluation for your language, you could
+consider one of the following approaches:
+* Implement a single `TypeSystem` class that supports evaluation when given an
+ optional `Target`, implementing all the expression evaluation methods on the
+ `TypeSystem`.
+* Create multiple `TypeSystem` classes, one for evaluation and one for static
+ `Module` usage.
+
+For clang and Swift, the latter approach was chosen, primarily to make it
+clearer that evaluation with the static `Module`-returned `TypeSystem` instances
+makes no sense, and to have those instances error out on such calls. But either approach is
+fine.
+
+## Creating Types
+
+Your `TypeSystem` will need an approach for creating types based on a set of
+`Module`s. If your type info is going to come from DWARF info, you will want to
+subclass [DWARFASTParser](https://github.com/llvm/llvm-project/blob/main/lldb/source/Plugins/SymbolFile/DWARF/DWARFASTParser.h).
+
+## Add Expression Evaluation Support
+
+Expression Evaluation support is enabled by implementing the relevant methods on
+a `TypeSystem`-derived class. Search for `Expression` in the
+[TypeSystem header](https://github.com/llvm/llvm-project/blob/main/lldb/include/lldb/Symbol/TypeSystem.h)
+to find the methods to implement.
+
+## Type Completion
+
+There are three levels of type completion, each requiring more type information:
+1. Pointer size: you only have a forward declaration or a reference, so the
+   pointer size is all you need.
+2. Layout info: you need the size of an instance of the type, but you still don't
+   need to know all the guts of the type.
+3. Full type info: here you need everything, because you're working with the
+   type's internals, such as modifying a member variable.
+
+Ensure you never complete more of a type than is needed for a given situation.
+This will keep your type system from doing more work than necessary.
+
+## Language and LanguageRuntime Plugins
+
+If you followed the steps outlined above, you have already taught LLDB a great
+deal about your language. If your language's runtime model and fundamental data
+types don't differ much from the C model, you are pretty much done.
+
+However, it is likely that your language offers its own data types for things
+like strings and arrays, and probably has a notion of dynamic types, where the
+effective type of a variable can only be known at runtime.
+
+These tasks are covered by two plugins:
+* a `LanguageRuntime` plugin, which provides LLDB with a dynamic view of your
+ language; this plugin answers questions that require a live process to acquire
+ information (for example dynamic type resolution).
+* a `Language` plugin, which provides LLDB with a static view of your language;
+ questions that are statically knowable and do not require a process are
+ answered by this plugin (for example data formatters). \ No newline at end of file
diff --git a/lldb/docs/use/python-reference.rst b/lldb/docs/use/python-reference.rst
index b12048f..95a6020 100644
--- a/lldb/docs/use/python-reference.rst
+++ b/lldb/docs/use/python-reference.rst
@@ -551,7 +551,7 @@ command definition form can't do the right thing.
Since lldb 3.7, Python commands can also be implemented by means of a class
which should implement the following interface:
-::
+.. code-block:: python
class CommandObjectType:
def __init__(self, debugger, internal_dict):
@@ -586,20 +586,193 @@ which should implement the following interface:
As a convenience, you can treat the result object as a Python file object, and
say
-::
+.. code-block:: python
print >>result, "my command does lots of cool stuff"
SBCommandReturnObject and SBStream both support this file-like behavior by
providing write() and flush() calls at the Python layer.
+The commands that are added using this class definition are what lldb calls
+"raw" commands. The command interpreter doesn't attempt to parse the command,
+doesn't handle option values, neither generating help for them, or their
+completion. Raw commands are useful when the arguments passed to the command
+are unstructured, and having to protect them against lldb command parsing would
+be onerous. For instance, "expr" is a raw command.
+
+You can also add scripted commands that implement the "parsed command", where
+the options and their types are specified, as well as the argument and argument
+types. These commands look and act like the majority of lldb commands, and you
+can also add custom completions for the options and/or the arguments if you have
+special needs.
+
+The easiest way to do this is to derive your new command from the lldb.ParsedCommand
+class. That responds in the same way to the help & repeat command interfaces, and
+provides some convenience methods, most importantly an LLDBOptionValueParser,
+accessed through lldb.ParsedCommand.get_parser(). The parser is used to set up
+your command definitions, and to retrieve option values in the __call__ method.
+
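+Concretely, a parsed command sketched along these lines has the following
+shape. This is a minimal sketch: the class name and command behavior are
+invented for illustration, and the import assumes the ParsedCommand template
+is importable as ``lldb.plugins.parsed_cmd``; the individual methods are
+described below.
+
+.. code-block:: python
+
+    import lldb
+    from lldb.plugins.parsed_cmd import ParsedCommand
+
+    class EchoCommand(ParsedCommand):
+        def get_short_help(self):
+            return "Echo the command arguments."
+
+        def setup_command_definition(self):
+            # Add options and argument elements here; see below.
+            pass
+
+        def __call__(self, debugger, args_array, exe_ctx, result):
+            # The command body; option values come from the parser. See below.
+            pass
+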
+To set up the command definition, implement the ParsedCommand abstract method:
+
+.. code-block:: python
+
+ def setup_command_definition(self):
+
+This is called when your command is added to lldb. In this method you add the
+options and their types, the option help strings, etc. to the command using the API:
+
+.. code-block:: python
+
+ def add_option(self, short_option, long_option, help, default,
+ dest = None, required=False, groups = None,
+ value_type=lldb.eArgTypeNone, completion_type=None,
+ enum_values=None):
+ """
+ short_option: one character, must be unique, not required
+ long_option: no spaces, must be unique, required
+ help: a usage string for this option, will print in the command help
+ default: the initial value for this option (if it has a value)
+ dest: the name of the property that gives you access to the value for
+ this value. Defaults to the long option if not provided.
+ required: if true, this option must be provided or the command will error out
+ groups: Which "option groups" does this option belong to. This can either be
+ a simple list (e.g. [1, 3, 4, 5]) or you can specify ranges by sublists:
+ so [1, [3,5]] is the same as [1, 3, 4, 5].
+ value_type: one of the lldb.eArgType enum values. Some of the common arg
+ types also have default completers, which will be applied automatically.
+ completion_type: currently these are values from the lldb.CompletionType enum. If
+ you need custom completions, implement handle_option_argument_completion.
+ enum_values: An array of duples: ["element_name", "element_help"]. If provided,
+ only one of the enum elements is allowed. The value will be the
+ element_name for the chosen enum element as a string.
+ """
+
+Similarly, you can add argument types to the command:
+
+.. code-block:: python
+
+ def make_argument_element(self, arg_type, repeat = "optional", groups = None):
+ """
+ arg_type: The argument type, one of the lldb.eArgType enum values.
+ repeat: Choose from the following options:
+ "plain" - one value
+ "optional" - zero or more values
+ "plus" - one or more values
+ groups: As with add_option.
+ """
+
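+As a concrete illustration (the "format" option, its enum values, and the file
+argument below are invented for this example, not prescribed by lldb):
+
+.. code-block:: python
+
+    def setup_command_definition(self):
+        ov_parser = self.get_parser()
+        ov_parser.add_option(
+            "f", "format",
+            help="Output format for the command.",
+            value_type=lldb.eArgTypeNone,
+            dest="format",
+            default="plain",
+            enum_values=[["plain", "human readable text"],
+                         ["json", "machine readable output"]])
+        # One or more file arguments.
+        ov_parser.make_argument_element(lldb.eArgTypeFilename, repeat="plus")
+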
+Then implement the body of the command by defining:
+
+.. code-block:: python
+
+ def __call__(self, debugger, args_array, exe_ctx, result):
+ """This is the command callback. The option values are
+ provided by the 'dest' properties on the parser.
+
+ args_array: This is the list of arguments provided.
+ exe_ctx: Gives the SBExecutionContext on which the
+ command should operate.
+ result: Any results of the command should be
+ written into this SBCommandReturnObject.
+ """
+
+This differs from the "raw" command's __call__ in that the arguments are already
+parsed into the args_array, and the option values are set in the parser, where
+they can be accessed using their property names. The LLDBOptionValueParser class has
+a couple of other handy methods:
+
+.. code-block:: python
+
+  def was_set(self, long_option_name):
+
+returns True if the option was specified on the command line.
+
+.. code-block:: python
+
+ def dest_for_option(self, long_option_name):
+ """
+ This will return the value of the dest variable you defined for opt_name.
+ Mostly useful for handle_completion where you get passed the long option.
+ """
+
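+For example, a __call__ body using the hypothetical "format" option defined
+earlier, together with was_set, might read:
+
+.. code-block:: python
+
+    def __call__(self, debugger, args_array, exe_ctx, result):
+        ov_parser = self.get_parser()
+        # Option values arrive as properties named after each option's dest.
+        fmt = ov_parser.format
+        if ov_parser.was_set("format"):
+            print("format was given explicitly: %s" % fmt, file=result)
+        else:
+            print("using the default format: %s" % fmt, file=result)
+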
+lldb will handle completing your option names, and all your enum values
+automatically. If your option or argument types have associated built-in completers,
+then lldb will also handle that completion for you. But if you have a need for
+custom completions, either in your arguments or option values, you can handle
+completion by hand as well. To handle completion of option value arguments,
+your lldb.ParsedCommand subclass should implement:
+
+.. code-block:: python
+
+ def handle_option_argument_completion(self, long_option, cursor_pos):
+ """
+ long_option: The long option name of the option whose value you are
+ asked to complete.
+ cursor_pos: The cursor position in the value for that option - which
+ you can get from the option parser.
+ """
+
+And to handle the completion of arguments:
+
+.. code-block:: python
+
+ def handle_argument_completion(self, args, arg_pos, cursor_pos):
+ """
+ args: A list of the arguments to the command
+ arg_pos: An index into the args list of the argument with the cursor
+ cursor_pos: The cursor position in the arg specified by arg_pos
+ """
+
+When either of these APIs is called, the command line will have been parsed up to
+the word containing the cursor, and any option values set in that part of the command
+string are available from the option value parser. That's useful for instance
+if you have a --shared-library option that would constrain the completions for,
+say, a symbol name option or argument.
+
+The return value specifies what the completion options are. You have four
+choices:
+
+- `True`: the completion was handled with no completions.
+
+- `False`: the completion was not handled, forward it to the regular
+completion machinery.
+
+- A dictionary with the key "completion": there is a single candidate, given
+by the value of the "completion" key. Optionally you can pass a
+"mode" key whose value is either "partial" or "complete". Return "partial" if
+the "completion" string is a common prefix of all the completions.
+
+For instance, if the string you are completing is "Test" and the available completions are:
+"Test1", "Test11" and "Test111", you should return the dictionary:
+
+.. code-block:: python
+
+ return {"completion": "Test1", "mode" : "partial"}
+
+and then lldb will add the "1" at the cursor and advance it past the added string,
+waiting for more completions. But if "Test1" is the only completion, return:
+
+.. code-block:: python
+
+ {"completion": "Test1", "mode": "complete"}
+
+and lldb will add "1 " at the cursor, indicating the command string is complete.
+
+The default is "complete"; you don't need to specify a "mode" in that case.
+
+- A dictionary with the key "values", whose value is a list of candidate completion
+strings. The command interpreter will present those strings as the available choices.
+You can optionally include a "descriptions" key, whose value is a parallel array
+of description strings, and the completion will show the description next to
+each completion.
+
+
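+Tying these return conventions together, a sketch of an argument completer
+(the candidate strings are invented for illustration):
+
+.. code-block:: python
+
+    def handle_argument_completion(self, args, arg_pos, cursor_pos):
+        word = args[arg_pos][:cursor_pos]
+        candidates = [c for c in ("Test1", "Test11", "Test111")
+                      if c.startswith(word)]
+        if not candidates:
+            return False  # fall back to the standard completion machinery
+        if len(candidates) == 1:
+            return {"completion": candidates[0], "mode": "complete"}
+        return {"values": candidates,
+                "descriptions": ["candidate %s" % c for c in candidates]}
+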
One other handy convenience when defining lldb command-line commands is the
-command command script import which will import a module specified by file
+command "command script import" which will import a module specified by file
path, so you don't have to change your PYTHONPATH for temporary scripts. It
also has another convenience that if your new script module has a function of
the form:
-::
+.. code-block:: python
def __lldb_init_module(debugger, internal_dict):
# Command Initialization code goes here
@@ -615,7 +788,7 @@ creating scripts that can be run from the command line. However, for command
line scripts, the debugger instance must be created manually. Sample code would
look like:
-::
+.. code-block:: python
if __name__ == '__main__':
# Initialize the debugger before making any API calls.
@@ -638,7 +811,7 @@ look like:
Now we can create a module called ls.py in the file ~/ls.py that will implement
a function that can be used by LLDB's python command code:
-::
+.. code-block:: python
#!/usr/bin/env python
diff --git a/lldb/examples/python/cmdtemplate.py b/lldb/examples/python/cmdtemplate.py
index b6a21cb..a9fbe0b 100644
--- a/lldb/examples/python/cmdtemplate.py
+++ b/lldb/examples/python/cmdtemplate.py
@@ -29,8 +29,8 @@ class FrameStatCommand(ParsedCommand):
return lldb.eCommandRequiresFrame | lldb.eCommandProcessMustBePaused
def setup_command_definition(self):
-
- self.ov_parser.add_option(
+ ov_parser = self.get_parser()
+ ov_parser.add_option(
"i",
"in-scope",
help = "in_scope_only = True",
@@ -39,7 +39,7 @@ class FrameStatCommand(ParsedCommand):
default = True,
)
- self.ov_parser.add_option(
+ ov_parser.add_option(
"i",
"in-scope",
help = "in_scope_only = True",
@@ -48,7 +48,7 @@ class FrameStatCommand(ParsedCommand):
default=True,
)
- self.ov_parser.add_option(
+ ov_parser.add_option(
"a",
"arguments",
help = "arguments = True",
@@ -57,7 +57,7 @@ class FrameStatCommand(ParsedCommand):
default = True,
)
- self.ov_parser.add_option(
+ ov_parser.add_option(
"l",
"locals",
help = "locals = True",
@@ -66,7 +66,7 @@ class FrameStatCommand(ParsedCommand):
default = True,
)
- self.ov_parser.add_option(
+ ov_parser.add_option(
"s",
"statics",
help = "statics = True",
@@ -103,8 +103,9 @@ class FrameStatCommand(ParsedCommand):
result.SetError("invalid frame")
return
+ ov_parser = self.get_parser()
variables_list = frame.GetVariables(
- self.ov_parser.arguments, self.ov_parser.locals, self.ov_parser.statics, self.ov_parser.inscope
+ ov_parser.arguments, ov_parser.locals, ov_parser.statics, ov_parser.inscope
)
variables_count = variables_list.GetSize()
if variables_count == 0:
diff --git a/lldb/examples/python/templates/parsed_cmd.py b/lldb/examples/python/templates/parsed_cmd.py
index 06124ad..13d6eae 100644
--- a/lldb/examples/python/templates/parsed_cmd.py
+++ b/lldb/examples/python/templates/parsed_cmd.py
@@ -4,7 +4,8 @@ lldb parsed commands more Pythonic.
The way to use it is to make a class for your command that inherits from ParsedCommandBase.
That will make an LLDBOptionValueParser which you will use for your
option definition, and to fetch option values for the current invocation
-of your command. Access to the OV parser is through:
+of your command. For concision, I'll call this the `OVParser`.
+Access to the `OVParser` is through:
ParsedCommandBase.get_parser()
@@ -43,7 +44,65 @@ will fetch the value, and:
will return True if the user set this option, and False if it was left at its default
value.
-There are example commands in the lldb testsuite at:
+Custom Completions:
+
+You can also implement custom completers for your custom command, either for the
+arguments to your command or for its option values. If you use enum
+values or if your option/argument type is one of the types we have completers for,
+you should not need to do this. But if you have your own completable types, or if
+you want completion of one option to be conditioned by other options on the command
+line, you can use this interface to take over the completion.
+
+You can choose to add a completion for the option values defined for your command,
+or for the arguments, separately. For the option values, define:
+
+def handle_option_argument_completion(self, long_option, cursor_pos):
+
+The line to be completed will be parsed up to the option containing the cursor position,
+and the values will be set in the OptionValue parser object. long_option will be
+the option name containing the cursor, and cursor_pos will be the position of the cursor
+in that option's value. You can call the `OVParser` method: `dest_for_option(long_option)`
+to get the value for that option. The other options that came before the cursor in the command
+line will also be set in the `OVParser` when the completion handler is called.
+
+For argument values, define:
+
+def handle_argument_completion(self, args, arg_pos, cursor_pos):
+
+Again, the command line will be parsed up to the cursor position, and all the options
+before the cursor pose will be set in the `OVParser`. args is a python list of the
+arguments, arg_pos is the index of the argument with the cursor, and cursor_pos is
+the position of the cursor in the argument.
+
+In both cases, the return value determines the completion.
+
+Return False to mean "Not Handled" - in which case lldb will fall back on the
+standard completion machinery.
+
+Return True to mean "Handled with no completions".
+
+If there is a single unique completion, return a Python dictionary with two elements:
+
+return {"completion" : "completed_value", "mode" : <"partial", "complete">}
+
+If the mode is "partial", then the completion is to a common base; if it is "complete",
+then the argument is considered done - mostly meaning lldb will put a space after the
+completion string. "complete" is the default if no "mode" is specified.
+
+If there are multiple completion options, then return:
+
+return {"values" : ["option1", "option2"]}
+
+Optionally, you can return a parallel array of "descriptions" which the completer will
+print alongside the options:
+
+return {"values" : ["option1", "option2"], "descriptions" : ["the first option", "the second option"]}
+
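+For instance, a minimal option-value completer (the symbol names are invented
+for illustration) might look like:
+
+def handle_option_argument_completion(self, long_option, cursor_pos):
+    ov_parser = self.get_parser()
+    typed = (ov_parser.dest_for_option(long_option) or "")[:cursor_pos]
+    matches = [s for s in ("main", "malloc") if s.startswith(typed)]
+    return {"values": matches} if matches else False
+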
+The cmdtemplate example currently uses the parsed command infrastructure:
+
+llvm-project/lldb/examples/python/cmdtemplate.py
+
+There are also a few example commands in the lldb testsuite at:
llvm-project/lldb/test/API/commands/command/script/add/test_commands.py
"""
@@ -226,10 +285,14 @@ class LLDBOptionValueParser:
return True
def was_set(self, opt_name):
- """ Call this in the __call__ method of your command to determine
- whether this option was set on the command line. It is sometimes
- useful to know whether an option has the default value because the
- user set it explicitly (was_set -> True) or not. """
+ """Call this in the __call__ method of your command to determine
+ whether this option was set on the command line. It is sometimes
+ useful to know whether an option has the default value because the
+ user set it explicitly (was_set -> True) or not.
+ You can also call this in a handle_completion method, but it will
+ currently only report true values for the options mentioned
+ BEFORE the cursor point in the command line.
+ """
elem = self.get_option_element(opt_name)
if not elem:
@@ -239,6 +302,16 @@ class LLDBOptionValueParser:
except AttributeError:
return False
+ def dest_for_option(self, opt_name):
+ """This will return the value of the dest variable you defined for opt_name.
+ Mostly useful for handle_completion where you get passed the long option.
+ """
+ elem = self.get_option_element(opt_name)
+ if not elem:
+ return None
+ value = self.__dict__[elem["dest"]]
+ return value
+
def add_option(self, short_option, long_option, help, default,
dest = None, required=False, groups = None,
value_type=lldb.eArgTypeNone, completion_type=None,
@@ -251,14 +324,16 @@ class LLDBOptionValueParser:
dest: the name of the property that gives you access to the value for
this value. Defaults to the long option if not provided.
required: if true, this option must be provided or the command will error out
- groups: Which "option groups" does this option belong to
+ groups: Which "option groups" does this option belong to. This can either be
+ a simple list (e.g. [1, 3, 4, 5]) or you can specify ranges by sublists:
+ so [1, [3,5]] is the same as [1, 3, 4, 5].
value_type: one of the lldb.eArgType enum values. Some of the common arg
types also have default completers, which will be applied automatically.
- completion_type: currently these are values form the lldb.CompletionType enum, I
- haven't done custom completions yet.
+ completion_type: currently these are values from the lldb.CompletionType enum. If
+ you need custom completions, implement handle_option_argument_completion.
enum_values: An array of duples: ["element_name", "element_help"]. If provided,
- only one of the enum elements is allowed. The value will be the
- element_name for the chosen enum element as a string.
+ only one of the enum elements is allowed. The value will be the
+ element_name for the chosen enum element as a string.
"""
if not dest:
dest = long_option
diff --git a/lldb/include/lldb/Interpreter/ScriptInterpreter.h b/lldb/include/lldb/Interpreter/ScriptInterpreter.h
index 901ecf3..2c2bd6f 100644
--- a/lldb/include/lldb/Interpreter/ScriptInterpreter.h
+++ b/lldb/include/lldb/Interpreter/ScriptInterpreter.h
@@ -420,6 +420,20 @@ public:
return std::nullopt;
}
+ virtual StructuredData::DictionarySP
+ HandleArgumentCompletionForScriptedCommand(
+ StructuredData::GenericSP impl_obj_sp, std::vector<llvm::StringRef> &args,
+ size_t args_pos, size_t char_in_arg) {
+ return {};
+ }
+
+ virtual StructuredData::DictionarySP
+ HandleOptionArgumentCompletionForScriptedCommand(
+ StructuredData::GenericSP impl_obj_sp, llvm::StringRef &long_name,
+ size_t char_in_arg) {
+ return {};
+ }
+
virtual bool RunScriptFormatKeyword(const char *impl_function,
Process *process, std::string &output,
Status &error) {
diff --git a/lldb/include/lldb/Symbol/UnwindPlan.h b/lldb/include/lldb/Symbol/UnwindPlan.h
index a9e8406..e1567c73 100644
--- a/lldb/include/lldb/Symbol/UnwindPlan.h
+++ b/lldb/include/lldb/Symbol/UnwindPlan.h
@@ -54,7 +54,7 @@ class UnwindPlan {
public:
class Row {
public:
- class RegisterLocation {
+ class AbstractRegisterLocation {
public:
enum RestoreType {
unspecified, // not specified, we may be able to assume this
@@ -72,11 +72,11 @@ public:
isConstant // reg = constant
};
- RegisterLocation() : m_location() {}
+ AbstractRegisterLocation() : m_location() {}
- bool operator==(const RegisterLocation &rhs) const;
+ bool operator==(const AbstractRegisterLocation &rhs) const;
- bool operator!=(const RegisterLocation &rhs) const {
+ bool operator!=(const AbstractRegisterLocation &rhs) const {
return !(*this == rhs);
}
@@ -337,10 +337,10 @@ public:
bool operator==(const Row &rhs) const;
bool GetRegisterInfo(uint32_t reg_num,
- RegisterLocation &register_location) const;
+ AbstractRegisterLocation &register_location) const;
void SetRegisterInfo(uint32_t reg_num,
- const RegisterLocation register_location);
+ const AbstractRegisterLocation register_location);
void RemoveRegisterInfo(uint32_t reg_num);
@@ -370,6 +370,13 @@ public:
bool SetRegisterLocationToSame(uint32_t reg_num, bool must_replace);
+ /// This method does not make a copy of the \a opcodes memory, it is
+ /// assumed to have the same lifetime as the Module this UnwindPlan will
+ /// be registered in.
+ bool SetRegisterLocationToIsDWARFExpression(uint32_t reg_num,
+ const uint8_t *opcodes,
+ uint32_t len, bool can_replace);
+
bool SetRegisterLocationToIsConstant(uint32_t reg_num, uint64_t constant,
bool can_replace);
@@ -398,7 +405,7 @@ public:
lldb::addr_t base_addr) const;
protected:
- typedef std::map<uint32_t, RegisterLocation> collection;
+ typedef std::map<uint32_t, AbstractRegisterLocation> collection;
lldb::addr_t m_offset = 0; // Offset into the function for this row
FAValue m_cfa_value;
diff --git a/lldb/include/lldb/Target/ABI.h b/lldb/include/lldb/Target/ABI.h
index 7b646d7..dd941d1 100644
--- a/lldb/include/lldb/Target/ABI.h
+++ b/lldb/include/lldb/Target/ABI.h
@@ -102,9 +102,9 @@ public:
virtual bool RegisterIsVolatile(const RegisterInfo *reg_info) = 0;
- virtual bool
- GetFallbackRegisterLocation(const RegisterInfo *reg_info,
- UnwindPlan::Row::RegisterLocation &unwind_regloc);
+ virtual bool GetFallbackRegisterLocation(
+ const RegisterInfo *reg_info,
+ UnwindPlan::Row::AbstractRegisterLocation &unwind_regloc);
// Should take a look at a call frame address (CFA) which is just the stack
// pointer value upon entry to a function. ABIs usually impose alignment
diff --git a/lldb/include/lldb/Target/RegisterContextUnwind.h b/lldb/include/lldb/Target/RegisterContextUnwind.h
index ef8ae88..3be9eb5 100644
--- a/lldb/include/lldb/Target/RegisterContextUnwind.h
+++ b/lldb/include/lldb/Target/RegisterContextUnwind.h
@@ -84,7 +84,7 @@ private:
// past the top (end) of the stack
};
- // UnwindLLDB needs to pass around references to RegisterLocations
+ // UnwindLLDB needs to pass around references to ConcreteRegisterLocations
friend class UnwindLLDB;
// Returns true if we have an unwind loop -- the same stack frame unwinding
@@ -135,29 +135,28 @@ private:
// preserved a register that this
// function didn't modify/use.
//
- // The RegisterLocation type may be set to eRegisterNotAvailable -- this will
- // happen for a volatile register
- // being queried mid-stack. Instead of floating frame 0's contents of that
- // register up the stack (which may
- // or may not be the value of that reg when the function was executing), we
- // won't return any value.
+ // The ConcreteRegisterLocation type may be set to eRegisterNotAvailable --
+ // this will happen for a volatile register being queried mid-stack. Instead
+ // of floating frame 0's contents of that register up the stack (which may or
+ // may not be the value of that reg when the function was executing), we won't
+ // return any value.
//
// If a non-volatile register (a "preserved" register) is requested mid-stack
// and no frames "below" the requested
// stack have saved the register anywhere, it is safe to assume that frame 0's
// register values are still the same
// as the requesting frame's.
- lldb_private::UnwindLLDB::RegisterSearchResult
- SavedLocationForRegister(uint32_t lldb_regnum,
- lldb_private::UnwindLLDB::RegisterLocation &regloc);
+ lldb_private::UnwindLLDB::RegisterSearchResult SavedLocationForRegister(
+ uint32_t lldb_regnum,
+ lldb_private::UnwindLLDB::ConcreteRegisterLocation &regloc);
bool ReadRegisterValueFromRegisterLocation(
- lldb_private::UnwindLLDB::RegisterLocation regloc,
+ lldb_private::UnwindLLDB::ConcreteRegisterLocation regloc,
const lldb_private::RegisterInfo *reg_info,
lldb_private::RegisterValue &value);
bool WriteRegisterValueToRegisterLocation(
- lldb_private::UnwindLLDB::RegisterLocation regloc,
+ lldb_private::UnwindLLDB::ConcreteRegisterLocation regloc,
const lldb_private::RegisterInfo *reg_info,
const lldb_private::RegisterValue &value);
@@ -249,7 +248,7 @@ private:
uint32_t m_frame_number; // What stack frame this RegisterContext is
- std::map<uint32_t, lldb_private::UnwindLLDB::RegisterLocation>
+ std::map<uint32_t, lldb_private::UnwindLLDB::ConcreteRegisterLocation>
m_registers; // where to find reg values for this frame
lldb_private::UnwindLLDB &m_parent_unwind; // The UnwindLLDB that is creating
diff --git a/lldb/include/lldb/Target/UnwindLLDB.h b/lldb/include/lldb/Target/UnwindLLDB.h
index f80212c..f2f65e6 100644
--- a/lldb/include/lldb/Target/UnwindLLDB.h
+++ b/lldb/include/lldb/Target/UnwindLLDB.h
@@ -38,7 +38,10 @@ public:
protected:
friend class lldb_private::RegisterContextUnwind;
- struct RegisterLocation {
+ /// An UnwindPlan::Row::AbstractRegisterLocation, combined with the register
+ /// context and memory for a specific stop point, is used to create a
+ /// ConcreteRegisterLocation.
+ struct ConcreteRegisterLocation {
enum RegisterLocationTypes {
eRegisterNotSaved = 0, // register was not preserved by callee. If
// volatile reg, is unavailable
@@ -90,7 +93,8 @@ protected:
// Iterate over the RegisterContextUnwind's in our m_frames vector, look for
// the first one that has a saved location for this reg.
bool SearchForSavedLocationForRegister(
- uint32_t lldb_regnum, lldb_private::UnwindLLDB::RegisterLocation &regloc,
+ uint32_t lldb_regnum,
+ lldb_private::UnwindLLDB::ConcreteRegisterLocation &regloc,
uint32_t starting_frame_num, bool pc_register);
/// Provide the list of user-specified trap handler functions
diff --git a/lldb/include/lldb/Utility/CompletionRequest.h b/lldb/include/lldb/Utility/CompletionRequest.h
index 1a2b1d6..650158a 100644
--- a/lldb/include/lldb/Utility/CompletionRequest.h
+++ b/lldb/include/lldb/Utility/CompletionRequest.h
@@ -139,6 +139,8 @@ public:
return GetParsedLine()[GetCursorIndex()];
}
+ size_t GetCursorCharPos() const { return m_cursor_char_position; }
+
/// Drops the first argument from the argument list.
void ShiftArguments() {
m_cursor_index--;
diff --git a/lldb/include/lldb/Utility/Status.h b/lldb/include/lldb/Utility/Status.h
index 795c830..084ce4a 100644
--- a/lldb/include/lldb/Utility/Status.h
+++ b/lldb/include/lldb/Utility/Status.h
@@ -28,6 +28,67 @@ namespace lldb_private {
const char *ExpressionResultAsCString(lldb::ExpressionResults result);
+/// Going a bit against the spirit of llvm::Error,
+/// lldb_private::Status needs to store errors long-term and sometimes
+/// copy them. This base class defines an interface for this
+/// operation.
+class CloneableError
+ : public llvm::ErrorInfo<CloneableError, llvm::ErrorInfoBase> {
+public:
+ using llvm::ErrorInfo<CloneableError, llvm::ErrorInfoBase>::ErrorInfo;
+ CloneableError() : ErrorInfo() {}
+ virtual std::unique_ptr<CloneableError> Clone() const = 0;
+ static char ID;
+};
+
+/// Common base class for all error-code errors.
+class CloneableECError
+ : public llvm::ErrorInfo<CloneableECError, CloneableError> {
+public:
+ using llvm::ErrorInfo<CloneableECError, CloneableError>::ErrorInfo;
+ std::error_code convertToErrorCode() const override { return EC; }
+ void log(llvm::raw_ostream &OS) const override { OS << EC.message(); }
+ static char ID;
+
+protected:
+ CloneableECError() = delete;
+ CloneableECError(std::error_code ec) : ErrorInfo(), EC(ec) {}
+ std::error_code EC;
+};
+/// FIXME: Move these declarations closer to where they're used.
+class MachKernelError
+ : public llvm::ErrorInfo<MachKernelError, CloneableECError> {
+public:
+ using llvm::ErrorInfo<MachKernelError, CloneableECError>::ErrorInfo;
+ MachKernelError(std::error_code ec) : ErrorInfo(ec) {}
+ std::string message() const override;
+ std::unique_ptr<CloneableError> Clone() const override;
+ static char ID;
+};
+
+class Win32Error : public llvm::ErrorInfo<Win32Error, CloneableECError> {
+public:
+ using llvm::ErrorInfo<Win32Error, CloneableECError>::ErrorInfo;
+ Win32Error(std::error_code ec, const llvm::Twine &msg = {}) : ErrorInfo(ec) {}
+ std::string message() const override;
+ std::unique_ptr<CloneableError> Clone() const override;
+ static char ID;
+};
+
+class ExpressionError
+ : public llvm::ErrorInfo<ExpressionError, CloneableECError> {
+public:
+ using llvm::ErrorInfo<ExpressionError, CloneableECError>::ErrorInfo;
+ ExpressionError(std::error_code ec, std::string msg = {})
+ : ErrorInfo(ec), m_string(msg) {}
+ std::unique_ptr<CloneableError> Clone() const override;
+ std::string message() const override { return m_string; }
+ static char ID;
+
+protected:
+ std::string m_string;
+};
+
/// \class Status Status.h "lldb/Utility/Status.h" An error handling class.
///
/// This class is designed to be able to hold any error code that can be
@@ -100,9 +161,7 @@ public:
}
static Status FromExpressionError(lldb::ExpressionResults result,
- std::string msg) {
- return Status(result, lldb::eErrorTypeExpression, msg);
- }
+ std::string msg);
/// Set the current error to errno.
///
@@ -115,6 +174,7 @@ public:
const Status &operator=(Status &&);
/// Avoid using this in new code. Migrate APIs to llvm::Expected instead.
static Status FromError(llvm::Error error);
+
/// FIXME: Replace this with a takeError() method.
llvm::Error ToError() const;
/// Don't call this function in new code. Instead, redesign the API
@@ -149,12 +209,20 @@ public:
/// Access the error value.
///
+ /// If the internally stored \ref llvm::Error is an \ref
+ /// llvm::ErrorList then this returns the error value of the first
+ /// error.
+ ///
/// \return
/// The error value.
ValueType GetError() const;
/// Access the error type.
///
+ /// If the internally stored \ref llvm::Error is an \ref
+ /// llvm::ErrorList then this returns the error type of the first
+ /// error.
+ ///
/// \return
/// The error type enumeration value.
lldb::ErrorType GetType() const;
@@ -170,12 +238,9 @@ public:
bool Success() const;
protected:
- Status(llvm::Error error);
- /// Status code as an integer value.
- ValueType m_code = 0;
- /// The type of the above error code.
- lldb::ErrorType m_type = lldb::eErrorTypeInvalid;
- /// A string representation of the error code.
+ Status(llvm::Error error) : m_error(std::move(error)) {}
+ llvm::Error m_error;
+ /// TODO: Replace this with just calling toString(m_error).
mutable std::string m_string;
};
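To make the contract above concrete, here is a minimal sketch, not part of
the patch, of a new error type plugging into this hierarchy; the class name
and message text are hypothetical, and it assumes the declarations above
plus <memory> and <system_error> are in scope. It follows the same pattern
as MachKernelError:

  class HypotheticalPluginError
      : public llvm::ErrorInfo<HypotheticalPluginError, CloneableECError> {
  public:
    using llvm::ErrorInfo<HypotheticalPluginError,
                          CloneableECError>::ErrorInfo;
    HypotheticalPluginError(std::error_code ec) : ErrorInfo(ec) {}
    // Combine a fixed prefix with the error_code's own description.
    std::string message() const override {
      return "plugin failure: " + EC.message();
    }
    // Deep-copy so a Status holding this error can outlive the context
    // that created it.
    std::unique_ptr<CloneableError> Clone() const override {
      return std::make_unique<HypotheticalPluginError>(EC);
    }
    static char ID;
  };
  char HypotheticalPluginError::ID;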
diff --git a/lldb/source/Commands/CommandObjectCommands.cpp b/lldb/source/Commands/CommandObjectCommands.cpp
index e3291640..845b89a 100644
--- a/lldb/source/Commands/CommandObjectCommands.cpp
+++ b/lldb/source/Commands/CommandObjectCommands.cpp
@@ -1637,6 +1637,129 @@ private:
size_t GetNumOptions() { return m_num_options; }
+ void PrepareOptionsForCompletion(CompletionRequest &request,
+ OptionElementVector &option_vec,
+ ExecutionContext *exe_ctx) {
+ // I'm not sure if we'll get into trouble doing an option parsing start
+ // and end in this context. If so, then I'll have to directly tell the
+ // scripter to do this.
+ OptionParsingStarting(exe_ctx);
+ auto opt_defs = GetDefinitions();
+
+ // Iterate through the options we found so far, and push them into
+ // the scripted side.
+ for (auto option_elem : option_vec) {
+ int cur_defs_index = option_elem.opt_defs_index;
+ // If we don't recognize this option we can't set it.
+ if (cur_defs_index == OptionArgElement::eUnrecognizedArg ||
+ cur_defs_index == OptionArgElement::eBareDash ||
+ cur_defs_index == OptionArgElement::eBareDoubleDash)
+ continue;
+ bool option_has_arg = opt_defs[cur_defs_index].option_has_arg;
+ llvm::StringRef cur_arg_value;
+ if (option_has_arg) {
+ int cur_arg_pos = option_elem.opt_arg_pos;
+ if (cur_arg_pos != OptionArgElement::eUnrecognizedArg &&
+ cur_arg_pos != OptionArgElement::eBareDash &&
+ cur_arg_pos != OptionArgElement::eBareDoubleDash) {
+ cur_arg_value =
+ request.GetParsedLine().GetArgumentAtIndex(cur_arg_pos);
+ }
+ }
+ SetOptionValue(cur_defs_index, cur_arg_value, exe_ctx);
+ }
+ OptionParsingFinished(exe_ctx);
+ }
+
+ void
+ ProcessCompletionDict(CompletionRequest &request,
+ StructuredData::DictionarySP &completion_dict_sp) {
+ // We don't know how to process an empty completion dict; our callers have
+ // to do that.
+ assert(completion_dict_sp && "Must have valid completion dict");
+ // First handle the case of a single completion:
+ llvm::StringRef completion;
+ // If the dictionary has one element "no-completion" then we return here
+ if (completion_dict_sp->GetValueForKeyAsString("no-completion",
+ completion))
+ return;
+
+ if (completion_dict_sp->GetValueForKeyAsString("completion",
+ completion)) {
+ llvm::StringRef mode_str;
+ CompletionMode mode = CompletionMode::Normal;
+ if (completion_dict_sp->GetValueForKeyAsString("mode", mode_str)) {
+ if (mode_str == "complete")
+ mode = CompletionMode::Normal;
+ else if (mode_str == "partial")
+ mode = CompletionMode::Partial;
+ else {
+ // FIXME - how do I report errors here?
+ return;
+ }
+ }
+ request.AddCompletion(completion, "", mode);
+ return;
+ }
+ // The completions are required, the descriptions are not:
+ StructuredData::Array *completions;
+ StructuredData::Array *descriptions;
+ if (completion_dict_sp->GetValueForKeyAsArray("values", completions)) {
+ completion_dict_sp->GetValueForKeyAsArray("descriptions", descriptions);
+ size_t num_completions = completions->GetSize();
+ for (size_t idx = 0; idx < num_completions; idx++) {
+ auto val = completions->GetItemAtIndexAsString(idx);
+ if (!val)
+ // FIXME: How do I report this error?
+ return;
+
+ if (descriptions) {
+ auto desc = descriptions->GetItemAtIndexAsString(idx);
+ request.AddCompletion(*val, desc ? *desc : "");
+ } else
+ request.AddCompletion(*val);
+ }
+ }
+ }
+
+ void
+ HandleOptionArgumentCompletion(lldb_private::CompletionRequest &request,
+ OptionElementVector &option_vec,
+ int opt_element_index,
+ CommandInterpreter &interpreter) override {
+ ScriptInterpreter *scripter =
+ interpreter.GetDebugger().GetScriptInterpreter();
+
+ if (!scripter)
+ return;
+
+ ExecutionContext exe_ctx = interpreter.GetExecutionContext();
+ PrepareOptionsForCompletion(request, option_vec, &exe_ctx);
+
+ auto defs = GetDefinitions();
+
+ size_t defs_index = option_vec[opt_element_index].opt_defs_index;
+ llvm::StringRef option_name = defs[defs_index].long_option;
+ bool is_enum = defs[defs_index].enum_values.size() != 0;
+ if (option_name.empty())
+ return;
+ // If this is an enum, we don't call the custom completer, just let the
+ // regular option completer handle that:
+ StructuredData::DictionarySP completion_dict_sp;
+ if (!is_enum)
+ completion_dict_sp =
+ scripter->HandleOptionArgumentCompletionForScriptedCommand(
+ m_cmd_obj_sp, option_name, request.GetCursorCharPos());
+
+ if (!completion_dict_sp) {
+ Options::HandleOptionArgumentCompletion(request, option_vec,
+ opt_element_index, interpreter);
+ return;
+ }
+
+ ProcessCompletionDict(request, completion_dict_sp);
+ }
+
private:
struct EnumValueStorage {
EnumValueStorage() {
@@ -1878,6 +2001,74 @@ public:
Status GetArgsError() { return m_args_error.Clone(); }
bool WantsCompletion() override { return true; }
+private:
+ void PrepareOptionsForCompletion(CompletionRequest &request,
+ OptionElementVector &option_vec) {
+ // First, we have to tell the Scripted side to set the values in its
+ // option store, then we call into the handle_completion passing in
+ // an array of the args, the arg index and the cursor position in the arg.
+ // We want the script side to have a chance to clear its state, so tell
+ // it argument parsing has started:
+ Options *options = GetOptions();
+ // If there are no options, this will be nullptr, and in that case we
+ // can just skip setting the options on the scripted side:
+ if (options)
+ m_options.PrepareOptionsForCompletion(request, option_vec, &m_exe_ctx);
+ }
+
+public:
+ void HandleArgumentCompletion(CompletionRequest &request,
+ OptionElementVector &option_vec) override {
+ ScriptInterpreter *scripter = GetDebugger().GetScriptInterpreter();
+
+ if (!scripter)
+ return;
+
+ // Set up the options values on the scripted side:
+ PrepareOptionsForCompletion(request, option_vec);
+
+ // Now we have to make up the argument list.
+ // The ParseForCompletion only identifies tokens in the m_parsed_line
+ // it doesn't remove the options leaving only the args as it does for
+ // the regular Parse, so we have to filter out the option ones using the
+ // option_element_vector:
+
+ Options *options = GetOptions();
+ auto defs = options->GetDefinitions();
+
+ std::unordered_set<size_t> option_slots;
+ for (const auto &elem : option_vec) {
+ if (elem.opt_defs_index == -1)
+ continue;
+ option_slots.insert(elem.opt_pos);
+ if (defs[elem.opt_defs_index].option_has_arg)
+ option_slots.insert(elem.opt_arg_pos);
+ }
+
+ std::vector<llvm::StringRef> args_vec;
+ Args &args = request.GetParsedLine();
+ size_t num_args = args.GetArgumentCount();
+ size_t cursor_idx = request.GetCursorIndex();
+ size_t args_elem_pos = cursor_idx;
+
+ for (size_t idx = 0; idx < num_args; idx++) {
+ if (option_slots.count(idx) == 0)
+ args_vec.push_back(args[idx].ref());
+ else if (idx < cursor_idx)
+ args_elem_pos--;
+ }
+ StructuredData::DictionarySP completion_dict_sp =
+ scripter->HandleArgumentCompletionForScriptedCommand(
+ m_cmd_obj_sp, args_vec, args_elem_pos, request.GetCursorCharPos());
+
+ if (!completion_dict_sp) {
+ CommandObject::HandleArgumentCompletion(request, option_vec);
+ return;
+ }
+
+ m_options.ProcessCompletionDict(request, completion_dict_sp);
+ }
+
bool IsRemovable() const override { return true; }
ScriptedCommandSynchronicity GetSynchronicity() { return m_synchro; }
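For reference, a sketch, not part of the patch, of the dictionary shape
ProcessCompletionDict consumes, built with StructuredData on the C++ side.
The key names ("values", "descriptions", "completion", "mode",
"no-completion") come from the code above; the completion strings are made
up, and the exact StructuredData helper signatures should be checked
against StructuredData.h:

  // Multiple matches with optional per-match descriptions.
  auto dict_sp = std::make_shared<StructuredData::Dictionary>();
  auto values_sp = std::make_shared<StructuredData::Array>();
  values_sp->AddItem(std::make_shared<StructuredData::String>("start"));
  values_sp->AddItem(std::make_shared<StructuredData::String>("stop"));
  auto descs_sp = std::make_shared<StructuredData::Array>();
  descs_sp->AddItem(std::make_shared<StructuredData::String>("begin trace"));
  descs_sp->AddItem(std::make_shared<StructuredData::String>("end trace"));
  dict_sp->AddItem("values", values_sp);
  dict_sp->AddItem("descriptions", descs_sp);
  // A single match instead: set "completion" to the string, optionally with
  // "mode" set to "complete" (the default) or "partial".
  // Explicitly no matches: include the key "no-completion".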
diff --git a/lldb/source/Commands/CommandObjectFrame.cpp b/lldb/source/Commands/CommandObjectFrame.cpp
index 142f969..e220329 100644
--- a/lldb/source/Commands/CommandObjectFrame.cpp
+++ b/lldb/source/Commands/CommandObjectFrame.cpp
@@ -1223,7 +1223,7 @@ CommandObjectMultiwordFrame::CommandObjectMultiwordFrame(
CommandInterpreter &interpreter)
: CommandObjectMultiword(interpreter, "frame",
"Commands for selecting and "
- "examing the current "
+ "examining the current "
"thread's stack frames.",
"frame <subcommand> [<subcommand-options>]") {
LoadSubCommand("diagnose",
diff --git a/lldb/source/Commands/CommandObjectProcess.cpp b/lldb/source/Commands/CommandObjectProcess.cpp
index 5b0f4f6..e7c7d07 100644
--- a/lldb/source/Commands/CommandObjectProcess.cpp
+++ b/lldb/source/Commands/CommandObjectProcess.cpp
@@ -1420,7 +1420,7 @@ protected:
PlatformSP platform_sp = process->GetTarget().GetPlatform();
if (!platform_sp) {
- result.AppendError("Couldn'retrieve the target's platform");
+ result.AppendError("Couldn't retrieve the target's platform");
return;
}
diff --git a/lldb/source/Commands/CommandObjectScripting.cpp b/lldb/source/Commands/CommandObjectScripting.cpp
index 9a1a2b6..1f8ee0a 100644
--- a/lldb/source/Commands/CommandObjectScripting.cpp
+++ b/lldb/source/Commands/CommandObjectScripting.cpp
@@ -254,7 +254,7 @@ CommandObjectMultiwordScripting::CommandObjectMultiwordScripting(
CommandInterpreter &interpreter)
: CommandObjectMultiword(
interpreter, "scripting",
- "Commands for operating on the scripting functionnalities.",
+ "Commands for operating on the scripting functionalities.",
"scripting <subcommand> [<subcommand-options>]") {
LoadSubCommand("run",
CommandObjectSP(new CommandObjectScriptingRun(interpreter)));
diff --git a/lldb/source/Commands/Options.td b/lldb/source/Commands/Options.td
index df906e9..4276d9e 100644
--- a/lldb/source/Commands/Options.td
+++ b/lldb/source/Commands/Options.td
@@ -1199,7 +1199,7 @@ let Command = "thread trace dump instructions" in {
def thread_trace_dump_instruction_only_events : Option<"only-events", "E">,
Group<1>,
Desc<"Dump only the events that happened during the execution of the "
- "target. No instrutions are dumped.">;
+ "target. No instructions are dumped.">;
def thread_trace_dump_instructions_continue: Option<"continue", "C">,
Group<1>,
Desc<"Continue dumping instructions right where the previous invocation of "
diff --git a/lldb/source/Interpreter/CommandInterpreter.cpp b/lldb/source/Interpreter/CommandInterpreter.cpp
index b93f47a..acd592c 100644
--- a/lldb/source/Interpreter/CommandInterpreter.cpp
+++ b/lldb/source/Interpreter/CommandInterpreter.cpp
@@ -797,7 +797,7 @@ void CommandInterpreter::LoadCommandDictionary() {
new CommandObjectRegexCommand(
*this, "gdb-remote",
"Connect to a process via remote GDB server.\n"
- "If no host is specifed, localhost is assumed.\n"
+ "If no host is specified, localhost is assumed.\n"
"gdb-remote is an abbreviation for 'process connect --plugin "
"gdb-remote connect://<hostname>:<port>'\n",
"gdb-remote [<hostname>:]<portnum>", 0, false));
diff --git a/lldb/source/Interpreter/Options.cpp b/lldb/source/Interpreter/Options.cpp
index b8a3f68a..3888a58 100644
--- a/lldb/source/Interpreter/Options.cpp
+++ b/lldb/source/Interpreter/Options.cpp
@@ -661,7 +661,9 @@ bool Options::HandleOptionCompletion(CompletionRequest &request,
} else if (opt_arg_pos == request.GetCursorIndex()) {
// Okay the cursor is on the completion of an argument. See if it has a
- // completion, otherwise return no matches.
+ // completion, otherwise return no matches. Note, opt_defs_index == -1
+ // means we're after an option, but that option doesn't exist. We'll
+ // end up treating that as an argument. Not sure we can do much better.
if (opt_defs_index != -1) {
HandleOptionArgumentCompletion(request, opt_element_vector, i,
interpreter);
@@ -688,7 +690,6 @@ void Options::HandleOptionArgumentCompletion(
int opt_defs_index = opt_element_vector[opt_element_index].opt_defs_index;
// See if this is an enumeration type option, and if so complete it here:
-
const auto &enum_values = opt_defs[opt_defs_index].enum_values;
if (!enum_values.empty())
for (const auto &enum_value : enum_values)
diff --git a/lldb/source/Plugins/ABI/AArch64/ABIAArch64.cpp b/lldb/source/Plugins/ABI/AArch64/ABIAArch64.cpp
index 256c1f8..7d8d0a4 100644
--- a/lldb/source/Plugins/ABI/AArch64/ABIAArch64.cpp
+++ b/lldb/source/Plugins/ABI/AArch64/ABIAArch64.cpp
@@ -136,6 +136,8 @@ void ABIAArch64::AugmentRegisterInfo(
std::array<std::optional<uint32_t>, 32> x_regs;
std::array<std::optional<uint32_t>, 32> v_regs;
+ std::array<std::optional<uint32_t>, 32> z_regs;
+ std::optional<uint32_t> z_byte_size;
for (auto it : llvm::enumerate(regs)) {
lldb_private::DynamicRegisterInfo::Register &info = it.value();
@@ -157,16 +159,44 @@ void ABIAArch64::AugmentRegisterInfo(
x_regs[reg_num] = it.index();
else if (get_reg("v"))
v_regs[reg_num] = it.index();
+ else if (get_reg("z")) {
+ z_regs[reg_num] = it.index();
+ if (!z_byte_size)
+ z_byte_size = info.byte_size;
+ }
// if we have at least one subregister, abort
else if (get_reg("w") || get_reg("s") || get_reg("d"))
return;
}
- // Create aliases for partial registers: wN for xN, and sN/dN for vN.
+ // Create aliases for partial registers.
+
+ // Wn for Xn.
addPartialRegisters(regs, x_regs, 8, "w{0}", 4, lldb::eEncodingUint,
lldb::eFormatHex);
- addPartialRegisters(regs, v_regs, 16, "s{0}", 4, lldb::eEncodingIEEE754,
- lldb::eFormatFloat);
- addPartialRegisters(regs, v_regs, 16, "d{0}", 8, lldb::eEncodingIEEE754,
- lldb::eFormatFloat);
+
+ auto bool_predicate = [](const auto &reg_num) { return bool(reg_num); };
+ bool saw_v_regs = std::any_of(v_regs.begin(), v_regs.end(), bool_predicate);
+ bool saw_z_regs = std::any_of(z_regs.begin(), z_regs.end(), bool_predicate);
+
+ // Sn/Dn for Vn.
+ if (saw_v_regs) {
+ addPartialRegisters(regs, v_regs, 16, "s{0}", 4, lldb::eEncodingIEEE754,
+ lldb::eFormatFloat);
+ addPartialRegisters(regs, v_regs, 16, "d{0}", 8, lldb::eEncodingIEEE754,
+ lldb::eFormatFloat);
+ } else if (saw_z_regs && z_byte_size) {
+ // When SVE is enabled, some debug stubs will not describe the Neon V
+ // registers because they can be read from the bottom 128 bits of the SVE
+ // registers.
+
+ // The size used here is the one sent by the debug server. This only needs
+ // to be correct right now. Later we will rely on the value of vg instead.
+ addPartialRegisters(regs, z_regs, *z_byte_size, "v{0}", 16,
+ lldb::eEncodingVector, lldb::eFormatVectorOfUInt8);
+ addPartialRegisters(regs, z_regs, *z_byte_size, "s{0}", 4,
+ lldb::eEncodingIEEE754, lldb::eFormatFloat);
+ addPartialRegisters(regs, z_regs, *z_byte_size, "d{0}", 8,
+ lldb::eEncodingIEEE754, lldb::eFormatFloat);
+ }
}
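An illustrative sketch, not from the patch, of what the aliasing above
means at the byte level, assuming the little-endian layout in which vN, dN
and sN occupy the low-order bytes of zN:

  #include <cassert>
  #include <cstdint>
  #include <cstring>
  #include <vector>

  // Given the raw bytes of z0 as sent by the debug server, s0 is the first
  // four bytes reinterpreted as a single-precision float; d0 would be the
  // first eight bytes, and v0 the first sixteen.
  float ReadS0FromZ0(const std::vector<uint8_t> &z0_bytes) {
    assert(z0_bytes.size() >= sizeof(float));
    float s0 = 0.0f;
    std::memcpy(&s0, z0_bytes.data(), sizeof s0);
    return s0;
  }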
diff --git a/lldb/source/Plugins/ABI/SystemZ/ABISysV_s390x.cpp b/lldb/source/Plugins/ABI/SystemZ/ABISysV_s390x.cpp
index cbfca1e..ac2d198 100644
--- a/lldb/source/Plugins/ABI/SystemZ/ABISysV_s390x.cpp
+++ b/lldb/source/Plugins/ABI/SystemZ/ABISysV_s390x.cpp
@@ -644,7 +644,7 @@ bool ABISysV_s390x::CreateDefaultUnwindPlan(UnwindPlan &unwind_plan) {
bool ABISysV_s390x::GetFallbackRegisterLocation(
const RegisterInfo *reg_info,
- UnwindPlan::Row::RegisterLocation &unwind_regloc) {
+ UnwindPlan::Row::AbstractRegisterLocation &unwind_regloc) {
// If a volatile register is being requested, we don't want to forward the
// next frame's register contents up the stack -- the register is not
// retrievable at this frame.
diff --git a/lldb/source/Plugins/ABI/SystemZ/ABISysV_s390x.h b/lldb/source/Plugins/ABI/SystemZ/ABISysV_s390x.h
index f6c248d..ecf3e39 100644
--- a/lldb/source/Plugins/ABI/SystemZ/ABISysV_s390x.h
+++ b/lldb/source/Plugins/ABI/SystemZ/ABISysV_s390x.h
@@ -43,7 +43,8 @@ public:
bool GetFallbackRegisterLocation(
const lldb_private::RegisterInfo *reg_info,
- lldb_private::UnwindPlan::Row::RegisterLocation &unwind_regloc) override;
+ lldb_private::UnwindPlan::Row::AbstractRegisterLocation &unwind_regloc)
+ override;
bool CallFrameAddressIsValid(lldb::addr_t cfa) override {
// Make sure the stack call frame addresses are 8 byte aligned
diff --git a/lldb/source/Plugins/Process/FreeBSD/NativeProcessFreeBSD.cpp b/lldb/source/Plugins/Process/FreeBSD/NativeProcessFreeBSD.cpp
index 80b2757..bf552e1 100644
--- a/lldb/source/Plugins/Process/FreeBSD/NativeProcessFreeBSD.cpp
+++ b/lldb/source/Plugins/Process/FreeBSD/NativeProcessFreeBSD.cpp
@@ -324,7 +324,7 @@ void NativeProcessFreeBSD::MonitorSIGTRAP(lldb::pid_t pid) {
auto thread_info =
m_threads_stepping_with_breakpoint.find(thread->GetID());
if (thread_info != m_threads_stepping_with_breakpoint.end() &&
- threads_info->second == regctx.GetPC()) {
+ thread_info->second == regctx.GetPC()) {
thread->SetStoppedByTrace();
Status brkpt_error = RemoveBreakpoint(thread_info->second);
if (brkpt_error.Fail())
diff --git a/lldb/source/Plugins/Process/gdb-remote/ProcessGDBRemote.cpp b/lldb/source/Plugins/Process/gdb-remote/ProcessGDBRemote.cpp
index d5dfe79..3e09c31 100644
--- a/lldb/source/Plugins/Process/gdb-remote/ProcessGDBRemote.cpp
+++ b/lldb/source/Plugins/Process/gdb-remote/ProcessGDBRemote.cpp
@@ -1600,7 +1600,6 @@ bool ProcessGDBRemote::CalculateThreadStopInfo(ThreadGDBRemote *thread) {
// If we have "jstopinfo" then we have stop descriptions for all threads
// that have stop reasons, and if there is no entry for a thread, then it
// has no stop reason.
- thread->GetRegisterContext()->InvalidateIfNeeded(true);
if (!GetThreadStopInfoFromJSON(thread, m_jstopinfo_sp)) {
// If a thread is stopped at a breakpoint site, set that as the stop
// reason even if it hasn't executed the breakpoint instruction yet.
@@ -4717,9 +4716,14 @@ bool ParseRegisters(
reg_info.encoding = eEncodingIEEE754;
} else if (gdb_type == "aarch64v" ||
llvm::StringRef(gdb_type).starts_with("vec") ||
- gdb_type == "i387_ext" || gdb_type == "uint128") {
+ gdb_type == "i387_ext" || gdb_type == "uint128" ||
+ reg_info.byte_size > 16) {
// lldb doesn't handle 128-bit uints correctly (for ymm*h), so
- // treat them as vector (similarly to xmm/ymm)
+ // treat them as vector (similarly to xmm/ymm).
+ // We can fall back to handling anything else <= 128 bits as an
+ // unsigned integer; anything larger we call a vector of bytes.
+ // This can happen if we don't recognise the type for AArch64 SVE
+ // registers.
reg_info.format = eFormatVectorOfUInt8;
reg_info.encoding = eEncodingVector;
} else {
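The fallback rule the widened condition encodes, restated as a standalone
sketch; the function name is ad hoc, not lldb API:

  #include <cstdint>
  #include <string>

  // Registers with these gdb types, or wider than 128 bits, are displayed
  // as vectors of bytes; anything else up to 128 bits can still be handled
  // as an unsigned integer.
  bool TreatAsByteVector(const std::string &gdb_type, uint32_t byte_size) {
    return gdb_type == "aarch64v" ||
           gdb_type.compare(0, 3, "vec") == 0 || // starts_with("vec")
           gdb_type == "i387_ext" || gdb_type == "uint128" || byte_size > 16;
  }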
diff --git a/lldb/source/Plugins/ScriptInterpreter/Python/PythonDataObjects.cpp b/lldb/source/Plugins/ScriptInterpreter/Python/PythonDataObjects.cpp
index 24cf3430..90ccd10 100644
--- a/lldb/source/Plugins/ScriptInterpreter/Python/PythonDataObjects.cpp
+++ b/lldb/source/Plugins/ScriptInterpreter/Python/PythonDataObjects.cpp
@@ -1108,9 +1108,10 @@ public:
py_error = Status::FromError(r.takeError());
}
base_error = Base::Close();
+ // Cloning since the wrapped exception may still reference the PyThread.
if (py_error.Fail())
- return py_error;
- return base_error;
+ return py_error.Clone();
+ return base_error.Clone();
};
PyObject *GetPythonObject() const {
@@ -1196,7 +1197,8 @@ public:
return Flush();
auto r = m_py_obj.CallMethod("close");
if (!r)
- return Status::FromError(r.takeError());
+ // Cloning since the wrapped exception may still reference the PyThread.
+ return Status::FromError(r.takeError()).Clone();
return Status();
}
@@ -1204,7 +1206,8 @@ public:
GIL takeGIL;
auto r = m_py_obj.CallMethod("flush");
if (!r)
- return Status::FromError(r.takeError());
+ // Cloning since the wrapped exception may still reference the PyThread.
+ return Status::FromError(r.takeError()).Clone();
return Status();
}
@@ -1240,7 +1243,8 @@ public:
PyObject *pybuffer_p = PyMemoryView_FromMemory(
const_cast<char *>((const char *)buf), num_bytes, PyBUF_READ);
if (!pybuffer_p)
- return Status::FromError(llvm::make_error<PythonException>());
+ // Cloning since the wrapped exception may still reference the PyThread.
+ return Status::FromError(llvm::make_error<PythonException>()).Clone();
auto pybuffer = Take<PythonObject>(pybuffer_p);
num_bytes = 0;
auto bytes_written = As<long long>(m_py_obj.CallMethod("write", pybuffer));
@@ -1260,7 +1264,8 @@ public:
auto pybuffer_obj =
m_py_obj.CallMethod("read", (unsigned long long)num_bytes);
if (!pybuffer_obj)
- return Status::FromError(pybuffer_obj.takeError());
+ // Cloning since the wrapped exception may still reference the PyThread.
+ return Status::FromError(pybuffer_obj.takeError()).Clone();
num_bytes = 0;
if (pybuffer_obj.get().IsNone()) {
// EOF
@@ -1269,7 +1274,8 @@ public:
}
auto pybuffer = PythonBuffer::Create(pybuffer_obj.get());
if (!pybuffer)
- return Status::FromError(pybuffer.takeError());
+ // Cloning since the wrapped exception may still reference the PyThread.
+ return Status::FromError(pybuffer.takeError()).Clone();
memcpy(buf, pybuffer.get().get().buf, pybuffer.get().get().len);
num_bytes = pybuffer.get().get().len;
return Status();
@@ -1300,7 +1306,8 @@ public:
auto bytes_written =
As<long long>(m_py_obj.CallMethod("write", pystring.get()));
if (!bytes_written)
- return Status::FromError(bytes_written.takeError());
+ // Cloning since the wrapped exception may still reference the PyThread.
+ return Status::FromError(bytes_written.takeError()).Clone();
if (bytes_written.get() < 0)
return Status::FromErrorString(
".write() method returned a negative number!");
@@ -1321,14 +1328,16 @@ public:
auto pystring = As<PythonString>(
m_py_obj.CallMethod("read", (unsigned long long)num_chars));
if (!pystring)
- return Status::FromError(pystring.takeError());
+ // Cloning since the wrapped exception may still reference the PyThread.
+ return Status::FromError(pystring.takeError()).Clone();
if (pystring.get().IsNone()) {
// EOF
return Status();
}
auto stringref = pystring.get().AsUTF8();
if (!stringref)
- return Status::FromError(stringref.takeError());
+ // Cloning since the wrapped exception may still reference the PyThread.
+ return Status::FromError(stringref.takeError()).Clone();
num_bytes = stringref.get().size();
memcpy(buf, stringref.get().begin(), num_bytes);
return Status();
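The repeated FromError(...).Clone() idiom in this file can be read as one
small pattern; a sketch with a hypothetical helper name:

  // Wrap a live Python error, then immediately deep-copy it so the
  // returned Status no longer references interpreter state such as the
  // PyThread.
  static Status TakeDetachedError(llvm::Error err) {
    return Status::FromError(std::move(err)).Clone();
  }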
diff --git a/lldb/source/Plugins/ScriptInterpreter/Python/SWIGPythonBridge.h b/lldb/source/Plugins/ScriptInterpreter/Python/SWIGPythonBridge.h
index 81ee9ea0..518a478 100644
--- a/lldb/source/Plugins/ScriptInterpreter/Python/SWIGPythonBridge.h
+++ b/lldb/source/Plugins/ScriptInterpreter/Python/SWIGPythonBridge.h
@@ -200,6 +200,15 @@ public:
LLDBSwigPythonGetRepeatCommandForScriptedCommand(PyObject *implementor,
std::string &command);
+ static StructuredData::DictionarySP
+ LLDBSwigPythonHandleArgumentCompletionForScriptedCommand(
+ PyObject *implementor, std::vector<llvm::StringRef> &args_impl,
+ size_t args_pos, size_t pos_in_arg);
+
+ static StructuredData::DictionarySP
+ LLDBSwigPythonHandleOptionArgumentCompletionForScriptedCommand(
+ PyObject *implementor, llvm::StringRef &long_option, size_t pos_in_arg);
+
static bool LLDBSwigPythonCallModuleInit(const char *python_module_name,
const char *session_dictionary_name,
lldb::DebuggerSP debugger);
diff --git a/lldb/source/Plugins/ScriptInterpreter/Python/ScriptInterpreterPython.cpp b/lldb/source/Plugins/ScriptInterpreter/Python/ScriptInterpreterPython.cpp
index 155efc0..db1a10e 100644
--- a/lldb/source/Plugins/ScriptInterpreter/Python/ScriptInterpreterPython.cpp
+++ b/lldb/source/Plugins/ScriptInterpreter/Python/ScriptInterpreterPython.cpp
@@ -2720,6 +2720,46 @@ ScriptInterpreterPythonImpl::GetRepeatCommandForScriptedCommand(
return ret_val;
}
+StructuredData::DictionarySP
+ScriptInterpreterPythonImpl::HandleArgumentCompletionForScriptedCommand(
+ StructuredData::GenericSP impl_obj_sp, std::vector<llvm::StringRef> &args,
+ size_t args_pos, size_t char_in_arg) {
+ StructuredData::DictionarySP completion_dict_sp;
+ if (!impl_obj_sp || !impl_obj_sp->IsValid())
+ return completion_dict_sp;
+
+ {
+ Locker py_lock(this, Locker::AcquireLock | Locker::NoSTDIN,
+ Locker::FreeLock);
+
+ completion_dict_sp =
+ SWIGBridge::LLDBSwigPythonHandleArgumentCompletionForScriptedCommand(
+ static_cast<PyObject *>(impl_obj_sp->GetValue()), args, args_pos,
+ char_in_arg);
+ }
+ return completion_dict_sp;
+}
+
+StructuredData::DictionarySP
+ScriptInterpreterPythonImpl::HandleOptionArgumentCompletionForScriptedCommand(
+ StructuredData::GenericSP impl_obj_sp, llvm::StringRef &long_option,
+ size_t char_in_arg) {
+ StructuredData::DictionarySP completion_dict_sp;
+ if (!impl_obj_sp || !impl_obj_sp->IsValid())
+ return completion_dict_sp;
+
+ {
+ Locker py_lock(this, Locker::AcquireLock | Locker::NoSTDIN,
+ Locker::FreeLock);
+
+ completion_dict_sp = SWIGBridge::
+ LLDBSwigPythonHandleOptionArgumentCompletionForScriptedCommand(
+ static_cast<PyObject *>(impl_obj_sp->GetValue()), long_option,
+ char_in_arg);
+ }
+ return completion_dict_sp;
+}
+
/// In Python, a special attribute __doc__ contains the docstring for an object
/// (function, method, class, ...) if any is defined. Otherwise, the attribute's
/// value is None.
diff --git a/lldb/source/Plugins/ScriptInterpreter/Python/ScriptInterpreterPythonImpl.h b/lldb/source/Plugins/ScriptInterpreter/Python/ScriptInterpreterPythonImpl.h
index d15e2fd..2dc7847 100644
--- a/lldb/source/Plugins/ScriptInterpreter/Python/ScriptInterpreterPythonImpl.h
+++ b/lldb/source/Plugins/ScriptInterpreter/Python/ScriptInterpreterPythonImpl.h
@@ -166,6 +166,14 @@ public:
GetRepeatCommandForScriptedCommand(StructuredData::GenericSP impl_obj_sp,
Args &args) override;
+ StructuredData::DictionarySP HandleArgumentCompletionForScriptedCommand(
+ StructuredData::GenericSP impl_obj_sp, std::vector<llvm::StringRef> &args,
+ size_t args_pos, size_t char_in_arg) override;
+
+ StructuredData::DictionarySP HandleOptionArgumentCompletionForScriptedCommand(
+ StructuredData::GenericSP impl_obj_sp, llvm::StringRef &long_options,
+ size_t char_in_arg) override;
+
Status GenerateFunction(const char *signature, const StringList &input,
bool is_callback) override;
diff --git a/lldb/source/Plugins/SymbolFile/Breakpad/SymbolFileBreakpad.cpp b/lldb/source/Plugins/SymbolFile/Breakpad/SymbolFileBreakpad.cpp
index 3977dc3a..9e78ba8 100644
--- a/lldb/source/Plugins/SymbolFile/Breakpad/SymbolFileBreakpad.cpp
+++ b/lldb/source/Plugins/SymbolFile/Breakpad/SymbolFileBreakpad.cpp
@@ -614,7 +614,7 @@ bool SymbolFileBreakpad::ParseCFIUnwindRow(llvm::StringRef unwind_rules,
row.GetCFAValue().SetIsDWARFExpression(saved.data(), saved.size());
} else if (const RegisterInfo *info =
ResolveRegisterOrRA(triple, resolver, lhs)) {
- UnwindPlan::Row::RegisterLocation loc;
+ UnwindPlan::Row::AbstractRegisterLocation loc;
loc.SetIsDWARFExpression(saved.data(), saved.size());
row.SetRegisterInfo(info->kinds[eRegisterKindLLDB], loc);
} else
@@ -766,7 +766,7 @@ SymbolFileBreakpad::ParseWinUnwindPlan(const Bookmark &bookmark,
}
llvm::ArrayRef<uint8_t> saved = SaveAsDWARF(*it->second);
- UnwindPlan::Row::RegisterLocation loc;
+ UnwindPlan::Row::AbstractRegisterLocation loc;
loc.SetIsDWARFExpression(saved.data(), saved.size());
row_sp->SetRegisterInfo(info->kinds[eRegisterKindLLDB], loc);
}
diff --git a/lldb/source/Plugins/SymbolFile/DWARF/DWARFFormValue.cpp b/lldb/source/Plugins/SymbolFile/DWARF/DWARFFormValue.cpp
index e1f73f1..f58c626 100644
--- a/lldb/source/Plugins/SymbolFile/DWARF/DWARFFormValue.cpp
+++ b/lldb/source/Plugins/SymbolFile/DWARF/DWARFFormValue.cpp
@@ -25,7 +25,7 @@ using namespace lldb_private::plugin::dwarf;
void DWARFFormValue::Clear() {
m_unit = nullptr;
m_form = dw_form_t(0);
- m_value = ValueTypeTag();
+ m_value = ValueType();
}
bool DWARFFormValue::ExtractValue(const DWARFDataExtractor &data,
@@ -44,68 +44,68 @@ bool DWARFFormValue::ExtractValue(const DWARFDataExtractor &data,
switch (m_form) {
case DW_FORM_addr:
assert(m_unit);
- m_value.value.uval =
+ m_value.uval =
data.GetMaxU64(offset_ptr, DWARFUnit::GetAddressByteSize(m_unit));
break;
case DW_FORM_block1:
- m_value.value.uval = data.GetU8(offset_ptr);
+ m_value.uval = data.GetU8(offset_ptr);
is_block = true;
break;
case DW_FORM_block2:
- m_value.value.uval = data.GetU16(offset_ptr);
+ m_value.uval = data.GetU16(offset_ptr);
is_block = true;
break;
case DW_FORM_block4:
- m_value.value.uval = data.GetU32(offset_ptr);
+ m_value.uval = data.GetU32(offset_ptr);
is_block = true;
break;
case DW_FORM_data16:
- m_value.value.uval = 16;
+ m_value.uval = 16;
is_block = true;
break;
case DW_FORM_exprloc:
case DW_FORM_block:
- m_value.value.uval = data.GetULEB128(offset_ptr);
+ m_value.uval = data.GetULEB128(offset_ptr);
is_block = true;
break;
case DW_FORM_string:
- m_value.value.cstr = data.GetCStr(offset_ptr);
+ m_value.cstr = data.GetCStr(offset_ptr);
break;
case DW_FORM_sdata:
- m_value.value.sval = data.GetSLEB128(offset_ptr);
+ m_value.sval = data.GetSLEB128(offset_ptr);
break;
case DW_FORM_strp:
case DW_FORM_line_strp:
case DW_FORM_sec_offset:
- m_value.value.uval = data.GetMaxU64(offset_ptr, 4);
+ m_value.uval = data.GetMaxU64(offset_ptr, 4);
break;
case DW_FORM_addrx1:
case DW_FORM_strx1:
case DW_FORM_ref1:
case DW_FORM_data1:
case DW_FORM_flag:
- m_value.value.uval = data.GetU8(offset_ptr);
+ m_value.uval = data.GetU8(offset_ptr);
break;
case DW_FORM_addrx2:
case DW_FORM_strx2:
case DW_FORM_ref2:
case DW_FORM_data2:
- m_value.value.uval = data.GetU16(offset_ptr);
+ m_value.uval = data.GetU16(offset_ptr);
break;
case DW_FORM_addrx3:
case DW_FORM_strx3:
- m_value.value.uval = data.GetMaxU64(offset_ptr, 3);
+ m_value.uval = data.GetMaxU64(offset_ptr, 3);
break;
case DW_FORM_addrx4:
case DW_FORM_strx4:
case DW_FORM_ref4:
case DW_FORM_data4:
- m_value.value.uval = data.GetU32(offset_ptr);
+ m_value.uval = data.GetU32(offset_ptr);
break;
case DW_FORM_data8:
case DW_FORM_ref8:
case DW_FORM_ref_sig8:
- m_value.value.uval = data.GetU64(offset_ptr);
+ m_value.uval = data.GetU64(offset_ptr);
break;
case DW_FORM_addrx:
case DW_FORM_loclistx:
@@ -115,7 +115,7 @@ bool DWARFFormValue::ExtractValue(const DWARFDataExtractor &data,
case DW_FORM_ref_udata:
case DW_FORM_GNU_str_index:
case DW_FORM_GNU_addr_index:
- m_value.value.uval = data.GetULEB128(offset_ptr);
+ m_value.uval = data.GetULEB128(offset_ptr);
break;
case DW_FORM_ref_addr:
assert(m_unit);
@@ -123,14 +123,14 @@ bool DWARFFormValue::ExtractValue(const DWARFDataExtractor &data,
ref_addr_size = m_unit->GetAddressByteSize();
else
ref_addr_size = 4;
- m_value.value.uval = data.GetMaxU64(offset_ptr, ref_addr_size);
+ m_value.uval = data.GetMaxU64(offset_ptr, ref_addr_size);
break;
case DW_FORM_indirect:
m_form = static_cast<dw_form_t>(data.GetULEB128(offset_ptr));
indirect = true;
break;
case DW_FORM_flag_present:
- m_value.value.uval = 1;
+ m_value.uval = 1;
break;
default:
return false;
@@ -138,9 +138,9 @@ bool DWARFFormValue::ExtractValue(const DWARFDataExtractor &data,
} while (indirect);
if (is_block) {
- m_value.data = data.PeekData(*offset_ptr, m_value.value.uval);
+ m_value.data = data.PeekData(*offset_ptr, m_value.uval);
if (m_value.data != nullptr) {
- *offset_ptr += m_value.value.uval;
+ *offset_ptr += m_value.uval;
}
}
@@ -461,23 +461,23 @@ const char *DWARFFormValue::AsCString() const {
DWARFContext &context = m_unit->GetSymbolFileDWARF().GetDWARFContext();
if (m_form == DW_FORM_string)
- return m_value.value.cstr;
+ return m_value.cstr;
if (m_form == DW_FORM_strp)
- return context.getOrLoadStrData().PeekCStr(m_value.value.uval);
+ return context.getOrLoadStrData().PeekCStr(m_value.uval);
if (m_form == DW_FORM_GNU_str_index || m_form == DW_FORM_strx ||
m_form == DW_FORM_strx1 || m_form == DW_FORM_strx2 ||
m_form == DW_FORM_strx3 || m_form == DW_FORM_strx4) {
std::optional<uint64_t> offset =
- m_unit->GetStringOffsetSectionItem(m_value.value.uval);
+ m_unit->GetStringOffsetSectionItem(m_value.uval);
if (!offset)
return nullptr;
return context.getOrLoadStrData().PeekCStr(*offset);
}
if (m_form == DW_FORM_line_strp)
- return context.getOrLoadLineStrData().PeekCStr(m_value.value.uval);
+ return context.getOrLoadLineStrData().PeekCStr(m_value.uval);
return nullptr;
}
@@ -495,14 +495,14 @@ dw_addr_t DWARFFormValue::Address() const {
uint32_t index_size = m_unit->GetAddressByteSize();
dw_offset_t addr_base = m_unit->GetAddrBase();
- lldb::offset_t offset = addr_base + m_value.value.uval * index_size;
+ lldb::offset_t offset = addr_base + m_value.uval * index_size;
return symbol_file.GetDWARFContext().getOrLoadAddrData().GetMaxU64(
&offset, index_size);
}
std::pair<DWARFUnit *, uint64_t>
DWARFFormValue::ReferencedUnitAndOffset() const {
- uint64_t value = m_value.value.uval;
+ uint64_t value = m_value.uval;
switch (m_form) {
case DW_FORM_ref1:
case DW_FORM_ref2:
@@ -550,7 +550,7 @@ DWARFDIE DWARFFormValue::Reference() const {
}
uint64_t DWARFFormValue::Reference(dw_offset_t base_offset) const {
- uint64_t value = m_value.value.uval;
+ uint64_t value = m_value.uval;
switch (m_form) {
case DW_FORM_ref1:
case DW_FORM_ref2:
diff --git a/lldb/source/Plugins/SymbolFile/DWARF/DWARFFormValue.h b/lldb/source/Plugins/SymbolFile/DWARF/DWARFFormValue.h
index fdd5b3c..8ab9163 100644
--- a/lldb/source/Plugins/SymbolFile/DWARF/DWARFFormValue.h
+++ b/lldb/source/Plugins/SymbolFile/DWARF/DWARFFormValue.h
@@ -10,7 +10,7 @@
#define LLDB_SOURCE_PLUGINS_SYMBOLFILE_DWARF_DWARFFORMVALUE_H
#include "DWARFDataExtractor.h"
-#include <cstddef>
+#include "llvm/DebugInfo/DWARF/DWARFFormValue.h"
#include <optional>
namespace lldb_private::plugin {
@@ -21,17 +21,7 @@ class DWARFDIE;
class DWARFFormValue {
public:
- typedef struct ValueTypeTag {
- ValueTypeTag() : value() { value.uval = 0; }
-
- union {
- uint64_t uval;
- int64_t sval;
- const char *cstr;
- } value;
- const uint8_t *data = nullptr;
- } ValueType;
-
+ typedef llvm::DWARFFormValue::ValueType ValueType;
enum {
eValueTypeInvalid = 0,
eValueTypeUnsigned,
@@ -67,11 +57,11 @@ public:
std::pair<DWARFUnit *, uint64_t> ReferencedUnitAndOffset() const;
uint64_t Reference(dw_offset_t offset) const;
- bool Boolean() const { return m_value.value.uval != 0; }
- uint64_t Unsigned() const { return m_value.value.uval; }
- void SetUnsigned(uint64_t uval) { m_value.value.uval = uval; }
- int64_t Signed() const { return m_value.value.sval; }
- void SetSigned(int64_t sval) { m_value.value.sval = sval; }
+ bool Boolean() const { return m_value.uval != 0; }
+ uint64_t Unsigned() const { return m_value.uval; }
+ void SetUnsigned(uint64_t uval) { m_value.uval = uval; }
+ int64_t Signed() const { return m_value.sval; }
+ void SetSigned(int64_t sval) { m_value.sval = sval; }
const char *AsCString() const;
dw_addr_t Address() const;
bool IsValid() const { return m_form != 0; }
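A usage sketch, not from the patch, showing that the accessors now reduce
to direct member access on the llvm-provided ValueType; it assumes a
default-constructed DWARFFormValue as in the surrounding code:

  DWARFFormValue v;
  v.SetUnsigned(42);         // writes m_value.uval
  uint64_t u = v.Unsigned(); // reads m_value.uval back; yields 42
  v.SetSigned(-1);           // same storage viewed through m_value.sval
  int64_t s = v.Signed();    // yields -1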
diff --git a/lldb/source/Plugins/UnwindAssembly/x86/UnwindAssembly-x86.cpp b/lldb/source/Plugins/UnwindAssembly/x86/UnwindAssembly-x86.cpp
index eca78a9..5c846ba 100644
--- a/lldb/source/Plugins/UnwindAssembly/x86/UnwindAssembly-x86.cpp
+++ b/lldb/source/Plugins/UnwindAssembly/x86/UnwindAssembly-x86.cpp
@@ -97,7 +97,7 @@ bool UnwindAssembly_x86::AugmentUnwindPlanFromCallSite(
first_row->GetCFAValue().GetOffset() != wordsize) {
return false;
}
- UnwindPlan::Row::RegisterLocation first_row_pc_loc;
+ UnwindPlan::Row::AbstractRegisterLocation first_row_pc_loc;
if (!first_row->GetRegisterInfo(
pc_regnum.GetAsKind(unwind_plan.GetRegisterKind()),
first_row_pc_loc) ||
@@ -126,7 +126,7 @@ bool UnwindAssembly_x86::AugmentUnwindPlanFromCallSite(
// Get the register locations for eip/rip from the first & last rows. Are
// they both CFA plus an offset? Is it the same offset?
- UnwindPlan::Row::RegisterLocation last_row_pc_loc;
+ UnwindPlan::Row::AbstractRegisterLocation last_row_pc_loc;
if (last_row->GetRegisterInfo(
pc_regnum.GetAsKind(unwind_plan.GetRegisterKind()),
last_row_pc_loc)) {
diff --git a/lldb/source/Plugins/UnwindAssembly/x86/x86AssemblyInspectionEngine.cpp b/lldb/source/Plugins/UnwindAssembly/x86/x86AssemblyInspectionEngine.cpp
index 6bfaa54..81b7f13 100644
--- a/lldb/source/Plugins/UnwindAssembly/x86/x86AssemblyInspectionEngine.cpp
+++ b/lldb/source/Plugins/UnwindAssembly/x86/x86AssemblyInspectionEngine.cpp
@@ -915,7 +915,7 @@ bool x86AssemblyInspectionEngine::GetNonCallSiteUnwindPlanFromAssembly(
addr_t current_func_text_offset = 0;
int current_sp_bytes_offset_from_fa = 0;
bool is_aligned = false;
- UnwindPlan::Row::RegisterLocation initial_regloc;
+ UnwindPlan::Row::AbstractRegisterLocation initial_regloc;
UnwindPlan::RowSP row(new UnwindPlan::Row);
unwind_plan.SetPlanValidAddressRange(func_range);
@@ -1051,7 +1051,7 @@ bool x86AssemblyInspectionEngine::GetNonCallSiteUnwindPlanFromAssembly(
if (nonvolatile_reg_p(machine_regno) &&
machine_regno_to_lldb_regno(machine_regno, lldb_regno) &&
!saved_registers[machine_regno]) {
- UnwindPlan::Row::RegisterLocation regloc;
+ UnwindPlan::Row::AbstractRegisterLocation regloc;
if (is_aligned)
regloc.SetAtAFAPlusOffset(-current_sp_bytes_offset_from_fa);
else
@@ -1142,7 +1142,7 @@ bool x86AssemblyInspectionEngine::GetNonCallSiteUnwindPlanFromAssembly(
!saved_registers[machine_regno]) {
saved_registers[machine_regno] = true;
- UnwindPlan::Row::RegisterLocation regloc;
+ UnwindPlan::Row::AbstractRegisterLocation regloc;
// stack_offset for 'movq %r15, -80(%rbp)' will be 80. In the Row, we
// want to express this as the offset from the FA. If the frame base is
@@ -1234,7 +1234,7 @@ bool x86AssemblyInspectionEngine::GetNonCallSiteUnwindPlanFromAssembly(
// determine the effects of. Verify that the stack frame state
// has been unwound to the same as it was at function entry to avoid
// mis-identifying a JMP instruction as an epilogue.
- UnwindPlan::Row::RegisterLocation sp, pc;
+ UnwindPlan::Row::AbstractRegisterLocation sp, pc;
if (row->GetRegisterInfo(m_lldb_sp_regnum, sp) &&
row->GetRegisterInfo(m_lldb_ip_regnum, pc)) {
// Any ret instruction variant is definitely indicative of an
diff --git a/lldb/source/Symbol/ArmUnwindInfo.cpp b/lldb/source/Symbol/ArmUnwindInfo.cpp
index 6bc3bd6c..569e0f5 100644
--- a/lldb/source/Symbol/ArmUnwindInfo.cpp
+++ b/lldb/source/Symbol/ArmUnwindInfo.cpp
@@ -333,7 +333,7 @@ bool ArmUnwindInfo::GetUnwindPlan(Target &target, const Address &addr,
}
if (!have_location_for_pc) {
- UnwindPlan::Row::RegisterLocation lr_location;
+ UnwindPlan::Row::AbstractRegisterLocation lr_location;
if (row->GetRegisterInfo(dwarf_lr, lr_location))
row->SetRegisterInfo(dwarf_pc, lr_location);
else
diff --git a/lldb/source/Symbol/DWARFCallFrameInfo.cpp b/lldb/source/Symbol/DWARFCallFrameInfo.cpp
index ff2610c..a743de59 100644
--- a/lldb/source/Symbol/DWARFCallFrameInfo.cpp
+++ b/lldb/source/Symbol/DWARFCallFrameInfo.cpp
@@ -633,7 +633,7 @@ bool DWARFCallFrameInfo::FDEToUnwindPlan(dw_offset_t dwarf_offset,
std::vector<UnwindPlan::RowSP> stack;
- UnwindPlan::Row::RegisterLocation reg_location;
+ UnwindPlan::Row::AbstractRegisterLocation reg_location;
while (m_cfi_data.ValidOffset(offset) && offset < end_offset) {
uint8_t inst = m_cfi_data.GetU8(&offset);
uint8_t primary_opcode = inst & 0xC0;
@@ -822,7 +822,7 @@ bool DWARFCallFrameInfo::HandleCommonDwarfOpcode(uint8_t primary_opcode,
int32_t data_align,
lldb::offset_t &offset,
UnwindPlan::Row &row) {
- UnwindPlan::Row::RegisterLocation reg_location;
+ UnwindPlan::Row::AbstractRegisterLocation reg_location;
if (primary_opcode) {
switch (primary_opcode) {
@@ -852,7 +852,7 @@ bool DWARFCallFrameInfo::HandleCommonDwarfOpcode(uint8_t primary_opcode,
// except for the encoding and size of the register argument.
uint32_t reg_num = (uint32_t)m_cfi_data.GetULEB128(&offset);
int32_t op_offset = (int32_t)m_cfi_data.GetULEB128(&offset) * data_align;
- UnwindPlan::Row::RegisterLocation reg_location;
+ UnwindPlan::Row::AbstractRegisterLocation reg_location;
reg_location.SetAtCFAPlusOffset(op_offset);
row.SetRegisterInfo(reg_num, reg_location);
return true;
@@ -864,7 +864,7 @@ bool DWARFCallFrameInfo::HandleCommonDwarfOpcode(uint8_t primary_opcode,
// number. The required action is to set the rule for the specified
// register to undefined.
uint32_t reg_num = (uint32_t)m_cfi_data.GetULEB128(&offset);
- UnwindPlan::Row::RegisterLocation reg_location;
+ UnwindPlan::Row::AbstractRegisterLocation reg_location;
reg_location.SetUndefined();
row.SetRegisterInfo(reg_num, reg_location);
return true;
@@ -876,7 +876,7 @@ bool DWARFCallFrameInfo::HandleCommonDwarfOpcode(uint8_t primary_opcode,
// number. The required action is to set the rule for the specified
// register to same value.
uint32_t reg_num = (uint32_t)m_cfi_data.GetULEB128(&offset);
- UnwindPlan::Row::RegisterLocation reg_location;
+ UnwindPlan::Row::AbstractRegisterLocation reg_location;
reg_location.SetSame();
row.SetRegisterInfo(reg_num, reg_location);
return true;
@@ -889,7 +889,7 @@ bool DWARFCallFrameInfo::HandleCommonDwarfOpcode(uint8_t primary_opcode,
// second register.
uint32_t reg_num = (uint32_t)m_cfi_data.GetULEB128(&offset);
uint32_t other_reg_num = (uint32_t)m_cfi_data.GetULEB128(&offset);
- UnwindPlan::Row::RegisterLocation reg_location;
+ UnwindPlan::Row::AbstractRegisterLocation reg_location;
reg_location.SetInRegister(other_reg_num);
row.SetRegisterInfo(reg_num, reg_location);
return true;
@@ -950,7 +950,7 @@ bool DWARFCallFrameInfo::HandleCommonDwarfOpcode(uint8_t primary_opcode,
uint32_t block_len = (uint32_t)m_cfi_data.GetULEB128(&offset);
const uint8_t *block_data =
static_cast<const uint8_t *>(m_cfi_data.GetData(&offset, block_len));
- UnwindPlan::Row::RegisterLocation reg_location;
+ UnwindPlan::Row::AbstractRegisterLocation reg_location;
reg_location.SetAtDWARFExpression(block_data, block_len);
row.SetRegisterInfo(reg_num, reg_location);
return true;
@@ -964,7 +964,7 @@ bool DWARFCallFrameInfo::HandleCommonDwarfOpcode(uint8_t primary_opcode,
// signed and factored.
uint32_t reg_num = (uint32_t)m_cfi_data.GetULEB128(&offset);
int32_t op_offset = (int32_t)m_cfi_data.GetSLEB128(&offset) * data_align;
- UnwindPlan::Row::RegisterLocation reg_location;
+ UnwindPlan::Row::AbstractRegisterLocation reg_location;
reg_location.SetAtCFAPlusOffset(op_offset);
row.SetRegisterInfo(reg_num, reg_location);
return true;
diff --git a/lldb/source/Symbol/FuncUnwinders.cpp b/lldb/source/Symbol/FuncUnwinders.cpp
index 228d9a10..d01a899 100644
--- a/lldb/source/Symbol/FuncUnwinders.cpp
+++ b/lldb/source/Symbol/FuncUnwinders.cpp
@@ -371,8 +371,8 @@ LazyBool FuncUnwinders::CompareUnwindPlansForIdenticalInitialPCLocation(
UnwindPlan::RowSP b_first_row = b->GetRowAtIndex(0);
if (a_first_row.get() && b_first_row.get()) {
- UnwindPlan::Row::RegisterLocation a_pc_regloc;
- UnwindPlan::Row::RegisterLocation b_pc_regloc;
+ UnwindPlan::Row::AbstractRegisterLocation a_pc_regloc;
+ UnwindPlan::Row::AbstractRegisterLocation b_pc_regloc;
a_first_row->GetRegisterInfo(pc_reg_lldb_regnum, a_pc_regloc);
b_first_row->GetRegisterInfo(pc_reg_lldb_regnum, b_pc_regloc);
diff --git a/lldb/source/Symbol/UnwindPlan.cpp b/lldb/source/Symbol/UnwindPlan.cpp
index e2dbd81..a06e7cf 100644
--- a/lldb/source/Symbol/UnwindPlan.cpp
+++ b/lldb/source/Symbol/UnwindPlan.cpp
@@ -22,8 +22,8 @@
using namespace lldb;
using namespace lldb_private;
-bool UnwindPlan::Row::RegisterLocation::
-operator==(const UnwindPlan::Row::RegisterLocation &rhs) const {
+bool UnwindPlan::Row::AbstractRegisterLocation::operator==(
+ const UnwindPlan::Row::AbstractRegisterLocation &rhs) const {
if (m_type == rhs.m_type) {
switch (m_type) {
case unspecified:
@@ -55,7 +55,7 @@ operator==(const UnwindPlan::Row::RegisterLocation &rhs) const {
// This function doesn't copy the dwarf expression bytes; they must remain in
// allocated memory for the lifespan of this UnwindPlan object.
-void UnwindPlan::Row::RegisterLocation::SetAtDWARFExpression(
+void UnwindPlan::Row::AbstractRegisterLocation::SetAtDWARFExpression(
const uint8_t *opcodes, uint32_t len) {
m_type = atDWARFExpression;
m_location.expr.opcodes = opcodes;
@@ -64,7 +64,7 @@ void UnwindPlan::Row::RegisterLocation::SetAtDWARFExpression(
// This function doesn't copy the dwarf expression bytes; they must remain in
// allocated memory for the lifespan of this UnwindPlan object.
-void UnwindPlan::Row::RegisterLocation::SetIsDWARFExpression(
+void UnwindPlan::Row::AbstractRegisterLocation::SetIsDWARFExpression(
const uint8_t *opcodes, uint32_t len) {
m_type = isDWARFExpression;
m_location.expr.opcodes = opcodes;
@@ -92,11 +92,9 @@ static void DumpDWARFExpr(Stream &s, llvm::ArrayRef<uint8_t> expr, Thread *threa
s.PutCString("dwarf-expr");
}
-void UnwindPlan::Row::RegisterLocation::Dump(Stream &s,
- const UnwindPlan *unwind_plan,
- const UnwindPlan::Row *row,
- Thread *thread,
- bool verbose) const {
+void UnwindPlan::Row::AbstractRegisterLocation::Dump(
+ Stream &s, const UnwindPlan *unwind_plan, const UnwindPlan::Row *row,
+ Thread *thread, bool verbose) const {
switch (m_type) {
case unspecified:
if (verbose)
@@ -255,7 +253,7 @@ UnwindPlan::Row::Row() : m_cfa_value(), m_afa_value(), m_register_locations() {}
bool UnwindPlan::Row::GetRegisterInfo(
uint32_t reg_num,
- UnwindPlan::Row::RegisterLocation &register_location) const {
+ UnwindPlan::Row::AbstractRegisterLocation &register_location) const {
collection::const_iterator pos = m_register_locations.find(reg_num);
if (pos != m_register_locations.end()) {
register_location = pos->second;
@@ -277,7 +275,7 @@ void UnwindPlan::Row::RemoveRegisterInfo(uint32_t reg_num) {
void UnwindPlan::Row::SetRegisterInfo(
uint32_t reg_num,
- const UnwindPlan::Row::RegisterLocation register_location) {
+ const UnwindPlan::Row::AbstractRegisterLocation register_location) {
m_register_locations[reg_num] = register_location;
}
@@ -287,7 +285,7 @@ bool UnwindPlan::Row::SetRegisterLocationToAtCFAPlusOffset(uint32_t reg_num,
if (!can_replace &&
m_register_locations.find(reg_num) != m_register_locations.end())
return false;
- RegisterLocation reg_loc;
+ AbstractRegisterLocation reg_loc;
reg_loc.SetAtCFAPlusOffset(offset);
m_register_locations[reg_num] = reg_loc;
return true;
@@ -299,7 +297,7 @@ bool UnwindPlan::Row::SetRegisterLocationToIsCFAPlusOffset(uint32_t reg_num,
if (!can_replace &&
m_register_locations.find(reg_num) != m_register_locations.end())
return false;
- RegisterLocation reg_loc;
+ AbstractRegisterLocation reg_loc;
reg_loc.SetIsCFAPlusOffset(offset);
m_register_locations[reg_num] = reg_loc;
return true;
@@ -316,7 +314,7 @@ bool UnwindPlan::Row::SetRegisterLocationToUndefined(
if (can_replace_only_if_unspecified && !pos->second.IsUnspecified())
return false;
}
- RegisterLocation reg_loc;
+ AbstractRegisterLocation reg_loc;
reg_loc.SetUndefined();
m_register_locations[reg_num] = reg_loc;
return true;
@@ -327,7 +325,7 @@ bool UnwindPlan::Row::SetRegisterLocationToUnspecified(uint32_t reg_num,
if (!can_replace &&
m_register_locations.find(reg_num) != m_register_locations.end())
return false;
- RegisterLocation reg_loc;
+ AbstractRegisterLocation reg_loc;
reg_loc.SetUnspecified();
m_register_locations[reg_num] = reg_loc;
return true;
@@ -339,7 +337,7 @@ bool UnwindPlan::Row::SetRegisterLocationToRegister(uint32_t reg_num,
if (!can_replace &&
m_register_locations.find(reg_num) != m_register_locations.end())
return false;
- RegisterLocation reg_loc;
+ AbstractRegisterLocation reg_loc;
reg_loc.SetInRegister(other_reg_num);
m_register_locations[reg_num] = reg_loc;
return true;
@@ -350,19 +348,30 @@ bool UnwindPlan::Row::SetRegisterLocationToSame(uint32_t reg_num,
if (must_replace &&
m_register_locations.find(reg_num) == m_register_locations.end())
return false;
- RegisterLocation reg_loc;
+ AbstractRegisterLocation reg_loc;
reg_loc.SetSame();
m_register_locations[reg_num] = reg_loc;
return true;
}
+bool UnwindPlan::Row::SetRegisterLocationToIsDWARFExpression(
+ uint32_t reg_num, const uint8_t *opcodes, uint32_t len, bool can_replace) {
+ if (!can_replace &&
+ m_register_locations.find(reg_num) != m_register_locations.end())
+ return false;
+ AbstractRegisterLocation reg_loc;
+ reg_loc.SetIsDWARFExpression(opcodes, len);
+ m_register_locations[reg_num] = reg_loc;
+ return true;
+}
+
bool UnwindPlan::Row::SetRegisterLocationToIsConstant(uint32_t reg_num,
uint64_t constant,
bool can_replace) {
if (!can_replace &&
m_register_locations.find(reg_num) != m_register_locations.end())
return false;
- RegisterLocation reg_loc;
+ AbstractRegisterLocation reg_loc;
reg_loc.SetIsConstant(constant);
m_register_locations[reg_num] = reg_loc;
return true;
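A short sketch, not from the patch, of the renamed type in use, relying
only on the Row methods defined in this file; the register number is
arbitrary and <cassert> is assumed:

  UnwindPlan::Row row;
  UnwindPlan::Row::AbstractRegisterLocation loc;
  loc.SetAtCFAPlusOffset(-8); // caller's value saved in memory at CFA-8
  row.SetRegisterInfo(/*reg_num=*/29, loc);

  UnwindPlan::Row::AbstractRegisterLocation fetched;
  if (row.GetRegisterInfo(29, fetched))
    assert(fetched == loc); // operator== compares location type and payload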
diff --git a/lldb/source/Target/ABI.cpp b/lldb/source/Target/ABI.cpp
index 110b5c8..1a301d4 100644
--- a/lldb/source/Target/ABI.cpp
+++ b/lldb/source/Target/ABI.cpp
@@ -210,7 +210,7 @@ bool ABI::PrepareTrivialCall(Thread &thread, lldb::addr_t sp,
bool ABI::GetFallbackRegisterLocation(
const RegisterInfo *reg_info,
- UnwindPlan::Row::RegisterLocation &unwind_regloc) {
+ UnwindPlan::Row::AbstractRegisterLocation &unwind_regloc) {
// Did the UnwindPlan fail to give us the caller's stack pointer? The stack
// pointer is defined to be the same as THIS frame's CFA, so return the CFA
// value as the caller's stack pointer. This is true on x86-32/x86-64 at
diff --git a/lldb/source/Target/RegisterContextUnwind.cpp b/lldb/source/Target/RegisterContextUnwind.cpp
index a61228d..b683ea72 100644
--- a/lldb/source/Target/RegisterContextUnwind.cpp
+++ b/lldb/source/Target/RegisterContextUnwind.cpp
@@ -1108,14 +1108,14 @@ uint32_t RegisterContextUnwind::ConvertRegisterKindToRegisterNumber(
}
bool RegisterContextUnwind::ReadRegisterValueFromRegisterLocation(
- lldb_private::UnwindLLDB::RegisterLocation regloc,
+ lldb_private::UnwindLLDB::ConcreteRegisterLocation regloc,
const RegisterInfo *reg_info, RegisterValue &value) {
if (!IsValid())
return false;
bool success = false;
switch (regloc.type) {
- case UnwindLLDB::RegisterLocation::eRegisterInLiveRegisterContext: {
+ case UnwindLLDB::ConcreteRegisterLocation::eRegisterInLiveRegisterContext: {
const RegisterInfo *other_reg_info =
GetRegisterInfoAtIndex(regloc.location.register_number);
@@ -1125,7 +1125,7 @@ bool RegisterContextUnwind::ReadRegisterValueFromRegisterLocation(
success =
m_thread.GetRegisterContext()->ReadRegister(other_reg_info, value);
} break;
- case UnwindLLDB::RegisterLocation::eRegisterInRegister: {
+ case UnwindLLDB::ConcreteRegisterLocation::eRegisterInRegister: {
const RegisterInfo *other_reg_info =
GetRegisterInfoAtIndex(regloc.location.register_number);
@@ -1139,29 +1139,29 @@ bool RegisterContextUnwind::ReadRegisterValueFromRegisterLocation(
success = GetNextFrame()->ReadRegister(other_reg_info, value);
}
} break;
- case UnwindLLDB::RegisterLocation::eRegisterValueInferred:
+ case UnwindLLDB::ConcreteRegisterLocation::eRegisterValueInferred:
success =
value.SetUInt(regloc.location.inferred_value, reg_info->byte_size);
break;
- case UnwindLLDB::RegisterLocation::eRegisterNotSaved:
+ case UnwindLLDB::ConcreteRegisterLocation::eRegisterNotSaved:
break;
- case UnwindLLDB::RegisterLocation::eRegisterSavedAtHostMemoryLocation:
+ case UnwindLLDB::ConcreteRegisterLocation::eRegisterSavedAtHostMemoryLocation:
llvm_unreachable("FIXME debugger inferior function call unwind");
- case UnwindLLDB::RegisterLocation::eRegisterSavedAtMemoryLocation: {
+ case UnwindLLDB::ConcreteRegisterLocation::eRegisterSavedAtMemoryLocation: {
Status error(ReadRegisterValueFromMemory(
reg_info, regloc.location.target_memory_location, reg_info->byte_size,
value));
success = error.Success();
} break;
default:
- llvm_unreachable("Unknown RegisterLocation type.");
+ llvm_unreachable("Unknown ConcreteRegisterLocation type.");
}
return success;
}
bool RegisterContextUnwind::WriteRegisterValueToRegisterLocation(
- lldb_private::UnwindLLDB::RegisterLocation regloc,
+ lldb_private::UnwindLLDB::ConcreteRegisterLocation regloc,
const RegisterInfo *reg_info, const RegisterValue &value) {
if (!IsValid())
return false;
@@ -1169,13 +1169,13 @@ bool RegisterContextUnwind::WriteRegisterValueToRegisterLocation(
bool success = false;
switch (regloc.type) {
- case UnwindLLDB::RegisterLocation::eRegisterInLiveRegisterContext: {
+ case UnwindLLDB::ConcreteRegisterLocation::eRegisterInLiveRegisterContext: {
const RegisterInfo *other_reg_info =
GetRegisterInfoAtIndex(regloc.location.register_number);
success =
m_thread.GetRegisterContext()->WriteRegister(other_reg_info, value);
} break;
- case UnwindLLDB::RegisterLocation::eRegisterInRegister: {
+ case UnwindLLDB::ConcreteRegisterLocation::eRegisterInRegister: {
const RegisterInfo *other_reg_info =
GetRegisterInfoAtIndex(regloc.location.register_number);
if (IsFrameZero()) {
@@ -1185,19 +1185,19 @@ bool RegisterContextUnwind::WriteRegisterValueToRegisterLocation(
success = GetNextFrame()->WriteRegister(other_reg_info, value);
}
} break;
- case UnwindLLDB::RegisterLocation::eRegisterValueInferred:
- case UnwindLLDB::RegisterLocation::eRegisterNotSaved:
+ case UnwindLLDB::ConcreteRegisterLocation::eRegisterValueInferred:
+ case UnwindLLDB::ConcreteRegisterLocation::eRegisterNotSaved:
break;
- case UnwindLLDB::RegisterLocation::eRegisterSavedAtHostMemoryLocation:
+ case UnwindLLDB::ConcreteRegisterLocation::eRegisterSavedAtHostMemoryLocation:
llvm_unreachable("FIXME debugger inferior function call unwind");
- case UnwindLLDB::RegisterLocation::eRegisterSavedAtMemoryLocation: {
+ case UnwindLLDB::ConcreteRegisterLocation::eRegisterSavedAtMemoryLocation: {
Status error(WriteRegisterValueToMemory(
reg_info, regloc.location.target_memory_location, reg_info->byte_size,
value));
success = error.Success();
} break;
default:
- llvm_unreachable("Unknown RegisterLocation type.");
+ llvm_unreachable("Unknown ConcreteRegisterLocation type.");
}
return success;
}
@@ -1259,14 +1259,15 @@ bool RegisterContextUnwind::IsTrapHandlerSymbol(
enum UnwindLLDB::RegisterSearchResult
RegisterContextUnwind::SavedLocationForRegister(
- uint32_t lldb_regnum, lldb_private::UnwindLLDB::RegisterLocation &regloc) {
+ uint32_t lldb_regnum,
+ lldb_private::UnwindLLDB::ConcreteRegisterLocation &regloc) {
RegisterNumber regnum(m_thread, eRegisterKindLLDB, lldb_regnum);
Log *log = GetLog(LLDBLog::Unwind);
// Have we already found this register location?
if (!m_registers.empty()) {
std::map<uint32_t,
- lldb_private::UnwindLLDB::RegisterLocation>::const_iterator
+ lldb_private::UnwindLLDB::ConcreteRegisterLocation>::const_iterator
iterator;
iterator = m_registers.find(regnum.GetAsKind(eRegisterKindLLDB));
if (iterator != m_registers.end()) {
@@ -1279,7 +1280,7 @@ RegisterContextUnwind::SavedLocationForRegister(
// Look through the available UnwindPlans for the register location.
- UnwindPlan::Row::RegisterLocation unwindplan_regloc;
+ UnwindPlan::Row::AbstractRegisterLocation unwindplan_regloc;
bool have_unwindplan_regloc = false;
RegisterKind unwindplan_registerkind = kNumRegisterKinds;
@@ -1353,7 +1354,7 @@ RegisterContextUnwind::SavedLocationForRegister(
// signal was received, we should fetch the actual saved $pc
// value instead of the Return Address register.
// If $pc is not available, fall back to the RA reg.
- UnwindPlan::Row::RegisterLocation scratch;
+ UnwindPlan::Row::AbstractRegisterLocation scratch;
if (m_frame_type == eTrapHandlerFrame &&
active_row->GetRegisterInfo
(pc_regnum.GetAsKind (unwindplan_registerkind), scratch)) {
@@ -1404,9 +1405,9 @@ RegisterContextUnwind::SavedLocationForRegister(
BehavesLikeZerothFrame()) {
if (return_address_reg.GetAsKind(eRegisterKindLLDB) !=
LLDB_INVALID_REGNUM) {
- lldb_private::UnwindLLDB::RegisterLocation new_regloc;
- new_regloc.type =
- UnwindLLDB::RegisterLocation::eRegisterInLiveRegisterContext;
+ lldb_private::UnwindLLDB::ConcreteRegisterLocation new_regloc;
+ new_regloc.type = UnwindLLDB::ConcreteRegisterLocation::
+ eRegisterInLiveRegisterContext;
new_regloc.location.register_number =
return_address_reg.GetAsKind(eRegisterKindLLDB);
m_registers[regnum.GetAsKind(eRegisterKindLLDB)] = new_regloc;
@@ -1513,9 +1514,9 @@ RegisterContextUnwind::SavedLocationForRegister(
if (IsFrameZero()) {
// This is frame 0 - we should return the actual live register context
// value
- lldb_private::UnwindLLDB::RegisterLocation new_regloc;
+ lldb_private::UnwindLLDB::ConcreteRegisterLocation new_regloc;
new_regloc.type =
- UnwindLLDB::RegisterLocation::eRegisterInLiveRegisterContext;
+ UnwindLLDB::ConcreteRegisterLocation::eRegisterInLiveRegisterContext;
new_regloc.location.register_number = regnum.GetAsKind(eRegisterKindLLDB);
m_registers[regnum.GetAsKind(eRegisterKindLLDB)] = new_regloc;
regloc = new_regloc;
@@ -1539,8 +1540,8 @@ RegisterContextUnwind::SavedLocationForRegister(
// unwindplan_regloc has valid contents about where to retrieve the register
if (unwindplan_regloc.IsUnspecified()) {
- lldb_private::UnwindLLDB::RegisterLocation new_regloc = {};
- new_regloc.type = UnwindLLDB::RegisterLocation::eRegisterNotSaved;
+ lldb_private::UnwindLLDB::ConcreteRegisterLocation new_regloc = {};
+ new_regloc.type = UnwindLLDB::ConcreteRegisterLocation::eRegisterNotSaved;
m_registers[regnum.GetAsKind(eRegisterKindLLDB)] = new_regloc;
UnwindLogMsg("save location for %s (%d) is unspecified, continue searching",
regnum.GetName(), regnum.GetAsKind(eRegisterKindLLDB));
@@ -1564,7 +1565,7 @@ RegisterContextUnwind::SavedLocationForRegister(
regnum.GetName(), regnum.GetAsKind(eRegisterKindLLDB));
return UnwindLLDB::RegisterSearchResult::eRegisterNotFound;
} else {
- regloc.type = UnwindLLDB::RegisterLocation::eRegisterInRegister;
+ regloc.type = UnwindLLDB::ConcreteRegisterLocation::eRegisterInRegister;
regloc.location.register_number = regnum.GetAsKind(eRegisterKindLLDB);
m_registers[regnum.GetAsKind(eRegisterKindLLDB)] = regloc;
UnwindLogMsg(
@@ -1577,7 +1578,7 @@ RegisterContextUnwind::SavedLocationForRegister(
if (unwindplan_regloc.IsCFAPlusOffset()) {
int offset = unwindplan_regloc.GetOffset();
- regloc.type = UnwindLLDB::RegisterLocation::eRegisterValueInferred;
+ regloc.type = UnwindLLDB::ConcreteRegisterLocation::eRegisterValueInferred;
regloc.location.inferred_value = m_cfa + offset;
m_registers[regnum.GetAsKind(eRegisterKindLLDB)] = regloc;
UnwindLogMsg("supplying caller's register %s (%d), value is CFA plus "
@@ -1589,7 +1590,8 @@ RegisterContextUnwind::SavedLocationForRegister(
if (unwindplan_regloc.IsAtCFAPlusOffset()) {
int offset = unwindplan_regloc.GetOffset();
- regloc.type = UnwindLLDB::RegisterLocation::eRegisterSavedAtMemoryLocation;
+ regloc.type =
+ UnwindLLDB::ConcreteRegisterLocation::eRegisterSavedAtMemoryLocation;
regloc.location.target_memory_location = m_cfa + offset;
m_registers[regnum.GetAsKind(eRegisterKindLLDB)] = regloc;
UnwindLogMsg("supplying caller's register %s (%d) from the stack, saved at "
@@ -1604,7 +1606,7 @@ RegisterContextUnwind::SavedLocationForRegister(
return UnwindLLDB::RegisterSearchResult::eRegisterNotFound;
int offset = unwindplan_regloc.GetOffset();
- regloc.type = UnwindLLDB::RegisterLocation::eRegisterValueInferred;
+ regloc.type = UnwindLLDB::ConcreteRegisterLocation::eRegisterValueInferred;
regloc.location.inferred_value = m_afa + offset;
m_registers[regnum.GetAsKind(eRegisterKindLLDB)] = regloc;
UnwindLogMsg("supplying caller's register %s (%d), value is AFA plus "
@@ -1619,7 +1621,8 @@ RegisterContextUnwind::SavedLocationForRegister(
return UnwindLLDB::RegisterSearchResult::eRegisterNotFound;
int offset = unwindplan_regloc.GetOffset();
- regloc.type = UnwindLLDB::RegisterLocation::eRegisterSavedAtMemoryLocation;
+ regloc.type =
+ UnwindLLDB::ConcreteRegisterLocation::eRegisterSavedAtMemoryLocation;
regloc.location.target_memory_location = m_afa + offset;
m_registers[regnum.GetAsKind(eRegisterKindLLDB)] = regloc;
UnwindLogMsg("supplying caller's register %s (%d) from the stack, saved at "
@@ -1639,7 +1642,7 @@ RegisterContextUnwind::SavedLocationForRegister(
regnum.GetName(), regnum.GetAsKind(eRegisterKindLLDB));
return UnwindLLDB::RegisterSearchResult::eRegisterNotFound;
}
- regloc.type = UnwindLLDB::RegisterLocation::eRegisterInRegister;
+ regloc.type = UnwindLLDB::ConcreteRegisterLocation::eRegisterInRegister;
regloc.location.register_number = row_regnum.GetAsKind(eRegisterKindLLDB);
m_registers[regnum.GetAsKind(eRegisterKindLLDB)] = regloc;
UnwindLogMsg(
@@ -1670,7 +1673,8 @@ RegisterContextUnwind::SavedLocationForRegister(
addr_t val;
val = result->GetScalar().ULongLong();
if (unwindplan_regloc.IsDWARFExpression()) {
- regloc.type = UnwindLLDB::RegisterLocation::eRegisterValueInferred;
+ regloc.type =
+ UnwindLLDB::ConcreteRegisterLocation::eRegisterValueInferred;
regloc.location.inferred_value = val;
m_registers[regnum.GetAsKind(eRegisterKindLLDB)] = regloc;
UnwindLogMsg("supplying caller's register %s (%d) via DWARF expression "
@@ -1678,8 +1682,8 @@ RegisterContextUnwind::SavedLocationForRegister(
regnum.GetName(), regnum.GetAsKind(eRegisterKindLLDB));
return UnwindLLDB::RegisterSearchResult::eRegisterFound;
} else {
- regloc.type =
- UnwindLLDB::RegisterLocation::eRegisterSavedAtMemoryLocation;
+ regloc.type = UnwindLLDB::ConcreteRegisterLocation::
+ eRegisterSavedAtMemoryLocation;
regloc.location.target_memory_location = val;
m_registers[regnum.GetAsKind(eRegisterKindLLDB)] = regloc;
UnwindLogMsg("supplying caller's register %s (%d) via DWARF expression "
@@ -1695,7 +1699,7 @@ RegisterContextUnwind::SavedLocationForRegister(
}
if (unwindplan_regloc.IsConstant()) {
- regloc.type = UnwindLLDB::RegisterLocation::eRegisterValueInferred;
+ regloc.type = UnwindLLDB::ConcreteRegisterLocation::eRegisterValueInferred;
regloc.location.inferred_value = unwindplan_regloc.GetConstant();
m_registers[regnum.GetAsKind(eRegisterKindLLDB)] = regloc;
UnwindLogMsg("supplying caller's register %s (%d) via constant value",
@@ -1756,7 +1760,7 @@ bool RegisterContextUnwind::TryFallbackUnwindPlan() {
addr_t old_caller_pc_value = LLDB_INVALID_ADDRESS;
addr_t new_caller_pc_value = LLDB_INVALID_ADDRESS;
- UnwindLLDB::RegisterLocation regloc = {};
+ UnwindLLDB::ConcreteRegisterLocation regloc = {};
if (SavedLocationForRegister(pc_regnum.GetAsKind(eRegisterKindLLDB),
regloc) ==
UnwindLLDB::RegisterSearchResult::eRegisterFound) {
@@ -2188,7 +2192,7 @@ bool RegisterContextUnwind::ReadGPRValue(lldb::RegisterKind register_kind,
generic_regnum == LLDB_REGNUM_GENERIC_RA))
pc_register = true;
- lldb_private::UnwindLLDB::RegisterLocation regloc;
+ lldb_private::UnwindLLDB::ConcreteRegisterLocation regloc;
if (!m_parent_unwind.SearchForSavedLocationForRegister(
lldb_regnum, regloc, m_frame_number - 1, pc_register)) {
return false;
@@ -2235,7 +2239,7 @@ bool RegisterContextUnwind::ReadRegister(const RegisterInfo *reg_info,
is_pc_regnum = true;
}
- lldb_private::UnwindLLDB::RegisterLocation regloc;
+ lldb_private::UnwindLLDB::ConcreteRegisterLocation regloc;
// Find out where the NEXT frame saved THIS frame's register contents
if (!m_parent_unwind.SearchForSavedLocationForRegister(
lldb_regnum, regloc, m_frame_number - 1, is_pc_regnum))
@@ -2270,7 +2274,7 @@ bool RegisterContextUnwind::WriteRegister(const RegisterInfo *reg_info,
return m_thread.GetRegisterContext()->WriteRegister(reg_info, value);
}
- lldb_private::UnwindLLDB::RegisterLocation regloc;
+ lldb_private::UnwindLLDB::ConcreteRegisterLocation regloc;
// Find out where the NEXT frame saved THIS frame's register contents
if (!m_parent_unwind.SearchForSavedLocationForRegister(
lldb_regnum, regloc, m_frame_number - 1, false))
diff --git a/lldb/source/Target/Target.cpp b/lldb/source/Target/Target.cpp
index f9b6f7d..29e9efb 100644
--- a/lldb/source/Target/Target.cpp
+++ b/lldb/source/Target/Target.cpp
@@ -77,6 +77,80 @@
using namespace lldb;
using namespace lldb_private;
+namespace {
+
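+// Installs a non-main module: the remote destination comes entirely from the
+// module's explicit install file spec.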
+struct ExecutableInstaller {
+
+ ExecutableInstaller(PlatformSP platform, ModuleSP module)
+ : m_platform{platform}, m_module{module},
+ m_local_file{m_module->GetFileSpec()},
+ m_remote_file{m_module->GetRemoteInstallFileSpec()} {}
+
+ void setupRemoteFile() const { m_module->SetPlatformFileSpec(m_remote_file); }
+
+ PlatformSP m_platform;
+ ModuleSP m_module;
+ const FileSpec m_local_file;
+ const FileSpec m_remote_file;
+};
+
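+// Installs the main executable: if it has no explicit install file spec and
+// target.auto-install-main-executable is set, it is placed in the platform's
+// remote working directory; a successful install also updates the launch
+// info and marks the remote file executable.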
+struct MainExecutableInstaller {
+
+ MainExecutableInstaller(PlatformSP platform, ModuleSP module, TargetSP target,
+ ProcessLaunchInfo &launch_info)
+ : m_platform{platform}, m_module{module},
+ m_local_file{m_module->GetFileSpec()},
+ m_remote_file{
+ getRemoteFileSpec(m_platform, target, m_module, m_local_file)},
+ m_launch_info{launch_info} {}
+
+ void setupRemoteFile() const {
+ m_module->SetPlatformFileSpec(m_remote_file);
+ m_launch_info.SetExecutableFile(m_remote_file,
+ /*add_exe_file_as_first_arg=*/false);
+ m_platform->SetFilePermissions(m_remote_file, 0700 /*-rwx------*/);
+ }
+
+ PlatformSP m_platform;
+ ModuleSP m_module;
+ const FileSpec m_local_file;
+ const FileSpec m_remote_file;
+
+private:
+ static FileSpec getRemoteFileSpec(PlatformSP platform, TargetSP target,
+ ModuleSP module,
+ const FileSpec &local_file) {
+ FileSpec remote_file = module->GetRemoteInstallFileSpec();
+ if (remote_file || !target->GetAutoInstallMainExecutable())
+ return remote_file;
+
+ if (!local_file)
+ return {};
+
+ remote_file = platform->GetRemoteWorkingDirectory();
+ remote_file.AppendPathComponent(local_file.GetFilename().GetCString());
+
+ return remote_file;
+ }
+
+ ProcessLaunchInfo &m_launch_info;
+};
+} // namespace
+
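+// Shared driver for both installer types: skip modules with no local or
+// remote file, run the platform install, then let the installer publish the
+// remote location.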
+template <typename Installer>
+static Status installExecutable(const Installer &installer) {
+ if (!installer.m_local_file || !installer.m_remote_file)
+ return Status();
+
+ Status error = installer.m_platform->Install(installer.m_local_file,
+ installer.m_remote_file);
+ if (error.Fail())
+ return error;
+
+ installer.setupRemoteFile();
+ return Status();
+}
+
constexpr std::chrono::milliseconds EvaluateExpressionOptions::default_timeout;
Target::Arch::Arch(const ArchSpec &spec)
@@ -3077,48 +3151,28 @@ TargetProperties &Target::GetGlobalProperties() {
Status Target::Install(ProcessLaunchInfo *launch_info) {
Status error;
PlatformSP platform_sp(GetPlatform());
- if (platform_sp) {
- if (platform_sp->IsRemote()) {
- if (platform_sp->IsConnected()) {
- // Install all files that have an install path when connected to a
- // remote platform. If target.auto-install-main-executable is set then
- // also install the main executable even if it does not have an explicit
- // install path specified.
- const ModuleList &modules = GetImages();
- const size_t num_images = modules.GetSize();
- for (size_t idx = 0; idx < num_images; ++idx) {
- ModuleSP module_sp(modules.GetModuleAtIndex(idx));
- if (module_sp) {
- const bool is_main_executable = module_sp == GetExecutableModule();
- FileSpec local_file(module_sp->GetFileSpec());
- if (local_file) {
- FileSpec remote_file(module_sp->GetRemoteInstallFileSpec());
- if (!remote_file) {
- if (is_main_executable && GetAutoInstallMainExecutable()) {
- // Automatically install the main executable.
- remote_file = platform_sp->GetRemoteWorkingDirectory();
- remote_file.AppendPathComponent(
- module_sp->GetFileSpec().GetFilename().GetCString());
- }
- }
- if (remote_file) {
- error = platform_sp->Install(local_file, remote_file);
- if (error.Success()) {
- module_sp->SetPlatformFileSpec(remote_file);
- if (is_main_executable) {
- platform_sp->SetFilePermissions(remote_file, 0700);
- if (launch_info)
- launch_info->SetExecutableFile(remote_file, false);
- }
- } else
- break;
- }
- }
- }
- }
- }
+ if (!platform_sp || !platform_sp->IsRemote() || !platform_sp->IsConnected())
+ return error;
+
+ // Install all files that have an install path when connected to a
+ // remote platform. If target.auto-install-main-executable is set then
+ // also install the main executable even if it does not have an explicit
+ // install path specified.
+
+ for (auto module_sp : GetImages().Modules()) {
+ if (module_sp == GetExecutableModule()) {
+ MainExecutableInstaller installer{platform_sp, module_sp,
+ shared_from_this(), *launch_info};
+ error = installExecutable(installer);
+ } else {
+ ExecutableInstaller installer{platform_sp, module_sp};
+ error = installExecutable(installer);
}
+
+ if (error.Fail())
+ return error;
}
+
return error;
}
diff --git a/lldb/source/Target/TargetProperties.td b/lldb/source/Target/TargetProperties.td
index 0f68deb..fb61478 100644
--- a/lldb/source/Target/TargetProperties.td
+++ b/lldb/source/Target/TargetProperties.td
@@ -235,7 +235,7 @@ let Definition = "process" in {
def DisableLangRuntimeUnwindPlans: Property<"disable-language-runtime-unwindplans", "Boolean">,
Global,
DefaultFalse,
- Desc<"If true, language runtime augmented/overidden backtraces will not be used when printing a stack trace.">;
+ Desc<"If true, language runtime augmented/overridden backtraces will not be used when printing a stack trace.">;
def DetachKeepsStopped: Property<"detach-keeps-stopped", "Boolean">,
Global,
DefaultFalse,
diff --git a/lldb/source/Target/UnwindLLDB.cpp b/lldb/source/Target/UnwindLLDB.cpp
index f43e940..4d3f239 100644
--- a/lldb/source/Target/UnwindLLDB.cpp
+++ b/lldb/source/Target/UnwindLLDB.cpp
@@ -474,7 +474,8 @@ UnwindLLDB::GetRegisterContextForFrameNum(uint32_t frame_num) {
}
bool UnwindLLDB::SearchForSavedLocationForRegister(
- uint32_t lldb_regnum, lldb_private::UnwindLLDB::RegisterLocation &regloc,
+ uint32_t lldb_regnum,
+ lldb_private::UnwindLLDB::ConcreteRegisterLocation &regloc,
uint32_t starting_frame_num, bool pc_reg) {
int64_t frame_num = starting_frame_num;
if (static_cast<size_t>(frame_num) >= m_frames.size())
@@ -497,8 +498,8 @@ bool UnwindLLDB::SearchForSavedLocationForRegister(
// We descended down to the live register context aka stack frame 0 and are
// reading the value out of a live register.
if (result == UnwindLLDB::RegisterSearchResult::eRegisterFound &&
- regloc.type ==
- UnwindLLDB::RegisterLocation::eRegisterInLiveRegisterContext) {
+ regloc.type == UnwindLLDB::ConcreteRegisterLocation::
+ eRegisterInLiveRegisterContext) {
return true;
}
@@ -509,7 +510,8 @@ bool UnwindLLDB::SearchForSavedLocationForRegister(
// down the stack, or an actual value from a live RegisterContext at frame
// 0.
if (result == UnwindLLDB::RegisterSearchResult::eRegisterFound &&
- regloc.type == UnwindLLDB::RegisterLocation::eRegisterInRegister &&
+ regloc.type ==
+ UnwindLLDB::ConcreteRegisterLocation::eRegisterInRegister &&
frame_num > 0) {
result = UnwindLLDB::RegisterSearchResult::eRegisterNotFound;
lldb_regnum = regloc.location.register_number;
diff --git a/lldb/source/Utility/Status.cpp b/lldb/source/Utility/Status.cpp
index 4af3af5..7f73962 100644
--- a/lldb/source/Utility/Status.cpp
+++ b/lldb/source/Utility/Status.cpp
@@ -8,6 +8,8 @@
#include "lldb/Utility/Status.h"
+#include "lldb/Utility/LLDBLog.h"
+#include "lldb/Utility/Log.h"
#include "lldb/Utility/VASPrintf.h"
#include "lldb/lldb-defines.h"
#include "lldb/lldb-enumerations.h"
@@ -37,48 +39,84 @@ class raw_ostream;
using namespace lldb;
using namespace lldb_private;
-Status::Status() {}
+char CloneableError::ID;
+char CloneableECError::ID;
+char MachKernelError::ID;
+char Win32Error::ID;
+char ExpressionError::ID;
+
+namespace {
+/// A std::error_code category for eErrorTypeGeneric.
+class LLDBGenericCategory : public std::error_category {
+ const char *name() const noexcept override { return "LLDBGenericCategory"; }
+  std::string message(int) const override { return "generic LLDB error"; }
+};
+LLDBGenericCategory &lldb_generic_category() {
+ static LLDBGenericCategory g_generic_category;
+ return g_generic_category;
+}
+
+/// A std::error_code category for eErrorTypeExpression.
+class ExpressionCategory : public std::error_category {
+ const char *name() const noexcept override {
+ return "LLDBExpressionCategory";
+ }
+  std::string message(int ev) const override {
+    return ExpressionResultAsCString(
+        static_cast<lldb::ExpressionResults>(ev));
+  }
+};
+ExpressionCategory &expression_category() {
+ static ExpressionCategory g_expression_category;
+ return g_expression_category;
+}
+} // namespace
+
+Status::Status() : m_error(llvm::Error::success()) {}
+
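+// Map the legacy (value, type, message) triple onto the matching llvm::Error
+// subclass so the enum-based Status constructor keeps its old signature.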
+static llvm::Error ErrorFromEnums(Status::ValueType err, ErrorType type,
+ std::string msg) {
+ switch (type) {
+ case eErrorTypeMachKernel:
+ return llvm::make_error<MachKernelError>(
+ std::error_code(err, std::system_category()));
+ case eErrorTypeWin32:
+#ifdef _WIN32
+ if (err == NO_ERROR)
+ return llvm::Error::success();
+#endif
+ return llvm::make_error<Win32Error>(
+ std::error_code(err, std::system_category()));
+ case eErrorTypePOSIX:
+ if (msg.empty())
+ return llvm::errorCodeToError(
+ std::error_code(err, std::generic_category()));
+ return llvm::createStringError(
+ std::move(msg), std::error_code(err, std::generic_category()));
+ default:
+ return llvm::createStringError(
+ std::move(msg), std::error_code(err, lldb_generic_category()));
+ }
+}
Status::Status(ValueType err, ErrorType type, std::string msg)
- : m_code(err), m_type(type), m_string(std::move(msg)) {}
+ : m_error(ErrorFromEnums(err, type, msg)) {}
-// This logic is confusing because c++ calls the traditional (posix) errno codes
+// This logic is confusing because C++ calls the traditional (posix) errno codes
// "generic errors", while we use the term "generic" to mean completely
// arbitrary (text-based) errors.
Status::Status(std::error_code EC)
- : m_code(EC.value()),
- m_type(EC.category() == std::generic_category() ? eErrorTypePOSIX
- : eErrorTypeGeneric),
- m_string(EC.message()) {}
+ : m_error(!EC ? llvm::Error::success() : llvm::errorCodeToError(EC)) {}
Status::Status(std::string err_str)
- : m_code(LLDB_GENERIC_ERROR), m_type(eErrorTypeGeneric),
- m_string(std::move(err_str)) {}
+ : m_error(
+ llvm::createStringError(llvm::inconvertibleErrorCode(), err_str)) {}
-Status::Status(llvm::Error error) {
- if (!error) {
- Clear();
- return;
- }
-
- // if the error happens to be a errno error, preserve the error code
- error = llvm::handleErrors(
- std::move(error), [&](std::unique_ptr<llvm::ECError> e) -> llvm::Error {
- std::error_code ec = e->convertToErrorCode();
- if (ec.category() == std::generic_category()) {
- m_code = ec.value();
- m_type = ErrorType::eErrorTypePOSIX;
- return llvm::Error::success();
- }
- return llvm::Error(std::move(e));
- });
-
- // Otherwise, just preserve the message
- if (error) {
- m_code = LLDB_GENERIC_ERROR;
- m_type = eErrorTypeGeneric;
- m_string = llvm::toString(std::move(error));
- }
+const Status &Status::operator=(Status &&other) {
+ Clear();
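+  // Mark the current error as checked so it can be overwritten without
+  // tripping llvm::Error's unchecked-error assertion.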
+ llvm::consumeError(std::move(m_error));
+ m_error = std::move(other.m_error);
+ return *this;
}
Status Status::FromErrorStringWithFormat(const char *format, ...) {
@@ -94,25 +132,35 @@ Status Status::FromErrorStringWithFormat(const char *format, ...) {
return Status(string);
}
-Status Status::FromError(llvm::Error error) { return Status(std::move(error)); }
+Status Status::FromExpressionError(lldb::ExpressionResults result,
+ std::string msg) {
+ return Status(llvm::make_error<ExpressionError>(
+ std::error_code(result, expression_category()), msg));
+}
-llvm::Error Status::ToError() const {
- if (Success())
- return llvm::Error::success();
- if (m_type == ErrorType::eErrorTypePOSIX)
- return llvm::errorCodeToError(
- std::error_code(m_code, std::generic_category()));
- return llvm::createStringError(AsCString());
+/// Creates a deep copy of all known errors and converts all other
+/// errors to a new llvm::StringError.
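+/// llvm::Error is move-only, so ToError() has to hand out a clone built here
+/// rather than the stored error itself.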
+static llvm::Error CloneError(const llvm::Error &error) {
+ llvm::Error result = llvm::Error::success();
+ auto clone = [](const llvm::ErrorInfoBase &e) {
+ if (e.isA<CloneableError>())
+ return llvm::Error(static_cast<const CloneableError &>(e).Clone());
+ if (e.isA<llvm::ECError>())
+ return llvm::errorCodeToError(e.convertToErrorCode());
+ return llvm::make_error<llvm::StringError>(e.message(),
+ e.convertToErrorCode(), true);
+ };
+ llvm::visitErrors(error, [&](const llvm::ErrorInfoBase &e) {
+ result = joinErrors(std::move(result), clone(e));
+ });
+ return result;
}
-Status::~Status() = default;
+Status Status::FromError(llvm::Error error) { return Status(std::move(error)); }
+
+llvm::Error Status::ToError() const { return CloneError(m_error); }
-const Status &Status::operator=(Status &&other) {
- m_code = other.m_code;
- m_type = other.m_type;
- m_string = std::move(other.m_string);
- return *this;
-}
+Status::~Status() { llvm::consumeError(std::move(m_error)); }
#ifdef _WIN32
static std::string RetrieveWin32ErrorString(uint32_t error_code) {
@@ -140,6 +188,33 @@ static std::string RetrieveWin32ErrorString(uint32_t error_code) {
}
#endif
+std::string MachKernelError::message() const {
+#if defined(__APPLE__)
+ if (const char *s = ::mach_error_string(convertToErrorCode().value()))
+ return s;
+#endif
+ return "MachKernelError";
+}
+
+std::string Win32Error::message() const {
+#if defined(_WIN32)
+ return RetrieveWin32ErrorString(convertToErrorCode().value());
+#endif
+ return "Win32Error";
+}
+
+std::unique_ptr<CloneableError> MachKernelError::Clone() const {
+ return std::make_unique<MachKernelError>(convertToErrorCode());
+}
+
+std::unique_ptr<CloneableError> Win32Error::Clone() const {
+ return std::make_unique<Win32Error>(convertToErrorCode());
+}
+
+std::unique_ptr<CloneableError> ExpressionError::Clone() const {
+ return std::make_unique<ExpressionError>(convertToErrorCode(), message());
+}
+
// Get the error value as a NULL C string. The error string will be fetched and
// cached on demand. The cached error string value will remain until the error
// value is changed or cleared.
@@ -147,29 +222,12 @@ const char *Status::AsCString(const char *default_error_str) const {
if (Success())
return nullptr;
- if (m_string.empty()) {
- switch (m_type) {
- case eErrorTypeMachKernel:
-#if defined(__APPLE__)
- if (const char *s = ::mach_error_string(m_code))
- m_string.assign(s);
-#endif
- break;
-
- case eErrorTypePOSIX:
- m_string = llvm::sys::StrError(m_code);
- break;
-
- case eErrorTypeWin32:
-#if defined(_WIN32)
- m_string = RetrieveWin32ErrorString(m_code);
-#endif
- break;
+ m_string = llvm::toStringWithoutConsuming(m_error);
+ // Backwards compatibility with older implementations of Status.
+ if (m_error.isA<llvm::ECError>())
+ if (!m_string.empty() && m_string[m_string.size() - 1] == '\n')
+ m_string.pop_back();
- default:
- break;
- }
- }
if (m_string.empty()) {
if (default_error_str)
m_string.assign(default_error_str);
@@ -181,29 +239,59 @@ const char *Status::AsCString(const char *default_error_str) const {
// Clear the error and any cached error string that it might contain.
void Status::Clear() {
- m_code = 0;
- m_type = eErrorTypeInvalid;
- m_string.clear();
+ if (m_error)
+ LLDB_LOG_ERRORV(GetLog(LLDBLog::API), std::move(m_error),
+ "dropping error {0}");
+ m_error = llvm::Error::success();
}
-// Access the error value.
-Status::ValueType Status::GetError() const { return m_code; }
+Status::ValueType Status::GetError() const {
+ Status::ValueType result = 0;
+ llvm::visitErrors(m_error, [&](const llvm::ErrorInfoBase &error) {
+ // Return the first only.
+ if (result)
+ return;
+ std::error_code ec = error.convertToErrorCode();
+ result = ec.value();
+ });
+ return result;
+}
// Access the error type.
-ErrorType Status::GetType() const { return m_type; }
-
-// Returns true if this object contains a value that describes an error or
-// otherwise non-success result.
-bool Status::Fail() const { return m_code != 0; }
+ErrorType Status::GetType() const {
+ ErrorType result = eErrorTypeInvalid;
+ llvm::visitErrors(m_error, [&](const llvm::ErrorInfoBase &error) {
+ // Return the first only.
+ if (result != eErrorTypeInvalid)
+ return;
+ if (error.isA<MachKernelError>())
+ result = eErrorTypeMachKernel;
+ else if (error.isA<Win32Error>())
+ result = eErrorTypeWin32;
+ else if (error.isA<ExpressionError>())
+ result = eErrorTypeExpression;
+ else if (error.convertToErrorCode().category() == std::generic_category())
+ result = eErrorTypePOSIX;
+ else if (error.convertToErrorCode().category() == lldb_generic_category() ||
+ error.convertToErrorCode() == llvm::inconvertibleErrorCode())
+ result = eErrorTypeGeneric;
+ else
+ result = eErrorTypeInvalid;
+ });
+ return result;
+}
-Status Status::FromErrno() {
- // Update the error value to be "errno" and update the type to be "POSIX".
- return Status(errno, eErrorTypePOSIX);
+bool Status::Fail() const {
+ // Note that this does not clear the checked flag in
+ // m_error. Otherwise we'd need to make this thread-safe.
+ return m_error.isA<llvm::ErrorInfoBase>();
}
+Status Status::FromErrno() { return Status(llvm::errnoAsErrorCode()); }
+
// Returns true if the error code in this object is considered a successful
// return value.
-bool Status::Success() const { return m_code == 0; }
+bool Status::Success() const { return !Fail(); }
void llvm::format_provider<lldb_private::Status>::format(
const lldb_private::Status &error, llvm::raw_ostream &OS,
diff --git a/lldb/test/API/commands/command/script/add/TestAddParsedCommand.py b/lldb/test/API/commands/command/script/add/TestAddParsedCommand.py
index c7680e9..6fac1eb 100644
--- a/lldb/test/API/commands/command/script/add/TestAddParsedCommand.py
+++ b/lldb/test/API/commands/command/script/add/TestAddParsedCommand.py
@@ -68,6 +68,57 @@ class ParsedCommandTestCase(TestBase):
return results
+ def handle_completion(
+ self,
+ cmd_str,
+ exp_num_completions,
+ exp_matches,
+ exp_descriptions,
+ match_description,
+ ):
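+        """Ask the interpreter to complete cmd_str and verify the returned
+        matches (and, when match_description is set, the descriptions)
+        against the expected SBStringList values."""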
+ matches = lldb.SBStringList()
+ descriptions = lldb.SBStringList()
+
+ interp = self.dbg.GetCommandInterpreter()
+ num_completions = interp.HandleCompletionWithDescriptions(
+ cmd_str, len(cmd_str), 0, 1000, matches, descriptions
+ )
+ self.assertEqual(
+ num_completions, exp_num_completions, "Number of completions is right."
+ )
+ num_matches = matches.GetSize()
+ self.assertEqual(
+ num_matches,
+ exp_matches.GetSize(),
+ "matches and expected matches of different lengths",
+ )
+ num_descriptions = descriptions.GetSize()
+ if match_description:
+ self.assertEqual(
+ num_descriptions,
+ exp_descriptions.GetSize(),
+ "descriptions and expected of different lengths",
+ )
+
+ self.assertEqual(
+ matches.GetSize(),
+ num_completions + 1,
+ "The first element is the complete additional text",
+ )
+
+ for idx in range(0, num_matches):
+ match = matches.GetStringAtIndex(idx)
+ exp_match = exp_matches.GetStringAtIndex(idx)
+ self.assertEqual(
+ match, exp_match, f"{match} did not match expectation: {exp_match}"
+ )
+ if match_description:
+ desc = descriptions.GetStringAtIndex(idx)
+ exp_desc = exp_descriptions.GetStringAtIndex(idx)
+ self.assertEqual(
+ desc, exp_desc, f"{desc} didn't match expectation: {exp_desc}"
+ )
+
def pycmd_tests(self):
source_dir = self.getSourceDir()
test_file_path = os.path.join(source_dir, "test_commands.py")
@@ -176,24 +227,10 @@ class ParsedCommandTestCase(TestBase):
descriptions = lldb.SBStringList()
# First try an enum completion:
- num_completions = interp.HandleCompletionWithDescriptions(
- "no-args -e f", 12, 0, 1000, matches, descriptions
- )
- self.assertEqual(num_completions, 1, "Only one completion for foo")
- self.assertEqual(
- matches.GetSize(), 2, "The first element is the complete additional text"
- )
- self.assertEqual(
- matches.GetStringAtIndex(0), "oo ", "And we got the right extra characters"
- )
- self.assertEqual(
- matches.GetStringAtIndex(1), "foo", "And we got the right match"
- )
- self.assertEqual(
- descriptions.GetSize(), 2, "descriptions matche the return length"
- )
- # FIXME: we don't return descriptions for enum elements
- # self.assertEqual(descriptions.GetStringAtIndex(1), "does foo things", "And we got the right description")
+ # Note - this is an enum so all the values are returned:
+ matches.AppendList(["oo ", "foo"], 2)
+
+ self.handle_completion("no-args -e f", 1, matches, descriptions, False)
# Now try an internal completer, the on disk file one is handy:
partial_name = os.path.join(source_dir, "test_")
@@ -201,24 +238,9 @@ class ParsedCommandTestCase(TestBase):
matches.Clear()
descriptions.Clear()
- num_completions = interp.HandleCompletionWithDescriptions(
- cmd_str, len(cmd_str) - 1, 0, 1000, matches, descriptions
- )
- self.assertEqual(num_completions, 1, "Only one completion for source file")
- self.assertEqual(matches.GetSize(), 2, "The first element is the complete line")
- self.assertEqual(
- matches.GetStringAtIndex(0),
- "commands.py' ",
- "And we got the right extra characters",
- )
- self.assertEqual(
- matches.GetStringAtIndex(1), test_file_path, "And we got the right match"
- )
- self.assertEqual(
- descriptions.GetSize(), 2, "descriptions match the return length"
- )
- # FIXME: we don't return descriptions for enum elements
- # self.assertEqual(descriptions.GetStringAtIndex(1), "does foo things", "And we got the right description")
+ matches.AppendList(["commands.py' ", test_file_path], 2)
+ # We don't have descriptions for the file path completer:
+ self.handle_completion(cmd_str, 1, matches, descriptions, False)
# Try a command with arguments.
# FIXME: It should be enough to define an argument and it's type to get the completer
@@ -231,6 +253,44 @@ class ParsedCommandTestCase(TestBase):
substrs=["0: First Argument", "1: Second Argument"],
)
+ # Now test custom completions - two-args has both option and arg completers. In both
+ # completers we return different values if the -p option is set, so we can test that too:
+ matches.Clear()
+ descriptions.Clear()
+ cmd_str = "two-args -p something -c other_"
+ matches.AppendString("something ")
+ matches.AppendString("other_something")
+ # This is a full match so no descriptions:
+ self.handle_completion(cmd_str, 1, matches, descriptions, False)
+
+ matches.Clear()
+ descriptions.Clear()
+ cmd_str = "two-args -c other_"
+ matches.AppendList(["", "other_nice", "other_not_nice", "other_mediocre"], 4)
+ # The option doesn't return descriptions either:
+ self.handle_completion(cmd_str, 3, matches, descriptions, False)
+
+ # Now try the argument - it says "no completions" if the proc_name was set:
+ matches.Clear()
+ descriptions.Clear()
+ cmd_str = "two-args -p something arg"
+ matches.AppendString("")
+ self.handle_completion(cmd_str, 0, matches, descriptions, False)
+
+ cmd_str = "two-args arg_"
+ matches.Clear()
+ descriptions.Clear()
+ matches.AppendList(["", "arg_cool", "arg_yuck"], 3)
+ descriptions.AppendList(["", "good idea", "bad idea"], 3)
+ self.handle_completion(cmd_str, 2, matches, descriptions, True)
+
+ # This one gets a single unique match:
+ cmd_str = "two-args correct_"
+ matches.Clear()
+ descriptions.Clear()
+ matches.AppendList(["answer ", "correct_answer"], 2)
+ self.handle_completion(cmd_str, 1, matches, descriptions, False)
+
# Now make sure get_repeat_command works properly:
# no-args turns off auto-repeat
diff --git a/lldb/test/API/commands/command/script/add/test_commands.py b/lldb/test/API/commands/command/script/add/test_commands.py
index fcde6cd..b15ea93 100644
--- a/lldb/test/API/commands/command/script/add/test_commands.py
+++ b/lldb/test/API/commands/command/script/add/test_commands.py
@@ -18,7 +18,7 @@ class ReportingCmd(ParsedCommand):
for long_option, elem in opt_def.items():
dest = elem["dest"]
result.AppendMessage(
- f"{long_option} (set: {elem['_value_set']}): {object.__getattribute__(self.ov_parser, dest)}\n"
+ f"{long_option} (set: {elem['_value_set']}): {object.__getattribute__(self.get_parser(), dest)}\n"
)
else:
result.AppendMessage("No options\n")
@@ -31,7 +31,6 @@ class ReportingCmd(ParsedCommand):
f"{idx}: {args_array.GetItemAtIndex(idx).GetStringValue(10000)}\n"
)
-
# Use these to make sure that get_repeat_command sends the right
# command.
no_args_repeat = None
@@ -49,7 +48,8 @@ class NoArgsCommand(ReportingCmd):
ParsedCommand.do_register_cmd(cls, debugger, module_name)
def setup_command_definition(self):
- self.ov_parser.add_option(
+ ov_parser = self.get_parser()
+ ov_parser.add_option(
"b",
"bool-arg",
"a boolean arg, defaults to True",
@@ -59,7 +59,7 @@ class NoArgsCommand(ReportingCmd):
default=True,
)
- self.ov_parser.add_option(
+ ov_parser.add_option(
"s",
"shlib-name",
"A shared library name.",
@@ -69,7 +69,7 @@ class NoArgsCommand(ReportingCmd):
default=None,
)
- self.ov_parser.add_option(
+ ov_parser.add_option(
"d",
"disk-file-name",
"An on disk filename",
@@ -78,7 +78,7 @@ class NoArgsCommand(ReportingCmd):
default=None,
)
- self.ov_parser.add_option(
+ ov_parser.add_option(
"l",
"line-num",
"A line number",
@@ -88,7 +88,7 @@ class NoArgsCommand(ReportingCmd):
default=0,
)
- self.ov_parser.add_option(
+ ov_parser.add_option(
"e",
"enum-option",
"An enum, doesn't actually do anything",
@@ -126,8 +126,9 @@ class OneArgCommandNoOptions(ReportingCmd):
ParsedCommand.do_register_cmd(cls, debugger, module_name)
def setup_command_definition(self):
- self.ov_parser.add_argument_set(
- [self.ov_parser.make_argument_element(lldb.eArgTypeSourceFile, "plain")]
+ ov_parser = self.get_parser()
+ ov_parser.add_argument_set(
+ [ov_parser.make_argument_element(lldb.eArgTypeSourceFile, "plain")]
)
def get_repeat_command(self, command):
@@ -154,7 +155,8 @@ class TwoArgGroupsCommand(ReportingCmd):
ParsedCommand.do_register_cmd(cls, debugger, module_name)
def setup_command_definition(self):
- self.ov_parser.add_option(
+ ov_parser = self.get_parser()
+ ov_parser.add_option(
"l",
"language",
"language defaults to None",
@@ -164,7 +166,7 @@ class TwoArgGroupsCommand(ReportingCmd):
default=None,
)
- self.ov_parser.add_option(
+ ov_parser.add_option(
"c",
"log-channel",
"log channel - defaults to lldb",
@@ -174,7 +176,7 @@ class TwoArgGroupsCommand(ReportingCmd):
default="lldb",
)
- self.ov_parser.add_option(
+ ov_parser.add_option(
"p",
"process-name",
"A process name, defaults to None",
@@ -183,25 +185,23 @@ class TwoArgGroupsCommand(ReportingCmd):
default=None,
)
- self.ov_parser.add_argument_set(
+ ov_parser.add_argument_set(
[
- self.ov_parser.make_argument_element(
+ ov_parser.make_argument_element(
lldb.eArgTypeClassName, "plain", [1, 2]
),
- self.ov_parser.make_argument_element(
+ ov_parser.make_argument_element(
lldb.eArgTypeOffset, "optional", [1, 2]
),
]
)
- self.ov_parser.add_argument_set(
+ ov_parser.add_argument_set(
[
- self.ov_parser.make_argument_element(
+ ov_parser.make_argument_element(
lldb.eArgTypePythonClass, "plain", [3, 4]
),
- self.ov_parser.make_argument_element(
- lldb.eArgTypePid, "optional", [3, 4]
- ),
+ ov_parser.make_argument_element(lldb.eArgTypePid, "optional", [3, 4]),
]
)
@@ -210,6 +210,35 @@ class TwoArgGroupsCommand(ReportingCmd):
two_arg_repeat = command
return command + " THIRD_ARG"
+ def handle_option_argument_completion(self, long_option, cursor_pos):
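+        # Option-value completers return either a single (possibly partial)
+        # completion as {"completion": ..., "mode": ...} or a candidate list
+        # as {"values": [...]}.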
+ ov_parser = self.get_parser()
+ value = ov_parser.dest_for_option(long_option)[0 : cursor_pos + 1]
+ proc_value = ov_parser.proc_name
+        if proc_value is not None:
+ new_str = value + proc_value
+ ret_arr = {"completion": new_str, "mode": "partial"}
+ return ret_arr
+
+ ret_arr = {"values": [value + "nice", value + "not_nice", value + "mediocre"]}
+ return ret_arr
+
+ def handle_argument_completion(self, args, arg_pos, cursor_pos):
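+        # args holds the words typed so far; arg_pos/cursor_pos locate the
+        # completion point. Returning True means handled with no completions.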
+ ov_parser = self.get_parser()
+ orig_arg = args[arg_pos][0:cursor_pos]
+ if orig_arg == "correct_":
+ ret_arr = {"completion": "correct_answer"}
+ return ret_arr
+
+ if ov_parser.was_set("process-name"):
+ # No completions if proc_name was set.
+ return True
+
+ ret_arr = {
+ "values": [orig_arg + "cool", orig_arg + "yuck"],
+ "descriptions": ["good idea", "bad idea"],
+ }
+ return ret_arr
+
def get_short_help(self):
return "This is my short help string"
diff --git a/lldb/test/API/functionalities/gdb_remote_client/TestAArch64XMLRegistersSVEOnly.py b/lldb/test/API/functionalities/gdb_remote_client/TestAArch64XMLRegistersSVEOnly.py
new file mode 100644
index 0000000..e36013a
--- /dev/null
+++ b/lldb/test/API/functionalities/gdb_remote_client/TestAArch64XMLRegistersSVEOnly.py
@@ -0,0 +1,121 @@
+""" Check that when a debug server provides XML that only defines SVE Z registers,
+ and does not include Neon V registers, lldb creates sub-registers to represent
+ the V registers as the bottom 128 bits of the Z registers.
+
+ qemu-aarch64 is one such debug server.
+
+ This also doubles as a test that lldb has a fallback path for registers of
+ unknown type that are > 128 bits, as the SVE registers are here.
+"""
+
+from textwrap import dedent
+import lldb
+from lldbsuite.test.lldbtest import *
+from lldbsuite.test.decorators import *
+from lldbsuite.test.gdbclientutils import *
+from lldbsuite.test.lldbgdbclient import GDBRemoteTestBase
+
+
+class Responder(MockGDBServerResponder):
+ def __init__(self):
+ super().__init__()
+ self.vg = 4
+ self.pc = 0xA0A0A0A0A0A0A0A0
+
+ def qXferRead(self, obj, annex, offset, length):
+ if annex == "target.xml":
+ # Note that QEMU sends the current SVE size in XML and the debugger
+ # then reads vg to know the latest size.
+ return (
+ dedent(
+ """\
+ <?xml version="1.0"?>
+ <target version="1.0">
+ <architecture>aarch64</architecture>
+ <feature name="org.gnu.gdb.aarch64.core">
+ <reg name="pc" regnum="0" bitsize="64"/>
+ <reg name="vg" regnum="1" bitsize="64"/>
+ <reg name="z0" regnum="2" bitsize="2048" type="not_a_type"/>
+ </feature>
+ </target>"""
+ ),
+ False,
+ )
+
+ return (None,)
+
+ def readRegister(self, regnum):
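+        # Fail single-register reads so lldb has to rely on the batched
+        # readRegisters data below.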
+ return "E01"
+
+ def readRegisters(self):
+ return "".join(
+ [
+ # 64 bit PC.
+ f"{self.pc:x}",
+ # 64 bit vg
+ f"0{self.vg}00000000000000",
+ # Enough data for 256 and 512 bit SVE.
+ "".join([f"{n:02x}" * 4 for n in range(1, 17)]),
+ ]
+ )
+
+ def cont(self):
+ # vg is expedited so that lldb can resize the SVE registers.
+ return f"T02thread:1ff0d;threads:1ff0d;thread-pcs:{self.pc};01:0{self.vg}00000000000000;"
+
+ def writeRegisters(self, registers_hex):
+ # We get a block of data containing values in regnum order.
+ self.vg = int(registers_hex[16:18])
+ return "OK"
+
+
+class TestXMLRegisterFlags(GDBRemoteTestBase):
+ def check_regs(self, vg):
+ # Each 32 bit chunk repeats n.
+ z0_value = " ".join(
+ [" ".join([f"0x{n:02x}"] * 4) for n in range(1, (vg * 2) + 1)]
+ )
+
+ self.expect(
+ "register read vg z0 v0 s0 d0",
+ substrs=[
+ f" vg = 0x000000000000000{vg}\n"
+ " z0 = {" + z0_value + "}\n"
+ " v0 = {0x01 0x01 0x01 0x01 0x02 0x02 0x02 0x02 0x03 0x03 0x03 0x03 0x04 0x04 0x04 0x04}\n"
+ " s0 = 2.36942783E-38\n"
+ " d0 = 5.3779407333977203E-299\n"
+ ],
+ )
+
+ self.expect("register read s0 --format uint32", substrs=["s0 = {0x01010101}"])
+ self.expect(
+ "register read d0 --format uint64",
+ substrs=["d0 = {0x0202020201010101}"],
+ )
+
+ @skipIfXmlSupportMissing
+ @skipIfRemote
+ @skipIfLLVMTargetMissing("AArch64")
+ def test_v_sub_registers(self):
+ self.server.responder = Responder()
+ target = self.dbg.CreateTarget("")
+
+ if self.TraceOn():
+ self.runCmd("log enable gdb-remote packets")
+ self.addTearDownHook(lambda: self.runCmd("log disable gdb-remote packets"))
+
+ process = self.connect(target)
+ lldbutil.expect_state_changes(
+ self, self.dbg.GetListener(), process, [lldb.eStateStopped]
+ )
+
+ self.check_regs(4)
+
+ # Now increase the SVE length and continue. The mock will respond with a new
+ # vg and lldb will reconfigure the register defs. This should not break the
+ # sub-registers.
+
+ self.runCmd("register write vg 8")
+ self.expect("continue", substrs=["stop reason = signal SIGINT"])
+
+ self.check_regs(8)
diff --git a/lldb/test/API/functionalities/gdb_remote_client/TestGDBRemotePlatformFile.py b/lldb/test/API/functionalities/gdb_remote_client/TestGDBRemotePlatformFile.py
index c902722..69e04df 100644
--- a/lldb/test/API/functionalities/gdb_remote_client/TestGDBRemotePlatformFile.py
+++ b/lldb/test/API/functionalities/gdb_remote_client/TestGDBRemotePlatformFile.py
@@ -49,25 +49,23 @@ class TestGDBRemotePlatformFile(GDBPlatformClientTestBase):
return "F-1,58"
self.server.responder = Responder()
-
+ enosys_regex = r"error: (Function not implemented|function not supported)"
self.match(
"platform file open /some/file.txt -v 0755",
- [r"error: Function not implemented"],
+ [enosys_regex],
error=True,
)
self.match(
"platform file read 16 -o 11 -c 13",
- [r"error: Function not implemented"],
+ [enosys_regex],
error=True,
)
self.match(
"platform file write 16 -o 11 -d teststring",
- [r"error: Function not implemented"],
+ [enosys_regex],
error=True,
)
- self.match(
- "platform file close 16", [r"error: Function not implemented"], error=True
- )
+ self.match("platform file close 16", [enosys_regex], error=True)
self.assertPacketLogContains(
[
"vFile:open:2f736f6d652f66696c652e747874,00000202,000001ed",
diff --git a/lldb/test/API/macosx/expedited-thread-pcs/Makefile b/lldb/test/API/macosx/expedited-thread-pcs/Makefile
new file mode 100644
index 0000000..7799f06
--- /dev/null
+++ b/lldb/test/API/macosx/expedited-thread-pcs/Makefile
@@ -0,0 +1,11 @@
+CXX_SOURCES := main.cpp
+
+.PHONY: build-libfoo
+all: build-libfoo a.out
+
+include Makefile.rules
+
+build-libfoo: foo.c
+ $(MAKE) -f $(MAKEFILE_RULES) \
+ DYLIB_C_SOURCES=foo.c DYLIB_NAME=foo DYLIB_ONLY=YES
+
diff --git a/lldb/test/API/macosx/expedited-thread-pcs/TestExpeditedThreadPCs.py b/lldb/test/API/macosx/expedited-thread-pcs/TestExpeditedThreadPCs.py
new file mode 100644
index 0000000..0611907
--- /dev/null
+++ b/lldb/test/API/macosx/expedited-thread-pcs/TestExpeditedThreadPCs.py
@@ -0,0 +1,91 @@
+"""Test that the expedited thread pc values are not re-fetched by lldb."""
+
+import subprocess
+import lldb
+from lldbsuite.test.decorators import *
+from lldbsuite.test.lldbtest import *
+from lldbsuite.test import lldbutil
+
+file_index = 0
+
+
+class TestExpeditedThreadPCs(TestBase):
+ NO_DEBUG_INFO_TESTCASE = True
+
+ @skipUnlessDarwin
+ def test_expedited_thread_pcs(self):
+ TestBase.setUp(self)
+
+ global file_index
+        file_index += 1
+ logfile = os.path.join(
+ self.getBuildDir(),
+ "packet-log-" + self.getArchitecture() + "-" + str(file_index) + ".txt",
+ )
+ self.runCmd("log enable -f %s gdb-remote packets" % (logfile))
+
+ def cleanup():
+ self.runCmd("log disable gdb-remote packets")
+ if os.path.exists(logfile):
+ os.unlink(logfile)
+
+ self.addTearDownHook(cleanup)
+
+ self.source = "main.cpp"
+ self.build()
+ (target, process, thread, bkpt) = lldbutil.run_to_source_breakpoint(
+ self, "break here", lldb.SBFileSpec(self.source, False)
+ )
+
+ # verify that libfoo.dylib hasn't loaded yet
+ for m in target.modules:
+ self.assertNotEqual(m.GetFileSpec().GetFilename(), "libfoo.dylib")
+
+ thread.StepInto()
+ thread.StepInto()
+
+ thread.StepInto()
+ thread.StepInto()
+ thread.StepInto()
+
+        # verify that libfoo.dylib has loaded
+        found_libfoo = False
+        for m in target.modules:
+            if m.GetFileSpec().GetFilename() == "libfoo.dylib":
+                found_libfoo = True
+        self.assertTrue(found_libfoo)
+
+ thread.StepInto()
+ thread.StepInto()
+ thread.StepOver()
+ thread.StepOver()
+ thread.StepOver()
+ thread.StepOver()
+ thread.StepOver()
+ thread.StepOver()
+ thread.StepOver()
+ thread.StepOver()
+ thread.StepOver()
+ thread.StepOver()
+
+ process.Kill()
+
+ # Confirm that we never fetched the pc for any threads during
+ # this debug session.
+ if os.path.exists(logfile):
+ f = open(logfile)
+ lines = f.readlines()
+            arch = self.getArchitecture()
+            for line in lines:
+ if arch == "arm64" or arch == "arm64_32":
+ # <reg name="pc" regnum="32" offset="256" bitsize="64" group="general" group_id="1" ehframe_regnum="32" dwarf_regnum="32" generic="pc"/>
+ # A fetch of $pc on arm64 looks like
+ # < 22> send packet: $p20;thread:91698e;#70
+ self.assertNotIn("$p20;thread", line)
+ else:
+ # <reg name="rip" regnum="16" offset="128" bitsize="64" group="general" altname="pc" group_id="1" ehframe_regnum="16" dwarf_regnum="16" generic="pc"/>
+ # A fetch of $pc on x86_64 looks like
+ # < 22> send packet: $p10;thread:91889c;#6f
+ self.assertNotIn("$p10;thread", line)
+
+ f.close()
diff --git a/lldb/test/API/macosx/expedited-thread-pcs/foo.c b/lldb/test/API/macosx/expedited-thread-pcs/foo.c
new file mode 100644
index 0000000..de1cbc4
--- /dev/null
+++ b/lldb/test/API/macosx/expedited-thread-pcs/foo.c
@@ -0,0 +1 @@
+int foo() { return 5; }
diff --git a/lldb/test/API/macosx/expedited-thread-pcs/main.cpp b/lldb/test/API/macosx/expedited-thread-pcs/main.cpp
new file mode 100644
index 0000000..d77c679
--- /dev/null
+++ b/lldb/test/API/macosx/expedited-thread-pcs/main.cpp
@@ -0,0 +1,62 @@
+#include <dlfcn.h>
+#include <stdio.h>
+#include <thread>
+#include <unistd.h>
+
+void f1() {
+ while (1)
+ sleep(1);
+}
+void f2() {
+ while (1)
+ sleep(1);
+}
+void f3() {
+ while (1)
+ sleep(1);
+}
+
+int main() {
+ std::thread t1{f1};
+ std::thread t2{f2};
+ std::thread t3{f3};
+
+ puts("break here");
+
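+  // Loading libfoo.dylib mid-session exercises new-module handling while the
+  // extra threads are running.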
+ void *handle = dlopen("libfoo.dylib", RTLD_LAZY);
+ int (*foo_ptr)() = (int (*)())dlsym(handle, "foo");
+ int c = foo_ptr();
+
+ // clang-format off
+ // multiple function calls on a single source line so 'step'
+ // and 'next' need to do multiple steps of work.
+ puts("1"); puts("2"); puts("3"); puts("4"); puts("5");
+ puts("6"); puts("7"); puts("8"); puts("9"); puts("10");
+ puts("11"); puts("12"); puts("13"); puts("14"); puts("15");
+ puts("16"); puts("17"); puts("18"); puts("19"); puts("20");
+ puts("21"); puts("22"); puts("23"); puts("24"); puts("24");
+ // clang-format on
+ puts("one");
+ puts("two");
+ puts("three");
+ puts("four");
+ puts("five");
+ puts("six");
+ puts("seven");
+ puts("eight");
+ puts("nine");
+ puts("ten");
+ c++;
+ c++;
+ c++;
+ c++;
+ c++;
+ c++;
+ c++;
+ c++;
+ c++;
+ c++;
+ c++;
+ c++;
+ return c;
+}
diff --git a/lldb/unittests/ScriptInterpreter/Python/PythonTestSuite.cpp b/lldb/unittests/ScriptInterpreter/Python/PythonTestSuite.cpp
index c67a2b4..3faeb58 100644
--- a/lldb/unittests/ScriptInterpreter/Python/PythonTestSuite.cpp
+++ b/lldb/unittests/ScriptInterpreter/Python/PythonTestSuite.cpp
@@ -211,6 +211,19 @@ LLDBSwigPythonGetRepeatCommandForScriptedCommand(PyObject *implementor,
return std::nullopt;
}
+StructuredData::DictionarySP
+LLDBSwigPythonHandleArgumentCompletionForScriptedCommand(
+ PyObject *implementor, std::vector<llvm::StringRef> &args, size_t args_pos,
+ size_t pos_in_arg) {
+ return {};
+}
+
+StructuredData::DictionarySP
+LLDBSwigPythonHandleOptionArgumentCompletionForScriptedCommand(
+ PyObject *implementor, llvm::StringRef &long_options, size_t char_in_arg) {
+ return {};
+}
+
bool lldb_private::python::SWIGBridge::LLDBSwigPythonCallModuleInit(
const char *python_module_name, const char *session_dictionary_name,
lldb::DebuggerSP debugger) {
diff --git a/lldb/unittests/UnwindAssembly/ARM64/TestArm64InstEmulation.cpp b/lldb/unittests/UnwindAssembly/ARM64/TestArm64InstEmulation.cpp
index 9303d6f..12eb577 100644
--- a/lldb/unittests/UnwindAssembly/ARM64/TestArm64InstEmulation.cpp
+++ b/lldb/unittests/UnwindAssembly/ARM64/TestArm64InstEmulation.cpp
@@ -62,7 +62,7 @@ TEST_F(TestArm64InstEmulation, TestSimpleDarwinFunction) {
UnwindPlan::RowSP row_sp;
AddressRange sample_range;
UnwindPlan unwind_plan(eRegisterKindLLDB);
- UnwindPlan::Row::RegisterLocation regloc;
+ UnwindPlan::Row::AbstractRegisterLocation regloc;
// 'int main() { }' compiled for arm64-apple-ios with clang
uint8_t data[] = {
@@ -170,7 +170,7 @@ TEST_F(TestArm64InstEmulation, TestMediumDarwinFunction) {
UnwindPlan::RowSP row_sp;
AddressRange sample_range;
UnwindPlan unwind_plan(eRegisterKindLLDB);
- UnwindPlan::Row::RegisterLocation regloc;
+ UnwindPlan::Row::AbstractRegisterLocation regloc;
// disassembly of -[NSPlaceholderString initWithBytes:length:encoding:]
// from Foundation for iOS.
@@ -332,7 +332,7 @@ TEST_F(TestArm64InstEmulation, TestFramelessThreeEpilogueFunction) {
UnwindPlan::RowSP row_sp;
AddressRange sample_range;
UnwindPlan unwind_plan(eRegisterKindLLDB);
- UnwindPlan::Row::RegisterLocation regloc;
+ UnwindPlan::Row::AbstractRegisterLocation regloc;
// disassembly of JSC::ARM64LogicalImmediate::findBitRange<16u>
// from JavaScriptcore for iOS.
@@ -431,7 +431,7 @@ TEST_F(TestArm64InstEmulation, TestRegisterSavedTwice) {
UnwindPlan::RowSP row_sp;
AddressRange sample_range;
UnwindPlan unwind_plan(eRegisterKindLLDB);
- UnwindPlan::Row::RegisterLocation regloc;
+ UnwindPlan::Row::AbstractRegisterLocation regloc;
// disassembly of mach_msg_sever_once from libsystem_kernel.dylib for iOS.
uint8_t data[] = {
@@ -533,7 +533,7 @@ TEST_F(TestArm64InstEmulation, TestRegisterDoubleSpills) {
UnwindPlan::RowSP row_sp;
AddressRange sample_range;
UnwindPlan unwind_plan(eRegisterKindLLDB);
- UnwindPlan::Row::RegisterLocation regloc;
+ UnwindPlan::Row::AbstractRegisterLocation regloc;
// this file built with clang for iOS arch arm64 optimization -Os
// #include <stdio.h>
@@ -705,7 +705,7 @@ TEST_F(TestArm64InstEmulation, TestCFARegisterTrackedAcrossJumps) {
UnwindPlan::RowSP row_sp;
AddressRange sample_range;
UnwindPlan unwind_plan(eRegisterKindLLDB);
- UnwindPlan::Row::RegisterLocation regloc;
+ UnwindPlan::Row::AbstractRegisterLocation regloc;
uint8_t data[] = {
// prologue
@@ -804,7 +804,7 @@ TEST_F(TestArm64InstEmulation, TestCFAResetToSP) {
UnwindPlan::RowSP row_sp;
AddressRange sample_range;
UnwindPlan unwind_plan(eRegisterKindLLDB);
- UnwindPlan::Row::RegisterLocation regloc;
+ UnwindPlan::Row::AbstractRegisterLocation regloc;
// The called_from_nodebug() from TestStepNoDebug.py
// Most of the previous unit tests have $sp being set as
diff --git a/lldb/unittests/UnwindAssembly/PPC64/TestPPC64InstEmulation.cpp b/lldb/unittests/UnwindAssembly/PPC64/TestPPC64InstEmulation.cpp
index 9892e18..a85aad7 100644
--- a/lldb/unittests/UnwindAssembly/PPC64/TestPPC64InstEmulation.cpp
+++ b/lldb/unittests/UnwindAssembly/PPC64/TestPPC64InstEmulation.cpp
@@ -61,7 +61,7 @@ TEST_F(TestPPC64InstEmulation, TestSimpleFunction) {
UnwindPlan::RowSP row_sp;
AddressRange sample_range;
UnwindPlan unwind_plan(eRegisterKindLLDB);
- UnwindPlan::Row::RegisterLocation regloc;
+ UnwindPlan::Row::AbstractRegisterLocation regloc;
// prologue and epilogue of:
// int main() {
@@ -180,7 +180,7 @@ TEST_F(TestPPC64InstEmulation, TestMediumFunction) {
UnwindPlan::RowSP row_sp;
AddressRange sample_range;
UnwindPlan unwind_plan(eRegisterKindLLDB);
- UnwindPlan::Row::RegisterLocation regloc;
+ UnwindPlan::Row::AbstractRegisterLocation regloc;
// prologue and epilogue of main() (call-func.c),
// with several calls and stack variables.
diff --git a/lldb/unittests/UnwindAssembly/x86/Testx86AssemblyInspectionEngine.cpp b/lldb/unittests/UnwindAssembly/x86/Testx86AssemblyInspectionEngine.cpp
index 597e5b2..3ff57b4 100644
--- a/lldb/unittests/UnwindAssembly/x86/Testx86AssemblyInspectionEngine.cpp
+++ b/lldb/unittests/UnwindAssembly/x86/Testx86AssemblyInspectionEngine.cpp
@@ -168,7 +168,7 @@ TEST_F(Testx86AssemblyInspectionEngine, TestSimple64bitFrameFunction) {
eLazyBoolYes);
EXPECT_TRUE(unwind_plan.GetSourcedFromCompiler() == eLazyBoolNo);
- UnwindPlan::Row::RegisterLocation regloc;
+ UnwindPlan::Row::AbstractRegisterLocation regloc;
// 0: CFA=rsp +8 => rsp=CFA+0 rip=[CFA-8]
UnwindPlan::RowSP row_sp = unwind_plan.GetRowForFunctionOffset(0);
@@ -244,7 +244,7 @@ TEST_F(Testx86AssemblyInspectionEngine, TestSimple32bitFrameFunction) {
eLazyBoolYes);
EXPECT_TRUE(unwind_plan.GetSourcedFromCompiler() == eLazyBoolNo);
- UnwindPlan::Row::RegisterLocation regloc;
+ UnwindPlan::Row::AbstractRegisterLocation regloc;
// offset 0 -- pushl %ebp
UnwindPlan::RowSP row_sp = unwind_plan.GetRowForFunctionOffset(0);
@@ -381,7 +381,7 @@ TEST_F(Testx86AssemblyInspectionEngine, Test64bitFramelessBigStackFrame) {
// 33: CFA=rsp+16 => rbp=[CFA-16] rsp=CFA+0 rip=[CFA-8]
// 34: CFA=rsp +8 => rsp=CFA+0 rip=[CFA-8]
- UnwindPlan::Row::RegisterLocation regloc;
+ UnwindPlan::Row::AbstractRegisterLocation regloc;
// grab the Row for when the prologue has finished executing:
// 17: CFA=rsp+14496 => rbx=[CFA-56] rbp=[CFA-16] rsp=CFA+0 r12=[CFA-48]
@@ -650,7 +650,7 @@ TEST_F(Testx86AssemblyInspectionEngine, Test32bitFramelessBigStackFrame) {
// 48: CFA=esp+14480 => ebx=[CFA-12] edi=[CFA-16] esi=[CFA-20] ebp=[CFA-8]
// esp=CFA+0 eip=[CFA-4]
- UnwindPlan::Row::RegisterLocation regloc;
+ UnwindPlan::Row::AbstractRegisterLocation regloc;
UnwindPlan::RowSP row_sp;
// Check that we get the CFA correct for the pic base setup sequence
@@ -802,7 +802,7 @@ TEST_F(Testx86AssemblyInspectionEngine, Test64bitFramelessSmallStackFrame) {
// 1: CFA=rsp+16 => rsp=CFA+0 rip=[CFA-8]
// 22: CFA=rsp +8 => rsp=CFA+0 rip=[CFA-8]
- UnwindPlan::Row::RegisterLocation regloc;
+ UnwindPlan::Row::AbstractRegisterLocation regloc;
// grab the Row for when the prologue has finished executing:
// 1: CFA=rsp+16 => rsp=CFA+0 rip=[CFA-8]
@@ -911,7 +911,7 @@ TEST_F(Testx86AssemblyInspectionEngine, Test32bitFramelessSmallStackFrame) {
// row[3]: 9: CFA=esp+16 => esp=CFA+0 eip=[CFA-4]
// row[4]: 34: CFA=esp +4 => esp=CFA+0 eip=[CFA-4]
- UnwindPlan::Row::RegisterLocation regloc;
+ UnwindPlan::Row::AbstractRegisterLocation regloc;
// Check unwind state before we set up the picbase register
// 3: CFA=esp+16 => esp=CFA+0 eip=[CFA-4]
@@ -962,7 +962,7 @@ TEST_F(Testx86AssemblyInspectionEngine, Test32bitFramelessSmallStackFrame) {
}
TEST_F(Testx86AssemblyInspectionEngine, TestPushRBP) {
- UnwindPlan::Row::RegisterLocation regloc;
+ UnwindPlan::Row::AbstractRegisterLocation regloc;
UnwindPlan::RowSP row_sp;
uint8_t data[] = {
@@ -1005,7 +1005,7 @@ TEST_F(Testx86AssemblyInspectionEngine, TestPushRBP) {
}
TEST_F(Testx86AssemblyInspectionEngine, TestPushImm) {
- UnwindPlan::Row::RegisterLocation regloc;
+ UnwindPlan::Row::AbstractRegisterLocation regloc;
UnwindPlan::RowSP row_sp;
uint8_t data[] = {
@@ -1054,7 +1054,7 @@ TEST_F(Testx86AssemblyInspectionEngine, TestPushImm) {
// in the first function called in a new thread and it needs to
// put a 0 as the saved pc. We pretend it didn't change the CFA.
TEST_F(Testx86AssemblyInspectionEngine, TestPush0) {
- UnwindPlan::Row::RegisterLocation regloc;
+ UnwindPlan::Row::AbstractRegisterLocation regloc;
UnwindPlan::RowSP row_sp;
uint8_t data[] = {
@@ -1085,7 +1085,7 @@ TEST_F(Testx86AssemblyInspectionEngine, TestPush0) {
}
TEST_F(Testx86AssemblyInspectionEngine, TestPushExtended) {
- UnwindPlan::Row::RegisterLocation regloc;
+ UnwindPlan::Row::AbstractRegisterLocation regloc;
UnwindPlan::RowSP row_sp;
uint8_t data[] = {
@@ -1133,7 +1133,7 @@ TEST_F(Testx86AssemblyInspectionEngine, TestPushExtended) {
}
TEST_F(Testx86AssemblyInspectionEngine, TestPushR15) {
- UnwindPlan::Row::RegisterLocation regloc;
+ UnwindPlan::Row::AbstractRegisterLocation regloc;
UnwindPlan::RowSP row_sp;
uint8_t data[] = {
@@ -1161,7 +1161,7 @@ TEST_F(Testx86AssemblyInspectionEngine, TestPushR15) {
}
TEST_F(Testx86AssemblyInspectionEngine, TestPushR14) {
- UnwindPlan::Row::RegisterLocation regloc;
+ UnwindPlan::Row::AbstractRegisterLocation regloc;
UnwindPlan::RowSP row_sp;
uint8_t data[] = {
@@ -1189,7 +1189,7 @@ TEST_F(Testx86AssemblyInspectionEngine, TestPushR14) {
}
TEST_F(Testx86AssemblyInspectionEngine, TestPushR13) {
- UnwindPlan::Row::RegisterLocation regloc;
+ UnwindPlan::Row::AbstractRegisterLocation regloc;
UnwindPlan::RowSP row_sp;
uint8_t data[] = {
@@ -1217,7 +1217,7 @@ TEST_F(Testx86AssemblyInspectionEngine, TestPushR13) {
}
TEST_F(Testx86AssemblyInspectionEngine, TestPushR12) {
- UnwindPlan::Row::RegisterLocation regloc;
+ UnwindPlan::Row::AbstractRegisterLocation regloc;
UnwindPlan::RowSP row_sp;
uint8_t data[] = {
@@ -1245,7 +1245,7 @@ TEST_F(Testx86AssemblyInspectionEngine, TestPushR12) {
}
TEST_F(Testx86AssemblyInspectionEngine, TestPushRBX) {
- UnwindPlan::Row::RegisterLocation regloc;
+ UnwindPlan::Row::AbstractRegisterLocation regloc;
UnwindPlan::RowSP row_sp;
uint8_t data[] = {
@@ -1276,7 +1276,7 @@ TEST_F(Testx86AssemblyInspectionEngine, TestPushRBX) {
// eax, ecx, edx are all considered volatile and push/pops of them are
// not tracked (except to keep track of stack pointer movement)
TEST_F(Testx86AssemblyInspectionEngine, TestPushEAX) {
- UnwindPlan::Row::RegisterLocation regloc;
+ UnwindPlan::Row::AbstractRegisterLocation regloc;
UnwindPlan::RowSP row_sp;
AddressRange sample_range;
UnwindPlan unwind_plan(eRegisterKindLLDB);
@@ -1305,7 +1305,7 @@ TEST_F(Testx86AssemblyInspectionEngine, TestPushEAX) {
// eax, ecx, edx are all considered volatile and push/pops of them are
// not tracked (except to keep track of stack pointer movement)
TEST_F(Testx86AssemblyInspectionEngine, TestPushECX) {
- UnwindPlan::Row::RegisterLocation regloc;
+ UnwindPlan::Row::AbstractRegisterLocation regloc;
UnwindPlan::RowSP row_sp;
AddressRange sample_range;
UnwindPlan unwind_plan(eRegisterKindLLDB);
@@ -1334,7 +1334,7 @@ TEST_F(Testx86AssemblyInspectionEngine, TestPushECX) {
// eax, ecx, edx are all considered volatile and push/pops of them are
// not tracked (except to keep track of stack pointer movement)
TEST_F(Testx86AssemblyInspectionEngine, TestPushEDX) {
- UnwindPlan::Row::RegisterLocation regloc;
+ UnwindPlan::Row::AbstractRegisterLocation regloc;
UnwindPlan::RowSP row_sp;
AddressRange sample_range;
UnwindPlan unwind_plan(eRegisterKindLLDB);
@@ -1360,7 +1360,7 @@ TEST_F(Testx86AssemblyInspectionEngine, TestPushEDX) {
}
TEST_F(Testx86AssemblyInspectionEngine, TestPushEBX) {
- UnwindPlan::Row::RegisterLocation regloc;
+ UnwindPlan::Row::AbstractRegisterLocation regloc;
UnwindPlan::RowSP row_sp;
AddressRange sample_range;
UnwindPlan unwind_plan(eRegisterKindLLDB);
@@ -1388,7 +1388,7 @@ TEST_F(Testx86AssemblyInspectionEngine, TestPushEBX) {
}
TEST_F(Testx86AssemblyInspectionEngine, TestPushEBP) {
- UnwindPlan::Row::RegisterLocation regloc;
+ UnwindPlan::Row::AbstractRegisterLocation regloc;
UnwindPlan::RowSP row_sp;
AddressRange sample_range;
UnwindPlan unwind_plan(eRegisterKindLLDB);
@@ -1416,7 +1416,7 @@ TEST_F(Testx86AssemblyInspectionEngine, TestPushEBP) {
}
TEST_F(Testx86AssemblyInspectionEngine, TestPushRBPWithREX) {
- UnwindPlan::Row::RegisterLocation regloc;
+ UnwindPlan::Row::AbstractRegisterLocation regloc;
UnwindPlan::RowSP row_sp;
uint8_t data[] = {
@@ -1444,7 +1444,7 @@ TEST_F(Testx86AssemblyInspectionEngine, TestPushRBPWithREX) {
}
TEST_F(Testx86AssemblyInspectionEngine, TestPushESI) {
- UnwindPlan::Row::RegisterLocation regloc;
+ UnwindPlan::Row::AbstractRegisterLocation regloc;
UnwindPlan::RowSP row_sp;
AddressRange sample_range;
UnwindPlan unwind_plan(eRegisterKindLLDB);
@@ -1472,7 +1472,7 @@ TEST_F(Testx86AssemblyInspectionEngine, TestPushESI) {
}
TEST_F(Testx86AssemblyInspectionEngine, TestPushEDI) {
- UnwindPlan::Row::RegisterLocation regloc;
+ UnwindPlan::Row::AbstractRegisterLocation regloc;
UnwindPlan::RowSP row_sp;
AddressRange sample_range;
UnwindPlan unwind_plan(eRegisterKindLLDB);
@@ -1500,7 +1500,7 @@ TEST_F(Testx86AssemblyInspectionEngine, TestPushEDI) {
}
TEST_F(Testx86AssemblyInspectionEngine, TestMovRSPtoRBP) {
- UnwindPlan::Row::RegisterLocation regloc;
+ UnwindPlan::Row::AbstractRegisterLocation regloc;
UnwindPlan::RowSP row_sp;
uint8_t data64_1[] = {
@@ -1572,7 +1572,7 @@ TEST_F(Testx86AssemblyInspectionEngine, TestMovRSPtoRBP) {
}
TEST_F(Testx86AssemblyInspectionEngine, TestSubRSP) {
- UnwindPlan::Row::RegisterLocation regloc;
+ UnwindPlan::Row::AbstractRegisterLocation regloc;
UnwindPlan::RowSP row_sp;
AddressRange sample_range;
UnwindPlan unwind_plan(eRegisterKindLLDB);
@@ -1612,7 +1612,7 @@ TEST_F(Testx86AssemblyInspectionEngine, TestSubRSP) {
}
TEST_F(Testx86AssemblyInspectionEngine, TestSubESP) {
- UnwindPlan::Row::RegisterLocation regloc;
+ UnwindPlan::Row::AbstractRegisterLocation regloc;
UnwindPlan::RowSP row_sp;
AddressRange sample_range;
UnwindPlan unwind_plan(eRegisterKindLLDB);
@@ -1652,7 +1652,7 @@ TEST_F(Testx86AssemblyInspectionEngine, TestSubESP) {
}
TEST_F(Testx86AssemblyInspectionEngine, TestAddRSP) {
- UnwindPlan::Row::RegisterLocation regloc;
+ UnwindPlan::Row::AbstractRegisterLocation regloc;
UnwindPlan::RowSP row_sp;
AddressRange sample_range;
UnwindPlan unwind_plan(eRegisterKindLLDB);
@@ -1692,7 +1692,7 @@ TEST_F(Testx86AssemblyInspectionEngine, TestAddRSP) {
}
TEST_F(Testx86AssemblyInspectionEngine, TestAddESP) {
- UnwindPlan::Row::RegisterLocation regloc;
+ UnwindPlan::Row::AbstractRegisterLocation regloc;
UnwindPlan::RowSP row_sp;
AddressRange sample_range;
UnwindPlan unwind_plan(eRegisterKindLLDB);
@@ -1732,7 +1732,7 @@ TEST_F(Testx86AssemblyInspectionEngine, TestAddESP) {
}
TEST_F(Testx86AssemblyInspectionEngine, TestLEA_RSP_Pattern) {
- UnwindPlan::Row::RegisterLocation regloc;
+ UnwindPlan::Row::AbstractRegisterLocation regloc;
UnwindPlan::RowSP row_sp;
AddressRange sample_range;
UnwindPlan unwind_plan(eRegisterKindLLDB);
@@ -1756,7 +1756,7 @@ TEST_F(Testx86AssemblyInspectionEngine, TestLEA_RSP_Pattern) {
}
TEST_F(Testx86AssemblyInspectionEngine, TestPopRBX) {
- UnwindPlan::Row::RegisterLocation regloc;
+ UnwindPlan::Row::AbstractRegisterLocation regloc;
UnwindPlan::RowSP row_sp;
AddressRange sample_range;
UnwindPlan unwind_plan(eRegisterKindLLDB);
@@ -1782,7 +1782,7 @@ TEST_F(Testx86AssemblyInspectionEngine, TestPopRBX) {
}
TEST_F(Testx86AssemblyInspectionEngine, TestPopRBP) {
- UnwindPlan::Row::RegisterLocation regloc;
+ UnwindPlan::Row::AbstractRegisterLocation regloc;
UnwindPlan::RowSP row_sp;
AddressRange sample_range;
UnwindPlan unwind_plan(eRegisterKindLLDB);
@@ -1808,7 +1808,7 @@ TEST_F(Testx86AssemblyInspectionEngine, TestPopRBP) {
}
TEST_F(Testx86AssemblyInspectionEngine, TestPopR12) {
- UnwindPlan::Row::RegisterLocation regloc;
+ UnwindPlan::Row::AbstractRegisterLocation regloc;
UnwindPlan::RowSP row_sp;
AddressRange sample_range;
UnwindPlan unwind_plan(eRegisterKindLLDB);
@@ -1834,7 +1834,7 @@ TEST_F(Testx86AssemblyInspectionEngine, TestPopR12) {
}
TEST_F(Testx86AssemblyInspectionEngine, TestPopR13) {
- UnwindPlan::Row::RegisterLocation regloc;
+ UnwindPlan::Row::AbstractRegisterLocation regloc;
UnwindPlan::RowSP row_sp;
AddressRange sample_range;
UnwindPlan unwind_plan(eRegisterKindLLDB);
@@ -1860,7 +1860,7 @@ TEST_F(Testx86AssemblyInspectionEngine, TestPopR13) {
}
TEST_F(Testx86AssemblyInspectionEngine, TestPopR14) {
- UnwindPlan::Row::RegisterLocation regloc;
+ UnwindPlan::Row::AbstractRegisterLocation regloc;
UnwindPlan::RowSP row_sp;
AddressRange sample_range;
UnwindPlan unwind_plan(eRegisterKindLLDB);
@@ -1886,7 +1886,7 @@ TEST_F(Testx86AssemblyInspectionEngine, TestPopR14) {
}
TEST_F(Testx86AssemblyInspectionEngine, TestPopR15) {
- UnwindPlan::Row::RegisterLocation regloc;
+ UnwindPlan::Row::AbstractRegisterLocation regloc;
UnwindPlan::RowSP row_sp;
AddressRange sample_range;
UnwindPlan unwind_plan(eRegisterKindLLDB);
@@ -1912,7 +1912,7 @@ TEST_F(Testx86AssemblyInspectionEngine, TestPopR15) {
}
TEST_F(Testx86AssemblyInspectionEngine, TestPopEBX) {
- UnwindPlan::Row::RegisterLocation regloc;
+ UnwindPlan::Row::AbstractRegisterLocation regloc;
UnwindPlan::RowSP row_sp;
AddressRange sample_range;
UnwindPlan unwind_plan(eRegisterKindLLDB);
@@ -1938,7 +1938,7 @@ TEST_F(Testx86AssemblyInspectionEngine, TestPopEBX) {
}
TEST_F(Testx86AssemblyInspectionEngine, TestPopEBP) {
- UnwindPlan::Row::RegisterLocation regloc;
+ UnwindPlan::Row::AbstractRegisterLocation regloc;
UnwindPlan::RowSP row_sp;
AddressRange sample_range;
UnwindPlan unwind_plan(eRegisterKindLLDB);
@@ -1964,7 +1964,7 @@ TEST_F(Testx86AssemblyInspectionEngine, TestPopEBP) {
}
TEST_F(Testx86AssemblyInspectionEngine, TestPopRBPWithREX) {
- UnwindPlan::Row::RegisterLocation regloc;
+ UnwindPlan::Row::AbstractRegisterLocation regloc;
UnwindPlan::RowSP row_sp;
AddressRange sample_range;
UnwindPlan unwind_plan(eRegisterKindLLDB);
@@ -1990,7 +1990,7 @@ TEST_F(Testx86AssemblyInspectionEngine, TestPopRBPWithREX) {
}
TEST_F(Testx86AssemblyInspectionEngine, TestPopESI) {
- UnwindPlan::Row::RegisterLocation regloc;
+ UnwindPlan::Row::AbstractRegisterLocation regloc;
UnwindPlan::RowSP row_sp;
AddressRange sample_range;
UnwindPlan unwind_plan(eRegisterKindLLDB);
@@ -2016,7 +2016,7 @@ TEST_F(Testx86AssemblyInspectionEngine, TestPopESI) {
}
TEST_F(Testx86AssemblyInspectionEngine, TestPopEDI) {
- UnwindPlan::Row::RegisterLocation regloc;
+ UnwindPlan::Row::AbstractRegisterLocation regloc;
UnwindPlan::RowSP row_sp;
AddressRange sample_range;
UnwindPlan unwind_plan(eRegisterKindLLDB);
@@ -2044,7 +2044,7 @@ TEST_F(Testx86AssemblyInspectionEngine, TestPopEDI) {
// We don't track these registers, but make sure the CFA address is updated
// if we're defining the CFA in terms of esp.
TEST_F(Testx86AssemblyInspectionEngine, Testi386IgnoredRegisters) {
- UnwindPlan::Row::RegisterLocation regloc;
+ UnwindPlan::Row::AbstractRegisterLocation regloc;
UnwindPlan::RowSP row_sp;
AddressRange sample_range;
UnwindPlan unwind_plan(eRegisterKindLLDB);
@@ -2082,7 +2082,7 @@ TEST_F(Testx86AssemblyInspectionEngine, Testi386IgnoredRegisters) {
}
TEST_F(Testx86AssemblyInspectionEngine, TestLEAVE) {
- UnwindPlan::Row::RegisterLocation regloc;
+ UnwindPlan::Row::AbstractRegisterLocation regloc;
UnwindPlan::RowSP row_sp;
AddressRange sample_range;
UnwindPlan unwind_plan(eRegisterKindLLDB);
@@ -2123,7 +2123,7 @@ TEST_F(Testx86AssemblyInspectionEngine, TestLEAVE) {
// pushes the addr of the next insn on the stack, and then pops that value
// into a register (the "pic base" register).
TEST_F(Testx86AssemblyInspectionEngine, TestCALLNextInsn) {
- UnwindPlan::Row::RegisterLocation regloc;
+ UnwindPlan::Row::AbstractRegisterLocation regloc;
UnwindPlan::RowSP row_sp;
AddressRange sample_range;
UnwindPlan unwind_plan(eRegisterKindLLDB);
@@ -2148,7 +2148,7 @@ TEST_F(Testx86AssemblyInspectionEngine, TestCALLNextInsn) {
}
TEST_F(Testx86AssemblyInspectionEngine, TestSpillRegToStackViaMOVx86_64) {
- UnwindPlan::Row::RegisterLocation regloc;
+ UnwindPlan::Row::AbstractRegisterLocation regloc;
UnwindPlan::RowSP row_sp;
AddressRange sample_range;
UnwindPlan unwind_plan(eRegisterKindLLDB);
@@ -2187,7 +2187,7 @@ TEST_F(Testx86AssemblyInspectionEngine, TestSpillRegToStackViaMOVx86_64) {
}
TEST_F(Testx86AssemblyInspectionEngine, TestSpillRegToStackViaMOVi386) {
- UnwindPlan::Row::RegisterLocation regloc;
+ UnwindPlan::Row::AbstractRegisterLocation regloc;
UnwindPlan::RowSP row_sp;
AddressRange sample_range;
UnwindPlan unwind_plan(eRegisterKindLLDB);
@@ -2221,7 +2221,7 @@ TEST_F(Testx86AssemblyInspectionEngine, TestSpillRegToStackViaMOVi386) {
}
TEST_F(Testx86AssemblyInspectionEngine, TestSpArithx86_64Augmented) {
- UnwindPlan::Row::RegisterLocation regloc;
+ UnwindPlan::Row::AbstractRegisterLocation regloc;
UnwindPlan::RowSP row_sp;
AddressRange sample_range;
UnwindPlan unwind_plan(eRegisterKindLLDB);
@@ -2312,7 +2312,7 @@ TEST_F(Testx86AssemblyInspectionEngine, TestSpArithx86_64Augmented) {
}
TEST_F(Testx86AssemblyInspectionEngine, TestSimplex86_64Augmented) {
- UnwindPlan::Row::RegisterLocation regloc;
+ UnwindPlan::Row::AbstractRegisterLocation regloc;
UnwindPlan::RowSP row_sp;
AddressRange sample_range;
UnwindPlan unwind_plan(eRegisterKindLLDB);
@@ -2390,7 +2390,7 @@ TEST_F(Testx86AssemblyInspectionEngine, TestSimplex86_64Augmented) {
}
TEST_F(Testx86AssemblyInspectionEngine, TestSimplei386ugmented) {
- UnwindPlan::Row::RegisterLocation regloc;
+ UnwindPlan::Row::AbstractRegisterLocation regloc;
UnwindPlan::RowSP row_sp;
AddressRange sample_range;
UnwindPlan unwind_plan(eRegisterKindLLDB);
@@ -2472,7 +2472,7 @@ TEST_F(Testx86AssemblyInspectionEngine, TestSimplei386ugmented) {
// stops
// disassembling at that point (long-mode).
TEST_F(Testx86AssemblyInspectionEngine, Test32BitOnlyInstruction) {
- UnwindPlan::Row::RegisterLocation regloc;
+ UnwindPlan::Row::AbstractRegisterLocation regloc;
UnwindPlan::RowSP row_sp;
AddressRange sample_range;
UnwindPlan unwind_plan(eRegisterKindLLDB);
@@ -2634,7 +2634,7 @@ TEST_F(Testx86AssemblyInspectionEngine, TestStackRealignMSVC_i386) {
EXPECT_EQ(esp_minus_4, plan.GetRowForFunctionOffset(30)->GetAFAValue());
// Test saved register
- UnwindPlan::Row::RegisterLocation reg_loc;
+ UnwindPlan::Row::AbstractRegisterLocation reg_loc;
EXPECT_TRUE(
plan.GetRowForFunctionOffset(27)->GetRegisterInfo(k_edi, reg_loc));
EXPECT_TRUE(reg_loc.IsAtAFAPlusOffset());
@@ -2712,7 +2712,7 @@ TEST_F(Testx86AssemblyInspectionEngine, TestReturnDetect) {
eLazyBoolYes);
EXPECT_TRUE(unwind_plan.GetSourcedFromCompiler() == eLazyBoolNo);
- UnwindPlan::Row::RegisterLocation regloc;
+ UnwindPlan::Row::AbstractRegisterLocation regloc;
// 0: CFA=rsp +8 => rsp=CFA+0 rip=[CFA-8]
UnwindPlan::RowSP row_sp = unwind_plan.GetRowForFunctionOffset(0);
diff --git a/lldb/unittests/Utility/StatusTest.cpp b/lldb/unittests/Utility/StatusTest.cpp
index be4f2be..e37c94a 100644
--- a/lldb/unittests/Utility/StatusTest.cpp
+++ b/lldb/unittests/Utility/StatusTest.cpp
@@ -70,6 +70,14 @@ TEST(StatusTest, ErrorConversion) {
llvm::Error foo = Status::FromErrorString("foo").ToError();
EXPECT_TRUE(bool(foo));
EXPECT_EQ("foo", llvm::toString(std::move(foo)));
+
+ llvm::Error eperm = llvm::errorCodeToError({EPERM, std::generic_category()});
+ llvm::Error eintr = llvm::errorCodeToError({EINTR, std::generic_category()});
+ llvm::Error elist = llvm::joinErrors(std::move(eperm), std::move(eintr));
+ elist = llvm::joinErrors(std::move(elist), llvm::createStringError("foo"));
+ Status list = Status::FromError(std::move(elist));
+ EXPECT_EQ((int)list.GetError(), EPERM);
+ EXPECT_EQ(list.GetType(), eErrorTypePOSIX);
}
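The new assertions pin down a useful property: when a joined error list is converted, the first entry carrying an errno-style code determines both GetError() and GetType(). A minimal standalone sketch of the same pattern, assuming the headers this test already uses (names here are illustrative, not part of the patch):

    #include "lldb/Utility/Status.h"
    #include "llvm/Support/Error.h"
    #include <cerrno>
    #include <system_error>

    void demo() {
      // Join an errno-backed error with a plain string error.
      llvm::Error E = llvm::joinErrors(
          llvm::errorCodeToError(std::error_code(EPERM, std::generic_category())),
          llvm::createStringError("secondary failure"));
      lldb_private::Status S = lldb_private::Status::FromError(std::move(E));
      // The first errno-backed entry wins: S.GetError() == EPERM and the
      // status type is lldb::eErrorTypePOSIX, as the test above verifies.
    }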
#ifdef _WIN32
diff --git a/llvm/cmake/modules/CMakeLists.txt b/llvm/cmake/modules/CMakeLists.txt
index d99af79..ef4cfa3 100644
--- a/llvm/cmake/modules/CMakeLists.txt
+++ b/llvm/cmake/modules/CMakeLists.txt
@@ -36,6 +36,9 @@ endif()
if(omp_gen IN_LIST LLVM_COMMON_DEPENDS)
list(REMOVE_ITEM LLVM_COMMON_DEPENDS omp_gen)
endif()
+if(vt_gen IN_LIST LLVM_COMMON_DEPENDS)
+ list(REMOVE_ITEM LLVM_COMMON_DEPENDS vt_gen)
+endif()
#
# Generate LLVMConfig.cmake for the build tree.
diff --git a/llvm/cmake/modules/LLVMConfig.cmake.in b/llvm/cmake/modules/LLVMConfig.cmake.in
index 7e1501a..c49f10b 100644
--- a/llvm/cmake/modules/LLVMConfig.cmake.in
+++ b/llvm/cmake/modules/LLVMConfig.cmake.in
@@ -151,6 +151,9 @@ endif()
if(NOT TARGET intrinsics_gen)
add_custom_target(intrinsics_gen)
endif()
+if(NOT TARGET vt_gen)
+ add_custom_target(vt_gen)
+endif()
if(NOT TARGET omp_gen)
add_custom_target(omp_gen)
endif()
diff --git a/llvm/docs/AMDGPUUsage.rst b/llvm/docs/AMDGPUUsage.rst
index 4b48b54..9e11b13 100644
--- a/llvm/docs/AMDGPUUsage.rst
+++ b/llvm/docs/AMDGPUUsage.rst
@@ -611,9 +611,7 @@ Generic processor code objects are versioned. See :ref:`amdgpu-generic-processor
- ``gfx1152``
SALU floating point instructions
- and single-use VGPR hint
- instructions are not available
- on:
+ are not available on:
- ``gfx1150``
- ``gfx1151``
diff --git a/llvm/docs/CMake.rst b/llvm/docs/CMake.rst
index 838447f..b5adb22 100644
--- a/llvm/docs/CMake.rst
+++ b/llvm/docs/CMake.rst
@@ -571,10 +571,12 @@ enabled sub-projects. Nearly all of these variable names begin with
Semicolon-separated list of projects to build, or *all* for building all
(clang, lldb, lld, polly, etc) projects. This flag assumes that projects
are checked out side-by-side and not nested, i.e. clang needs to be in
- parallel of llvm instead of nested in `llvm/tools`. This feature allows
+ parallel to llvm instead of nested in ``llvm/tools``. This feature allows
to have one build for only LLVM and another for clang+llvm using the same
source checkout.
+
The full list is:
+
``clang;clang-tools-extra;cross-project-tests;libc;libclc;lld;lldb;openmp;polly;pstl``
**LLVM_ENABLE_RTTI**:BOOL
@@ -586,10 +588,16 @@ enabled sub-projects. Nearly all of these variable names begin with
It will build the builtins separately from the other runtimes to preserve
correct dependency ordering. If you want to build the runtimes using a system
compiler, see the `libc++ documentation <https://libcxx.llvm.org/BuildingLibcxx.html>`_.
- Note: the list should not have duplicates with `LLVM_ENABLE_PROJECTS`.
+
+ .. note::
+ The list should not have duplicates with ``LLVM_ENABLE_PROJECTS``.
+
The full list is:
+
``compiler-rt;libc;libcxx;libcxxabi;libunwind;openmp``
+
To enable all of them, use:
+
``LLVM_ENABLE_RUNTIMES=all``
**LLVM_ENABLE_SPHINX**:BOOL
diff --git a/llvm/docs/NVPTXUsage.rst b/llvm/docs/NVPTXUsage.rst
index 3a566bb..8b0b05c 100644
--- a/llvm/docs/NVPTXUsage.rst
+++ b/llvm/docs/NVPTXUsage.rst
@@ -127,69 +127,6 @@ Example: 64-bit PTX for CUDA Driver API: ``nvptx64-nvidia-cuda``
NVPTX Intrinsics
================
-Address Space Conversion
-------------------------
-
-'``llvm.nvvm.ptr.*.to.gen``' Intrinsics
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-Syntax:
-"""""""
-
-These are overloaded intrinsics. You can use these on any pointer types.
-
-.. code-block:: llvm
-
- declare ptr @llvm.nvvm.ptr.global.to.gen.p0.p1(ptr addrspace(1))
- declare ptr @llvm.nvvm.ptr.shared.to.gen.p0.p3(ptr addrspace(3))
- declare ptr @llvm.nvvm.ptr.constant.to.gen.p0.p4(ptr addrspace(4))
- declare ptr @llvm.nvvm.ptr.local.to.gen.p0.p5(ptr addrspace(5))
-
-Overview:
-"""""""""
-
-The '``llvm.nvvm.ptr.*.to.gen``' intrinsics convert a pointer in a non-generic
-address space to a generic address space pointer.
-
-Semantics:
-""""""""""
-
-These intrinsics modify the pointer value to be a valid generic address space
-pointer.
-
-
-'``llvm.nvvm.ptr.gen.to.*``' Intrinsics
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-Syntax:
-"""""""
-
-These are overloaded intrinsics. You can use these on any pointer types.
-
-.. code-block:: llvm
-
- declare ptr addrspace(1) @llvm.nvvm.ptr.gen.to.global.p1.p0(ptr)
- declare ptr addrspace(3) @llvm.nvvm.ptr.gen.to.shared.p3.p0(ptr)
- declare ptr addrspace(4) @llvm.nvvm.ptr.gen.to.constant.p4.p0(ptr)
- declare ptr addrspace(5) @llvm.nvvm.ptr.gen.to.local.p5.p0(ptr)
-
-Overview:
-"""""""""
-
-The '``llvm.nvvm.ptr.gen.to.*``' intrinsics convert a pointer in the generic
-address space to a pointer in the target address space. Note that these
-intrinsics are only useful if the address space of the target address space of
-the pointer is known. It is not legal to use address space conversion
-intrinsics to convert a pointer from one non-generic address space to another
-non-generic address space.
-
-Semantics:
-""""""""""
-
-These intrinsics modify the pointer value to be a valid pointer in the target
-non-generic address space.
-
-
Reading PTX Special Registers
-----------------------------
diff --git a/llvm/docs/ReleaseNotes.rst b/llvm/docs/ReleaseNotes.rst
index 660a378..c85ea28 100644
--- a/llvm/docs/ReleaseNotes.rst
+++ b/llvm/docs/ReleaseNotes.rst
@@ -56,6 +56,31 @@ Changes to the LLVM IR
* Added ``usub_cond`` and ``usub_sat`` operations to ``atomicrmw``.
+* Removed the following intrinsics, which can be replaced with a ``bitcast``:
+
+ * ``llvm.nvvm.bitcast.f2i``
+ * ``llvm.nvvm.bitcast.i2f``
+ * ``llvm.nvvm.bitcast.d2ll``
+ * ``llvm.nvvm.bitcast.ll2d``
+
+* Removed the following intrinsics, which can be replaced with a funnel shift:
+
+ * ``llvm.nvvm.rotate.b32``
+ * ``llvm.nvvm.rotate.right.b64``
+ * ``llvm.nvvm.rotate.b64``
+
+* Removed the following intrinsics, which can be replaced with an
+ ``addrspacecast``:
+
+ * ``llvm.nvvm.ptr.gen.to.global``
+ * ``llvm.nvvm.ptr.gen.to.shared``
+ * ``llvm.nvvm.ptr.gen.to.constant``
+ * ``llvm.nvvm.ptr.gen.to.local``
+ * ``llvm.nvvm.ptr.global.to.gen``
+ * ``llvm.nvvm.ptr.shared.to.gen``
+ * ``llvm.nvvm.ptr.constant.to.gen``
+ * ``llvm.nvvm.ptr.local.to.gen``
+
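For frontends that still emitted the removed ``llvm.nvvm.ptr.*`` conversions, the drop-in replacement is an ``addrspacecast``. A hedged IRBuilder sketch (function and variable names are illustrative, not part of this patch):

    #include "llvm/IR/IRBuilder.h"

    // Replaces a call to llvm.nvvm.ptr.global.to.gen: cast a global-space
    // pointer (addrspace 1 on NVPTX) to the generic address space (0).
    llvm::Value *castGlobalToGeneric(llvm::IRBuilder<> &Builder,
                                     llvm::Value *GlobalPtr) {
      llvm::PointerType *GenericTy =
          llvm::PointerType::get(Builder.getContext(), /*AddressSpace=*/0);
      return Builder.CreateAddrSpaceCast(GlobalPtr, GenericTy);
    }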
Changes to LLVM infrastructure
------------------------------
@@ -204,6 +229,7 @@ Changes to the C API
* ``LLVMGetNextDbgRecord``
* ``LLVMGetPreviousDbgRecord``
+* Added ``LLVMAtomicRMWBinOpUSubCond`` and ``LLVMAtomicRMWBinOpUSubSat`` to ``LLVMAtomicRMWBinOp`` enum for AtomicRMW instructions.
Changes to the CodeGen infrastructure
-------------------------------------
diff --git a/llvm/docs/TableGen/ProgRef.rst b/llvm/docs/TableGen/ProgRef.rst
index dcea3b7..5cf48d6e 100644
--- a/llvm/docs/TableGen/ProgRef.rst
+++ b/llvm/docs/TableGen/ProgRef.rst
@@ -223,12 +223,12 @@ TableGen provides "bang operators" that have a wide variety of uses:
: !div !empty !eq !exists !filter
: !find !foldl !foreach !ge !getdagarg
: !getdagname !getdagop !gt !head !if
- : !interleave !isa !le !listconcat !listremove
- : !listsplat !logtwo !lt !mul !ne
- : !not !or !range !repr !setdagarg
- : !setdagname !setdagop !shl !size !sra
- : !srl !strconcat !sub !subst !substr
- : !tail !tolower !toupper !xor
+ : !interleave !isa !le !listconcat !listflatten
+ : !listremove !listsplat !logtwo !lt !mul
+ : !ne !not !or !range !repr
+ : !setdagarg !setdagname !setdagop !shl !size
+ : !sra !srl !strconcat !sub !subst
+ : !substr !tail !tolower !toupper !xor
The ``!cond`` operator has a slightly different
syntax compared to other bang operators, so it is defined separately:
@@ -1832,6 +1832,12 @@ and non-0 as true.
This operator concatenates the list arguments *list1*, *list2*, etc., and
produces the resulting list. The lists must have the same element type.
+``!listflatten(``\ *list*\ ``)``
+ This operator flattens a list of lists *list* and produces a list with all
+ elements of the constituent lists concatenated. If *list* is of type
+ ``list<list<X>>`` the resulting list is of type ``list<X>``. If *list*'s
+ element type is not a list, the result is *list* itself.
+
``!listremove(``\ *list1*\ ``,`` *list2*\ ``)``
This operator returns a copy of *list1* removing all elements that also occur in
*list2*. The lists must have the same element type.
diff --git a/llvm/include/llvm-c/Core.h b/llvm/include/llvm-c/Core.h
index 19e0592..ec5c0e7 100644
--- a/llvm/include/llvm-c/Core.h
+++ b/llvm/include/llvm-c/Core.h
@@ -395,6 +395,9 @@ typedef enum {
when incremented above input value */
LLVMAtomicRMWBinOpUDecWrap, /**< Decrements the value, wrapping back to
the input value when decremented below zero */
+ LLVMAtomicRMWBinOpUSubCond, /**< Subtracts the value only if no unsigned
+ overflow */
+ LLVMAtomicRMWBinOpUSubSat, /**< Subtracts the value, clamping to zero */
} LLVMAtomicRMWBinOp;
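A hedged sketch of the new enumerators in use; LLVMBuildAtomicRMW itself is unchanged, only the op values are new:

    #include "llvm-c/Core.h"

    // Emit `atomicrmw usub_sat`: subtract, but clamp the result at zero
    // instead of wrapping. Ptr and Val are previously built values.
    LLVMValueRef buildSaturatingSub(LLVMBuilderRef B, LLVMValueRef Ptr,
                                    LLVMValueRef Val) {
      return LLVMBuildAtomicRMW(B, LLVMAtomicRMWBinOpUSubSat, Ptr, Val,
                                LLVMAtomicOrderingSequentiallyConsistent,
                                /*singleThread=*/0);
    }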
typedef enum {
@@ -4523,6 +4526,9 @@ LLVMValueRef LLVMBuildStructGEP2(LLVMBuilderRef B, LLVMTypeRef Ty,
const char *Name);
LLVMValueRef LLVMBuildGlobalString(LLVMBuilderRef B, const char *Str,
const char *Name);
+/**
+ * Deprecated: Use LLVMBuildGlobalString instead, which has identical behavior.
+ */
LLVMValueRef LLVMBuildGlobalStringPtr(LLVMBuilderRef B, const char *Str,
const char *Name);
LLVMBool LLVMGetVolatile(LLVMValueRef MemoryAccessInst);
diff --git a/llvm/include/llvm/Analysis/CtxProfAnalysis.h b/llvm/include/llvm/Analysis/CtxProfAnalysis.h
index b3e64b2..0a5beb9 100644
--- a/llvm/include/llvm/Analysis/CtxProfAnalysis.h
+++ b/llvm/include/llvm/Analysis/CtxProfAnalysis.h
@@ -117,6 +117,9 @@ public:
/// Get the instruction instrumenting a BB, or nullptr if not present.
static InstrProfIncrementInst *getBBInstrumentation(BasicBlock &BB);
+
+ /// Get the step instrumentation associated with a `select`
+ static InstrProfIncrementInstStep *getSelectInstrumentation(SelectInst &SI);
};
class CtxProfAnalysisPrinterPass
diff --git a/llvm/include/llvm/Analysis/DXILMetadataAnalysis.h b/llvm/include/llvm/Analysis/DXILMetadataAnalysis.h
index ed342c2..cb535ac 100644
--- a/llvm/include/llvm/Analysis/DXILMetadataAnalysis.h
+++ b/llvm/include/llvm/Analysis/DXILMetadataAnalysis.h
@@ -21,20 +21,20 @@ class Function;
namespace dxil {
struct EntryProperties {
- const Function *Entry;
+ const Function *Entry{nullptr};
// Specific target shader stage may be specified for entry functions
- Triple::EnvironmentType ShaderStage = Triple::UnknownEnvironment;
+ Triple::EnvironmentType ShaderStage{Triple::UnknownEnvironment};
unsigned NumThreadsX{0}; // X component
unsigned NumThreadsY{0}; // Y component
unsigned NumThreadsZ{0}; // Z component
- EntryProperties(const Function &Fn) : Entry(&Fn) {};
+ EntryProperties(const Function *Fn = nullptr) : Entry(Fn) {};
};
struct ModuleMetadataInfo {
VersionTuple DXILVersion{};
VersionTuple ShaderModelVersion{};
- Triple::EnvironmentType ShaderStage = Triple::UnknownEnvironment;
+ Triple::EnvironmentType ShaderProfile{Triple::UnknownEnvironment};
VersionTuple ValidatorVersion{};
SmallVector<EntryProperties> EntryPropertyVec{};
void print(raw_ostream &OS) const;
diff --git a/llvm/include/llvm/Analysis/Loads.h b/llvm/include/llvm/Analysis/Loads.h
index 1f01ff7..639070c 100644
--- a/llvm/include/llvm/Analysis/Loads.h
+++ b/llvm/include/llvm/Analysis/Loads.h
@@ -27,6 +27,8 @@ class LoadInst;
class Loop;
class MemoryLocation;
class ScalarEvolution;
+class SCEVPredicate;
+template <typename T> class SmallVectorImpl;
class TargetLibraryInfo;
/// Return true if this is always a dereferenceable pointer. If the context
@@ -81,14 +83,16 @@ bool isSafeToLoadUnconditionally(Value *V, Align Alignment, const APInt &Size,
/// that required by the header itself and could be hoisted into the header
/// if desired.) This is more powerful than the variants above when the
/// address loaded from is analyzeable by SCEV.
-bool isDereferenceableAndAlignedInLoop(LoadInst *LI, Loop *L,
- ScalarEvolution &SE, DominatorTree &DT,
- AssumptionCache *AC = nullptr);
+bool isDereferenceableAndAlignedInLoop(
+ LoadInst *LI, Loop *L, ScalarEvolution &SE, DominatorTree &DT,
+ AssumptionCache *AC = nullptr,
+ SmallVectorImpl<const SCEVPredicate *> *Predicates = nullptr);
/// Return true if the loop \p L cannot fault on any iteration and only
/// contains read-only memory accesses.
-bool isDereferenceableReadOnlyLoop(Loop *L, ScalarEvolution *SE,
- DominatorTree *DT, AssumptionCache *AC);
+bool isDereferenceableReadOnlyLoop(
+ Loop *L, ScalarEvolution *SE, DominatorTree *DT, AssumptionCache *AC,
+ SmallVectorImpl<const SCEVPredicate *> *Predicates = nullptr);
/// Return true if we know that executing a load from this value cannot trap.
///
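A caller-side sketch of the widened interface (variable names are illustrative): passing the optional out-parameter lets the analysis succeed in cases that are only provable under SCEV predicates, which the caller must then enforce, e.g. via loop versioning.

    // LI/L/SE/DT/AC are the load, loop, and analyses from the enclosing pass.
    llvm::SmallVector<const llvm::SCEVPredicate *, 4> Predicates;
    if (llvm::isDereferenceableAndAlignedInLoop(LI, L, SE, DT, &AC,
                                                &Predicates)) {
      // The load is safe on every iteration, but only under the collected
      // Predicates; emit run-time checks for them before relying on this.
    }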
diff --git a/llvm/include/llvm/Analysis/PtrUseVisitor.h b/llvm/include/llvm/Analysis/PtrUseVisitor.h
index 237d328..bbe2741f 100644
--- a/llvm/include/llvm/Analysis/PtrUseVisitor.h
+++ b/llvm/include/llvm/Analysis/PtrUseVisitor.h
@@ -52,57 +52,54 @@ public:
/// analysis and whether the visit completed or aborted early.
class PtrInfo {
public:
- PtrInfo() : AbortedInfo(nullptr, false), EscapedInfo(nullptr, false) {}
-
/// Reset the pointer info, clearing all state.
void reset() {
- AbortedInfo.setPointer(nullptr);
- AbortedInfo.setInt(false);
- EscapedInfo.setPointer(nullptr);
- EscapedInfo.setInt(false);
+ AbortedInfo = nullptr;
+ EscapedInfo = nullptr;
}
/// Did we abort the visit early?
- bool isAborted() const { return AbortedInfo.getInt(); }
+ bool isAborted() const { return AbortedInfo != nullptr; }
/// Is the pointer escaped at some point?
- bool isEscaped() const { return EscapedInfo.getInt(); }
+ bool isEscaped() const { return EscapedInfo != nullptr; }
/// Get the instruction causing the visit to abort.
/// \returns a pointer to the instruction causing the abort if one is
/// available; otherwise returns null.
- Instruction *getAbortingInst() const { return AbortedInfo.getPointer(); }
+ Instruction *getAbortingInst() const { return AbortedInfo; }
/// Get the instruction causing the pointer to escape.
/// \returns a pointer to the instruction which escapes the pointer if one
/// is available; otherwise returns null.
- Instruction *getEscapingInst() const { return EscapedInfo.getPointer(); }
+ Instruction *getEscapingInst() const { return EscapedInfo; }
/// Mark the visit as aborted. Intended for use in a void return.
/// \param I The instruction which caused the visit to abort, if available.
- void setAborted(Instruction *I = nullptr) {
- AbortedInfo.setInt(true);
- AbortedInfo.setPointer(I);
+ void setAborted(Instruction *I) {
+ assert(I && "Expected a valid pointer in setAborted");
+ AbortedInfo = I;
}
/// Mark the pointer as escaped. Intended for use in a void return.
/// \param I The instruction which escapes the pointer, if available.
- void setEscaped(Instruction *I = nullptr) {
- EscapedInfo.setInt(true);
- EscapedInfo.setPointer(I);
+ void setEscaped(Instruction *I) {
+ assert(I && "Expected a valid pointer in setEscaped");
+ EscapedInfo = I;
}
/// Mark the pointer as escaped, and the visit as aborted. Intended
/// for use in a void return.
/// \param I The instruction which both escapes the pointer and aborts the
/// visit, if available.
- void setEscapedAndAborted(Instruction *I = nullptr) {
+ void setEscapedAndAborted(Instruction *I) {
setEscaped(I);
setAborted(I);
}
private:
- PointerIntPair<Instruction *, 1, bool> AbortedInfo, EscapedInfo;
+ Instruction *AbortedInfo = nullptr;
+ Instruction *EscapedInfo = nullptr;
};
protected:
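The replacement makes the flag and the pointer one and the same: a visit is aborted (or escaped) exactly when the corresponding instruction pointer is non-null, and the setters now insist on a real instruction. A small sketch of the resulting invariants, assuming the class is reachable as llvm::detail::PtrUseVisitorBase::PtrInfo and Inst is some llvm::Instruction:

    llvm::detail::PtrUseVisitorBase::PtrInfo PI;
    PI.setAborted(&Inst);          // now requires a non-null Instruction*
    assert(PI.isAborted());        // equivalent to getAbortingInst() != nullptr
    assert(PI.getAbortingInst() == &Inst);
    PI.reset();                    // clears both pointers, resetting both flags
    assert(!PI.isAborted() && !PI.isEscaped());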
diff --git a/llvm/include/llvm/Analysis/ScalarEvolution.h b/llvm/include/llvm/Analysis/ScalarEvolution.h
index 44fb249..68b8607 100644
--- a/llvm/include/llvm/Analysis/ScalarEvolution.h
+++ b/llvm/include/llvm/Analysis/ScalarEvolution.h
@@ -823,8 +823,11 @@ public:
/// Returns the upper bound of the loop trip count as a normal unsigned
/// value.
- /// Returns 0 if the trip count is unknown or not constant.
- unsigned getSmallConstantMaxTripCount(const Loop *L);
+ /// Returns 0 if the trip count is unknown, not constant or requires
+ /// SCEV predicates and \p Predicates is nullptr.
+ unsigned getSmallConstantMaxTripCount(
+ const Loop *L,
+ SmallVectorImpl<const SCEVPredicate *> *Predicates = nullptr);
/// Returns the largest constant divisor of the trip count as a normal
/// unsigned value, if possible. This means that the actual trip count is
@@ -905,6 +908,13 @@ public:
return getBackedgeTakenCount(L, ConstantMaximum);
}
+ /// Similar to getConstantMaxBackedgeTakenCount, except it will add a set of
+ /// SCEV predicates to Predicates that are required to be true in order for
+ /// the answer to be correct. Predicates can be checked with run-time
+ /// checks and can be used to perform loop versioning.
+ const SCEV *getPredicatedConstantMaxBackedgeTakenCount(
+ const Loop *L, SmallVectorImpl<const SCEVPredicate *> &Predicates);
+
/// When successful, this returns a SCEV that is greater than or equal
/// to (i.e. a "conservative over-approximation") of the value returned by
/// getBackedgeTakenCount. If such a value cannot be computed, it returns the
@@ -1506,7 +1516,7 @@ private:
/// Expression indicating the least constant maximum backedge-taken count of
/// the loop that is known, or a SCEVCouldNotCompute. This expression is
- /// only valid if the redicates associated with all loop exits are true.
+ /// only valid if the predicates associated with all loop exits are true.
const SCEV *ConstantMax = nullptr;
/// Indicating if \c ExitNotTaken has an element for every exiting block in
@@ -1585,7 +1595,9 @@ private:
}
/// Get the constant max backedge taken count for the loop.
- const SCEV *getConstantMax(ScalarEvolution *SE) const;
+ const SCEV *getConstantMax(
+ ScalarEvolution *SE,
+ SmallVectorImpl<const SCEVPredicate *> *Predicates = nullptr) const;
/// Get the constant max backedge taken count for the particular loop exit.
const SCEV *getConstantMax(
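A hedged sketch of the new predicated query above (SE and L are an existing ScalarEvolution and Loop from surrounding code): as with getPredicatedBackedgeTakenCount, the returned bound is only trustworthy once every accumulated predicate is checked.

    llvm::SmallVector<const llvm::SCEVPredicate *, 4> Preds;
    const llvm::SCEV *MaxBTC =
        SE.getPredicatedConstantMaxBackedgeTakenCount(L, Preds);
    if (!llvm::isa<llvm::SCEVCouldNotCompute>(MaxBTC)) {
      // MaxBTC bounds the backedge-taken count, valid only if the predicates
      // in Preds hold; they can become run-time checks for loop versioning.
    }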
diff --git a/llvm/include/llvm/Analysis/ValueTracking.h b/llvm/include/llvm/Analysis/ValueTracking.h
index de7e7be..5749a34 100644
--- a/llvm/include/llvm/Analysis/ValueTracking.h
+++ b/llvm/include/llvm/Analysis/ValueTracking.h
@@ -805,7 +805,9 @@ bool onlyUsedByLifetimeMarkersOrDroppableInsts(const Value *V);
///
/// If the CtxI is specified this method performs context-sensitive analysis
/// and returns true if it is safe to execute the instruction immediately
-/// before the CtxI.
+/// before the CtxI. If the instruction has (transitive) operands that don't
+/// dominate CtxI, the analysis is performed under the assumption that these
+/// operands will also be speculated to a point before CxtI.
///
/// If the CtxI is NOT specified this method only looks at the instruction
/// itself and its operands, so if this method returns true, it is safe to
diff --git a/llvm/include/llvm/BinaryFormat/ELFRelocs/x86_64.def b/llvm/include/llvm/BinaryFormat/ELFRelocs/x86_64.def
index 18fdcf9..161b196 100644
--- a/llvm/include/llvm/BinaryFormat/ELFRelocs/x86_64.def
+++ b/llvm/include/llvm/BinaryFormat/ELFRelocs/x86_64.def
@@ -43,3 +43,4 @@ ELF_RELOC(R_X86_64_TLSDESC, 36)
ELF_RELOC(R_X86_64_IRELATIVE, 37)
ELF_RELOC(R_X86_64_GOTPCRELX, 41)
ELF_RELOC(R_X86_64_REX_GOTPCRELX, 42)
+ELF_RELOC(R_X86_64_REX2_GOTPCRELX, 43)
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h b/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
index 1729d60..76d51ab8 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
@@ -600,6 +600,9 @@ public:
bool matchRotateOutOfRange(MachineInstr &MI);
void applyRotateOutOfRange(MachineInstr &MI);
+ bool matchUseVectorTruncate(MachineInstr &MI, Register &MatchInfo);
+ void applyUseVectorTruncate(MachineInstr &MI, Register &MatchInfo);
+
/// \returns true if a G_ICMP instruction \p MI can be replaced with a true
/// or false constant based off of KnownBits information.
bool matchICmpToTrueFalseKnownBits(MachineInstr &MI, int64_t &MatchInfo);
diff --git a/llvm/include/llvm/CodeGen/ISDOpcodes.h b/llvm/include/llvm/CodeGen/ISDOpcodes.h
index 18ed60e..da43f5b 100644
--- a/llvm/include/llvm/CodeGen/ISDOpcodes.h
+++ b/llvm/include/llvm/CodeGen/ISDOpcodes.h
@@ -825,7 +825,7 @@ enum NodeType {
/// be saturated against signed values, resulting in `S`, which will combine
/// to `TRUNCATE_SSAT_S`. If the value of C ranges from `0 to 255`, it will
/// be saturated against unsigned values, resulting in `U`, which will
- /// combine to `TRUNATE_SSAT_U`. Similarly, in `truncate(umin(x, C))`, if
+ /// combine to `TRUNCATE_SSAT_U`. Similarly, in `truncate(umin(x, C))`, if
/// value of C ranges from `0 to 255`, it becomes `U` because it is saturated
/// for unsigned values. As a result, it combines to `TRUNCATE_USAT_U`.
TRUNCATE_SSAT_S, // saturate signed input to signed result -
diff --git a/llvm/include/llvm/CodeGen/LiveInterval.h b/llvm/include/llvm/CodeGen/LiveInterval.h
index ad8dec6..cd75de4 100644
--- a/llvm/include/llvm/CodeGen/LiveInterval.h
+++ b/llvm/include/llvm/CodeGen/LiveInterval.h
@@ -662,9 +662,9 @@ namespace llvm {
///
/// Note that this is a no-op when asserts are disabled.
#ifdef NDEBUG
- void verify() const {}
+ [[nodiscard]] bool verify() const { return true; }
#else
- void verify() const;
+ [[nodiscard]] bool verify() const;
#endif
protected:
@@ -893,9 +893,11 @@ namespace llvm {
///
/// Note that this is a no-op when asserts are disabled.
#ifdef NDEBUG
- void verify(const MachineRegisterInfo *MRI = nullptr) const {}
+ [[nodiscard]] bool verify(const MachineRegisterInfo *MRI = nullptr) const {
+ return true;
+ }
#else
- void verify(const MachineRegisterInfo *MRI = nullptr) const;
+ [[nodiscard]] bool verify(const MachineRegisterInfo *MRI = nullptr) const;
#endif
private:
diff --git a/llvm/include/llvm/CodeGen/MIRYamlMapping.h b/llvm/include/llvm/CodeGen/MIRYamlMapping.h
index 304db57..ab8dc44 100644
--- a/llvm/include/llvm/CodeGen/MIRYamlMapping.h
+++ b/llvm/include/llvm/CodeGen/MIRYamlMapping.h
@@ -730,6 +730,11 @@ struct MachineFunction {
bool TracksRegLiveness = false;
bool HasWinCFI = false;
+ // Computed properties that should be overridable
+ std::optional<bool> NoPHIs;
+ std::optional<bool> IsSSA;
+ std::optional<bool> NoVRegs;
+
bool CallsEHReturn = false;
bool CallsUnwindInit = false;
bool HasEHCatchret = false;
@@ -770,6 +775,12 @@ template <> struct MappingTraits<MachineFunction> {
YamlIO.mapOptional("tracksRegLiveness", MF.TracksRegLiveness, false);
YamlIO.mapOptional("hasWinCFI", MF.HasWinCFI, false);
+ // PHIs must not be capitalized, since that would clash with the MIR opcode
+ // and lead to false-positive FileCheck hits with CHECK-NOT.
+ YamlIO.mapOptional("noPhis", MF.NoPHIs, std::optional<bool>());
+ YamlIO.mapOptional("isSSA", MF.IsSSA, std::optional<bool>());
+ YamlIO.mapOptional("noVRegs", MF.NoVRegs, std::optional<bool>());
+
YamlIO.mapOptional("callsEHReturn", MF.CallsEHReturn, false);
YamlIO.mapOptional("callsUnwindInit", MF.CallsUnwindInit, false);
YamlIO.mapOptional("hasEHCatchret", MF.HasEHCatchret, false);
diff --git a/llvm/include/llvm/CodeGen/MachineFunction.h b/llvm/include/llvm/CodeGen/MachineFunction.h
index aeb72ca..5c1da4f 100644
--- a/llvm/include/llvm/CodeGen/MachineFunction.h
+++ b/llvm/include/llvm/CodeGen/MachineFunction.h
@@ -897,13 +897,14 @@ public:
/// for debugger use.
/// \returns true if no problems were found.
bool verify(Pass *p = nullptr, const char *Banner = nullptr,
- bool AbortOnError = true) const;
+ raw_ostream *OS = nullptr, bool AbortOnError = true) const;
/// Run the current MachineFunction through the machine code verifier, useful
/// for debugger use.
/// \returns true if no problems were found.
bool verify(LiveIntervals *LiveInts, SlotIndexes *Indexes,
- const char *Banner = nullptr, bool AbortOnError = true) const;
+ const char *Banner = nullptr, raw_ostream *OS = nullptr,
+ bool AbortOnError = true) const;
// Provide accessors for the MachineBasicBlock list...
using iterator = BasicBlockListType::iterator;
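The extra raw_ostream parameter lets a caller capture verifier diagnostics rather than have them printed directly. A sketch under the updated signature (MF is an existing MachineFunction; the banner text is illustrative):

    std::string Buf;
    llvm::raw_string_ostream VerifierOS(Buf);
    // Collect problems into Buf instead of stderr, and don't abort on error.
    bool Clean = MF.verify(/*p=*/nullptr, "After MyPass", &VerifierOS,
                           /*AbortOnError=*/false);
    if (!Clean)
      llvm::errs() << Buf;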
diff --git a/llvm/include/llvm/CodeGen/RuntimeLibcallUtil.h b/llvm/include/llvm/CodeGen/RuntimeLibcallUtil.h
index 7a13164..045ec7d 100644
--- a/llvm/include/llvm/CodeGen/RuntimeLibcallUtil.h
+++ b/llvm/include/llvm/CodeGen/RuntimeLibcallUtil.h
@@ -62,6 +62,10 @@ Libcall getLDEXP(EVT RetVT);
/// UNKNOWN_LIBCALL if there is none.
Libcall getFREXP(EVT RetVT);
+/// getFSINCOS - Return the FSINCOS_* value for the given types, or
+/// UNKNOWN_LIBCALL if there is none.
+Libcall getFSINCOS(EVT RetVT);
+
/// Return the SYNC_FETCH_AND_* value for the given opcode and type, or
/// UNKNOWN_LIBCALL if there is none.
Libcall getSYNC(unsigned Opc, MVT VT);
diff --git a/llvm/include/llvm/Demangle/ItaniumDemangle.h b/llvm/include/llvm/Demangle/ItaniumDemangle.h
index 31af042..9ada4d7 100644
--- a/llvm/include/llvm/Demangle/ItaniumDemangle.h
+++ b/llvm/include/llvm/Demangle/ItaniumDemangle.h
@@ -4450,6 +4450,7 @@ Node *AbstractManglingParser<Derived, Alloc>::parseType() {
// parse them, take the second production.
if (TryToParseTemplateArgs && look() == 'I') {
+ Subs.push_back(Result);
Node *TA = getDerived().parseTemplateArgs();
if (TA == nullptr)
return nullptr;
diff --git a/llvm/include/llvm/ExecutionEngine/JITLink/x86_64.h b/llvm/include/llvm/ExecutionEngine/JITLink/x86_64.h
index 2f475ed..24cf982 100644
--- a/llvm/include/llvm/ExecutionEngine/JITLink/x86_64.h
+++ b/llvm/include/llvm/ExecutionEngine/JITLink/x86_64.h
@@ -95,6 +95,19 @@ enum EdgeKind_x86_64 : Edge::Kind {
///
Delta32,
+ /// A 16-bit delta.
+ ///
+ /// Delta from the fixup to the target.
+ ///
+ /// Fixup expression:
+ /// Fixup <- Target - Fixup + Addend : int16
+ ///
+ /// Errors:
+ /// - The result of the fixup expression must fit into an int16, otherwise
+ /// an out-of-range error will be returned.
+ ///
+ Delta16,
+
/// An 8-bit delta.
///
/// Delta from the fixup to the target.
@@ -486,6 +499,15 @@ inline Error applyFixup(LinkGraph &G, Block &B, const Edge &E,
break;
}
+ case Delta16: {
+ int64_t Value = E.getTarget().getAddress() - FixupAddress + E.getAddend();
+ if (LLVM_LIKELY(isInt<16>(Value)))
+ *(little16_t *)FixupPtr = Value;
+ else
+ return makeTargetOutOfRangeError(G, B, E);
+ break;
+ }
+
case Delta8: {
int64_t Value = E.getTarget().getAddress() - FixupAddress + E.getAddend();
if (LLVM_LIKELY(isInt<8>(Value)))
diff --git a/llvm/include/llvm/IR/Attributes.h b/llvm/include/llvm/IR/Attributes.h
index 5a80a07..35c8134 100644
--- a/llvm/include/llvm/IR/Attributes.h
+++ b/llvm/include/llvm/IR/Attributes.h
@@ -773,7 +773,7 @@ public:
/// Returns a new list because attribute lists are immutable.
[[nodiscard]] AttributeList
addAllocSizeParamAttr(LLVMContext &C, unsigned ArgNo, unsigned ElemSizeArg,
- const std::optional<unsigned> &NumElemsArg);
+ const std::optional<unsigned> &NumElemsArg) const;
//===--------------------------------------------------------------------===//
// AttributeList Accessors
diff --git a/llvm/include/llvm/IR/IRBuilder.h b/llvm/include/llvm/IR/IRBuilder.h
index 8f83ded..23fd835 100644
--- a/llvm/include/llvm/IR/IRBuilder.h
+++ b/llvm/include/llvm/IR/IRBuilder.h
@@ -2015,6 +2015,7 @@ public:
///
/// If no module is given via \p M, it is taken from the insertion point basic
/// block.
+ LLVM_DEPRECATED("Use CreateGlobalString instead", "CreateGlobalString")
Constant *CreateGlobalStringPtr(StringRef Str, const Twine &Name = "",
unsigned AddressSpace = 0,
Module *M = nullptr, bool AddNull = true) {
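Migration away from the deprecated entry point is mechanical: with opaque pointers, the GlobalVariable returned by CreateGlobalString is itself the pointer value the *Ptr variant used to produce. A sketch:

    // Before (now warns via LLVM_DEPRECATED):
    //   llvm::Constant *Msg = Builder.CreateGlobalStringPtr("hello");
    // After:
    llvm::Value *Msg = Builder.CreateGlobalString("hello");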
diff --git a/llvm/include/llvm/IR/IntrinsicsNVVM.td b/llvm/include/llvm/IR/IntrinsicsNVVM.td
index 39685c9..7b8ffe4 100644
--- a/llvm/include/llvm/IR/IntrinsicsNVVM.td
+++ b/llvm/include/llvm/IR/IntrinsicsNVVM.td
@@ -30,6 +30,18 @@
// * llvm.nvvm.max.ui --> select(x ule y, x, y)
// * llvm.nvvm.max.ull --> ibid.
// * llvm.nvvm.h2f --> llvm.convert.to.fp16.f32
+// * llvm.nvvm.bitcast.f2i --> bitcast
+// * llvm.nvvm.bitcast.i2f --> ibid.
+// * llvm.nvvm.bitcast.d2ll --> ibid.
+// * llvm.nvvm.bitcast.ll2d --> ibid.
+// * llvm.nvvm.ptr.gen.to.global --> addrspacecast
+// * llvm.nvvm.ptr.gen.to.shared --> ibid.
+// * llvm.nvvm.ptr.gen.to.constant --> ibid.
+// * llvm.nvvm.ptr.gen.to.local --> ibid.
+// * llvm.nvvm.ptr.global.to.gen --> ibid.
+// * llvm.nvvm.ptr.shared.to.gen --> ibid.
+// * llvm.nvvm.ptr.constant.to.gen --> ibid.
+// * llvm.nvvm.ptr.local.to.gen --> ibid.
def llvm_global_ptr_ty : LLVMQualPointerType<1>; // (global)ptr
def llvm_shared_ptr_ty : LLVMQualPointerType<3>; // (shared)ptr
@@ -1339,20 +1351,6 @@ let TargetPrefix = "nvvm" in {
def int_nvvm_e5m2x2_to_f16x2_rn_relu : ClangBuiltin<"__nvvm_e5m2x2_to_f16x2_rn_relu">,
Intrinsic<[llvm_v2f16_ty], [llvm_i16_ty], [IntrNoMem, IntrNoCallback]>;
-//
-// Bitcast
-//
-
- def int_nvvm_bitcast_f2i : ClangBuiltin<"__nvvm_bitcast_f2i">,
- DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_float_ty], [IntrNoMem, IntrSpeculatable]>;
- def int_nvvm_bitcast_i2f : ClangBuiltin<"__nvvm_bitcast_i2f">,
- DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_i32_ty], [IntrNoMem, IntrSpeculatable]>;
-
- def int_nvvm_bitcast_ll2d : ClangBuiltin<"__nvvm_bitcast_ll2d">,
- DefaultAttrsIntrinsic<[llvm_double_ty], [llvm_i64_ty], [IntrNoMem, IntrSpeculatable]>;
- def int_nvvm_bitcast_d2ll : ClangBuiltin<"__nvvm_bitcast_d2ll">,
- DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_double_ty], [IntrNoMem, IntrSpeculatable]>;
-
// FNS
def int_nvvm_fns : ClangBuiltin<"__nvvm_fns">,
@@ -1612,40 +1610,6 @@ def int_nvvm_ldg_global_p : Intrinsic<[llvm_anyptr_ty],
[IntrReadMem, IntrArgMemOnly, IntrNoCallback, IntrWillReturn, NoCapture<ArgIndex<0>>],
"llvm.nvvm.ldg.global.p">;
-// Use for generic pointers
-// - These intrinsics are used to convert address spaces.
-// - The input pointer and output pointer must have the same type, except for
-// the address-space. (This restriction is not enforced here as there is
-// currently no way to describe it).
-// - This complements the llvm bitcast, which can be used to cast one type
-// of pointer to another type of pointer, while the address space remains
-// the same.
-def int_nvvm_ptr_local_to_gen: DefaultAttrsIntrinsic<[llvm_anyptr_ty],
- [llvm_anyptr_ty], [IntrNoMem, IntrSpeculatable],
- "llvm.nvvm.ptr.local.to.gen">;
-def int_nvvm_ptr_shared_to_gen: DefaultAttrsIntrinsic<[llvm_anyptr_ty],
- [llvm_anyptr_ty], [IntrNoMem, IntrSpeculatable],
- "llvm.nvvm.ptr.shared.to.gen">;
-def int_nvvm_ptr_global_to_gen: DefaultAttrsIntrinsic<[llvm_anyptr_ty],
- [llvm_anyptr_ty], [IntrNoMem, IntrSpeculatable],
- "llvm.nvvm.ptr.global.to.gen">;
-def int_nvvm_ptr_constant_to_gen: DefaultAttrsIntrinsic<[llvm_anyptr_ty],
- [llvm_anyptr_ty], [IntrNoMem, IntrSpeculatable],
- "llvm.nvvm.ptr.constant.to.gen">;
-
-def int_nvvm_ptr_gen_to_global: DefaultAttrsIntrinsic<[llvm_anyptr_ty],
- [llvm_anyptr_ty], [IntrNoMem, IntrSpeculatable],
- "llvm.nvvm.ptr.gen.to.global">;
-def int_nvvm_ptr_gen_to_shared: DefaultAttrsIntrinsic<[llvm_anyptr_ty],
- [llvm_anyptr_ty], [IntrNoMem, IntrSpeculatable],
- "llvm.nvvm.ptr.gen.to.shared">;
-def int_nvvm_ptr_gen_to_local: DefaultAttrsIntrinsic<[llvm_anyptr_ty],
- [llvm_anyptr_ty], [IntrNoMem, IntrSpeculatable],
- "llvm.nvvm.ptr.gen.to.local">;
-def int_nvvm_ptr_gen_to_constant: DefaultAttrsIntrinsic<[llvm_anyptr_ty],
- [llvm_anyptr_ty], [IntrNoMem, IntrSpeculatable],
- "llvm.nvvm.ptr.gen.to.constant">;
-
// Used in nvvm internally to help address space opt and ptx code generation
// This is for params that are passed to kernel functions by pointer by-val.
def int_nvvm_ptr_gen_to_param: Intrinsic<[llvm_anyptr_ty],
@@ -4489,22 +4453,6 @@ def int_nvvm_sust_p_3d_v4i32_trap
"llvm.nvvm.sust.p.3d.v4i32.trap">,
ClangBuiltin<"__nvvm_sust_p_3d_v4i32_trap">;
-
-def int_nvvm_rotate_b32
- : DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
- [IntrNoMem, IntrSpeculatable], "llvm.nvvm.rotate.b32">,
- ClangBuiltin<"__nvvm_rotate_b32">;
-
-def int_nvvm_rotate_b64
- : DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i32_ty],
- [IntrNoMem, IntrSpeculatable], "llvm.nvvm.rotate.b64">,
- ClangBuiltin<"__nvvm_rotate_b64">;
-
-def int_nvvm_rotate_right_b64
- : DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i32_ty],
- [IntrNoMem, IntrSpeculatable], "llvm.nvvm.rotate.right.b64">,
- ClangBuiltin<"__nvvm_rotate_right_b64">;
-
def int_nvvm_swap_lo_hi_b64
: DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_i64_ty],
[IntrNoMem, IntrSpeculatable], "llvm.nvvm.swap.lo.hi.b64">,
diff --git a/llvm/include/llvm/IR/LLVMContext.h b/llvm/include/llvm/IR/LLVMContext.h
index 6ffa2bd..558816e 100644
--- a/llvm/include/llvm/IR/LLVMContext.h
+++ b/llvm/include/llvm/IR/LLVMContext.h
@@ -316,17 +316,6 @@ public:
/// LLVMContext is used by compilation.
void setOptPassGate(OptPassGate&);
- /// Set whether opaque pointers are enabled. The method may be called multiple
- /// times, but only with the same value. Note that creating a pointer type or
- /// otherwise querying the opaque pointer mode performs an implicit set to
- /// the default value.
- [[deprecated("Opaque pointers are always enabled")]]
- void setOpaquePointers(bool Enable) const;
-
- /// Whether typed pointers are supported. If false, all pointers are opaque.
- [[deprecated("Always returns false")]]
- bool supportsTypedPointers() const;
-
/// Get or set the current "default" target CPU (target-cpu function
/// attribute). The intent is that compiler frontends will set this to a value
/// that reflects the attribute that a function would get "by default" without
diff --git a/llvm/include/llvm/IR/Type.h b/llvm/include/llvm/IR/Type.h
index b88c8ae..2f53197 100644
--- a/llvm/include/llvm/IR/Type.h
+++ b/llvm/include/llvm/IR/Type.h
@@ -250,10 +250,6 @@ public:
/// True if this is an instance of PointerType.
bool isPointerTy() const { return getTypeID() == PointerTyID; }
- /// True if this is an instance of an opaque PointerType.
- LLVM_DEPRECATED("Use isPointerTy() instead", "isPointerTy")
- bool isOpaquePointerTy() const { return isPointerTy(); };
-
/// Return true if this is a pointer type or a vector of pointer types.
bool isPtrOrPtrVectorTy() const { return getScalarType()->isPointerTy(); }
@@ -406,14 +402,6 @@ public:
inline StringRef getTargetExtName() const;
- /// Only use this method in code that is not reachable with opaque pointers,
- /// or part of deprecated methods that will be removed as part of the opaque
- /// pointers transition.
- [[deprecated("Pointers no longer have element types")]]
- Type *getNonOpaquePointerElementType() const {
- llvm_unreachable("Pointers no longer have element types");
- }
-
/// Given vector type, change the element type,
/// whilst keeping the old number of elements.
/// For non-vectors simply returns \p EltTy.
diff --git a/llvm/include/llvm/MC/MCTargetOptions.h b/llvm/include/llvm/MC/MCTargetOptions.h
index 2e2025c..7b0d81f 100644
--- a/llvm/include/llvm/MC/MCTargetOptions.h
+++ b/llvm/include/llvm/MC/MCTargetOptions.h
@@ -72,6 +72,8 @@ public:
bool X86Sse2Avx = false;
+ std::optional<unsigned> OutputAsmVariant;
+
EmitDwarfUnwindType EmitDwarfUnwind;
int DwarfVersion = 0;
diff --git a/llvm/include/llvm/Option/OptTable.h b/llvm/include/llvm/Option/OptTable.h
index d8bf292..8fabc78 100644
--- a/llvm/include/llvm/Option/OptTable.h
+++ b/llvm/include/llvm/Option/OptTable.h
@@ -64,7 +64,7 @@ public:
// the program, HelpText is used instead. This cannot use std::vector
// because OptTable is used in constexpr contexts. Increase the array sizes
// here if you need more entries and adjust the constants in
- // OptParserEmitter::EmitHelpTextsForVariants.
+ // OptionParserEmitter::EmitHelpTextsForVariants.
std::array<std::pair<std::array<unsigned int, 2 /*MaxVisibilityPerHelp*/>,
const char *>,
1 /*MaxVisibilityHelp*/>
diff --git a/llvm/include/llvm/ProfileData/MemProf.h b/llvm/include/llvm/ProfileData/MemProf.h
index 892865a..f8121d3 100644
--- a/llvm/include/llvm/ProfileData/MemProf.h
+++ b/llvm/include/llvm/ProfileData/MemProf.h
@@ -7,8 +7,10 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/ProfileData/MemProfData.inc"
+#include "llvm/Support/BLAKE3.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/EndianStream.h"
+#include "llvm/Support/HashBuilder.h"
#include "llvm/Support/raw_ostream.h"
#include <bitset>
@@ -308,24 +310,19 @@ struct Frame {
<< " Inline: " << IsInlineFrame << "\n";
}
- // Return a hash value based on the contents of the frame. Here we don't use
- // hashing from llvm ADT since we are going to persist the hash id, the hash
- // combine algorithm in ADT uses a new randomized seed each time.
+ // Return a hash value based on the contents of the frame. Here we use a
+ // cryptographic hash function to minimize the chance of hash collisions. We
+ // do persist FrameIds as part of memprof formats up to Version 2, inclusive.
+ // However, the deserializer never calls this function; it uses FrameIds
+ // merely as keys to look up Frames proper.
inline FrameId hash() const {
- auto HashCombine = [](auto Value, size_t Seed) {
- std::hash<decltype(Value)> Hasher;
- // The constant used below is the 64 bit representation of the fractional
- // part of the golden ratio. Used here for the randomness in their bit
- // pattern.
- return Hasher(Value) + 0x9e3779b97f4a7c15 + (Seed << 6) + (Seed >> 2);
- };
-
- size_t Result = 0;
- Result ^= HashCombine(Function, Result);
- Result ^= HashCombine(LineOffset, Result);
- Result ^= HashCombine(Column, Result);
- Result ^= HashCombine(IsInlineFrame, Result);
- return static_cast<FrameId>(Result);
+ llvm::HashBuilder<llvm::TruncatedBLAKE3<8>, llvm::endianness::little>
+ HashBuilder;
+ HashBuilder.add(Function, LineOffset, Column, IsInlineFrame);
+ llvm::BLAKE3Result<8> Hash = HashBuilder.final();
+ FrameId Id;
+ std::memcpy(&Id, Hash.data(), sizeof(Hash));
+ return Id;
}
};
diff --git a/llvm/include/llvm/SandboxIR/SandboxIR.h b/llvm/include/llvm/SandboxIR/SandboxIR.h
index 3f8946a7..d4c907c 100644
--- a/llvm/include/llvm/SandboxIR/SandboxIR.h
+++ b/llvm/include/llvm/SandboxIR/SandboxIR.h
@@ -135,6 +135,7 @@ class GlobalVariable;
class GlobalAlias;
class NoCFIValue;
class ConstantPtrAuth;
+class ConstantExpr;
class Context;
class Function;
class Instruction;
@@ -344,6 +345,11 @@ protected:
friend class GlobalAlias; // For `Val`.
friend class NoCFIValue; // For `Val`.
friend class ConstantPtrAuth; // For `Val`.
+ friend class ConstantExpr; // For `Val`.
+
+ // Region needs to manipulate metadata in the underlying LLVM Value; we don't
+ // expose metadata in sandboxir.
+ friend class Region;
/// All values point to the context.
Context &Ctx;
@@ -1661,6 +1667,19 @@ public:
}
};
+class ConstantExpr : public Constant {
+ ConstantExpr(llvm::ConstantExpr *C, Context &Ctx)
+ : Constant(ClassID::ConstantExpr, C, Ctx) {}
+ friend class Context; // For constructor.
+
+public:
+ /// For isa/dyn_cast.
+ static bool classof(const sandboxir::Value *From) {
+ return From->getSubclassID() == ClassID::ConstantExpr;
+ }
+ // TODO: Missing functions.
+};
+
class BlockAddress final : public Constant {
BlockAddress(llvm::BlockAddress *C, Context &Ctx)
: Constant(ClassID::BlockAddress, C, Ctx) {}
@@ -1916,6 +1935,22 @@ public:
/// \Returns this Instruction's opcode. Note that SandboxIR has its own opcode
/// state to allow for new SandboxIR-specific instructions.
Opcode getOpcode() const { return Opc; }
+
+ // TODO: Missing function getOpcodeName().
+
+ bool isTerminator() const {
+ return cast<llvm::Instruction>(Val)->isTerminator();
+ }
+ bool isUnaryOp() const { return cast<llvm::Instruction>(Val)->isUnaryOp(); }
+ bool isBinaryOp() const { return cast<llvm::Instruction>(Val)->isBinaryOp(); }
+ bool isIntDivRem() const {
+ return cast<llvm::Instruction>(Val)->isIntDivRem();
+ }
+ bool isShift() const { return cast<llvm::Instruction>(Val)->isShift(); }
+ bool isCast() const { return cast<llvm::Instruction>(Val)->isCast(); }
+
+ // TODO: More missing functions
+
/// Detach this from its parent BasicBlock without deleting it.
void removeFromParent();
/// Detach this Value from its parent and delete it.
@@ -2042,6 +2077,61 @@ public:
/// LangRef.html for the meaning of these flags.
void copyFastMathFlags(FastMathFlags FMF);
+ bool isAssociative() const {
+ return cast<llvm::Instruction>(Val)->isAssociative();
+ }
+
+ bool isCommutative() const {
+ return cast<llvm::Instruction>(Val)->isCommutative();
+ }
+
+ bool isIdempotent() const {
+ return cast<llvm::Instruction>(Val)->isIdempotent();
+ }
+
+ bool isNilpotent() const {
+ return cast<llvm::Instruction>(Val)->isNilpotent();
+ }
+
+ bool mayWriteToMemory() const {
+ return cast<llvm::Instruction>(Val)->mayWriteToMemory();
+ }
+
+ bool mayReadFromMemory() const {
+ return cast<llvm::Instruction>(Val)->mayReadFromMemory();
+ }
+ bool mayReadOrWriteMemory() const {
+ return cast<llvm::Instruction>(Val)->mayReadOrWriteMemory();
+ }
+
+ bool isAtomic() const { return cast<llvm::Instruction>(Val)->isAtomic(); }
+
+ bool hasAtomicLoad() const {
+ return cast<llvm::Instruction>(Val)->hasAtomicLoad();
+ }
+
+ bool hasAtomicStore() const {
+ return cast<llvm::Instruction>(Val)->hasAtomicStore();
+ }
+
+ bool isVolatile() const { return cast<llvm::Instruction>(Val)->isVolatile(); }
+
+ Type *getAccessType() const;
+
+ bool mayThrow(bool IncludePhaseOneUnwind = false) const {
+ return cast<llvm::Instruction>(Val)->mayThrow(IncludePhaseOneUnwind);
+ }
+
+ bool isFenceLike() const {
+ return cast<llvm::Instruction>(Val)->isFenceLike();
+ }
+
+ bool mayHaveSideEffects() const {
+ return cast<llvm::Instruction>(Val)->mayHaveSideEffects();
+ }
+
+ // TODO: Missing functions.
+
bool isStackSaveOrRestoreIntrinsic() const {
auto *I = cast<llvm::Instruction>(Val);
return match(I,
@@ -4453,9 +4543,11 @@ protected:
friend class PointerType; // For LLVMCtx.
friend class CmpInst; // For LLVMCtx. TODO: cleanup when sandboxir::VectorType
// is complete
- friend class IntegerType; // For LLVMCtx.
- friend class StructType; // For LLVMCtx.
+ friend class IntegerType; // For LLVMCtx.
+ friend class StructType; // For LLVMCtx.
friend class ::llvm::TargetExtType; // For LLVMCtx.
+ friend class Region; // For LLVMCtx.
+
Tracker IRTracker;
/// Maps LLVM Value to the corresponding sandboxir::Value. Owns all
diff --git a/llvm/include/llvm/SandboxIR/SandboxIRValues.def b/llvm/include/llvm/SandboxIR/SandboxIRValues.def
index 3367c7d..2a9ca6d 100644
--- a/llvm/include/llvm/SandboxIR/SandboxIRValues.def
+++ b/llvm/include/llvm/SandboxIR/SandboxIRValues.def
@@ -40,6 +40,7 @@ DEF_CONST(GlobalAlias, GlobalAlias)
DEF_CONST(BlockAddress, BlockAddress)
DEF_CONST(NoCFIValue, NoCFIValue)
DEF_CONST(ConstantPtrAuth, ConstantPtrAuth)
+DEF_CONST(ConstantExpr, ConstantExpr)
DEF_CONST(DSOLocalEquivalent, DSOLocalEquivalent)
DEF_CONST(ConstantTokenNone, ConstantTokenNone)
diff --git a/llvm/include/llvm/SandboxIR/Type.h b/llvm/include/llvm/SandboxIR/Type.h
index a2ac9e0..f99f809 100644
--- a/llvm/include/llvm/SandboxIR/Type.h
+++ b/llvm/include/llvm/SandboxIR/Type.h
@@ -56,6 +56,7 @@ protected:
friend class ConstantVector; // For LLVMTy.
friend class CmpInst; // For LLVMTy. TODO: Cleanup after
// sandboxir::VectorType is more complete.
+ friend class Utils; // For LLVMTy.
// Friend all instruction classes because `create()` functions use LLVMTy.
#define DEF_INSTR(ID, OPCODE, CLASS) friend class CLASS;
diff --git a/llvm/include/llvm/SandboxIR/Utils.h b/llvm/include/llvm/SandboxIR/Utils.h
new file mode 100644
index 0000000..ccc0030
--- /dev/null
+++ b/llvm/include/llvm/SandboxIR/Utils.h
@@ -0,0 +1,54 @@
+//===- Utils.h --------------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Collector for SandboxIR-related convenience functions that don't belong in
+// other classes.
+
+#ifndef LLVM_SANDBOXIR_UTILS_H
+#define LLVM_SANDBOXIR_UTILS_H
+
+namespace llvm::sandboxir {
+
+class Utils {
+public:
+ /// \Returns the expected type of \p V. For most Values this is
+ /// equivalent to getType, but for stores returns the stored type, rather
+ /// than void, and for ReturnInsts returns the returned type.
+ static Type *getExpectedType(const Value *V) {
+ if (auto *I = dyn_cast<Instruction>(V)) {
+ // A Return's value operand can be null if it returns void.
+ if (auto *RI = dyn_cast<ReturnInst>(I)) {
+ if (RI->getReturnValue() == nullptr)
+ return RI->getType();
+ }
+ return getExpectedValue(I)->getType();
+ }
+ return V->getType();
+ }
+
+ /// \Returns the expected Value for this instruction. For most instructions,
+ /// this is the instruction itself, but for stores returns the stored
+ /// operand, and for ReturnInsts returns the returned value.
+ static Value *getExpectedValue(const Instruction *I) {
+ if (auto *SI = dyn_cast<StoreInst>(I))
+ return SI->getValueOperand();
+ if (auto *RI = dyn_cast<ReturnInst>(I))
+ return RI->getReturnValue();
+ return const_cast<Instruction *>(I);
+ }
+
+ /// \Returns the number of bits required to represent the operands or return
+ /// value of \p V in \p DL.
+ static unsigned getNumBits(Value *V, const DataLayout &DL) {
+ Type *Ty = getExpectedType(V);
+ return DL.getTypeSizeInBits(Ty->LLVMTy);
+ }
+};
+} // namespace llvm::sandboxir
+
+#endif // LLVM_SANDBOXIR_UTILS_H
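A hedged usage sketch for the new helpers (SI is a sandboxir::StoreInst * and DL a DataLayout from surrounding vectorizer code, not from this patch):

    // For a store, the "expected" type is the stored value's type, not void.
    llvm::sandboxir::Type *Ty = llvm::sandboxir::Utils::getExpectedType(SI);
    // Size of that type in bits under the module's data layout.
    unsigned Bits = llvm::sandboxir::Utils::getNumBits(SI, DL);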
diff --git a/llvm/include/llvm/Support/OptionStrCmp.h b/llvm/include/llvm/Support/OptionStrCmp.h
index d417fe6..f3d3c2a 100644
--- a/llvm/include/llvm/Support/OptionStrCmp.h
+++ b/llvm/include/llvm/Support/OptionStrCmp.h
@@ -1,32 +1,32 @@
-//===- OptionStrCmp.h - Option String Comparison ----------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_SUPPORT_OPTIONSTRCMP_H
-#define LLVM_SUPPORT_OPTIONSTRCMP_H
-
-#include "llvm/ADT/ArrayRef.h"
-#include "llvm/ADT/StringRef.h"
-
-namespace llvm {
-
-// Comparison function for Option strings (option names & prefixes).
-// The ordering is *almost* case-insensitive lexicographic, with an exception.
-// '\0' comes at the end of the alphabet instead of the beginning (thus options
-// precede any other options which prefix them). Additionally, if two options
-// are identical ignoring case, they are ordered according to case sensitive
-// ordering if `FallbackCaseSensitive` is true.
-int StrCmpOptionName(StringRef A, StringRef B,
- bool FallbackCaseSensitive = true);
-
-// Comparison function for Option prefixes.
-int StrCmpOptionPrefixes(ArrayRef<StringRef> APrefixes,
- ArrayRef<StringRef> BPrefixes);
-
-} // namespace llvm
-
-#endif // LLVM_SUPPORT_OPTIONSTRCMP_H
+//===- OptionStrCmp.h - Option String Comparison ----------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_OPTIONSTRCMP_H
+#define LLVM_SUPPORT_OPTIONSTRCMP_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/StringRef.h"
+
+namespace llvm {
+
+// Comparison function for Option strings (option names & prefixes).
+// The ordering is *almost* case-insensitive lexicographic, with an exception.
+// '\0' comes at the end of the alphabet instead of the beginning (thus options
+// precede any other options which prefix them). Additionally, if two options
+// are identical ignoring case, they are ordered according to case sensitive
+// ordering if `FallbackCaseSensitive` is true.
+int StrCmpOptionName(StringRef A, StringRef B,
+ bool FallbackCaseSensitive = true);
+
+// Comparison function for Option prefixes.
+int StrCmpOptionPrefixes(ArrayRef<StringRef> APrefixes,
+ ArrayRef<StringRef> BPrefixes);
+
+} // namespace llvm
+
+#endif // LLVM_SUPPORT_OPTIONSTRCMP_H
diff --git a/llvm/include/llvm/Support/raw_ostream.h b/llvm/include/llvm/Support/raw_ostream.h
index 34f91cb..c2f2299 100644
--- a/llvm/include/llvm/Support/raw_ostream.h
+++ b/llvm/include/llvm/Support/raw_ostream.h
@@ -774,18 +774,33 @@ public:
// you can use
// OS << indent(6) << "more stuff";
// which has better ergonomics (and clang-formats better as well).
+//
+// If indentation is always in increments of a fixed value, you can use Scale
+// to set that value once. So indent(1, 2) will add 2 spaces and
+// indent(1, 2) + 1 will add 4 spaces.
struct indent {
- unsigned NumSpaces;
-
- explicit indent(unsigned NumSpaces) : NumSpaces(NumSpaces) {}
- void operator+=(unsigned N) { NumSpaces += N; }
- void operator-=(unsigned N) { NumSpaces -= N; }
- indent operator+(unsigned N) const { return indent(NumSpaces + N); }
- indent operator-(unsigned N) const { return indent(NumSpaces - N); }
+ // Indentation is represented as `NumIndents` steps of size `Scale` each.
+ unsigned NumIndents;
+ unsigned Scale;
+
+ explicit indent(unsigned NumIndents, unsigned Scale = 1)
+ : NumIndents(NumIndents), Scale(Scale) {}
+
+  // These arithmetic operators preserve scale.
+ void operator+=(unsigned N) { NumIndents += N; }
+ void operator-=(unsigned N) {
+ assert(NumIndents >= N && "Indentation underflow");
+ NumIndents -= N;
+ }
+ indent operator+(unsigned N) const { return indent(NumIndents + N, Scale); }
+ indent operator-(unsigned N) const {
+    assert(NumIndents >= N && "Indentation underflow");
+ return indent(NumIndents - N, Scale);
+ }
};
inline raw_ostream &operator<<(raw_ostream &OS, const indent &Indent) {
- return OS.indent(Indent.NumSpaces);
+ return OS.indent(Indent.NumIndents * Indent.Scale);
}
class Error;
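For illustration, a hedged sketch of the scaled form in use; the printing function is hypothetical:

    #include "llvm/Support/raw_ostream.h"

    // Hypothetical printer (not from the patch): one indent step == 2 spaces.
    static void printNode(llvm::raw_ostream &OS, unsigned Depth) {
      llvm::indent Ind(Depth, /*Scale=*/2);
      OS << Ind << "node\n";      // emits 2 * Depth spaces
      OS << Ind + 1 << "child\n"; // emits 2 * (Depth + 1) spaces
    }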
diff --git a/llvm/include/llvm/TableGen/Record.h b/llvm/include/llvm/TableGen/Record.h
index 5348c11..4cd73c3 100644
--- a/llvm/include/llvm/TableGen/Record.h
+++ b/llvm/include/llvm/TableGen/Record.h
@@ -847,7 +847,8 @@ public:
EMPTY,
GETDAGOP,
LOG2,
- REPR
+ REPR,
+ LISTFLATTEN,
};
private:
diff --git a/llvm/include/llvm/Target/GlobalISel/Combine.td b/llvm/include/llvm/Target/GlobalISel/Combine.td
index be33c77..f838c6e 100644
--- a/llvm/include/llvm/Target/GlobalISel/Combine.td
+++ b/llvm/include/llvm/Target/GlobalISel/Combine.td
@@ -1505,6 +1505,13 @@ def insert_vector_elt_oob : GICombineRule<
[{ return Helper.matchInsertVectorElementOOB(*${root}, ${matchinfo}); }]),
(apply [{ Helper.applyBuildFn(*${root}, ${matchinfo}); }])>;
+// Combine v8i8 (buildvector i8 (trunc(unmerge)), i8 (trunc), i8 (trunc),
+// i8 (trunc), undef, undef, undef, undef) into a single vector truncate.
+def combine_use_vector_truncate : GICombineRule<
+ (defs root:$root, register_matchinfo:$matchinfo),
+ (match (G_BUILD_VECTOR $dst, GIVariadic<>:$unused):$root,
+ [{ return Helper.matchUseVectorTruncate(*${root}, ${matchinfo}); }]),
+ (apply [{ Helper.applyUseVectorTruncate(*${root}, ${matchinfo}); }])>;
+
def add_of_vscale : GICombineRule<
(defs root:$root, build_fn_matchinfo:$matchinfo),
(match (G_VSCALE $left, $imm1),
@@ -1912,7 +1919,8 @@ def all_combines : GICombineGroup<[integer_reassoc_combines, trivial_combines,
sub_add_reg, select_to_minmax,
fsub_to_fneg, commute_constant_to_rhs, match_ands, match_ors,
combine_concat_vector, match_addos,
- sext_trunc, zext_trunc, prefer_sign_combines, combine_shuffle_concat]>;
+ sext_trunc, zext_trunc, prefer_sign_combines, combine_shuffle_concat,
+ combine_use_vector_truncate]>;
// A combine group used for prelegalizer combiners at -O0. The combines in
// this group have been selected based on experiments to balance code size and
diff --git a/llvm/include/llvm/Transforms/Vectorize/LoopVectorizationLegality.h b/llvm/include/llvm/Transforms/Vectorize/LoopVectorizationLegality.h
index 0910614..f5b9191 100644
--- a/llvm/include/llvm/Transforms/Vectorize/LoopVectorizationLegality.h
+++ b/llvm/include/llvm/Transforms/Vectorize/LoopVectorizationLegality.h
@@ -377,19 +377,19 @@ public:
return LAI->getDepChecker().getMaxSafeVectorWidthInBits();
}
- /// Returns true if the loop has a speculative early exit, i.e. an
+ /// Returns true if the loop has an uncountable early exit, i.e. an
/// uncountable exit that isn't the latch block.
- bool hasSpeculativeEarlyExit() const { return HasSpeculativeEarlyExit; }
+ bool hasUncountableEarlyExit() const { return HasUncountableEarlyExit; }
- /// Returns the speculative early exiting block.
- BasicBlock *getSpeculativeEarlyExitingBlock() const {
+ /// Returns the uncountable early exiting block.
+ BasicBlock *getUncountableEarlyExitingBlock() const {
assert(getUncountableExitingBlocks().size() == 1 &&
"Expected only a single uncountable exiting block");
return getUncountableExitingBlocks()[0];
}
- /// Returns the destination of a speculative early exiting block.
- BasicBlock *getSpeculativeEarlyExitBlock() const {
+ /// Returns the destination of an uncountable early exiting block.
+ BasicBlock *getUncountableEarlyExitBlock() const {
assert(getUncountableExitBlocks().size() == 1 &&
"Expected only a single uncountable exit block");
return getUncountableExitBlocks()[0];
@@ -603,15 +603,17 @@ private:
/// the use of those function variants.
bool VecCallVariantsFound = false;
- /// Indicates whether this loop has a speculative early exit, i.e. an
+ /// Indicates whether this loop has an uncountable early exit, i.e. an
/// uncountable exiting block that is not the latch.
- bool HasSpeculativeEarlyExit = false;
+ bool HasUncountableEarlyExit = false;
- /// Keep track of all the loop exiting blocks.
+ /// Keep track of all the countable and uncountable exiting blocks if
+ /// the exact backedge taken count is not computable.
SmallVector<BasicBlock *, 4> CountableExitingBlocks;
SmallVector<BasicBlock *, 4> UncountableExitingBlocks;
- /// Keep track of the destinations of all uncountable exits.
+ /// Keep track of the destinations of all uncountable exits if the
+ /// exact backedge taken count is not computable.
SmallVector<BasicBlock *, 4> UncountableExitBlocks;
};
diff --git a/llvm/include/llvm/Transforms/Vectorize/SandboxVectorizer/Region.h b/llvm/include/llvm/Transforms/Vectorize/SandboxVectorizer/Region.h
index 2f893ba..884f132 100644
--- a/llvm/include/llvm/Transforms/Vectorize/SandboxVectorizer/Region.h
+++ b/llvm/include/llvm/Transforms/Vectorize/SandboxVectorizer/Region.h
@@ -9,10 +9,11 @@
#ifndef LLVM_TRANSFORMS_VECTORIZE_SANDBOXVECTORIZER_REGION_H
#define LLVM_TRANSFORMS_VECTORIZE_SANDBOXVECTORIZER_REGION_H
+#include <memory>
+
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/SandboxIR/SandboxIR.h"
-#include "llvm/Support/InstructionCost.h"
#include "llvm/Support/raw_ostream.h"
namespace llvm::sandboxir {
@@ -55,8 +56,10 @@ class Region {
/// vectorization are part of the Region.
SetVector<Instruction *> Insts;
- /// A unique ID, used for debugging.
- unsigned RegionID = 0;
+ /// MDNode that we'll use to mark instructions as being part of the region.
+ MDNode *RegionMDN;
+ static constexpr const char *MDKind = "sandboxvec";
+ static constexpr const char *RegionStr = "sandboxregion";
Context &Ctx;
@@ -68,8 +71,6 @@ public:
~Region();
Context &getContext() const { return Ctx; }
- /// Returns the region's unique ID.
- unsigned getID() const { return RegionID; }
/// Adds I to the set.
void add(Instruction *I);
@@ -85,6 +86,8 @@ public:
iterator end() { return Insts.end(); }
iterator_range<iterator> insts() { return make_range(begin(), end()); }
+ static SmallVector<std::unique_ptr<Region>> createRegionsFromMD(Function &F);
+
#ifndef NDEBUG
/// This is an expensive check, meant for testing.
bool operator==(const Region &Other) const;
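As a hedged sketch of how the metadata round-trip might be consumed (the traversal function is hypothetical; regions are assumed to have been tagged via Region::add() as described above):

    #include "llvm/Transforms/Vectorize/SandboxVectorizer/Region.h"

    // Hypothetical consumer (not from the patch): rebuild all regions of F
    // from their "sandboxvec" instruction metadata and walk them.
    static void forEachRegionInst(llvm::sandboxir::Function &F) {
      for (auto &R : llvm::sandboxir::Region::createRegionsFromMD(F))
        for (llvm::sandboxir::Instruction *I : R->insts())
          (void)I; // visit I
    }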
diff --git a/llvm/lib/Analysis/CtxProfAnalysis.cpp b/llvm/lib/Analysis/CtxProfAnalysis.cpp
index 3df7298..7517011 100644
--- a/llvm/lib/Analysis/CtxProfAnalysis.cpp
+++ b/llvm/lib/Analysis/CtxProfAnalysis.cpp
@@ -254,6 +254,15 @@ InstrProfIncrementInst *CtxProfAnalysis::getBBInstrumentation(BasicBlock &BB) {
return nullptr;
}
+InstrProfIncrementInstStep *
+CtxProfAnalysis::getSelectInstrumentation(SelectInst &SI) {
+ Instruction *Prev = &SI;
+ while ((Prev = Prev->getPrevNode()))
+ if (auto *Step = dyn_cast<InstrProfIncrementInstStep>(Prev))
+ return Step;
+ return nullptr;
+}
+
template <class ProfilesTy, class ProfTy>
static void preorderVisit(ProfilesTy &Profiles,
function_ref<void(ProfTy &)> Visitor,
diff --git a/llvm/lib/Analysis/DXILMetadataAnalysis.cpp b/llvm/lib/Analysis/DXILMetadataAnalysis.cpp
index cebfe4b8..a7f666a 100644
--- a/llvm/lib/Analysis/DXILMetadataAnalysis.cpp
+++ b/llvm/lib/Analysis/DXILMetadataAnalysis.cpp
@@ -27,7 +27,7 @@ static ModuleMetadataInfo collectMetadataInfo(Module &M) {
Triple TT(Triple(M.getTargetTriple()));
MMDAI.DXILVersion = TT.getDXILVersion();
MMDAI.ShaderModelVersion = TT.getOSVersion();
- MMDAI.ShaderStage = TT.getEnvironment();
+ MMDAI.ShaderProfile = TT.getEnvironment();
NamedMDNode *ValidatorVerNode = M.getNamedMetadata("dx.valver");
if (ValidatorVerNode) {
auto *ValVerMD = cast<MDNode>(ValidatorVerNode->getOperand(0));
@@ -42,7 +42,7 @@ static ModuleMetadataInfo collectMetadataInfo(Module &M) {
if (!F.hasFnAttribute("hlsl.shader"))
continue;
- EntryProperties EFP(F);
+ EntryProperties EFP(&F);
// Get "hlsl.shader" attribute
Attribute EntryAttr = F.getFnAttribute("hlsl.shader");
assert(EntryAttr.isValid() &&
@@ -74,8 +74,8 @@ static ModuleMetadataInfo collectMetadataInfo(Module &M) {
void ModuleMetadataInfo::print(raw_ostream &OS) const {
OS << "Shader Model Version : " << ShaderModelVersion.getAsString() << "\n";
OS << "DXIL Version : " << DXILVersion.getAsString() << "\n";
- OS << "Target Shader Stage : " << Triple::getEnvironmentTypeName(ShaderStage)
- << "\n";
+ OS << "Target Shader Stage : "
+ << Triple::getEnvironmentTypeName(ShaderProfile) << "\n";
OS << "Validator Version : " << ValidatorVersion.getAsString() << "\n";
for (const auto &EP : EntryPropertyVec) {
OS << " " << EP.Entry->getName() << "\n";
diff --git a/llvm/lib/Analysis/Loads.cpp b/llvm/lib/Analysis/Loads.cpp
index 957ac88..f4b2027 100644
--- a/llvm/lib/Analysis/Loads.cpp
+++ b/llvm/lib/Analysis/Loads.cpp
@@ -104,6 +104,17 @@ static bool isDereferenceableAndAlignedPointer(
if (CheckForNonNull &&
!isKnownNonZero(V, SimplifyQuery(DL, DT, AC, CtxI)))
return false;
+ // When using something like !dereferenceable on a load, the
+ // dereferenceability may only be valid on a specific control-flow path.
+ // If the instruction doesn't dominate the context instruction, we're
+ // asking about dereferenceability under the assumption that the
+ // instruction has been speculated to the point of the context instruction,
+ // in which case we don't know if the dereferenceability info still holds.
+ // We don't bother handling allocas here, as they aren't speculatable
+ // anyway.
+ auto *I = dyn_cast<Instruction>(V);
+ if (I && !isa<AllocaInst>(I))
+ return CtxI && isValidAssumeForContext(I, CtxI, DT);
return true;
};
if (IsKnownDeref()) {
@@ -265,10 +276,9 @@ static bool AreEquivalentAddressValues(const Value *A, const Value *B) {
return false;
}
-bool llvm::isDereferenceableAndAlignedInLoop(LoadInst *LI, Loop *L,
- ScalarEvolution &SE,
- DominatorTree &DT,
- AssumptionCache *AC) {
+bool llvm::isDereferenceableAndAlignedInLoop(
+ LoadInst *LI, Loop *L, ScalarEvolution &SE, DominatorTree &DT,
+ AssumptionCache *AC, SmallVectorImpl<const SCEVPredicate *> *Predicates) {
auto &DL = LI->getDataLayout();
Value *Ptr = LI->getPointerOperand();
@@ -293,7 +303,7 @@ bool llvm::isDereferenceableAndAlignedInLoop(LoadInst *LI, Loop *L,
if (!Step)
return false;
- auto TC = SE.getSmallConstantMaxTripCount(L);
+ auto TC = SE.getSmallConstantMaxTripCount(L, Predicates);
if (!TC)
return false;
@@ -799,13 +809,13 @@ bool llvm::canReplacePointersIfEqual(const Value *From, const Value *To,
return isPointerAlwaysReplaceable(From, To, DL);
}
-bool llvm::isDereferenceableReadOnlyLoop(Loop *L, ScalarEvolution *SE,
- DominatorTree *DT,
- AssumptionCache *AC) {
+bool llvm::isDereferenceableReadOnlyLoop(
+ Loop *L, ScalarEvolution *SE, DominatorTree *DT, AssumptionCache *AC,
+ SmallVectorImpl<const SCEVPredicate *> *Predicates) {
for (BasicBlock *BB : L->blocks()) {
for (Instruction &I : *BB) {
if (auto *LI = dyn_cast<LoadInst>(&I)) {
- if (!isDereferenceableAndAlignedInLoop(LI, L, *SE, *DT, AC))
+ if (!isDereferenceableAndAlignedInLoop(LI, L, *SE, *DT, AC, Predicates))
return false;
} else if (I.mayReadFromMemory() || I.mayWriteToMemory() || I.mayThrow())
return false;
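A hedged sketch of a caller using the new Predicates out-parameter; the wrapper is hypothetical:

    #include "llvm/Analysis/AssumptionCache.h"
    #include "llvm/Analysis/Loads.h"
    #include "llvm/Analysis/LoopInfo.h"
    #include "llvm/Analysis/ScalarEvolution.h"
    #include "llvm/IR/Dominators.h"

    // Hypothetical wrapper (not from the patch): Preds receives the SCEV
    // predicates under which "every load in L is dereferenceable" holds,
    // e.g. as input to loop versioning.
    static bool derefReadOnlyLoopWithPreds(
        llvm::Loop *L, llvm::ScalarEvolution &SE, llvm::DominatorTree &DT,
        llvm::AssumptionCache &AC,
        llvm::SmallVectorImpl<const llvm::SCEVPredicate *> &Preds) {
      return llvm::isDereferenceableReadOnlyLoop(L, &SE, &DT, &AC, &Preds);
    }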
diff --git a/llvm/lib/Analysis/LoopAccessAnalysis.cpp b/llvm/lib/Analysis/LoopAccessAnalysis.cpp
index 980f142..3f18972 100644
--- a/llvm/lib/Analysis/LoopAccessAnalysis.cpp
+++ b/llvm/lib/Analysis/LoopAccessAnalysis.cpp
@@ -2449,13 +2449,20 @@ bool LoopAccessInfo::analyzeLoop(AAResults *AA, const LoopInfo *LI,
continue;
// If this is a load, save it. If this instruction can read from memory
- // but is not a load, then we quit. Notice that we don't handle function
- // calls that read or write.
+ // but is not a load, we only allow it if it's a call to a function with a
+ // vector mapping and no pointer arguments.
if (I.mayReadFromMemory()) {
- // If the function has an explicit vectorized counterpart, we can safely
- // assume that it can be vectorized.
+ auto hasPointerArgs = [](CallBase *CB) {
+ return any_of(CB->args(), [](Value const *Arg) {
+ return Arg->getType()->isPointerTy();
+ });
+ };
+
+ // If the function has an explicit vectorized counterpart, and does not
+ // take output/input pointers, we can safely assume that it can be
+ // vectorized.
if (Call && !Call->isNoBuiltin() && Call->getCalledFunction() &&
- !VFDatabase::getMappings(*Call).empty())
+ !hasPointerArgs(Call) && !VFDatabase::getMappings(*Call).empty())
continue;
auto *Ld = dyn_cast<LoadInst>(&I);
diff --git a/llvm/lib/Analysis/MemDerefPrinter.cpp b/llvm/lib/Analysis/MemDerefPrinter.cpp
index e858d94..68cb885 100644
--- a/llvm/lib/Analysis/MemDerefPrinter.cpp
+++ b/llvm/lib/Analysis/MemDerefPrinter.cpp
@@ -30,10 +30,10 @@ PreservedAnalyses MemDerefPrinterPass::run(Function &F,
for (auto &I : instructions(F)) {
if (LoadInst *LI = dyn_cast<LoadInst>(&I)) {
Value *PO = LI->getPointerOperand();
- if (isDereferenceablePointer(PO, LI->getType(), DL))
+ if (isDereferenceablePointer(PO, LI->getType(), DL, LI))
Deref.push_back(PO);
if (isDereferenceableAndAlignedPointer(PO, LI->getType(), LI->getAlign(),
- DL))
+ DL, LI))
DerefAndAligned.insert(PO);
}
}
diff --git a/llvm/lib/Analysis/ScalarEvolution.cpp b/llvm/lib/Analysis/ScalarEvolution.cpp
index 1d34435..233f8ed 100644
--- a/llvm/lib/Analysis/ScalarEvolution.cpp
+++ b/llvm/lib/Analysis/ScalarEvolution.cpp
@@ -8191,10 +8191,13 @@ ScalarEvolution::getSmallConstantTripCount(const Loop *L,
return getConstantTripCount(ExitCount);
}
-unsigned ScalarEvolution::getSmallConstantMaxTripCount(const Loop *L) {
+unsigned ScalarEvolution::getSmallConstantMaxTripCount(
+ const Loop *L, SmallVectorImpl<const SCEVPredicate *> *Predicates) {
+
const auto *MaxExitCount =
- dyn_cast<SCEVConstant>(getConstantMaxBackedgeTakenCount(L));
- return getConstantTripCount(MaxExitCount);
+ Predicates ? getPredicatedConstantMaxBackedgeTakenCount(L, *Predicates)
+ : getConstantMaxBackedgeTakenCount(L);
+ return getConstantTripCount(dyn_cast<SCEVConstant>(MaxExitCount));
}
unsigned ScalarEvolution::getSmallConstantTripMultiple(const Loop *L) {
@@ -8303,6 +8306,11 @@ const SCEV *ScalarEvolution::getPredicatedSymbolicMaxBackedgeTakenCount(
return getPredicatedBackedgeTakenInfo(L).getSymbolicMax(L, this, &Preds);
}
+const SCEV *ScalarEvolution::getPredicatedConstantMaxBackedgeTakenCount(
+ const Loop *L, SmallVectorImpl<const SCEVPredicate *> &Preds) {
+ return getPredicatedBackedgeTakenInfo(L).getConstantMax(this, &Preds);
+}
+
bool ScalarEvolution::isBackedgeTakenCountMaxOrZero(const Loop *L) {
return getBackedgeTakenInfo(L).isConstantMaxOrZero(this);
}
@@ -8624,15 +8632,19 @@ ScalarEvolution::BackedgeTakenInfo::getExitNotTaken(
}
/// getConstantMax - Get the constant max backedge taken count for the loop.
-const SCEV *
-ScalarEvolution::BackedgeTakenInfo::getConstantMax(ScalarEvolution *SE) const {
- auto PredicateNotAlwaysTrue = [](const ExitNotTakenInfo &ENT) {
- return !ENT.hasAlwaysTruePredicate();
- };
-
- if (!getConstantMax() || any_of(ExitNotTaken, PredicateNotAlwaysTrue))
+const SCEV *ScalarEvolution::BackedgeTakenInfo::getConstantMax(
+ ScalarEvolution *SE,
+ SmallVectorImpl<const SCEVPredicate *> *Predicates) const {
+ if (!getConstantMax())
return SE->getCouldNotCompute();
+ for (const auto &ENT : ExitNotTaken)
+ if (!ENT.hasAlwaysTruePredicate()) {
+ if (!Predicates)
+ return SE->getCouldNotCompute();
+ append_range(*Predicates, ENT.Predicates);
+ }
+
assert((isa<SCEVCouldNotCompute>(getConstantMax()) ||
isa<SCEVConstant>(getConstantMax())) &&
"No point in having a non-constant max backedge taken count!");
@@ -13749,8 +13761,28 @@ static void PrintLoopInfo(raw_ostream &OS, ScalarEvolution *SE,
for (const auto *P : Preds)
P->print(OS, 4);
}
+ Preds.clear();
+ auto *PredConstantMax =
+ SE->getPredicatedConstantMaxBackedgeTakenCount(L, Preds);
+ if (PredConstantMax != ConstantBTC) {
+ assert(!Preds.empty() &&
+ "different predicated constant max BTC but no predicates");
+ OS << "Loop ";
+ L->getHeader()->printAsOperand(OS, /*PrintType=*/false);
+ OS << ": ";
+ if (!isa<SCEVCouldNotCompute>(PredConstantMax)) {
+ OS << "Predicated constant max backedge-taken count is ";
+ PrintSCEVWithTypeHint(OS, PredConstantMax);
+ } else
+ OS << "Unpredictable predicated constant max backedge-taken count.";
+ OS << "\n";
+ OS << " Predicates:\n";
+ for (const auto *P : Preds)
+ P->print(OS, 4);
+ }
Preds.clear();
+
auto *PredSymbolicMax =
SE->getPredicatedSymbolicMaxBackedgeTakenCount(L, Preds);
if (SymbolicBTC != PredSymbolicMax) {
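A hedged sketch of the new predicated query from a caller's side; the wrapper is hypothetical:

    #include "llvm/Analysis/ScalarEvolution.h"

    // Hypothetical caller (not from the patch): a nonzero result is valid
    // only if the loop is versioned on every predicate collected in Preds.
    static unsigned constantMaxTripCountWithPreds(
        llvm::ScalarEvolution &SE, const llvm::Loop *L,
        llvm::SmallVectorImpl<const llvm::SCEVPredicate *> &Preds) {
      return SE.getSmallConstantMaxTripCount(L, &Preds);
    }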
diff --git a/llvm/lib/Analysis/VectorUtils.cpp b/llvm/lib/Analysis/VectorUtils.cpp
index d45d3bb..dbffbb8 100644
--- a/llvm/lib/Analysis/VectorUtils.cpp
+++ b/llvm/lib/Analysis/VectorUtils.cpp
@@ -1454,7 +1454,7 @@ void InterleavedAccessInfo::analyzeInterleaving(
// that all the pointers in the group don't wrap.
// So we check only group member 0 (which is always guaranteed to exist),
// and group member Factor - 1; If the latter doesn't exist we rely on
- // peeling (if it is a non-reversed accsess -- see Case 3).
+ // peeling (if it is a non-reversed access -- see Case 3).
if (InvalidateGroupIfMemberMayWrap(Group, 0, std::string("first")))
continue;
if (Group->getMember(Group->getFactor() - 1))
diff --git a/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp b/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
index db7adfd..d17800d 100644
--- a/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
+++ b/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
@@ -1971,7 +1971,10 @@ void AsmPrinter::emitFunctionBody() {
// are automatically sized.
bool EmitFunctionSize = MAI->hasDotTypeDotSizeDirective() && !TT.isWasm();
- if (EmitFunctionSize || needFuncLabels(*MF, *this)) {
+ // SPIR-V supports label instructions only inside a block, not after the
+ // function body.
+ if (TT.getObjectFormat() != Triple::SPIRV &&
+ (EmitFunctionSize || needFuncLabels(*MF, *this))) {
// Create a symbol for the end of function.
CurrentFnEnd = createTempSymbol("func_end");
OutStreamer->emitLabel(CurrentFnEnd);
diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
index df9c12b..c279289 100644
--- a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -3320,6 +3320,112 @@ static bool isConstValidTrue(const TargetLowering &TLI, unsigned ScalarSizeBits,
isConstTrueVal(TLI, Cst, IsVector, IsFP);
}
+// This combine tries to reduce the number of scalarised G_TRUNC instructions
+// by using vector truncates instead.
+//
+// EXAMPLE:
+//  %a(s32), %b(s32) = G_UNMERGE_VALUES %src(<2 x s32>)
+//  %T_a(s16) = G_TRUNC %a(s32)
+//  %T_b(s16) = G_TRUNC %b(s32)
+//  %Undef(s16) = G_IMPLICIT_DEF(s16)
+//  %dst(<4 x s16>) = G_BUILD_VECTOR %T_a(s16), %T_b(s16), %Undef(s16), %Undef(s16)
+//
+// ===>
+//  %Undef(<2 x s32>) = G_IMPLICIT_DEF(<2 x s32>)
+//  %Mid(<4 x s32>) = G_CONCAT_VECTORS %src(<2 x s32>), %Undef(<2 x s32>)
+//  %dst(<4 x s16>) = G_TRUNC %Mid(<4 x s32>)
+//
+// Only matches sources made up of G_TRUNCs followed by G_IMPLICIT_DEFs.
+bool CombinerHelper::matchUseVectorTruncate(MachineInstr &MI,
+ Register &MatchInfo) {
+ auto BuildMI = cast<GBuildVector>(&MI);
+ unsigned NumOperands = BuildMI->getNumSources();
+ LLT DstTy = MRI.getType(BuildMI->getReg(0));
+
+ // Check the G_BUILD_VECTOR sources
+ unsigned I;
+ MachineInstr *UnmergeMI = nullptr;
+
+ // Check all source TRUNCs come from the same UNMERGE instruction
+ for (I = 0; I < NumOperands; ++I) {
+ auto SrcMI = MRI.getVRegDef(BuildMI->getSourceReg(I));
+ auto SrcMIOpc = SrcMI->getOpcode();
+
+ // Check if the G_TRUNC instructions all come from the same MI
+ if (SrcMIOpc == TargetOpcode::G_TRUNC) {
+ if (!UnmergeMI) {
+ UnmergeMI = MRI.getVRegDef(SrcMI->getOperand(1).getReg());
+ if (UnmergeMI->getOpcode() != TargetOpcode::G_UNMERGE_VALUES)
+ return false;
+ } else {
+ auto UnmergeSrcMI = MRI.getVRegDef(SrcMI->getOperand(1).getReg());
+ if (UnmergeMI != UnmergeSrcMI)
+ return false;
+ }
+ } else {
+ break;
+ }
+ }
+ if (I < 2)
+ return false;
+
+ // Check the remaining source elements are only G_IMPLICIT_DEF
+ for (; I < NumOperands; ++I) {
+ auto SrcMI = MRI.getVRegDef(BuildMI->getSourceReg(I));
+ auto SrcMIOpc = SrcMI->getOpcode();
+
+ if (SrcMIOpc != TargetOpcode::G_IMPLICIT_DEF)
+ return false;
+ }
+
+ // Check the size of unmerge source
+ MatchInfo = cast<GUnmerge>(UnmergeMI)->getSourceReg();
+ LLT UnmergeSrcTy = MRI.getType(MatchInfo);
+ if (!DstTy.getElementCount().isKnownMultipleOf(UnmergeSrcTy.getNumElements()))
+ return false;
+
+ // Only generate legal instructions post-legalizer
+ if (!IsPreLegalize) {
+ LLT MidTy = DstTy.changeElementType(UnmergeSrcTy.getScalarType());
+
+ if (DstTy.getElementCount() != UnmergeSrcTy.getElementCount() &&
+ !isLegal({TargetOpcode::G_CONCAT_VECTORS, {MidTy, UnmergeSrcTy}}))
+ return false;
+
+ if (!isLegal({TargetOpcode::G_TRUNC, {DstTy, MidTy}}))
+ return false;
+ }
+
+ return true;
+}
+
+void CombinerHelper::applyUseVectorTruncate(MachineInstr &MI,
+ Register &MatchInfo) {
+ Register MidReg;
+ auto BuildMI = cast<GBuildVector>(&MI);
+ Register DstReg = BuildMI->getReg(0);
+ LLT DstTy = MRI.getType(DstReg);
+ LLT UnmergeSrcTy = MRI.getType(MatchInfo);
+ unsigned DstTyNumElt = DstTy.getNumElements();
+ unsigned UnmergeSrcTyNumElt = UnmergeSrcTy.getNumElements();
+
+ // No need to pad vector if only G_TRUNC is needed
+ if (DstTyNumElt / UnmergeSrcTyNumElt == 1) {
+ MidReg = MatchInfo;
+ } else {
+ Register UndefReg = Builder.buildUndef(UnmergeSrcTy).getReg(0);
+ SmallVector<Register> ConcatRegs = {MatchInfo};
+ for (unsigned I = 1; I < DstTyNumElt / UnmergeSrcTyNumElt; ++I)
+ ConcatRegs.push_back(UndefReg);
+
+ auto MidTy = DstTy.changeElementType(UnmergeSrcTy.getScalarType());
+ MidReg = Builder.buildConcatVectors(MidTy, ConcatRegs).getReg(0);
+ }
+
+ Builder.buildTrunc(DstReg, MidReg);
+ MI.eraseFromParent();
+}
+
bool CombinerHelper::matchNotCmp(MachineInstr &MI,
SmallVectorImpl<Register> &RegsToNegate) {
assert(MI.getOpcode() == TargetOpcode::G_XOR);
diff --git a/llvm/lib/CodeGen/LLVMTargetMachine.cpp b/llvm/lib/CodeGen/LLVMTargetMachine.cpp
index d0dfafe..ea36fed 100644
--- a/llvm/lib/CodeGen/LLVMTargetMachine.cpp
+++ b/llvm/lib/CodeGen/LLVMTargetMachine.cpp
@@ -160,7 +160,9 @@ Expected<std::unique_ptr<MCStreamer>> LLVMTargetMachine::createMCStreamer(
switch (FileType) {
case CodeGenFileType::AssemblyFile: {
MCInstPrinter *InstPrinter = getTarget().createMCInstPrinter(
- getTargetTriple(), MAI.getAssemblerDialect(), MAI, MII, MRI);
+ getTargetTriple(),
+ Options.MCOptions.OutputAsmVariant.value_or(MAI.getAssemblerDialect()),
+ MAI, MII, MRI);
// Create a code emitter if asked to show the encoding.
std::unique_ptr<MCCodeEmitter> MCE;
@@ -259,9 +261,11 @@ bool LLVMTargetMachine::addPassesToEmitMC(PassManagerBase &PM, MCContext *&Ctx,
const MCRegisterInfo &MRI = *getMCRegisterInfo();
std::unique_ptr<MCCodeEmitter> MCE(
getTarget().createMCCodeEmitter(*getMCInstrInfo(), *Ctx));
+ if (!MCE)
+ return true;
MCAsmBackend *MAB =
getTarget().createMCAsmBackend(STI, MRI, Options.MCOptions);
- if (!MCE || !MAB)
+ if (!MAB)
return true;
const Triple &T = getTargetTriple();
diff --git a/llvm/lib/CodeGen/LiveInterval.cpp b/llvm/lib/CodeGen/LiveInterval.cpp
index c815406..0683353 100644
--- a/llvm/lib/CodeGen/LiveInterval.cpp
+++ b/llvm/lib/CodeGen/LiveInterval.cpp
@@ -630,8 +630,8 @@ void LiveRange::join(LiveRange &Other,
const int *LHSValNoAssignments,
const int *RHSValNoAssignments,
SmallVectorImpl<VNInfo *> &NewVNInfo) {
- verify();
- Other.verify();
+ assert(verify());
+ assert(Other.verify());
// Determine if any of our values are mapped. This is uncommon, so we want
// to avoid the range scan if not.
@@ -797,7 +797,7 @@ void LiveRange::flushSegmentSet() {
"segment set can be used only initially before switching to the array");
segments.append(segmentSet->begin(), segmentSet->end());
segmentSet = nullptr;
- verify();
+ assert(verify());
}
bool LiveRange::isLiveAtIndexes(ArrayRef<SlotIndex> Slots) const {
@@ -1055,24 +1055,36 @@ LLVM_DUMP_METHOD void LiveInterval::dump() const {
#endif
#ifndef NDEBUG
-void LiveRange::verify() const {
+bool LiveRange::verify() const {
for (const_iterator I = begin(), E = end(); I != E; ++I) {
- assert(I->start.isValid());
- assert(I->end.isValid());
- assert(I->start < I->end);
- assert(I->valno != nullptr);
- assert(I->valno->id < valnos.size());
- assert(I->valno == valnos[I->valno->id]);
+ if (!I->start.isValid())
+ return false;
+ if (!I->end.isValid())
+ return false;
+ if (I->start >= I->end)
+ return false;
+ if (I->valno == nullptr)
+ return false;
+ if (I->valno->id >= valnos.size())
+ return false;
+ if (I->valno != valnos[I->valno->id])
+ return false;
if (std::next(I) != E) {
- assert(I->end <= std::next(I)->start);
- if (I->end == std::next(I)->start)
- assert(I->valno != std::next(I)->valno);
+ if (I->end > std::next(I)->start)
+ return false;
+ if (I->end == std::next(I)->start) {
+ if (I->valno == std::next(I)->valno)
+ return false;
+ }
}
}
+
+ return true;
}
-void LiveInterval::verify(const MachineRegisterInfo *MRI) const {
- super::verify();
+bool LiveInterval::verify(const MachineRegisterInfo *MRI) const {
+ if (!super::verify())
+ return false;
// Make sure SubRanges are fine and LaneMasks are disjunct.
LaneBitmask Mask;
@@ -1080,18 +1092,28 @@ void LiveInterval::verify(const MachineRegisterInfo *MRI) const {
: LaneBitmask::getAll();
for (const SubRange &SR : subranges()) {
// Subrange lanemask should be disjunct to any previous subrange masks.
- assert((Mask & SR.LaneMask).none());
+ if ((Mask & SR.LaneMask).any())
+ return false;
+
Mask |= SR.LaneMask;
// Subrange mask should be contained in the maximum lane mask for the vreg.
- assert((Mask & ~MaxMask).none());
+ if ((Mask & ~MaxMask).any())
+ return false;
+
// empty subranges must be removed.
- assert(!SR.empty());
+ if (SR.empty())
+ return false;
+
+ if (!SR.verify())
+ return false;
- SR.verify();
// Main liverange should cover subrange.
- assert(covers(SR));
+ if (!covers(SR))
+ return false;
}
+
+ return true;
}
#endif
@@ -1283,7 +1305,7 @@ void LiveRangeUpdater::flush() {
// Nothing to merge?
if (Spills.empty()) {
LR->segments.erase(WriteI, ReadI);
- LR->verify();
+ assert(LR->verify());
return;
}
@@ -1301,7 +1323,7 @@ void LiveRangeUpdater::flush() {
}
ReadI = WriteI + Spills.size();
mergeSpills();
- LR->verify();
+ assert(LR->verify());
}
unsigned ConnectedVNInfoEqClasses::Classify(const LiveRange &LR) {
diff --git a/llvm/lib/CodeGen/LiveIntervals.cpp b/llvm/lib/CodeGen/LiveIntervals.cpp
index d879a7c..7ddaaaa 100644
--- a/llvm/lib/CodeGen/LiveIntervals.cpp
+++ b/llvm/lib/CodeGen/LiveIntervals.cpp
@@ -1105,7 +1105,7 @@ private:
else
handleMoveUp(LR, Reg, LaneMask);
LLVM_DEBUG(dbgs() << " -->\t" << LR << '\n');
- LR.verify();
+ assert(LR.verify());
}
/// Update LR to reflect an instruction has been moved downwards from OldIdx
diff --git a/llvm/lib/CodeGen/MIRParser/MIRParser.cpp b/llvm/lib/CodeGen/MIRParser/MIRParser.cpp
index d506cd1..3a00b8e 100644
--- a/llvm/lib/CodeGen/MIRParser/MIRParser.cpp
+++ b/llvm/lib/CodeGen/MIRParser/MIRParser.cpp
@@ -178,7 +178,8 @@ private:
SMDiagnostic diagFromBlockStringDiag(const SMDiagnostic &Error,
SMRange SourceRange);
- void computeFunctionProperties(MachineFunction &MF);
+ bool computeFunctionProperties(MachineFunction &MF,
+ const yaml::MachineFunction &YamlMF);
void setupDebugValueTracking(MachineFunction &MF,
PerFunctionMIParsingState &PFS, const yaml::MachineFunction &YamlMF);
@@ -373,7 +374,8 @@ static bool isSSA(const MachineFunction &MF) {
return true;
}
-void MIRParserImpl::computeFunctionProperties(MachineFunction &MF) {
+bool MIRParserImpl::computeFunctionProperties(
+ MachineFunction &MF, const yaml::MachineFunction &YamlMF) {
MachineFunctionProperties &Properties = MF.getProperties();
bool HasPHI = false;
@@ -398,21 +400,48 @@ void MIRParserImpl::computeFunctionProperties(MachineFunction &MF) {
}
}
}
- if (!HasPHI)
- Properties.set(MachineFunctionProperties::Property::NoPHIs);
+
+ // Helper function to sanity-check and set properties that are computed, but
+ // may be explicitly set from the input MIR
+ auto ComputedPropertyHelper =
+ [&Properties](std::optional<bool> ExplicitProp, bool ComputedProp,
+ MachineFunctionProperties::Property P) -> bool {
+ // Prefer explicitly given values over the computed properties
+ if (ExplicitProp.value_or(ComputedProp))
+ Properties.set(P);
+ else
+ Properties.reset(P);
+
+ // Check for conflict between the explicit values and the computed ones
+ return ExplicitProp && *ExplicitProp && !ComputedProp;
+ };
+
+ if (ComputedPropertyHelper(YamlMF.NoPHIs, !HasPHI,
+ MachineFunctionProperties::Property::NoPHIs)) {
+ return error(MF.getName() +
+                 " has explicit property NoPHIs, but contains at least one PHI");
+ }
+
MF.setHasInlineAsm(HasInlineAsm);
if (HasTiedOps && AllTiedOpsRewritten)
Properties.set(MachineFunctionProperties::Property::TiedOpsRewritten);
- if (isSSA(MF))
- Properties.set(MachineFunctionProperties::Property::IsSSA);
- else
- Properties.reset(MachineFunctionProperties::Property::IsSSA);
+ if (ComputedPropertyHelper(YamlMF.IsSSA, isSSA(MF),
+ MachineFunctionProperties::Property::IsSSA)) {
+ return error(MF.getName() +
+ " has explicit property IsSSA, but is not valid SSA");
+ }
const MachineRegisterInfo &MRI = MF.getRegInfo();
- if (MRI.getNumVirtRegs() == 0)
- Properties.set(MachineFunctionProperties::Property::NoVRegs);
+ if (ComputedPropertyHelper(YamlMF.NoVRegs, MRI.getNumVirtRegs() == 0,
+ MachineFunctionProperties::Property::NoVRegs)) {
+ return error(
+ MF.getName() +
+ " has explicit property NoVRegs, but contains virtual registers");
+ }
+
+ return false;
}
bool MIRParserImpl::initializeCallSiteInfo(
@@ -595,7 +624,8 @@ MIRParserImpl::initializeMachineFunction(const yaml::MachineFunction &YamlMF,
MachineRegisterInfo &MRI = MF.getRegInfo();
MRI.freezeReservedRegs();
- computeFunctionProperties(MF);
+ if (computeFunctionProperties(MF, YamlMF))
+ return false;
if (initializeCallSiteInfo(PFS, YamlMF))
return false;
@@ -604,7 +634,7 @@ MIRParserImpl::initializeMachineFunction(const yaml::MachineFunction &YamlMF,
MF.getSubtarget().mirFileLoaded(MF);
- MF.verify();
+ MF.verify(nullptr, nullptr, &errs());
return false;
}
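For illustration, a hedged .mir fragment exercising the explicit properties (the function is hypothetical; key spellings are assumed to match the yaml::MachineFunction fields set by MIRPrinter below). With this change the parser reports an error if, say, noPhis is true but the body contains a PHI:

    name:            foo
    isSSA:           true
    noPhis:          true
    noVRegs:         false
    body:            |
      bb.0: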
diff --git a/llvm/lib/CodeGen/MIRPrinter.cpp b/llvm/lib/CodeGen/MIRPrinter.cpp
index 7de68b1..cf6122b 100644
--- a/llvm/lib/CodeGen/MIRPrinter.cpp
+++ b/llvm/lib/CodeGen/MIRPrinter.cpp
@@ -223,6 +223,13 @@ void MIRPrinter::print(const MachineFunction &MF) {
YamlMF.TracksDebugUserValues = MF.getProperties().hasProperty(
MachineFunctionProperties::Property::TracksDebugUserValues);
+ YamlMF.NoPHIs = MF.getProperties().hasProperty(
+ MachineFunctionProperties::Property::NoPHIs);
+ YamlMF.IsSSA = MF.getProperties().hasProperty(
+ MachineFunctionProperties::Property::IsSSA);
+ YamlMF.NoVRegs = MF.getProperties().hasProperty(
+ MachineFunctionProperties::Property::NoVRegs);
+
convert(YamlMF, MF.getRegInfo(), MF.getSubtarget().getRegisterInfo());
MachineModuleSlotTracker MST(MMI, &MF);
MST.incorporateFunction(MF.getFunction());
diff --git a/llvm/lib/CodeGen/MachineBlockPlacement.cpp b/llvm/lib/CodeGen/MachineBlockPlacement.cpp
index be783bc..a52c82d 100644
--- a/llvm/lib/CodeGen/MachineBlockPlacement.cpp
+++ b/llvm/lib/CodeGen/MachineBlockPlacement.cpp
@@ -119,10 +119,10 @@ static cl::opt<unsigned> LoopToColdBlockRatio(
"(frequency of block) is greater than this ratio"),
cl::init(5), cl::Hidden);
-static cl::opt<bool> ForceLoopColdBlock(
- "force-loop-cold-block",
- cl::desc("Force outlining cold blocks from loops."),
- cl::init(false), cl::Hidden);
+static cl::opt<bool>
+ ForceLoopColdBlock("force-loop-cold-block",
+ cl::desc("Force outlining cold blocks from loops."),
+ cl::init(false), cl::Hidden);
static cl::opt<bool>
PreciseRotationCost("precise-rotation-cost",
@@ -147,43 +147,43 @@ static cl::opt<unsigned> JumpInstCost("jump-inst-cost",
cl::desc("Cost of jump instructions."),
cl::init(1), cl::Hidden);
static cl::opt<bool>
-TailDupPlacement("tail-dup-placement",
- cl::desc("Perform tail duplication during placement. "
- "Creates more fallthrough opportunites in "
- "outline branches."),
- cl::init(true), cl::Hidden);
+ TailDupPlacement("tail-dup-placement",
+ cl::desc("Perform tail duplication during placement. "
+                              "Creates more fallthrough opportunities in "
+ "outline branches."),
+ cl::init(true), cl::Hidden);
static cl::opt<bool>
-BranchFoldPlacement("branch-fold-placement",
- cl::desc("Perform branch folding during placement. "
- "Reduces code size."),
- cl::init(true), cl::Hidden);
+ BranchFoldPlacement("branch-fold-placement",
+ cl::desc("Perform branch folding during placement. "
+ "Reduces code size."),
+ cl::init(true), cl::Hidden);
// Heuristic for tail duplication.
static cl::opt<unsigned> TailDupPlacementThreshold(
"tail-dup-placement-threshold",
cl::desc("Instruction cutoff for tail duplication during layout. "
"Tail merging during layout is forced to have a threshold "
- "that won't conflict."), cl::init(2),
- cl::Hidden);
+ "that won't conflict."),
+ cl::init(2), cl::Hidden);
// Heuristic for aggressive tail duplication.
static cl::opt<unsigned> TailDupPlacementAggressiveThreshold(
"tail-dup-placement-aggressive-threshold",
cl::desc("Instruction cutoff for aggressive tail duplication during "
"layout. Used at -O3. Tail merging during layout is forced to "
- "have a threshold that won't conflict."), cl::init(4),
- cl::Hidden);
+ "have a threshold that won't conflict."),
+ cl::init(4), cl::Hidden);
// Heuristic for tail duplication.
static cl::opt<unsigned> TailDupPlacementPenalty(
"tail-dup-placement-penalty",
- cl::desc("Cost penalty for blocks that can avoid breaking CFG by copying. "
- "Copying can increase fallthrough, but it also increases icache "
- "pressure. This parameter controls the penalty to account for that. "
- "Percent as integer."),
- cl::init(2),
- cl::Hidden);
+ cl::desc(
+ "Cost penalty for blocks that can avoid breaking CFG by copying. "
+ "Copying can increase fallthrough, but it also increases icache "
+ "pressure. This parameter controls the penalty to account for that. "
+ "Percent as integer."),
+ cl::init(2), cl::Hidden);
// Heuristic for tail duplication if profile count is used in cost model.
static cl::opt<unsigned> TailDupProfilePercentThreshold(
@@ -198,8 +198,7 @@ static cl::opt<unsigned> TriangleChainCount(
"triangle-chain-count",
cl::desc("Number of triangle-shaped-CFG's that need to be in a row for the "
"triangle tail duplication heuristic to kick in. 0 to disable."),
- cl::init(2),
- cl::Hidden);
+ cl::init(2), cl::Hidden);
// Use case: When block layout is visualized after MBP pass, the basic blocks
// are labeled in layout order; meanwhile blocks could be numbered in a
@@ -292,8 +291,8 @@ public:
iterator end() { return Blocks.end(); }
const_iterator end() const { return Blocks.end(); }
- bool remove(MachineBasicBlock* BB) {
- for(iterator i = begin(); i != end(); ++i) {
+ bool remove(MachineBasicBlock *BB) {
+ for (iterator i = begin(); i != end(); ++i) {
if (*i == BB) {
Blocks.erase(i);
return true;
@@ -405,6 +404,8 @@ class MachineBlockPlacement : public MachineFunctionPass {
ProfileSummaryInfo *PSI = nullptr;
+ TargetPassConfig *PassConfig = nullptr;
+
/// Duplicator used to duplicate tails during placement.
///
/// Placement decisions can open up new tail duplication opportunities, but
@@ -415,6 +416,8 @@ class MachineBlockPlacement : public MachineFunctionPass {
/// Partial tail duplication threshold.
BlockFrequency DupThreshold;
+ unsigned TailDupSize;
+
/// True: use block profile count to compute tail duplication cost.
/// False: use block frequency to compute tail duplication cost.
bool UseProfileCount = false;
@@ -459,26 +462,24 @@ class MachineBlockPlacement : public MachineFunctionPass {
/// Scale the DupThreshold according to basic block size.
BlockFrequency scaleThreshold(MachineBasicBlock *BB);
- void initDupThreshold();
+ void initTailDupThreshold();
/// Decrease the UnscheduledPredecessors count for all blocks in chain, and
/// if the count goes to 0, add them to the appropriate work list.
- void markChainSuccessors(
- const BlockChain &Chain, const MachineBasicBlock *LoopHeaderBB,
- const BlockFilterSet *BlockFilter = nullptr);
+ void markChainSuccessors(const BlockChain &Chain,
+ const MachineBasicBlock *LoopHeaderBB,
+ const BlockFilterSet *BlockFilter = nullptr);
/// Decrease the UnscheduledPredecessors count for a single block, and
/// if the count goes to 0, add them to the appropriate work list.
- void markBlockSuccessors(
- const BlockChain &Chain, const MachineBasicBlock *BB,
- const MachineBasicBlock *LoopHeaderBB,
- const BlockFilterSet *BlockFilter = nullptr);
+ void markBlockSuccessors(const BlockChain &Chain, const MachineBasicBlock *BB,
+ const MachineBasicBlock *LoopHeaderBB,
+ const BlockFilterSet *BlockFilter = nullptr);
BranchProbability
- collectViableSuccessors(
- const MachineBasicBlock *BB, const BlockChain &Chain,
- const BlockFilterSet *BlockFilter,
- SmallVector<MachineBasicBlock *, 4> &Successors);
+ collectViableSuccessors(const MachineBasicBlock *BB, const BlockChain &Chain,
+ const BlockFilterSet *BlockFilter,
+ SmallVector<MachineBasicBlock *, 4> &Successors);
bool isBestSuccessor(MachineBasicBlock *BB, MachineBasicBlock *Pred,
BlockFilterSet *BlockFilter);
void findDuplicateCandidates(SmallVectorImpl<MachineBasicBlock *> &Candidates,
@@ -496,16 +497,19 @@ class MachineBlockPlacement : public MachineFunctionPass {
MachineFunction::iterator &PrevUnplacedBlockIt,
BlockFilterSet::iterator &PrevUnplacedBlockInFilterIt,
bool &DuplicatedToLPred);
- bool hasBetterLayoutPredecessor(
- const MachineBasicBlock *BB, const MachineBasicBlock *Succ,
- const BlockChain &SuccChain, BranchProbability SuccProb,
- BranchProbability RealSuccProb, const BlockChain &Chain,
- const BlockFilterSet *BlockFilter);
- BlockAndTailDupResult selectBestSuccessor(
- const MachineBasicBlock *BB, const BlockChain &Chain,
- const BlockFilterSet *BlockFilter);
- MachineBasicBlock *selectBestCandidateBlock(
- const BlockChain &Chain, SmallVectorImpl<MachineBasicBlock *> &WorkList);
+ bool hasBetterLayoutPredecessor(const MachineBasicBlock *BB,
+ const MachineBasicBlock *Succ,
+ const BlockChain &SuccChain,
+ BranchProbability SuccProb,
+ BranchProbability RealSuccProb,
+ const BlockChain &Chain,
+ const BlockFilterSet *BlockFilter);
+ BlockAndTailDupResult selectBestSuccessor(const MachineBasicBlock *BB,
+ const BlockChain &Chain,
+ const BlockFilterSet *BlockFilter);
+ MachineBasicBlock *
+ selectBestCandidateBlock(const BlockChain &Chain,
+ SmallVectorImpl<MachineBasicBlock *> &WorkList);
MachineBasicBlock *
getFirstUnplacedBlock(const BlockChain &PlacedChain,
MachineFunction::iterator &PrevUnplacedBlockIt);
@@ -536,20 +540,19 @@ class MachineBlockPlacement : public MachineFunctionPass {
const MachineBasicBlock *ExitBB,
const BlockFilterSet &LoopBlockSet);
MachineBasicBlock *findBestLoopTopHelper(MachineBasicBlock *OldTop,
- const MachineLoop &L, const BlockFilterSet &LoopBlockSet);
- MachineBasicBlock *findBestLoopTop(
- const MachineLoop &L, const BlockFilterSet &LoopBlockSet);
- MachineBasicBlock *findBestLoopExit(
- const MachineLoop &L, const BlockFilterSet &LoopBlockSet,
- BlockFrequency &ExitFreq);
+ const MachineLoop &L,
+ const BlockFilterSet &LoopBlockSet);
+ MachineBasicBlock *findBestLoopTop(const MachineLoop &L,
+ const BlockFilterSet &LoopBlockSet);
+ MachineBasicBlock *findBestLoopExit(const MachineLoop &L,
+ const BlockFilterSet &LoopBlockSet,
+ BlockFrequency &ExitFreq);
BlockFilterSet collectLoopBlockSet(const MachineLoop &L);
void buildLoopChains(const MachineLoop &L);
- void rotateLoop(
- BlockChain &LoopChain, const MachineBasicBlock *ExitingBB,
- BlockFrequency ExitFreq, const BlockFilterSet &LoopBlockSet);
- void rotateLoopWithProfile(
- BlockChain &LoopChain, const MachineLoop &L,
- const BlockFilterSet &LoopBlockSet);
+ void rotateLoop(BlockChain &LoopChain, const MachineBasicBlock *ExitingBB,
+ BlockFrequency ExitFreq, const BlockFilterSet &LoopBlockSet);
+ void rotateLoopWithProfile(BlockChain &LoopChain, const MachineLoop &L,
+ const BlockFilterSet &LoopBlockSet);
void buildCFGChains();
void optimizeBranches();
void alignBlocks();
@@ -558,10 +561,10 @@ class MachineBlockPlacement : public MachineFunctionPass {
bool shouldTailDuplicate(MachineBasicBlock *BB);
/// Check the edge frequencies to see if tail duplication will increase
/// fallthroughs.
- bool isProfitableToTailDup(
- const MachineBasicBlock *BB, const MachineBasicBlock *Succ,
- BranchProbability QProb,
- const BlockChain &Chain, const BlockFilterSet *BlockFilter);
+ bool isProfitableToTailDup(const MachineBasicBlock *BB,
+ const MachineBasicBlock *Succ,
+ BranchProbability QProb, const BlockChain &Chain,
+ const BlockFilterSet *BlockFilter);
/// Check for a trellis layout.
bool isTrellis(const MachineBasicBlock *BB,
@@ -582,9 +585,10 @@ class MachineBlockPlacement : public MachineFunctionPass {
/// Returns true if a block can tail duplicate into all unplaced
/// predecessors. Filters based on loop.
- bool canTailDuplicateUnplacedPreds(
- const MachineBasicBlock *BB, MachineBasicBlock *Succ,
- const BlockChain &Chain, const BlockFilterSet *BlockFilter);
+ bool canTailDuplicateUnplacedPreds(const MachineBasicBlock *BB,
+ MachineBasicBlock *Succ,
+ const BlockChain &Chain,
+ const BlockFilterSet *BlockFilter);
/// Find chains of triangles to tail-duplicate where a global analysis works,
/// but a local analysis would not find them.
@@ -802,8 +806,8 @@ bool MachineBlockPlacement::shouldTailDuplicate(MachineBasicBlock *BB) {
/// Compare 2 BlockFrequency's with a small penalty for \p A.
/// In order to be conservative, we apply a X% penalty to account for
/// increased icache pressure and static heuristics. For small frequencies
-/// we use only the numerators to improve accuracy. For simplicity, we assume the
-/// penalty is less than 100%
+/// we use only the numerators to improve accuracy. For simplicity, we assume
+/// the penalty is less than 100%
/// TODO(iteratee): Use 64-bit fixed point edge frequencies everywhere.
static bool greaterWithBias(BlockFrequency A, BlockFrequency B,
BlockFrequency EntryFreq) {
@@ -819,8 +823,8 @@ static bool greaterWithBias(BlockFrequency A, BlockFrequency B,
/// considering duplication.
bool MachineBlockPlacement::isProfitableToTailDup(
const MachineBasicBlock *BB, const MachineBasicBlock *Succ,
- BranchProbability QProb,
- const BlockChain &Chain, const BlockFilterSet *BlockFilter) {
+ BranchProbability QProb, const BlockChain &Chain,
+ const BlockFilterSet *BlockFilter) {
// We need to do a probability calculation to make sure this is profitable.
// First: does succ have a successor that post-dominates? This affects the
// calculation. The 2 relevant cases are:
@@ -876,12 +880,12 @@ bool MachineBlockPlacement::isProfitableToTailDup(
// from BB.
auto SuccBestPred = BlockFrequency(0);
for (MachineBasicBlock *SuccPred : Succ->predecessors()) {
- if (SuccPred == Succ || SuccPred == BB
- || BlockToChain[SuccPred] == &Chain
- || (BlockFilter && !BlockFilter->count(SuccPred)))
+ if (SuccPred == Succ || SuccPred == BB ||
+ BlockToChain[SuccPred] == &Chain ||
+ (BlockFilter && !BlockFilter->count(SuccPred)))
continue;
- auto Freq = MBFI->getBlockFreq(SuccPred)
- * MBPI->getEdgeProbability(SuccPred, Succ);
+ auto Freq =
+ MBFI->getBlockFreq(SuccPred) * MBPI->getEdgeProbability(SuccPred, Succ);
if (Freq > SuccBestPred)
SuccBestPred = Freq;
}
@@ -1137,7 +1141,7 @@ MachineBlockPlacement::getBestTrellisSuccessor(
}
// We have already computed the optimal edge for the other side of the
// trellis.
- ComputedEdges[BestB.Src] = { BestB.Dest, false };
+ ComputedEdges[BestB.Src] = {BestB.Dest, false};
auto TrellisSucc = BestA.Dest;
LLVM_DEBUG(BranchProbability SuccProb = getAdjustedProbability(
@@ -1169,8 +1173,8 @@ bool MachineBlockPlacement::canTailDuplicateUnplacedPreds(
// Make sure all unplaced and unfiltered predecessors can be
// tail-duplicated into.
// Skip any blocks that are already placed or not in this loop.
- if (Pred == BB || (BlockFilter && !BlockFilter->count(Pred))
- || (BlockToChain[Pred] == &Chain && !Succ->succ_empty()))
+ if (Pred == BB || (BlockFilter && !BlockFilter->count(Pred)) ||
+ (BlockToChain[Pred] == &Chain && !Succ->succ_empty()))
continue;
if (!TailDup.canTailDuplicate(Succ, Pred)) {
if (Successors.size() > 1 && hasSameSuccessors(*Pred, Successors))
@@ -1289,9 +1293,7 @@ void MachineBlockPlacement::precomputeTriangleChains() {
unsigned count() const { return Edges.size() - 1; }
- MachineBasicBlock *getKey() const {
- return Edges.back();
- }
+ MachineBasicBlock *getKey() const { return Edges.back(); }
};
if (TriangleChainCount == 0)
@@ -1326,7 +1328,7 @@ void MachineBlockPlacement::precomputeTriangleChains() {
bool CanTailDuplicate = true;
// If PDom can't tail-duplicate into its non-BB predecessors, then this
// isn't the kind of triangle we're looking for.
- for (MachineBasicBlock* Pred : PDom->predecessors()) {
+ for (MachineBasicBlock *Pred : PDom->predecessors()) {
if (Pred == &BB)
continue;
if (!TailDup.canTailDuplicate(PDom, Pred)) {
@@ -1386,8 +1388,8 @@ void MachineBlockPlacement::precomputeTriangleChains() {
// When profile is not present, return the StaticLikelyProb.
// When profile is available, we need to handle the triangle-shape CFG.
-static BranchProbability getLayoutSuccessorProbThreshold(
- const MachineBasicBlock *BB) {
+static BranchProbability
+getLayoutSuccessorProbThreshold(const MachineBasicBlock *BB) {
if (!BB->getParent()->getFunction().hasProfileData())
return BranchProbability(StaticLikelyProb, 100);
if (BB->succ_size() == 2) {
@@ -1551,8 +1553,8 @@ bool MachineBlockPlacement::hasBetterLayoutPredecessor(
for (MachineBasicBlock *Pred : Succ->predecessors()) {
BlockChain *PredChain = BlockToChain[Pred];
if (Pred == Succ || PredChain == &SuccChain ||
- (BlockFilter && !BlockFilter->count(Pred)) ||
- PredChain == &Chain || Pred != *std::prev(PredChain->end()) ||
+ (BlockFilter && !BlockFilter->count(Pred)) || PredChain == &Chain ||
+ Pred != *std::prev(PredChain->end()) ||
// This check is redundant except for look ahead. This function is
// called for lookahead by isProfitableToTailDup when BB hasn't been
// placed yet.
@@ -1599,12 +1601,12 @@ bool MachineBlockPlacement::hasBetterLayoutPredecessor(
/// \returns The best successor block found, or null if none are viable, along
/// with a boolean indicating if tail duplication is necessary.
MachineBlockPlacement::BlockAndTailDupResult
-MachineBlockPlacement::selectBestSuccessor(
- const MachineBasicBlock *BB, const BlockChain &Chain,
- const BlockFilterSet *BlockFilter) {
+MachineBlockPlacement::selectBestSuccessor(const MachineBasicBlock *BB,
+ const BlockChain &Chain,
+ const BlockFilterSet *BlockFilter) {
const BranchProbability HotProb(StaticLikelyProb, 100);
- BlockAndTailDupResult BestSucc = { nullptr, false };
+ BlockAndTailDupResult BestSucc = {nullptr, false};
auto BestProb = BranchProbability::getZero();
SmallVector<MachineBasicBlock *, 4> Successors;
@@ -1684,8 +1686,8 @@ MachineBlockPlacement::selectBestSuccessor(
std::tie(DupProb, Succ) = Tup;
if (DupProb < BestProb)
break;
- if (canTailDuplicateUnplacedPreds(BB, Succ, Chain, BlockFilter)
- && (isProfitableToTailDup(BB, Succ, BestProb, Chain, BlockFilter))) {
+ if (canTailDuplicateUnplacedPreds(BB, Succ, Chain, BlockFilter) &&
+ (isProfitableToTailDup(BB, Succ, BestProb, Chain, BlockFilter))) {
LLVM_DEBUG(dbgs() << " Candidate: " << getBlockName(Succ)
<< ", probability: " << DupProb
<< " (Tail Duplicate)\n");
@@ -1822,8 +1824,7 @@ MachineBasicBlock *MachineBlockPlacement::getFirstUnplacedBlock(
}
void MachineBlockPlacement::fillWorkLists(
- const MachineBasicBlock *MBB,
- SmallPtrSetImpl<BlockChain *> &UpdatedPreds,
+ const MachineBasicBlock *MBB, SmallPtrSetImpl<BlockChain *> &UpdatedPreds,
const BlockFilterSet *BlockFilter = nullptr) {
BlockChain &Chain = *BlockToChain[MBB];
if (!UpdatedPreds.insert(&Chain).second)
@@ -1854,9 +1855,9 @@ void MachineBlockPlacement::fillWorkLists(
BlockWorkList.push_back(BB);
}
-void MachineBlockPlacement::buildChain(
- const MachineBasicBlock *HeadBB, BlockChain &Chain,
- BlockFilterSet *BlockFilter) {
+void MachineBlockPlacement::buildChain(const MachineBasicBlock *HeadBB,
+ BlockChain &Chain,
+ BlockFilterSet *BlockFilter) {
assert(HeadBB && "BB must not be null.\n");
assert(BlockToChain[HeadBB] == &Chain && "BlockToChainMap mis-match.\n");
MachineFunction::iterator PrevUnplacedBlockIt = F->begin();
@@ -1872,16 +1873,14 @@ void MachineBlockPlacement::buildChain(
assert(BlockToChain[BB] == &Chain && "BlockToChainMap mis-match in loop.");
assert(*std::prev(Chain.end()) == BB && "BB Not found at end of chain.");
-
// Look for the best viable successor if there is one to place immediately
// after this block.
auto Result = selectBestSuccessor(BB, Chain, BlockFilter);
- MachineBasicBlock* BestSucc = Result.BB;
+ MachineBasicBlock *BestSucc = Result.BB;
bool ShouldTailDup = Result.ShouldTailDup;
if (allowTailDupPlacement())
- ShouldTailDup |= (BestSucc && canTailDuplicateUnplacedPreds(BB, BestSucc,
- Chain,
- BlockFilter));
+ ShouldTailDup |= (BestSucc && canTailDuplicateUnplacedPreds(
+ BB, BestSucc, Chain, BlockFilter));
// If an immediate successor isn't available, look for the best viable
// block among those we've identified as not violating the loop's CFG at
@@ -1918,8 +1917,8 @@ void MachineBlockPlacement::buildChain(
// Place this block, updating the datastructures to reflect its placement.
BlockChain &SuccChain = *BlockToChain[BestSucc];
- // Zero out UnscheduledPredecessors for the successor we're about to merge in case
- // we selected a successor that didn't fit naturally into the CFG.
+ // Zero out UnscheduledPredecessors for the successor we're about to merge
+ // in case we selected a successor that didn't fit naturally into the CFG.
SuccChain.UnscheduledPredecessors = 0;
LLVM_DEBUG(dbgs() << "Merging from " << getBlockName(BB) << " to "
<< getBlockName(BestSucc) << "\n");
@@ -1946,10 +1945,8 @@ void MachineBlockPlacement::buildChain(
// If BB is moved before OldTop, Pred needs a taken branch to BB, and it can't
// layout the other successor below it, so it can't reduce taken branch.
// In this case we keep its original layout.
-bool
-MachineBlockPlacement::canMoveBottomBlockToTop(
- const MachineBasicBlock *BottomBlock,
- const MachineBasicBlock *OldTop) {
+bool MachineBlockPlacement::canMoveBottomBlockToTop(
+ const MachineBasicBlock *BottomBlock, const MachineBasicBlock *OldTop) {
if (BottomBlock->pred_size() != 1)
return true;
MachineBasicBlock *Pred = *BottomBlock->pred_begin();
@@ -1967,9 +1964,8 @@ MachineBlockPlacement::canMoveBottomBlockToTop(
// Find the possible fall-through frequency to the top of a loop.
BlockFrequency
-MachineBlockPlacement::TopFallThroughFreq(
- const MachineBasicBlock *Top,
- const BlockFilterSet &LoopBlockSet) {
+MachineBlockPlacement::TopFallThroughFreq(const MachineBasicBlock *Top,
+ const BlockFilterSet &LoopBlockSet) {
BlockFrequency MaxFreq = BlockFrequency(0);
for (MachineBasicBlock *Pred : Top->predecessors()) {
BlockChain *PredChain = BlockToChain[Pred];
@@ -1991,8 +1987,8 @@ MachineBlockPlacement::TopFallThroughFreq(
}
}
if (TopOK) {
- BlockFrequency EdgeFreq = MBFI->getBlockFreq(Pred) *
- MBPI->getEdgeProbability(Pred, Top);
+ BlockFrequency EdgeFreq =
+ MBFI->getBlockFreq(Pred) * MBPI->getEdgeProbability(Pred, Top);
if (EdgeFreq > MaxFreq)
MaxFreq = EdgeFreq;
}
@@ -2022,19 +2018,16 @@ MachineBlockPlacement::TopFallThroughFreq(
// |-
// V
//
-BlockFrequency
-MachineBlockPlacement::FallThroughGains(
- const MachineBasicBlock *NewTop,
- const MachineBasicBlock *OldTop,
- const MachineBasicBlock *ExitBB,
- const BlockFilterSet &LoopBlockSet) {
+BlockFrequency MachineBlockPlacement::FallThroughGains(
+ const MachineBasicBlock *NewTop, const MachineBasicBlock *OldTop,
+ const MachineBasicBlock *ExitBB, const BlockFilterSet &LoopBlockSet) {
BlockFrequency FallThrough2Top = TopFallThroughFreq(OldTop, LoopBlockSet);
BlockFrequency FallThrough2Exit = BlockFrequency(0);
if (ExitBB)
- FallThrough2Exit = MBFI->getBlockFreq(NewTop) *
- MBPI->getEdgeProbability(NewTop, ExitBB);
- BlockFrequency BackEdgeFreq = MBFI->getBlockFreq(NewTop) *
- MBPI->getEdgeProbability(NewTop, OldTop);
+ FallThrough2Exit =
+ MBFI->getBlockFreq(NewTop) * MBPI->getEdgeProbability(NewTop, ExitBB);
+ BlockFrequency BackEdgeFreq =
+ MBFI->getBlockFreq(NewTop) * MBPI->getEdgeProbability(NewTop, OldTop);
// Find the best Pred of NewTop.
MachineBasicBlock *BestPred = nullptr;
@@ -2113,10 +2106,8 @@ MachineBlockPlacement::FallThroughGains(
/// At the same time, move it before old top increases the taken branch
/// to loop exit block, so the reduced taken branch will be compared with
/// the increased taken branch to the loop exit block.
-MachineBasicBlock *
-MachineBlockPlacement::findBestLoopTopHelper(
- MachineBasicBlock *OldTop,
- const MachineLoop &L,
+MachineBasicBlock *MachineBlockPlacement::findBestLoopTopHelper(
+ MachineBasicBlock *OldTop, const MachineLoop &L,
const BlockFilterSet &LoopBlockSet) {
// Check that the header hasn't been fused with a preheader block due to
// crazy branches. If it has, we need to start with the header at the top to
@@ -2153,8 +2144,8 @@ MachineBlockPlacement::findBestLoopTopHelper(
if (!canMoveBottomBlockToTop(Pred, OldTop))
continue;
- BlockFrequency Gains = FallThroughGains(Pred, OldTop, OtherBB,
- LoopBlockSet);
+ BlockFrequency Gains =
+ FallThroughGains(Pred, OldTop, OtherBB, LoopBlockSet);
if ((Gains > BlockFrequency(0)) &&
(Gains > BestGains ||
((Gains == BestGains) && Pred->isLayoutSuccessor(OldTop)))) {
@@ -2204,7 +2195,7 @@ MachineBlockPlacement::findBestLoopTop(const MachineLoop &L,
OldTop = NewTop;
NewTop = findBestLoopTopHelper(OldTop, L, LoopBlockSet);
if (NewTop != OldTop)
- ComputedEdges[NewTop] = { OldTop, false };
+ ComputedEdges[NewTop] = {OldTop, false};
}
return NewTop;
}
@@ -2336,10 +2327,8 @@ MachineBlockPlacement::findBestLoopExit(const MachineLoop &L,
///
/// 1. Look for a Pred that can be layout before Top.
/// 2. Check if Top is the most possible successor of Pred.
-bool
-MachineBlockPlacement::hasViableTopFallthrough(
- const MachineBasicBlock *Top,
- const BlockFilterSet &LoopBlockSet) {
+bool MachineBlockPlacement::hasViableTopFallthrough(
+ const MachineBasicBlock *Top, const BlockFilterSet &LoopBlockSet) {
for (MachineBasicBlock *Pred : Top->predecessors()) {
BlockChain *PredChain = BlockToChain[Pred];
if (!LoopBlockSet.count(Pred) &&
@@ -2491,7 +2480,7 @@ void MachineBlockPlacement::rotateLoopWithProfile(
if (!LoopBlockSet.count(Pred) &&
(!PredChain || Pred == *std::prev(PredChain->end()))) {
auto EdgeFreq = MBFI->getBlockFreq(Pred) *
- MBPI->getEdgeProbability(Pred, ChainHeaderBB);
+ MBPI->getEdgeProbability(Pred, ChainHeaderBB);
auto FallThruCost = ScaleBlockFrequency(EdgeFreq, MisfetchCost);
// If the predecessor has only an unconditional jump to the header, we
// need to consider the cost of this jump.
@@ -2951,12 +2940,16 @@ void MachineBlockPlacement::alignBlocks() {
// exclusively on the loop info here so that we can align backedges in
// unnatural CFGs and backedges that were introduced purely because of the
// loop rotations done during this layout pass.
- if (F->getFunction().hasMinSize() ||
- (F->getFunction().hasOptSize() && !TLI->alignLoopsWithOptSize()))
- return;
+ if (!AlignAllBlock && !AlignAllNonFallThruBlocks) {
+ if (F->getFunction().hasMinSize() ||
+ (F->getFunction().hasOptSize() && !TLI->alignLoopsWithOptSize()))
+ return;
+ }
+
BlockChain &FunctionChain = *BlockToChain[&F->front()];
+ // Empty chain.
if (FunctionChain.begin() == FunctionChain.end())
- return; // Empty chain.
+ return;
const BranchProbability ColdProb(1, 5); // 20%
BlockFrequency EntryFreq = MBFI->getBlockFreq(&F->front());
@@ -3052,6 +3045,33 @@ void MachineBlockPlacement::alignBlocks() {
DetermineMaxAlignmentPadding();
}
}
+
+ const bool HasMaxBytesOverride =
+ MaxBytesForAlignmentOverride.getNumOccurrences() > 0;
+
+ if (AlignAllBlock)
+ // Align all of the blocks in the function to a specific alignment.
+ for (MachineBasicBlock &MBB : *F) {
+ if (HasMaxBytesOverride)
+ MBB.setAlignment(Align(1ULL << AlignAllBlock),
+ MaxBytesForAlignmentOverride);
+ else
+ MBB.setAlignment(Align(1ULL << AlignAllBlock));
+ }
+ else if (AlignAllNonFallThruBlocks) {
+ // Align all of the blocks that have no fall-through predecessors to a
+ // specific alignment.
+ for (auto MBI = std::next(F->begin()), MBE = F->end(); MBI != MBE; ++MBI) {
+ auto LayoutPred = std::prev(MBI);
+ if (!LayoutPred->isSuccessor(&*MBI)) {
+ if (HasMaxBytesOverride)
+ MBI->setAlignment(Align(1ULL << AlignAllNonFallThruBlocks),
+ MaxBytesForAlignmentOverride);
+ else
+ MBI->setAlignment(Align(1ULL << AlignAllNonFallThruBlocks));
+ }
+ }
+ }
}
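Both alignment options are log2 values, so Align(1ULL << AlignAllBlock) turns, say, 4 into a 16-byte alignment, and MaxBytesForAlignmentOverride, when given, caps how much padding the emitter may insert to reach it. A hedged standalone sketch of that mapping (option values hypothetical):

    #include <cstdint>
    #include <cstdio>

    int main() {
      unsigned AlignAllBlock = 4;                  // hypothetical -align-all-blocks=4, a log2 value
      uint64_t AlignBytes = 1ULL << AlignAllBlock; // 16-byte block alignment
      unsigned MaxBytes = 8;                       // hypothetical -max-bytes-for-alignment=8 cap
      std::printf("align to %llu bytes, pad at most %u bytes\n",
                  (unsigned long long)AlignBytes, MaxBytes);
      return 0;
    }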
/// Tail duplicate \p BB into (some) predecessors if profitable, repeating if
@@ -3142,67 +3162,66 @@ bool MachineBlockPlacement::maybeTailDuplicateBlock(
// This has to be a callback because none of it can be done after
// BB is deleted.
bool Removed = false;
- auto RemovalCallback =
- [&](MachineBasicBlock *RemBB) {
- // Signal to outer function
- Removed = true;
-
- // Conservative default.
- bool InWorkList = true;
- // Remove from the Chain and Chain Map
- if (BlockToChain.count(RemBB)) {
- BlockChain *Chain = BlockToChain[RemBB];
- InWorkList = Chain->UnscheduledPredecessors == 0;
- Chain->remove(RemBB);
- BlockToChain.erase(RemBB);
- }
-
- // Handle the unplaced block iterator
- if (&(*PrevUnplacedBlockIt) == RemBB) {
- PrevUnplacedBlockIt++;
- }
-
- // Handle the Work Lists
- if (InWorkList) {
- SmallVectorImpl<MachineBasicBlock *> &RemoveList = BlockWorkList;
- if (RemBB->isEHPad())
- RemoveList = EHPadWorkList;
- llvm::erase(RemoveList, RemBB);
- }
-
- // Handle the filter set
- if (BlockFilter) {
- auto It = llvm::find(*BlockFilter, RemBB);
- // Erase RemBB from BlockFilter, and keep PrevUnplacedBlockInFilterIt
- // pointing to the same element as before.
- if (It != BlockFilter->end()) {
- if (It < PrevUnplacedBlockInFilterIt) {
- const MachineBasicBlock *PrevBB = *PrevUnplacedBlockInFilterIt;
- // BlockFilter is a SmallVector so all elements after RemBB are
- // shifted to the front by 1 after its deletion.
- auto Distance = PrevUnplacedBlockInFilterIt - It - 1;
- PrevUnplacedBlockInFilterIt = BlockFilter->erase(It) + Distance;
- assert(*PrevUnplacedBlockInFilterIt == PrevBB);
- (void)PrevBB;
- } else if (It == PrevUnplacedBlockInFilterIt)
- // The block pointed by PrevUnplacedBlockInFilterIt is erased, we
- // have to set it to the next element.
- PrevUnplacedBlockInFilterIt = BlockFilter->erase(It);
- else
- BlockFilter->erase(It);
- }
- }
+ auto RemovalCallback = [&](MachineBasicBlock *RemBB) {
+ // Signal to outer function
+ Removed = true;
+
+ // Conservative default.
+ bool InWorkList = true;
+ // Remove from the Chain and Chain Map
+ if (BlockToChain.count(RemBB)) {
+ BlockChain *Chain = BlockToChain[RemBB];
+ InWorkList = Chain->UnscheduledPredecessors == 0;
+ Chain->remove(RemBB);
+ BlockToChain.erase(RemBB);
+ }
+
+ // Handle the unplaced block iterator
+ if (&(*PrevUnplacedBlockIt) == RemBB) {
+ PrevUnplacedBlockIt++;
+ }
+
+ // Handle the Work Lists
+ if (InWorkList) {
+ SmallVectorImpl<MachineBasicBlock *> &RemoveList = BlockWorkList;
+ if (RemBB->isEHPad())
+ RemoveList = EHPadWorkList;
+ llvm::erase(RemoveList, RemBB);
+ }
+
+ // Handle the filter set
+ if (BlockFilter) {
+ auto It = llvm::find(*BlockFilter, RemBB);
+ // Erase RemBB from BlockFilter, and keep PrevUnplacedBlockInFilterIt
+ // pointing to the same element as before.
+ if (It != BlockFilter->end()) {
+ if (It < PrevUnplacedBlockInFilterIt) {
+ const MachineBasicBlock *PrevBB = *PrevUnplacedBlockInFilterIt;
+ // BlockFilter is a SmallVector so all elements after RemBB are
+ // shifted to the front by 1 after its deletion.
+ auto Distance = PrevUnplacedBlockInFilterIt - It - 1;
+ PrevUnplacedBlockInFilterIt = BlockFilter->erase(It) + Distance;
+ assert(*PrevUnplacedBlockInFilterIt == PrevBB);
+ (void)PrevBB;
+ } else if (It == PrevUnplacedBlockInFilterIt)
+ // The block pointed to by PrevUnplacedBlockInFilterIt is erased; we
+ // have to set it to the next element.
+ PrevUnplacedBlockInFilterIt = BlockFilter->erase(It);
+ else
+ BlockFilter->erase(It);
+ }
+ }
- // Remove the block from loop info.
- MLI->removeBlock(RemBB);
- if (RemBB == PreferredLoopExit)
- PreferredLoopExit = nullptr;
+ // Remove the block from loop info.
+ MLI->removeBlock(RemBB);
+ if (RemBB == PreferredLoopExit)
+ PreferredLoopExit = nullptr;
- LLVM_DEBUG(dbgs() << "TailDuplicator deleted block: "
- << getBlockName(RemBB) << "\n");
- };
+ LLVM_DEBUG(dbgs() << "TailDuplicator deleted block: " << getBlockName(RemBB)
+ << "\n");
+ };
auto RemovalCallbackRef =
- function_ref<void(MachineBasicBlock*)>(RemovalCallback);
+ function_ref<void(MachineBasicBlock *)>(RemovalCallback);
SmallVector<MachineBasicBlock *, 8> DuplicatedPreds;
bool IsSimple = TailDup.isSimpleBB(BB);
@@ -3223,11 +3242,11 @@ bool MachineBlockPlacement::maybeTailDuplicateBlock(
DuplicatedToLPred = false;
for (MachineBasicBlock *Pred : DuplicatedPreds) {
// We're only looking for unscheduled predecessors that match the filter.
- BlockChain* PredChain = BlockToChain[Pred];
+ BlockChain *PredChain = BlockToChain[Pred];
if (Pred == LPred)
DuplicatedToLPred = true;
- if (Pred == LPred || (BlockFilter && !BlockFilter->count(Pred))
- || PredChain == &Chain)
+ if (Pred == LPred || (BlockFilter && !BlockFilter->count(Pred)) ||
+ PredChain == &Chain)
continue;
for (MachineBasicBlock *NewSucc : Pred->successors()) {
if (BlockFilter && !BlockFilter->count(NewSucc))
@@ -3297,8 +3316,7 @@ bool MachineBlockPlacement::isBestSuccessor(MachineBasicBlock *BB,
// Find out the predecessors of BB and BB can be beneficially duplicated into
// them.
void MachineBlockPlacement::findDuplicateCandidates(
- SmallVectorImpl<MachineBasicBlock *> &Candidates,
- MachineBasicBlock *BB,
+ SmallVectorImpl<MachineBasicBlock *> &Candidates, MachineBasicBlock *BB,
BlockFilterSet *BlockFilter) {
MachineBasicBlock *Fallthrough = nullptr;
BranchProbability DefaultBranchProb = BranchProbability::getZero();
@@ -3407,31 +3425,53 @@ void MachineBlockPlacement::findDuplicateCandidates(
}
}
-void MachineBlockPlacement::initDupThreshold() {
+void MachineBlockPlacement::initTailDupThreshold() {
DupThreshold = BlockFrequency(0);
- if (!F->getFunction().hasProfileData())
- return;
+ if (F->getFunction().hasProfileData()) {
+ // We prefer to use profile count.
+ uint64_t HotThreshold = PSI->getOrCompHotCountThreshold();
+ if (HotThreshold != UINT64_MAX) {
+ UseProfileCount = true;
+ DupThreshold =
+ BlockFrequency(HotThreshold * TailDupProfilePercentThreshold / 100);
+ } else {
+ // Profile count is not available, so we use block frequency instead.
+ BlockFrequency MaxFreq = BlockFrequency(0);
+ for (MachineBasicBlock &MBB : *F) {
+ BlockFrequency Freq = MBFI->getBlockFreq(&MBB);
+ if (Freq > MaxFreq)
+ MaxFreq = Freq;
+ }
- // We prefer to use prifile count.
- uint64_t HotThreshold = PSI->getOrCompHotCountThreshold();
- if (HotThreshold != UINT64_MAX) {
- UseProfileCount = true;
- DupThreshold =
- BlockFrequency(HotThreshold * TailDupProfilePercentThreshold / 100);
- return;
+ BranchProbability ThresholdProb(TailDupPlacementPenalty, 100);
+ DupThreshold = BlockFrequency(MaxFreq * ThresholdProb);
+ UseProfileCount = false;
+ }
}
- // Profile count is not available, we can use block frequency instead.
- BlockFrequency MaxFreq = BlockFrequency(0);
- for (MachineBasicBlock &MBB : *F) {
- BlockFrequency Freq = MBFI->getBlockFreq(&MBB);
- if (Freq > MaxFreq)
- MaxFreq = Freq;
+ TailDupSize = TailDupPlacementThreshold;
+ // If only the aggressive threshold is explicitly set, use it.
+ if (TailDupPlacementAggressiveThreshold.getNumOccurrences() != 0 &&
+ TailDupPlacementThreshold.getNumOccurrences() == 0)
+ TailDupSize = TailDupPlacementAggressiveThreshold;
+
+ // For aggressive optimization, we can adjust some thresholds to be less
+ // conservative.
+ if (PassConfig->getOptLevel() >= CodeGenOptLevel::Aggressive) {
+ // At O3 we should be more willing to copy blocks for tail duplication. This
+ // increases size pressure, so we only do it at O3.
+ // Do this unless only the regular threshold is explicitly set.
+ if (TailDupPlacementThreshold.getNumOccurrences() == 0 ||
+ TailDupPlacementAggressiveThreshold.getNumOccurrences() != 0)
+ TailDupSize = TailDupPlacementAggressiveThreshold;
}
- BranchProbability ThresholdProb(TailDupPlacementPenalty, 100);
- DupThreshold = BlockFrequency(MaxFreq * ThresholdProb);
- UseProfileCount = false;
+ // If there's no threshold provided through options, query the target
+ // information for a threshold instead.
+ if (TailDupPlacementThreshold.getNumOccurrences() == 0 &&
+ (PassConfig->getOptLevel() < CodeGenOptLevel::Aggressive ||
+ TailDupPlacementAggressiveThreshold.getNumOccurrences() == 0))
+ TailDupSize = TII->getTailDuplicateSize(PassConfig->getOptLevel());
}
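With profile data, the duplication threshold is a percentage of the summary's hot-count threshold; without it, the code falls back to a fraction of the hottest block frequency. A worked sketch of the profile-count branch (all numbers hypothetical):

    #include <cstdint>
    #include <cstdio>

    int main() {
      uint64_t HotThreshold = 1000000; // hypothetical PSI->getOrCompHotCountThreshold()
      unsigned Percent = 50;           // hypothetical TailDupProfilePercentThreshold
      // Blocks at or above this count are considered hot enough to duplicate.
      uint64_t DupThreshold = HotThreshold * Percent / 100; // 500000, in profile counts
      std::printf("DupThreshold = %llu\n", (unsigned long long)DupThreshold);
      return 0;
    }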
bool MachineBlockPlacement::runOnMachineFunction(MachineFunction &MF) {
@@ -3451,8 +3491,7 @@ bool MachineBlockPlacement::runOnMachineFunction(MachineFunction &MF) {
TLI = MF.getSubtarget().getTargetLowering();
MPDT = nullptr;
PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
-
- initDupThreshold();
+ PassConfig = &getAnalysis<TargetPassConfig>();
// Initialize PreferredLoopExit to nullptr here since it may never be set if
// there are no MachineLoops.
@@ -3463,38 +3502,17 @@ bool MachineBlockPlacement::runOnMachineFunction(MachineFunction &MF) {
assert(ComputedEdges.empty() &&
"Computed Edge map should be empty before starting placement.");
- unsigned TailDupSize = TailDupPlacementThreshold;
- // If only the aggressive threshold is explicitly set, use it.
- if (TailDupPlacementAggressiveThreshold.getNumOccurrences() != 0 &&
- TailDupPlacementThreshold.getNumOccurrences() == 0)
- TailDupSize = TailDupPlacementAggressiveThreshold;
-
- TargetPassConfig *PassConfig = &getAnalysis<TargetPassConfig>();
- // For aggressive optimization, we can adjust some thresholds to be less
- // conservative.
- if (PassConfig->getOptLevel() >= CodeGenOptLevel::Aggressive) {
- // At O3 we should be more willing to copy blocks for tail duplication. This
- // increases size pressure, so we only do it at O3
- // Do this unless only the regular threshold is explicitly set.
- if (TailDupPlacementThreshold.getNumOccurrences() == 0 ||
- TailDupPlacementAggressiveThreshold.getNumOccurrences() != 0)
- TailDupSize = TailDupPlacementAggressiveThreshold;
- }
-
- // If there's no threshold provided through options, query the target
- // information for a threshold instead.
- if (TailDupPlacementThreshold.getNumOccurrences() == 0 &&
- (PassConfig->getOptLevel() < CodeGenOptLevel::Aggressive ||
- TailDupPlacementAggressiveThreshold.getNumOccurrences() == 0))
- TailDupSize = TII->getTailDuplicateSize(PassConfig->getOptLevel());
+ // Initialize tail duplication thresholds.
+ initTailDupThreshold();
+ // Apply tail duplication.
if (allowTailDupPlacement()) {
MPDT = &getAnalysis<MachinePostDominatorTreeWrapperPass>().getPostDomTree();
bool OptForSize = MF.getFunction().hasOptSize() ||
llvm::shouldOptimizeForSize(&MF, PSI, &MBFI->getMBFI());
if (OptForSize)
TailDupSize = 1;
- bool PreRegAlloc = false;
+ const bool PreRegAlloc = false;
TailDup.initMF(MF, PreRegAlloc, MBPI, MBFI.get(), PSI,
/* LayoutMode */ true, TailDupSize);
precomputeTriangleChains();
@@ -3505,12 +3523,12 @@ bool MachineBlockPlacement::runOnMachineFunction(MachineFunction &MF) {
// Changing the layout can create new tail merging opportunities.
// TailMerge can create jump into if branches that make CFG irreducible for
// HW that requires structured CFG.
- bool EnableTailMerge = !MF.getTarget().requiresStructuredCFG() &&
- PassConfig->getEnableTailMerge() &&
- BranchFoldPlacement;
+ const bool EnableTailMerge = !MF.getTarget().requiresStructuredCFG() &&
+ PassConfig->getEnableTailMerge() &&
+ BranchFoldPlacement && MF.size() > 3;
// No tail merging opportunities if the block number is less than four.
- if (MF.size() > 3 && EnableTailMerge) {
- unsigned TailMergeSize = TailDupSize + 1;
+ if (EnableTailMerge) {
+ const unsigned TailMergeSize = TailDupSize + 1;
BranchFolder BF(/*DefaultEnableTailMerge=*/true, /*CommonHoist=*/false,
*MBFI, *MBPI, PSI, TailMergeSize);
@@ -3545,32 +3563,7 @@ bool MachineBlockPlacement::runOnMachineFunction(MachineFunction &MF) {
ComputedEdges.clear();
ChainAllocator.DestroyAll();
- bool HasMaxBytesOverride =
- MaxBytesForAlignmentOverride.getNumOccurrences() > 0;
-
- if (AlignAllBlock)
- // Align all of the blocks in the function to a specific alignment.
- for (MachineBasicBlock &MBB : MF) {
- if (HasMaxBytesOverride)
- MBB.setAlignment(Align(1ULL << AlignAllBlock),
- MaxBytesForAlignmentOverride);
- else
- MBB.setAlignment(Align(1ULL << AlignAllBlock));
- }
- else if (AlignAllNonFallThruBlocks) {
- // Align all of the blocks that have no fall-through predecessors to a
- // specific alignment.
- for (auto MBI = std::next(MF.begin()), MBE = MF.end(); MBI != MBE; ++MBI) {
- auto LayoutPred = std::prev(MBI);
- if (!LayoutPred->isSuccessor(&*MBI)) {
- if (HasMaxBytesOverride)
- MBI->setAlignment(Align(1ULL << AlignAllNonFallThruBlocks),
- MaxBytesForAlignmentOverride);
- else
- MBI->setAlignment(Align(1ULL << AlignAllNonFallThruBlocks));
- }
- }
- }
+ // View the function.
if (ViewBlockLayoutWithBFI != GVDT_None &&
(ViewBlockFreqFuncName.empty() ||
F->getFunction().getName() == ViewBlockFreqFuncName)) {
@@ -3705,7 +3698,7 @@ void MachineBlockPlacement::assignBlockOrder(
#ifndef NDEBUG
// Make sure we correctly constructed all branches.
- F->verify(this, "After optimized block reordering");
+ F->verify(this, "After optimized block reordering", &errs());
#endif
}
diff --git a/llvm/lib/CodeGen/MachineOperand.cpp b/llvm/lib/CodeGen/MachineOperand.cpp
index 6ee4762..89d32c3 100644
--- a/llvm/lib/CodeGen/MachineOperand.cpp
+++ b/llvm/lib/CodeGen/MachineOperand.cpp
@@ -1047,7 +1047,8 @@ bool MachinePointerInfo::isDereferenceable(unsigned Size, LLVMContext &C,
return false;
return isDereferenceableAndAlignedPointer(
- BasePtr, Align(1), APInt(DL.getPointerSizeInBits(), Offset + Size), DL);
+ BasePtr, Align(1), APInt(DL.getPointerSizeInBits(), Offset + Size), DL,
+ dyn_cast<Instruction>(BasePtr));
}
/// getConstantPool - Return a MachinePointerInfo record that refers to the
diff --git a/llvm/lib/CodeGen/MachineScheduler.cpp b/llvm/lib/CodeGen/MachineScheduler.cpp
index 4e6d343..9b2862d 100644
--- a/llvm/lib/CodeGen/MachineScheduler.cpp
+++ b/llvm/lib/CodeGen/MachineScheduler.cpp
@@ -453,7 +453,7 @@ bool MachineScheduler::runOnMachineFunction(MachineFunction &mf) {
if (VerifyScheduling) {
LLVM_DEBUG(LIS->dump());
- MF->verify(this, "Before machine scheduling.");
+ MF->verify(this, "Before machine scheduling.", &errs());
}
RegClassInfo->runOnMachineFunction(*MF);
@@ -472,7 +472,7 @@ bool MachineScheduler::runOnMachineFunction(MachineFunction &mf) {
LLVM_DEBUG(LIS->dump());
if (VerifyScheduling)
- MF->verify(this, "After machine scheduling.");
+ MF->verify(this, "After machine scheduling.", &errs());
return true;
}
@@ -496,7 +496,7 @@ bool PostMachineScheduler::runOnMachineFunction(MachineFunction &mf) {
AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
if (VerifyScheduling)
- MF->verify(this, "Before post machine scheduling.");
+ MF->verify(this, "Before post machine scheduling.", &errs());
// Instantiate the selected scheduler for this target, function, and
// optimization level.
@@ -512,7 +512,7 @@ bool PostMachineScheduler::runOnMachineFunction(MachineFunction &mf) {
scheduleRegions(*Scheduler, true);
if (VerifyScheduling)
- MF->verify(this, "After post machine scheduling.");
+ MF->verify(this, "After post machine scheduling.", &errs());
return true;
}
diff --git a/llvm/lib/CodeGen/MachineSink.cpp b/llvm/lib/CodeGen/MachineSink.cpp
index 609f9af..658ebd4 100644
--- a/llvm/lib/CodeGen/MachineSink.cpp
+++ b/llvm/lib/CodeGen/MachineSink.cpp
@@ -2152,8 +2152,9 @@ bool PostRAMachineSinking::tryToSinkCopy(MachineBasicBlock &CurBB,
MachineBasicBlock::iterator InsertPos =
SuccBB->SkipPHIsAndLabels(SuccBB->begin());
if (blockPrologueInterferes(SuccBB, InsertPos, MI, TRI, TII, nullptr)) {
- LLVM_DEBUG(
- dbgs() << " *** Not sinking: prologue interference\n");
+ LiveRegUnits::accumulateUsedDefed(MI, ModifiedRegUnits, UsedRegUnits,
+ TRI);
+ LLVM_DEBUG(dbgs() << " *** Not sinking: prologue interference\n");
continue;
}
diff --git a/llvm/lib/CodeGen/MachineVerifier.cpp b/llvm/lib/CodeGen/MachineVerifier.cpp
index 2766420..24a0f41 100644
--- a/llvm/lib/CodeGen/MachineVerifier.cpp
+++ b/llvm/lib/CodeGen/MachineVerifier.cpp
@@ -94,21 +94,24 @@ using namespace llvm;
namespace {
struct MachineVerifier {
- MachineVerifier(MachineFunctionAnalysisManager &MFAM, const char *b)
- : MFAM(&MFAM), Banner(b) {}
+ MachineVerifier(MachineFunctionAnalysisManager &MFAM, const char *b,
+ raw_ostream *OS)
+ : MFAM(&MFAM), OS(OS ? *OS : nulls()), Banner(b) {}
- MachineVerifier(Pass *pass, const char *b) : PASS(pass), Banner(b) {}
+ MachineVerifier(Pass *pass, const char *b, raw_ostream *OS)
+ : PASS(pass), OS(OS ? *OS : nulls()), Banner(b) {}
MachineVerifier(const char *b, LiveVariables *LiveVars,
LiveIntervals *LiveInts, LiveStacks *LiveStks,
- SlotIndexes *Indexes)
- : Banner(b), LiveVars(LiveVars), LiveInts(LiveInts), LiveStks(LiveStks),
- Indexes(Indexes) {}
+ SlotIndexes *Indexes, raw_ostream *OS)
+ : OS(OS ? *OS : nulls()), Banner(b), LiveVars(LiveVars),
+ LiveInts(LiveInts), LiveStks(LiveStks), Indexes(Indexes) {}
unsigned verify(const MachineFunction &MF);
MachineFunctionAnalysisManager *MFAM = nullptr;
Pass *const PASS = nullptr;
+ raw_ostream &OS;
const char *Banner;
const MachineFunction *MF = nullptr;
const TargetMachine *TM = nullptr;
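The OS(OS ? *OS : nulls()) initializer lets callers pass nullptr to silence the verifier while every report site stays unconditional. A minimal sketch of the idiom, assuming only llvm/Support/raw_ostream.h:

    #include "llvm/Support/raw_ostream.h"

    struct Checker {
      llvm::raw_ostream &OS; // bound once; never re-checked at report sites
      explicit Checker(llvm::raw_ostream *OS) : OS(OS ? *OS : llvm::nulls()) {}
      void report(const char *Msg) { OS << "error: " << Msg << '\n'; }
    };

    int main() {
      Checker Loud(&llvm::errs()); // diagnostics go to stderr
      Checker Quiet(nullptr);      // diagnostics are discarded by nulls()
      Loud.report("printed");
      Quiet.report("dropped");
      return 0;
    }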
@@ -334,7 +337,8 @@ namespace {
MachineFunctionProperties::Property::FailsVerification))
return false;
- unsigned FoundErrors = MachineVerifier(this, Banner.c_str()).verify(MF);
+ unsigned FoundErrors =
+ MachineVerifier(this, Banner.c_str(), &errs()).verify(MF);
if (FoundErrors)
report_fatal_error("Found "+Twine(FoundErrors)+" machine code errors.");
return false;
@@ -352,7 +356,8 @@ MachineVerifierPass::run(MachineFunction &MF,
if (MF.getProperties().hasProperty(
MachineFunctionProperties::Property::FailsVerification))
return PreservedAnalyses::all();
- unsigned FoundErrors = MachineVerifier(MFAM, Banner.c_str()).verify(MF);
+ unsigned FoundErrors =
+ MachineVerifier(MFAM, Banner.c_str(), &errs()).verify(MF);
if (FoundErrors)
report_fatal_error("Found " + Twine(FoundErrors) + " machine code errors.");
return PreservedAnalyses::all();
@@ -374,25 +379,28 @@ void llvm::verifyMachineFunction(const std::string &Banner,
// LiveIntervals *LiveInts;
// LiveStacks *LiveStks;
// SlotIndexes *Indexes;
- unsigned FoundErrors = MachineVerifier(nullptr, Banner.c_str()).verify(MF);
+ unsigned FoundErrors =
+ MachineVerifier(nullptr, Banner.c_str(), &errs()).verify(MF);
if (FoundErrors)
report_fatal_error("Found " + Twine(FoundErrors) + " machine code errors.");
}
-bool MachineFunction::verify(Pass *p, const char *Banner, bool AbortOnErrors)
- const {
+bool MachineFunction::verify(Pass *p, const char *Banner, raw_ostream *OS,
+ bool AbortOnErrors) const {
MachineFunction &MF = const_cast<MachineFunction&>(*this);
- unsigned FoundErrors = MachineVerifier(p, Banner).verify(MF);
+ unsigned FoundErrors = MachineVerifier(p, Banner, OS).verify(MF);
if (AbortOnErrors && FoundErrors)
report_fatal_error("Found "+Twine(FoundErrors)+" machine code errors.");
return FoundErrors == 0;
}
bool MachineFunction::verify(LiveIntervals *LiveInts, SlotIndexes *Indexes,
- const char *Banner, bool AbortOnErrors) const {
+ const char *Banner, raw_ostream *OS,
+ bool AbortOnErrors) const {
MachineFunction &MF = const_cast<MachineFunction &>(*this);
unsigned FoundErrors =
- MachineVerifier(Banner, nullptr, LiveInts, nullptr, Indexes).verify(MF);
+ MachineVerifier(Banner, nullptr, LiveInts, nullptr, Indexes, OS)
+ .verify(MF);
if (AbortOnErrors && FoundErrors)
report_fatal_error("Found " + Twine(FoundErrors) + " machine code errors.");
return FoundErrors == 0;
@@ -482,7 +490,7 @@ unsigned MachineVerifier::verify(const MachineFunction &MF) {
for (const MachineInstr &MI : MBB.instrs()) {
if (MI.getParent() != &MBB) {
report("Bad instruction parent pointer", &MBB);
- errs() << "Instruction: " << MI;
+ OS << "Instruction: " << MI;
continue;
}
@@ -540,46 +548,48 @@ unsigned MachineVerifier::verify(const MachineFunction &MF) {
void MachineVerifier::report(const char *msg, const MachineFunction *MF) {
assert(MF);
- errs() << '\n';
+ OS << '\n';
if (!foundErrors++) {
if (Banner)
- errs() << "# " << Banner << '\n';
+ OS << "# " << Banner << '\n';
+
if (LiveInts != nullptr)
- LiveInts->print(errs());
+ LiveInts->print(OS);
else
- MF->print(errs(), Indexes);
+ MF->print(OS, Indexes);
}
- errs() << "*** Bad machine code: " << msg << " ***\n"
- << "- function: " << MF->getName() << "\n";
+
+ OS << "*** Bad machine code: " << msg << " ***\n"
+ << "- function: " << MF->getName() << '\n';
}
void MachineVerifier::report(const char *msg, const MachineBasicBlock *MBB) {
assert(MBB);
report(msg, MBB->getParent());
- errs() << "- basic block: " << printMBBReference(*MBB) << ' '
- << MBB->getName() << " (" << (const void *)MBB << ')';
+ OS << "- basic block: " << printMBBReference(*MBB) << ' ' << MBB->getName()
+ << " (" << (const void *)MBB << ')';
if (Indexes)
- errs() << " [" << Indexes->getMBBStartIdx(MBB)
- << ';' << Indexes->getMBBEndIdx(MBB) << ')';
- errs() << '\n';
+ OS << " [" << Indexes->getMBBStartIdx(MBB) << ';'
+ << Indexes->getMBBEndIdx(MBB) << ')';
+ OS << '\n';
}
void MachineVerifier::report(const char *msg, const MachineInstr *MI) {
assert(MI);
report(msg, MI->getParent());
- errs() << "- instruction: ";
+ OS << "- instruction: ";
if (Indexes && Indexes->hasIndex(*MI))
- errs() << Indexes->getInstructionIndex(*MI) << '\t';
- MI->print(errs(), /*IsStandalone=*/true);
+ OS << Indexes->getInstructionIndex(*MI) << '\t';
+ MI->print(OS, /*IsStandalone=*/true);
}
void MachineVerifier::report(const char *msg, const MachineOperand *MO,
unsigned MONum, LLT MOVRegType) {
assert(MO);
report(msg, MO->getParent());
- errs() << "- operand " << MONum << ": ";
- MO->print(errs(), MOVRegType, TRI);
- errs() << "\n";
+ OS << "- operand " << MONum << ": ";
+ MO->print(OS, MOVRegType, TRI);
+ OS << '\n';
}
void MachineVerifier::report(const Twine &Msg, const MachineInstr *MI) {
@@ -587,11 +597,11 @@ void MachineVerifier::report(const Twine &Msg, const MachineInstr *MI) {
}
void MachineVerifier::report_context(SlotIndex Pos) const {
- errs() << "- at: " << Pos << '\n';
+ OS << "- at: " << Pos << '\n';
}
void MachineVerifier::report_context(const LiveInterval &LI) const {
- errs() << "- interval: " << LI << '\n';
+ OS << "- interval: " << LI << '\n';
}
void MachineVerifier::report_context(const LiveRange &LR, Register VRegUnit,
@@ -603,35 +613,35 @@ void MachineVerifier::report_context(const LiveRange &LR, Register VRegUnit,
}
void MachineVerifier::report_context(const LiveRange::Segment &S) const {
- errs() << "- segment: " << S << '\n';
+ OS << "- segment: " << S << '\n';
}
void MachineVerifier::report_context(const VNInfo &VNI) const {
- errs() << "- ValNo: " << VNI.id << " (def " << VNI.def << ")\n";
+ OS << "- ValNo: " << VNI.id << " (def " << VNI.def << ")\n";
}
void MachineVerifier::report_context_liverange(const LiveRange &LR) const {
- errs() << "- liverange: " << LR << '\n';
+ OS << "- liverange: " << LR << '\n';
}
void MachineVerifier::report_context(MCPhysReg PReg) const {
- errs() << "- p. register: " << printReg(PReg, TRI) << '\n';
+ OS << "- p. register: " << printReg(PReg, TRI) << '\n';
}
void MachineVerifier::report_context_vreg(Register VReg) const {
- errs() << "- v. register: " << printReg(VReg, TRI) << '\n';
+ OS << "- v. register: " << printReg(VReg, TRI) << '\n';
}
void MachineVerifier::report_context_vreg_regunit(Register VRegOrUnit) const {
if (VRegOrUnit.isVirtual()) {
report_context_vreg(VRegOrUnit);
} else {
- errs() << "- regunit: " << printRegUnit(VRegOrUnit, TRI) << '\n';
+ OS << "- regunit: " << printRegUnit(VRegOrUnit, TRI) << '\n';
}
}
void MachineVerifier::report_context_lanemask(LaneBitmask LaneMask) const {
- errs() << "- lanemask: " << PrintLaneMask(LaneMask) << '\n';
+ OS << "- lanemask: " << PrintLaneMask(LaneMask) << '\n';
}
void MachineVerifier::markReachable(const MachineBasicBlock *MBB) {
@@ -710,8 +720,8 @@ MachineVerifier::visitMachineBasicBlockBefore(const MachineBasicBlock *MBB) {
report("MBB has successor that isn't part of the function.", MBB);
if (!MBBInfoMap[succ].Preds.count(MBB)) {
report("Inconsistent CFG", MBB);
- errs() << "MBB is not in the predecessor list of the successor "
- << printMBBReference(*succ) << ".\n";
+ OS << "MBB is not in the predecessor list of the successor "
+ << printMBBReference(*succ) << ".\n";
}
}
@@ -721,8 +731,8 @@ MachineVerifier::visitMachineBasicBlockBefore(const MachineBasicBlock *MBB) {
report("MBB has predecessor that isn't part of the function.", MBB);
if (!MBBInfoMap[Pred].Succs.count(MBB)) {
report("Inconsistent CFG", MBB);
- errs() << "MBB is not in the successor list of the predecessor "
- << printMBBReference(*Pred) << ".\n";
+ OS << "MBB is not in the successor list of the predecessor "
+ << printMBBReference(*Pred) << ".\n";
}
}
@@ -880,7 +890,7 @@ void MachineVerifier::visitMachineBundleBefore(const MachineInstr *MI) {
SlotIndex idx = Indexes->getInstructionIndex(*MI);
if (!(idx > lastIndex)) {
report("Instruction index out of order", MI);
- errs() << "Last instruction was at " << lastIndex << '\n';
+ OS << "Last instruction was at " << lastIndex << '\n';
}
lastIndex = idx;
}
@@ -894,7 +904,7 @@ void MachineVerifier::visitMachineBundleBefore(const MachineInstr *MI) {
// precede non-terminators.
if (FirstTerminator->getOpcode() != TargetOpcode::G_INVOKE_REGION_START) {
report("Non-terminator instruction after the first terminator", MI);
- errs() << "First terminator was:\t" << *FirstTerminator;
+ OS << "First terminator was:\t" << *FirstTerminator;
}
}
}
@@ -2185,8 +2195,8 @@ void MachineVerifier::visitMachineInstrBefore(const MachineInstr *MI) {
const MCInstrDesc &MCID = MI->getDesc();
if (MI->getNumOperands() < MCID.getNumOperands()) {
report("Too few operands", MI);
- errs() << MCID.getNumOperands() << " operands expected, but "
- << MI->getNumOperands() << " given.\n";
+ OS << MCID.getNumOperands() << " operands expected, but "
+ << MI->getNumOperands() << " given.\n";
}
if (MI->getFlag(MachineInstr::NoConvergent) && !MCID.isConvergent())
@@ -2278,7 +2288,7 @@ void MachineVerifier::visitMachineInstrBefore(const MachineInstr *MI) {
// If both types are valid, check that the types are the same.
if (SrcTy != DstTy) {
report("Copy Instruction is illegal with mismatching types", MI);
- errs() << "Def = " << DstTy << ", Src = " << SrcTy << "\n";
+ OS << "Def = " << DstTy << ", Src = " << SrcTy << '\n';
}
break;
@@ -2322,8 +2332,7 @@ void MachineVerifier::visitMachineInstrBefore(const MachineInstr *MI) {
if (SrcSize.isNonZero() && DstSize.isNonZero() && SrcSize != DstSize) {
if (!DstOp.getSubReg() && !SrcOp.getSubReg()) {
report("Copy Instruction is illegal with mismatching sizes", MI);
- errs() << "Def Size = " << DstSize << ", Src Size = " << SrcSize
- << "\n";
+ OS << "Def Size = " << DstSize << ", Src Size = " << SrcSize << '\n';
}
}
break;
@@ -2554,8 +2563,8 @@ MachineVerifier::visitMachineOperand(const MachineOperand *MO, unsigned MONum) {
TII->getRegClass(MCID, MONum, TRI, *MF)) {
if (!DRC->contains(Reg)) {
report("Illegal physical register for instruction", MO, MONum);
- errs() << printReg(Reg, TRI) << " is not a "
- << TRI->getRegClassName(DRC) << " register.\n";
+ OS << printReg(Reg, TRI) << " is not a "
+ << TRI->getRegClassName(DRC) << " register.\n";
}
}
}
@@ -2618,9 +2627,9 @@ MachineVerifier::visitMachineOperand(const MachineOperand *MO, unsigned MONum) {
RBI->getMaximumSize(RegBank->getID()) < Ty.getSizeInBits()) {
report("Register bank is too small for virtual register", MO,
MONum);
- errs() << "Register bank " << RegBank->getName() << " too small("
- << RBI->getMaximumSize(RegBank->getID()) << ") to fit "
- << Ty.getSizeInBits() << "-bits\n";
+ OS << "Register bank " << RegBank->getName() << " too small("
+ << RBI->getMaximumSize(RegBank->getID()) << ") to fit "
+ << Ty.getSizeInBits() << "-bits\n";
return;
}
}
@@ -2639,10 +2648,9 @@ MachineVerifier::visitMachineOperand(const MachineOperand *MO, unsigned MONum) {
TII->getRegClass(MCID, MONum, TRI, *MF)) {
report("Virtual register does not match instruction constraint", MO,
MONum);
- errs() << "Expect register class "
- << TRI->getRegClassName(
- TII->getRegClass(MCID, MONum, TRI, *MF))
- << " but got nothing\n";
+ OS << "Expect register class "
+ << TRI->getRegClassName(TII->getRegClass(MCID, MONum, TRI, *MF))
+ << " but got nothing\n";
return;
}
@@ -2653,14 +2661,14 @@ MachineVerifier::visitMachineOperand(const MachineOperand *MO, unsigned MONum) {
TRI->getSubClassWithSubReg(RC, SubIdx);
if (!SRC) {
report("Invalid subregister index for virtual register", MO, MONum);
- errs() << "Register class " << TRI->getRegClassName(RC)
- << " does not support subreg index " << SubIdx << "\n";
+ OS << "Register class " << TRI->getRegClassName(RC)
+ << " does not support subreg index " << SubIdx << '\n';
return;
}
if (RC != SRC) {
report("Invalid register class for subregister index", MO, MONum);
- errs() << "Register class " << TRI->getRegClassName(RC)
- << " does not fully support subreg index " << SubIdx << "\n";
+ OS << "Register class " << TRI->getRegClassName(RC)
+ << " does not fully support subreg index " << SubIdx << '\n';
return;
}
}
@@ -2682,9 +2690,9 @@ MachineVerifier::visitMachineOperand(const MachineOperand *MO, unsigned MONum) {
}
if (!RC->hasSuperClassEq(DRC)) {
report("Illegal virtual register for instruction", MO, MONum);
- errs() << "Expected a " << TRI->getRegClassName(DRC)
- << " register, but got a " << TRI->getRegClassName(RC)
- << " register\n";
+ OS << "Expected a " << TRI->getRegClassName(DRC)
+ << " register, but got a " << TRI->getRegClassName(RC)
+ << " register\n";
}
}
}
@@ -2733,11 +2741,11 @@ MachineVerifier::visitMachineOperand(const MachineOperand *MO, unsigned MONum) {
}
if (loads && !LI.liveAt(Idx.getRegSlot(true))) {
report("Instruction loads from dead spill slot", MO, MONum);
- errs() << "Live stack: " << LI << '\n';
+ OS << "Live stack: " << LI << '\n';
}
if (stores && !LI.liveAt(Idx.getRegSlot())) {
report("Instruction stores to dead spill slot", MO, MONum);
- errs() << "Live stack: " << LI << '\n';
+ OS << "Live stack: " << LI << '\n';
}
}
break;
@@ -2758,6 +2766,15 @@ void MachineVerifier::checkLivenessAtUse(const MachineOperand *MO,
Register VRegOrUnit,
LaneBitmask LaneMask) {
const MachineInstr *MI = MO->getParent();
+
+ if (!LR.verify()) {
+ report("invalid live range", MO, MONum);
+ report_context_liverange(LR);
+ report_context_vreg_regunit(VRegOrUnit);
+ report_context(UseIdx);
+ return;
+ }
+
LiveQueryResult LRQ = LR.Query(UseIdx);
bool HasValue = LRQ.valueIn() || (MI->isPHI() && LRQ.valueOut());
// Check if we have a segment at the use, note however that we only need one
@@ -2784,6 +2801,15 @@ void MachineVerifier::checkLivenessAtDef(const MachineOperand *MO,
Register VRegOrUnit,
bool SubRangeCheck,
LaneBitmask LaneMask) {
+ if (!LR.verify()) {
+ report("invalid live range", MO, MONum);
+ report_context_liverange(LR);
+ report_context_vreg_regunit(VRegOrUnit);
+ if (LaneMask.any())
+ report_context_lanemask(LaneMask);
+ report_context(DefIdx);
+ }
+
if (const VNInfo *VNI = LR.getVNInfoAt(DefIdx)) {
// The LR can correspond to the whole reg and its def slot is not obliged
// to be the same as the MO' def slot. E.g. when we check here "normal"
@@ -3032,8 +3058,8 @@ MachineVerifier::visitMachineBasicBlockAfter(const MachineBasicBlock *MBB) {
SlotIndex stop = Indexes->getMBBEndIdx(MBB);
if (!(stop > lastIndex)) {
report("Block ends before last instruction index", MBB);
- errs() << "Block ends at " << stop
- << " last instruction was at " << lastIndex << '\n';
+ OS << "Block ends at " << stop << " last instruction was at " << lastIndex
+ << '\n';
}
lastIndex = stop;
}
@@ -3278,8 +3304,8 @@ void MachineVerifier::checkPHIOps(const MachineBasicBlock &MBB) {
for (MachineBasicBlock *Pred : MBB.predecessors()) {
if (!seen.count(Pred)) {
report("Missing PHI operand", &Phi);
- errs() << printMBBReference(*Pred)
- << " is a predecessor according to the CFG.\n";
+ OS << printMBBReference(*Pred)
+ << " is a predecessor according to the CFG.\n";
}
}
}
@@ -3288,9 +3314,10 @@ void MachineVerifier::checkPHIOps(const MachineBasicBlock &MBB) {
static void
verifyConvergenceControl(const MachineFunction &MF, MachineDominatorTree &DT,
- std::function<void(const Twine &Message)> FailureCB) {
+ std::function<void(const Twine &Message)> FailureCB,
+ raw_ostream &OS) {
MachineConvergenceVerifier CV;
- CV.initialize(&errs(), FailureCB, MF);
+ CV.initialize(&OS, FailureCB, MF);
for (const auto &MBB : MF) {
CV.visit(MBB);
@@ -3308,7 +3335,7 @@ void MachineVerifier::visitMachineFunctionAfter() {
auto FailureCB = [this](const Twine &Message) {
report(Message.str().c_str(), MF);
};
- verifyConvergenceControl(*MF, DT, FailureCB);
+ verifyConvergenceControl(*MF, DT, FailureCB, OS);
calcRegsPassed();
@@ -3324,8 +3351,8 @@ void MachineVerifier::visitMachineFunctionAfter() {
for (Register VReg : MInfo.vregsRequired)
if (MInfo.regsKilled.count(VReg)) {
report("Virtual register killed in block, but needed live out.", &MBB);
- errs() << "Virtual register " << printReg(VReg)
- << " is used after the block.\n";
+ OS << "Virtual register " << printReg(VReg)
+ << " is used after the block.\n";
}
}
@@ -3361,9 +3388,8 @@ void MachineVerifier::visitMachineFunctionAfter() {
if (!PInfo.regsLiveOut.count(LiveInReg)) {
report("Live in register not found to be live out from predecessor.",
&MBB);
- errs() << TRI->getName(LiveInReg)
- << " not found to be live out from "
- << printMBBReference(*Pred) << "\n";
+ OS << TRI->getName(LiveInReg) << " not found to be live out from "
+ << printMBBReference(*Pred) << '\n';
}
}
}
@@ -3400,14 +3426,14 @@ void MachineVerifier::verifyLiveVariables() {
if (MInfo.vregsRequired.count(Reg)) {
if (!VI.AliveBlocks.test(MBB.getNumber())) {
report("LiveVariables: Block missing from AliveBlocks", &MBB);
- errs() << "Virtual register " << printReg(Reg)
- << " must be live through the block.\n";
+ OS << "Virtual register " << printReg(Reg)
+ << " must be live through the block.\n";
}
} else {
if (VI.AliveBlocks.test(MBB.getNumber())) {
report("LiveVariables: Block should not be in AliveBlocks", &MBB);
- errs() << "Virtual register " << printReg(Reg)
- << " is not needed live through the block.\n";
+ OS << "Virtual register " << printReg(Reg)
+ << " is not needed live through the block.\n";
}
}
}
@@ -3425,7 +3451,7 @@ void MachineVerifier::verifyLiveIntervals() {
if (!LiveInts->hasInterval(Reg)) {
report("Missing live interval for virtual register", MF);
- errs() << printReg(Reg, TRI) << " still has defs or uses\n";
+ OS << printReg(Reg, TRI) << " still has defs or uses\n";
continue;
}
@@ -3737,9 +3763,9 @@ void MachineVerifier::verifyLiveRangeSegment(const LiveRange &LR,
report("Register not marked live out of predecessor", Pred);
report_context(LR, Reg, LaneMask);
report_context(*VNI);
- errs() << " live into " << printMBBReference(*MFI) << '@'
- << LiveInts->getMBBStartIdx(&*MFI) << ", not live before "
- << PEnd << '\n';
+ OS << " live into " << printMBBReference(*MFI) << '@'
+ << LiveInts->getMBBStartIdx(&*MFI) << ", not live before " << PEnd
+ << '\n';
continue;
}
@@ -3747,10 +3773,10 @@ void MachineVerifier::verifyLiveRangeSegment(const LiveRange &LR,
if (!IsPHI && PVNI != VNI) {
report("Different value live out of predecessor", Pred);
report_context(LR, Reg, LaneMask);
- errs() << "Valno #" << PVNI->id << " live out of "
- << printMBBReference(*Pred) << '@' << PEnd << "\nValno #"
- << VNI->id << " live into " << printMBBReference(*MFI) << '@'
- << LiveInts->getMBBStartIdx(&*MFI) << '\n';
+ OS << "Valno #" << PVNI->id << " live out of "
+ << printMBBReference(*Pred) << '@' << PEnd << "\nValno #" << VNI->id
+ << " live into " << printMBBReference(*MFI) << '@'
+ << LiveInts->getMBBStartIdx(&*MFI) << '\n';
}
}
if (&*MFI == EndMBB)
@@ -3805,11 +3831,11 @@ void MachineVerifier::verifyLiveInterval(const LiveInterval &LI) {
report("Multiple connected components in live interval", MF);
report_context(LI);
for (unsigned comp = 0; comp != NumComp; ++comp) {
- errs() << comp << ": valnos";
+ OS << comp << ": valnos";
for (const VNInfo *I : LI.valnos)
if (comp == ConEQ.getEqClass(I))
- errs() << ' ' << I->id;
- errs() << '\n';
+ OS << ' ' << I->id;
+ OS << '\n';
}
}
}
@@ -3871,9 +3897,9 @@ void MachineVerifier::verifyStackFrame() {
report("Call frame size on entry does not match value computed from "
"predecessor",
MBB);
- errs() << "Call frame size on entry " << MBB->getCallFrameSize()
- << " does not match value computed from predecessor "
- << -BBState.EntryValue << '\n';
+ OS << "Call frame size on entry " << MBB->getCallFrameSize()
+ << " does not match value computed from predecessor "
+ << -BBState.EntryValue << '\n';
}
// Update stack state by checking contents of MBB.
@@ -3896,8 +3922,8 @@ void MachineVerifier::verifyStackFrame() {
BBState.ExitValue;
if (BBState.ExitIsSetup && AbsSPAdj != Size) {
report("FrameDestroy <n> is after FrameSetup <m>", &I);
- errs() << "FrameDestroy <" << Size << "> is after FrameSetup <"
- << AbsSPAdj << ">.\n";
+ OS << "FrameDestroy <" << Size << "> is after FrameSetup <"
+ << AbsSPAdj << ">.\n";
}
if (!MRI->isSSA() && !MF->getFrameInfo().adjustsStack())
report("AdjustsStack not set in presence of a frame pseudo "
@@ -3915,11 +3941,11 @@ void MachineVerifier::verifyStackFrame() {
(SPState[Pred->getNumber()].ExitValue != BBState.EntryValue ||
SPState[Pred->getNumber()].ExitIsSetup != BBState.EntryIsSetup)) {
report("The exit stack state of a predecessor is inconsistent.", MBB);
- errs() << "Predecessor " << printMBBReference(*Pred)
- << " has exit state (" << SPState[Pred->getNumber()].ExitValue
- << ", " << SPState[Pred->getNumber()].ExitIsSetup << "), while "
- << printMBBReference(*MBB) << " has entry state ("
- << BBState.EntryValue << ", " << BBState.EntryIsSetup << ").\n";
+ OS << "Predecessor " << printMBBReference(*Pred) << " has exit state ("
+ << SPState[Pred->getNumber()].ExitValue << ", "
+ << SPState[Pred->getNumber()].ExitIsSetup << "), while "
+ << printMBBReference(*MBB) << " has entry state ("
+ << BBState.EntryValue << ", " << BBState.EntryIsSetup << ").\n";
}
}
@@ -3930,11 +3956,11 @@ void MachineVerifier::verifyStackFrame() {
(SPState[Succ->getNumber()].EntryValue != BBState.ExitValue ||
SPState[Succ->getNumber()].EntryIsSetup != BBState.ExitIsSetup)) {
report("The entry stack state of a successor is inconsistent.", MBB);
- errs() << "Successor " << printMBBReference(*Succ)
- << " has entry state (" << SPState[Succ->getNumber()].EntryValue
- << ", " << SPState[Succ->getNumber()].EntryIsSetup << "), while "
- << printMBBReference(*MBB) << " has exit state ("
- << BBState.ExitValue << ", " << BBState.ExitIsSetup << ").\n";
+ OS << "Successor " << printMBBReference(*Succ) << " has entry state ("
+ << SPState[Succ->getNumber()].EntryValue << ", "
+ << SPState[Succ->getNumber()].EntryIsSetup << "), while "
+ << printMBBReference(*MBB) << " has exit state ("
+ << BBState.ExitValue << ", " << BBState.ExitIsSetup << ").\n";
}
}
diff --git a/llvm/lib/CodeGen/RegAllocGreedy.cpp b/llvm/lib/CodeGen/RegAllocGreedy.cpp
index 5001b4f..1ad70c8 100644
--- a/llvm/lib/CodeGen/RegAllocGreedy.cpp
+++ b/llvm/lib/CodeGen/RegAllocGreedy.cpp
@@ -1054,7 +1054,7 @@ void RAGreedy::splitAroundRegion(LiveRangeEdit &LREdit,
}
if (VerifyEnabled)
- MF->verify(this, "After splitting live range around region");
+ MF->verify(this, "After splitting live range around region", &errs());
}
MCRegister RAGreedy::tryRegionSplit(const LiveInterval &VirtReg,
@@ -1323,7 +1323,7 @@ unsigned RAGreedy::tryBlockSplit(const LiveInterval &VirtReg,
}
if (VerifyEnabled)
- MF->verify(this, "After splitting live range around basic blocks");
+ MF->verify(this, "After splitting live range around basic blocks", &errs());
return 0;
}
@@ -2507,7 +2507,7 @@ MCRegister RAGreedy::selectOrSplitImpl(const LiveInterval &VirtReg,
DebugVars->splitRegister(VirtReg.reg(), LRE.regs(), *LIS);
if (VerifyEnabled)
- MF->verify(this, "After spilling");
+ MF->verify(this, "After spilling", &errs());
}
// The live virtual register requesting allocation was spilled, so tell
@@ -2711,7 +2711,7 @@ bool RAGreedy::runOnMachineFunction(MachineFunction &mf) {
TII = MF->getSubtarget().getInstrInfo();
if (VerifyEnabled)
- MF->verify(this, "Before greedy register allocator");
+ MF->verify(this, "Before greedy register allocator", &errs());
RegAllocBase::init(getAnalysis<VirtRegMap>(),
getAnalysis<LiveIntervalsWrapperPass>().getLIS(),
@@ -2770,7 +2770,7 @@ bool RAGreedy::runOnMachineFunction(MachineFunction &mf) {
tryHintsRecoloring();
if (VerifyEnabled)
- MF->verify(this, "Before post optimization");
+ MF->verify(this, "Before post optimization", &errs());
postOptimization();
reportStats();
diff --git a/llvm/lib/CodeGen/RegisterCoalescer.cpp b/llvm/lib/CodeGen/RegisterCoalescer.cpp
index 97f8346..2e1f498 100644
--- a/llvm/lib/CodeGen/RegisterCoalescer.cpp
+++ b/llvm/lib/CodeGen/RegisterCoalescer.cpp
@@ -3548,8 +3548,7 @@ void RegisterCoalescer::joinSubRegRanges(LiveRange &LRange, LiveRange &RRange,
LHSVals.removeImplicitDefs();
RHSVals.removeImplicitDefs();
- LRange.verify();
- RRange.verify();
+ assert(LRange.verify() && RRange.verify());
// Join RRange into LHS.
LRange.join(RRange, LHSVals.getAssignments(), RHSVals.getAssignments(),
@@ -4240,7 +4239,7 @@ bool RegisterCoalescer::runOnMachineFunction(MachineFunction &fn) {
JoinSplitEdges = EnableJoinSplits;
if (VerifyCoalescing)
- MF->verify(this, "Before register coalescing");
+ MF->verify(this, "Before register coalescing", &errs());
DbgVRegToValues.clear();
buildVRegToDbgValueMap(fn);
@@ -4300,7 +4299,7 @@ bool RegisterCoalescer::runOnMachineFunction(MachineFunction &fn) {
LLVM_DEBUG(dump());
if (VerifyCoalescing)
- MF->verify(this, "After register coalescing");
+ MF->verify(this, "After register coalescing", &errs());
return true;
}
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index b36a124..c6f6fc2 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -508,6 +508,7 @@ namespace {
SDValue visitFSQRT(SDNode *N);
SDValue visitFCOPYSIGN(SDNode *N);
SDValue visitFPOW(SDNode *N);
+ SDValue visitFCANONICALIZE(SDNode *N);
SDValue visitSINT_TO_FP(SDNode *N);
SDValue visitUINT_TO_FP(SDNode *N);
SDValue visitFP_TO_SINT(SDNode *N);
@@ -1980,6 +1981,7 @@ SDValue DAGCombiner::visit(SDNode *N) {
case ISD::FREEZE: return visitFREEZE(N);
case ISD::GET_FPENV_MEM: return visitGET_FPENV_MEM(N);
case ISD::SET_FPENV_MEM: return visitSET_FPENV_MEM(N);
+ case ISD::FCANONICALIZE: return visitFCANONICALIZE(N);
case ISD::VECREDUCE_FADD:
case ISD::VECREDUCE_FMUL:
case ISD::VECREDUCE_ADD:
@@ -2090,6 +2092,19 @@ static SDValue getInputChainForNode(SDNode *N) {
return SDValue();
}
+SDValue DAGCombiner::visitFCANONICALIZE(SDNode *N) {
+ SDValue Operand = N->getOperand(0);
+ EVT VT = Operand.getValueType();
+ SDLoc dl(N);
+
+ // Canonicalize undef to quiet NaN.
+ if (Operand.isUndef()) {
+ APFloat CanonicalQNaN = APFloat::getQNaN(VT.getFltSemantics());
+ return DAG.getConstantFP(CanonicalQNaN, dl, VT);
+ }
+ return SDValue();
+}
+
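fcanonicalize of undef may legally produce any canonical value, and a quiet NaN always qualifies, which is why the new combine folds it to a ConstantFP qNaN. A small sketch of constructing that value, assuming LLVM's APFloat headers:

    #include "llvm/ADT/APFloat.h"
    #include "llvm/ADT/SmallString.h"
    #include "llvm/Support/raw_ostream.h"

    int main() {
      // The same value the combine materializes for an f32 fcanonicalize(undef).
      llvm::APFloat QNaN = llvm::APFloat::getQNaN(llvm::APFloat::IEEEsingle());
      llvm::SmallString<16> Str;
      QNaN.toString(Str);
      llvm::outs() << Str << '\n'; // prints "nan"
      return 0;
    }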
SDValue DAGCombiner::visitTokenFactor(SDNode *N) {
// If N has two operands, where one has an input chain equal to the other,
// the 'other' chain is redundant.
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
index f5fbc01..3c08772 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
@@ -2326,15 +2326,7 @@ SelectionDAGLegalize::ExpandDivRemLibCall(SDNode *Node,
/// Return true if sincos libcall is available.
static bool isSinCosLibcallAvailable(SDNode *Node, const TargetLowering &TLI) {
- RTLIB::Libcall LC;
- switch (Node->getSimpleValueType(0).SimpleTy) {
- default: llvm_unreachable("Unexpected request for libcall!");
- case MVT::f32: LC = RTLIB::SINCOS_F32; break;
- case MVT::f64: LC = RTLIB::SINCOS_F64; break;
- case MVT::f80: LC = RTLIB::SINCOS_F80; break;
- case MVT::f128: LC = RTLIB::SINCOS_F128; break;
- case MVT::ppcf128: LC = RTLIB::SINCOS_PPCF128; break;
- }
+ RTLIB::Libcall LC = RTLIB::getFSINCOS(Node->getSimpleValueType(0).SimpleTy);
return TLI.getLibcallName(LC) != nullptr;
}
@@ -2355,68 +2347,72 @@ static bool useSinCos(SDNode *Node) {
}
/// Issue libcalls to sincos to compute sin / cos pairs.
-void
-SelectionDAGLegalize::ExpandSinCosLibCall(SDNode *Node,
- SmallVectorImpl<SDValue> &Results) {
- RTLIB::Libcall LC;
- switch (Node->getSimpleValueType(0).SimpleTy) {
- default: llvm_unreachable("Unexpected request for libcall!");
- case MVT::f32: LC = RTLIB::SINCOS_F32; break;
- case MVT::f64: LC = RTLIB::SINCOS_F64; break;
- case MVT::f80: LC = RTLIB::SINCOS_F80; break;
- case MVT::f128: LC = RTLIB::SINCOS_F128; break;
- case MVT::ppcf128: LC = RTLIB::SINCOS_PPCF128; break;
+void SelectionDAGLegalize::ExpandSinCosLibCall(
+ SDNode *Node, SmallVectorImpl<SDValue> &Results) {
+ EVT VT = Node->getValueType(0);
+ Type *Ty = VT.getTypeForEVT(*DAG.getContext());
+ RTLIB::Libcall LC = RTLIB::getFSINCOS(VT);
+
+ // Find users of the node that store the results (and share input chains). The
+ // destination pointers can be used instead of creating stack allocations.
+ SDValue StoresInChain{};
+ std::array<StoreSDNode *, 2> ResultStores = {nullptr};
+ for (SDNode *User : Node->uses()) {
+ if (!ISD::isNormalStore(User))
+ continue;
+ auto *ST = cast<StoreSDNode>(User);
+ if (!ST->isSimple() || ST->getAddressSpace() != 0 ||
+ ST->getAlign() < DAG.getDataLayout().getABITypeAlign(Ty) ||
+ (StoresInChain && ST->getChain() != StoresInChain) ||
+ Node->isPredecessorOf(ST->getChain().getNode()))
+ continue;
+ ResultStores[ST->getValue().getResNo()] = ST;
+ StoresInChain = ST->getChain();
}
- // The input chain to this libcall is the entry node of the function.
- // Legalizing the call will automatically add the previous call to the
- // dependence.
- SDValue InChain = DAG.getEntryNode();
-
- EVT RetVT = Node->getValueType(0);
- Type *RetTy = RetVT.getTypeForEVT(*DAG.getContext());
-
TargetLowering::ArgListTy Args;
- TargetLowering::ArgListEntry Entry;
+ TargetLowering::ArgListEntry Entry{};
// Pass the argument.
Entry.Node = Node->getOperand(0);
- Entry.Ty = RetTy;
- Entry.IsSExt = false;
- Entry.IsZExt = false;
- Args.push_back(Entry);
-
- // Pass the return address of sin.
- SDValue SinPtr = DAG.CreateStackTemporary(RetVT);
- Entry.Node = SinPtr;
- Entry.Ty = PointerType::getUnqual(RetTy->getContext());
- Entry.IsSExt = false;
- Entry.IsZExt = false;
+ Entry.Ty = Ty;
Args.push_back(Entry);
- // Also pass the return address of the cos.
- SDValue CosPtr = DAG.CreateStackTemporary(RetVT);
- Entry.Node = CosPtr;
- Entry.Ty = PointerType::getUnqual(RetTy->getContext());
- Entry.IsSExt = false;
- Entry.IsZExt = false;
- Args.push_back(Entry);
+ // Pass the output pointers for sin and cos.
+ SmallVector<SDValue, 2> ResultPtrs{};
+ for (StoreSDNode *ST : ResultStores) {
+ SDValue ResultPtr = ST ? ST->getBasePtr() : DAG.CreateStackTemporary(VT);
+ Entry.Node = ResultPtr;
+ Entry.Ty = PointerType::getUnqual(Ty->getContext());
+ Args.push_back(Entry);
+ ResultPtrs.push_back(ResultPtr);
+ }
+ SDLoc DL(Node);
+ SDValue InChain = StoresInChain ? StoresInChain : DAG.getEntryNode();
SDValue Callee = DAG.getExternalSymbol(TLI.getLibcallName(LC),
TLI.getPointerTy(DAG.getDataLayout()));
-
- SDLoc dl(Node);
TargetLowering::CallLoweringInfo CLI(DAG);
- CLI.setDebugLoc(dl).setChain(InChain).setLibCallee(
+ CLI.setDebugLoc(DL).setChain(InChain).setLibCallee(
TLI.getLibcallCallingConv(LC), Type::getVoidTy(*DAG.getContext()), Callee,
std::move(Args));
- std::pair<SDValue, SDValue> CallInfo = TLI.LowerCallTo(CLI);
+ auto [Call, OutChain] = TLI.LowerCallTo(CLI);
- Results.push_back(
- DAG.getLoad(RetVT, dl, CallInfo.second, SinPtr, MachinePointerInfo()));
- Results.push_back(
- DAG.getLoad(RetVT, dl, CallInfo.second, CosPtr, MachinePointerInfo()));
+ for (auto [ResNo, ResultPtr] : llvm::enumerate(ResultPtrs)) {
+ MachinePointerInfo PtrInfo;
+ if (StoreSDNode *ST = ResultStores[ResNo]) {
+ // Replace store with the library call.
+ DAG.ReplaceAllUsesOfValueWith(SDValue(ST, 0), OutChain);
+ PtrInfo = ST->getPointerInfo();
+ } else {
+ PtrInfo = MachinePointerInfo::getFixedStack(
+ DAG.getMachineFunction(),
+ cast<FrameIndexSDNode>(ResultPtr)->getIndex());
+ }
+ SDValue LoadResult = DAG.getLoad(VT, DL, OutChain, ResultPtr, PtrInfo);
+ Results.push_back(LoadResult);
+ }
}
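The rewritten expansion targets the classic out-pointer sincos signature and, when the node's users are plain stores, passes their destinations directly instead of stack temporaries plus reloads. A hedged C-level sketch of the effect (sincosf is the GNU libm entry point; availability is target-dependent):

    #include <cstdio>

    extern "C" void sincosf(float X, float *SinOut, float *CosOut);

    void compute(float X, float *S, float *C) {
      // With the patch, *S and *C play the role of the users' store
      // destinations, so no stack slots or extra loads are emitted.
      sincosf(X, S, C);
    }

    int main() {
      float S, C;
      compute(1.0f, &S, &C);
      std::printf("sin=%f cos=%f\n", S, C);
      return 0;
    }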
SDValue SelectionDAGLegalize::expandLdexp(SDNode *Node) const {
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
index c622b2a..ee9c95c 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
@@ -4608,14 +4608,23 @@ void DAGTypeLegalizer::ExpandIntRes_ShiftThroughStack(SDNode *N, SDValue &Lo,
SDValue ShAmt = N->getOperand(1);
EVT ShAmtVT = ShAmt.getValueType();
- // This legalization is optimal when the shift is by a multiple of byte width,
- // %x * 8 <-> %x << 3 so 3 low bits should be be known zero.
- bool ShiftByByteMultiple =
- DAG.computeKnownBits(ShAmt).countMinTrailingZeros() >= 3;
+ EVT LoadVT = VT;
+ do {
+ LoadVT = TLI.getTypeToTransformTo(*DAG.getContext(), LoadVT);
+ } while (!TLI.isTypeLegal(LoadVT));
+
+ const unsigned ShiftUnitInBits = LoadVT.getStoreSizeInBits();
+ assert(ShiftUnitInBits <= VT.getScalarSizeInBits());
+ assert(isPowerOf2_32(ShiftUnitInBits) &&
+ "Shifting unit is not a a power of two!");
+
+ const bool IsOneStepShift =
+ DAG.computeKnownBits(ShAmt).countMinTrailingZeros() >=
+ Log2_32(ShiftUnitInBits);
// If we can't do it as one step, we'll have two uses of shift amount,
// and thus must freeze it.
- if (!ShiftByByteMultiple)
+ if (!IsOneStepShift)
ShAmt = DAG.getFreeze(ShAmt);
unsigned VTBitWidth = VT.getScalarSizeInBits();
@@ -4629,10 +4638,9 @@ void DAGTypeLegalizer::ExpandIntRes_ShiftThroughStack(SDNode *N, SDValue &Lo,
// Get a temporary stack slot 2x the width of our VT.
// FIXME: reuse stack slots?
- // FIXME: should we be more picky about alignment?
- Align StackSlotAlignment(1);
- SDValue StackPtr = DAG.CreateStackTemporary(
- TypeSize::getFixed(StackSlotByteWidth), StackSlotAlignment);
+ Align StackAlign = DAG.getReducedAlign(StackSlotVT, /*UseABI=*/false);
+ SDValue StackPtr =
+ DAG.CreateStackTemporary(StackSlotVT.getStoreSize(), StackAlign);
EVT PtrTy = StackPtr.getValueType();
SDValue Ch = DAG.getEntryNode();
@@ -4652,15 +4660,22 @@ void DAGTypeLegalizer::ExpandIntRes_ShiftThroughStack(SDNode *N, SDValue &Lo,
Init = DAG.getNode(ISD::BUILD_PAIR, dl, StackSlotVT, AllZeros, Shiftee);
}
// And spill it into the stack slot.
- Ch = DAG.getStore(Ch, dl, Init, StackPtr, StackPtrInfo, StackSlotAlignment);
+ Ch = DAG.getStore(Ch, dl, Init, StackPtr, StackPtrInfo, StackAlign);
// Now, compute the full-byte offset into stack slot from where we can load.
- // We have shift amount, which is in bits, but in multiples of byte.
- // So just divide by CHAR_BIT.
+ // We have the shift amount, which is in bits. The offset should point to an
+ // aligned address.
SDNodeFlags Flags;
- if (ShiftByByteMultiple)
- Flags.setExact(true);
- SDValue ByteOffset = DAG.getNode(ISD::SRL, dl, ShAmtVT, ShAmt,
+ Flags.setExact(IsOneStepShift);
+ SDValue SrlTmp = DAG.getNode(
+ ISD::SRL, dl, ShAmtVT, ShAmt,
+ DAG.getConstant(Log2_32(ShiftUnitInBits), dl, ShAmtVT), Flags);
+ SDValue BitOffset =
+ DAG.getNode(ISD::SHL, dl, ShAmtVT, SrlTmp,
+ DAG.getConstant(Log2_32(ShiftUnitInBits), dl, ShAmtVT));
+
+ Flags.setExact(true);
+ SDValue ByteOffset = DAG.getNode(ISD::SRL, dl, ShAmtVT, BitOffset,
DAG.getConstant(3, dl, ShAmtVT), Flags);
// And clamp it, because OOB load is an immediate UB,
// while shift overflow would have *just* been poison.
@@ -4689,15 +4704,16 @@ void DAGTypeLegalizer::ExpandIntRes_ShiftThroughStack(SDNode *N, SDValue &Lo,
AdjStackPtr = DAG.getMemBasePlusOffset(AdjStackPtr, ByteOffset, dl);
// And load it! While the load is not legal, legalizing it is obvious.
- SDValue Res = DAG.getLoad(
- VT, dl, Ch, AdjStackPtr,
- MachinePointerInfo::getUnknownStack(DAG.getMachineFunction()), Align(1));
- // We've performed the shift by a CHAR_BIT * [_ShAmt / CHAR_BIT_]
-
- // If we may still have a less-than-CHAR_BIT to shift by, do so now.
- if (!ShiftByByteMultiple) {
- SDValue ShAmtRem = DAG.getNode(ISD::AND, dl, ShAmtVT, ShAmt,
- DAG.getConstant(7, dl, ShAmtVT));
+ SDValue Res =
+ DAG.getLoad(VT, dl, Ch, AdjStackPtr,
+ MachinePointerInfo::getUnknownStack(DAG.getMachineFunction()),
+ commonAlignment(StackAlign, LoadVT.getStoreSize()));
+
+ // If we may still have remaining bits to shift by, do so now.
+ if (!IsOneStepShift) {
+ SDValue ShAmtRem =
+ DAG.getNode(ISD::AND, dl, ShAmtVT, ShAmt,
+ DAG.getConstant(ShiftUnitInBits - 1, dl, ShAmtVT));
Res = DAG.getNode(N->getOpcode(), dl, VT, Res, ShAmtRem);
}
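The offset math rounds the shift amount down to a multiple of the legal load unit, converts that to bytes for the stack-slot load, and applies the remainder as a second, in-register shift. A standalone worked example (values hypothetical):

    #include <cstdint>
    #include <cstdio>

    int main() {
      unsigned ShAmt = 93;           // hypothetical shift amount, in bits
      unsigned ShiftUnitInBits = 32; // legal load unit picked by the legalizer
      unsigned Log2Unit = 5;         // Log2_32(ShiftUnitInBits)
      unsigned BitOffset = (ShAmt >> Log2Unit) << Log2Unit; // 64: rounded down to a unit boundary
      unsigned ByteOffset = BitOffset >> 3;                 // 8 bytes into the stack slot
      unsigned Rem = ShAmt & (ShiftUnitInBits - 1);         // 29 bits shifted in the second step
      std::printf("byte offset %u, remaining shift %u\n", ByteOffset, Rem);
      return 0;
    }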
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
index 482f88e..1c466ed 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
@@ -5857,11 +5857,10 @@ SDValue DAGTypeLegalizer::WidenVecRes_LOAD(SDNode *N) {
SDValue Mask = DAG.getAllOnesConstant(DL, WideMaskVT);
SDValue EVL = DAG.getElementCount(DL, TLI.getVPExplicitVectorLengthTy(),
LdVT.getVectorElementCount());
- const auto *MMO = LD->getMemOperand();
SDValue NewLoad =
- DAG.getLoadVP(WideVT, DL, LD->getChain(), LD->getBasePtr(), Mask, EVL,
- MMO->getPointerInfo(), MMO->getAlign(), MMO->getFlags(),
- MMO->getAAInfo());
+ DAG.getLoadVP(LD->getAddressingMode(), ISD::NON_EXTLOAD, WideVT, DL,
+ LD->getChain(), LD->getBasePtr(), LD->getOffset(), Mask,
+ EVL, LD->getMemoryVT(), LD->getMemOperand());
// Modified the chain - switch anything that used the old chain to use
// the new one.
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 9b96dbb..64414e2 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -11237,6 +11237,12 @@ void SelectionDAG::salvageDebugInfo(SDNode &N) {
if (!N.getHasDebugValue())
return;
+ auto GetLocationOperand = [](SDNode *Node, unsigned ResNo) {
+ if (auto *FISDN = dyn_cast<FrameIndexSDNode>(Node))
+ return SDDbgOperand::fromFrameIdx(FISDN->getIndex());
+ return SDDbgOperand::fromNode(Node, ResNo);
+ };
+
SmallVector<SDDbgValue *, 2> ClonedDVs;
for (auto *DV : GetDbgValues(&N)) {
if (DV->isInvalidated())
@@ -11272,7 +11278,7 @@ void SelectionDAG::salvageDebugInfo(SDNode &N) {
if (NewLocOps[i].getKind() != SDDbgOperand::SDNODE ||
NewLocOps[i].getSDNode() != &N)
continue;
- NewLocOps[i] = SDDbgOperand::fromNode(N0.getNode(), N0.getResNo());
+ NewLocOps[i] = GetLocationOperand(N0.getNode(), N0.getResNo());
if (RHSConstant) {
SmallVector<uint64_t, 3> ExprOps;
DIExpression::appendOffset(ExprOps, Offset);
@@ -11327,7 +11333,7 @@ void SelectionDAG::salvageDebugInfo(SDNode &N) {
NewLocOps[i].getSDNode() != &N)
continue;
- NewLocOps[i] = SDDbgOperand::fromNode(N0.getNode(), N0.getResNo());
+ NewLocOps[i] = GetLocationOperand(N0.getNode(), N0.getResNo());
DbgExpression = DIExpression::appendOpsToArg(DbgExpression, ExtOps, i);
Changed = true;
}
@@ -11350,7 +11356,11 @@ void SelectionDAG::salvageDebugInfo(SDNode &N) {
}
for (SDDbgValue *Dbg : ClonedDVs) {
- assert(!Dbg->getSDNodes().empty() &&
+ assert((!Dbg->getSDNodes().empty() ||
+ llvm::any_of(Dbg->getLocationOps(),
+ [&](const SDDbgOperand &Op) {
+ return Op.getKind() == SDDbgOperand::FRAMEIX;
+ })) &&
"Salvaged DbgValue should depend on a new SDNode");
AddDbgValue(Dbg, false);
}
diff --git a/llvm/lib/CodeGen/StackProtector.cpp b/llvm/lib/CodeGen/StackProtector.cpp
index fca822a..1f23838 100644
--- a/llvm/lib/CodeGen/StackProtector.cpp
+++ b/llvm/lib/CodeGen/StackProtector.cpp
@@ -705,7 +705,7 @@ BasicBlock *CreateFailBB(Function *F, const Triple &Trip) {
StackChkFail = M->getOrInsertFunction("__stack_smash_handler",
Type::getVoidTy(Context),
PointerType::getUnqual(Context));
- Args.push_back(B.CreateGlobalStringPtr(F->getName(), "SSH"));
+ Args.push_back(B.CreateGlobalString(F->getName(), "SSH"));
} else {
StackChkFail =
M->getOrInsertFunction("__stack_chk_fail", Type::getVoidTy(Context));
diff --git a/llvm/lib/CodeGen/TargetLoweringBase.cpp b/llvm/lib/CodeGen/TargetLoweringBase.cpp
index 9fdde45..1f49d60 100644
--- a/llvm/lib/CodeGen/TargetLoweringBase.cpp
+++ b/llvm/lib/CodeGen/TargetLoweringBase.cpp
@@ -400,6 +400,11 @@ RTLIB::Libcall RTLIB::getFREXP(EVT RetVT) {
FREXP_PPCF128);
}
+RTLIB::Libcall RTLIB::getFSINCOS(EVT RetVT) {
+ return getFPLibCall(RetVT, SINCOS_F32, SINCOS_F64, SINCOS_F80, SINCOS_F128,
+ SINCOS_PPCF128);
+}
+
RTLIB::Libcall RTLIB::getOutlineAtomicHelper(const Libcall (&LC)[5][4],
AtomicOrdering Order,
uint64_t MemSize) {
diff --git a/llvm/lib/DWARFLinker/Classic/DWARFStreamer.cpp b/llvm/lib/DWARFLinker/Classic/DWARFStreamer.cpp
index bca3125..947db9c 100644
--- a/llvm/lib/DWARFLinker/Classic/DWARFStreamer.cpp
+++ b/llvm/lib/DWARFLinker/Classic/DWARFStreamer.cpp
@@ -933,7 +933,7 @@ void DwarfStreamer::emitLineTablePrologueV5IncludeAndFileTable(
LineSectionSize += MS->emitULEB128IntValue(StrForm);
LineSectionSize += MS->emitULEB128IntValue(dwarf::DW_LNCT_directory_index);
- LineSectionSize += MS->emitULEB128IntValue(dwarf::DW_FORM_data1);
+ LineSectionSize += MS->emitULEB128IntValue(dwarf::DW_FORM_udata);
if (HasChecksums) {
LineSectionSize += MS->emitULEB128IntValue(dwarf::DW_LNCT_MD5);
@@ -952,8 +952,7 @@ void DwarfStreamer::emitLineTablePrologueV5IncludeAndFileTable(
// file_names (sequence of file name entries).
for (auto File : P.FileNames) {
emitLineTableString(P, File.Name, DebugStrPool, DebugLineStrPool);
- MS->emitInt8(File.DirIdx);
- LineSectionSize += 1;
+ LineSectionSize += MS->emitULEB128IntValue(File.DirIdx);
if (HasChecksums) {
MS->emitBinaryData(
StringRef(reinterpret_cast<const char *>(File.Checksum.data()),
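
Switching the directory index from DW_FORM_data1 to DW_FORM_udata replaces a
fixed one-byte field with a ULEB128 value, so prologues with more than 255
include directories no longer truncate the index. For reference, ULEB128
encoding looks like this (a standalone sketch; like emitULEB128IntValue above,
it returns the number of bytes written so the size accounting still works):

    #include <cstdint>
    #include <vector>

    static unsigned encodeULEB128(uint64_t Value, std::vector<uint8_t> &Out) {
      unsigned Count = 0;
      do {
        uint8_t Byte = Value & 0x7f;   // low 7 bits per byte
        Value >>= 7;
        if (Value != 0)
          Byte |= 0x80;                // continuation bit
        Out.push_back(Byte);
        ++Count;
      } while (Value != 0);
      return Count;
    }
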
diff --git a/llvm/lib/DWARFLinker/Parallel/DWARFLinkerTypeUnit.cpp b/llvm/lib/DWARFLinker/Parallel/DWARFLinkerTypeUnit.cpp
index 3030aa2..9b626239 100644
--- a/llvm/lib/DWARFLinker/Parallel/DWARFLinkerTypeUnit.cpp
+++ b/llvm/lib/DWARFLinker/Parallel/DWARFLinkerTypeUnit.cpp
@@ -286,21 +286,18 @@ uint32_t TypeUnit::addFileNameIntoLinetable(StringEntry *Dir,
DirIdx++;
}
- uint32_t FileIdx = 0;
- FilenamesMapTy::iterator FileEntry = FileNamesMap.find({FileName, DirIdx});
- if (FileEntry == FileNamesMap.end()) {
+ auto [FileEntry, Inserted] = FileNamesMap.try_emplace(
+ {FileName, DirIdx}, LineTable.Prologue.FileNames.size());
+ if (Inserted) {
// We currently do not support more than UINT32_MAX files.
assert(LineTable.Prologue.FileNames.size() < UINT32_MAX);
- FileIdx = LineTable.Prologue.FileNames.size();
- FileNamesMap.insert({{FileName, DirIdx}, FileIdx});
LineTable.Prologue.FileNames.push_back(DWARFDebugLine::FileNameEntry());
LineTable.Prologue.FileNames.back().Name = DWARFFormValue::createFromPValue(
dwarf::DW_FORM_string, FileName->getKeyData());
LineTable.Prologue.FileNames.back().DirIdx = DirIdx;
- } else {
- FileIdx = FileEntry->second;
}
+ uint32_t FileIdx = FileEntry->second;
return getVersion() < 5 ? FileIdx + 1 : FileIdx;
}
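
The rewrite above is the standard try_emplace idiom: a single lookup replaces
the find-then-insert pair, and the returned Inserted flag distinguishes a
fresh entry from an existing one. The same shape with standard containers
(getOrAddName and the containers are illustrative):

    #include <cstdint>
    #include <map>
    #include <string>
    #include <vector>

    std::vector<std::string> Names;
    std::map<std::string, uint32_t> NameIdx;

    uint32_t getOrAddName(const std::string &N) {
      // Insert N -> next index unless N is already present; one lookup.
      auto [It, Inserted] = NameIdx.try_emplace(N, Names.size());
      if (Inserted)
        Names.push_back(N);   // first sighting: materialize the entry
      return It->second;      // valid index in both cases
    }
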
diff --git a/llvm/lib/DWARFLinker/Parallel/DebugLineSectionEmitter.h b/llvm/lib/DWARFLinker/Parallel/DebugLineSectionEmitter.h
index 38357c7..b035c4b 100644
--- a/llvm/lib/DWARFLinker/Parallel/DebugLineSectionEmitter.h
+++ b/llvm/lib/DWARFLinker/Parallel/DebugLineSectionEmitter.h
@@ -215,7 +215,7 @@ private:
encodeULEB128(FileNameForm, Section.OS);
encodeULEB128(dwarf::DW_LNCT_directory_index, Section.OS);
- encodeULEB128(dwarf::DW_FORM_data1, Section.OS);
+ encodeULEB128(dwarf::DW_FORM_udata, Section.OS);
if (HasChecksums) {
encodeULEB128(dwarf::DW_LNCT_MD5, Section.OS);
@@ -242,7 +242,7 @@ private:
// A null-terminated string containing the full or relative path name of a
// source file.
Section.emitString(FileNameForm, *FileNameStr);
- Section.emitIntVal(File.DirIdx, 1);
+ encodeULEB128(File.DirIdx, Section.OS);
if (HasChecksums) {
assert((File.Checksum.size() == 16) &&
diff --git a/llvm/lib/ExecutionEngine/JITLink/ELF_x86_64.cpp b/llvm/lib/ExecutionEngine/JITLink/ELF_x86_64.cpp
index b27a1a1..6a32ccc 100644
--- a/llvm/lib/ExecutionEngine/JITLink/ELF_x86_64.cpp
+++ b/llvm/lib/ExecutionEngine/JITLink/ELF_x86_64.cpp
@@ -156,6 +156,9 @@ private:
case ELF::R_X86_64_PC8:
Kind = x86_64::Delta8;
break;
+ case ELF::R_X86_64_PC16:
+ Kind = x86_64::Delta16;
+ break;
case ELF::R_X86_64_PC32:
case ELF::R_X86_64_GOTPC32:
Kind = x86_64::Delta32;
diff --git a/llvm/lib/ExecutionEngine/JITLink/x86_64.cpp b/llvm/lib/ExecutionEngine/JITLink/x86_64.cpp
index 9f7ece8..cca4358 100644
--- a/llvm/lib/ExecutionEngine/JITLink/x86_64.cpp
+++ b/llvm/lib/ExecutionEngine/JITLink/x86_64.cpp
@@ -34,6 +34,8 @@ const char *getEdgeKindName(Edge::Kind K) {
return "Delta64";
case Delta32:
return "Delta32";
+ case Delta16:
+ return "Delta16";
case Delta8:
return "Delta8";
case NegDelta64:
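
Mapping R_X86_64_PC16 to a Delta16 edge implies the usual fixup pattern for
PC-relative relocations: compute Target + Addend - FixupAddress and check
that it fits the signed 16-bit field. A sketch of applying such a fixup
(names are illustrative, not the JITLink API; little-endian host assumed):

    #include <cstdint>
    #include <cstring>
    #include <stdexcept>

    void applyDelta16(char *FixupPtr, uint64_t FixupAddr,
                      uint64_t Target, int64_t Addend) {
      int64_t Delta = (int64_t)(Target - FixupAddr) + Addend;
      if (Delta < INT16_MIN || Delta > INT16_MAX)
        throw std::runtime_error("Delta16 fixup out of range");
      int16_t Value = (int16_t)Delta;
      std::memcpy(FixupPtr, &Value, sizeof(Value));
    }
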
diff --git a/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp b/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp
index 722587e..a90770f 100644
--- a/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp
+++ b/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp
@@ -886,8 +886,8 @@ Constant *OpenMPIRBuilder::getOrCreateSrcLocStr(StringRef LocStr,
GV.getInitializer() == Initializer)
return SrcLocStr = ConstantExpr::getPointerCast(&GV, Int8Ptr);
- SrcLocStr = Builder.CreateGlobalStringPtr(LocStr, /* Name */ "",
- /* AddressSpace */ 0, &M);
+ SrcLocStr = Builder.CreateGlobalString(LocStr, /* Name */ "",
+ /* AddressSpace */ 0, &M);
}
return SrcLocStr;
}
diff --git a/llvm/lib/IR/Attributes.cpp b/llvm/lib/IR/Attributes.cpp
index fa124e4..eb61583 100644
--- a/llvm/lib/IR/Attributes.cpp
+++ b/llvm/lib/IR/Attributes.cpp
@@ -1608,7 +1608,7 @@ AttributeList AttributeList::addRangeRetAttr(LLVMContext &C,
AttributeList AttributeList::addAllocSizeParamAttr(
LLVMContext &C, unsigned Index, unsigned ElemSizeArg,
- const std::optional<unsigned> &NumElemsArg) {
+ const std::optional<unsigned> &NumElemsArg) const {
AttrBuilder B(C);
B.addAllocSizeAttr(ElemSizeArg, NumElemsArg);
return addParamAttributes(C, Index, B);
diff --git a/llvm/lib/IR/AutoUpgrade.cpp b/llvm/lib/IR/AutoUpgrade.cpp
index 69dae5e3..b842583 100644
--- a/llvm/lib/IR/AutoUpgrade.cpp
+++ b/llvm/lib/IR/AutoUpgrade.cpp
@@ -1268,6 +1268,23 @@ static bool upgradeIntrinsicFunction1(Function *F, Function *&NewFn,
else if (Name.consume_front("atomic.load.add."))
// nvvm.atomic.load.add.{f32.p,f64.p}
Expand = Name.starts_with("f32.p") || Name.starts_with("f64.p");
+ else if (Name.consume_front("bitcast."))
+ // nvvm.bitcast.{f2i,i2f,ll2d,d2ll}
+ Expand =
+ Name == "f2i" || Name == "i2f" || Name == "ll2d" || Name == "d2ll";
+ else if (Name.consume_front("rotate."))
+ // nvvm.rotate.{b32,b64,right.b64}
+ Expand = Name == "b32" || Name == "b64" || Name == "right.b64";
+ else if (Name.consume_front("ptr.gen.to."))
+ // nvvm.ptr.gen.to.{local,shared,global,constant}
+ Expand = Name.starts_with("local") || Name.starts_with("shared") ||
+ Name.starts_with("global") || Name.starts_with("constant");
+ else if (Name.consume_front("ptr."))
+ // nvvm.ptr.{local,shared,global,constant}.to.gen
+ Expand =
+ (Name.consume_front("local") || Name.consume_front("shared") ||
+ Name.consume_front("global") || Name.consume_front("constant")) &&
+ Name.starts_with(".to.gen");
else
Expand = false;
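
The new cases reuse the file's established matching pattern: consume_front
strips a known prefix from the StringRef in place, and only the short
remainder is compared against the expected suffixes. The same idiom over
std::string_view (consumeFront and isRotateIntrinsicName are illustrative):

    #include <string_view>

    // Strip Prefix from S if present, like llvm::StringRef::consume_front.
    static bool consumeFront(std::string_view &S, std::string_view Prefix) {
      if (S.substr(0, Prefix.size()) != Prefix)
        return false;
      S.remove_prefix(Prefix.size());
      return true;
    }

    static bool isRotateIntrinsicName(std::string_view Name) {
      // Matches "rotate.b32", "rotate.b64", "rotate.right.b64".
      return consumeFront(Name, "rotate.") &&
             (Name == "b32" || Name == "b64" || Name == "right.b64");
    }
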
@@ -2254,6 +2271,117 @@ void llvm::UpgradeInlineAsmString(std::string *AsmStr) {
}
}
+static Value *upgradeNVVMIntrinsicCall(StringRef Name, CallBase *CI,
+ Function *F, IRBuilder<> &Builder) {
+ Value *Rep = nullptr;
+
+ if (Name == "abs.i" || Name == "abs.ll") {
+ Value *Arg = CI->getArgOperand(0);
+ Value *Neg = Builder.CreateNeg(Arg, "neg");
+ Value *Cmp = Builder.CreateICmpSGE(
+ Arg, llvm::Constant::getNullValue(Arg->getType()), "abs.cond");
+ Rep = Builder.CreateSelect(Cmp, Arg, Neg, "abs");
+ } else if (Name.starts_with("atomic.load.add.f32.p") ||
+ Name.starts_with("atomic.load.add.f64.p")) {
+ Value *Ptr = CI->getArgOperand(0);
+ Value *Val = CI->getArgOperand(1);
+ Rep = Builder.CreateAtomicRMW(AtomicRMWInst::FAdd, Ptr, Val, MaybeAlign(),
+ AtomicOrdering::SequentiallyConsistent);
+ } else if (Name.consume_front("max.") &&
+ (Name == "s" || Name == "i" || Name == "ll" || Name == "us" ||
+ Name == "ui" || Name == "ull")) {
+ Value *Arg0 = CI->getArgOperand(0);
+ Value *Arg1 = CI->getArgOperand(1);
+ Value *Cmp = Name.starts_with("u")
+ ? Builder.CreateICmpUGE(Arg0, Arg1, "max.cond")
+ : Builder.CreateICmpSGE(Arg0, Arg1, "max.cond");
+ Rep = Builder.CreateSelect(Cmp, Arg0, Arg1, "max");
+ } else if (Name.consume_front("min.") &&
+ (Name == "s" || Name == "i" || Name == "ll" || Name == "us" ||
+ Name == "ui" || Name == "ull")) {
+ Value *Arg0 = CI->getArgOperand(0);
+ Value *Arg1 = CI->getArgOperand(1);
+ Value *Cmp = Name.starts_with("u")
+ ? Builder.CreateICmpULE(Arg0, Arg1, "min.cond")
+ : Builder.CreateICmpSLE(Arg0, Arg1, "min.cond");
+ Rep = Builder.CreateSelect(Cmp, Arg0, Arg1, "min");
+ } else if (Name == "clz.ll") {
+ // llvm.nvvm.clz.ll returns an i32, but llvm.ctlz.i64 returns an i64.
+ Value *Arg = CI->getArgOperand(0);
+ Value *Ctlz = Builder.CreateCall(
+ Intrinsic::getDeclaration(F->getParent(), Intrinsic::ctlz,
+ {Arg->getType()}),
+ {Arg, Builder.getFalse()}, "ctlz");
+ Rep = Builder.CreateTrunc(Ctlz, Builder.getInt32Ty(), "ctlz.trunc");
+ } else if (Name == "popc.ll") {
+ // llvm.nvvm.popc.ll returns an i32, but llvm.ctpop.i64 returns an
+ // i64.
+ Value *Arg = CI->getArgOperand(0);
+ Value *Popc = Builder.CreateCall(
+ Intrinsic::getDeclaration(F->getParent(), Intrinsic::ctpop,
+ {Arg->getType()}),
+ Arg, "ctpop");
+ Rep = Builder.CreateTrunc(Popc, Builder.getInt32Ty(), "ctpop.trunc");
+ } else if (Name == "h2f") {
+ Rep = Builder.CreateCall(
+ Intrinsic::getDeclaration(F->getParent(), Intrinsic::convert_from_fp16,
+ {Builder.getFloatTy()}),
+ CI->getArgOperand(0), "h2f");
+ } else if (Name.consume_front("bitcast.") &&
+ (Name == "f2i" || Name == "i2f" || Name == "ll2d" ||
+ Name == "d2ll")) {
+ Rep = Builder.CreateBitCast(CI->getArgOperand(0), CI->getType());
+ } else if (Name == "rotate.b32") {
+ Value *Arg = CI->getOperand(0);
+ Value *ShiftAmt = CI->getOperand(1);
+ Rep = Builder.CreateIntrinsic(Builder.getInt32Ty(), Intrinsic::fshl,
+ {Arg, Arg, ShiftAmt});
+ } else if (Name == "rotate.b64") {
+ Type *Int64Ty = Builder.getInt64Ty();
+ Value *Arg = CI->getOperand(0);
+ Value *ZExtShiftAmt = Builder.CreateZExt(CI->getOperand(1), Int64Ty);
+ Rep = Builder.CreateIntrinsic(Int64Ty, Intrinsic::fshl,
+ {Arg, Arg, ZExtShiftAmt});
+ } else if (Name == "rotate.right.b64") {
+ Type *Int64Ty = Builder.getInt64Ty();
+ Value *Arg = CI->getOperand(0);
+ Value *ZExtShiftAmt = Builder.CreateZExt(CI->getOperand(1), Int64Ty);
+ Rep = Builder.CreateIntrinsic(Int64Ty, Intrinsic::fshr,
+ {Arg, Arg, ZExtShiftAmt});
+ } else if ((Name.consume_front("ptr.gen.to.") &&
+ (Name.starts_with("local") || Name.starts_with("shared") ||
+ Name.starts_with("global") || Name.starts_with("constant"))) ||
+ (Name.consume_front("ptr.") &&
+ (Name.consume_front("local") || Name.consume_front("shared") ||
+ Name.consume_front("global") ||
+ Name.consume_front("constant")) &&
+ Name.starts_with(".to.gen"))) {
+ Rep = Builder.CreateAddrSpaceCast(CI->getArgOperand(0), CI->getType());
+ } else {
+ Intrinsic::ID IID = shouldUpgradeNVPTXBF16Intrinsic(Name);
+ if (IID != Intrinsic::not_intrinsic &&
+ !F->getReturnType()->getScalarType()->isBFloatTy()) {
+ rename(F);
+ Function *NewFn = Intrinsic::getDeclaration(F->getParent(), IID);
+ SmallVector<Value *, 2> Args;
+ for (size_t I = 0; I < NewFn->arg_size(); ++I) {
+ Value *Arg = CI->getArgOperand(I);
+ Type *OldType = Arg->getType();
+ Type *NewType = NewFn->getArg(I)->getType();
+ Args.push_back(
+ (OldType->isIntegerTy() && NewType->getScalarType()->isBFloatTy())
+ ? Builder.CreateBitCast(Arg, NewType)
+ : Arg);
+ }
+ Rep = Builder.CreateCall(NewFn, Args);
+ if (F->getReturnType()->isIntegerTy())
+ Rep = Builder.CreateBitCast(Rep, F->getReturnType());
+ }
+ }
+
+ return Rep;
+}
+
static Value *upgradeX86IntrinsicCall(StringRef Name, CallBase *CI, Function *F,
IRBuilder<> &Builder) {
LLVMContext &C = F->getContext();
@@ -4204,81 +4332,8 @@ void llvm::UpgradeIntrinsicCall(CallBase *CI, Function *NewFn) {
if (!IsX86 && Name == "stackprotectorcheck") {
Rep = nullptr;
- } else if (IsNVVM && (Name == "abs.i" || Name == "abs.ll")) {
- Value *Arg = CI->getArgOperand(0);
- Value *Neg = Builder.CreateNeg(Arg, "neg");
- Value *Cmp = Builder.CreateICmpSGE(
- Arg, llvm::Constant::getNullValue(Arg->getType()), "abs.cond");
- Rep = Builder.CreateSelect(Cmp, Arg, Neg, "abs");
- } else if (IsNVVM && (Name.starts_with("atomic.load.add.f32.p") ||
- Name.starts_with("atomic.load.add.f64.p"))) {
- Value *Ptr = CI->getArgOperand(0);
- Value *Val = CI->getArgOperand(1);
- Rep = Builder.CreateAtomicRMW(AtomicRMWInst::FAdd, Ptr, Val, MaybeAlign(),
- AtomicOrdering::SequentiallyConsistent);
- } else if (IsNVVM && Name.consume_front("max.") &&
- (Name == "s" || Name == "i" || Name == "ll" || Name == "us" ||
- Name == "ui" || Name == "ull")) {
- Value *Arg0 = CI->getArgOperand(0);
- Value *Arg1 = CI->getArgOperand(1);
- Value *Cmp = Name.starts_with("u")
- ? Builder.CreateICmpUGE(Arg0, Arg1, "max.cond")
- : Builder.CreateICmpSGE(Arg0, Arg1, "max.cond");
- Rep = Builder.CreateSelect(Cmp, Arg0, Arg1, "max");
- } else if (IsNVVM && Name.consume_front("min.") &&
- (Name == "s" || Name == "i" || Name == "ll" || Name == "us" ||
- Name == "ui" || Name == "ull")) {
- Value *Arg0 = CI->getArgOperand(0);
- Value *Arg1 = CI->getArgOperand(1);
- Value *Cmp = Name.starts_with("u")
- ? Builder.CreateICmpULE(Arg0, Arg1, "min.cond")
- : Builder.CreateICmpSLE(Arg0, Arg1, "min.cond");
- Rep = Builder.CreateSelect(Cmp, Arg0, Arg1, "min");
- } else if (IsNVVM && Name == "clz.ll") {
- // llvm.nvvm.clz.ll returns an i32, but llvm.ctlz.i64 returns an i64.
- Value *Arg = CI->getArgOperand(0);
- Value *Ctlz = Builder.CreateCall(
- Intrinsic::getDeclaration(F->getParent(), Intrinsic::ctlz,
- {Arg->getType()}),
- {Arg, Builder.getFalse()}, "ctlz");
- Rep = Builder.CreateTrunc(Ctlz, Builder.getInt32Ty(), "ctlz.trunc");
- } else if (IsNVVM && Name == "popc.ll") {
- // llvm.nvvm.popc.ll returns an i32, but llvm.ctpop.i64 returns an
- // i64.
- Value *Arg = CI->getArgOperand(0);
- Value *Popc = Builder.CreateCall(
- Intrinsic::getDeclaration(F->getParent(), Intrinsic::ctpop,
- {Arg->getType()}),
- Arg, "ctpop");
- Rep = Builder.CreateTrunc(Popc, Builder.getInt32Ty(), "ctpop.trunc");
} else if (IsNVVM) {
- if (Name == "h2f") {
- Rep =
- Builder.CreateCall(Intrinsic::getDeclaration(
- F->getParent(), Intrinsic::convert_from_fp16,
- {Builder.getFloatTy()}),
- CI->getArgOperand(0), "h2f");
- } else {
- Intrinsic::ID IID = shouldUpgradeNVPTXBF16Intrinsic(Name);
- if (IID != Intrinsic::not_intrinsic &&
- !F->getReturnType()->getScalarType()->isBFloatTy()) {
- rename(F);
- NewFn = Intrinsic::getDeclaration(F->getParent(), IID);
- SmallVector<Value *, 2> Args;
- for (size_t I = 0; I < NewFn->arg_size(); ++I) {
- Value *Arg = CI->getArgOperand(I);
- Type *OldType = Arg->getType();
- Type *NewType = NewFn->getArg(I)->getType();
- Args.push_back((OldType->isIntegerTy() &&
- NewType->getScalarType()->isBFloatTy())
- ? Builder.CreateBitCast(Arg, NewType)
- : Arg);
- }
- Rep = Builder.CreateCall(NewFn, Args);
- if (F->getReturnType()->isIntegerTy())
- Rep = Builder.CreateBitCast(Rep, F->getReturnType());
- }
- }
+ Rep = upgradeNVVMIntrinsicCall(Name, CI, F, Builder);
} else if (IsX86) {
Rep = upgradeX86IntrinsicCall(Name, CI, F, Builder);
} else if (IsARM) {
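
The rotate upgrades above rely on the identity that a rotate is a funnel
shift with both inputs equal: rotate-left is fshl(x, x, n) and rotate-right
is fshr(x, x, n), since the bits shifted out of one copy are shifted back in
from the other. Scalar equivalents of what the builder emits (a sketch; the
masking mirrors the intrinsics' modulo-bit-width semantics):

    #include <cstdint>

    static uint32_t rotl32(uint32_t X, uint32_t N) { // fshl(X, X, N)
      N &= 31;
      return N ? (X << N) | (X >> (32 - N)) : X;
    }

    static uint64_t rotr64(uint64_t X, uint64_t N) { // fshr(X, X, N)
      N &= 63;
      return N ? (X >> N) | (X << (64 - N)) : X;
    }
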
diff --git a/llvm/lib/IR/Core.cpp b/llvm/lib/IR/Core.cpp
index 32a04c5..c1ca2c2 100644
--- a/llvm/lib/IR/Core.cpp
+++ b/llvm/lib/IR/Core.cpp
@@ -3965,6 +3965,10 @@ static AtomicRMWInst::BinOp mapFromLLVMRMWBinOp(LLVMAtomicRMWBinOp BinOp) {
return AtomicRMWInst::UIncWrap;
case LLVMAtomicRMWBinOpUDecWrap:
return AtomicRMWInst::UDecWrap;
+ case LLVMAtomicRMWBinOpUSubCond:
+ return AtomicRMWInst::USubCond;
+ case LLVMAtomicRMWBinOpUSubSat:
+ return AtomicRMWInst::USubSat;
}
llvm_unreachable("Invalid LLVMAtomicRMWBinOp value!");
@@ -3991,6 +3995,10 @@ static LLVMAtomicRMWBinOp mapToLLVMRMWBinOp(AtomicRMWInst::BinOp BinOp) {
return LLVMAtomicRMWBinOpUIncWrap;
case AtomicRMWInst::UDecWrap:
return LLVMAtomicRMWBinOpUDecWrap;
+ case AtomicRMWInst::USubCond:
+ return LLVMAtomicRMWBinOpUSubCond;
+ case AtomicRMWInst::USubSat:
+ return LLVMAtomicRMWBinOpUSubSat;
default: break;
}
@@ -4052,7 +4060,7 @@ LLVMValueRef LLVMBuildGlobalString(LLVMBuilderRef B, const char *Str,
LLVMValueRef LLVMBuildGlobalStringPtr(LLVMBuilderRef B, const char *Str,
const char *Name) {
- return wrap(unwrap(B)->CreateGlobalStringPtr(Str, Name));
+ return wrap(unwrap(B)->CreateGlobalString(Str, Name));
}
LLVMBool LLVMGetVolatile(LLVMValueRef MemAccessInst) {
diff --git a/llvm/lib/IR/LLVMContext.cpp b/llvm/lib/IR/LLVMContext.cpp
index c0fee93..22e6077 100644
--- a/llvm/lib/IR/LLVMContext.cpp
+++ b/llvm/lib/IR/LLVMContext.cpp
@@ -377,14 +377,6 @@ std::unique_ptr<DiagnosticHandler> LLVMContext::getDiagnosticHandler() {
return std::move(pImpl->DiagHandler);
}
-void LLVMContext::setOpaquePointers(bool Enable) const {
- assert(Enable && "Cannot disable opaque pointers");
-}
-
-bool LLVMContext::supportsTypedPointers() const {
- return false;
-}
-
StringRef LLVMContext::getDefaultTargetCPU() {
return pImpl->DefaultTargetCPU;
}
diff --git a/llvm/lib/MC/ELFObjectWriter.cpp b/llvm/lib/MC/ELFObjectWriter.cpp
index 8127091..94c7697 100644
--- a/llvm/lib/MC/ELFObjectWriter.cpp
+++ b/llvm/lib/MC/ELFObjectWriter.cpp
@@ -68,6 +68,8 @@ using namespace llvm;
namespace {
namespace stats {
+STATISTIC(ELFHeaderBytes, "Total size of ELF headers");
+STATISTIC(SectionHeaderBytes, "Total size of section headers table");
STATISTIC(AllocTextBytes, "Total size of SHF_ALLOC text sections");
STATISTIC(AllocROBytes, "Total size of SHF_ALLOC readonly sections");
STATISTIC(AllocRWBytes, "Total size of SHF_ALLOC read-write sections");
@@ -945,6 +947,7 @@ void ELFWriter::writeSectionHeader(uint32_t GroupSymbolIndex, uint64_t Offset,
}
void ELFWriter::writeSectionHeaders(const MCAssembler &Asm) {
+ uint64_t Start = W.OS.tell();
const unsigned NumSections = SectionTable.size();
// Null section first.
@@ -1008,6 +1011,8 @@ void ELFWriter::writeSectionHeaders(const MCAssembler &Asm) {
writeSectionHeader(GroupSymbolIndex, Offsets.first, Size, *Section);
}
+
+ stats::SectionHeaderBytes += W.OS.tell() - Start;
}
uint64_t ELFWriter::writeObject(MCAssembler &Asm) {
@@ -1023,6 +1028,8 @@ uint64_t ELFWriter::writeObject(MCAssembler &Asm) {
// Write out the ELF header ...
writeHeader(Asm);
+ stats::ELFHeaderBytes += W.OS.tell() - StartOffset;
+
// ... then the sections ...
SmallVector<std::pair<MCSectionELF *, SmallVector<unsigned>>, 0> Groups;
// Map from group section index to group
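
Both new statistics use the same measurement pattern: record the stream
offset before emitting, then subtract it from the offset afterwards. The
pattern generalizes to any byte counting over a seekable stream (a sketch
with standard streams; bytesWritten is illustrative):

    #include <cstdint>
    #include <sstream>

    // Count how many bytes Emit writes, like the W.OS.tell() deltas above.
    template <typename EmitFn>
    uint64_t bytesWritten(std::ostringstream &OS, EmitFn Emit) {
      auto Start = OS.tellp();
      Emit(OS);
      return static_cast<uint64_t>(OS.tellp() - Start);
    }
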
diff --git a/llvm/lib/MC/MCTargetOptionsCommandFlags.cpp b/llvm/lib/MC/MCTargetOptionsCommandFlags.cpp
index 1a4f7e9..92618bd 100644
--- a/llvm/lib/MC/MCTargetOptionsCommandFlags.cpp
+++ b/llvm/lib/MC/MCTargetOptionsCommandFlags.cpp
@@ -145,8 +145,8 @@ llvm::mc::RegisterMCTargetOptionsFlags::RegisterMCTargetOptionsFlags() {
static cl::opt<bool> X86RelaxRelocations(
"x86-relax-relocations",
- cl::desc(
- "Emit GOTPCRELX/REX_GOTPCRELX instead of GOTPCREL on x86-64 ELF"),
+ cl::desc("Emit GOTPCRELX/REX_GOTPCRELX/REX2_GOTPCRELX instead of "
+ "GOTPCREL on x86-64 ELF"),
cl::init(true));
MCBINDOPT(X86RelaxRelocations);
diff --git a/llvm/lib/Object/COFFObjectFile.cpp b/llvm/lib/Object/COFFObjectFile.cpp
index 5fdf3ba..3ec7a44 100644
--- a/llvm/lib/Object/COFFObjectFile.cpp
+++ b/llvm/lib/Object/COFFObjectFile.cpp
@@ -763,7 +763,7 @@ Error COFFObjectFile::initLoadConfigPtr() {
if (Error E =
getRvaPtr(ChpeOff - getImageBase(), IntPtr, "CHPE metadata"))
return E;
- if (Error E = checkOffset(Data, IntPtr, sizeof(CHPEMetadata)))
+ if (Error E = checkOffset(Data, IntPtr, sizeof(*CHPEMetadata)))
return E;
CHPEMetadata = reinterpret_cast<const chpe_metadata *>(IntPtr);
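
The one-character fix above matters: CHPEMetadata is a pointer, so
sizeof(CHPEMetadata) is only the pointer size, while sizeof(*CHPEMetadata)
is the size of the pointed-to chpe_metadata structure, which is what the
bounds check needs. The trap in miniature:

    #include <cstdio>

    struct Meta { char Bytes[64]; };

    int main() {
      const Meta *P = nullptr;
      // sizeof does not evaluate its operand, so *P here is safe.
      std::printf("sizeof(P)  = %zu\n", sizeof(P));  // pointer size, e.g. 8
      std::printf("sizeof(*P) = %zu\n", sizeof(*P)); // 64, the struct size
      return 0;
    }
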
diff --git a/llvm/lib/SandboxIR/SandboxIR.cpp b/llvm/lib/SandboxIR/SandboxIR.cpp
index b96141f..124c1bf 100644
--- a/llvm/lib/SandboxIR/SandboxIR.cpp
+++ b/llvm/lib/SandboxIR/SandboxIR.cpp
@@ -569,6 +569,10 @@ void Instruction::copyFastMathFlags(FastMathFlags FMF) {
cast<llvm::Instruction>(Val)->copyFastMathFlags(FMF);
}
+Type *Instruction::getAccessType() const {
+ return Ctx.getType(cast<llvm::Instruction>(Val)->getAccessType());
+}
+
void Instruction::setHasApproxFunc(bool B) {
Ctx.getTracker()
.emplaceIfTracking<GenericSetter<&Instruction::hasApproxFunc,
@@ -2888,6 +2892,10 @@ Value *Context::getOrCreateValueInternal(llvm::Value *LLVMV, llvm::User *U) {
It->second = std::unique_ptr<ConstantPtrAuth>(
new ConstantPtrAuth(cast<llvm::ConstantPtrAuth>(C), *this));
break;
+ case llvm::Value::ConstantExprVal:
+ It->second = std::unique_ptr<ConstantExpr>(
+ new ConstantExpr(cast<llvm::ConstantExpr>(C), *this));
+ break;
default:
It->second = std::unique_ptr<Constant>(new Constant(C, *this));
break;
diff --git a/llvm/lib/Support/ModRef.cpp b/llvm/lib/Support/ModRef.cpp
index b57ea30..a4eb70e 100644
--- a/llvm/lib/Support/ModRef.cpp
+++ b/llvm/lib/Support/ModRef.cpp
@@ -1,52 +1,52 @@
-//===--- ModRef.cpp - Memory effect modeling --------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements ModRef and MemoryEffects misc functions.
-//
-//===----------------------------------------------------------------------===//
-
-#include "llvm/Support/ModRef.h"
-#include "llvm/ADT/STLExtras.h"
-
-using namespace llvm;
-
-raw_ostream &llvm::operator<<(raw_ostream &OS, ModRefInfo MR) {
- switch (MR) {
- case ModRefInfo::NoModRef:
- OS << "NoModRef";
- break;
- case ModRefInfo::Ref:
- OS << "Ref";
- break;
- case ModRefInfo::Mod:
- OS << "Mod";
- break;
- case ModRefInfo::ModRef:
- OS << "ModRef";
- break;
- }
- return OS;
-}
-
-raw_ostream &llvm::operator<<(raw_ostream &OS, MemoryEffects ME) {
- interleaveComma(MemoryEffects::locations(), OS, [&](IRMemLocation Loc) {
- switch (Loc) {
- case IRMemLocation::ArgMem:
- OS << "ArgMem: ";
- break;
- case IRMemLocation::InaccessibleMem:
- OS << "InaccessibleMem: ";
- break;
- case IRMemLocation::Other:
- OS << "Other: ";
- break;
- }
- OS << ME.getModRef(Loc);
- });
- return OS;
-}
+//===--- ModRef.cpp - Memory effect modeling --------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements ModRef and MemoryEffects misc functions.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Support/ModRef.h"
+#include "llvm/ADT/STLExtras.h"
+
+using namespace llvm;
+
+raw_ostream &llvm::operator<<(raw_ostream &OS, ModRefInfo MR) {
+ switch (MR) {
+ case ModRefInfo::NoModRef:
+ OS << "NoModRef";
+ break;
+ case ModRefInfo::Ref:
+ OS << "Ref";
+ break;
+ case ModRefInfo::Mod:
+ OS << "Mod";
+ break;
+ case ModRefInfo::ModRef:
+ OS << "ModRef";
+ break;
+ }
+ return OS;
+}
+
+raw_ostream &llvm::operator<<(raw_ostream &OS, MemoryEffects ME) {
+ interleaveComma(MemoryEffects::locations(), OS, [&](IRMemLocation Loc) {
+ switch (Loc) {
+ case IRMemLocation::ArgMem:
+ OS << "ArgMem: ";
+ break;
+ case IRMemLocation::InaccessibleMem:
+ OS << "InaccessibleMem: ";
+ break;
+ case IRMemLocation::Other:
+ OS << "Other: ";
+ break;
+ }
+ OS << ME.getModRef(Loc);
+ });
+ return OS;
+}
diff --git a/llvm/lib/Support/OptionStrCmp.cpp b/llvm/lib/Support/OptionStrCmp.cpp
index 1ffb64c..8e4892f 100644
--- a/llvm/lib/Support/OptionStrCmp.cpp
+++ b/llvm/lib/Support/OptionStrCmp.cpp
@@ -1,43 +1,43 @@
-//===- OptionStrCmp.cpp - Option String Comparison --------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#include "llvm/Support/OptionStrCmp.h"
-#include "llvm/ADT/STLExtras.h"
-
-using namespace llvm;
-
-// Comparison function for Option strings (option names & prefixes).
-// The ordering is *almost* case-insensitive lexicographic, with an exception.
-// '\0' comes at the end of the alphabet instead of the beginning (thus options
-// precede any other options which prefix them). Additionally, if two options
-// are identical ignoring case, they are ordered according to case sensitive
-// ordering if `FallbackCaseSensitive` is true.
-int llvm::StrCmpOptionName(StringRef A, StringRef B,
- bool FallbackCaseSensitive) {
- size_t MinSize = std::min(A.size(), B.size());
- if (int Res = A.substr(0, MinSize).compare_insensitive(B.substr(0, MinSize)))
- return Res;
-
- // If they are identical ignoring case, use case sensitive ordering.
- if (A.size() == B.size())
- return FallbackCaseSensitive ? A.compare(B) : 0;
-
- return (A.size() == MinSize) ? 1 /* A is a prefix of B. */
- : -1 /* B is a prefix of A */;
-}
-
-// Comparison function for Option prefixes.
-int llvm::StrCmpOptionPrefixes(ArrayRef<StringRef> APrefixes,
- ArrayRef<StringRef> BPrefixes) {
- for (const auto &[APre, BPre] : zip(APrefixes, BPrefixes)) {
- if (int Cmp = StrCmpOptionName(APre, BPre))
- return Cmp;
- }
- // Both prefixes are identical.
- return 0;
-}
+//===- OptionStrCmp.cpp - Option String Comparison --------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Support/OptionStrCmp.h"
+#include "llvm/ADT/STLExtras.h"
+
+using namespace llvm;
+
+// Comparison function for Option strings (option names & prefixes).
+// The ordering is *almost* case-insensitive lexicographic, with an exception.
+// '\0' comes at the end of the alphabet instead of the beginning (thus options
+// precede any other options which prefix them). Additionally, if two options
+// are identical ignoring case, they are ordered according to case sensitive
+// ordering if `FallbackCaseSensitive` is true.
+int llvm::StrCmpOptionName(StringRef A, StringRef B,
+ bool FallbackCaseSensitive) {
+ size_t MinSize = std::min(A.size(), B.size());
+ if (int Res = A.substr(0, MinSize).compare_insensitive(B.substr(0, MinSize)))
+ return Res;
+
+ // If they are identical ignoring case, use case sensitive ordering.
+ if (A.size() == B.size())
+ return FallbackCaseSensitive ? A.compare(B) : 0;
+
+ return (A.size() == MinSize) ? 1 /* A is a prefix of B. */
+ : -1 /* B is a prefix of A */;
+}
+
+// Comparison function for Option prefixes.
+int llvm::StrCmpOptionPrefixes(ArrayRef<StringRef> APrefixes,
+ ArrayRef<StringRef> BPrefixes) {
+ for (const auto &[APre, BPre] : zip(APrefixes, BPrefixes)) {
+ if (int Cmp = StrCmpOptionName(APre, BPre))
+ return Cmp;
+ }
+ // Both prefixes are identical.
+ return 0;
+}
diff --git a/llvm/lib/TableGen/Record.cpp b/llvm/lib/TableGen/Record.cpp
index a72a0799..0f99b4a 100644
--- a/llvm/lib/TableGen/Record.cpp
+++ b/llvm/lib/TableGen/Record.cpp
@@ -986,6 +986,32 @@ Init *UnOpInit::Fold(Record *CurRec, bool IsFinal) const {
}
}
break;
+
+ case LISTFLATTEN:
+ if (ListInit *LHSList = dyn_cast<ListInit>(LHS)) {
+ ListRecTy *InnerListTy = dyn_cast<ListRecTy>(LHSList->getElementType());
+ // On a list of non-lists, !listflatten() is a NOP.
+ if (!InnerListTy)
+ return LHS;
+
+ auto Flatten = [](ListInit *List) -> std::optional<std::vector<Init *>> {
+ std::vector<Init *> Flattened;
+ // Concatenate elements of all the inner lists.
+ for (Init *InnerInit : List->getValues()) {
+ ListInit *InnerList = dyn_cast<ListInit>(InnerInit);
+ if (!InnerList)
+ return std::nullopt;
+ for (Init *InnerElem : InnerList->getValues())
+ Flattened.push_back(InnerElem);
+ }
+ return Flattened;
+ };
+
+ auto Flattened = Flatten(LHSList);
+ if (Flattened)
+ return ListInit::get(*Flattened, InnerListTy->getElementType());
+ }
+ break;
}
return const_cast<UnOpInit *>(this);
}
@@ -1010,6 +1036,9 @@ std::string UnOpInit::getAsString() const {
case EMPTY: Result = "!empty"; break;
case GETDAGOP: Result = "!getdagop"; break;
case LOG2 : Result = "!logtwo"; break;
+ case LISTFLATTEN:
+ Result = "!listflatten";
+ break;
case REPR:
Result = "!repr";
break;
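
Semantically, !listflatten concatenates the inner lists of a list<list<X>>
into a single list<X>, and on a list of non-lists it returns the operand
unchanged. The Fold lambda above, restated over standard containers
(flattenOnce is illustrative):

    #include <vector>

    // One level of flattening: {{1, 2}, {3}} -> {1, 2, 3}.
    template <typename T>
    std::vector<T> flattenOnce(const std::vector<std::vector<T>> &Lists) {
      std::vector<T> Flattened;
      for (const auto &Inner : Lists)
        Flattened.insert(Flattened.end(), Inner.begin(), Inner.end());
      return Flattened;
    }
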
diff --git a/llvm/lib/TableGen/TGLexer.cpp b/llvm/lib/TableGen/TGLexer.cpp
index 62a884e..8fe7f69 100644
--- a/llvm/lib/TableGen/TGLexer.cpp
+++ b/llvm/lib/TableGen/TGLexer.cpp
@@ -628,6 +628,7 @@ tgtok::TokKind TGLexer::LexExclaim() {
.Case("foreach", tgtok::XForEach)
.Case("filter", tgtok::XFilter)
.Case("listconcat", tgtok::XListConcat)
+ .Case("listflatten", tgtok::XListFlatten)
.Case("listsplat", tgtok::XListSplat)
.Case("listremove", tgtok::XListRemove)
.Case("range", tgtok::XRange)
diff --git a/llvm/lib/TableGen/TGLexer.h b/llvm/lib/TableGen/TGLexer.h
index 9adc03c..4fa4d84 100644
--- a/llvm/lib/TableGen/TGLexer.h
+++ b/llvm/lib/TableGen/TGLexer.h
@@ -122,6 +122,7 @@ enum TokKind {
XSRL,
XSHL,
XListConcat,
+ XListFlatten,
XListSplat,
XStrConcat,
XInterleave,
diff --git a/llvm/lib/TableGen/TGParser.cpp b/llvm/lib/TableGen/TGParser.cpp
index 1a60c2a..54c9a90 100644
--- a/llvm/lib/TableGen/TGParser.cpp
+++ b/llvm/lib/TableGen/TGParser.cpp
@@ -1190,6 +1190,7 @@ Init *TGParser::ParseOperation(Record *CurRec, RecTy *ItemType) {
case tgtok::XNOT:
case tgtok::XToLower:
case tgtok::XToUpper:
+ case tgtok::XListFlatten:
case tgtok::XLOG2:
case tgtok::XHead:
case tgtok::XTail:
@@ -1235,6 +1236,11 @@ Init *TGParser::ParseOperation(Record *CurRec, RecTy *ItemType) {
Code = UnOpInit::NOT;
Type = IntRecTy::get(Records);
break;
+ case tgtok::XListFlatten:
+ Lex.Lex(); // eat the operation.
+ Code = UnOpInit::LISTFLATTEN;
+ Type = IntRecTy::get(Records); // Placeholder; the real type is inferred below.
+ break;
case tgtok::XLOG2:
Lex.Lex(); // eat the operation
Code = UnOpInit::LOG2;
@@ -1309,7 +1315,8 @@ Init *TGParser::ParseOperation(Record *CurRec, RecTy *ItemType) {
}
}
- if (Code == UnOpInit::HEAD || Code == UnOpInit::TAIL) {
+ if (Code == UnOpInit::HEAD || Code == UnOpInit::TAIL ||
+ Code == UnOpInit::LISTFLATTEN) {
ListInit *LHSl = dyn_cast<ListInit>(LHS);
TypedInit *LHSt = dyn_cast<TypedInit>(LHS);
if (!LHSl && !LHSt) {
@@ -1328,6 +1335,8 @@ Init *TGParser::ParseOperation(Record *CurRec, RecTy *ItemType) {
TokError("empty list argument in unary operator");
return nullptr;
}
+ bool UseElementType =
+ Code == UnOpInit::HEAD || Code == UnOpInit::LISTFLATTEN;
if (LHSl) {
Init *Item = LHSl->getElement(0);
TypedInit *Itemt = dyn_cast<TypedInit>(Item);
@@ -1335,12 +1344,25 @@ Init *TGParser::ParseOperation(Record *CurRec, RecTy *ItemType) {
TokError("untyped list element in unary operator");
return nullptr;
}
- Type = (Code == UnOpInit::HEAD) ? Itemt->getType()
- : ListRecTy::get(Itemt->getType());
+ Type = UseElementType ? Itemt->getType()
+ : ListRecTy::get(Itemt->getType());
} else {
assert(LHSt && "expected list type argument in unary operator");
ListRecTy *LType = dyn_cast<ListRecTy>(LHSt->getType());
- Type = (Code == UnOpInit::HEAD) ? LType->getElementType() : LType;
+ Type = UseElementType ? LType->getElementType() : LType;
+ }
+
+ // For !listflatten, we expect a list of lists, but also support a list of
+ // non-lists, where !listflatten will be a NOP.
+ if (Code == UnOpInit::LISTFLATTEN) {
+ ListRecTy *InnerListTy = dyn_cast<ListRecTy>(Type);
+ if (InnerListTy) {
+ // listflatten will convert list<list<X>> to list<X>.
+ Type = ListRecTy::get(InnerListTy->getElementType());
+ } else {
+ // If it's a list of non-lists, !listflatten will be a NOP.
+ Type = ListRecTy::get(Type);
+ }
}
}
@@ -1378,7 +1400,7 @@ Init *TGParser::ParseOperation(Record *CurRec, RecTy *ItemType) {
case tgtok::XExists: {
// Value ::= !exists '<' Type '>' '(' Value ')'
- Lex.Lex(); // eat the operation
+ Lex.Lex(); // eat the operation.
RecTy *Type = ParseOperatorType();
if (!Type)
diff --git a/llvm/lib/Target/AArch64/AArch64Combine.td b/llvm/lib/Target/AArch64/AArch64Combine.td
index 25989fb5..ead6455 100644
--- a/llvm/lib/Target/AArch64/AArch64Combine.td
+++ b/llvm/lib/Target/AArch64/AArch64Combine.td
@@ -330,5 +330,5 @@ def AArch64PostLegalizerCombiner
select_to_minmax, or_to_bsp, combine_concat_vector,
commute_constant_to_rhs,
push_freeze_to_prevent_poison_from_propagating,
- combine_mul_cmlt]> {
+ combine_mul_cmlt, combine_use_vector_truncate]> {
}
diff --git a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
index 7e041b0..fde07d8 100644
--- a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
@@ -2757,7 +2757,11 @@ StackOffset AArch64FrameLowering::resolveFrameOffsetReference(
bool FPOffsetFits = !ForSimm || FPOffset >= -256;
PreferFP |= Offset > -FPOffset && !SVEStackSize;
- if (MFI.hasVarSizedObjects()) {
+ if (FPOffset >= 0) {
+ // If the FPOffset is positive, that'll always be best, as the SP/BP
+ // will be even further away.
+ UseFP = true;
+ } else if (MFI.hasVarSizedObjects()) {
// If we have variable sized objects, we can use either FP or BP, as the
// SP offset is unknown. We can use the base pointer if we have one and
// FP is not preferred. If not, we're stuck with using FP.
@@ -2769,11 +2773,6 @@ StackOffset AArch64FrameLowering::resolveFrameOffsetReference(
// else we can use BP and FP, but the offset from FP won't fit.
// That will make us scavenge registers which we can probably avoid by
// using BP. If it won't fit for BP either, we'll scavenge anyway.
- } else if (FPOffset >= 0) {
- // Use SP or FP, whichever gives us the best chance of the offset
- // being in range for direct access. If the FPOffset is positive,
- // that'll always be best, as the SP will be even further away.
- UseFP = true;
} else if (MF.hasEHFunclets() && !RegInfo->hasBasePointer(MF)) {
// Funclets access the locals contained in the parent's stack frame
// via the frame pointer, so we have to use the FP in the parent
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index b11ac81..4166d9b 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -1664,6 +1664,7 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
setOperationAction(ISD::BITCAST, VT, Custom);
setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
setOperationAction(ISD::FP_EXTEND, VT, Custom);
+ setOperationAction(ISD::FP_ROUND, VT, Custom);
setOperationAction(ISD::MLOAD, VT, Custom);
setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
@@ -4334,14 +4335,57 @@ SDValue AArch64TargetLowering::LowerFP_EXTEND(SDValue Op,
SDValue AArch64TargetLowering::LowerFP_ROUND(SDValue Op,
SelectionDAG &DAG) const {
EVT VT = Op.getValueType();
- if (VT.isScalableVector())
- return LowerToPredicatedOp(Op, DAG, AArch64ISD::FP_ROUND_MERGE_PASSTHRU);
-
bool IsStrict = Op->isStrictFPOpcode();
SDValue SrcVal = Op.getOperand(IsStrict ? 1 : 0);
EVT SrcVT = SrcVal.getValueType();
bool Trunc = Op.getConstantOperandVal(IsStrict ? 2 : 1) == 1;
+ if (VT.isScalableVector()) {
+ if (VT.getScalarType() != MVT::bf16)
+ return LowerToPredicatedOp(Op, DAG, AArch64ISD::FP_ROUND_MERGE_PASSTHRU);
+
+ SDLoc DL(Op);
+ constexpr EVT I32 = MVT::nxv4i32;
+ auto ImmV = [&](int I) -> SDValue { return DAG.getConstant(I, DL, I32); };
+
+ SDValue NaN;
+ SDValue Narrow;
+
+ if (SrcVT == MVT::nxv2f32 || SrcVT == MVT::nxv4f32) {
+ if (Subtarget->hasBF16())
+ return LowerToPredicatedOp(Op, DAG,
+ AArch64ISD::FP_ROUND_MERGE_PASSTHRU);
+
+ Narrow = getSVESafeBitCast(I32, SrcVal, DAG);
+
+ // Set the quiet bit.
+ if (!DAG.isKnownNeverSNaN(SrcVal))
+ NaN = DAG.getNode(ISD::OR, DL, I32, Narrow, ImmV(0x400000));
+ } else
+ return SDValue();
+
+ if (!Trunc) {
+ SDValue Lsb = DAG.getNode(ISD::SRL, DL, I32, Narrow, ImmV(16));
+ Lsb = DAG.getNode(ISD::AND, DL, I32, Lsb, ImmV(1));
+ SDValue RoundingBias = DAG.getNode(ISD::ADD, DL, I32, Lsb, ImmV(0x7fff));
+ Narrow = DAG.getNode(ISD::ADD, DL, I32, Narrow, RoundingBias);
+ }
+
+ // Don't round if we had a NaN, we don't want to turn 0x7fffffff into
+ // 0x80000000.
+ if (NaN) {
+ EVT I1 = I32.changeElementType(MVT::i1);
+ EVT CondVT = VT.changeElementType(MVT::i1);
+ SDValue IsNaN = DAG.getSetCC(DL, CondVT, SrcVal, SrcVal, ISD::SETUO);
+ IsNaN = DAG.getNode(AArch64ISD::REINTERPRET_CAST, DL, I1, IsNaN);
+ Narrow = DAG.getSelect(DL, I32, IsNaN, NaN, Narrow);
+ }
+
+ // Now that we have rounded, shift the bits into position.
+ Narrow = DAG.getNode(ISD::SRL, DL, I32, Narrow, ImmV(16));
+ return getSVESafeBitCast(VT, Narrow, DAG);
+ }
+
if (useSVEForFixedLengthVectorVT(SrcVT, !Subtarget->isNeonAvailable()))
return LowerFixedLengthFPRoundToSVE(Op, DAG);
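
For scalable vectors of bf16 without native BFCVT support, the lowering above
performs the standard float-to-bfloat16 round-to-nearest-even in integer
arithmetic: add 0x7fff plus the bit that becomes the new LSB, keep NaNs quiet
instead of rounding them, then take the top 16 bits. A scalar sketch of the
same sequence (std::bit_cast requires C++20):

    #include <bit>
    #include <cmath>
    #include <cstdint>

    static uint16_t f32_to_bf16(float F) {
      uint32_t Bits = std::bit_cast<uint32_t>(F);
      if (std::isnan(F))
        return (uint16_t)((Bits | 0x400000) >> 16); // set the quiet bit only
      uint32_t Lsb = (Bits >> 16) & 1;  // ties-to-even needs the new LSB
      Bits += 0x7fff + Lsb;             // rounding bias
      return (uint16_t)(Bits >> 16);
    }
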
diff --git a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
index 1f3d63a2..7240f6a 100644
--- a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -2425,7 +2425,7 @@ let Predicates = [HasBF16, HasSVEorSME] in {
defm BFMLALT_ZZZ : sve2_fp_mla_long<0b101, "bfmlalt", nxv4f32, nxv8bf16, int_aarch64_sve_bfmlalt>;
defm BFMLALB_ZZZI : sve2_fp_mla_long_by_indexed_elem<0b100, "bfmlalb", nxv4f32, nxv8bf16, int_aarch64_sve_bfmlalb_lane_v2>;
defm BFMLALT_ZZZI : sve2_fp_mla_long_by_indexed_elem<0b101, "bfmlalt", nxv4f32, nxv8bf16, int_aarch64_sve_bfmlalt_lane_v2>;
- defm BFCVT_ZPmZ : sve_bfloat_convert<0b1, "bfcvt", int_aarch64_sve_fcvt_bf16f32>;
+ defm BFCVT_ZPmZ : sve_bfloat_convert<0b1, "bfcvt", int_aarch64_sve_fcvt_bf16f32, AArch64fcvtr_mt>;
defm BFCVTNT_ZPmZ : sve_bfloat_convert<0b0, "bfcvtnt", int_aarch64_sve_fcvtnt_bf16f32>;
} // End HasBF16, HasSVEorSME
diff --git a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
index 11a4aa4..ac05a44 100644
--- a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
@@ -2140,6 +2140,16 @@ static std::optional<Instruction *> instCombineSVESrshl(InstCombiner &IC,
return IC.replaceInstUsesWith(II, LSL);
}
+static std::optional<Instruction *> instCombineSVEInsr(InstCombiner &IC,
+ IntrinsicInst &II) {
+ Value *Vec = II.getOperand(0);
+
+ if (getSplatValue(Vec) == II.getOperand(1))
+ return IC.replaceInstUsesWith(II, Vec);
+
+ return std::nullopt;
+}
+
std::optional<Instruction *>
AArch64TTIImpl::instCombineIntrinsic(InstCombiner &IC,
IntrinsicInst &II) const {
@@ -2460,6 +2470,8 @@ AArch64TTIImpl::instCombineIntrinsic(InstCombiner &IC,
return instCombineSVESrshl(IC, II);
case Intrinsic::aarch64_sve_dupq_lane:
return instCombineSVEDupqLane(IC, II);
+ case Intrinsic::aarch64_sve_insr:
+ return instCombineSVEInsr(IC, II);
}
return std::nullopt;
@@ -4159,6 +4171,26 @@ AArch64TTIImpl::getArithmeticReductionCost(unsigned Opcode, VectorType *ValTy,
switch (ISD) {
default:
break;
+ case ISD::FADD:
+ if (Type *EltTy = ValTy->getScalarType();
+ // FIXME: For half types without fullfp16 support, this could extend and
+ // use a fp32 faddp reduction but current codegen unrolls.
+ MTy.isVector() && (EltTy->isFloatTy() || EltTy->isDoubleTy() ||
+ (EltTy->isHalfTy() && ST->hasFullFP16()))) {
+ const unsigned NElts = MTy.getVectorNumElements();
+ if (ValTy->getElementCount().getFixedValue() >= 2 && NElts >= 2 &&
+ isPowerOf2_32(NElts))
+ // A reduction over a series of fadd instructions is lowered to a series
+ // of faddp instructions. faddp has latency/throughput matching fadd, so
+ // every faddp can be considered to have a relative cost of 1 with
+ // CostKind = TCK_RecipThroughput.
+ // An faddp pairwise-adds vector elements, so the size of the input
+ // vector halves each time, requiring
+ // #(faddp instructions) = Log2_32(NElts).
+ return (LT.first - 1) + /*No of faddp instructions*/ Log2_32(NElts);
+ }
+ break;
case ISD::ADD:
if (const auto *Entry = CostTableLookup(CostTblNoPairwise, ISD, MTy))
return (LT.first - 1) + Entry->Cost;
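
The new FADD case prices a fast pairwise reduction: each faddp halves the
vector, so a power-of-two vector of NElts lanes needs Log2_32(NElts) faddp
instructions, plus (LT.first - 1) adds to combine the parts a wide type is
legalized into. The arithmetic in isolation (faddReductionCost is
illustrative):

    // Cost = (NumLegalizedParts - 1) + log2(NElts), per the hunk above.
    static unsigned faddReductionCost(unsigned NumLegalizedParts,
                                      unsigned NElts) {
      unsigned Log2 = 0;
      for (unsigned N = NElts; N > 1; N >>= 1)
        ++Log2;
      return (NumLegalizedParts - 1) + Log2;
    }
    // e.g. v8f32 on 128-bit NEON: 2 x v4f32 -> (2 - 1) + log2(4) = 3.
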
diff --git a/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
index 4f6131f..6a4b94a 100644
--- a/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
+++ b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
@@ -136,8 +136,8 @@ private:
assert(Predicated);
return ElementSize;
}
- unsigned getDstReg() const { return Dst; }
- unsigned getPgReg() const {
+ MCRegister getDstReg() const { return Dst; }
+ MCRegister getPgReg() const {
assert(Predicated);
return Pg;
}
@@ -146,8 +146,8 @@ private:
bool Active = false;
bool Predicated = false;
unsigned ElementSize;
- unsigned Dst;
- unsigned Pg;
+ MCRegister Dst;
+ MCRegister Pg;
} NextPrefix;
AArch64TargetStreamer &getTargetStreamer() {
@@ -5234,7 +5234,7 @@ bool AArch64AsmParser::parseInstruction(ParseInstructionInfo &Info,
return false;
}
-static inline bool isMatchingOrAlias(unsigned ZReg, unsigned Reg) {
+static inline bool isMatchingOrAlias(MCRegister ZReg, MCRegister Reg) {
assert((ZReg >= AArch64::Z0) && (ZReg <= AArch64::Z31));
return (ZReg == ((Reg - AArch64::B0) + AArch64::Z0)) ||
(ZReg == ((Reg - AArch64::H0) + AArch64::Z0)) ||
@@ -5322,7 +5322,7 @@ bool AArch64AsmParser::validateInstruction(MCInst &Inst, SMLoc &IDLoc,
if (IsWindowsArm64EC) {
for (unsigned i = 0; i < Inst.getNumOperands(); ++i) {
if (Inst.getOperand(i).isReg()) {
- unsigned Reg = Inst.getOperand(i).getReg();
+ MCRegister Reg = Inst.getOperand(i).getReg();
// At this point, vector registers are matched to their
// appropriately sized alias.
if ((Reg == AArch64::W13 || Reg == AArch64::X13) ||
@@ -5351,9 +5351,9 @@ bool AArch64AsmParser::validateInstruction(MCInst &Inst, SMLoc &IDLoc,
case AArch64::LDPWpre:
case AArch64::LDPXpost:
case AArch64::LDPXpre: {
- unsigned Rt = Inst.getOperand(1).getReg();
- unsigned Rt2 = Inst.getOperand(2).getReg();
- unsigned Rn = Inst.getOperand(3).getReg();
+ MCRegister Rt = Inst.getOperand(1).getReg();
+ MCRegister Rt2 = Inst.getOperand(2).getReg();
+ MCRegister Rn = Inst.getOperand(3).getReg();
if (RI->isSubRegisterEq(Rn, Rt))
return Error(Loc[0], "unpredictable LDP instruction, writeback base "
"is also a destination");
@@ -5376,8 +5376,8 @@ bool AArch64AsmParser::validateInstruction(MCInst &Inst, SMLoc &IDLoc,
case AArch64::LDPSWi:
case AArch64::LDPWi:
case AArch64::LDPXi: {
- unsigned Rt = Inst.getOperand(0).getReg();
- unsigned Rt2 = Inst.getOperand(1).getReg();
+ MCRegister Rt = Inst.getOperand(0).getReg();
+ MCRegister Rt2 = Inst.getOperand(1).getReg();
if (Rt == Rt2)
return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
break;
@@ -5389,8 +5389,8 @@ bool AArch64AsmParser::validateInstruction(MCInst &Inst, SMLoc &IDLoc,
case AArch64::LDPSpost:
case AArch64::LDPSpre:
case AArch64::LDPSWpost: {
- unsigned Rt = Inst.getOperand(1).getReg();
- unsigned Rt2 = Inst.getOperand(2).getReg();
+ MCRegister Rt = Inst.getOperand(1).getReg();
+ MCRegister Rt2 = Inst.getOperand(2).getReg();
if (Rt == Rt2)
return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
break;
@@ -5405,9 +5405,9 @@ bool AArch64AsmParser::validateInstruction(MCInst &Inst, SMLoc &IDLoc,
case AArch64::STPWpre:
case AArch64::STPXpost:
case AArch64::STPXpre: {
- unsigned Rt = Inst.getOperand(1).getReg();
- unsigned Rt2 = Inst.getOperand(2).getReg();
- unsigned Rn = Inst.getOperand(3).getReg();
+ MCRegister Rt = Inst.getOperand(1).getReg();
+ MCRegister Rt2 = Inst.getOperand(2).getReg();
+ MCRegister Rn = Inst.getOperand(3).getReg();
if (RI->isSubRegisterEq(Rn, Rt))
return Error(Loc[0], "unpredictable STP instruction, writeback base "
"is also a source");
@@ -5438,8 +5438,8 @@ bool AArch64AsmParser::validateInstruction(MCInst &Inst, SMLoc &IDLoc,
case AArch64::LDRSWpost:
case AArch64::LDRWpost:
case AArch64::LDRXpost: {
- unsigned Rt = Inst.getOperand(1).getReg();
- unsigned Rn = Inst.getOperand(2).getReg();
+ MCRegister Rt = Inst.getOperand(1).getReg();
+ MCRegister Rn = Inst.getOperand(2).getReg();
if (RI->isSubRegisterEq(Rn, Rt))
return Error(Loc[0], "unpredictable LDR instruction, writeback base "
"is also a source");
@@ -5457,8 +5457,8 @@ bool AArch64AsmParser::validateInstruction(MCInst &Inst, SMLoc &IDLoc,
case AArch64::STRHpre:
case AArch64::STRWpre:
case AArch64::STRXpre: {
- unsigned Rt = Inst.getOperand(1).getReg();
- unsigned Rn = Inst.getOperand(2).getReg();
+ MCRegister Rt = Inst.getOperand(1).getReg();
+ MCRegister Rn = Inst.getOperand(2).getReg();
if (RI->isSubRegisterEq(Rn, Rt))
return Error(Loc[0], "unpredictable STR instruction, writeback base "
"is also a source");
@@ -5472,9 +5472,9 @@ bool AArch64AsmParser::validateInstruction(MCInst &Inst, SMLoc &IDLoc,
case AArch64::STLXRH:
case AArch64::STLXRW:
case AArch64::STLXRX: {
- unsigned Rs = Inst.getOperand(0).getReg();
- unsigned Rt = Inst.getOperand(1).getReg();
- unsigned Rn = Inst.getOperand(2).getReg();
+ MCRegister Rs = Inst.getOperand(0).getReg();
+ MCRegister Rt = Inst.getOperand(1).getReg();
+ MCRegister Rn = Inst.getOperand(2).getReg();
if (RI->isSubRegisterEq(Rt, Rs) ||
(RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
return Error(Loc[0],
@@ -5485,10 +5485,10 @@ bool AArch64AsmParser::validateInstruction(MCInst &Inst, SMLoc &IDLoc,
case AArch64::STXPX:
case AArch64::STLXPW:
case AArch64::STLXPX: {
- unsigned Rs = Inst.getOperand(0).getReg();
- unsigned Rt1 = Inst.getOperand(1).getReg();
- unsigned Rt2 = Inst.getOperand(2).getReg();
- unsigned Rn = Inst.getOperand(3).getReg();
+ MCRegister Rs = Inst.getOperand(0).getReg();
+ MCRegister Rt1 = Inst.getOperand(1).getReg();
+ MCRegister Rt2 = Inst.getOperand(2).getReg();
+ MCRegister Rn = Inst.getOperand(3).getReg();
if (RI->isSubRegisterEq(Rt1, Rs) || RI->isSubRegisterEq(Rt2, Rs) ||
(RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
return Error(Loc[0],
@@ -5497,8 +5497,8 @@ bool AArch64AsmParser::validateInstruction(MCInst &Inst, SMLoc &IDLoc,
}
case AArch64::LDRABwriteback:
case AArch64::LDRAAwriteback: {
- unsigned Xt = Inst.getOperand(0).getReg();
- unsigned Xn = Inst.getOperand(1).getReg();
+ MCRegister Xt = Inst.getOperand(0).getReg();
+ MCRegister Xn = Inst.getOperand(1).getReg();
if (Xt == Xn)
return Error(Loc[0],
"unpredictable LDRA instruction, writeback base"
@@ -5605,12 +5605,12 @@ bool AArch64AsmParser::validateInstruction(MCInst &Inst, SMLoc &IDLoc,
case AArch64::CPYETWN:
case AArch64::CPYETRN:
case AArch64::CPYETN: {
- unsigned Xd_wb = Inst.getOperand(0).getReg();
- unsigned Xs_wb = Inst.getOperand(1).getReg();
- unsigned Xn_wb = Inst.getOperand(2).getReg();
- unsigned Xd = Inst.getOperand(3).getReg();
- unsigned Xs = Inst.getOperand(4).getReg();
- unsigned Xn = Inst.getOperand(5).getReg();
+ MCRegister Xd_wb = Inst.getOperand(0).getReg();
+ MCRegister Xs_wb = Inst.getOperand(1).getReg();
+ MCRegister Xn_wb = Inst.getOperand(2).getReg();
+ MCRegister Xd = Inst.getOperand(3).getReg();
+ MCRegister Xs = Inst.getOperand(4).getReg();
+ MCRegister Xn = Inst.getOperand(5).getReg();
if (Xd_wb != Xd)
return Error(Loc[0],
"invalid CPY instruction, Xd_wb and Xd do not match");
@@ -5655,11 +5655,11 @@ bool AArch64AsmParser::validateInstruction(MCInst &Inst, SMLoc &IDLoc,
case AArch64::MOPSSETGET:
case AArch64::MOPSSETGEN:
case AArch64::MOPSSETGETN: {
- unsigned Xd_wb = Inst.getOperand(0).getReg();
- unsigned Xn_wb = Inst.getOperand(1).getReg();
- unsigned Xd = Inst.getOperand(2).getReg();
- unsigned Xn = Inst.getOperand(3).getReg();
- unsigned Xm = Inst.getOperand(4).getReg();
+ MCRegister Xd_wb = Inst.getOperand(0).getReg();
+ MCRegister Xn_wb = Inst.getOperand(1).getReg();
+ MCRegister Xd = Inst.getOperand(2).getReg();
+ MCRegister Xn = Inst.getOperand(3).getReg();
+ MCRegister Xm = Inst.getOperand(4).getReg();
if (Xd_wb != Xd)
return Error(Loc[0],
"invalid SET instruction, Xd_wb and Xd do not match");
@@ -6451,7 +6451,7 @@ bool AArch64AsmParser::matchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
// GPR64. Twiddle it here if necessary.
AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
if (Op.isScalarReg()) {
- unsigned Reg = getXRegFromWReg(Op.getReg());
+ MCRegister Reg = getXRegFromWReg(Op.getReg());
Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
Op.getStartLoc(), Op.getEndLoc(),
getContext());
@@ -6467,7 +6467,7 @@ bool AArch64AsmParser::matchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
// GPR64. Twiddle it here if necessary.
AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
if (Op.isScalarReg()) {
- unsigned Reg = getXRegFromWReg(Op.getReg());
+ MCRegister Reg = getXRegFromWReg(Op.getReg());
Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
Op.getStartLoc(),
Op.getEndLoc(), getContext());
@@ -6484,7 +6484,7 @@ bool AArch64AsmParser::matchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
// GPR32. Twiddle it here if necessary.
AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
if (Op.isScalarReg()) {
- unsigned Reg = getWRegFromXReg(Op.getReg());
+ MCRegister Reg = getWRegFromXReg(Op.getReg());
Operands[1] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
Op.getStartLoc(),
Op.getEndLoc(), getContext());
@@ -7907,7 +7907,7 @@ ParseStatus AArch64AsmParser::tryParseGPRSeqPair(OperandVector &Operands) {
return Error(E, "expected second odd register of a consecutive same-size "
"even/odd register pair");
- unsigned Pair = 0;
+ MCRegister Pair;
if (isXReg) {
Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube64,
&AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID]);
@@ -8047,7 +8047,7 @@ ParseStatus AArch64AsmParser::tryParseGPR64x8(OperandVector &Operands) {
MCContext &ctx = getContext();
const MCRegisterInfo *RI = ctx.getRegisterInfo();
- int X8Reg = RI->getMatchingSuperReg(
+ MCRegister X8Reg = RI->getMatchingSuperReg(
XReg, AArch64::x8sub_0,
&AArch64MCRegisterClasses[AArch64::GPR64x8ClassRegClassID]);
if (!X8Reg)
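
The sweep from unsigned to MCRegister throughout this parser is type hygiene
rather than a behavior change: register ids get a named type in signatures
and locals, while implicit construction from the target's register enums
keeps the code source-compatible. A minimal wrapper in the same spirit
(RegHandle is illustrative, not the llvm::MCRegister class):

    // A thin typed handle over a raw register id. Comparisons and enum
    // construction stay ergonomic, but "this is a register" is now visible
    // in every signature that uses it.
    class RegHandle {
      unsigned Id = 0;
    public:
      constexpr RegHandle() = default;
      constexpr RegHandle(unsigned Id) : Id(Id) {}  // from register enums
      constexpr unsigned id() const { return Id; }
      constexpr explicit operator bool() const { return Id != 0; }
      friend constexpr bool operator==(RegHandle A, RegHandle B) {
        return A.Id == B.Id;
      }
    };
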
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
index 8ed867f..6cb1810 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
@@ -95,7 +95,8 @@ AArch64LegalizerInfo::AArch64LegalizerInfo(const AArch64Subtarget &ST)
getActionDefinitionsBuilder(
{G_IMPLICIT_DEF, G_FREEZE, G_CONSTANT_FOLD_BARRIER})
.legalFor({p0, s8, s16, s32, s64})
- .legalFor(PackedVectorAllTypeList)
+ .legalFor({v16s8, v8s16, v4s32, v2s64, v2p0, v8s8, v4s16, v2s32, v4s8,
+ v2s16, v2s8})
.widenScalarToNextPow2(0)
.clampScalar(0, s8, s64)
.moreElementsToNextPow2(0)
diff --git a/llvm/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp b/llvm/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp
index 924d64b..adc6c5b 100644
--- a/llvm/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp
+++ b/llvm/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp
@@ -622,7 +622,7 @@ public:
return CU::UNWIND_ARM64_MODE_DWARF;
case MCCFIInstruction::OpDefCfa: {
// Defines a frame pointer.
- unsigned XReg =
+ MCRegister XReg =
getXRegFromWReg(*MRI.getLLVMRegNum(Inst.getRegister(), true));
// Other CFA registers than FP are not supported by compact unwind.
diff --git a/llvm/lib/Target/AArch64/MCTargetDesc/AArch64InstPrinter.cpp b/llvm/lib/Target/AArch64/MCTargetDesc/AArch64InstPrinter.cpp
index c5de5b4..7c9113f 100644
--- a/llvm/lib/Target/AArch64/MCTargetDesc/AArch64InstPrinter.cpp
+++ b/llvm/lib/Target/AArch64/MCTargetDesc/AArch64InstPrinter.cpp
@@ -815,14 +815,14 @@ void AArch64AppleInstPrinter::printInst(const MCInst *MI, uint64_t Address,
O << '[' << MI->getOperand(OpNum++).getImm() << ']';
// Next the address: [xN]
- unsigned AddrReg = MI->getOperand(OpNum++).getReg();
+ MCRegister AddrReg = MI->getOperand(OpNum++).getReg();
O << ", [";
printRegName(O, AddrReg);
O << ']';
// Finally, there might be a post-indexed offset.
if (LdStDesc->NaturalOffset != 0) {
- unsigned Reg = MI->getOperand(OpNum++).getReg();
+ MCRegister Reg = MI->getOperand(OpNum++).getReg();
if (Reg != AArch64::XZR) {
O << ", ";
printRegName(O, Reg);
@@ -860,7 +860,7 @@ bool AArch64InstPrinter::printRangePrefetchAlias(const MCInst *MI,
if ((PRFOp & Mask) != Mask)
return false; // Rt != '11xxx', it's a PRFM instruction.
- unsigned Rm = MI->getOperand(2).getReg();
+ MCRegister Rm = MI->getOperand(2).getReg();
// "Rm" must be a 64-bit GPR for RPRFM.
if (MRI.getRegClass(AArch64::GPR32RegClassID).contains(Rm))
@@ -1143,8 +1143,7 @@ void AArch64InstPrinter::printOperand(const MCInst *MI, unsigned OpNo,
raw_ostream &O) {
const MCOperand &Op = MI->getOperand(OpNo);
if (Op.isReg()) {
- unsigned Reg = Op.getReg();
- printRegName(O, Reg);
+ printRegName(O, Op.getReg());
} else if (Op.isImm()) {
printImm(MI, OpNo, STI, O);
} else {
@@ -1184,7 +1183,7 @@ void AArch64InstPrinter::printPostIncOperand(const MCInst *MI, unsigned OpNo,
unsigned Imm, raw_ostream &O) {
const MCOperand &Op = MI->getOperand(OpNo);
if (Op.isReg()) {
- unsigned Reg = Op.getReg();
+ MCRegister Reg = Op.getReg();
if (Reg == AArch64::XZR)
markup(O, Markup::Immediate) << "#" << Imm;
else
@@ -1198,8 +1197,7 @@ void AArch64InstPrinter::printVRegOperand(const MCInst *MI, unsigned OpNo,
raw_ostream &O) {
const MCOperand &Op = MI->getOperand(OpNo);
assert(Op.isReg() && "Non-register vreg operand!");
- unsigned Reg = Op.getReg();
- printRegName(O, Reg, AArch64::vreg);
+ printRegName(O, Op.getReg(), AArch64::vreg);
}
void AArch64InstPrinter::printSysCROperand(const MCInst *MI, unsigned OpNo,
@@ -1280,8 +1278,8 @@ void AArch64InstPrinter::printArithExtend(const MCInst *MI, unsigned OpNum,
// UXTW/UXTX as LSL, and if the shift amount is also zero, print nothing at
// all.
if (ExtType == AArch64_AM::UXTW || ExtType == AArch64_AM::UXTX) {
- unsigned Dest = MI->getOperand(0).getReg();
- unsigned Src1 = MI->getOperand(1).getReg();
+ MCRegister Dest = MI->getOperand(0).getReg();
+ MCRegister Src1 = MI->getOperand(1).getReg();
if ( ((Dest == AArch64::SP || Src1 == AArch64::SP) &&
ExtType == AArch64_AM::UXTX) ||
((Dest == AArch64::WSP || Src1 == AArch64::WSP) &&
@@ -1347,7 +1345,7 @@ void AArch64InstPrinter::printPredicateAsCounter(const MCInst *MI,
unsigned OpNum,
const MCSubtargetInfo &STI,
raw_ostream &O) {
- unsigned Reg = MI->getOperand(OpNum).getReg();
+ MCRegister Reg = MI->getOperand(OpNum).getReg();
if (Reg < AArch64::PN0 || Reg > AArch64::PN15)
llvm_unreachable("Unsupported predicate-as-counter register");
O << "pn" << Reg - AArch64::PN0;
@@ -1504,9 +1502,9 @@ void AArch64InstPrinter::printFPImmOperand(const MCInst *MI, unsigned OpNum,
markup(O, Markup::Immediate) << format("#%.8f", FPImm);
}
-static unsigned getNextVectorRegister(unsigned Reg, unsigned Stride = 1) {
+static MCRegister getNextVectorRegister(MCRegister Reg, unsigned Stride = 1) {
while (Stride--) {
- switch (Reg) {
+ switch (Reg.id()) {
default:
llvm_unreachable("Vector register expected!");
case AArch64::Q0: Reg = AArch64::Q1; break;
@@ -1608,13 +1606,13 @@ void AArch64InstPrinter::printGPRSeqPairsClassOperand(const MCInst *MI,
raw_ostream &O) {
static_assert(size == 64 || size == 32,
"Template parameter must be either 32 or 64");
- unsigned Reg = MI->getOperand(OpNum).getReg();
+ MCRegister Reg = MI->getOperand(OpNum).getReg();
unsigned Sube = (size == 32) ? AArch64::sube32 : AArch64::sube64;
unsigned Subo = (size == 32) ? AArch64::subo32 : AArch64::subo64;
- unsigned Even = MRI.getSubReg(Reg, Sube);
- unsigned Odd = MRI.getSubReg(Reg, Subo);
+ MCRegister Even = MRI.getSubReg(Reg, Sube);
+ MCRegister Odd = MRI.getSubReg(Reg, Subo);
printRegName(O, Even);
O << ", ";
printRegName(O, Odd);
@@ -1649,7 +1647,7 @@ void AArch64InstPrinter::printVectorList(const MCInst *MI, unsigned OpNum,
const MCSubtargetInfo &STI,
raw_ostream &O,
StringRef LayoutSuffix) {
- unsigned Reg = MI->getOperand(OpNum).getReg();
+ MCRegister Reg = MI->getOperand(OpNum).getReg();
O << "{ ";
@@ -1679,13 +1677,13 @@ void AArch64InstPrinter::printVectorList(const MCInst *MI, unsigned OpNum,
Stride = 4;
// Now forget about the list and find out what the first register is.
- if (unsigned FirstReg = MRI.getSubReg(Reg, AArch64::dsub0))
+ if (MCRegister FirstReg = MRI.getSubReg(Reg, AArch64::dsub0))
Reg = FirstReg;
- else if (unsigned FirstReg = MRI.getSubReg(Reg, AArch64::qsub0))
+ else if (MCRegister FirstReg = MRI.getSubReg(Reg, AArch64::qsub0))
Reg = FirstReg;
- else if (unsigned FirstReg = MRI.getSubReg(Reg, AArch64::zsub0))
+ else if (MCRegister FirstReg = MRI.getSubReg(Reg, AArch64::zsub0))
Reg = FirstReg;
- else if (unsigned FirstReg = MRI.getSubReg(Reg, AArch64::psub0))
+ else if (MCRegister FirstReg = MRI.getSubReg(Reg, AArch64::psub0))
Reg = FirstReg;
// If it's a D-reg, we need to promote it to the equivalent Q-reg before
@@ -2008,7 +2006,7 @@ void AArch64InstPrinter::printSVERegOp(const MCInst *MI, unsigned OpNum,
default: llvm_unreachable("Invalid kind specifier.");
}
- unsigned Reg = MI->getOperand(OpNum).getReg();
+ MCRegister Reg = MI->getOperand(OpNum).getReg();
printRegName(O, Reg);
if (suffix != 0)
O << '.' << suffix;
@@ -2090,7 +2088,7 @@ void AArch64InstPrinter::printZPRasFPR(const MCInst *MI, unsigned OpNum,
default:
llvm_unreachable("Unsupported width");
}
- unsigned Reg = MI->getOperand(OpNum).getReg();
+ MCRegister Reg = MI->getOperand(OpNum).getReg();
printRegName(O, Reg - AArch64::Z0 + Base);
}
@@ -2108,21 +2106,21 @@ void AArch64InstPrinter::printExactFPImm(const MCInst *MI, unsigned OpNum,
void AArch64InstPrinter::printGPR64as32(const MCInst *MI, unsigned OpNum,
const MCSubtargetInfo &STI,
raw_ostream &O) {
- unsigned Reg = MI->getOperand(OpNum).getReg();
+ MCRegister Reg = MI->getOperand(OpNum).getReg();
printRegName(O, getWRegFromXReg(Reg));
}
void AArch64InstPrinter::printGPR64x8(const MCInst *MI, unsigned OpNum,
const MCSubtargetInfo &STI,
raw_ostream &O) {
- unsigned Reg = MI->getOperand(OpNum).getReg();
+ MCRegister Reg = MI->getOperand(OpNum).getReg();
printRegName(O, MRI.getSubReg(Reg, AArch64::x8sub_0));
}
void AArch64InstPrinter::printSyspXzrPair(const MCInst *MI, unsigned OpNum,
const MCSubtargetInfo &STI,
raw_ostream &O) {
- unsigned Reg = MI->getOperand(OpNum).getReg();
+ MCRegister Reg = MI->getOperand(OpNum).getReg();
assert(Reg == AArch64::XZR &&
"MC representation of SyspXzrPair should be XZR");
O << getRegisterName(Reg) << ", " << getRegisterName(Reg);
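
These printer and backend hunks migrate raw unsigned register values to MCRegister. A standalone sketch, assuming a minimal wrapper (not the real llvm::MCRegister, which also interoperates with Register and virtual registers): the typed value documents intent, and .id() makes the conversion back to the raw number explicit, which the patch uses in switch statements such as getNextVectorRegister:

  #include <cstdio>

  class MCReg { // stand-in for llvm::MCRegister
    unsigned Val;
  public:
    constexpr MCReg(unsigned V) : Val(V) {}
    constexpr unsigned id() const { return Val; } // raw encoding, for switches etc.
  };

  constexpr unsigned Q0 = 100, Q31 = 131; // hypothetical register numbers

  MCReg nextVectorReg(MCReg R, unsigned Stride = 1) {
    while (Stride--) {
      switch (R.id()) {               // switch on the raw id, as in the patch
      case Q31: R = MCReg(Q0); break; // wrap around like the Q-register file
      default:  R = MCReg(R.id() + 1); break;
      }
    }
    return R;
  }

  int main() {
    std::printf("%u\n", nextVectorReg(MCReg(Q31), 2).id()); // 101, i.e. Q0 + 1
  }
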
diff --git a/llvm/lib/Target/AArch64/SVEInstrFormats.td b/llvm/lib/Target/AArch64/SVEInstrFormats.td
index 8119198..0bfac64 100644
--- a/llvm/lib/Target/AArch64/SVEInstrFormats.td
+++ b/llvm/lib/Target/AArch64/SVEInstrFormats.td
@@ -8807,9 +8807,13 @@ class sve_bfloat_convert<bit N, string asm>
let mayRaiseFPException = 1;
}
-multiclass sve_bfloat_convert<bit N, string asm, SDPatternOperator op> {
+multiclass sve_bfloat_convert<bit N, string asm, SDPatternOperator op,
+ SDPatternOperator ir_op = null_frag> {
def NAME : sve_bfloat_convert<N, asm>;
+
def : SVE_3_Op_Pat<nxv8bf16, op, nxv8bf16, nxv8i1, nxv4f32, !cast<Instruction>(NAME)>;
+ def : SVE_1_Op_Passthru_Round_Pat<nxv4bf16, ir_op, nxv4i1, nxv4f32, !cast<Instruction>(NAME)>;
+ def : SVE_1_Op_Passthru_Round_Pat<nxv2bf16, ir_op, nxv2i1, nxv2f32, !cast<Instruction>(NAME)>;
}
//===----------------------------------------------------------------------===//
diff --git a/llvm/lib/Target/AArch64/Utils/AArch64BaseInfo.h b/llvm/lib/Target/AArch64/Utils/AArch64BaseInfo.h
index f821bb5..9faeccc 100644
--- a/llvm/lib/Target/AArch64/Utils/AArch64BaseInfo.h
+++ b/llvm/lib/Target/AArch64/Utils/AArch64BaseInfo.h
@@ -27,8 +27,8 @@
namespace llvm {
-inline static unsigned getWRegFromXReg(unsigned Reg) {
- switch (Reg) {
+inline static MCRegister getWRegFromXReg(MCRegister Reg) {
+ switch (Reg.id()) {
case AArch64::X0: return AArch64::W0;
case AArch64::X1: return AArch64::W1;
case AArch64::X2: return AArch64::W2;
@@ -67,8 +67,8 @@ inline static unsigned getWRegFromXReg(unsigned Reg) {
return Reg;
}
-inline static unsigned getXRegFromWReg(unsigned Reg) {
- switch (Reg) {
+inline static MCRegister getXRegFromWReg(MCRegister Reg) {
+ switch (Reg.id()) {
case AArch64::W0: return AArch64::X0;
case AArch64::W1: return AArch64::X1;
case AArch64::W2: return AArch64::X2;
@@ -107,8 +107,8 @@ inline static unsigned getXRegFromWReg(unsigned Reg) {
return Reg;
}
-inline static unsigned getXRegFromXRegTuple(unsigned RegTuple) {
- switch (RegTuple) {
+inline static MCRegister getXRegFromXRegTuple(MCRegister RegTuple) {
+ switch (RegTuple.id()) {
case AArch64::X0_X1_X2_X3_X4_X5_X6_X7: return AArch64::X0;
case AArch64::X2_X3_X4_X5_X6_X7_X8_X9: return AArch64::X2;
case AArch64::X4_X5_X6_X7_X8_X9_X10_X11: return AArch64::X4;
@@ -126,8 +126,8 @@ inline static unsigned getXRegFromXRegTuple(unsigned RegTuple) {
return RegTuple;
}
-static inline unsigned getBRegFromDReg(unsigned Reg) {
- switch (Reg) {
+static inline MCRegister getBRegFromDReg(MCRegister Reg) {
+ switch (Reg.id()) {
case AArch64::D0: return AArch64::B0;
case AArch64::D1: return AArch64::B1;
case AArch64::D2: return AArch64::B2;
@@ -165,9 +165,8 @@ static inline unsigned getBRegFromDReg(unsigned Reg) {
return Reg;
}
-
-static inline unsigned getDRegFromBReg(unsigned Reg) {
- switch (Reg) {
+static inline MCRegister getDRegFromBReg(MCRegister Reg) {
+ switch (Reg.id()) {
case AArch64::B0: return AArch64::D0;
case AArch64::B1: return AArch64::D1;
case AArch64::B2: return AArch64::D2;
diff --git a/llvm/lib/Target/AMDGPU/AMDGPU.h b/llvm/lib/Target/AMDGPU/AMDGPU.h
index b2dd354..4abb5a6 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPU.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPU.h
@@ -405,9 +405,6 @@ extern char &SIModeRegisterID;
void initializeAMDGPUInsertDelayAluPass(PassRegistry &);
extern char &AMDGPUInsertDelayAluID;
-void initializeAMDGPUInsertSingleUseVDSTPass(PassRegistry &);
-extern char &AMDGPUInsertSingleUseVDSTID;
-
void initializeSIInsertHardClausesPass(PassRegistry &);
extern char &SIInsertHardClausesID;
diff --git a/llvm/lib/Target/AMDGPU/AMDGPU.td b/llvm/lib/Target/AMDGPU/AMDGPU.td
index 5757ac0..3626fd8 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPU.td
+++ b/llvm/lib/Target/AMDGPU/AMDGPU.td
@@ -929,12 +929,6 @@ def FeatureSALUFloatInsts : SubtargetFeature<"salu-float",
"Has SALU floating point instructions"
>;
-def FeatureVGPRSingleUseHintInsts : SubtargetFeature<"vgpr-singleuse-hint",
- "HasVGPRSingleUseHintInsts",
- "true",
- "Has single-use VGPR hint instructions"
->;
-
def FeaturePseudoScalarTrans : SubtargetFeature<"pseudo-scalar-trans",
"HasPseudoScalarTrans",
"true",
@@ -1615,14 +1609,12 @@ def FeatureISAVersion11_5_0 : FeatureSet<
!listconcat(FeatureISAVersion11_Common.Features,
[FeatureSALUFloatInsts,
FeatureDPPSrc1SGPR,
- FeatureVGPRSingleUseHintInsts,
FeatureRequiredExportPriority])>;
def FeatureISAVersion11_5_1 : FeatureSet<
!listconcat(FeatureISAVersion11_Common.Features,
[FeatureSALUFloatInsts,
FeatureDPPSrc1SGPR,
- FeatureVGPRSingleUseHintInsts,
Feature1_5xVGPRs,
FeatureRequiredExportPriority])>;
@@ -1630,7 +1622,6 @@ def FeatureISAVersion11_5_2 : FeatureSet<
!listconcat(FeatureISAVersion11_Common.Features,
[FeatureSALUFloatInsts,
FeatureDPPSrc1SGPR,
- FeatureVGPRSingleUseHintInsts,
FeatureRequiredExportPriority])>;
def FeatureISAVersion12 : FeatureSet<
@@ -1663,7 +1654,6 @@ def FeatureISAVersion12 : FeatureSet<
FeatureSALUFloatInsts,
FeaturePseudoScalarTrans,
FeatureHasRestrictedSOffset,
- FeatureVGPRSingleUseHintInsts,
FeatureScalarDwordx3Loads,
FeatureDPPSrc1SGPR,
FeatureMaxHardClauseLength32,
@@ -1900,6 +1890,10 @@ def isGFX940Plus :
Predicate<"Subtarget->hasGFX940Insts()">,
AssemblerPredicate<(all_of FeatureGFX940Insts)>;
+def isNotGFX940Plus :
+ Predicate<"!Subtarget->hasGFX940Insts()">,
+ AssemblerPredicate<(all_of (not FeatureGFX940Insts))>;
+
def isGFX8GFX9NotGFX940 :
Predicate<"!Subtarget->hasGFX940Insts() &&"
"(Subtarget->getGeneration() == AMDGPUSubtarget::VOLCANIC_ISLANDS ||"
@@ -2267,9 +2261,6 @@ def HasNotMADIntraFwdBug : Predicate<"!Subtarget->hasMADIntraFwdBug()">;
def HasSALUFloatInsts : Predicate<"Subtarget->hasSALUFloatInsts()">,
AssemblerPredicate<(all_of FeatureSALUFloatInsts)>;
-def HasVGPRSingleUseHintInsts : Predicate<"Subtarget->hasVGPRSingleUseHintInsts()">,
- AssemblerPredicate<(all_of FeatureVGPRSingleUseHintInsts)>;
-
def HasPseudoScalarTrans : Predicate<"Subtarget->hasPseudoScalarTrans()">,
AssemblerPredicate<(all_of FeaturePseudoScalarTrans)>;
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUArgumentUsageInfo.h b/llvm/lib/Target/AMDGPU/AMDGPUArgumentUsageInfo.h
index 2e02bb4..06b2f18 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUArgumentUsageInfo.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUArgumentUsageInfo.h
@@ -77,6 +77,8 @@ public:
}
unsigned getMask() const {
+ // None of the target SGPRs or VGPRs are expected to have a 'zero' mask.
+ assert(Mask && "Invalid mask.");
return Mask;
}
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp b/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp
index b90d245..682c294 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp
@@ -900,6 +900,15 @@ void AMDGPUAsmPrinter::getSIProgramInfo(SIProgramInfo &ProgInfo,
ProgInfo.NumVGPR = AMDGPUMCExpr::createTotalNumVGPR(
ProgInfo.NumAccVGPR, ProgInfo.NumArchVGPR, Ctx);
+ } else if (isKernel(F.getCallingConv()) &&
+ MFI->getNumKernargPreloadedSGPRs()) {
+ // Consider cases where the total number of UserSGPRs with trailing
+ // allocated preload SGPRs is greater than the number of explicitly
+ // referenced SGPRs.
+ const MCExpr *UserPlusExtraSGPRs = MCBinaryExpr::createAdd(
+ CreateExpr(MFI->getNumUserSGPRs()), ExtraSGPRs, Ctx);
+ ProgInfo.NumSGPR =
+ AMDGPUMCExpr::createMax({ProgInfo.NumSGPR, UserPlusExtraSGPRs}, Ctx);
}
// Adjust number of registers used to meet default/requested minimum/maximum
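
A plain-arithmetic sketch of the clamp this hunk adds; the real code builds MCExpr nodes because the counts can still be symbolic at this point, and the numbers below are made up for illustration:

  #include <algorithm>
  #include <cstdio>

  int main() {
    unsigned NumSGPR = 10;      // SGPRs the kernel references explicitly
    unsigned NumUserSGPRs = 14; // user SGPRs incl. trailing preload SGPRs
    unsigned ExtraSGPRs = 2;    // e.g. VCC / flat-scratch reservations
    // The program info must cover the preloads even if never referenced.
    NumSGPR = std::max(NumSGPR, NumUserSGPRs + ExtraSGPRs);
    std::printf("NumSGPR = %u\n", NumSGPR); // NumSGPR = 16
  }
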
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInsertSingleUseVDST.cpp b/llvm/lib/Target/AMDGPU/AMDGPUInsertSingleUseVDST.cpp
deleted file mode 100644
index 43b3bf4..0000000
--- a/llvm/lib/Target/AMDGPU/AMDGPUInsertSingleUseVDST.cpp
+++ /dev/null
@@ -1,245 +0,0 @@
-//===- AMDGPUInsertSingleUseVDST.cpp - Insert s_singleuse_vdst instructions ==//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-/// \file
-/// Insert s_singleuse_vdst instructions on GFX11.5+ to mark regions of VALU
-/// instructions that produce single-use VGPR values. If the value is forwarded
-/// to the consumer instruction prior to VGPR writeback, the hardware can
-/// then skip (kill) the VGPR write.
-//
-//===----------------------------------------------------------------------===//
-
-#include "AMDGPU.h"
-#include "AMDGPUGenSearchableTables.inc"
-#include "GCNSubtarget.h"
-#include "SIInstrInfo.h"
-#include "SIRegisterInfo.h"
-#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/STLExtras.h"
-#include "llvm/ADT/SmallVector.h"
-#include "llvm/ADT/StringRef.h"
-#include "llvm/CodeGen/MachineBasicBlock.h"
-#include "llvm/CodeGen/MachineFunction.h"
-#include "llvm/CodeGen/MachineFunctionPass.h"
-#include "llvm/CodeGen/MachineInstr.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/CodeGen/MachineOperand.h"
-#include "llvm/CodeGen/Register.h"
-#include "llvm/IR/DebugLoc.h"
-#include "llvm/MC/MCRegister.h"
-#include "llvm/MC/MCRegisterInfo.h"
-#include "llvm/Pass.h"
-#include <array>
-
-using namespace llvm;
-
-#define DEBUG_TYPE "amdgpu-insert-single-use-vdst"
-
-namespace {
-class AMDGPUInsertSingleUseVDST : public MachineFunctionPass {
-private:
- const SIInstrInfo *SII;
- class SingleUseInstruction {
- private:
- static const unsigned MaxSkipRange = 0b111;
- static const unsigned MaxNumberOfSkipRegions = 2;
-
- unsigned LastEncodedPositionEnd;
- MachineInstr *ProducerInstr;
-
- std::array<unsigned, MaxNumberOfSkipRegions + 1> SingleUseRegions;
- SmallVector<unsigned, MaxNumberOfSkipRegions> SkipRegions;
-
- // Adds a skip region into the instruction.
- void skip(const unsigned ProducerPosition) {
- while (LastEncodedPositionEnd + MaxSkipRange < ProducerPosition) {
- SkipRegions.push_back(MaxSkipRange);
- LastEncodedPositionEnd += MaxSkipRange;
- }
- SkipRegions.push_back(ProducerPosition - LastEncodedPositionEnd);
- LastEncodedPositionEnd = ProducerPosition;
- }
-
- bool currentRegionHasSpace() {
- const auto Region = SkipRegions.size();
- // The first region has an extra bit of encoding space.
- return SingleUseRegions[Region] <
- ((Region == MaxNumberOfSkipRegions) ? 0b1111U : 0b111U);
- }
-
- unsigned encodeImm() {
- // Handle the first Single Use Region separately as it has an extra bit
- // of encoding space.
- unsigned Imm = SingleUseRegions[SkipRegions.size()];
- unsigned ShiftAmount = 4;
- for (unsigned i = SkipRegions.size(); i > 0; i--) {
- Imm |= SkipRegions[i - 1] << ShiftAmount;
- ShiftAmount += 3;
- Imm |= SingleUseRegions[i - 1] << ShiftAmount;
- ShiftAmount += 3;
- }
- return Imm;
- }
-
- public:
- SingleUseInstruction(const unsigned ProducerPosition,
- MachineInstr *Producer)
- : LastEncodedPositionEnd(ProducerPosition + 1), ProducerInstr(Producer),
- SingleUseRegions({1, 0, 0}) {}
-
- // Returns false if adding a new single use producer failed. This happens
- // because it could not be encoded, either because there is no room to
- // encode another single use producer region or that this single use
- // producer is too far away to encode the amount of instructions to skip.
- bool tryAddProducer(const unsigned ProducerPosition, MachineInstr *MI) {
- // Producer is too far away to encode into this instruction or another
- // skip region is needed and SkipRegions.size() = 2 so there's no room for
- // another skip region, therefore a new instruction is needed.
- if (LastEncodedPositionEnd +
- (MaxSkipRange * (MaxNumberOfSkipRegions - SkipRegions.size())) <
- ProducerPosition)
- return false;
-
- // If a skip region is needed.
- if (LastEncodedPositionEnd != ProducerPosition ||
- !currentRegionHasSpace()) {
- // If the current region is out of space therefore a skip region would
- // be needed, but there is no room for another skip region.
- if (SkipRegions.size() == MaxNumberOfSkipRegions)
- return false;
- skip(ProducerPosition);
- }
-
- SingleUseRegions[SkipRegions.size()]++;
- LastEncodedPositionEnd = ProducerPosition + 1;
- ProducerInstr = MI;
- return true;
- }
-
- auto emit(const SIInstrInfo *SII) {
- return BuildMI(*ProducerInstr->getParent(), ProducerInstr, DebugLoc(),
- SII->get(AMDGPU::S_SINGLEUSE_VDST))
- .addImm(encodeImm());
- }
- };
-
-public:
- static char ID;
-
- AMDGPUInsertSingleUseVDST() : MachineFunctionPass(ID) {}
-
- void insertSingleUseInstructions(
- ArrayRef<std::pair<unsigned, MachineInstr *>> SingleUseProducers) const {
- SmallVector<SingleUseInstruction> Instructions;
-
- for (auto &[Position, MI] : SingleUseProducers) {
- // Encode this position into the last single use instruction if possible.
- if (Instructions.empty() ||
- !Instructions.back().tryAddProducer(Position, MI)) {
- // If not, add a new instruction.
- Instructions.push_back(SingleUseInstruction(Position, MI));
- }
- }
-
- for (auto &Instruction : Instructions)
- Instruction.emit(SII);
- }
-
- bool runOnMachineFunction(MachineFunction &MF) override {
- const auto &ST = MF.getSubtarget<GCNSubtarget>();
- if (!ST.hasVGPRSingleUseHintInsts())
- return false;
-
- SII = ST.getInstrInfo();
- const auto *TRI = &SII->getRegisterInfo();
- bool InstructionEmitted = false;
-
- for (MachineBasicBlock &MBB : MF) {
- DenseMap<MCRegUnit, unsigned> RegisterUseCount;
-
- // Handle boundaries at the end of basic block separately to avoid
- // false positives. If they are live at the end of a basic block then
- // assume it has more uses later on.
- for (const auto &Liveout : MBB.liveouts()) {
- for (MCRegUnitMaskIterator Units(Liveout.PhysReg, TRI); Units.isValid();
- ++Units) {
- const auto [Unit, Mask] = *Units;
- if ((Mask & Liveout.LaneMask).any())
- RegisterUseCount[Unit] = 2;
- }
- }
-
- SmallVector<std::pair<unsigned, MachineInstr *>>
- SingleUseProducerPositions;
-
- unsigned VALUInstrCount = 0;
- for (MachineInstr &MI : reverse(MBB.instrs())) {
- // All registers in all operands need to be single use for an
- // instruction to be marked as a single use producer.
- bool AllProducerOperandsAreSingleUse = true;
-
- // Gather a list of Registers used before updating use counts to avoid
- // double counting registers that appear multiple times in a single
- // MachineInstr.
- SmallVector<MCRegUnit> RegistersUsed;
-
- for (const auto &Operand : MI.all_defs()) {
- const auto Reg = Operand.getReg();
-
- const auto RegUnits = TRI->regunits(Reg);
- if (any_of(RegUnits, [&RegisterUseCount](const MCRegUnit Unit) {
- return RegisterUseCount[Unit] > 1;
- }))
- AllProducerOperandsAreSingleUse = false;
-
- // Reset uses count when a register is no longer live.
- for (const MCRegUnit Unit : RegUnits)
- RegisterUseCount.erase(Unit);
- }
-
- for (const auto &Operand : MI.all_uses()) {
- const auto Reg = Operand.getReg();
-
- // Count the number of times each register is read.
- for (const MCRegUnit Unit : TRI->regunits(Reg)) {
- if (!is_contained(RegistersUsed, Unit))
- RegistersUsed.push_back(Unit);
- }
- }
- for (const MCRegUnit Unit : RegistersUsed)
- RegisterUseCount[Unit]++;
-
- // Do not attempt to optimise across exec mask changes.
- if (MI.modifiesRegister(AMDGPU::EXEC, TRI) ||
- AMDGPU::isInvalidSingleUseConsumerInst(MI.getOpcode())) {
- for (auto &UsedReg : RegisterUseCount)
- UsedReg.second = 2;
- }
-
- if (!SIInstrInfo::isVALU(MI) ||
- AMDGPU::isInvalidSingleUseProducerInst(MI.getOpcode()))
- continue;
- if (AllProducerOperandsAreSingleUse) {
- SingleUseProducerPositions.push_back({VALUInstrCount, &MI});
- InstructionEmitted = true;
- }
- VALUInstrCount++;
- }
- insertSingleUseInstructions(SingleUseProducerPositions);
- }
- return InstructionEmitted;
- }
-};
-} // namespace
-
-char AMDGPUInsertSingleUseVDST::ID = 0;
-
-char &llvm::AMDGPUInsertSingleUseVDSTID = AMDGPUInsertSingleUseVDST::ID;
-
-INITIALIZE_PASS(AMDGPUInsertSingleUseVDST, DEBUG_TYPE,
- "AMDGPU Insert SingleUseVDST", false, false)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
index 04fdee0..abd5074 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
@@ -311,12 +311,6 @@ static cl::opt<bool> EnableSIModeRegisterPass(
cl::init(true),
cl::Hidden);
-// Enable GFX11.5+ s_singleuse_vdst insertion
-static cl::opt<bool>
- EnableInsertSingleUseVDST("amdgpu-enable-single-use-vdst",
- cl::desc("Enable s_singleuse_vdst insertion"),
- cl::init(false), cl::Hidden);
-
// Enable GFX11+ s_delay_alu insertion
static cl::opt<bool>
EnableInsertDelayAlu("amdgpu-enable-delay-alu",
@@ -450,7 +444,6 @@ extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAMDGPUTarget() {
initializeAMDGPURewriteUndefForPHILegacyPass(*PR);
initializeAMDGPUUnifyMetadataPass(*PR);
initializeSIAnnotateControlFlowLegacyPass(*PR);
- initializeAMDGPUInsertSingleUseVDSTPass(*PR);
initializeAMDGPUInsertDelayAluPass(*PR);
initializeSIInsertHardClausesPass(*PR);
initializeSIInsertWaitcntsPass(*PR);
@@ -1518,9 +1511,6 @@ void GCNPassConfig::addPreEmitPass() {
// cases.
addPass(&PostRAHazardRecognizerID);
- if (isPassEnabled(EnableInsertSingleUseVDST, CodeGenOptLevel::Less))
- addPass(&AMDGPUInsertSingleUseVDSTID);
-
if (isPassEnabled(EnableInsertDelayAlu, CodeGenOptLevel::Less))
addPass(&AMDGPUInsertDelayAluID);
diff --git a/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp b/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
index f21c091..555b8cb 100644
--- a/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
+++ b/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
@@ -5834,6 +5834,17 @@ bool AMDGPUAsmParser::ParseDirectiveAMDHSAKernel() {
if (!Seen.contains(".amdhsa_next_free_sgpr"))
return TokError(".amdhsa_next_free_sgpr directive is required");
+ unsigned UserSGPRCount = ExplicitUserSGPRCount.value_or(ImpliedUserSGPRCount);
+
+ // Consider the case where the total number of UserSGPRs with trailing
+ // allocated preload SGPRs is greater than the number of explicitly
+ // referenced SGPRs.
+ if (PreloadLength) {
+ MCContext &Ctx = getContext();
+ NextFreeSGPR = AMDGPUMCExpr::createMax(
+ {NextFreeSGPR, MCConstantExpr::create(UserSGPRCount, Ctx)}, Ctx);
+ }
+
const MCExpr *VGPRBlocks;
const MCExpr *SGPRBlocks;
if (calculateGPRBlocks(getFeatureBits(), ReserveVCC, ReserveFlatScr,
@@ -5870,8 +5881,6 @@ bool AMDGPUAsmParser::ParseDirectiveAMDHSAKernel() {
return TokError("amdgpu_user_sgpr_count smaller than implied by "
"enabled user SGPRs");
- unsigned UserSGPRCount = ExplicitUserSGPRCount.value_or(ImpliedUserSGPRCount);
-
if (!isUInt<COMPUTE_PGM_RSRC2_USER_SGPR_COUNT_WIDTH>(UserSGPRCount))
return TokError("too many user SGPRs enabled");
AMDGPU::MCKernelDescriptor::bits_set(
diff --git a/llvm/lib/Target/AMDGPU/BUFInstructions.td b/llvm/lib/Target/AMDGPU/BUFInstructions.td
index 532ece8..6bdff98 100644
--- a/llvm/lib/Target/AMDGPU/BUFInstructions.td
+++ b/llvm/lib/Target/AMDGPU/BUFInstructions.td
@@ -1132,7 +1132,7 @@ let OtherPredicates = [HasGFX10_BEncoding] in {
>;
}
-let SubtargetPredicate = isGFX8GFX9 in {
+let SubtargetPredicate = isGFX8GFX9NotGFX940 in {
def BUFFER_STORE_LDS_DWORD : MUBUF_Pseudo_Store_Lds <"buffer_store_lds_dword">;
}
@@ -1214,7 +1214,7 @@ defm BUFFER_STORE_FORMAT_D16_HI_X : MUBUF_Pseudo_Stores <
} // End HasD16LoadStore
-let SubtargetPredicate = isNotGFX12Plus in
+let SubtargetPredicate = isNotGFX940Plus in
def BUFFER_WBINVL1 : MUBUF_Invalidate <
"buffer_wbinvl1", int_amdgcn_buffer_wbinvl1
>;
@@ -1297,6 +1297,7 @@ let SubtargetPredicate = isGFX7Plus in {
// Instruction definitions for CI and newer.
//===----------------------------------------------------------------------===//
+let SubtargetPredicate = isNotGFX940Plus in
def BUFFER_WBINVL1_VOL : MUBUF_Invalidate <"buffer_wbinvl1_vol",
int_amdgcn_buffer_wbinvl1_vol>;
diff --git a/llvm/lib/Target/AMDGPU/CMakeLists.txt b/llvm/lib/Target/AMDGPU/CMakeLists.txt
index e813653..7c883cc 100644
--- a/llvm/lib/Target/AMDGPU/CMakeLists.txt
+++ b/llvm/lib/Target/AMDGPU/CMakeLists.txt
@@ -81,7 +81,6 @@ add_llvm_target(AMDGPUCodeGen
AMDGPUMCInstLower.cpp
AMDGPUMemoryUtils.cpp
AMDGPUIGroupLP.cpp
- AMDGPUInsertSingleUseVDST.cpp
AMDGPUMarkLastScratchLoad.cpp
AMDGPUMIRFormatter.cpp
AMDGPUOpenCLEnqueuedBlockLowering.cpp
diff --git a/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp b/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp
index 2f5eba4..ca4be01 100644
--- a/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp
+++ b/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp
@@ -291,6 +291,7 @@ DECODE_OPERAND_REG_7(SReg_32_XM0_XEXEC, OPW32)
DECODE_OPERAND_REG_7(SReg_32_XEXEC_HI, OPW32)
DECODE_OPERAND_REG_7(SReg_64, OPW64)
DECODE_OPERAND_REG_7(SReg_64_XEXEC, OPW64)
+DECODE_OPERAND_REG_7(SReg_64_XEXEC_XNULL, OPW64)
DECODE_OPERAND_REG_7(SReg_96, OPW96)
DECODE_OPERAND_REG_7(SReg_128, OPW128)
DECODE_OPERAND_REG_7(SReg_256, OPW256)
diff --git a/llvm/lib/Target/AMDGPU/FLATInstructions.td b/llvm/lib/Target/AMDGPU/FLATInstructions.td
index 7b38220..6b5e479 100644
--- a/llvm/lib/Target/AMDGPU/FLATInstructions.td
+++ b/llvm/lib/Target/AMDGPU/FLATInstructions.td
@@ -209,7 +209,7 @@ class FLAT_Load_Pseudo <string opName, RegisterClass regClass,
!con(
!con(
!if(EnableSaddr,
- (ins SReg_64:$saddr, VGPR_32:$vaddr),
+ (ins SReg_64_XEXEC_XNULL:$saddr, VGPR_32:$vaddr),
(ins VReg_64:$vaddr)),
(ins flat_offset:$offset)),
// FIXME: Operands with default values do not work with following non-optional operands.
@@ -231,7 +231,7 @@ class FLAT_Store_Pseudo <string opName, RegisterClass vdataClass,
(outs),
!con(
!if(EnableSaddr,
- (ins VGPR_32:$vaddr, getLdStRegisterOperand<vdataClass>.ret:$vdata, SReg_64:$saddr),
+ (ins VGPR_32:$vaddr, getLdStRegisterOperand<vdataClass>.ret:$vdata, SReg_64_XEXEC_XNULL:$saddr),
(ins VReg_64:$vaddr, getLdStRegisterOperand<vdataClass>.ret:$vdata)),
(ins flat_offset:$offset, CPol_0:$cpol)),
" $vaddr, $vdata"#!if(HasSaddr, !if(EnableSaddr, ", $saddr", ", off"), "")#"$offset$cpol"> {
@@ -589,7 +589,7 @@ multiclass FLAT_Global_Atomic_Pseudo_NO_RTN<
def _SADDR : FLAT_AtomicNoRet_Pseudo <opName,
(outs),
- (ins VGPR_32:$vaddr, data_op:$vdata, SReg_64:$saddr, flat_offset:$offset, CPol_0:$cpol),
+ (ins VGPR_32:$vaddr, data_op:$vdata, SReg_64_XEXEC_XNULL:$saddr, flat_offset:$offset, CPol_0:$cpol),
" $vaddr, $vdata, $saddr$offset$cpol">,
GlobalSaddrTable<1, opName> {
let has_saddr = 1;
@@ -620,7 +620,7 @@ multiclass FLAT_Global_Atomic_Pseudo_RTN<
def _SADDR_RTN : FLAT_AtomicRet_Pseudo <opName,
(outs vdst_op:$vdst),
- (ins VGPR_32:$vaddr, data_op:$vdata, SReg_64:$saddr, flat_offset:$offset, CPol_GLC1:$cpol),
+ (ins VGPR_32:$vaddr, data_op:$vdata, SReg_64_XEXEC_XNULL:$saddr, flat_offset:$offset, CPol_GLC1:$cpol),
" $vdst, $vaddr, $vdata, $saddr$offset$cpol">,
GlobalSaddrTable<1, opName#"_rtn"> {
let has_saddr = 1;
diff --git a/llvm/lib/Target/AMDGPU/GCNRewritePartialRegUses.cpp b/llvm/lib/Target/AMDGPU/GCNRewritePartialRegUses.cpp
index 90169b1..66c05b4 100644
--- a/llvm/lib/Target/AMDGPU/GCNRewritePartialRegUses.cpp
+++ b/llvm/lib/Target/AMDGPU/GCNRewritePartialRegUses.cpp
@@ -397,7 +397,7 @@ void GCNRewritePartialRegUses::updateLiveIntervals(Register OldReg,
}
if (NewLI.empty())
NewLI.assign(OldLI, Allocator);
- NewLI.verify(MRI);
+ assert(NewLI.verify(MRI));
LIS->removeInterval(OldReg);
}
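
Folding the verification into assert keeps the (side-effect-free) check in +Asserts builds and removes it entirely under NDEBUG. A minimal sketch of the pattern, with a made-up invariant:

  #include <cassert>

  struct Interval {
    int Begin = 0, End = 0;
    // Pure check, so it is safe to call only inside assert().
    bool verify() const { return Begin <= End; }
  };

  int main() {
    Interval LI{1, 5};
    assert(LI.verify()); // compiled out when NDEBUG is defined
    (void)LI;            // avoid an unused-variable warning in release builds
  }
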
diff --git a/llvm/lib/Target/AMDGPU/GCNSubtarget.h b/llvm/lib/Target/AMDGPU/GCNSubtarget.h
index a4ae8a1..e6b7342 100644
--- a/llvm/lib/Target/AMDGPU/GCNSubtarget.h
+++ b/llvm/lib/Target/AMDGPU/GCNSubtarget.h
@@ -215,7 +215,6 @@ protected:
bool HasPackedTID = false;
bool ScalarizeGlobal = false;
bool HasSALUFloatInsts = false;
- bool HasVGPRSingleUseHintInsts = false;
bool HasPseudoScalarTrans = false;
bool HasRestrictedSOffset = false;
@@ -1280,8 +1279,6 @@ public:
bool hasSALUFloatInsts() const { return HasSALUFloatInsts; }
- bool hasVGPRSingleUseHintInsts() const { return HasVGPRSingleUseHintInsts; }
-
bool hasPseudoScalarTrans() const { return HasPseudoScalarTrans; }
bool hasRestrictedSOffset() const { return HasRestrictedSOffset; }
diff --git a/llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp b/llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp
index 434336e..46f5097 100644
--- a/llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp
+++ b/llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp
@@ -92,12 +92,12 @@ public:
SetVector<MachineInstr *> SChain;
// Number of SGPR to VGPR copies that are used to put the SALU computation
// results back to VALU.
- unsigned NumSVCopies;
+ unsigned NumSVCopies = 0;
- unsigned Score;
+ unsigned Score = 0;
// Actual count of v_readfirstlane_b32
// which need to be inserted to keep SChain SALU
- unsigned NumReadfirstlanes;
+ unsigned NumReadfirstlanes = 0;
// Current score state. To speedup selection V2SCopyInfos for processing
bool NeedToBeConvertedToVALU = false;
// Unique ID. Used as a key for mapping to keep permanent order.
@@ -109,7 +109,7 @@ public:
SetVector<unsigned> Siblings;
V2SCopyInfo() : Copy(nullptr), ID(0){};
V2SCopyInfo(unsigned Id, MachineInstr *C, unsigned Width)
- : Copy(C), NumSVCopies(0), NumReadfirstlanes(Width / 32), ID(Id){};
+ : Copy(C), NumReadfirstlanes(Width / 32), ID(Id){};
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void dump() {
dbgs() << ID << " : " << *Copy << "\n\tS:" << SChain.size()
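
A sketch of the cleanup pattern used here: moving the scalar defaults to in-class initializers lets every constructor, including the defaulted one, start from the same state, so the init lists only mention members that actually differ. Names are stand-ins for the patched class:

  #include <cstdio>

  struct CopyInfo {
    unsigned NumSVCopies = 0; // defaulted once, in-class
    unsigned Score = 0;
    unsigned NumReadfirstlanes = 0;
    CopyInfo() = default; // picks up the defaults above
    explicit CopyInfo(unsigned Width) : NumReadfirstlanes(Width / 32) {}
  };

  int main() {
    CopyInfo I(64);
    std::printf("%u %u %u\n", I.NumSVCopies, I.Score, I.NumReadfirstlanes); // 0 0 2
  }
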
diff --git a/llvm/lib/Target/AMDGPU/SIFrameLowering.cpp b/llvm/lib/Target/AMDGPU/SIFrameLowering.cpp
index 2c67c4a..50a6f02 100644
--- a/llvm/lib/Target/AMDGPU/SIFrameLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIFrameLowering.cpp
@@ -1418,10 +1418,15 @@ void SIFrameLowering::processFunctionBeforeFrameFinalized(
// the debug value instructions. We should instead, update it with the
// correct register value. But not sure the register value alone is
for (MachineInstr &MI : MBB) {
- if (MI.isDebugValue() && MI.getOperand(0).isFI() &&
- !MFI.isFixedObjectIndex(MI.getOperand(0).getIndex()) &&
- SpillFIs[MI.getOperand(0).getIndex()]) {
- MI.getOperand(0).ChangeToRegister(Register(), false /*isDef*/);
+ if (MI.isDebugValue()) {
+ uint32_t StackOperandIdx = MI.isDebugValueList() ? 2 : 0;
+ if (MI.getOperand(StackOperandIdx).isFI() &&
+ !MFI.isFixedObjectIndex(
+ MI.getOperand(StackOperandIdx).getIndex()) &&
+ SpillFIs[MI.getOperand(StackOperandIdx).getIndex()]) {
+ MI.getOperand(StackOperandIdx)
+ .ChangeToRegister(Register(), false /*isDef*/);
+ }
}
}
}
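
A toy model of the fix, assuming LLVM's debug-instruction layout: a plain DBG_VALUE carries its location in operand 0, while a DBG_VALUE_LIST puts the variable and expression first, so its first location is operand 2. Everything below is a stand-in for MachineInstr:

  #include <cstdio>
  #include <vector>

  struct MIModel {
    bool IsDebugValueList;
    std::vector<int> Operands; // frame indices, for the sketch
  };

  unsigned stackOperandIdx(const MIModel &MI) {
    return MI.IsDebugValueList ? 2u : 0u; // mirrors the patched selection
  }

  int main() {
    MIModel Plain{false, {7}};
    MIModel List{true, {/*var*/ -1, /*expr*/ -2, 7}};
    std::printf("%d %d\n", Plain.Operands[stackOperandIdx(Plain)],
                List.Operands[stackOperandIdx(List)]); // 7 7
  }
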
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index a9754ba..08f2ff4 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -9365,6 +9365,7 @@ SDValue SITargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
Opc = AMDGPU::S_GET_BARRIER_STATE_IMM;
SDValue K = DAG.getTargetConstant(BarID, DL, MVT::i32);
Ops.push_back(K);
+ Ops.push_back(Chain);
} else {
Opc = AMDGPU::S_GET_BARRIER_STATE_M0;
SDValue M0Val = copyToM0(DAG, Chain, DL, Op.getOperand(2));
@@ -9967,7 +9968,9 @@ SDValue SITargetLowering::LowerINTRINSIC_VOID(SDValue Op,
0);
}
Ops.push_back(copyToM0(DAG, Chain, DL, M0Val).getValue(0));
- } else if (!IsInlinableBarID) {
+ } else if (IsInlinableBarID) {
+ Ops.push_back(Chain);
+ } else {
Ops.push_back(copyToM0(DAG, Chain, DL, BarOp).getValue(0));
}
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
index 579b37c..9ad0b4c 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -4514,7 +4514,6 @@ MachineInstr *SIInstrInfo::buildShrunkInst(MachineInstr &MI,
// of vcc was already added during the initial BuildMI, but we
// 1) may need to change vcc to vcc_lo to preserve the original register
// 2) have to preserve the original flags.
- fixImplicitOperands(*Inst32);
copyFlagsToImplicitVCC(*Inst32, *Src2);
continue;
}
@@ -4524,7 +4523,7 @@ MachineInstr *SIInstrInfo::buildShrunkInst(MachineInstr &MI,
}
// FIXME: Losing implicit operands
-
+ fixImplicitOperands(*Inst32);
return Inst32;
}
@@ -6089,10 +6088,14 @@ void SIInstrInfo::legalizeOperandsVOP3(MachineRegisterInfo &MRI,
legalizeOpWithMove(MI, VOP3Idx[2]);
}
-Register SIInstrInfo::readlaneVGPRToSGPR(Register SrcReg, MachineInstr &UseMI,
- MachineRegisterInfo &MRI) const {
+Register SIInstrInfo::readlaneVGPRToSGPR(
+ Register SrcReg, MachineInstr &UseMI, MachineRegisterInfo &MRI,
+ const TargetRegisterClass *DstRC /*=nullptr*/) const {
const TargetRegisterClass *VRC = MRI.getRegClass(SrcReg);
const TargetRegisterClass *SRC = RI.getEquivalentSGPRClass(VRC);
+ if (DstRC)
+ SRC = RI.getCommonSubClass(SRC, DstRC);
+
Register DstReg = MRI.createVirtualRegister(SRC);
unsigned SubRegs = RI.getRegSizeInBits(*VRC) / 32;
@@ -6245,7 +6248,10 @@ void SIInstrInfo::legalizeOperandsFLAT(MachineRegisterInfo &MRI,
if (moveFlatAddrToVGPR(MI))
return;
- Register ToSGPR = readlaneVGPRToSGPR(SAddr->getReg(), MI, MRI);
+ const TargetRegisterClass *DeclaredRC = getRegClass(
+ MI.getDesc(), SAddr->getOperandNo(), &RI, *MI.getParent()->getParent());
+
+ Register ToSGPR = readlaneVGPRToSGPR(SAddr->getReg(), MI, MRI, DeclaredRC);
SAddr->setReg(ToSGPR);
}
@@ -9785,6 +9791,9 @@ bool SIInstrInfo::optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg,
else
return false;
+ // A valid Mask is required to have a single bit set, hence a non-zero and
+ // power-of-two value. This verifies that we will not do a 64-bit shift below.
+ assert(llvm::has_single_bit<uint64_t>(Mask) && "Invalid mask.");
unsigned BitNo = llvm::countr_zero((uint64_t)Mask);
if (IsSigned && BitNo == SrcSize - 1)
return false;
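
A standalone C++20 illustration of the new assertion, using the <bit> functions that llvm::has_single_bit and llvm::countr_zero mirror: a valid mask has exactly one bit set, which also rules out countr_zero returning 64 and feeding an undefined 64-bit shift:

  #include <bit>
  #include <cassert>
  #include <cstdint>
  #include <cstdio>

  int main() {
    uint64_t Mask = uint64_t{1} << 12;
    assert(std::has_single_bit(Mask) && "Invalid mask.");
    int BitNo = std::countr_zero(Mask); // 12; would be 64 for Mask == 0
    std::printf("BitNo = %d\n", BitNo);
  }
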
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.h b/llvm/lib/Target/AMDGPU/SIInstrInfo.h
index d560792..f755490 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.h
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.h
@@ -1213,12 +1213,14 @@ public:
/// Fix operands in \p MI to satisfy constant bus requirements.
void legalizeOperandsVOP3(MachineRegisterInfo &MRI, MachineInstr &MI) const;
- /// Copy a value from a VGPR (\p SrcReg) to SGPR. This function can only
- /// be used when it is know that the value in SrcReg is same across all
- /// threads in the wave.
+ /// Copy a value from a VGPR (\p SrcReg) to SGPR. The desired register class
+ /// for the dst register (\p DstRC) can be optionally supplied. This function
+ /// can only be used when it is known that the value in SrcReg is the same across
+ /// all threads in the wave.
/// \returns The SGPR register that \p SrcReg was copied to.
Register readlaneVGPRToSGPR(Register SrcReg, MachineInstr &UseMI,
- MachineRegisterInfo &MRI) const;
+ MachineRegisterInfo &MRI,
+ const TargetRegisterClass *DstRC = nullptr) const;
void legalizeOperandsSMRD(MachineRegisterInfo &MRI, MachineInstr &MI) const;
void legalizeOperandsFLAT(MachineRegisterInfo &MRI, MachineInstr &MI) const;
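
The optional DstRC parameter narrows the chosen SGPR class when the caller knows the operand's declared class. A sketch of the pattern with register classes modeled as plain sets (the real code uses TargetRegisterClass and getCommonSubClass):

  #include <algorithm>
  #include <cstdio>
  #include <iterator>
  #include <set>

  using RegClass = std::set<int>;

  RegClass pickClass(const RegClass &SRC, const RegClass *DstRC = nullptr) {
    if (!DstRC)
      return SRC; // previous behavior: equivalent SGPR class as-is
    RegClass Common; // new behavior: restrict to the common subclass
    std::set_intersection(SRC.begin(), SRC.end(), DstRC->begin(), DstRC->end(),
                          std::inserter(Common, Common.begin()));
    return Common;
  }

  int main() {
    RegClass SRC{1, 2, 3, 4}, Declared{2, 4, 6};
    std::printf("%zu candidate regs\n", pickClass(SRC, &Declared).size()); // 2
  }
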
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.td b/llvm/lib/Target/AMDGPU/SIInstrInfo.td
index c016be2..087ca1f 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.td
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.td
@@ -2409,8 +2409,6 @@ class VOPProfile <list<ValueType> _ArgVT, bit _EnableClamp = 0> {
field bit EnableClamp = _EnableClamp;
field bit IsTrue16 = 0;
field bit IsRealTrue16 = 0;
- field bit IsInvalidSingleUseConsumer = 0;
- field bit IsInvalidSingleUseProducer = 0;
field ValueType DstVT = ArgVT[0];
field ValueType Src0VT = ArgVT[1];
diff --git a/llvm/lib/Target/AMDGPU/SILowerControlFlow.cpp b/llvm/lib/Target/AMDGPU/SILowerControlFlow.cpp
index 99c7d2b..5b74022 100644
--- a/llvm/lib/Target/AMDGPU/SILowerControlFlow.cpp
+++ b/llvm/lib/Target/AMDGPU/SILowerControlFlow.cpp
@@ -270,7 +270,7 @@ void SILowerControlFlow::emitIf(MachineInstr &MI) {
I = skipToUncondBrOrEnd(MBB, I);
// Insert the S_CBRANCH_EXECZ instruction which will be optimized later
- // during SIRemoveShortExecBranches.
+ // during SIPreEmitPeephole.
MachineInstr *NewBr = BuildMI(MBB, I, DL, TII->get(AMDGPU::S_CBRANCH_EXECZ))
.add(MI.getOperand(2));
diff --git a/llvm/lib/Target/AMDGPU/SILowerSGPRSpills.cpp b/llvm/lib/Target/AMDGPU/SILowerSGPRSpills.cpp
index 28bba8c..35e5bea 100644
--- a/llvm/lib/Target/AMDGPU/SILowerSGPRSpills.cpp
+++ b/llvm/lib/Target/AMDGPU/SILowerSGPRSpills.cpp
@@ -418,10 +418,15 @@ bool SILowerSGPRSpills::run(MachineFunction &MF) {
// correct register value. But not sure the register value alone is
// adequate to lower the DIExpression. It should be worked out later.
for (MachineInstr &MI : MBB) {
- if (MI.isDebugValue() && MI.getOperand(0).isFI() &&
- !MFI.isFixedObjectIndex(MI.getOperand(0).getIndex()) &&
- SpillFIs[MI.getOperand(0).getIndex()]) {
- MI.getOperand(0).ChangeToRegister(Register(), false /*isDef*/);
+ if (MI.isDebugValue()) {
+ uint32_t StackOperandIdx = MI.isDebugValueList() ? 2 : 0;
+ if (MI.getOperand(StackOperandIdx).isFI() &&
+ !MFI.isFixedObjectIndex(
+ MI.getOperand(StackOperandIdx).getIndex()) &&
+ SpillFIs[MI.getOperand(StackOperandIdx).getIndex()]) {
+ MI.getOperand(StackOperandIdx)
+ .ChangeToRegister(Register(), false /*isDef*/);
+ }
}
}
}
diff --git a/llvm/lib/Target/AMDGPU/SIMachineScheduler.h b/llvm/lib/Target/AMDGPU/SIMachineScheduler.h
index ac34a74..f8f4b5a 100644
--- a/llvm/lib/Target/AMDGPU/SIMachineScheduler.h
+++ b/llvm/lib/Target/AMDGPU/SIMachineScheduler.h
@@ -120,8 +120,8 @@ public:
ArrayRef<std::pair<SIScheduleBlock*, SIScheduleBlockLinkKind>>
getSuccs() const { return Succs; }
- unsigned Height; // Maximum topdown path length to block without outputs
- unsigned Depth; // Maximum bottomup path length to block without inputs
+ unsigned Height = 0; // Maximum topdown path length to block without outputs
+ unsigned Depth = 0; // Maximum bottomup path length to block without inputs
unsigned getNumHighLatencySuccessors() const {
return NumHighLatencySuccessors;
diff --git a/llvm/lib/Target/AMDGPU/SIRegisterInfo.td b/llvm/lib/Target/AMDGPU/SIRegisterInfo.td
index 443797e..ef9adde 100644
--- a/llvm/lib/Target/AMDGPU/SIRegisterInfo.td
+++ b/llvm/lib/Target/AMDGPU/SIRegisterInfo.td
@@ -849,14 +849,21 @@ def TTMP_64 : SIRegisterClass<"AMDGPU", [v2i32, i64, f64, v4i16, v4f16, v4bf16],
let HasSGPR = 1;
}
-def SReg_64_XEXEC : SIRegisterClass<"AMDGPU", [v2i32, i64, v2f32, f64, i1, v4i16, v4f16, v4bf16], 32,
- (add SGPR_64, VCC, FLAT_SCR, XNACK_MASK, SGPR_NULL64, SRC_SHARED_BASE,
+def SReg_64_XEXEC_XNULL : SIRegisterClass<"AMDGPU", [v2i32, i64, v2f32, f64, i1, v4i16, v4f16, v4bf16], 32,
+ (add SGPR_64, VCC, FLAT_SCR, XNACK_MASK, SRC_SHARED_BASE,
SRC_SHARED_LIMIT, SRC_PRIVATE_BASE, SRC_PRIVATE_LIMIT, TTMP_64, TBA, TMA)> {
let CopyCost = 1;
let AllocationPriority = 1;
let HasSGPR = 1;
}
+def SReg_64_XEXEC : SIRegisterClass<"AMDGPU", [v2i32, i64, v2f32, f64, i1, v4i16, v4f16, v4bf16], 32,
+ (add SReg_64_XEXEC_XNULL, SGPR_NULL64)> {
+ let CopyCost = 1;
+ let AllocationPriority = 1;
+ let HasSGPR = 1;
+}
+
def SReg_64 : SIRegisterClass<"AMDGPU", [v2i32, i64, v2f32, f64, i1, v4i16, v4f16, v4bf16], 32,
(add SReg_64_XEXEC, EXEC)> {
let CopyCost = 1;
diff --git a/llvm/lib/Target/AMDGPU/SOPInstructions.td b/llvm/lib/Target/AMDGPU/SOPInstructions.td
index 2e73a1a..9da27a7 100644
--- a/llvm/lib/Target/AMDGPU/SOPInstructions.td
+++ b/llvm/lib/Target/AMDGPU/SOPInstructions.td
@@ -1752,11 +1752,6 @@ let OtherPredicates = [HasExportInsts] in
"$simm16">;
} // End SubtargetPredicate = isGFX11Plus
-let SubtargetPredicate = HasVGPRSingleUseHintInsts in {
- def S_SINGLEUSE_VDST :
- SOPP_Pseudo<"s_singleuse_vdst", (ins s16imm:$simm16), "$simm16">;
-} // End SubtargetPredicate = HasVGPRSingleUseHintInsts
-
let SubtargetPredicate = isGFX12Plus, hasSideEffects = 1 in {
def S_WAIT_LOADCNT :
SOPP_Pseudo<"s_wait_loadcnt", (ins s16imm:$simm16), "$simm16",
@@ -2677,12 +2672,6 @@ defm S_ICACHE_INV : SOPP_Real_32_gfx11_gfx12<0x03c>;
defm S_BARRIER : SOPP_Real_32_gfx11<0x03d>;
//===----------------------------------------------------------------------===//
-// SOPP - GFX1150, GFX12.
-//===----------------------------------------------------------------------===//
-
-defm S_SINGLEUSE_VDST : SOPP_Real_32_gfx11_gfx12<0x013>;
-
-//===----------------------------------------------------------------------===//
// SOPP - GFX6, GFX7, GFX8, GFX9, GFX10
//===----------------------------------------------------------------------===//
diff --git a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp
index 8b5ec87..f32c82f 100644
--- a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp
@@ -379,12 +379,6 @@ struct VOPTrue16Info {
bool IsTrue16;
};
-struct SingleUseExceptionInfo {
- uint16_t Opcode;
- bool IsInvalidSingleUseConsumer;
- bool IsInvalidSingleUseProducer;
-};
-
struct FP8DstByteSelInfo {
uint16_t Opcode;
bool HasFP8DstByteSel;
@@ -396,8 +390,6 @@ struct FP8DstByteSelInfo {
#define GET_MTBUFInfoTable_IMPL
#define GET_MUBUFInfoTable_DECL
#define GET_MUBUFInfoTable_IMPL
-#define GET_SingleUseExceptionTable_DECL
-#define GET_SingleUseExceptionTable_IMPL
#define GET_SMInfoTable_DECL
#define GET_SMInfoTable_IMPL
#define GET_VOP1InfoTable_DECL
@@ -626,16 +618,6 @@ bool isTrue16Inst(unsigned Opc) {
return Info ? Info->IsTrue16 : false;
}
-bool isInvalidSingleUseConsumerInst(unsigned Opc) {
- const SingleUseExceptionInfo *Info = getSingleUseExceptionHelper(Opc);
- return Info && Info->IsInvalidSingleUseConsumer;
-}
-
-bool isInvalidSingleUseProducerInst(unsigned Opc) {
- const SingleUseExceptionInfo *Info = getSingleUseExceptionHelper(Opc);
- return Info && Info->IsInvalidSingleUseProducer;
-}
-
bool isFP8DstSelInst(unsigned Opc) {
const FP8DstByteSelInfo *Info = getFP8DstByteSelHelper(Opc);
return Info ? Info->HasFP8DstByteSel : false;
diff --git a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h
index 35c080d..da37534 100644
--- a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h
+++ b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h
@@ -870,6 +870,8 @@ bool isInvalidSingleUseConsumerInst(unsigned Opc);
LLVM_READONLY
bool isInvalidSingleUseProducerInst(unsigned Opc);
+bool isDPMACCInstruction(unsigned Opc);
+
LLVM_READONLY
unsigned mapWMMA2AddrTo3AddrOpcode(unsigned Opc);
diff --git a/llvm/lib/Target/AMDGPU/VOP1Instructions.td b/llvm/lib/Target/AMDGPU/VOP1Instructions.td
index 33f2f9f..bd80505 100644
--- a/llvm/lib/Target/AMDGPU/VOP1Instructions.td
+++ b/llvm/lib/Target/AMDGPU/VOP1Instructions.td
@@ -252,7 +252,6 @@ def VOP_READFIRSTLANE : VOPProfile <[i32, i32, untyped, untyped]> {
def V_READFIRSTLANE_B32 : VOP1_Pseudo <"v_readfirstlane_b32", VOP_READFIRSTLANE,
[], 1> {
let isConvergent = 1;
- let IsInvalidSingleUseConsumer = 1;
}
foreach vt = Reg32Types.types in {
@@ -375,7 +374,6 @@ defm V_CLREXCP : VOP1Inst <"v_clrexcp", VOP_NO_EXT<VOP_NONE>>;
def VOP_MOVRELS : VOPProfile<[i32, i32, untyped, untyped]> {
let Src0RC32 = VRegSrc_32;
let Src0RC64 = VRegSrc_32;
- let IsInvalidSingleUseConsumer = 1;
}
// Special case because there are no true output operands. Hack vdst
@@ -419,12 +417,8 @@ class VOP_MOVREL<RegisterOperand Src1RC> : VOPProfile<[untyped, i32, untyped, un
let EmitDst = 1; // force vdst emission
}
-let IsInvalidSingleUseProducer = 1 in {
- def VOP_MOVRELD : VOP_MOVREL<VSrc_b32>;
- def VOP_MOVRELSD : VOP_MOVREL<VRegSrc_32> {
- let IsInvalidSingleUseConsumer = 1;
- }
-}
+def VOP_MOVRELD : VOP_MOVREL<VSrc_b32>;
+def VOP_MOVRELSD : VOP_MOVREL<VRegSrc_32>;
let SubtargetPredicate = HasMovrel, Uses = [M0, EXEC] in {
// v_movreld_b32 is a special case because the destination output
@@ -541,7 +535,6 @@ let SubtargetPredicate = isGFX9Plus in {
let Constraints = "$vdst = $src1, $vdst1 = $src0";
let DisableEncoding = "$vdst1,$src1";
let SchedRW = [Write64Bit, Write64Bit];
- let IsInvalidSingleUseConsumer = 1;
}
let isReMaterializable = 1 in
@@ -708,8 +701,6 @@ let SubtargetPredicate = isGFX10Plus in {
let Constraints = "$vdst = $src1, $vdst1 = $src0";
let DisableEncoding = "$vdst1,$src1";
let SchedRW = [Write64Bit, Write64Bit];
- let IsInvalidSingleUseConsumer = 1;
- let IsInvalidSingleUseProducer = 1;
}
} // End Uses = [M0]
} // End SubtargetPredicate = isGFX10Plus
@@ -743,10 +734,7 @@ let SubtargetPredicate = isGFX11Plus in {
}
// Restrict src0 to be VGPR
def V_PERMLANE64_B32 : VOP1_Pseudo<"v_permlane64_b32", VOP_MOVRELS,
- [], /*VOP1Only=*/ 1> {
- let IsInvalidSingleUseConsumer = 1;
- let IsInvalidSingleUseProducer = 1;
- }
+ [], /*VOP1Only=*/ 1>;
defm V_MOV_B16 : VOP1Inst_t16<"v_mov_b16", VOP_I16_I16>;
defm V_NOT_B16 : VOP1Inst_t16<"v_not_b16", VOP_I16_I16>;
defm V_CVT_I32_I16 : VOP1Inst_t16<"v_cvt_i32_i16", VOP_I32_I16>;
diff --git a/llvm/lib/Target/AMDGPU/VOP2Instructions.td b/llvm/lib/Target/AMDGPU/VOP2Instructions.td
index dd48607..52f7be3 100644
--- a/llvm/lib/Target/AMDGPU/VOP2Instructions.td
+++ b/llvm/lib/Target/AMDGPU/VOP2Instructions.td
@@ -788,12 +788,10 @@ defm V_SUBREV_U32 : VOP2Inst <"v_subrev_u32", VOP_I32_I32_I32_ARITH, null_frag,
} // End isCommutable = 1
// These are special and do not read the exec mask.
-let isConvergent = 1, Uses = []<Register>, IsInvalidSingleUseConsumer = 1 in {
+let isConvergent = 1, Uses = []<Register> in {
def V_READLANE_B32 : VOP2_Pseudo<"v_readlane_b32", VOP_READLANE, []>;
let IsNeverUniform = 1, Constraints = "$vdst = $vdst_in", DisableEncoding="$vdst_in" in {
-def V_WRITELANE_B32 : VOP2_Pseudo<"v_writelane_b32", VOP_WRITELANE, []> {
- let IsInvalidSingleUseProducer = 1;
- }
+def V_WRITELANE_B32 : VOP2_Pseudo<"v_writelane_b32", VOP_WRITELANE, []>;
} // End IsNeverUniform, $vdst = $vdst_in, DisableEncoding $vdst_in
} // End isConvergent = 1
diff --git a/llvm/lib/Target/AMDGPU/VOP3Instructions.td b/llvm/lib/Target/AMDGPU/VOP3Instructions.td
index 466114b..20beb41 100644
--- a/llvm/lib/Target/AMDGPU/VOP3Instructions.td
+++ b/llvm/lib/Target/AMDGPU/VOP3Instructions.td
@@ -157,12 +157,12 @@ defm V_MAX_F64 : VOP3Inst <"v_max_f64", VOP3_Profile<VOP_F64_F64_F64>, fmaxnum_l
} // End SubtargetPredicate = isNotGFX12Plus
} // End SchedRW = [WriteDoubleAdd]
-let SchedRW = [WriteIntMul], IsInvalidSingleUseConsumer = 1 in {
+let SchedRW = [WriteIntMul] in {
defm V_MUL_LO_U32 : VOP3Inst <"v_mul_lo_u32", V_MUL_PROF<VOP_I32_I32_I32>, DivergentBinFrag<mul>>;
defm V_MUL_HI_U32 : VOP3Inst <"v_mul_hi_u32", V_MUL_PROF<VOP_I32_I32_I32>, mulhu>;
defm V_MUL_LO_I32 : VOP3Inst <"v_mul_lo_i32", V_MUL_PROF<VOP_I32_I32_I32>>;
defm V_MUL_HI_I32 : VOP3Inst <"v_mul_hi_i32", V_MUL_PROF<VOP_I32_I32_I32>, mulhs>;
-} // End SchedRW = [WriteIntMul], IsInvalidSingleUseConsumer = 1
+} // End SchedRW = [WriteIntMul]
let SubtargetPredicate = isGFX12Plus, ReadsModeReg = 0 in {
defm V_MINIMUM_F32 : VOP3Inst <"v_minimum_f32", VOP3_Profile<VOP_F32_F32_F32>, DivergentBinFrag<fminimum>>;
@@ -260,9 +260,9 @@ let mayRaiseFPException = 0 in { // Seems suspicious but manual doesn't say it d
let isReMaterializable = 1 in
defm V_MSAD_U8 : VOP3Inst <"v_msad_u8", VOP3_Profile<VOP_I32_I32_I32_I32, VOP3_CLAMP>>;
-let Constraints = "@earlyclobber $vdst", IsInvalidSingleUseConsumer = 1 in {
+let Constraints = "@earlyclobber $vdst" in {
defm V_MQSAD_PK_U16_U8 : VOP3Inst <"v_mqsad_pk_u16_u8", VOP3_Profile<VOP_I64_I64_I32_I64, VOP3_CLAMP>>;
-} // End Constraints = "@earlyclobber $vdst", IsInvalidSingleUseConsumer = 1
+} // End Constraints = "@earlyclobber $vdst"
let isReMaterializable = 1 in {
@@ -277,16 +277,14 @@ let SchedRW = [Write64Bit] in {
defm V_ASHR_I64 : VOP3Inst <"v_ashr_i64", VOP3_Profile<VOP_I64_I64_I32>, csra_64>;
} // End SubtargetPredicate = isGFX6GFX7
- let IsInvalidSingleUseConsumer = 1 in {
let SubtargetPredicate = isGFX8Plus in {
defm V_LSHRREV_B64 : VOP3Inst <"v_lshrrev_b64", VOP3_Profile<VOP_I64_I32_I64>, clshr_rev_64>;
defm V_ASHRREV_I64 : VOP3Inst <"v_ashrrev_i64", VOP3_Profile<VOP_I64_I32_I64>, cashr_rev_64>;
- } // End SubtargetPredicate = isGFX8Plus, , IsInvalidSingleUseConsumer = 1
+ } // End SubtargetPredicate = isGFX8Plus
let SubtargetPredicate = isGFX8GFX9GFX10GFX11 in {
defm V_LSHLREV_B64 : VOP3Inst <"v_lshlrev_b64", VOP3_Profile<VOP_I64_I32_I64>, clshl_rev_64>;
} // End SubtargetPredicate = isGFX8GFX9GFX10GFX11
- } // End IsInvalidSingleUseConsumer = 1
} // End SchedRW = [Write64Bit]
} // End isReMaterializable = 1
@@ -311,14 +309,14 @@ def VOPProfileMQSAD : VOP3_Profile<VOP_V4I32_I64_I32_V4I32, VOP3_CLAMP> {
let HasModifiers = 0;
}
-let SubtargetPredicate = isGFX7Plus, IsInvalidSingleUseConsumer = 1 in {
+let SubtargetPredicate = isGFX7Plus in {
let Constraints = "@earlyclobber $vdst", SchedRW = [WriteQuarterRate32] in {
defm V_QSAD_PK_U16_U8 : VOP3Inst <"v_qsad_pk_u16_u8", VOP3_Profile<VOP_I64_I64_I32_I64, VOP3_CLAMP>>;
defm V_MQSAD_U32_U8 : VOP3Inst <"v_mqsad_u32_u8", VOPProfileMQSAD>;
} // End Constraints = "@earlyclobber $vdst", SchedRW = [WriteQuarterRate32]
-} // End SubtargetPredicate = isGFX7Plus, IsInvalidSingleUseConsumer = 1
+} // End SubtargetPredicate = isGFX7Plus
-let isCommutable = 1, SchedRW = [WriteIntMul, WriteSALU], IsInvalidSingleUseConsumer = 1 in {
+let isCommutable = 1, SchedRW = [WriteIntMul, WriteSALU] in {
let SubtargetPredicate = isGFX7Plus, OtherPredicates = [HasNotMADIntraFwdBug] in {
defm V_MAD_U64_U32 : VOP3Inst <"v_mad_u64_u32", VOP3b_I64_I1_I32_I32_I64>;
defm V_MAD_I64_I32 : VOP3Inst <"v_mad_i64_i32", VOP3b_I64_I1_I32_I32_I64>;
@@ -328,7 +326,7 @@ let isCommutable = 1, SchedRW = [WriteIntMul, WriteSALU], IsInvalidSingleUseCons
defm V_MAD_U64_U32_gfx11 : VOP3Inst <"v_mad_u64_u32", VOP3b_I64_I1_I32_I32_I64>;
defm V_MAD_I64_I32_gfx11 : VOP3Inst <"v_mad_i64_i32", VOP3b_I64_I1_I32_I32_I64>;
}
-} // End isCommutable = 1, SchedRW = [WriteIntMul, WriteSALU], IsInvalidSingleUseConsumer = 1
+} // End isCommutable = 1, SchedRW = [WriteIntMul, WriteSALU]
let FPDPRounding = 1 in {
@@ -865,10 +863,10 @@ let SubtargetPredicate = isGFX10Plus in {
} // End isCommutable = 1, isReMaterializable = 1
def : ThreeOp_i32_Pats<xor, xor, V_XOR3_B32_e64>;
- let Constraints = "$vdst = $vdst_in", DisableEncoding="$vdst_in", IsInvalidSingleUseConsumer = 1, IsInvalidSingleUseProducer = 1 in {
+ let Constraints = "$vdst = $vdst_in", DisableEncoding="$vdst_in" in {
defm V_PERMLANE16_B32 : VOP3Inst<"v_permlane16_b32", VOP3_PERMLANE_Profile>;
defm V_PERMLANEX16_B32 : VOP3Inst<"v_permlanex16_b32", VOP3_PERMLANE_Profile>;
- } // End $vdst = $vdst_in, DisableEncoding $vdst_in, IsInvalidSingleUseConsumer = 1, IsInvalidSingleUseProducer = 1
+ } // End $vdst = $vdst_in, DisableEncoding $vdst_in
foreach vt = Reg32Types.types in {
def : PermlanePat<int_amdgcn_permlane16, V_PERMLANE16_B32_e64, vt>;
@@ -1286,12 +1284,11 @@ let AssemblerPredicate = isGFX10Only, DecoderNamespace = "GFX10" in {
}
} // End AssemblerPredicate = isGFX10Only, DecoderNamespace = "GFX10"
-let IsInvalidSingleUseConsumer = 1 in {
- defm V_READLANE_B32 : VOP3_Real_No_Suffix_gfx10<0x360>;
- let InOperandList = (ins SSrcOrLds_b32:$src0, SCSrc_b32:$src1, VGPR_32:$vdst_in), IsInvalidSingleUseProducer = 1 in {
- defm V_WRITELANE_B32 : VOP3_Real_No_Suffix_gfx10<0x361>;
- } // End InOperandList = (ins SSrcOrLds_b32:$src0, SCSrc_b32: $src1, VGPR_32:$vdst_in), IsInvalidSingleUseProducer = 1
-} // End IsInvalidSingleUseConsumer = 1
+defm V_READLANE_B32 : VOP3_Real_No_Suffix_gfx10<0x360>;
+
+let InOperandList = (ins SSrcOrLds_b32:$src0, SCSrc_b32:$src1, VGPR_32:$vdst_in) in {
+ defm V_WRITELANE_B32 : VOP3_Real_No_Suffix_gfx10<0x361>;
+} // End InOperandList = (ins SSrcOrLds_b32:$src0, SCSrc_b32:$src1, VGPR_32:$vdst_in)
let SubtargetPredicate = isGFX10Before1030 in {
defm V_MUL_LO_I32 : VOP3_Real_gfx10<0x16b>;
diff --git a/llvm/lib/Target/AMDGPU/VOP3PInstructions.td b/llvm/lib/Target/AMDGPU/VOP3PInstructions.td
index f4d2c29..5eee718 100644
--- a/llvm/lib/Target/AMDGPU/VOP3PInstructions.td
+++ b/llvm/lib/Target/AMDGPU/VOP3PInstructions.td
@@ -382,19 +382,15 @@ defm V_DOT2_F32_F16 : VOP3PInst<"v_dot2_f32_f16",
AMDGPUfdot2, 1/*ExplicitClamp*/>;
let OtherPredicates = [HasDot7Insts] in {
-let IsInvalidSingleUseConsumer = 1 in {
- defm V_DOT4_U32_U8 : VOP3PInst<"v_dot4_u32_u8",
- VOP3P_Profile<VOP_I32_I32_I32_I32, VOP3_PACKED>, int_amdgcn_udot4, 1>;
-}
+defm V_DOT4_U32_U8 : VOP3PInst<"v_dot4_u32_u8",
+ VOP3P_Profile<VOP_I32_I32_I32_I32, VOP3_PACKED>, int_amdgcn_udot4, 1>;
defm V_DOT8_U32_U4 : VOP3PInst<"v_dot8_u32_u4",
VOP3P_Profile<VOP_I32_I32_I32_I32, VOP3_PACKED>, int_amdgcn_udot8, 1>;
} // End OtherPredicates = [HasDot7Insts]
let OtherPredicates = [HasDot1Insts] in {
-let IsInvalidSingleUseConsumer = 1 in {
- defm V_DOT4_I32_I8 : VOP3PInst<"v_dot4_i32_i8",
- VOP3P_Profile<VOP_I32_I32_I32_I32, VOP3_PACKED>, int_amdgcn_sdot4, 1>;
-}
+defm V_DOT4_I32_I8 : VOP3PInst<"v_dot4_i32_i8",
+ VOP3P_Profile<VOP_I32_I32_I32_I32, VOP3_PACKED>, int_amdgcn_sdot4, 1>;
defm V_DOT8_I32_I4 : VOP3PInst<"v_dot8_i32_i4",
VOP3P_Profile<VOP_I32_I32_I32_I32, VOP3_PACKED>, int_amdgcn_sdot8, 1>;
} // End OtherPredicates = [HasDot1Insts]
diff --git a/llvm/lib/Target/AMDGPU/VOPCInstructions.td b/llvm/lib/Target/AMDGPU/VOPCInstructions.td
index be862b4..d6e08dc 100644
--- a/llvm/lib/Target/AMDGPU/VOPCInstructions.td
+++ b/llvm/lib/Target/AMDGPU/VOPCInstructions.td
@@ -464,10 +464,9 @@ multiclass VOPC_I16 <string opName, SDPatternOperator cond = COND_NULL,
multiclass VOPC_I32 <string opName, SDPatternOperator cond = COND_NULL, string revOp = opName> :
VOPC_Pseudos <opName, VOPC_I1_I32_I32, cond, revOp, 0>;
-let IsInvalidSingleUseConsumer = 1 in {
- multiclass VOPC_I64 <string opName, SDPatternOperator cond = COND_NULL, string revOp = opName> :
- VOPC_Pseudos <opName, VOPC_I1_I64_I64, cond, revOp, 0>;
-}
+multiclass VOPC_I64 <string opName, SDPatternOperator cond = COND_NULL, string revOp = opName> :
+ VOPC_Pseudos <opName, VOPC_I1_I64_I64, cond, revOp, 0>;
+
multiclass VOPCX_F16<string opName, string revOp = opName> {
let OtherPredicates = [Has16BitInsts], True16Predicate = NotHasTrue16BitInsts in {
@@ -502,10 +501,8 @@ multiclass VOPCX_I16<string opName, string revOp = opName> {
multiclass VOPCX_I32 <string opName, string revOp = opName> :
VOPCX_Pseudos <opName, VOPC_I1_I32_I32, VOPC_I32_I32, COND_NULL, revOp>;
-let IsInvalidSingleUseConsumer = 1 in {
- multiclass VOPCX_I64 <string opName, string revOp = opName> :
- VOPCX_Pseudos <opName, VOPC_I1_I64_I64, VOPC_I64_I64, COND_NULL, revOp>;
-}
+multiclass VOPCX_I64 <string opName, string revOp = opName> :
+ VOPCX_Pseudos <opName, VOPC_I1_I64_I64, VOPC_I64_I64, COND_NULL, revOp>;
//===----------------------------------------------------------------------===//
diff --git a/llvm/lib/Target/AMDGPU/VOPInstructions.td b/llvm/lib/Target/AMDGPU/VOPInstructions.td
index 5a460ef..05a7d90 100644
--- a/llvm/lib/Target/AMDGPU/VOPInstructions.td
+++ b/llvm/lib/Target/AMDGPU/VOPInstructions.td
@@ -17,8 +17,6 @@ class LetDummies {
bit isReMaterializable;
bit isAsCheapAsAMove;
bit FPDPRounding;
- bit IsInvalidSingleUseConsumer;
- bit IsInvalidSingleUseProducer;
Predicate SubtargetPredicate;
string Constraints;
string DisableEncoding;
@@ -67,8 +65,6 @@ class VOP_Pseudo <string opName, string suffix, VOPProfile P, dag outs, dag ins,
string Mnemonic = opName;
Instruction Opcode = !cast<Instruction>(NAME);
bit IsTrue16 = P.IsTrue16;
- bit IsInvalidSingleUseConsumer = P.IsInvalidSingleUseConsumer;
- bit IsInvalidSingleUseProducer = P.IsInvalidSingleUseProducer;
VOPProfile Pfl = P;
string AsmOperands;
@@ -165,8 +161,6 @@ class VOP3P_Pseudo <string opName, VOPProfile P, list<dag> pattern = []> :
class VOP_Real<VOP_Pseudo ps> {
Instruction Opcode = !cast<Instruction>(NAME);
bit IsSingle = ps.Pfl.IsSingle;
- bit IsInvalidSingleUseConsumer = ps.Pfl.IsInvalidSingleUseConsumer;
- bit IsInvalidSingleUseProducer = ps.Pfl.IsInvalidSingleUseProducer;
}
class VOP3_Real <VOP_Pseudo ps, int EncodingFamily, string asm_name = ps.Mnemonic> :
@@ -844,9 +838,6 @@ class VOP_DPP_Pseudo <string OpName, VOPProfile P, list<dag> pattern=[],
let Constraints = !if(P.NumSrcArgs, P.TieRegDPP # " = $vdst", "");
let DisableEncoding = !if(P.NumSrcArgs, P.TieRegDPP, "");
let DecoderNamespace = "GFX8";
-
- let IsInvalidSingleUseConsumer = !not(VINTERP);
- let IsInvalidSingleUseProducer = !not(VINTERP);
}
class VOP3_DPP_Pseudo <string OpName, VOPProfile P> :
@@ -1714,13 +1705,4 @@ def VOPTrue16Table : GenericTable {
let PrimaryKey = ["Opcode"];
let PrimaryKeyName = "getTrue16OpcodeHelper";
-}
-
-def SingleUseExceptionTable : GenericTable {
- let FilterClass = "VOP_Pseudo";
- let CppTypeName = "SingleUseExceptionInfo";
- let Fields = ["Opcode", "IsInvalidSingleUseConsumer", "IsInvalidSingleUseProducer"];
-
- let PrimaryKey = ["Opcode"];
- let PrimaryKeyName = "getSingleUseExceptionHelper";
-}
+}
\ No newline at end of file
diff --git a/llvm/lib/Target/ARM/ARMExpandPseudoInsts.cpp b/llvm/lib/Target/ARM/ARMExpandPseudoInsts.cpp
index fe26d6c..5be9d73 100644
--- a/llvm/lib/Target/ARM/ARMExpandPseudoInsts.cpp
+++ b/llvm/lib/Target/ARM/ARMExpandPseudoInsts.cpp
@@ -2178,12 +2178,13 @@ bool ARMExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
} else {
// Use move to satisfy constraints
unsigned MoveOpc = Opcode == ARM::VBSPd ? ARM::VORRd : ARM::VORRq;
+ unsigned MO1Flags = getRegState(MI.getOperand(1)) & ~RegState::Kill;
BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(MoveOpc))
.addReg(DstReg,
RegState::Define |
getRenamableRegState(MI.getOperand(0).isRenamable()))
- .add(MI.getOperand(1))
- .add(MI.getOperand(1))
+ .addReg(MI.getOperand(1).getReg(), MO1Flags)
+ .addReg(MI.getOperand(1).getReg(), MO1Flags)
.addImm(MI.getOperand(4).getImm())
.add(MI.getOperand(5));
BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(NewOpc))
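
Aside on the VBSP hunk above: the old .add(MI.getOperand(1)) duplicated the full operand state, so a kill flag on the source ended up on both new uses, which the machine verifier rejects (a register may be killed at most once per instruction). A minimal sketch of the fixed pattern, using LLVM's MachineInstrBuilder API with illustrative names (MBB, MBBI, SrcReg, DstReg):

    // Copy the operand's register state, then strip Kill before reusing it
    // for two .addReg() calls; neither duplicated use may claim the kill.
    unsigned Flags = getRegState(MI.getOperand(1)) & ~RegState::Kill;
    BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(ARM::VORRq), DstReg)
        .addReg(SrcReg, Flags)
        .addReg(SrcReg, Flags);
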
diff --git a/llvm/lib/Target/ARM/ARMFeatures.td b/llvm/lib/Target/ARM/ARMFeatures.td
index 8b0ade5..dc0e86c 100644
--- a/llvm/lib/Target/ARM/ARMFeatures.td
+++ b/llvm/lib/Target/ARM/ARMFeatures.td
@@ -375,6 +375,9 @@ def FeaturePref32BitThumb : SubtargetFeature<"32bit", "Prefers32BitThumb", "true
def FeaturePrefLoopAlign32 : SubtargetFeature<"loop-align", "PrefLoopLogAlignment","2",
"Prefer 32-bit alignment for loops">;
+def FeaturePrefLoopAlign64 : SubtargetFeature<"loop-align-64", "PrefLoopLogAlignment","3",
+ "Prefer 64-bit alignment for loops">;
+
def FeatureMVEVectorCostFactor1 : SubtargetFeature<"mve1beat", "MVEVectorCostFactor", "4",
"Model MVE instructions as a 1 beat per tick architecture">;
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index db564d7..a03928b 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -15131,9 +15131,9 @@ static SDValue PerformVMOVRRDCombine(SDNode *N,
SDValue Op0, Op1;
while (BV.getOpcode() == ISD::INSERT_VECTOR_ELT) {
if (isa<ConstantSDNode>(BV.getOperand(2))) {
- if (BV.getConstantOperandVal(2) == Offset)
+ if (BV.getConstantOperandVal(2) == Offset && !Op0)
Op0 = BV.getOperand(1);
- if (BV.getConstantOperandVal(2) == Offset + 1)
+ if (BV.getConstantOperandVal(2) == Offset + 1 && !Op1)
Op1 = BV.getOperand(1);
}
BV = BV.getOperand(0);
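
Reading of the guards added above (an interpretation, not the author's wording): PerformVMOVRRDCombine walks the insert_vector_elt chain from the newest insert toward the oldest, so the first value captured per lane is the live one; without the !Op0/!Op1 checks, an older insert into the same lane would overwrite the value already taken. A self-contained simulation of that scan:

    #include <optional>
    #include <utility>
    #include <vector>

    // Scan (lane, value) inserts ordered newest-first; the first match per
    // lane wins, mirroring the guarded captures of Op0/Op1 in the combine.
    std::optional<int>
    liveValueForLane(const std::vector<std::pair<int, int>> &NewestFirst,
                     int Lane) {
      std::optional<int> V;
      for (const auto &[L, Val] : NewestFirst)
        if (L == Lane && !V) // keep only the newest definition for the lane
          V = Val;
      return V;
    }
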
diff --git a/llvm/lib/Target/ARM/ARMLowOverheadLoops.cpp b/llvm/lib/Target/ARM/ARMLowOverheadLoops.cpp
index a4598de..e7dfbfb 100644
--- a/llvm/lib/Target/ARM/ARMLowOverheadLoops.cpp
+++ b/llvm/lib/Target/ARM/ARMLowOverheadLoops.cpp
@@ -144,10 +144,9 @@ namespace {
void ProcessLoop() {
std::function<void(MachineBasicBlock *)> Search =
[this, &Search](MachineBasicBlock *MBB) -> void {
- if (Visited.count(MBB))
+ if (!Visited.insert(MBB).second)
return;
- Visited.insert(MBB);
for (auto *Succ : MBB->successors()) {
if (!ML.contains(Succ))
continue;
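
The Visited change above is the standard set-insertion idiom: insert() already reports whether the element was new, so the separate count() lookup is redundant. A minimal sketch:

    #include <set>

    std::set<int> Visited;

    bool markVisited(int Node) {
      // insert() returns {iterator, bool}; .second is false when Node was
      // already present, folding count()+insert() into a single lookup.
      return Visited.insert(Node).second;
    }
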
diff --git a/llvm/lib/Target/ARM/ARMProcessors.td b/llvm/lib/Target/ARM/ARMProcessors.td
index e4e122a0..a66a2c0 100644
--- a/llvm/lib/Target/ARM/ARMProcessors.td
+++ b/llvm/lib/Target/ARM/ARMProcessors.td
@@ -344,6 +344,7 @@ def : ProcessorModel<"cortex-m4", CortexM4Model, [ARMv7em,
def : ProcessorModel<"cortex-m7", CortexM7Model, [ARMv7em,
ProcM7,
FeatureFPARMv8_D16,
+ FeaturePrefLoopAlign64,
FeatureUseMIPipeliner,
FeatureUseMISched]>;
@@ -385,6 +386,7 @@ def : ProcessorModel<"cortex-m85", CortexM85Model, [ARMv81mMainline,
FeatureDSP,
FeatureFPARMv8_D16,
FeaturePACBTI,
+ FeaturePrefLoopAlign64,
FeatureUseMISched,
HasMVEFloatOps]>;
diff --git a/llvm/lib/Target/BPF/BPFInstrInfo.td b/llvm/lib/Target/BPF/BPFInstrInfo.td
index f7e1790..62d6e25 100644
--- a/llvm/lib/Target/BPF/BPFInstrInfo.td
+++ b/llvm/lib/Target/BPF/BPFInstrInfo.td
@@ -826,13 +826,12 @@ let Predicates = [BPFNoALU32] in {
}
// Atomic Fetch-and-<add, and, or, xor> operations
-class XFALU64<BPFWidthModifer SizeOp, BPFArithOp Opc, string OpcodeStr,
- string OpcStr, PatFrag OpNode>
+class XFALU64<BPFWidthModifer SizeOp, BPFArithOp Opc, string OpcodeStr, string OpcStr>
: TYPE_LD_ST<BPF_ATOMIC.Value, SizeOp.Value,
(outs GPR:$dst),
(ins MEMri:$addr, GPR:$val),
"$dst = atomic_fetch_"#OpcStr#"(("#OpcodeStr#" *)($addr), $val)",
- [(set GPR:$dst, (OpNode ADDRri:$addr, GPR:$val))]> {
+ []> {
bits<4> dst;
bits<20> addr;
@@ -844,13 +843,12 @@ class XFALU64<BPFWidthModifer SizeOp, BPFArithOp Opc, string OpcodeStr,
let BPFClass = BPF_STX;
}
-class XFALU32<BPFWidthModifer SizeOp, BPFArithOp Opc, string OpcodeStr,
- string OpcStr, PatFrag OpNode>
+class XFALU32<BPFWidthModifer SizeOp, BPFArithOp Opc, string OpcodeStr, string OpcStr>
: TYPE_LD_ST<BPF_ATOMIC.Value, SizeOp.Value,
(outs GPR32:$dst),
(ins MEMri:$addr, GPR32:$val),
"$dst = atomic_fetch_"#OpcStr#"(("#OpcodeStr#" *)($addr), $val)",
- [(set GPR32:$dst, (OpNode ADDRri:$addr, GPR32:$val))]> {
+ []> {
bits<4> dst;
bits<20> addr;
@@ -864,26 +862,122 @@ class XFALU32<BPFWidthModifer SizeOp, BPFArithOp Opc, string OpcodeStr,
let Constraints = "$dst = $val" in {
let Predicates = [BPFHasALU32], DecoderNamespace = "BPFALU32" in {
- def XFADDW32 : XFALU32<BPF_W, BPF_ADD, "u32", "add", atomic_load_add_i32>;
- def XFANDW32 : XFALU32<BPF_W, BPF_AND, "u32", "and", atomic_load_and_i32>;
- def XFORW32 : XFALU32<BPF_W, BPF_OR, "u32", "or", atomic_load_or_i32>;
- def XFXORW32 : XFALU32<BPF_W, BPF_XOR, "u32", "xor", atomic_load_xor_i32>;
+ def XFADDW32 : XFALU32<BPF_W, BPF_ADD, "u32", "add">;
+ def XFANDW32 : XFALU32<BPF_W, BPF_AND, "u32", "and">;
+ def XFORW32 : XFALU32<BPF_W, BPF_OR, "u32", "or">;
+ def XFXORW32 : XFALU32<BPF_W, BPF_XOR, "u32", "xor">;
}
let Predicates = [BPFHasALU32] in {
- def XFADDD : XFALU64<BPF_DW, BPF_ADD, "u64", "add", atomic_load_add_i64>;
+ def XFADDD : XFALU64<BPF_DW, BPF_ADD, "u64", "add">;
}
- def XFANDD : XFALU64<BPF_DW, BPF_AND, "u64", "and", atomic_load_and_i64>;
- def XFORD : XFALU64<BPF_DW, BPF_OR, "u64", "or", atomic_load_or_i64>;
- def XFXORD : XFALU64<BPF_DW, BPF_XOR, "u64", "xor", atomic_load_xor_i64>;
+ def XFANDD : XFALU64<BPF_DW, BPF_AND, "u64", "and">;
+ def XFORD : XFALU64<BPF_DW, BPF_OR, "u64", "or">;
+ def XFXORD : XFALU64<BPF_DW, BPF_XOR, "u64", "xor">;
}
-// atomic_load_sub can be represented as a neg followed
-// by an atomic_load_add.
-def : Pat<(atomic_load_sub_i32 ADDRri:$addr, GPR32:$val),
- (XFADDW32 ADDRri:$addr, (NEG_32 GPR32:$val))>;
-def : Pat<(atomic_load_sub_i64 ADDRri:$addr, GPR:$val),
- (XFADDD ADDRri:$addr, (NEG_64 GPR:$val))>;
+let Predicates = [BPFHasALU32] in {
+ foreach P = [// add
+ [atomic_load_add_i32_monotonic, XADDW32],
+ [atomic_load_add_i32_acquire, XFADDW32],
+ [atomic_load_add_i32_release, XFADDW32],
+ [atomic_load_add_i32_acq_rel, XFADDW32],
+ [atomic_load_add_i32_seq_cst, XFADDW32],
+ // and
+ [atomic_load_and_i32_monotonic, XANDW32],
+ [atomic_load_and_i32_acquire, XFANDW32],
+ [atomic_load_and_i32_release, XFANDW32],
+ [atomic_load_and_i32_acq_rel, XFANDW32],
+ [atomic_load_and_i32_seq_cst, XFANDW32],
+ // or
+ [atomic_load_or_i32_monotonic, XORW32],
+ [atomic_load_or_i32_acquire, XFORW32],
+ [atomic_load_or_i32_release, XFORW32],
+ [atomic_load_or_i32_acq_rel, XFORW32],
+ [atomic_load_or_i32_seq_cst, XFORW32],
+ // xor
+ [atomic_load_xor_i32_monotonic, XXORW32],
+ [atomic_load_xor_i32_acquire, XFXORW32],
+ [atomic_load_xor_i32_release, XFXORW32],
+ [atomic_load_xor_i32_acq_rel, XFXORW32],
+ [atomic_load_xor_i32_seq_cst, XFXORW32],
+ ] in {
+ def : Pat<(P[0] ADDRri:$addr, GPR32:$val), (P[1] ADDRri:$addr, GPR32:$val)>;
+ }
+
+ // atomic_load_sub can be represented as a neg followed
+ // by an atomic_load_add.
+ foreach P = [[atomic_load_sub_i32_monotonic, XADDW32],
+ [atomic_load_sub_i32_acquire, XFADDW32],
+ [atomic_load_sub_i32_release, XFADDW32],
+ [atomic_load_sub_i32_acq_rel, XFADDW32],
+ [atomic_load_sub_i32_seq_cst, XFADDW32],
+ ] in {
+ def : Pat<(P[0] ADDRri:$addr, GPR32:$val), (P[1] ADDRri:$addr, (NEG_32 GPR32:$val))>;
+ }
+
+ foreach P = [// add
+ [atomic_load_add_i64_monotonic, XADDD],
+ [atomic_load_add_i64_acquire, XFADDD],
+ [atomic_load_add_i64_release, XFADDD],
+ [atomic_load_add_i64_acq_rel, XFADDD],
+ [atomic_load_add_i64_seq_cst, XFADDD],
+ ] in {
+ def : Pat<(P[0] ADDRri:$addr, GPR:$val), (P[1] ADDRri:$addr, GPR:$val)>;
+ }
+}
+
+foreach P = [[atomic_load_sub_i64_monotonic, XADDD],
+ [atomic_load_sub_i64_acquire, XFADDD],
+ [atomic_load_sub_i64_release, XFADDD],
+ [atomic_load_sub_i64_acq_rel, XFADDD],
+ [atomic_load_sub_i64_seq_cst, XFADDD],
+ ] in {
+ def : Pat<(P[0] ADDRri:$addr, GPR:$val), (P[1] ADDRri:$addr, (NEG_64 GPR:$val))>;
+}
+
+// Borrow the idea from X86InstrFragments.td
+class binop_no_use<SDPatternOperator operator>
+ : PatFrag<(ops node:$A, node:$B),
+ (operator node:$A, node:$B),
+ [{ return SDValue(N, 0).use_empty(); }]>;
+
+class binop_has_use<SDPatternOperator operator>
+ : PatFrag<(ops node:$A, node:$B),
+ (operator node:$A, node:$B),
+ [{ return !SDValue(N, 0).use_empty(); }]>;
+
+foreach op = [add, and, or, xor] in {
+def atomic_load_ # op # _i64_monotonic_nu:
+ binop_no_use <!cast<SDPatternOperator>("atomic_load_"#op# _i64_monotonic)>;
+def atomic_load_ # op # _i64_monotonic_hu:
+ binop_has_use<!cast<SDPatternOperator>("atomic_load_"#op# _i64_monotonic)>;
+}
+
+foreach P = [// and
+ [atomic_load_and_i64_monotonic_nu, XANDD],
+ [atomic_load_and_i64_monotonic_hu, XFANDD],
+ [atomic_load_and_i64_acquire, XFANDD],
+ [atomic_load_and_i64_release, XFANDD],
+ [atomic_load_and_i64_acq_rel, XFANDD],
+ [atomic_load_and_i64_seq_cst, XFANDD],
+ // or
+ [atomic_load_or_i64_monotonic_nu, XORD],
+ [atomic_load_or_i64_monotonic_hu, XFORD],
+ [atomic_load_or_i64_acquire, XFORD],
+ [atomic_load_or_i64_release, XFORD],
+ [atomic_load_or_i64_acq_rel, XFORD],
+ [atomic_load_or_i64_seq_cst, XFORD],
+ // xor
+ [atomic_load_xor_i64_monotonic_nu, XXORD],
+ [atomic_load_xor_i64_monotonic_hu, XFXORD],
+ [atomic_load_xor_i64_acquire, XFXORD],
+ [atomic_load_xor_i64_release, XFXORD],
+ [atomic_load_xor_i64_acq_rel, XFXORD],
+ [atomic_load_xor_i64_seq_cst, XFXORD],
+ ] in {
+ def : Pat<(P[0] ADDRri:$addr, GPR:$val), (P[1] ADDRri:$addr, GPR:$val)>;
+}
// Atomic Exchange
class XCHG<BPFWidthModifer SizeOp, string OpcodeStr, PatFrag OpNode>
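
What the binop_no_use/binop_has_use split above buys, shown from the source side (an illustration assuming clang's usual lowering of std::atomic to these SelectionDAG nodes): a relaxed read-modify-write whose old value is ignored can select the plain lock-style instruction, while consuming the old value, or requesting a stronger ordering, selects the atomic_fetch_* form:

    #include <atomic>

    std::atomic<long> Flags;

    void clearBits(long Mask) {
      // Relaxed ordering, result unused: matches the *_monotonic_nu
      // fragment, so the non-fetching AND (XANDD) suffices.
      Flags.fetch_and(~Mask, std::memory_order_relaxed);
    }

    long clearBitsAndRead(long Mask) {
      // Old value consumed: matches *_monotonic_hu and must use the
      // fetching form (XFANDD).
      return Flags.fetch_and(~Mask, std::memory_order_relaxed);
    }
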
diff --git a/llvm/lib/Target/BPF/BPFMIChecking.cpp b/llvm/lib/Target/BPF/BPFMIChecking.cpp
index 24224f6..09635db 100644
--- a/llvm/lib/Target/BPF/BPFMIChecking.cpp
+++ b/llvm/lib/Target/BPF/BPFMIChecking.cpp
@@ -118,7 +118,7 @@ static bool hasLiveDefs(const MachineInstr &MI, const TargetRegisterInfo *TRI) {
RegIsGPR64 = GPR64RegClass->contains(MO.getReg());
if (!MO.isDead()) {
- // It is a GPR64 live Def, we are sure it is live. */
+ // It is a GPR64 live Def, we are sure it is live.
if (RegIsGPR64)
return true;
// It is a GPR32 live Def, we are unsure whether it is really dead due to
@@ -153,6 +153,10 @@ static bool hasLiveDefs(const MachineInstr &MI, const TargetRegisterInfo *TRI) {
}
void BPFMIPreEmitChecking::processAtomicInsts() {
+  // Only check for cpu versions 1 and 2; getHasJmp32() implies v3 or later.
+  if (MF->getSubtarget<BPFSubtarget>().getHasJmp32())
+    return;
+
for (MachineBasicBlock &MBB : *MF) {
for (MachineInstr &MI : MBB) {
if (MI.getOpcode() != BPF::XADDW && MI.getOpcode() != BPF::XADDD)
diff --git a/llvm/lib/Target/BPF/BTFDebug.cpp b/llvm/lib/Target/BPF/BTFDebug.cpp
index 4d847ab..9d6dee1 100644
--- a/llvm/lib/Target/BPF/BTFDebug.cpp
+++ b/llvm/lib/Target/BPF/BTFDebug.cpp
@@ -35,6 +35,15 @@ static const char *BTFKindStr[] = {
#include "llvm/DebugInfo/BTF/BTF.def"
};
+static const DIType *tryRemoveAtomicType(const DIType *Ty) {
+ if (!Ty)
+ return Ty;
+ auto DerivedTy = dyn_cast<DIDerivedType>(Ty);
+ if (DerivedTy && DerivedTy->getTag() == dwarf::DW_TAG_atomic_type)
+ return DerivedTy->getBaseType();
+ return Ty;
+}
+
/// Emit a BTF common type.
void BTFTypeBase::emitType(MCStreamer &OS) {
OS.AddComment(std::string(BTFKindStr[Kind]) + "(id = " + std::to_string(Id) +
@@ -90,7 +99,7 @@ void BTFTypeDerived::completeType(BTFDebug &BDebug) {
return;
// The base type for PTR/CONST/VOLATILE could be void.
- const DIType *ResolvedType = DTy->getBaseType();
+ const DIType *ResolvedType = tryRemoveAtomicType(DTy->getBaseType());
if (!ResolvedType) {
assert((Kind == BTF::BTF_KIND_PTR || Kind == BTF::BTF_KIND_CONST ||
Kind == BTF::BTF_KIND_VOLATILE) &&
@@ -305,7 +314,7 @@ void BTFTypeStruct::completeType(BTFDebug &BDebug) {
} else {
BTFMember.Offset = DDTy->getOffsetInBits();
}
- const auto *BaseTy = DDTy->getBaseType();
+ const auto *BaseTy = tryRemoveAtomicType(DDTy->getBaseType());
BTFMember.Type = BDebug.getTypeId(BaseTy);
Members.push_back(BTFMember);
}
@@ -342,7 +351,7 @@ void BTFTypeFuncProto::completeType(BTFDebug &BDebug) {
IsCompleted = true;
DITypeRefArray Elements = STy->getTypeArray();
- auto RetType = Elements[0];
+ auto RetType = tryRemoveAtomicType(Elements[0]);
BTFType.Type = RetType ? BDebug.getTypeId(RetType) : 0;
BTFType.NameOff = 0;
@@ -350,7 +359,7 @@ void BTFTypeFuncProto::completeType(BTFDebug &BDebug) {
// to represent the vararg, encode the NameOff/Type to be 0.
for (unsigned I = 1, N = Elements.size(); I < N; ++I) {
struct BTF::BTFParam Param;
- auto Element = Elements[I];
+ auto Element = tryRemoveAtomicType(Elements[I]);
if (Element) {
Param.NameOff = BDebug.addString(FuncArgNames[I]);
Param.Type = BDebug.getTypeId(Element);
@@ -483,7 +492,7 @@ void BTFTypeTypeTag::completeType(BTFDebug &BDebug) {
IsCompleted = true;
BTFType.NameOff = BDebug.addString(Tag);
if (DTy) {
- const DIType *ResolvedType = DTy->getBaseType();
+ const DIType *ResolvedType = tryRemoveAtomicType(DTy->getBaseType());
if (!ResolvedType)
BTFType.Type = 0;
else
@@ -800,6 +809,10 @@ void BTFDebug::visitDerivedType(const DIDerivedType *DTy, uint32_t &TypeId,
bool CheckPointer, bool SeenPointer) {
unsigned Tag = DTy->getTag();
+ if (Tag == dwarf::DW_TAG_atomic_type)
+ return visitTypeEntry(DTy->getBaseType(), TypeId, CheckPointer,
+ SeenPointer);
+
/// Try to avoid chasing pointees, esp. structure pointees which may
/// unnecessary bring in a lot of types.
if (CheckPointer && !SeenPointer) {
@@ -1444,8 +1457,10 @@ void BTFDebug::processGlobals(bool ProcessingMapDef) {
DIGlobal = GVE->getVariable();
if (SecName.starts_with(".maps"))
visitMapDefType(DIGlobal->getType(), GVTypeId);
- else
- visitTypeEntry(DIGlobal->getType(), GVTypeId, false, false);
+ else {
+ const DIType *Ty = tryRemoveAtomicType(DIGlobal->getType());
+ visitTypeEntry(Ty, GVTypeId, false, false);
+ }
break;
}
diff --git a/llvm/lib/Target/DirectX/CMakeLists.txt b/llvm/lib/Target/DirectX/CMakeLists.txt
index a9c5d81..7e0f8a1 100644
--- a/llvm/lib/Target/DirectX/CMakeLists.txt
+++ b/llvm/lib/Target/DirectX/CMakeLists.txt
@@ -22,7 +22,6 @@ add_llvm_target(DirectXCodeGen
DXContainerGlobals.cpp
DXILFinalizeLinkage.cpp
DXILIntrinsicExpansion.cpp
- DXILMetadata.cpp
DXILOpBuilder.cpp
DXILOpLowering.cpp
DXILPrepare.cpp
diff --git a/llvm/lib/Target/DirectX/DXContainerGlobals.cpp b/llvm/lib/Target/DirectX/DXContainerGlobals.cpp
index 839060b..2c11373 100644
--- a/llvm/lib/Target/DirectX/DXContainerGlobals.cpp
+++ b/llvm/lib/Target/DirectX/DXContainerGlobals.cpp
@@ -204,9 +204,9 @@ void DXContainerGlobals::addPipelineStateValidationInfo(
dxil::ModuleMetadataInfo &MMI =
getAnalysis<DXILMetadataAnalysisWrapperPass>().getModuleMetadata();
assert(MMI.EntryPropertyVec.size() == 1 ||
- MMI.ShaderStage == Triple::Library);
+ MMI.ShaderProfile == Triple::Library);
PSV.BaseData.ShaderStage =
- static_cast<uint8_t>(MMI.ShaderStage - Triple::Pixel);
+ static_cast<uint8_t>(MMI.ShaderProfile - Triple::Pixel);
addResourcesForPSV(M, PSV);
@@ -215,7 +215,7 @@ void DXContainerGlobals::addPipelineStateValidationInfo(
// TODO: Lots more stuff to do here!
//
// See issue https://github.com/llvm/llvm-project/issues/96674.
- switch (MMI.ShaderStage) {
+ switch (MMI.ShaderProfile) {
case Triple::Compute:
PSV.BaseData.NumThreadsX = MMI.EntryPropertyVec[0].NumThreadsX;
PSV.BaseData.NumThreadsY = MMI.EntryPropertyVec[0].NumThreadsY;
@@ -225,10 +225,10 @@ void DXContainerGlobals::addPipelineStateValidationInfo(
break;
}
- if (MMI.ShaderStage != Triple::Library)
+ if (MMI.ShaderProfile != Triple::Library)
PSV.EntryName = MMI.EntryPropertyVec[0].Entry->getName();
- PSV.finalize(MMI.ShaderStage);
+ PSV.finalize(MMI.ShaderProfile);
PSV.write(OS);
Constant *Constant =
ConstantDataArray::getString(M.getContext(), Data, /*AddNull*/ false);
diff --git a/llvm/lib/Target/DirectX/DXILMetadata.cpp b/llvm/lib/Target/DirectX/DXILMetadata.cpp
deleted file mode 100644
index 1f5759c..0000000
--- a/llvm/lib/Target/DirectX/DXILMetadata.cpp
+++ /dev/null
@@ -1,335 +0,0 @@
-//===- DXILMetadata.cpp - DXIL Metadata helper objects --------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-///
-/// \file This file contains helper objects for working with DXIL metadata.
-///
-//===----------------------------------------------------------------------===//
-
-#include "DXILMetadata.h"
-#include "llvm/IR/Constants.h"
-#include "llvm/IR/IRBuilder.h"
-#include "llvm/IR/Metadata.h"
-#include "llvm/IR/Module.h"
-#include "llvm/Support/VersionTuple.h"
-#include "llvm/TargetParser/Triple.h"
-
-using namespace llvm;
-using namespace llvm::dxil;
-
-ValidatorVersionMD::ValidatorVersionMD(Module &M)
- : Entry(M.getOrInsertNamedMetadata("dx.valver")) {}
-
-void ValidatorVersionMD::update(VersionTuple ValidatorVer) {
- auto &Ctx = Entry->getParent()->getContext();
- IRBuilder<> B(Ctx);
- Metadata *MDVals[2];
- MDVals[0] = ConstantAsMetadata::get(B.getInt32(ValidatorVer.getMajor()));
- MDVals[1] =
- ConstantAsMetadata::get(B.getInt32(ValidatorVer.getMinor().value_or(0)));
-
- if (isEmpty())
- Entry->addOperand(MDNode::get(Ctx, MDVals));
- else
- Entry->setOperand(0, MDNode::get(Ctx, MDVals));
-}
-
-bool ValidatorVersionMD::isEmpty() { return Entry->getNumOperands() == 0; }
-
-VersionTuple ValidatorVersionMD::getAsVersionTuple() {
- if (isEmpty())
- return VersionTuple(1, 0);
- auto *ValVerMD = cast<MDNode>(Entry->getOperand(0));
- auto *MajorMD = mdconst::extract<ConstantInt>(ValVerMD->getOperand(0));
- auto *MinorMD = mdconst::extract<ConstantInt>(ValVerMD->getOperand(1));
- return VersionTuple(MajorMD->getZExtValue(), MinorMD->getZExtValue());
-}
-
-static StringRef getShortShaderStage(Triple::EnvironmentType Env) {
- switch (Env) {
- case Triple::Pixel:
- return "ps";
- case Triple::Vertex:
- return "vs";
- case Triple::Geometry:
- return "gs";
- case Triple::Hull:
- return "hs";
- case Triple::Domain:
- return "ds";
- case Triple::Compute:
- return "cs";
- case Triple::Library:
- return "lib";
- case Triple::Mesh:
- return "ms";
- case Triple::Amplification:
- return "as";
- default:
- break;
- }
- llvm_unreachable("Unsupported environment for DXIL generation.");
- return "";
-}
-
-void dxil::createShaderModelMD(Module &M) {
- NamedMDNode *Entry = M.getOrInsertNamedMetadata("dx.shaderModel");
- Triple TT(M.getTargetTriple());
- VersionTuple Ver = TT.getOSVersion();
- LLVMContext &Ctx = M.getContext();
- IRBuilder<> B(Ctx);
-
- Metadata *Vals[3];
- Vals[0] = MDString::get(Ctx, getShortShaderStage(TT.getEnvironment()));
- Vals[1] = ConstantAsMetadata::get(B.getInt32(Ver.getMajor()));
- Vals[2] = ConstantAsMetadata::get(B.getInt32(Ver.getMinor().value_or(0)));
- Entry->addOperand(MDNode::get(Ctx, Vals));
-}
-
-void dxil::createDXILVersionMD(Module &M) {
- Triple TT(Triple::normalize(M.getTargetTriple()));
- VersionTuple Ver = TT.getDXILVersion();
- LLVMContext &Ctx = M.getContext();
- IRBuilder<> B(Ctx);
- NamedMDNode *Entry = M.getOrInsertNamedMetadata("dx.version");
- Metadata *Vals[2];
- Vals[0] = ConstantAsMetadata::get(B.getInt32(Ver.getMajor()));
- Vals[1] = ConstantAsMetadata::get(B.getInt32(Ver.getMinor().value_or(0)));
- Entry->addOperand(MDNode::get(Ctx, Vals));
-}
-
-static uint32_t getShaderStage(Triple::EnvironmentType Env) {
- return (uint32_t)Env - (uint32_t)llvm::Triple::Pixel;
-}
-
-namespace {
-
-struct EntryProps {
- Triple::EnvironmentType ShaderKind;
- // FIXME: support more shader profiles.
- // See https://github.com/llvm/llvm-project/issues/57927.
- struct {
- unsigned NumThreads[3];
- } CS;
-
- EntryProps(Function &F, Triple::EnvironmentType ModuleShaderKind)
- : ShaderKind(ModuleShaderKind) {
-
- if (ShaderKind == Triple::EnvironmentType::Library) {
- Attribute EntryAttr = F.getFnAttribute("hlsl.shader");
- StringRef EntryProfile = EntryAttr.getValueAsString();
- Triple T("", "", "", EntryProfile);
- ShaderKind = T.getEnvironment();
- }
-
- if (ShaderKind == Triple::EnvironmentType::Compute) {
- auto NumThreadsStr =
- F.getFnAttribute("hlsl.numthreads").getValueAsString();
- SmallVector<StringRef> NumThreads;
- NumThreadsStr.split(NumThreads, ',');
- assert(NumThreads.size() == 3 && "invalid numthreads");
- auto Zip =
- llvm::zip(NumThreads, MutableArrayRef<unsigned>(CS.NumThreads));
- for (auto It : Zip) {
- StringRef Str = std::get<0>(It);
- APInt V;
- [[maybe_unused]] bool Result = Str.getAsInteger(10, V);
- assert(!Result && "Failed to parse numthreads");
-
- unsigned &Num = std::get<1>(It);
- Num = V.getLimitedValue();
- }
- }
- }
-
- MDTuple *emitDXILEntryProps(uint64_t RawShaderFlag, LLVMContext &Ctx,
- bool IsLib) {
- std::vector<Metadata *> MDVals;
-
- if (RawShaderFlag != 0)
- appendShaderFlags(MDVals, RawShaderFlag, Ctx);
-
- // Add shader kind for lib entrys.
- if (IsLib && ShaderKind != Triple::EnvironmentType::Library)
- appendShaderKind(MDVals, Ctx);
-
- if (ShaderKind == Triple::EnvironmentType::Compute)
- appendNumThreads(MDVals, Ctx);
- // FIXME: support more props.
- // See https://github.com/llvm/llvm-project/issues/57948.
- return MDNode::get(Ctx, MDVals);
- }
-
- static MDTuple *emitEntryPropsForEmptyEntry(uint64_t RawShaderFlag,
- LLVMContext &Ctx) {
- if (RawShaderFlag == 0)
- return nullptr;
-
- std::vector<Metadata *> MDVals;
-
- appendShaderFlags(MDVals, RawShaderFlag, Ctx);
- // FIXME: support more props.
- // See https://github.com/llvm/llvm-project/issues/57948.
- return MDNode::get(Ctx, MDVals);
- }
-
-private:
- enum EntryPropsTag {
- ShaderFlagsTag = 0,
- GSStateTag,
- DSStateTag,
- HSStateTag,
- NumThreadsTag,
- AutoBindingSpaceTag,
- RayPayloadSizeTag,
- RayAttribSizeTag,
- ShaderKindTag,
- MSStateTag,
- ASStateTag,
- WaveSizeTag,
- EntryRootSigTag,
- };
-
- void appendNumThreads(std::vector<Metadata *> &MDVals, LLVMContext &Ctx) {
- MDVals.emplace_back(ConstantAsMetadata::get(
- ConstantInt::get(Type::getInt32Ty(Ctx), NumThreadsTag)));
-
- std::vector<Metadata *> NumThreadVals;
- for (auto Num : ArrayRef<unsigned>(CS.NumThreads))
- NumThreadVals.emplace_back(ConstantAsMetadata::get(
- ConstantInt::get(Type::getInt32Ty(Ctx), Num)));
- MDVals.emplace_back(MDNode::get(Ctx, NumThreadVals));
- }
-
- static void appendShaderFlags(std::vector<Metadata *> &MDVals,
- uint64_t RawShaderFlag, LLVMContext &Ctx) {
- MDVals.emplace_back(ConstantAsMetadata::get(
- ConstantInt::get(Type::getInt32Ty(Ctx), ShaderFlagsTag)));
- MDVals.emplace_back(ConstantAsMetadata::get(
- ConstantInt::get(Type::getInt64Ty(Ctx), RawShaderFlag)));
- }
-
- void appendShaderKind(std::vector<Metadata *> &MDVals, LLVMContext &Ctx) {
- MDVals.emplace_back(ConstantAsMetadata::get(
- ConstantInt::get(Type::getInt32Ty(Ctx), ShaderKindTag)));
- MDVals.emplace_back(ConstantAsMetadata::get(
- ConstantInt::get(Type::getInt32Ty(Ctx), getShaderStage(ShaderKind))));
- }
-};
-
-class EntryMD {
- Function &F;
- LLVMContext &Ctx;
- EntryProps Props;
-
-public:
- EntryMD(Function &F, Triple::EnvironmentType ModuleShaderKind)
- : F(F), Ctx(F.getContext()), Props(F, ModuleShaderKind) {}
-
- MDTuple *emitEntryTuple(MDTuple *Resources, uint64_t RawShaderFlag) {
- // FIXME: add signature for profile other than CS.
- // See https://github.com/llvm/llvm-project/issues/57928.
- MDTuple *Signatures = nullptr;
- return emitDXILEntryPointTuple(
- &F, F.getName().str(), Signatures, Resources,
- Props.emitDXILEntryProps(RawShaderFlag, Ctx, /*IsLib*/ false), Ctx);
- }
-
- MDTuple *emitEntryTupleForLib(uint64_t RawShaderFlag) {
- // FIXME: add signature for profile other than CS.
- // See https://github.com/llvm/llvm-project/issues/57928.
- MDTuple *Signatures = nullptr;
- return emitDXILEntryPointTuple(
- &F, F.getName().str(), Signatures,
- /*entry in lib doesn't need resources metadata*/ nullptr,
- Props.emitDXILEntryProps(RawShaderFlag, Ctx, /*IsLib*/ true), Ctx);
- }
-
- // Library will have empty entry metadata which only store the resource table
- // metadata.
- static MDTuple *emitEmptyEntryForLib(MDTuple *Resources,
- uint64_t RawShaderFlag,
- LLVMContext &Ctx) {
- return emitDXILEntryPointTuple(
- nullptr, "", nullptr, Resources,
- EntryProps::emitEntryPropsForEmptyEntry(RawShaderFlag, Ctx), Ctx);
- }
-
-private:
- static MDTuple *emitDXILEntryPointTuple(Function *Fn, const std::string &Name,
- MDTuple *Signatures,
- MDTuple *Resources,
- MDTuple *Properties,
- LLVMContext &Ctx) {
- Metadata *MDVals[5];
- MDVals[0] = Fn ? ValueAsMetadata::get(Fn) : nullptr;
- MDVals[1] = MDString::get(Ctx, Name.c_str());
- MDVals[2] = Signatures;
- MDVals[3] = Resources;
- MDVals[4] = Properties;
- return MDNode::get(Ctx, MDVals);
- }
-};
-} // namespace
-
-void dxil::createEntryMD(Module &M, const uint64_t ShaderFlags) {
- SmallVector<Function *> EntryList;
- for (auto &F : M.functions()) {
- if (!F.hasFnAttribute("hlsl.shader"))
- continue;
- EntryList.emplace_back(&F);
- }
-
- // If there are no entries, do nothing. This is mostly to allow for writing
- // tests with no actual entry functions.
- if (EntryList.empty())
- return;
-
- auto &Ctx = M.getContext();
- // FIXME: generate metadata for resource.
- // See https://github.com/llvm/llvm-project/issues/57926.
- MDTuple *MDResources = nullptr;
- if (auto *NamedResources = M.getNamedMetadata("dx.resources"))
- MDResources = dyn_cast<MDTuple>(NamedResources->getOperand(0));
-
- std::vector<MDNode *> Entries;
- Triple T = Triple(M.getTargetTriple());
- switch (T.getEnvironment()) {
- case Triple::EnvironmentType::Library: {
- // Add empty entry to put resource metadata.
- MDTuple *EmptyEntry =
- EntryMD::emitEmptyEntryForLib(MDResources, ShaderFlags, Ctx);
- Entries.emplace_back(EmptyEntry);
-
- for (Function *Entry : EntryList) {
- EntryMD MD(*Entry, T.getEnvironment());
- Entries.emplace_back(MD.emitEntryTupleForLib(0));
- }
- } break;
- case Triple::EnvironmentType::Compute:
- case Triple::EnvironmentType::Amplification:
- case Triple::EnvironmentType::Mesh:
- case Triple::EnvironmentType::Vertex:
- case Triple::EnvironmentType::Hull:
- case Triple::EnvironmentType::Domain:
- case Triple::EnvironmentType::Geometry:
- case Triple::EnvironmentType::Pixel: {
- assert(EntryList.size() == 1 &&
- "non-lib profiles should only have one entry");
- EntryMD MD(*EntryList.front(), T.getEnvironment());
- Entries.emplace_back(MD.emitEntryTuple(MDResources, ShaderFlags));
- } break;
- default:
- assert(0 && "invalid profile");
- break;
- }
-
- NamedMDNode *EntryPointsNamedMD =
- M.getOrInsertNamedMetadata("dx.entryPoints");
- for (auto *Entry : Entries)
- EntryPointsNamedMD->addOperand(Entry);
-}
diff --git a/llvm/lib/Target/DirectX/DXILMetadata.h b/llvm/lib/Target/DirectX/DXILMetadata.h
deleted file mode 100644
index e05db8d..0000000
--- a/llvm/lib/Target/DirectX/DXILMetadata.h
+++ /dev/null
@@ -1,43 +0,0 @@
-//===- DXILMetadata.h - DXIL Metadata helper objects ----------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-///
-/// \file This file contains helper objects for working with DXIL metadata.
-///
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_TARGET_DIRECTX_DXILMETADATA_H
-#define LLVM_TARGET_DIRECTX_DXILMETADATA_H
-
-#include <stdint.h>
-
-namespace llvm {
-class Module;
-class NamedMDNode;
-class VersionTuple;
-namespace dxil {
-
-class ValidatorVersionMD {
- NamedMDNode *Entry;
-
-public:
- ValidatorVersionMD(Module &M);
-
- void update(VersionTuple ValidatorVer);
-
- bool isEmpty();
- VersionTuple getAsVersionTuple();
-};
-
-void createShaderModelMD(Module &M);
-void createDXILVersionMD(Module &M);
-void createEntryMD(Module &M, const uint64_t ShaderFlags);
-
-} // namespace dxil
-} // namespace llvm
-
-#endif // LLVM_TARGET_DIRECTX_DXILMETADATA_H
diff --git a/llvm/lib/Target/DirectX/DXILPrepare.cpp b/llvm/lib/Target/DirectX/DXILPrepare.cpp
index b050240..6092cfb 100644
--- a/llvm/lib/Target/DirectX/DXILPrepare.cpp
+++ b/llvm/lib/Target/DirectX/DXILPrepare.cpp
@@ -11,7 +11,6 @@
/// Language (DXIL).
//===----------------------------------------------------------------------===//
-#include "DXILMetadata.h"
#include "DXILResourceAnalysis.h"
#include "DXILShaderFlags.h"
#include "DirectX.h"
@@ -174,8 +173,9 @@ public:
AttrMask.addAttribute(I);
}
- dxil::ValidatorVersionMD ValVerMD(M);
- VersionTuple ValVer = ValVerMD.getAsVersionTuple();
+ const dxil::ModuleMetadataInfo MetadataInfo =
+ getAnalysis<DXILMetadataAnalysisWrapperPass>().getModuleMetadata();
+ VersionTuple ValVer = MetadataInfo.ValidatorVersion;
bool SkipValidation = ValVer.getMajor() == 0 && ValVer.getMinor() == 0;
for (auto &F : M.functions()) {
@@ -247,6 +247,7 @@ public:
DXILPrepareModule() : ModulePass(ID) {}
void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.addRequired<DXILMetadataAnalysisWrapperPass>();
AU.addPreserved<ShaderFlagsAnalysisWrapper>();
AU.addPreserved<DXILResourceMDWrapper>();
AU.addPreserved<DXILMetadataAnalysisWrapperPass>();
@@ -260,6 +261,7 @@ char DXILPrepareModule::ID = 0;
INITIALIZE_PASS_BEGIN(DXILPrepareModule, DEBUG_TYPE, "DXIL Prepare Module",
false, false)
+INITIALIZE_PASS_DEPENDENCY(DXILMetadataAnalysisWrapperPass)
INITIALIZE_PASS_END(DXILPrepareModule, DEBUG_TYPE, "DXIL Prepare Module", false,
false)
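
The DXILPrepare hunks above show the two-part contract of the legacy pass manager: a pass that calls getAnalysis<X>() must both declare addRequired<X>() in getAnalysisUsage() and register the dependency with INITIALIZE_PASS_DEPENDENCY, or the analysis may not be scheduled before the pass runs. A condensed sketch of the pairing, with the names from the hunk:

    // In the pass class:
    void getAnalysisUsage(AnalysisUsage &AU) const override {
      AU.addRequired<DXILMetadataAnalysisWrapperPass>(); // schedule before us
    }

    // In the registration block of the same .cpp:
    //   INITIALIZE_PASS_BEGIN(DXILPrepareModule, DEBUG_TYPE, ..., false, false)
    //   INITIALIZE_PASS_DEPENDENCY(DXILMetadataAnalysisWrapperPass)
    //   INITIALIZE_PASS_END(DXILPrepareModule, DEBUG_TYPE, ..., false, false)
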
diff --git a/llvm/lib/Target/DirectX/DXILTranslateMetadata.cpp b/llvm/lib/Target/DirectX/DXILTranslateMetadata.cpp
index 11cd9df..be370e1 100644
--- a/llvm/lib/Target/DirectX/DXILTranslateMetadata.cpp
+++ b/llvm/lib/Target/DirectX/DXILTranslateMetadata.cpp
@@ -7,26 +7,73 @@
//===----------------------------------------------------------------------===//
#include "DXILTranslateMetadata.h"
-#include "DXILMetadata.h"
#include "DXILResource.h"
#include "DXILResourceAnalysis.h"
#include "DXILShaderFlags.h"
#include "DirectX.h"
-#include "llvm/ADT/StringSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/DXILMetadataAnalysis.h"
#include "llvm/Analysis/DXILResource.h"
#include "llvm/IR/Constants.h"
+#include "llvm/IR/DiagnosticInfo.h"
+#include "llvm/IR/DiagnosticPrinter.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/VersionTuple.h"
#include "llvm/TargetParser/Triple.h"
+#include <cstdint>
using namespace llvm;
using namespace llvm::dxil;
-static void emitResourceMetadata(Module &M, const DXILResourceMap &DRM,
- const dxil::Resources &MDResources) {
+namespace {
+/// A simple DiagnosticInfo wrapper that generates module-level diagnostics
+/// for the TranslateMetadata pass
+class DiagnosticInfoTranslateMD : public DiagnosticInfo {
+private:
+ const Twine &Msg;
+ const Module &Mod;
+
+public:
+ /// \p M is the module for which the diagnostic is being emitted. \p Msg is
+ /// the message to show. Note that this class does not copy this message, so
+  /// this reference must be valid for the whole lifetime of the diagnostic.
+ DiagnosticInfoTranslateMD(const Module &M, const Twine &Msg,
+ DiagnosticSeverity Severity = DS_Error)
+ : DiagnosticInfo(DK_Unsupported, Severity), Msg(Msg), Mod(M) {}
+
+ void print(DiagnosticPrinter &DP) const override {
+ DP << Mod.getName() << ": " << Msg << '\n';
+ }
+};
+
+enum class EntryPropsTag {
+ ShaderFlags = 0,
+ GSState,
+ DSState,
+ HSState,
+ NumThreads,
+ AutoBindingSpace,
+ RayPayloadSize,
+ RayAttribSize,
+ ShaderKind,
+ MSState,
+  ASState,
+ WaveSize,
+ EntryRootSig,
+};
+
+} // namespace
+
+static NamedMDNode *emitResourceMetadata(Module &M, const DXILResourceMap &DRM,
+ const dxil::Resources &MDResources) {
LLVMContext &Context = M.getContext();
SmallVector<Metadata *> SRVs, UAVs, CBufs, Smps;
@@ -58,25 +105,256 @@ static void emitResourceMetadata(Module &M, const DXILResourceMap &DRM,
}
if (!HasResources)
- return;
+ return nullptr;
NamedMDNode *ResourceMD = M.getOrInsertNamedMetadata("dx.resources");
ResourceMD->addOperand(
MDNode::get(M.getContext(), {SRVMD, UAVMD, CBufMD, SmpMD}));
+
+ return ResourceMD;
+}
+
+static StringRef getShortShaderStage(Triple::EnvironmentType Env) {
+ switch (Env) {
+ case Triple::Pixel:
+ return "ps";
+ case Triple::Vertex:
+ return "vs";
+ case Triple::Geometry:
+ return "gs";
+ case Triple::Hull:
+ return "hs";
+ case Triple::Domain:
+ return "ds";
+ case Triple::Compute:
+ return "cs";
+ case Triple::Library:
+ return "lib";
+ case Triple::Mesh:
+ return "ms";
+ case Triple::Amplification:
+ return "as";
+ default:
+ break;
+ }
+ llvm_unreachable("Unsupported environment for DXIL generation.");
+}
+
+static uint32_t getShaderStage(Triple::EnvironmentType Env) {
+ return (uint32_t)Env - (uint32_t)llvm::Triple::Pixel;
+}
+
+static SmallVector<Metadata *>
+getTagValueAsMetadata(EntryPropsTag Tag, uint64_t Value, LLVMContext &Ctx) {
+ SmallVector<Metadata *> MDVals;
+ MDVals.emplace_back(ConstantAsMetadata::get(
+ ConstantInt::get(Type::getInt32Ty(Ctx), static_cast<int>(Tag))));
+ switch (Tag) {
+ case EntryPropsTag::ShaderFlags:
+ MDVals.emplace_back(ConstantAsMetadata::get(
+ ConstantInt::get(Type::getInt64Ty(Ctx), Value)));
+ break;
+ case EntryPropsTag::ShaderKind:
+ MDVals.emplace_back(ConstantAsMetadata::get(
+ ConstantInt::get(Type::getInt32Ty(Ctx), Value)));
+ break;
+ case EntryPropsTag::GSState:
+ case EntryPropsTag::DSState:
+ case EntryPropsTag::HSState:
+ case EntryPropsTag::NumThreads:
+ case EntryPropsTag::AutoBindingSpace:
+ case EntryPropsTag::RayPayloadSize:
+ case EntryPropsTag::RayAttribSize:
+ case EntryPropsTag::MSState:
+  case EntryPropsTag::ASState:
+ case EntryPropsTag::WaveSize:
+ case EntryPropsTag::EntryRootSig:
+ llvm_unreachable("NYI: Unhandled entry property tag");
+ }
+ return MDVals;
+}
+
+static MDTuple *
+getEntryPropAsMetadata(const EntryProperties &EP, uint64_t EntryShaderFlags,
+ const Triple::EnvironmentType ShaderProfile) {
+ SmallVector<Metadata *> MDVals;
+ LLVMContext &Ctx = EP.Entry->getContext();
+ if (EntryShaderFlags != 0)
+ MDVals.append(getTagValueAsMetadata(EntryPropsTag::ShaderFlags,
+ EntryShaderFlags, Ctx));
+
+ if (EP.Entry != nullptr) {
+ // FIXME: support more props.
+ // See https://github.com/llvm/llvm-project/issues/57948.
+ // Add shader kind for lib entries.
+ if (ShaderProfile == Triple::EnvironmentType::Library &&
+ EP.ShaderStage != Triple::EnvironmentType::Library)
+ MDVals.append(getTagValueAsMetadata(EntryPropsTag::ShaderKind,
+ getShaderStage(EP.ShaderStage), Ctx));
+
+ if (EP.ShaderStage == Triple::EnvironmentType::Compute) {
+ MDVals.emplace_back(ConstantAsMetadata::get(ConstantInt::get(
+ Type::getInt32Ty(Ctx), static_cast<int>(EntryPropsTag::NumThreads))));
+ Metadata *NumThreadVals[] = {ConstantAsMetadata::get(ConstantInt::get(
+ Type::getInt32Ty(Ctx), EP.NumThreadsX)),
+ ConstantAsMetadata::get(ConstantInt::get(
+ Type::getInt32Ty(Ctx), EP.NumThreadsY)),
+ ConstantAsMetadata::get(ConstantInt::get(
+ Type::getInt32Ty(Ctx), EP.NumThreadsZ))};
+ MDVals.emplace_back(MDNode::get(Ctx, NumThreadVals));
+ }
+ }
+ if (MDVals.empty())
+ return nullptr;
+ return MDNode::get(Ctx, MDVals);
+}
+
+MDTuple *constructEntryMetadata(const Function *EntryFn, MDTuple *Signatures,
+ MDNode *Resources, MDTuple *Properties,
+ LLVMContext &Ctx) {
+ // Each entry point metadata record specifies:
+ // * reference to the entry point function global symbol
+ // * unmangled name
+ // * list of signatures
+ // * list of resources
+ // * list of tag-value pairs of shader capabilities and other properties
+ Metadata *MDVals[5];
+ MDVals[0] =
+ EntryFn ? ValueAsMetadata::get(const_cast<Function *>(EntryFn)) : nullptr;
+ MDVals[1] = MDString::get(Ctx, EntryFn ? EntryFn->getName() : "");
+ MDVals[2] = Signatures;
+ MDVals[3] = Resources;
+ MDVals[4] = Properties;
+ return MDNode::get(Ctx, MDVals);
+}
+
+static MDTuple *emitEntryMD(const EntryProperties &EP, MDTuple *Signatures,
+ MDNode *MDResources,
+ const uint64_t EntryShaderFlags,
+ const Triple::EnvironmentType ShaderProfile) {
+ MDTuple *Properties =
+ getEntryPropAsMetadata(EP, EntryShaderFlags, ShaderProfile);
+ return constructEntryMetadata(EP.Entry, Signatures, MDResources, Properties,
+ EP.Entry->getContext());
+}
+
+static void emitValidatorVersionMD(Module &M, const ModuleMetadataInfo &MMDI) {
+ if (MMDI.ValidatorVersion.empty())
+ return;
+
+ LLVMContext &Ctx = M.getContext();
+ IRBuilder<> IRB(Ctx);
+ Metadata *MDVals[2];
+ MDVals[0] =
+ ConstantAsMetadata::get(IRB.getInt32(MMDI.ValidatorVersion.getMajor()));
+ MDVals[1] = ConstantAsMetadata::get(
+ IRB.getInt32(MMDI.ValidatorVersion.getMinor().value_or(0)));
+ NamedMDNode *ValVerNode = M.getOrInsertNamedMetadata("dx.valver");
+ // Set validator version obtained from DXIL Metadata Analysis pass
+ ValVerNode->clearOperands();
+ ValVerNode->addOperand(MDNode::get(Ctx, MDVals));
+}
+
+static void emitShaderModelVersionMD(Module &M,
+ const ModuleMetadataInfo &MMDI) {
+ LLVMContext &Ctx = M.getContext();
+ IRBuilder<> IRB(Ctx);
+ Metadata *SMVals[3];
+ VersionTuple SM = MMDI.ShaderModelVersion;
+ SMVals[0] = MDString::get(Ctx, getShortShaderStage(MMDI.ShaderProfile));
+ SMVals[1] = ConstantAsMetadata::get(IRB.getInt32(SM.getMajor()));
+ SMVals[2] = ConstantAsMetadata::get(IRB.getInt32(SM.getMinor().value_or(0)));
+ NamedMDNode *SMMDNode = M.getOrInsertNamedMetadata("dx.shaderModel");
+ SMMDNode->addOperand(MDNode::get(Ctx, SMVals));
+}
+
+static void emitDXILVersionTupleMD(Module &M, const ModuleMetadataInfo &MMDI) {
+ LLVMContext &Ctx = M.getContext();
+ IRBuilder<> IRB(Ctx);
+ VersionTuple DXILVer = MMDI.DXILVersion;
+ Metadata *DXILVals[2];
+ DXILVals[0] = ConstantAsMetadata::get(IRB.getInt32(DXILVer.getMajor()));
+ DXILVals[1] =
+ ConstantAsMetadata::get(IRB.getInt32(DXILVer.getMinor().value_or(0)));
+ NamedMDNode *DXILVerMDNode = M.getOrInsertNamedMetadata("dx.version");
+ DXILVerMDNode->addOperand(MDNode::get(Ctx, DXILVals));
+}
+
+static MDTuple *emitTopLevelLibraryNode(Module &M, MDNode *RMD,
+ uint64_t ShaderFlags) {
+ LLVMContext &Ctx = M.getContext();
+ MDTuple *Properties = nullptr;
+ if (ShaderFlags != 0) {
+ SmallVector<Metadata *> MDVals;
+ // FIXME: ShaderFlagsAnalysis pass needs to collect and provide
+    // ShaderFlags for each entry function. Currently, the ShaderFlags value
+    // provided by the ShaderFlagsAnalysis pass is created by walking *all*
+    // the function instructions of the module. Is it correct to use this
+    // value for the metadata of the empty library entry?
+ MDVals.append(
+ getTagValueAsMetadata(EntryPropsTag::ShaderFlags, ShaderFlags, Ctx));
+ Properties = MDNode::get(Ctx, MDVals);
+ }
+ // Library has an entry metadata with resource table metadata and all other
+ // MDNodes as null.
+ return constructEntryMetadata(nullptr, nullptr, RMD, Properties, Ctx);
}
static void translateMetadata(Module &M, const DXILResourceMap &DRM,
- const dxil::Resources &MDResources,
- const ComputedShaderFlags &ShaderFlags) {
- dxil::ValidatorVersionMD ValVerMD(M);
- if (ValVerMD.isEmpty())
- ValVerMD.update(VersionTuple(1, 0));
- dxil::createShaderModelMD(M);
- dxil::createDXILVersionMD(M);
+ const Resources &MDResources,
+ const ComputedShaderFlags &ShaderFlags,
+ const ModuleMetadataInfo &MMDI) {
+ LLVMContext &Ctx = M.getContext();
+ IRBuilder<> IRB(Ctx);
+ SmallVector<MDNode *> EntryFnMDNodes;
+
+ emitValidatorVersionMD(M, MMDI);
+ emitShaderModelVersionMD(M, MMDI);
+ emitDXILVersionTupleMD(M, MMDI);
+ NamedMDNode *NamedResourceMD = emitResourceMetadata(M, DRM, MDResources);
+ auto *ResourceMD =
+ (NamedResourceMD != nullptr) ? NamedResourceMD->getOperand(0) : nullptr;
+ // FIXME: Add support to construct Signatures
+ // See https://github.com/llvm/llvm-project/issues/57928
+ MDTuple *Signatures = nullptr;
- emitResourceMetadata(M, DRM, MDResources);
+ if (MMDI.ShaderProfile == Triple::EnvironmentType::Library)
+ EntryFnMDNodes.emplace_back(
+ emitTopLevelLibraryNode(M, ResourceMD, ShaderFlags));
+ else if (MMDI.EntryPropertyVec.size() > 1) {
+ M.getContext().diagnose(DiagnosticInfoTranslateMD(
+ M, "Non-library shader: One and only one entry expected"));
+ }
+
+ for (const EntryProperties &EntryProp : MMDI.EntryPropertyVec) {
+ // FIXME: ShaderFlagsAnalysis pass needs to collect and provide
+    // ShaderFlags for each entry function. For now, assume the shader flags
+    // value of an entry function (EntryProp.Entry) compiled for a lib_*
+    // shader profile is 0.
+ uint64_t EntryShaderFlags =
+ (MMDI.ShaderProfile == Triple::EnvironmentType::Library) ? 0
+ : ShaderFlags;
+ if (MMDI.ShaderProfile != Triple::EnvironmentType::Library) {
+ if (EntryProp.ShaderStage != MMDI.ShaderProfile) {
+ M.getContext().diagnose(DiagnosticInfoTranslateMD(
+ M,
+ "Shader stage '" +
+ Twine(getShortShaderStage(EntryProp.ShaderStage) +
+ "' for entry '" + Twine(EntryProp.Entry->getName()) +
+ "' different from specified target profile '" +
+ Twine(Triple::getEnvironmentTypeName(MMDI.ShaderProfile) +
+ "'"))));
+ }
+ }
+ EntryFnMDNodes.emplace_back(emitEntryMD(EntryProp, Signatures, ResourceMD,
+ EntryShaderFlags,
+ MMDI.ShaderProfile));
+ }
- dxil::createEntryMD(M, static_cast<uint64_t>(ShaderFlags));
+ NamedMDNode *EntryPointsNamedMD =
+ M.getOrInsertNamedMetadata("dx.entryPoints");
+ for (auto *Entry : EntryFnMDNodes)
+ EntryPointsNamedMD->addOperand(Entry);
}
PreservedAnalyses DXILTranslateMetadata::run(Module &M,
@@ -85,8 +363,9 @@ PreservedAnalyses DXILTranslateMetadata::run(Module &M,
const dxil::Resources &MDResources = MAM.getResult<DXILResourceMDAnalysis>(M);
const ComputedShaderFlags &ShaderFlags =
MAM.getResult<ShaderFlagsAnalysis>(M);
+ const dxil::ModuleMetadataInfo MMDI = MAM.getResult<DXILMetadataAnalysis>(M);
- translateMetadata(M, DRM, MDResources, ShaderFlags);
+ translateMetadata(M, DRM, MDResources, ShaderFlags, MMDI);
return PreservedAnalyses::all();
}
@@ -100,11 +379,13 @@ public:
StringRef getPassName() const override { return "DXIL Translate Metadata"; }
void getAnalysisUsage(AnalysisUsage &AU) const override {
- AU.setPreservesAll();
AU.addRequired<DXILResourceWrapperPass>();
AU.addRequired<DXILResourceMDWrapper>();
AU.addRequired<ShaderFlagsAnalysisWrapper>();
AU.addRequired<DXILMetadataAnalysisWrapperPass>();
+ AU.addPreserved<DXILResourceWrapperPass>();
+ AU.addPreserved<DXILResourceMDWrapper>();
+ AU.addPreserved<DXILMetadataAnalysisWrapperPass>();
}
bool runOnModule(Module &M) override {
@@ -114,8 +395,10 @@ public:
getAnalysis<DXILResourceMDWrapper>().getDXILResource();
const ComputedShaderFlags &ShaderFlags =
getAnalysis<ShaderFlagsAnalysisWrapper>().getShaderFlags();
+ dxil::ModuleMetadataInfo MMDI =
+ getAnalysis<DXILMetadataAnalysisWrapperPass>().getModuleMetadata();
- translateMetadata(M, DRM, MDResources, ShaderFlags);
+ translateMetadata(M, DRM, MDResources, ShaderFlags, MMDI);
return true;
}
};
@@ -133,5 +416,6 @@ INITIALIZE_PASS_BEGIN(DXILTranslateMetadataLegacy, "dxil-translate-metadata",
INITIALIZE_PASS_DEPENDENCY(DXILResourceWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DXILResourceMDWrapper)
INITIALIZE_PASS_DEPENDENCY(ShaderFlagsAnalysisWrapper)
+INITIALIZE_PASS_DEPENDENCY(DXILMetadataAnalysisWrapperPass)
INITIALIZE_PASS_END(DXILTranslateMetadataLegacy, "dxil-translate-metadata",
"DXIL Translate Metadata", false, false)
diff --git a/llvm/lib/Target/Hexagon/HexagonExpandCondsets.cpp b/llvm/lib/Target/Hexagon/HexagonExpandCondsets.cpp
index 8cf853a..06f6abe 100644
--- a/llvm/lib/Target/Hexagon/HexagonExpandCondsets.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonExpandCondsets.cpp
@@ -568,7 +568,7 @@ void HexagonExpandCondsets::updateLiveness(const std::set<Register> &RegSet,
// after that.
if (UpdateKills)
updateKillFlags(R);
- LIS->getInterval(R).verify();
+ assert(LIS->getInterval(R).verify());
}
}
@@ -1197,7 +1197,7 @@ bool HexagonExpandCondsets::coalesceRegisters(RegisterRef R1, RegisterRef R2) {
updateKillFlags(R1.Reg);
LLVM_DEBUG(dbgs() << "coalesced: " << L1 << "\n");
- L1.verify();
+ assert(L1.verify());
return true;
}
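
The Hexagon change above is a small but real win: verify() returns a bool here, and wrapping the call in assert() both checks that result in debug builds and compiles the expensive verification out entirely under NDEBUG, where the bare call used to run unconditionally and discard its result. A minimal sketch of the pattern, with a stand-in for LiveInterval::verify():

    #include <cassert>

    bool expensiveInvariantHolds(); // stand-in for LiveInterval::verify()

    void checkedUpdate() {
      // assert() vanishes when NDEBUG is defined, so the costly check runs
      // only in debug builds, and its boolean result is no longer ignored.
      assert(expensiveInvariantHolds());
    }
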
diff --git a/llvm/lib/Target/Mips/Mips.h b/llvm/lib/Target/Mips/Mips.h
index e3e9e17..f99dadd 100644
--- a/llvm/lib/Target/Mips/Mips.h
+++ b/llvm/lib/Target/Mips/Mips.h
@@ -17,6 +17,17 @@
#include "MCTargetDesc/MipsMCTargetDesc.h"
#include "llvm/Target/TargetMachine.h"
+#define IsMFLOMFHI(instr) \
+ (instr == Mips::MFLO || instr == Mips::MFLO64 || instr == Mips::MFHI || \
+ instr == Mips::MFHI64)
+#define IsDIVMULT(instr) \
+ (instr == Mips::SDIV || instr == Mips::PseudoSDIV || instr == Mips::DSDIV || \
+ instr == Mips::PseudoDSDIV || instr == Mips::UDIV || \
+ instr == Mips::PseudoUDIV || instr == Mips::DUDIV || \
+ instr == Mips::PseudoDUDIV || instr == Mips::MULT || \
+ instr == Mips::PseudoMULT || instr == Mips::DMULT || \
+ instr == Mips::PseudoDMULT)
+
namespace llvm {
class FunctionPass;
class InstructionSelector;
diff --git a/llvm/lib/Target/Mips/MipsBranchExpansion.cpp b/llvm/lib/Target/Mips/MipsBranchExpansion.cpp
index 721e525..5d01c69 100644
--- a/llvm/lib/Target/Mips/MipsBranchExpansion.cpp
+++ b/llvm/lib/Target/Mips/MipsBranchExpansion.cpp
@@ -167,6 +167,9 @@ private:
bool handleFPUDelaySlot();
bool handleLoadDelaySlot();
bool handlePossibleLongBranch();
+ bool handleMFLO();
+ template <typename Pred, typename Safe>
+ bool handleMFLOSlot(Pred Predicate, Safe SafeInSlot);
const MipsSubtarget *STI;
const MipsInstrInfo *TII;
@@ -742,6 +745,53 @@ static void emitGPDisp(MachineFunction &F, const MipsInstrInfo *TII) {
}
template <typename Pred, typename Safe>
+bool MipsBranchExpansion::handleMFLOSlot(Pred Predicate, Safe SafeInSlot) {
+ bool Changed = false;
+ bool hasPendingMFLO = false;
+
+ for (MachineFunction::iterator FI = MFp->begin(); FI != MFp->end(); ++FI) {
+ for (Iter I = FI->begin(); I != FI->end(); ++I) {
+
+ if (!Predicate(*I) && !hasPendingMFLO) {
+ continue;
+ }
+
+ Iter IInSlot;
+ bool LastInstInFunction =
+ std::next(I) == FI->end() && std::next(FI) == MFp->end();
+      // We need to handle several situations:
+      // mflo is the last instruction: do not process;
+      // mflo + div: add two nops between them;
+      // mflo + non-div + non-div: do not process;
+      // mflo + non-div + div: add a nop between the non-div and the div.
+ if (!LastInstInFunction) {
+ std::pair<Iter, bool> Res = getNextMachineInstr(std::next(I), &*FI);
+ LastInstInFunction |= Res.second;
+ IInSlot = Res.first;
+ if (!SafeInSlot(*IInSlot, *I)) {
+ Changed = true;
+ TII->insertNop(*(I->getParent()), std::next(I), I->getDebugLoc())
+ ->bundleWithPred();
+ NumInsertedNops++;
+ if (IsMFLOMFHI(I->getOpcode())) {
+ TII->insertNop(*(I->getParent()), std::next(I), I->getDebugLoc())
+ ->bundleWithPred();
+ NumInsertedNops++;
+ }
+ if (hasPendingMFLO)
+ hasPendingMFLO = false;
+ } else if (hasPendingMFLO)
+ hasPendingMFLO = false;
+ else if (IsMFLOMFHI(I->getOpcode()))
+ hasPendingMFLO = true;
+ }
+ }
+ }
+
+ return Changed;
+}
+
+template <typename Pred, typename Safe>
bool MipsBranchExpansion::handleSlot(Pred Predicate, Safe SafeInSlot) {
bool Changed = false;
@@ -777,6 +827,19 @@ bool MipsBranchExpansion::handleSlot(Pred Predicate, Safe SafeInSlot) {
return Changed;
}
+bool MipsBranchExpansion::handleMFLO() {
+  // mips1-4 require a minimum of 2 instructions between an mflo/mfhi
+  // and the next mult/div instruction.
+ if (STI->hasMips32() || STI->hasMips5())
+ return false;
+
+ return handleMFLOSlot(
+ [this](auto &I) -> bool { return TII->IsMfloOrMfhi(I); },
+ [this](auto &IInSlot, auto &I) -> bool {
+ return TII->SafeAfterMflo(IInSlot);
+ });
+}
+
bool MipsBranchExpansion::handleForbiddenSlot() {
// Forbidden slot hazards are only defined for MIPSR6 but not microMIPSR6.
if (!STI->hasMips32r6() || STI->inMicroMipsMode())
@@ -893,16 +956,19 @@ bool MipsBranchExpansion::runOnMachineFunction(MachineFunction &MF) {
bool forbiddenSlotChanged = handleForbiddenSlot();
bool fpuDelaySlotChanged = handleFPUDelaySlot();
bool loadDelaySlotChanged = handleLoadDelaySlot();
+ bool MfloChanged = handleMFLO();
bool Changed = longBranchChanged || forbiddenSlotChanged ||
- fpuDelaySlotChanged || loadDelaySlotChanged;
+ fpuDelaySlotChanged || loadDelaySlotChanged || MfloChanged;
// Then run them alternatively while there are changes.
while (forbiddenSlotChanged) {
longBranchChanged = handlePossibleLongBranch();
fpuDelaySlotChanged = handleFPUDelaySlot();
loadDelaySlotChanged = handleLoadDelaySlot();
- if (!longBranchChanged && !fpuDelaySlotChanged && !loadDelaySlotChanged)
+ MfloChanged = handleMFLO();
+ if (!longBranchChanged && !fpuDelaySlotChanged && !loadDelaySlotChanged &&
+ !MfloChanged)
break;
forbiddenSlotChanged = handleForbiddenSlot();
}
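
A condensed restatement of the hazard that the new handleMFLO logic guards against (macros from Mips.h above; the helper name is mine): on mips1-4, a mult or div issued within two instructions of a preceding mflo/mfhi can corrupt the HI/LO read, so up to two nops are inserted between them:

    // True when padding nops are required between Producer and Consumer.
    bool needsHazardPadding(unsigned ProducerOpc, unsigned ConsumerOpc) {
      return IsMFLOMFHI(ProducerOpc) && IsDIVMULT(ConsumerOpc);
    }
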
diff --git a/llvm/lib/Target/Mips/MipsDelaySlotFiller.cpp b/llvm/lib/Target/Mips/MipsDelaySlotFiller.cpp
index 4ec01ab..a576c53 100644
--- a/llvm/lib/Target/Mips/MipsDelaySlotFiller.cpp
+++ b/llvm/lib/Target/Mips/MipsDelaySlotFiller.cpp
@@ -744,6 +744,12 @@ bool MipsDelaySlotFiller::searchRange(MachineBasicBlock &MBB, IterTy Begin,
bool InMicroMipsMode = STI.inMicroMipsMode();
const MipsInstrInfo *TII = STI.getInstrInfo();
unsigned Opcode = (*Slot).getOpcode();
+
+  // On mips1-4, mflo must not be put into the delay slot of a return.
+ if ((IsMFLOMFHI(CurrI->getOpcode())) &&
+ (!STI.hasMips32() && !STI.hasMips5()))
+ continue;
+
// This is complicated by the tail call optimization. For non-PIC code
// there is only a 32bit sized unconditional branch which can be assumed
// to be able to reach the target. b16 only has a range of +/- 1 KB.
diff --git a/llvm/lib/Target/Mips/MipsISelLowering.cpp b/llvm/lib/Target/Mips/MipsISelLowering.cpp
index 4345b8e..f1cc9fd 100644
--- a/llvm/lib/Target/Mips/MipsISelLowering.cpp
+++ b/llvm/lib/Target/Mips/MipsISelLowering.cpp
@@ -124,7 +124,7 @@ unsigned MipsTargetLowering::getNumRegistersForCallingConv(LLVMContext &Context,
unsigned MipsTargetLowering::getVectorTypeBreakdownForCallingConv(
LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT,
unsigned &NumIntermediates, MVT &RegisterVT) const {
- if (VT.isPow2VectorType()) {
+ if (VT.isPow2VectorType() && VT.getVectorElementType().isRound()) {
IntermediateVT = getRegisterTypeForCallingConv(Context, CC, VT);
RegisterVT = IntermediateVT.getSimpleVT();
NumIntermediates = getNumRegistersForCallingConv(Context, CC, VT);
diff --git a/llvm/lib/Target/Mips/MipsInstrInfo.cpp b/llvm/lib/Target/Mips/MipsInstrInfo.cpp
index f4fba5e5..d33652b 100644
--- a/llvm/lib/Target/Mips/MipsInstrInfo.cpp
+++ b/llvm/lib/Target/Mips/MipsInstrInfo.cpp
@@ -13,6 +13,7 @@
#include "MipsInstrInfo.h"
#include "MCTargetDesc/MipsBaseInfo.h"
#include "MCTargetDesc/MipsMCTargetDesc.h"
+#include "Mips.h"
#include "MipsSubtarget.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
@@ -571,6 +572,13 @@ unsigned MipsInstrInfo::getEquivalentCompactForm(
return 0;
}
+bool MipsInstrInfo::SafeAfterMflo(const MachineInstr &MI) const {
+ if (IsDIVMULT(MI.getOpcode()))
+ return false;
+
+ return true;
+}
+
/// Predicate for distingushing between control transfer instructions and all
/// other instructions for handling forbidden slots. Consider inline assembly
/// as unsafe as well.
@@ -623,6 +631,13 @@ bool MipsInstrInfo::SafeInLoadDelaySlot(const MachineInstr &MIInSlot,
});
}
+bool MipsInstrInfo::IsMfloOrMfhi(const MachineInstr &MI) const {
+ if (IsMFLOMFHI(MI.getOpcode()))
+ return true;
+
+ return false;
+}
+
/// Predicate for distingushing instructions that have forbidden slots.
bool MipsInstrInfo::HasForbiddenSlot(const MachineInstr &MI) const {
return (MI.getDesc().TSFlags & MipsII::HasForbiddenSlot) != 0;
diff --git a/llvm/lib/Target/Mips/MipsInstrInfo.h b/llvm/lib/Target/Mips/MipsInstrInfo.h
index 4e039e0..2ff12f8 100644
--- a/llvm/lib/Target/Mips/MipsInstrInfo.h
+++ b/llvm/lib/Target/Mips/MipsInstrInfo.h
@@ -89,6 +89,8 @@ public:
bool isBranchOffsetInRange(unsigned BranchOpc,
int64_t BrOffset) const override;
+ bool SafeAfterMflo(const MachineInstr &MI) const;
+
/// Predicate to determine if an instruction can go in a forbidden slot.
bool SafeInForbiddenSlot(const MachineInstr &MI) const;
@@ -100,6 +102,8 @@ public:
bool SafeInLoadDelaySlot(const MachineInstr &MIInSlot,
const MachineInstr &LoadMI) const;
+ bool IsMfloOrMfhi(const MachineInstr &MI) const;
+
/// Predicate to determine if an instruction has a forbidden slot.
bool HasForbiddenSlot(const MachineInstr &MI) const;
diff --git a/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXInstPrinter.cpp b/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXInstPrinter.cpp
index 5b568b0..7d6442a 100644
--- a/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXInstPrinter.cpp
+++ b/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXInstPrinter.cpp
@@ -14,6 +14,7 @@
#include "MCTargetDesc/NVPTXBaseInfo.h"
#include "NVPTX.h"
#include "NVPTXUtilities.h"
+#include "llvm/ADT/StringRef.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrInfo.h"
@@ -95,228 +96,262 @@ void NVPTXInstPrinter::printOperand(const MCInst *MI, unsigned OpNo,
}
void NVPTXInstPrinter::printCvtMode(const MCInst *MI, int OpNum, raw_ostream &O,
- const char *Modifier) {
+ const char *M) {
const MCOperand &MO = MI->getOperand(OpNum);
int64_t Imm = MO.getImm();
+ llvm::StringRef Modifier(M);
- if (strcmp(Modifier, "ftz") == 0) {
+ if (Modifier == "ftz") {
// FTZ flag
if (Imm & NVPTX::PTXCvtMode::FTZ_FLAG)
O << ".ftz";
- } else if (strcmp(Modifier, "sat") == 0) {
+ return;
+ } else if (Modifier == "sat") {
// SAT flag
if (Imm & NVPTX::PTXCvtMode::SAT_FLAG)
O << ".sat";
- } else if (strcmp(Modifier, "relu") == 0) {
+ return;
+ } else if (Modifier == "relu") {
// RELU flag
if (Imm & NVPTX::PTXCvtMode::RELU_FLAG)
O << ".relu";
- } else if (strcmp(Modifier, "base") == 0) {
+ return;
+ } else if (Modifier == "base") {
// Default operand
switch (Imm & NVPTX::PTXCvtMode::BASE_MASK) {
default:
return;
case NVPTX::PTXCvtMode::NONE:
- break;
+ return;
case NVPTX::PTXCvtMode::RNI:
O << ".rni";
- break;
+ return;
case NVPTX::PTXCvtMode::RZI:
O << ".rzi";
- break;
+ return;
case NVPTX::PTXCvtMode::RMI:
O << ".rmi";
- break;
+ return;
case NVPTX::PTXCvtMode::RPI:
O << ".rpi";
- break;
+ return;
case NVPTX::PTXCvtMode::RN:
O << ".rn";
- break;
+ return;
case NVPTX::PTXCvtMode::RZ:
O << ".rz";
- break;
+ return;
case NVPTX::PTXCvtMode::RM:
O << ".rm";
- break;
+ return;
case NVPTX::PTXCvtMode::RP:
O << ".rp";
- break;
+ return;
case NVPTX::PTXCvtMode::RNA:
O << ".rna";
- break;
+ return;
}
- } else {
- llvm_unreachable("Invalid conversion modifier");
}
+ llvm_unreachable("Invalid conversion modifier");
}
void NVPTXInstPrinter::printCmpMode(const MCInst *MI, int OpNum, raw_ostream &O,
- const char *Modifier) {
+ const char *M) {
const MCOperand &MO = MI->getOperand(OpNum);
int64_t Imm = MO.getImm();
+ llvm::StringRef Modifier(M);
- if (strcmp(Modifier, "ftz") == 0) {
+ if (Modifier == "ftz") {
// FTZ flag
if (Imm & NVPTX::PTXCmpMode::FTZ_FLAG)
O << ".ftz";
- } else if (strcmp(Modifier, "base") == 0) {
+ return;
+ } else if (Modifier == "base") {
switch (Imm & NVPTX::PTXCmpMode::BASE_MASK) {
default:
return;
case NVPTX::PTXCmpMode::EQ:
O << ".eq";
- break;
+ return;
case NVPTX::PTXCmpMode::NE:
O << ".ne";
- break;
+ return;
case NVPTX::PTXCmpMode::LT:
O << ".lt";
- break;
+ return;
case NVPTX::PTXCmpMode::LE:
O << ".le";
- break;
+ return;
case NVPTX::PTXCmpMode::GT:
O << ".gt";
- break;
+ return;
case NVPTX::PTXCmpMode::GE:
O << ".ge";
- break;
+ return;
case NVPTX::PTXCmpMode::LO:
O << ".lo";
- break;
+ return;
case NVPTX::PTXCmpMode::LS:
O << ".ls";
- break;
+ return;
case NVPTX::PTXCmpMode::HI:
O << ".hi";
- break;
+ return;
case NVPTX::PTXCmpMode::HS:
O << ".hs";
- break;
+ return;
case NVPTX::PTXCmpMode::EQU:
O << ".equ";
- break;
+ return;
case NVPTX::PTXCmpMode::NEU:
O << ".neu";
- break;
+ return;
case NVPTX::PTXCmpMode::LTU:
O << ".ltu";
- break;
+ return;
case NVPTX::PTXCmpMode::LEU:
O << ".leu";
- break;
+ return;
case NVPTX::PTXCmpMode::GTU:
O << ".gtu";
- break;
+ return;
case NVPTX::PTXCmpMode::GEU:
O << ".geu";
- break;
+ return;
case NVPTX::PTXCmpMode::NUM:
O << ".num";
- break;
+ return;
case NVPTX::PTXCmpMode::NotANumber:
O << ".nan";
- break;
+ return;
}
- } else {
- llvm_unreachable("Empty Modifier");
}
+ llvm_unreachable("Empty Modifier");
}
void NVPTXInstPrinter::printLdStCode(const MCInst *MI, int OpNum,
- raw_ostream &O, const char *Modifier) {
- if (Modifier) {
- const MCOperand &MO = MI->getOperand(OpNum);
- int Imm = (int) MO.getImm();
- if (!strcmp(Modifier, "sem")) {
- auto Ordering = NVPTX::Ordering(Imm);
- switch (Ordering) {
- case NVPTX::Ordering::NotAtomic:
- break;
- case NVPTX::Ordering::Volatile:
- O << ".volatile";
- break;
- case NVPTX::Ordering::Relaxed:
- O << ".relaxed.sys";
- break;
- case NVPTX::Ordering::Acquire:
- O << ".acquire.sys";
- break;
- case NVPTX::Ordering::Release:
- O << ".release.sys";
- break;
- case NVPTX::Ordering::RelaxedMMIO:
- O << ".mmio.relaxed.sys";
- break;
- default:
- report_fatal_error(formatv(
- "NVPTX LdStCode Printer does not support \"{}\" sem modifier.",
- OrderingToCString(Ordering)));
- }
- } else if (!strcmp(Modifier, "addsp")) {
- switch (Imm) {
- case NVPTX::PTXLdStInstCode::GLOBAL:
- O << ".global";
- break;
- case NVPTX::PTXLdStInstCode::SHARED:
- O << ".shared";
- break;
- case NVPTX::PTXLdStInstCode::LOCAL:
- O << ".local";
- break;
- case NVPTX::PTXLdStInstCode::PARAM:
- O << ".param";
- break;
- case NVPTX::PTXLdStInstCode::CONSTANT:
- O << ".const";
- break;
- case NVPTX::PTXLdStInstCode::GENERIC:
- break;
- default:
- llvm_unreachable("Wrong Address Space");
- }
- } else if (!strcmp(Modifier, "sign")) {
- if (Imm == NVPTX::PTXLdStInstCode::Signed)
- O << "s";
- else if (Imm == NVPTX::PTXLdStInstCode::Unsigned)
- O << "u";
- else if (Imm == NVPTX::PTXLdStInstCode::Untyped)
- O << "b";
- else if (Imm == NVPTX::PTXLdStInstCode::Float)
- O << "f";
- else
- llvm_unreachable("Unknown register type");
- } else if (!strcmp(Modifier, "vec")) {
- if (Imm == NVPTX::PTXLdStInstCode::V2)
- O << ".v2";
- else if (Imm == NVPTX::PTXLdStInstCode::V4)
- O << ".v4";
- } else
- llvm_unreachable("Unknown Modifier");
- } else
- llvm_unreachable("Empty Modifier");
+ raw_ostream &O, const char *M) {
+ llvm::StringRef Modifier(M);
+ const MCOperand &MO = MI->getOperand(OpNum);
+ int Imm = (int)MO.getImm();
+ if (Modifier == "sem") {
+ auto Ordering = NVPTX::Ordering(Imm);
+ switch (Ordering) {
+ case NVPTX::Ordering::NotAtomic:
+ return;
+ case NVPTX::Ordering::Relaxed:
+ O << ".relaxed";
+ return;
+ case NVPTX::Ordering::Acquire:
+ O << ".acquire";
+ return;
+ case NVPTX::Ordering::Release:
+ O << ".release";
+ return;
+ case NVPTX::Ordering::Volatile:
+ O << ".volatile";
+ return;
+ case NVPTX::Ordering::RelaxedMMIO:
+ O << ".mmio.relaxed";
+ return;
+ default:
+ report_fatal_error(formatv(
+ "NVPTX LdStCode Printer does not support \"{}\" sem modifier. "
+ "Loads/Stores cannot be AcquireRelease or SequentiallyConsistent.",
+ OrderingToString(Ordering)));
+ }
+ } else if (Modifier == "scope") {
+ auto S = NVPTX::Scope(Imm);
+ switch (S) {
+ case NVPTX::Scope::Thread:
+ return;
+ case NVPTX::Scope::System:
+ O << ".sys";
+ return;
+ case NVPTX::Scope::Block:
+ O << ".cta";
+ return;
+ case NVPTX::Scope::Cluster:
+ O << ".cluster";
+ return;
+ case NVPTX::Scope::Device:
+ O << ".gpu";
+ return;
+ }
+ report_fatal_error(
+ formatv("NVPTX LdStCode Printer does not support \"{}\" sco modifier.",
+ ScopeToString(S)));
+ } else if (Modifier == "addsp") {
+ auto A = NVPTX::AddressSpace(Imm);
+ switch (A) {
+ case NVPTX::AddressSpace::Generic:
+ return;
+ case NVPTX::AddressSpace::Global:
+ case NVPTX::AddressSpace::Const:
+ case NVPTX::AddressSpace::Shared:
+ case NVPTX::AddressSpace::Param:
+ case NVPTX::AddressSpace::Local:
+ O << "." << A;
+ return;
+ }
+ report_fatal_error(formatv(
+ "NVPTX LdStCode Printer does not support \"{}\" addsp modifier.",
+ AddressSpaceToString(A)));
+ } else if (Modifier == "sign") {
+ switch (Imm) {
+ case NVPTX::PTXLdStInstCode::Signed:
+ O << "s";
+ return;
+ case NVPTX::PTXLdStInstCode::Unsigned:
+ O << "u";
+ return;
+ case NVPTX::PTXLdStInstCode::Untyped:
+ O << "b";
+ return;
+ case NVPTX::PTXLdStInstCode::Float:
+ O << "f";
+ return;
+ default:
+ llvm_unreachable("Unknown register type");
+ }
+ } else if (Modifier == "vec") {
+ switch (Imm) {
+ case NVPTX::PTXLdStInstCode::V2:
+ O << ".v2";
+ return;
+ case NVPTX::PTXLdStInstCode::V4:
+ O << ".v4";
+ return;
+ }
+ // TODO: evaluate whether cases not covered by this switch are bugs
+ return;
+ }
+ llvm_unreachable(formatv("Unknown Modifier: {}", Modifier).str().c_str());
}
void NVPTXInstPrinter::printMmaCode(const MCInst *MI, int OpNum, raw_ostream &O,
- const char *Modifier) {
+ const char *M) {
const MCOperand &MO = MI->getOperand(OpNum);
int Imm = (int)MO.getImm();
- if (Modifier == nullptr || strcmp(Modifier, "version") == 0) {
+ llvm::StringRef Modifier(M);
+ if (Modifier.empty() || Modifier == "version") {
O << Imm; // Just print out PTX version
- } else if (strcmp(Modifier, "aligned") == 0) {
+ return;
+ } else if (Modifier == "aligned") {
// PTX63 requires '.aligned' in the name of the instruction.
if (Imm >= 63)
O << ".aligned";
- } else
- llvm_unreachable("Unknown Modifier");
+ return;
+ }
+ llvm_unreachable("Unknown Modifier");
}
void NVPTXInstPrinter::printMemOperand(const MCInst *MI, int OpNum,
- raw_ostream &O, const char *Modifier) {
+ raw_ostream &O, const char *M) {
printOperand(MI, OpNum, O);
+ llvm::StringRef Modifier(M);
- if (Modifier && !strcmp(Modifier, "add")) {
+ if (Modifier == "add") {
O << ", ";
printOperand(MI, OpNum + 1, O);
} else {
@@ -346,24 +381,24 @@ void NVPTXInstPrinter::printPrmtMode(const MCInst *MI, int OpNum,
default:
return;
case NVPTX::PTXPrmtMode::NONE:
- break;
+ return;
case NVPTX::PTXPrmtMode::F4E:
O << ".f4e";
- break;
+ return;
case NVPTX::PTXPrmtMode::B4E:
O << ".b4e";
- break;
+ return;
case NVPTX::PTXPrmtMode::RC8:
O << ".rc8";
- break;
+ return;
case NVPTX::PTXPrmtMode::ECL:
O << ".ecl";
- break;
+ return;
case NVPTX::PTXPrmtMode::ECR:
O << ".ecr";
- break;
+ return;
case NVPTX::PTXPrmtMode::RC16:
O << ".rc16";
- break;
+ return;
}
}
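
The InstPrinter changes above all follow one shape: wrap the const char * modifier once in a StringRef, compare with ==, and return early instead of threading breaks through an else-if chain. A minimal standalone sketch of that shape, using std::string_view in place of LLVM's StringRef and made-up flag values, purely for illustration:

#include <cstdint>
#include <iostream>
#include <string_view>

// Stand-ins for the PTXCvtMode flag bits; the values are illustrative only.
constexpr int64_t FTZ_FLAG = 1 << 0;
constexpr int64_t SAT_FLAG = 1 << 1;

void printCvtMode(std::ostream &O, const char *M, int64_t Imm) {
  std::string_view Modifier(M); // one conversion, then cheap comparisons
  if (Modifier == "ftz") {
    if (Imm & FTZ_FLAG)
      O << ".ftz";
    return; // early return replaces break-out-of-else-if
  }
  if (Modifier == "sat") {
    if (Imm & SAT_FLAG)
      O << ".sat";
    return;
  }
  std::cerr << "Invalid conversion modifier\n"; // llvm_unreachable in-tree
}

int main() {
  printCvtMode(std::cout, "ftz", FTZ_FLAG); // prints ".ftz"
  std::cout << '\n';
}
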
diff --git a/llvm/lib/Target/NVPTX/NVPTX.h b/llvm/lib/Target/NVPTX/NVPTX.h
index f6f6acb..f6ab81d 100644
--- a/llvm/lib/Target/NVPTX/NVPTX.h
+++ b/llvm/lib/Target/NVPTX/NVPTX.h
@@ -117,23 +117,37 @@ enum Ordering : OrderingUnderlyingType {
// Consume = 3, // Unimplemented in LLVM; NVPTX would map to "Acquire"
Acquire = (OrderingUnderlyingType)AtomicOrdering::Acquire,
Release = (OrderingUnderlyingType)AtomicOrdering::Release,
- // AcquireRelease = 6, // TODO
+ AcquireRelease = (OrderingUnderlyingType)AtomicOrdering::AcquireRelease,
SequentiallyConsistent =
(OrderingUnderlyingType)AtomicOrdering::SequentiallyConsistent,
Volatile = SequentiallyConsistent + 1,
RelaxedMMIO = Volatile + 1,
- LAST = RelaxedMMIO
+ LASTORDERING = RelaxedMMIO
};
-namespace PTXLdStInstCode {
-enum AddressSpace {
- GENERIC = 0,
- GLOBAL = 1,
- CONSTANT = 2,
- SHARED = 3,
- PARAM = 4,
- LOCAL = 5
+using ScopeUnderlyingType = unsigned int;
+enum Scope : ScopeUnderlyingType {
+ Thread = 0,
+ Block = 1,
+ Cluster = 2,
+ Device = 3,
+ System = 4,
+ LASTSCOPE = System
+};
+
+using AddressSpaceUnderlyingType = unsigned int;
+enum AddressSpace : AddressSpaceUnderlyingType {
+ Generic = 0,
+ Global = 1,
+ Shared = 3,
+ Const = 4,
+ Local = 5,
+
+ // NVPTX Backend Private:
+ Param = 101
};
+
+namespace PTXLdStInstCode {
enum FromType {
Unsigned = 0,
Signed,
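
The reworked enums above keep NVPTX::Ordering bit-compatible with llvm::AtomicOrdering so the casts stay trivial, and append the two NVPTX-only values after SequentiallyConsistent. A standalone restatement of the implied numbering (not the header itself; the AtomicOrdering values are LLVM's documented ones):

#include <cstdint>

using OrderingUnderlyingType = unsigned int;

// Numeric values mirror llvm::AtomicOrdering: NotAtomic=0, Unordered=1,
// Monotonic(Relaxed)=2, Consume=3 (unimplemented), Acquire=4, Release=5,
// AcquireRelease=6, SequentiallyConsistent=7.
enum Ordering : OrderingUnderlyingType {
  NotAtomic = 0,
  Relaxed = 2,
  Acquire = 4,
  Release = 5,
  AcquireRelease = 6,
  SequentiallyConsistent = 7,
  Volatile = SequentiallyConsistent + 1, // 8, NVPTX-only
  RelaxedMMIO = Volatile + 1,            // 9, NVPTX-only
  LASTORDERING = RelaxedMMIO
};

static_assert(RelaxedMMIO == 9, "NVPTX-only orderings sit after seq_cst");

int main() { return 0; }
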
diff --git a/llvm/lib/Target/NVPTX/NVPTXAsmPrinter.cpp b/llvm/lib/Target/NVPTX/NVPTXAsmPrinter.cpp
index 38c5166..9bcc911 100644
--- a/llvm/lib/Target/NVPTX/NVPTXAsmPrinter.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXAsmPrinter.cpp
@@ -563,21 +563,19 @@ void NVPTXAsmPrinter::emitKernelFunctionDirectives(const Function &F,
O << ".maxntid " << Maxntidx.value_or(1) << ", " << Maxntidy.value_or(1)
<< ", " << Maxntidz.value_or(1) << "\n";
- unsigned Mincta = 0;
- if (getMinCTASm(F, Mincta))
- O << ".minnctapersm " << Mincta << "\n";
+ if (const auto Mincta = getMinCTASm(F))
+ O << ".minnctapersm " << *Mincta << "\n";
- unsigned Maxnreg = 0;
- if (getMaxNReg(F, Maxnreg))
- O << ".maxnreg " << Maxnreg << "\n";
+ if (const auto Maxnreg = getMaxNReg(F))
+ O << ".maxnreg " << *Maxnreg << "\n";
// .maxclusterrank directive requires SM_90 or higher, make sure that we
// filter it out for lower SM versions, as it causes a hard ptxas crash.
const NVPTXTargetMachine &NTM = static_cast<const NVPTXTargetMachine &>(TM);
const auto *STI = static_cast<const NVPTXSubtarget *>(NTM.getSubtargetImpl());
- unsigned Maxclusterrank = 0;
- if (getMaxClusterRank(F, Maxclusterrank) && STI->getSmVersion() >= 90)
- O << ".maxclusterrank " << Maxclusterrank << "\n";
+ if (STI->getSmVersion() >= 90)
+ if (const auto Maxclusterrank = getMaxClusterRank(F))
+ O << ".maxclusterrank " << *Maxclusterrank << "\n";
}
std::string NVPTXAsmPrinter::getVirtualRegisterName(unsigned Reg) const {
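
The AsmPrinter hunk assumes getMinCTASm, getMaxNReg and getMaxClusterRank now return std::optional&lt;unsigned&gt; instead of filling a bool-plus-out-parameter pair, so each directive collapses into a single if with an init-statement. A sketch of the pattern with a hypothetical stand-in for the metadata lookup:

#include <iostream>
#include <optional>
#include <string>

// Hypothetical stand-in for the metadata lookup; returns nullopt when the
// kernel carries no such annotation.
std::optional<unsigned> getMinCTASm(const std::string &FnName) {
  if (FnName == "annotated_kernel")
    return 4;
  return std::nullopt;
}

void emitKernelDirectives(std::ostream &O, const std::string &FnName) {
  // One declaration, one test, one use -- no dead "Mincta = 0" default.
  if (const auto Mincta = getMinCTASm(FnName))
    O << ".minnctapersm " << *Mincta << "\n";
}

int main() {
  emitKernelDirectives(std::cout, "annotated_kernel"); // .minnctapersm 4
  emitKernelDirectives(std::cout, "plain_kernel");     // prints nothing
}
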
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp b/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
index 4f0bc1a..7f942de 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
@@ -59,6 +59,7 @@ NVPTXDAGToDAGISel::NVPTXDAGToDAGISel(NVPTXTargetMachine &tm,
bool NVPTXDAGToDAGISel::runOnMachineFunction(MachineFunction &MF) {
Subtarget = &MF.getSubtarget<NVPTXSubtarget>();
+ Scopes = NVPTXScopes(MF.getFunction().getContext());
return SelectionDAGISel::runOnMachineFunction(MF);
}
@@ -106,6 +107,10 @@ void NVPTXDAGToDAGISel::Select(SDNode *N) {
if (tryStore(N))
return;
break;
+ case ISD::ATOMIC_FENCE:
+ if (tryFence(N))
+ return;
+ break;
case ISD::EXTRACT_VECTOR_ELT:
if (tryEXTRACT_VECTOR_ELEMENT(N))
return;
@@ -699,20 +704,26 @@ static unsigned int getCodeAddrSpace(MemSDNode *N) {
const Value *Src = N->getMemOperand()->getValue();
if (!Src)
- return NVPTX::PTXLdStInstCode::GENERIC;
+ return NVPTX::AddressSpace::Generic;
if (auto *PT = dyn_cast<PointerType>(Src->getType())) {
switch (PT->getAddressSpace()) {
- case llvm::ADDRESS_SPACE_LOCAL: return NVPTX::PTXLdStInstCode::LOCAL;
- case llvm::ADDRESS_SPACE_GLOBAL: return NVPTX::PTXLdStInstCode::GLOBAL;
- case llvm::ADDRESS_SPACE_SHARED: return NVPTX::PTXLdStInstCode::SHARED;
- case llvm::ADDRESS_SPACE_GENERIC: return NVPTX::PTXLdStInstCode::GENERIC;
- case llvm::ADDRESS_SPACE_PARAM: return NVPTX::PTXLdStInstCode::PARAM;
- case llvm::ADDRESS_SPACE_CONST: return NVPTX::PTXLdStInstCode::CONSTANT;
+ case llvm::ADDRESS_SPACE_LOCAL:
+ return NVPTX::AddressSpace::Local;
+ case llvm::ADDRESS_SPACE_GLOBAL:
+ return NVPTX::AddressSpace::Global;
+ case llvm::ADDRESS_SPACE_SHARED:
+ return NVPTX::AddressSpace::Shared;
+ case llvm::ADDRESS_SPACE_GENERIC:
+ return NVPTX::AddressSpace::Generic;
+ case llvm::ADDRESS_SPACE_PARAM:
+ return NVPTX::AddressSpace::Param;
+ case llvm::ADDRESS_SPACE_CONST:
+ return NVPTX::AddressSpace::Const;
default: break;
}
}
- return NVPTX::PTXLdStInstCode::GENERIC;
+ return NVPTX::AddressSpace::Generic;
}
namespace {
@@ -815,9 +826,9 @@ getOperationOrderings(MemSDNode *N, const NVPTXSubtarget *Subtarget) {
// - the "weak" memory instruction we are currently lowering to, and
// - some other instruction that preserves the side-effect, e.g.,
// a dead dummy volatile load.
- if (CodeAddrSpace == NVPTX::PTXLdStInstCode::LOCAL ||
- CodeAddrSpace == NVPTX::PTXLdStInstCode::CONSTANT ||
- CodeAddrSpace == NVPTX::PTXLdStInstCode::PARAM) {
+ if (CodeAddrSpace == NVPTX::AddressSpace::Local ||
+ CodeAddrSpace == NVPTX::AddressSpace::Const ||
+ CodeAddrSpace == NVPTX::AddressSpace::Param) {
return NVPTX::Ordering::NotAtomic;
}
@@ -842,14 +853,14 @@ getOperationOrderings(MemSDNode *N, const NVPTXSubtarget *Subtarget) {
// atomics is undefined if the generic address does not refer to a .global or
// .shared memory location.
bool AddrGenericOrGlobalOrShared =
- (CodeAddrSpace == NVPTX::PTXLdStInstCode::GENERIC ||
- CodeAddrSpace == NVPTX::PTXLdStInstCode::GLOBAL ||
- CodeAddrSpace == NVPTX::PTXLdStInstCode::SHARED);
+ (CodeAddrSpace == NVPTX::AddressSpace::Generic ||
+ CodeAddrSpace == NVPTX::AddressSpace::Global ||
+ CodeAddrSpace == NVPTX::AddressSpace::Shared);
if (!AddrGenericOrGlobalOrShared)
return NVPTX::Ordering::NotAtomic;
bool UseRelaxedMMIO =
- HasRelaxedMMIO && CodeAddrSpace == NVPTX::PTXLdStInstCode::GLOBAL;
+ HasRelaxedMMIO && CodeAddrSpace == NVPTX::AddressSpace::Global;
switch (Ordering) {
case AtomicOrdering::NotAtomic:
@@ -915,6 +926,40 @@ getOperationOrderings(MemSDNode *N, const NVPTXSubtarget *Subtarget) {
} // namespace
+NVPTX::Scope NVPTXDAGToDAGISel::getOperationScope(MemSDNode *N,
+ NVPTX::Ordering O) const {
+ switch (O) {
+ case NVPTX::Ordering::NotAtomic:
+ case NVPTX::Ordering::Volatile: // Non-atomic volatile operations
+ // NVPTX uses Thread scope as the scope of non-atomic operations.
+ return NVPTX::Scope::Thread;
+ case NVPTX::Ordering::RelaxedMMIO:
+ // RelaxedMMIO operations are always system scope.
+    // If a RelaxedMMIO order was generated from an atomic volatile operation
+    // with a narrower scope, we bump it here to system scope.
+ return NVPTX::Scope::System;
+ case NVPTX::Ordering::Relaxed:
+ case NVPTX::Ordering::Acquire:
+ case NVPTX::Ordering::Release:
+ case NVPTX::Ordering::AcquireRelease:
+ case NVPTX::Ordering::SequentiallyConsistent:
+ auto S = Scopes[N->getSyncScopeID()];
+
+ // Atomic operations must have a scope greater than thread.
+ if (S == NVPTX::Scope::Thread)
+ report_fatal_error(
+ formatv("Atomics need scope > \"{}\".", ScopeToString(S)));
+
+ // If scope is cluster, clusters must be supported.
+ if (S == NVPTX::Scope::Cluster)
+ Subtarget->failIfClustersUnsupported("cluster scope");
+
+    // If the operation is volatile, its scope is system.
+ return N->isVolatile() ? NVPTX::Scope::System : S;
+ }
+ llvm_unreachable("unhandled ordering");
+}
+
static bool canLowerToLDG(MemSDNode *N, const NVPTXSubtarget &Subtarget,
unsigned CodeAddrSpace, MachineFunction *F) {
// We use ldg (i.e. ld.global.nc) for invariant loads from the global address
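
getOperationScope above boils down to three rules: non-atomic and plain-volatile operations take thread scope, RelaxedMMIO is pinned to system scope, and real atomics use the sync scope from the IR, widened to system when the operation is also volatile (the in-tree version additionally rejects thread-scoped atomics and checks cluster support). A minimal restatement with local stand-ins for the NVPTX enums:

enum class Ordering { NotAtomic, Relaxed, Acquire, Release,
                      AcquireRelease, SequentiallyConsistent,
                      Volatile, RelaxedMMIO };
enum class Scope { Thread, Block, Cluster, Device, System };

// Requested is what the IR's syncscope mapped to; IsVolatile is the
// MemSDNode's volatile bit.
Scope operationScope(Ordering O, Scope Requested, bool IsVolatile) {
  switch (O) {
  case Ordering::NotAtomic:
  case Ordering::Volatile:
    return Scope::Thread;  // non-atomic operations take thread scope
  case Ordering::RelaxedMMIO:
    return Scope::System;  // MMIO is always system scope
  default:
    // Real atomics: a volatile qualifier widens the scope to system.
    return IsVolatile ? Scope::System : Requested;
  }
}

int main() {
  return operationScope(Ordering::Acquire, Scope::Device, false) ==
                 Scope::Device
             ? 0
             : 1;
}
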
@@ -934,7 +979,7 @@ static bool canLowerToLDG(MemSDNode *N, const NVPTXSubtarget &Subtarget,
// TODO: Infer invariance only at -O2. We still want to use ldg at -O0 for
// explicitly invariant loads because these are how clang tells us to use ldg
// when the user uses a builtin.
- if (!Subtarget.hasLDG() || CodeAddrSpace != NVPTX::PTXLdStInstCode::GLOBAL)
+ if (!Subtarget.hasLDG() || CodeAddrSpace != NVPTX::AddressSpace::Global)
return false;
if (N->isInvariant())
@@ -957,33 +1002,87 @@ static bool canLowerToLDG(MemSDNode *N, const NVPTXSubtarget &Subtarget,
});
}
-NVPTX::Ordering NVPTXDAGToDAGISel::insertMemoryInstructionFence(SDLoc DL,
- SDValue &Chain,
- MemSDNode *N) {
- // Some memory instructions - loads, stores, atomics - need an extra fence
- // instruction. Get the memory order of the instruction, and that of its
- // fence, if any.
+static unsigned int getFenceOp(NVPTX::Ordering O, NVPTX::Scope S,
+ NVPTXSubtarget const *T) {
+ if (S == NVPTX::Scope::Cluster)
+ T->failIfClustersUnsupported(".cluster scope fence");
+
+ switch (O) {
+ case NVPTX::Ordering::Acquire:
+ case NVPTX::Ordering::Release:
+ case NVPTX::Ordering::AcquireRelease: {
+ switch (S) {
+ case NVPTX::Scope::System:
+ return T->hasMemoryOrdering() ? NVPTX::atomic_thread_fence_acq_rel_sys
+ : NVPTX::INT_MEMBAR_SYS;
+ case NVPTX::Scope::Block:
+ return T->hasMemoryOrdering() ? NVPTX::atomic_thread_fence_acq_rel_cta
+ : NVPTX::INT_MEMBAR_CTA;
+ case NVPTX::Scope::Cluster:
+ return NVPTX::atomic_thread_fence_acq_rel_cluster;
+ case NVPTX::Scope::Device:
+ return T->hasMemoryOrdering() ? NVPTX::atomic_thread_fence_acq_rel_gpu
+ : NVPTX::INT_MEMBAR_GL;
+ case NVPTX::Scope::Thread:
+ report_fatal_error(
+ formatv("Unsupported scope \"{}\" for acquire/release/acq_rel fence.",
+ ScopeToString(S)));
+ }
+ }
+ case NVPTX::Ordering::SequentiallyConsistent: {
+ switch (S) {
+ case NVPTX::Scope::System:
+ return T->hasMemoryOrdering() ? NVPTX::atomic_thread_fence_seq_cst_sys
+ : NVPTX::INT_MEMBAR_SYS;
+ case NVPTX::Scope::Block:
+ return T->hasMemoryOrdering() ? NVPTX::atomic_thread_fence_seq_cst_cta
+ : NVPTX::INT_MEMBAR_CTA;
+ case NVPTX::Scope::Cluster:
+ return NVPTX::atomic_thread_fence_seq_cst_cluster;
+ case NVPTX::Scope::Device:
+ return T->hasMemoryOrdering() ? NVPTX::atomic_thread_fence_seq_cst_gpu
+ : NVPTX::INT_MEMBAR_GL;
+ case NVPTX::Scope::Thread:
+ report_fatal_error(formatv("Unsupported scope \"{}\" for seq_cst fence.",
+ ScopeToString(S)));
+ }
+ }
+ case NVPTX::Ordering::NotAtomic:
+ case NVPTX::Ordering::Relaxed:
+ case NVPTX::Ordering::Volatile:
+ case NVPTX::Ordering::RelaxedMMIO:
+ report_fatal_error(
+ formatv("Unsupported \"{}\" ordering and \"{}\" scope for fence.",
+ OrderingToString(O), ScopeToString(S)));
+ }
+ llvm_unreachable("unhandled ordering");
+}
+
+// Returns the memory order and scope of a memory instruction, and
+// inserts, before the instruction, any fence required to implement
+// its memory ordering.
+std::pair<NVPTX::Ordering, NVPTX::Scope>
+NVPTXDAGToDAGISel::insertMemoryInstructionFence(SDLoc DL, SDValue &Chain,
+ MemSDNode *N) {
auto [InstructionOrdering, FenceOrdering] =
getOperationOrderings(N, Subtarget);
+ auto Scope = getOperationScope(N, InstructionOrdering);
// If a fence is required before the operation, insert it:
switch (NVPTX::Ordering(FenceOrdering)) {
case NVPTX::Ordering::NotAtomic:
break;
case NVPTX::Ordering::SequentiallyConsistent: {
- unsigned Op = Subtarget->hasMemoryOrdering()
- ? NVPTX::atomic_thread_fence_seq_cst_sys
- : NVPTX::INT_MEMBAR_SYS;
+ auto Op = getFenceOp(FenceOrdering, Scope, Subtarget);
Chain = SDValue(CurDAG->getMachineNode(Op, DL, MVT::Other, Chain), 0);
break;
}
default:
report_fatal_error(
formatv("Unexpected fence ordering: \"{}\".",
- OrderingToCString(NVPTX::Ordering(FenceOrdering))));
+ OrderingToString(NVPTX::Ordering(FenceOrdering))));
}
-
- return InstructionOrdering;
+ return {InstructionOrdering, Scope};
}
bool NVPTXDAGToDAGISel::tryIntrinsicNoChain(SDNode *N) {
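
getFenceOp is a pure (ordering, scope) -> opcode table: the scoped fence.{acq_rel,sc}.* instructions when the subtarget has PTX memory ordering, the legacy membar.{sys,gl,cta} otherwise, and cluster fences with no fallback at all. A standalone sketch of the same table that returns the PTX mnemonics from the .td definitions instead of opcode enums (acquire and release fences also take the acq_rel form, per the switch above):

#include <stdexcept>
#include <string>

enum class Ordering { AcquireRelease, SequentiallyConsistent, Relaxed };
enum class Scope { Block, Cluster, Device, System };

std::string fenceFor(Ordering O, Scope S, bool HasMemoryOrdering) {
  if (O == Ordering::Relaxed)
    throw std::invalid_argument("no fence form for relaxed ordering");
  const bool SeqCst = (O == Ordering::SequentiallyConsistent);
  switch (S) {
  case Scope::System:
    return HasMemoryOrdering ? (SeqCst ? "fence.sc.sys" : "fence.acq_rel.sys")
                             : "membar.sys";
  case Scope::Device:
    return HasMemoryOrdering ? (SeqCst ? "fence.sc.gpu" : "fence.acq_rel.gpu")
                             : "membar.gl";
  case Scope::Block:
    return HasMemoryOrdering ? (SeqCst ? "fence.sc.cta" : "fence.acq_rel.cta")
                             : "membar.cta";
  case Scope::Cluster:
    // Cluster fences have no membar fallback: they require sm_90/ptx78.
    return SeqCst ? "fence.sc.cluster" : "fence.acq_rel.cluster";
  }
  throw std::logic_error("unhandled scope");
}

int main() {
  return fenceFor(Ordering::AcquireRelease, Scope::Device, true) ==
                 "fence.acq_rel.gpu"
             ? 0
             : 1;
}
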
@@ -1010,11 +1109,21 @@ void NVPTXDAGToDAGISel::SelectAddrSpaceCast(SDNode *N) {
AddrSpaceCastSDNode *CastN = cast<AddrSpaceCastSDNode>(N);
unsigned SrcAddrSpace = CastN->getSrcAddressSpace();
unsigned DstAddrSpace = CastN->getDestAddressSpace();
+ SDLoc DL(N);
assert(SrcAddrSpace != DstAddrSpace &&
"addrspacecast must be between different address spaces");
if (DstAddrSpace == ADDRESS_SPACE_GENERIC) {
// Specific to generic
+
+ if (TM.is64Bit() && TM.getPointerSizeInBits(SrcAddrSpace) == 32) {
+ SDValue CvtNone =
+ CurDAG->getTargetConstant(NVPTX::PTXCvtMode::NONE, DL, MVT::i32);
+ SDNode *Cvt = CurDAG->getMachineNode(NVPTX::CVT_u64_u32, DL, MVT::i64,
+ Src, CvtNone);
+ Src = SDValue(Cvt, 0);
+ }
+
unsigned Opc;
switch (SrcAddrSpace) {
default: report_fatal_error("Bad address space in addrspacecast");
@@ -1022,26 +1131,16 @@ void NVPTXDAGToDAGISel::SelectAddrSpaceCast(SDNode *N) {
Opc = TM.is64Bit() ? NVPTX::cvta_global_64 : NVPTX::cvta_global;
break;
case ADDRESS_SPACE_SHARED:
- Opc = TM.is64Bit() ? (TM.getPointerSizeInBits(SrcAddrSpace) == 32
- ? NVPTX::cvta_shared_6432
- : NVPTX::cvta_shared_64)
- : NVPTX::cvta_shared;
+ Opc = TM.is64Bit() ? NVPTX::cvta_shared_64 : NVPTX::cvta_shared;
break;
case ADDRESS_SPACE_CONST:
- Opc = TM.is64Bit() ? (TM.getPointerSizeInBits(SrcAddrSpace) == 32
- ? NVPTX::cvta_const_6432
- : NVPTX::cvta_const_64)
- : NVPTX::cvta_const;
+ Opc = TM.is64Bit() ? NVPTX::cvta_const_64 : NVPTX::cvta_const;
break;
case ADDRESS_SPACE_LOCAL:
- Opc = TM.is64Bit() ? (TM.getPointerSizeInBits(SrcAddrSpace) == 32
- ? NVPTX::cvta_local_6432
- : NVPTX::cvta_local_64)
- : NVPTX::cvta_local;
+ Opc = TM.is64Bit() ? NVPTX::cvta_local_64 : NVPTX::cvta_local;
break;
}
- ReplaceNode(N, CurDAG->getMachineNode(Opc, SDLoc(N), N->getValueType(0),
- Src));
+ ReplaceNode(N, CurDAG->getMachineNode(Opc, DL, N->getValueType(0), Src));
return;
} else {
// Generic to specific
@@ -1054,30 +1153,28 @@ void NVPTXDAGToDAGISel::SelectAddrSpaceCast(SDNode *N) {
Opc = TM.is64Bit() ? NVPTX::cvta_to_global_64 : NVPTX::cvta_to_global;
break;
case ADDRESS_SPACE_SHARED:
- Opc = TM.is64Bit() ? (TM.getPointerSizeInBits(DstAddrSpace) == 32
- ? NVPTX::cvta_to_shared_3264
- : NVPTX::cvta_to_shared_64)
- : NVPTX::cvta_to_shared;
+ Opc = TM.is64Bit() ? NVPTX::cvta_to_shared_64 : NVPTX::cvta_to_shared;
break;
case ADDRESS_SPACE_CONST:
- Opc = TM.is64Bit() ? (TM.getPointerSizeInBits(DstAddrSpace) == 32
- ? NVPTX::cvta_to_const_3264
- : NVPTX::cvta_to_const_64)
- : NVPTX::cvta_to_const;
+ Opc = TM.is64Bit() ? NVPTX::cvta_to_const_64 : NVPTX::cvta_to_const;
break;
case ADDRESS_SPACE_LOCAL:
- Opc = TM.is64Bit() ? (TM.getPointerSizeInBits(DstAddrSpace) == 32
- ? NVPTX::cvta_to_local_3264
- : NVPTX::cvta_to_local_64)
- : NVPTX::cvta_to_local;
+ Opc = TM.is64Bit() ? NVPTX::cvta_to_local_64 : NVPTX::cvta_to_local;
break;
case ADDRESS_SPACE_PARAM:
- Opc = TM.is64Bit() ? NVPTX::nvvm_ptr_gen_to_param_64
- : NVPTX::nvvm_ptr_gen_to_param;
+ Opc = TM.is64Bit() ? NVPTX::IMOV64rr : NVPTX::IMOV32rr;
break;
}
- ReplaceNode(N, CurDAG->getMachineNode(Opc, SDLoc(N), N->getValueType(0),
- Src));
+
+ SDNode *CVTA = CurDAG->getMachineNode(Opc, DL, N->getValueType(0), Src);
+ if (TM.is64Bit() && TM.getPointerSizeInBits(DstAddrSpace) == 32) {
+ SDValue CvtNone =
+ CurDAG->getTargetConstant(NVPTX::PTXCvtMode::NONE, DL, MVT::i32);
+ CVTA = CurDAG->getMachineNode(NVPTX::CVT_u32_u64, DL, MVT::i32,
+ SDValue(CVTA, 0), CvtNone);
+ }
+
+ ReplaceNode(N, CVTA);
return;
}
}
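
The addrspacecast rework above retires the *_6432/*_3264 pseudo-opcodes: with short (32-bit) pointers in a 64-bit module, the value is widened by an ordinary cvt.u64.u32 before a plain 64-bit cvta, or narrowed by cvt.u32.u64 after it. The same dataflow in scalar C++ terms (the cvta helpers are placeholders, identity here only so the sketch runs):

#include <cassert>
#include <cstdint>

// Placeholder for cvta.shared.u64; the real operation rebases the address.
uint64_t cvtaSharedToGeneric64(uint64_t P) { return P; }

// Specific -> generic with a short source pointer: widen first, then cvta.
uint64_t castSharedToGeneric(uint32_t ShortP) {
  uint64_t Wide = static_cast<uint64_t>(ShortP); // cvt.u64.u32
  return cvtaSharedToGeneric64(Wide);            // cvta.shared.u64
}

// Placeholder for cvta.to.shared.u64.
uint64_t cvtaGenericToShared64(uint64_t P) { return P; }

// Generic -> specific with a short destination: cvta first, then narrow.
uint32_t castGenericToShared(uint64_t GenericP) {
  uint64_t Wide = cvtaGenericToShared64(GenericP); // cvta.to.shared.u64
  return static_cast<uint32_t>(Wide);              // cvt.u32.u64
}

int main() {
  assert(castGenericToShared(castSharedToGeneric(0x1234u)) == 0x1234u);
  return 0;
}
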
@@ -1154,7 +1251,7 @@ bool NVPTXDAGToDAGISel::tryLoad(SDNode *N) {
SDLoc DL(N);
SDValue Chain = N->getOperand(0);
- auto InstructionOrdering = insertMemoryInstructionFence(DL, Chain, LD);
+ auto [Ordering, Scope] = insertMemoryInstructionFence(DL, Chain, LD);
// Type Setting: fromType + fromTypeWidth
//
@@ -1189,7 +1286,7 @@ bool NVPTXDAGToDAGISel::tryLoad(SDNode *N) {
std::optional<unsigned> Opcode;
MVT::SimpleValueType TargetVT = LD->getSimpleValueType(0).SimpleTy;
- SmallVector<SDValue, 12> Ops({getI32Imm(InstructionOrdering, DL),
+ SmallVector<SDValue, 12> Ops({getI32Imm(Ordering, DL), getI32Imm(Scope, DL),
getI32Imm(CodeAddrSpace, DL),
getI32Imm(VecType, DL), getI32Imm(FromType, DL),
getI32Imm(FromTypeWidth, DL)});
@@ -1266,7 +1363,7 @@ bool NVPTXDAGToDAGISel::tryLoadVector(SDNode *N) {
SDLoc DL(N);
SDValue Chain = N->getOperand(0);
- auto InstructionOrdering = insertMemoryInstructionFence(DL, Chain, MemSD);
+ auto [Ordering, Scope] = insertMemoryInstructionFence(DL, Chain, MemSD);
// Vector Setting
MVT SimpleVT = LoadedVT.getSimpleVT();
@@ -1319,7 +1416,7 @@ bool NVPTXDAGToDAGISel::tryLoadVector(SDNode *N) {
std::optional<unsigned> Opcode;
SDNode *LD;
- SmallVector<SDValue, 12> Ops({getI32Imm(InstructionOrdering, DL),
+ SmallVector<SDValue, 12> Ops({getI32Imm(Ordering, DL), getI32Imm(Scope, DL),
getI32Imm(CodeAddrSpace, DL),
getI32Imm(VecType, DL), getI32Imm(FromType, DL),
getI32Imm(FromTypeWidth, DL)});
@@ -1895,7 +1992,7 @@ bool NVPTXDAGToDAGISel::tryStore(SDNode *N) {
SDLoc DL(N);
SDValue Chain = ST->getChain();
- auto InstructionOrdering = insertMemoryInstructionFence(DL, Chain, ST);
+ auto [Ordering, Scope] = insertMemoryInstructionFence(DL, Chain, ST);
// Vector Setting
MVT SimpleVT = StoreVT.getSimpleVT();
@@ -1923,10 +2020,10 @@ bool NVPTXDAGToDAGISel::tryStore(SDNode *N) {
MVT::SimpleValueType SourceVT =
Value.getNode()->getSimpleValueType(0).SimpleTy;
- SmallVector<SDValue, 12> Ops({Value, getI32Imm(InstructionOrdering, DL),
- getI32Imm(CodeAddrSpace, DL),
- getI32Imm(VecType, DL), getI32Imm(ToType, DL),
- getI32Imm(ToTypeWidth, DL)});
+ SmallVector<SDValue, 12> Ops(
+ {Value, getI32Imm(Ordering, DL), getI32Imm(Scope, DL),
+ getI32Imm(CodeAddrSpace, DL), getI32Imm(VecType, DL),
+ getI32Imm(ToType, DL), getI32Imm(ToTypeWidth, DL)});
if (SelectDirectAddr(BasePtr, Addr)) {
Opcode = pickOpcodeForVT(SourceVT, NVPTX::ST_i8_avar, NVPTX::ST_i16_avar,
@@ -1996,7 +2093,7 @@ bool NVPTXDAGToDAGISel::tryStoreVector(SDNode *N) {
// Address Space Setting
unsigned CodeAddrSpace = getCodeAddrSpace(MemSD);
- if (CodeAddrSpace == NVPTX::PTXLdStInstCode::CONSTANT) {
+ if (CodeAddrSpace == NVPTX::AddressSpace::Const) {
report_fatal_error("Cannot store to pointer that points to constant "
"memory space");
}
@@ -2005,7 +2102,7 @@ bool NVPTXDAGToDAGISel::tryStoreVector(SDNode *N) {
SDLoc DL(N);
SDValue Chain = N->getOperand(0);
- auto InstructionOrdering = insertMemoryInstructionFence(DL, Chain, MemSD);
+ auto [Ordering, Scope] = insertMemoryInstructionFence(DL, Chain, MemSD);
// Type Setting: toType + toTypeWidth
// - for integer type, always use 'u'
@@ -2044,9 +2141,9 @@ bool NVPTXDAGToDAGISel::tryStoreVector(SDNode *N) {
ToTypeWidth = 32;
}
- Ops.append({getI32Imm(InstructionOrdering, DL), getI32Imm(CodeAddrSpace, DL),
- getI32Imm(VecType, DL), getI32Imm(ToType, DL),
- getI32Imm(ToTypeWidth, DL)});
+ Ops.append({getI32Imm(Ordering, DL), getI32Imm(Scope, DL),
+ getI32Imm(CodeAddrSpace, DL), getI32Imm(VecType, DL),
+ getI32Imm(ToType, DL), getI32Imm(ToTypeWidth, DL)});
if (SelectDirectAddr(N2, Addr)) {
switch (N->getOpcode()) {
@@ -4064,3 +4161,41 @@ unsigned NVPTXDAGToDAGISel::GetConvertOpcode(MVT DestTy, MVT SrcTy,
}
}
}
+
+bool NVPTXDAGToDAGISel::tryFence(SDNode *N) {
+ SDLoc DL(N);
+ assert(N->getOpcode() == ISD::ATOMIC_FENCE);
+ unsigned int FenceOp =
+ getFenceOp(NVPTX::Ordering(N->getConstantOperandVal(1)),
+ Scopes[N->getConstantOperandVal(2)], Subtarget);
+ SDValue Chain = N->getOperand(0);
+ SDNode *FenceNode = CurDAG->getMachineNode(FenceOp, DL, MVT::Other, Chain);
+ ReplaceNode(N, FenceNode);
+ return true;
+}
+
+NVPTXScopes::NVPTXScopes(LLVMContext &C) {
+ Scopes[C.getOrInsertSyncScopeID("singlethread")] = NVPTX::Scope::Thread;
+ Scopes[C.getOrInsertSyncScopeID("")] = NVPTX::Scope::System;
+ Scopes[C.getOrInsertSyncScopeID("block")] = NVPTX::Scope::Block;
+ Scopes[C.getOrInsertSyncScopeID("cluster")] = NVPTX::Scope::Cluster;
+ Scopes[C.getOrInsertSyncScopeID("device")] = NVPTX::Scope::Device;
+}
+
+NVPTX::Scope NVPTXScopes::operator[](SyncScope::ID ID) const {
+ if (Scopes.empty())
+ llvm_unreachable("NVPTX Scopes must be initialized before calling "
+ "NVPTXScopes::operator[]");
+
+ auto S = Scopes.find(ID);
+ if (S == Scopes.end()) {
+ // TODO:
+ // - Add API to LLVMContext to get the name of a single scope.
+ // - Use that API here to print an error containing the name
+ // of this Unknown ID.
+ report_fatal_error(formatv("Could not find scope ID={}.", int(ID)));
+ }
+ return S->second;
+}
+
+bool NVPTXScopes::empty() const { return Scopes.empty(); }
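
NVPTXScopes is a small SyncScope::ID -> NVPTX::Scope table seeded from the context's named scopes, with LLVM's unnamed default scope mapping to system. An illustrative stand-in keyed on scope names rather than IDs (the in-tree version keys on the IDs returned by LLVMContext::getOrInsertSyncScopeID):

#include <map>
#include <stdexcept>
#include <string>

enum class Scope { Thread, Block, Cluster, Device, System };

struct ScopeTable {
  std::map<std::string, Scope> Scopes{
      {"singlethread", Scope::Thread},
      {"", Scope::System}, // LLVM's unnamed default scope is system-wide
      {"block", Scope::Block},
      {"cluster", Scope::Cluster},
      {"device", Scope::Device}};

  Scope operator[](const std::string &Name) const {
    auto It = Scopes.find(Name);
    if (It == Scopes.end())
      throw std::invalid_argument("could not find scope: " + Name);
    return It->second;
  }
};

int main() {
  ScopeTable T;
  return T["device"] == Scope::Device ? 0 : 1;
}
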
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.h b/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.h
index eac4056..c128c08 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.h
+++ b/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.h
@@ -18,13 +18,25 @@
#include "NVPTXISelLowering.h"
#include "NVPTXRegisterInfo.h"
#include "NVPTXTargetMachine.h"
+#include "llvm/ADT/MapVector.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/Compiler.h"
namespace llvm {
+struct NVPTXScopes {
+ NVPTXScopes() = default;
+ NVPTXScopes(LLVMContext &C);
+ NVPTX::Scope operator[](SyncScope::ID ID) const;
+ bool empty() const;
+
+private:
+ SmallMapVector<SyncScope::ID, NVPTX::Scope, 8> Scopes{};
+};
+
class LLVM_LIBRARY_VISIBILITY NVPTXDAGToDAGISel : public SelectionDAGISel {
const NVPTXTargetMachine &TM;
@@ -38,6 +50,8 @@ class LLVM_LIBRARY_VISIBILITY NVPTXDAGToDAGISel : public SelectionDAGISel {
bool allowUnsafeFPMath() const;
bool doRsqrtOpt() const;
+ NVPTXScopes Scopes{};
+
public:
NVPTXDAGToDAGISel() = delete;
@@ -66,6 +80,7 @@ private:
bool tryLoadParam(SDNode *N);
bool tryStoreRetval(SDNode *N);
bool tryStoreParam(SDNode *N);
+ bool tryFence(SDNode *N);
void SelectAddrSpaceCast(SDNode *N);
bool tryTextureIntrinsic(SDNode *N);
bool trySurfaceIntrinsic(SDNode *N);
@@ -100,8 +115,13 @@ private:
static unsigned GetConvertOpcode(MVT DestTy, MVT SrcTy, LoadSDNode *N);
- NVPTX::Ordering insertMemoryInstructionFence(SDLoc DL, SDValue &Chain,
- MemSDNode *N);
+  // Returns the memory order and scope that the PTX memory instruction should
+  // use, and inserts an appropriate fence before the memory instruction if
+  // one is needed to implement the instruction's memory order. Any fences
+  // required after the instruction must be handled elsewhere.
+ std::pair<NVPTX::Ordering, NVPTX::Scope>
+ insertMemoryInstructionFence(SDLoc DL, SDValue &Chain, MemSDNode *N);
+ NVPTX::Scope getOperationScope(MemSDNode *N, NVPTX::Ordering O) const;
};
class NVPTXDAGToDAGISelLegacy : public SelectionDAGISelLegacy {
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
index 2688834..8812136 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
@@ -594,20 +594,13 @@ NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM,
setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);
setOperationAction(ISD::BITREVERSE, MVT::i64, Legal);
- // TODO: we may consider expanding ROTL/ROTR on older GPUs. Currently on GPUs
- // that don't have h/w rotation we lower them to multi-instruction assembly.
- // See ROT*_sw in NVPTXIntrInfo.td
- setOperationAction(ISD::ROTL, MVT::i64, Legal);
- setOperationAction(ISD::ROTR, MVT::i64, Legal);
- setOperationAction(ISD::ROTL, MVT::i32, Legal);
- setOperationAction(ISD::ROTR, MVT::i32, Legal);
-
- setOperationAction(ISD::ROTL, MVT::i16, Expand);
- setOperationAction(ISD::ROTL, MVT::v2i16, Expand);
- setOperationAction(ISD::ROTR, MVT::i16, Expand);
- setOperationAction(ISD::ROTR, MVT::v2i16, Expand);
- setOperationAction(ISD::ROTL, MVT::i8, Expand);
- setOperationAction(ISD::ROTR, MVT::i8, Expand);
+ setOperationAction({ISD::ROTL, ISD::ROTR},
+ {MVT::i8, MVT::i16, MVT::v2i16, MVT::i32, MVT::i64},
+ Expand);
+
+ if (STI.hasHWROT32())
+ setOperationAction({ISD::FSHL, ISD::FSHR}, MVT::i32, Legal);
+
setOperationAction(ISD::BSWAP, MVT::i16, Expand);
setOperationAction(ISD::BR_JT, MVT::Other, Custom);
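
The ISelLowering change leans on the identity rotl(x, n) == fshl(x, x, n) (and its rotr/fshr mirror): once FSHL/FSHR are Legal on hardware with shf.*.wrap.b32, the generic legalizer can expand rotates into funnel shifts, and the hand-written ROT* pseudo-instructions removed below become dead. A quick standalone check of the identity:

#include <cassert>
#include <cstdint>

// fshl on 32-bit values, with the shift amount wrapped mod 32 the way
// shf.l.wrap.b32 wraps it.
uint32_t fshl32(uint32_t Hi, uint32_t Lo, uint32_t Amt) {
  Amt &= 31;
  return Amt ? (Hi << Amt) | (Lo >> (32 - Amt)) : Hi;
}

uint32_t rotl32(uint32_t X, uint32_t Amt) { return fshl32(X, X, Amt); }

int main() {
  assert(rotl32(0x80000001u, 1) == 0x00000003u);
  assert(rotl32(0x12345678u, 32) == 0x12345678u); // amount wraps to 0
  assert(rotl32(0xDEADBEEFu, 8) == 0xADBEEFDEu);
  return 0;
}
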
diff --git a/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td b/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
index b7e2108..c3a8a77 100644
--- a/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
+++ b/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
@@ -174,10 +174,6 @@ def hasSM90a : Predicate<"Subtarget->getFullSmVersion() == 901">;
def hasSHFL : Predicate<"!(Subtarget->getSmVersion() >= 70"
"&& Subtarget->getPTXVersion() >= 64)">;
-def useShortPtrLocal : Predicate<"TM.is64Bit() && TM.getPointerSizeInBits(ADDRESS_SPACE_LOCAL) == 32">;
-def useShortPtrShared : Predicate<"TM.is64Bit() && TM.getPointerSizeInBits(ADDRESS_SPACE_SHARED) == 32">;
-def useShortPtrConst : Predicate<"TM.is64Bit() && TM.getPointerSizeInBits(ADDRESS_SPACE_CONST) == 32">;
-
def useFP16Math: Predicate<"Subtarget->allowFP16Math()">;
def hasBF16Math: Predicate<"Subtarget->hasBF16Math()">;
@@ -1665,167 +1661,6 @@ def BREV64 :
"brev.b64 \t$dst, $a;",
[(set Int64Regs:$dst, (bitreverse Int64Regs:$a))]>;
-//
-// Rotate: Use ptx shf instruction if available.
-//
-
-// 32 bit r2 = rotl r1, n
-// =>
-// r2 = shf.l r1, r1, n
-def ROTL32imm_hw :
- NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src, i32imm:$amt),
- "shf.l.wrap.b32 \t$dst, $src, $src, $amt;",
- [(set Int32Regs:$dst, (rotl (i32 Int32Regs:$src), (i32 imm:$amt)))]>,
- Requires<[hasHWROT32]>;
-
-def ROTL32reg_hw :
- NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src, Int32Regs:$amt),
- "shf.l.wrap.b32 \t$dst, $src, $src, $amt;",
- [(set Int32Regs:$dst, (rotl (i32 Int32Regs:$src), (i32 Int32Regs:$amt)))]>,
- Requires<[hasHWROT32]>;
-
-// 32 bit r2 = rotr r1, n
-// =>
-// r2 = shf.r r1, r1, n
-def ROTR32imm_hw :
- NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src, i32imm:$amt),
- "shf.r.wrap.b32 \t$dst, $src, $src, $amt;",
- [(set Int32Regs:$dst, (rotr (i32 Int32Regs:$src), (i32 imm:$amt)))]>,
- Requires<[hasHWROT32]>;
-
-def ROTR32reg_hw :
- NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src, Int32Regs:$amt),
- "shf.r.wrap.b32 \t$dst, $src, $src, $amt;",
- [(set Int32Regs:$dst, (rotr (i32 Int32Regs:$src), (i32 Int32Regs:$amt)))]>,
- Requires<[hasHWROT32]>;
-
-// 32-bit software rotate by immediate. $amt2 should equal 32 - $amt1.
-def ROT32imm_sw :
- NVPTXInst<(outs Int32Regs:$dst),
- (ins Int32Regs:$src, i32imm:$amt1, i32imm:$amt2),
- "{{\n\t"
- ".reg .b32 %lhs;\n\t"
- ".reg .b32 %rhs;\n\t"
- "shl.b32 \t%lhs, $src, $amt1;\n\t"
- "shr.b32 \t%rhs, $src, $amt2;\n\t"
- "add.u32 \t$dst, %lhs, %rhs;\n\t"
- "}}",
- []>;
-
-def SUB_FRM_32 : SDNodeXForm<imm, [{
- return CurDAG->getTargetConstant(32 - N->getZExtValue(), SDLoc(N), MVT::i32);
-}]>;
-
-def : Pat<(rotl (i32 Int32Regs:$src), (i32 imm:$amt)),
- (ROT32imm_sw Int32Regs:$src, imm:$amt, (SUB_FRM_32 node:$amt))>,
- Requires<[noHWROT32]>;
-def : Pat<(rotr (i32 Int32Regs:$src), (i32 imm:$amt)),
- (ROT32imm_sw Int32Regs:$src, (SUB_FRM_32 node:$amt), imm:$amt)>,
- Requires<[noHWROT32]>;
-
-// 32-bit software rotate left by register.
-def ROTL32reg_sw :
- NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src, Int32Regs:$amt),
- "{{\n\t"
- ".reg .b32 %lhs;\n\t"
- ".reg .b32 %rhs;\n\t"
- ".reg .b32 %amt2;\n\t"
- "shl.b32 \t%lhs, $src, $amt;\n\t"
- "sub.s32 \t%amt2, 32, $amt;\n\t"
- "shr.b32 \t%rhs, $src, %amt2;\n\t"
- "add.u32 \t$dst, %lhs, %rhs;\n\t"
- "}}",
- [(set Int32Regs:$dst, (rotl (i32 Int32Regs:$src), (i32 Int32Regs:$amt)))]>,
- Requires<[noHWROT32]>;
-
-// 32-bit software rotate right by register.
-def ROTR32reg_sw :
- NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src, Int32Regs:$amt),
- "{{\n\t"
- ".reg .b32 %lhs;\n\t"
- ".reg .b32 %rhs;\n\t"
- ".reg .b32 %amt2;\n\t"
- "shr.b32 \t%lhs, $src, $amt;\n\t"
- "sub.s32 \t%amt2, 32, $amt;\n\t"
- "shl.b32 \t%rhs, $src, %amt2;\n\t"
- "add.u32 \t$dst, %lhs, %rhs;\n\t"
- "}}",
- [(set Int32Regs:$dst, (rotr (i32 Int32Regs:$src), (i32 Int32Regs:$amt)))]>,
- Requires<[noHWROT32]>;
-
-// 64-bit software rotate by immediate. $amt2 should equal 64 - $amt1.
-def ROT64imm_sw :
- NVPTXInst<(outs Int64Regs:$dst),
- (ins Int64Regs:$src, i32imm:$amt1, i32imm:$amt2),
- "{{\n\t"
- ".reg .b64 %lhs;\n\t"
- ".reg .b64 %rhs;\n\t"
- "shl.b64 \t%lhs, $src, $amt1;\n\t"
- "shr.b64 \t%rhs, $src, $amt2;\n\t"
- "add.u64 \t$dst, %lhs, %rhs;\n\t"
- "}}",
- []>;
-
-def SUB_FRM_64 : SDNodeXForm<imm, [{
- return CurDAG->getTargetConstant(64-N->getZExtValue(), SDLoc(N), MVT::i32);
-}]>;
-
-def : Pat<(rotl Int64Regs:$src, (i32 imm:$amt)),
- (ROT64imm_sw Int64Regs:$src, imm:$amt, (SUB_FRM_64 node:$amt))>;
-def : Pat<(rotr Int64Regs:$src, (i32 imm:$amt)),
- (ROT64imm_sw Int64Regs:$src, (SUB_FRM_64 node:$amt), imm:$amt)>;
-
-// 64-bit software rotate left by register.
-def ROTL64reg_sw :
- NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$src, Int32Regs:$amt),
- "{{\n\t"
- ".reg .b64 %lhs;\n\t"
- ".reg .b64 %rhs;\n\t"
- ".reg .u32 %amt2;\n\t"
- "and.b32 \t%amt2, $amt, 63;\n\t"
- "shl.b64 \t%lhs, $src, %amt2;\n\t"
- "sub.u32 \t%amt2, 64, %amt2;\n\t"
- "shr.b64 \t%rhs, $src, %amt2;\n\t"
- "add.u64 \t$dst, %lhs, %rhs;\n\t"
- "}}",
- [(set Int64Regs:$dst, (rotl Int64Regs:$src, (i32 Int32Regs:$amt)))]>;
-
-def ROTR64reg_sw :
- NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$src, Int32Regs:$amt),
- "{{\n\t"
- ".reg .b64 %lhs;\n\t"
- ".reg .b64 %rhs;\n\t"
- ".reg .u32 %amt2;\n\t"
- "and.b32 \t%amt2, $amt, 63;\n\t"
- "shr.b64 \t%lhs, $src, %amt2;\n\t"
- "sub.u32 \t%amt2, 64, %amt2;\n\t"
- "shl.b64 \t%rhs, $src, %amt2;\n\t"
- "add.u64 \t$dst, %lhs, %rhs;\n\t"
- "}}",
- [(set Int64Regs:$dst, (rotr Int64Regs:$src, (i32 Int32Regs:$amt)))]>;
-
-//
-// Funnnel shift in clamp mode
-//
-
-// Create SDNodes so they can be used in the DAG code, e.g.
-// NVPTXISelLowering (LowerShiftLeftParts and LowerShiftRightParts)
-def FUN_SHFL_CLAMP : SDNode<"NVPTXISD::FUN_SHFL_CLAMP", SDTIntShiftDOp, []>;
-def FUN_SHFR_CLAMP : SDNode<"NVPTXISD::FUN_SHFR_CLAMP", SDTIntShiftDOp, []>;
-
-def FUNSHFLCLAMP :
- NVPTXInst<(outs Int32Regs:$dst),
- (ins Int32Regs:$lo, Int32Regs:$hi, Int32Regs:$amt),
- "shf.l.clamp.b32 \t$dst, $lo, $hi, $amt;",
- [(set Int32Regs:$dst,
- (FUN_SHFL_CLAMP (i32 Int32Regs:$lo), (i32 Int32Regs:$hi), (i32 Int32Regs:$amt)))]>;
-
-def FUNSHFRCLAMP :
- NVPTXInst<(outs Int32Regs:$dst),
- (ins Int32Regs:$lo, Int32Regs:$hi, Int32Regs:$amt),
- "shf.r.clamp.b32 \t$dst, $lo, $hi, $amt;",
- [(set Int32Regs:$dst,
- (FUN_SHFR_CLAMP (i32 Int32Regs:$lo), (i32 Int32Regs:$hi), (i32 Int32Regs:$amt)))]>;
//
// BFE - bit-field extract
@@ -2971,39 +2806,39 @@ foreach vt = [v2f16, v2bf16, v2i16, v4i8] in {
multiclass LD<NVPTXRegClass regclass> {
def _avar : NVPTXInst<
(outs regclass:$dst),
- (ins LdStCode:$sem, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+ (ins LdStCode:$sem, LdStCode:$scope, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
i32imm:$fromWidth, imem:$addr),
- "ld${sem:sem}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "ld${sem:sem}${scope:scope}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
"\t$dst, [$addr];", []>;
def _areg : NVPTXInst<
(outs regclass:$dst),
- (ins LdStCode:$sem, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+ (ins LdStCode:$sem, LdStCode:$scope, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
i32imm:$fromWidth, Int32Regs:$addr),
- "ld${sem:sem}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "ld${sem:sem}${scope:scope}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
"\t$dst, [$addr];", []>;
def _areg_64 : NVPTXInst<
(outs regclass:$dst),
- (ins LdStCode:$sem, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+ (ins LdStCode:$sem, LdStCode:$scope, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
i32imm:$fromWidth, Int64Regs:$addr),
- "ld${sem:sem}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "ld${sem:sem}${scope:scope}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
"\t$dst, [$addr];", []>;
def _ari : NVPTXInst<
(outs regclass:$dst),
- (ins LdStCode:$sem, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+ (ins LdStCode:$sem, LdStCode:$scope, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
i32imm:$fromWidth, Int32Regs:$addr, i32imm:$offset),
- "ld${sem:sem}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "ld${sem:sem}${scope:scope}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
"\t$dst, [$addr+$offset];", []>;
def _ari_64 : NVPTXInst<
(outs regclass:$dst),
- (ins LdStCode:$sem, LdStCode:$addsp, LdStCode:$Vec,
+ (ins LdStCode:$sem, LdStCode:$scope, LdStCode:$addsp, LdStCode:$Vec,
LdStCode:$Sign, i32imm:$fromWidth, Int64Regs:$addr, i32imm:$offset),
- "ld${sem:sem}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "ld${sem:sem}${scope:scope}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
"\t$dst, [$addr+$offset];", []>;
def _asi : NVPTXInst<
(outs regclass:$dst),
- (ins LdStCode:$sem, LdStCode:$addsp, LdStCode:$Vec,
+ (ins LdStCode:$sem, LdStCode:$scope, LdStCode:$addsp, LdStCode:$Vec,
LdStCode:$Sign, i32imm:$fromWidth, imem:$addr, i32imm:$offset),
- "ld${sem:sem}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "ld${sem:sem}${scope:scope}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
"\t$dst, [$addr+$offset];", []>;
}
@@ -3019,39 +2854,42 @@ let mayLoad=1, hasSideEffects=0 in {
multiclass ST<NVPTXRegClass regclass> {
def _avar : NVPTXInst<
(outs),
- (ins regclass:$src, LdStCode:$sem, LdStCode:$addsp, LdStCode:$Vec,
- LdStCode:$Sign, i32imm:$toWidth, imem:$addr),
- "st${sem:sem}${addsp:addsp}${Vec:vec}.${Sign:sign}$toWidth"
+ (ins regclass:$src, LdStCode:$sem, LdStCode:$scope, LdStCode:$addsp,
+ LdStCode:$Vec, LdStCode:$Sign, i32imm:$toWidth, imem:$addr),
+ "st${sem:sem}${scope:scope}${addsp:addsp}${Vec:vec}.${Sign:sign}$toWidth"
" \t[$addr], $src;", []>;
def _areg : NVPTXInst<
(outs),
- (ins regclass:$src, LdStCode:$sem, LdStCode:$addsp,
+ (ins regclass:$src, LdStCode:$sem, LdStCode:$scope, LdStCode:$addsp,
LdStCode:$Vec, LdStCode:$Sign, i32imm:$toWidth, Int32Regs:$addr),
- "st${sem:sem}${addsp:addsp}${Vec:vec}.${Sign:sign}$toWidth"
+ "st${sem:sem}${scope:scope}${addsp:addsp}${Vec:vec}.${Sign:sign}$toWidth"
" \t[$addr], $src;", []>;
def _areg_64 : NVPTXInst<
(outs),
- (ins regclass:$src, LdStCode:$sem, LdStCode:$addsp, LdStCode:$Vec,
- LdStCode:$Sign, i32imm:$toWidth, Int64Regs:$addr),
- "st${sem:sem}${addsp:addsp}${Vec:vec}.${Sign:sign}$toWidth"
+ (ins regclass:$src, LdStCode:$sem, LdStCode:$scope, LdStCode:$addsp,
+ LdStCode:$Vec, LdStCode:$Sign, i32imm:$toWidth, Int64Regs:$addr),
+ "st${sem:sem}${scope:scope}${addsp:addsp}${Vec:vec}.${Sign:sign}$toWidth"
" \t[$addr], $src;", []>;
def _ari : NVPTXInst<
(outs),
- (ins regclass:$src, LdStCode:$sem, LdStCode:$addsp, LdStCode:$Vec,
- LdStCode:$Sign, i32imm:$toWidth, Int32Regs:$addr, i32imm:$offset),
- "st${sem:sem}${addsp:addsp}${Vec:vec}.${Sign:sign}$toWidth"
+ (ins regclass:$src, LdStCode:$sem, LdStCode:$scope, LdStCode:$addsp,
+ LdStCode:$Vec, LdStCode:$Sign, i32imm:$toWidth, Int32Regs:$addr,
+ i32imm:$offset),
+ "st${sem:sem}${scope:scope}${addsp:addsp}${Vec:vec}.${Sign:sign}$toWidth"
" \t[$addr+$offset], $src;", []>;
def _ari_64 : NVPTXInst<
(outs),
- (ins regclass:$src, LdStCode:$sem, LdStCode:$addsp, LdStCode:$Vec,
- LdStCode:$Sign, i32imm:$toWidth, Int64Regs:$addr, i32imm:$offset),
- "st${sem:sem}${addsp:addsp}${Vec:vec}.${Sign:sign}$toWidth"
+ (ins regclass:$src, LdStCode:$sem, LdStCode:$scope, LdStCode:$addsp,
+ LdStCode:$Vec, LdStCode:$Sign, i32imm:$toWidth, Int64Regs:$addr,
+ i32imm:$offset),
+ "st${sem:sem}${scope:scope}${addsp:addsp}${Vec:vec}.${Sign:sign}$toWidth"
" \t[$addr+$offset], $src;", []>;
def _asi : NVPTXInst<
(outs),
- (ins regclass:$src, LdStCode:$sem, LdStCode:$addsp, LdStCode:$Vec,
- LdStCode:$Sign, i32imm:$toWidth, imem:$addr, i32imm:$offset),
- "st${sem:sem}${addsp:addsp}${Vec:vec}.${Sign:sign}$toWidth"
+ (ins regclass:$src, LdStCode:$sem, LdStCode:$scope, LdStCode:$addsp,
+ LdStCode:$Vec, LdStCode:$Sign, i32imm:$toWidth, imem:$addr,
+ i32imm:$offset),
+ "st${sem:sem}${scope:scope}${addsp:addsp}${Vec:vec}.${Sign:sign}$toWidth"
" \t[$addr+$offset], $src;", []>;
}
@@ -3070,75 +2908,75 @@ let mayStore=1, hasSideEffects=0 in {
multiclass LD_VEC<NVPTXRegClass regclass> {
def _v2_avar : NVPTXInst<
(outs regclass:$dst1, regclass:$dst2),
- (ins LdStCode:$sem, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
- i32imm:$fromWidth, imem:$addr),
- "ld${sem:sem}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ (ins LdStCode:$sem, LdStCode:$scope, LdStCode:$addsp, LdStCode:$Vec,
+ LdStCode:$Sign, i32imm:$fromWidth, imem:$addr),
+ "ld${sem:sem}${scope:scope}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
"\t{{$dst1, $dst2}}, [$addr];", []>;
def _v2_areg : NVPTXInst<
(outs regclass:$dst1, regclass:$dst2),
- (ins LdStCode:$sem, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
- i32imm:$fromWidth, Int32Regs:$addr),
- "ld${sem:sem}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ (ins LdStCode:$sem, LdStCode:$scope, LdStCode:$addsp, LdStCode:$Vec,
+ LdStCode:$Sign, i32imm:$fromWidth, Int32Regs:$addr),
+ "ld${sem:sem}${scope:scope}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
"\t{{$dst1, $dst2}}, [$addr];", []>;
def _v2_areg_64 : NVPTXInst<
(outs regclass:$dst1, regclass:$dst2),
- (ins LdStCode:$sem, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
- i32imm:$fromWidth, Int64Regs:$addr),
- "ld${sem:sem}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ (ins LdStCode:$sem, LdStCode:$scope, LdStCode:$addsp, LdStCode:$Vec,
+ LdStCode:$Sign, i32imm:$fromWidth, Int64Regs:$addr),
+ "ld${sem:sem}${scope:scope}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
"\t{{$dst1, $dst2}}, [$addr];", []>;
def _v2_ari : NVPTXInst<
(outs regclass:$dst1, regclass:$dst2),
- (ins LdStCode:$sem, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
- i32imm:$fromWidth, Int32Regs:$addr, i32imm:$offset),
- "ld${sem:sem}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ (ins LdStCode:$sem, LdStCode:$scope, LdStCode:$addsp, LdStCode:$Vec,
+ LdStCode:$Sign, i32imm:$fromWidth, Int32Regs:$addr, i32imm:$offset),
+ "ld${sem:sem}${scope:scope}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
"\t{{$dst1, $dst2}}, [$addr+$offset];", []>;
def _v2_ari_64 : NVPTXInst<
(outs regclass:$dst1, regclass:$dst2),
- (ins LdStCode:$sem, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
- i32imm:$fromWidth, Int64Regs:$addr, i32imm:$offset),
- "ld${sem:sem}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ (ins LdStCode:$sem, LdStCode:$scope, LdStCode:$addsp, LdStCode:$Vec,
+ LdStCode:$Sign, i32imm:$fromWidth, Int64Regs:$addr, i32imm:$offset),
+ "ld${sem:sem}${scope:scope}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
"\t{{$dst1, $dst2}}, [$addr+$offset];", []>;
def _v2_asi : NVPTXInst<
(outs regclass:$dst1, regclass:$dst2),
- (ins LdStCode:$sem, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
- i32imm:$fromWidth, imem:$addr, i32imm:$offset),
- "ld${sem:sem}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ (ins LdStCode:$sem, LdStCode:$scope, LdStCode:$addsp, LdStCode:$Vec,
+ LdStCode:$Sign, i32imm:$fromWidth, imem:$addr, i32imm:$offset),
+ "ld${sem:sem}${scope:scope}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
"\t{{$dst1, $dst2}}, [$addr+$offset];", []>;
def _v4_avar : NVPTXInst<
(outs regclass:$dst1, regclass:$dst2, regclass:$dst3, regclass:$dst4),
- (ins LdStCode:$sem, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
- i32imm:$fromWidth, imem:$addr),
- "ld${sem:sem}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ (ins LdStCode:$sem, LdStCode:$scope, LdStCode:$addsp, LdStCode:$Vec,
+ LdStCode:$Sign, i32imm:$fromWidth, imem:$addr),
+ "ld${sem:sem}${scope:scope}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
"\t{{$dst1, $dst2, $dst3, $dst4}}, [$addr];", []>;
def _v4_areg : NVPTXInst<
(outs regclass:$dst1, regclass:$dst2, regclass:$dst3, regclass:$dst4),
- (ins LdStCode:$sem, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
- i32imm:$fromWidth, Int32Regs:$addr),
- "ld${sem:sem}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ (ins LdStCode:$sem, LdStCode:$scope, LdStCode:$addsp, LdStCode:$Vec,
+ LdStCode:$Sign, i32imm:$fromWidth, Int32Regs:$addr),
+ "ld${sem:sem}${scope:scope}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
"\t{{$dst1, $dst2, $dst3, $dst4}}, [$addr];", []>;
def _v4_areg_64 : NVPTXInst<
(outs regclass:$dst1, regclass:$dst2, regclass:$dst3, regclass:$dst4),
- (ins LdStCode:$sem, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
- i32imm:$fromWidth, Int64Regs:$addr),
- "ld${sem:sem}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ (ins LdStCode:$sem, LdStCode:$scope, LdStCode:$addsp, LdStCode:$Vec,
+ LdStCode:$Sign, i32imm:$fromWidth, Int64Regs:$addr),
+ "ld${sem:sem}${scope:scope}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
"\t{{$dst1, $dst2, $dst3, $dst4}}, [$addr];", []>;
def _v4_ari : NVPTXInst<
(outs regclass:$dst1, regclass:$dst2, regclass:$dst3, regclass:$dst4),
- (ins LdStCode:$sem, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
- i32imm:$fromWidth, Int32Regs:$addr, i32imm:$offset),
- "ld${sem:sem}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ (ins LdStCode:$sem, LdStCode:$scope, LdStCode:$addsp, LdStCode:$Vec,
+ LdStCode:$Sign, i32imm:$fromWidth, Int32Regs:$addr, i32imm:$offset),
+ "ld${sem:sem}${scope:scope}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
"\t{{$dst1, $dst2, $dst3, $dst4}}, [$addr+$offset];", []>;
def _v4_ari_64 : NVPTXInst<
(outs regclass:$dst1, regclass:$dst2, regclass:$dst3, regclass:$dst4),
- (ins LdStCode:$sem, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
- i32imm:$fromWidth, Int64Regs:$addr, i32imm:$offset),
- "ld${sem:sem}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ (ins LdStCode:$sem, LdStCode:$scope, LdStCode:$addsp, LdStCode:$Vec,
+ LdStCode:$Sign, i32imm:$fromWidth, Int64Regs:$addr, i32imm:$offset),
+ "ld${sem:sem}${scope:scope}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
"\t{{$dst1, $dst2, $dst3, $dst4}}, [$addr+$offset];", []>;
def _v4_asi : NVPTXInst<
(outs regclass:$dst1, regclass:$dst2, regclass:$dst3, regclass:$dst4),
- (ins LdStCode:$sem, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
- i32imm:$fromWidth, imem:$addr, i32imm:$offset),
- "ld${sem:sem}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ (ins LdStCode:$sem, LdStCode:$scope, LdStCode:$addsp, LdStCode:$Vec,
+ LdStCode:$Sign, i32imm:$fromWidth, imem:$addr, i32imm:$offset),
+ "ld${sem:sem}${scope:scope}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
"\t{{$dst1, $dst2, $dst3, $dst4}}, [$addr+$offset];", []>;
}
let mayLoad=1, hasSideEffects=0 in {
@@ -3153,84 +2991,87 @@ let mayLoad=1, hasSideEffects=0 in {
multiclass ST_VEC<NVPTXRegClass regclass> {
def _v2_avar : NVPTXInst<
(outs),
- (ins regclass:$src1, regclass:$src2, LdStCode:$sem, LdStCode:$addsp,
- LdStCode:$Vec, LdStCode:$Sign, i32imm:$fromWidth, imem:$addr),
- "st${sem:sem}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ (ins regclass:$src1, regclass:$src2, LdStCode:$sem, LdStCode:$scope,
+ LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign, i32imm:$fromWidth,
+ imem:$addr),
+ "st${sem:sem}${scope:scope}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
"\t[$addr], {{$src1, $src2}};", []>;
def _v2_areg : NVPTXInst<
(outs),
- (ins regclass:$src1, regclass:$src2, LdStCode:$sem, LdStCode:$addsp,
- LdStCode:$Vec, LdStCode:$Sign, i32imm:$fromWidth, Int32Regs:$addr),
- "st${sem:sem}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ (ins regclass:$src1, regclass:$src2, LdStCode:$sem, LdStCode:$scope,
+ LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign, i32imm:$fromWidth,
+ Int32Regs:$addr),
+ "st${sem:sem}${scope:scope}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
"\t[$addr], {{$src1, $src2}};", []>;
def _v2_areg_64 : NVPTXInst<
(outs),
- (ins regclass:$src1, regclass:$src2, LdStCode:$sem, LdStCode:$addsp,
- LdStCode:$Vec, LdStCode:$Sign, i32imm:$fromWidth, Int64Regs:$addr),
- "st${sem:sem}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ (ins regclass:$src1, regclass:$src2, LdStCode:$sem, LdStCode:$scope,
+ LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign, i32imm:$fromWidth,
+ Int64Regs:$addr),
+ "st${sem:sem}${scope:scope}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
"\t[$addr], {{$src1, $src2}};", []>;
def _v2_ari : NVPTXInst<
(outs),
- (ins regclass:$src1, regclass:$src2, LdStCode:$sem, LdStCode:$addsp,
- LdStCode:$Vec, LdStCode:$Sign, i32imm:$fromWidth, Int32Regs:$addr,
- i32imm:$offset),
- "st${sem:sem}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ (ins regclass:$src1, regclass:$src2, LdStCode:$sem, LdStCode:$scope,
+ LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign, i32imm:$fromWidth,
+ Int32Regs:$addr, i32imm:$offset),
+ "st${sem:sem}${scope:scope}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
"\t[$addr+$offset], {{$src1, $src2}};", []>;
def _v2_ari_64 : NVPTXInst<
(outs),
- (ins regclass:$src1, regclass:$src2, LdStCode:$sem, LdStCode:$addsp,
- LdStCode:$Vec, LdStCode:$Sign, i32imm:$fromWidth, Int64Regs:$addr,
- i32imm:$offset),
- "st${sem:sem}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ (ins regclass:$src1, regclass:$src2, LdStCode:$sem, LdStCode:$scope,
+ LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign, i32imm:$fromWidth,
+ Int64Regs:$addr, i32imm:$offset),
+ "st${sem:sem}${scope:scope}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
"\t[$addr+$offset], {{$src1, $src2}};", []>;
def _v2_asi : NVPTXInst<
(outs),
- (ins regclass:$src1, regclass:$src2, LdStCode:$sem, LdStCode:$addsp,
- LdStCode:$Vec, LdStCode:$Sign, i32imm:$fromWidth, imem:$addr,
- i32imm:$offset),
- "st${sem:sem}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ (ins regclass:$src1, regclass:$src2, LdStCode:$sem, LdStCode:$scope,
+ LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign, i32imm:$fromWidth,
+ imem:$addr, i32imm:$offset),
+ "st${sem:sem}${scope:scope}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
"\t[$addr+$offset], {{$src1, $src2}};", []>;
def _v4_avar : NVPTXInst<
(outs),
(ins regclass:$src1, regclass:$src2, regclass:$src3, regclass:$src4,
- LdStCode:$sem, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
- i32imm:$fromWidth, imem:$addr),
- "st${sem:sem}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ LdStCode:$sem, LdStCode:$scope, LdStCode:$addsp, LdStCode:$Vec,
+ LdStCode:$Sign, i32imm:$fromWidth, imem:$addr),
+ "st${sem:sem}${scope:scope}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
"\t[$addr], {{$src1, $src2, $src3, $src4}};", []>;
def _v4_areg : NVPTXInst<
(outs),
(ins regclass:$src1, regclass:$src2, regclass:$src3, regclass:$src4,
- LdStCode:$sem, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
- i32imm:$fromWidth, Int32Regs:$addr),
- "st${sem:sem}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ LdStCode:$sem, LdStCode:$scope, LdStCode:$addsp, LdStCode:$Vec,
+ LdStCode:$Sign, i32imm:$fromWidth, Int32Regs:$addr),
+ "st${sem:sem}${scope:scope}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
"\t[$addr], {{$src1, $src2, $src3, $src4}};", []>;
def _v4_areg_64 : NVPTXInst<
(outs),
(ins regclass:$src1, regclass:$src2, regclass:$src3, regclass:$src4,
- LdStCode:$sem, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
- i32imm:$fromWidth, Int64Regs:$addr),
- "st${sem:sem}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ LdStCode:$sem, LdStCode:$scope, LdStCode:$addsp, LdStCode:$Vec,
+ LdStCode:$Sign, i32imm:$fromWidth, Int64Regs:$addr),
+ "st${sem:sem}${scope:scope}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
"\t[$addr], {{$src1, $src2, $src3, $src4}};", []>;
def _v4_ari : NVPTXInst<
(outs),
(ins regclass:$src1, regclass:$src2, regclass:$src3, regclass:$src4,
- LdStCode:$sem, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
- i32imm:$fromWidth, Int32Regs:$addr, i32imm:$offset),
- "st${sem:sem}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ LdStCode:$sem, LdStCode:$scope, LdStCode:$addsp, LdStCode:$Vec,
+ LdStCode:$Sign, i32imm:$fromWidth, Int32Regs:$addr, i32imm:$offset),
+ "st${sem:sem}${scope:scope}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
"\t[$addr+$offset], {{$src1, $src2, $src3, $src4}};", []>;
def _v4_ari_64 : NVPTXInst<
(outs),
(ins regclass:$src1, regclass:$src2, regclass:$src3, regclass:$src4,
- LdStCode:$sem, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
- i32imm:$fromWidth, Int64Regs:$addr, i32imm:$offset),
- "st${sem:sem}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ LdStCode:$sem, LdStCode:$scope, LdStCode:$addsp, LdStCode:$Vec,
+ LdStCode:$Sign, i32imm:$fromWidth, Int64Regs:$addr, i32imm:$offset),
+ "st${sem:sem}${scope:scope}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
"\t[$addr+$offset], {{$src1, $src2, $src3, $src4}};", []>;
def _v4_asi : NVPTXInst<
(outs),
(ins regclass:$src1, regclass:$src2, regclass:$src3, regclass:$src4,
- LdStCode:$sem, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
- i32imm:$fromWidth, imem:$addr, i32imm:$offset),
- "st${sem:sem}${addsp:addsp}${Vec:vec}.${Sign:sign}"
+ LdStCode:$sem, LdStCode:$scope, LdStCode:$addsp, LdStCode:$Vec,
+ LdStCode:$Sign, i32imm:$fromWidth, imem:$addr, i32imm:$offset),
+ "st${sem:sem}${scope:scope}${addsp:addsp}${Vec:vec}.${Sign:sign}"
"$fromWidth \t[$addr+$offset], {{$src1, $src2, $src3, $src4}};", []>;
}
@@ -3651,6 +3492,42 @@ def : Pat<(v2i16 (build_vector (i16 Int16Regs:$a), (i16 Int16Regs:$b))),
def: Pat<(v2i16 (scalar_to_vector (i16 Int16Regs:$a))),
(CVT_u32_u16 Int16Regs:$a, CvtNONE)>;
+//
+// Funnel-Shift
+//
+
+// Create SDNodes so they can be used in the DAG code, e.g.
+// NVPTXISelLowering (LowerShiftLeftParts and LowerShiftRightParts)
+def fshl_clamp : SDNode<"NVPTXISD::FUN_SHFL_CLAMP", SDTIntShiftDOp, []>;
+def fshr_clamp : SDNode<"NVPTXISD::FUN_SHFR_CLAMP", SDTIntShiftDOp, []>;
+
+// Funnel shift, requires >= sm_32. Does not trap if amt is out of range, so
+// no side effects.
+let hasSideEffects = false in {
+ multiclass ShfInst<string mode, SDNode op> {
+ def _i
+ : NVPTXInst<(outs Int32Regs:$dst),
+ (ins Int32Regs:$lo, Int32Regs:$hi, i32imm:$amt),
+ "shf." # mode # ".b32 \t$dst, $lo, $hi, $amt;",
+ [(set Int32Regs:$dst,
+ (op (i32 Int32Regs:$lo), (i32 Int32Regs:$hi), (i32 imm:$amt)))]>,
+ Requires<[hasHWROT32]>;
+
+ def _r
+ : NVPTXInst<(outs Int32Regs:$dst),
+ (ins Int32Regs:$lo, Int32Regs:$hi, Int32Regs:$amt),
+ "shf." # mode # ".b32 \t$dst, $lo, $hi, $amt;",
+ [(set Int32Regs:$dst,
+ (op (i32 Int32Regs:$lo), (i32 Int32Regs:$hi), (i32 Int32Regs:$amt)))]>,
+ Requires<[hasHWROT32]>;
+ }
+
+ defm SHF_L_CLAMP : ShfInst<"l.clamp", fshl_clamp>;
+ defm SHF_R_CLAMP : ShfInst<"r.clamp", fshr_clamp>;
+ defm SHF_L_WRAP : ShfInst<"l.wrap", fshl>;
+ defm SHF_R_WRAP : ShfInst<"r.wrap", fshr>;
+}
+
// Count leading zeros
let hasSideEffects = false in {
def CLZr32 : NVPTXInst<(outs Int32Regs:$d), (ins Int32Regs:$a),
@@ -4003,17 +3880,23 @@ def atomic_thread_fence_acq_rel_sys :
NVPTXInst<(outs), (ins), "fence.acq_rel.sys;", []>,
Requires<[hasPTX<60>, hasSM<70>]>;
-def : Pat<(atomic_fence (i64 4), (i64 1)), (atomic_thread_fence_acq_rel_sys)>, // acquire(4) sys(1)
- Requires<[hasPTX<60>, hasSM<70>]>;
-def : Pat<(atomic_fence (i64 5), (i64 1)), (atomic_thread_fence_acq_rel_sys)>, // release(5) sys(1)
- Requires<[hasPTX<60>, hasSM<70>]>;
-def : Pat<(atomic_fence (i64 6), (i64 1)), (atomic_thread_fence_acq_rel_sys)>, // acq_rel(6) sys(1)
- Requires<[hasPTX<60>, hasSM<70>]>;
-def : Pat<(atomic_fence (i64 7), (i64 1)), (atomic_thread_fence_seq_cst_sys)>, // seq_cst(7) sys(1)
- Requires<[hasPTX<60>, hasSM<70>]>;
-
-// If PTX<60 or SM<70, we fall back to MEMBAR:
-def : Pat<(atomic_fence (i64 4), (i64 1)), (INT_MEMBAR_SYS)>; // acquire(4) sys(1)
-def : Pat<(atomic_fence (i64 5), (i64 1)), (INT_MEMBAR_SYS)>; // release(5) sys(1)
-def : Pat<(atomic_fence (i64 6), (i64 1)), (INT_MEMBAR_SYS)>; // acq_rel(6) sys(1)
-def : Pat<(atomic_fence (i64 7), (i64 1)), (INT_MEMBAR_SYS)>; // seq_cst(7) sys(1)
+def atomic_thread_fence_seq_cst_gpu :
+ NVPTXInst<(outs), (ins), "fence.sc.gpu;", []>,
+ Requires<[hasPTX<60>, hasSM<70>]>;
+def atomic_thread_fence_acq_rel_gpu :
+ NVPTXInst<(outs), (ins), "fence.acq_rel.gpu;", []>,
+ Requires<[hasPTX<60>, hasSM<70>]>;
+
+def atomic_thread_fence_seq_cst_cluster :
+ NVPTXInst<(outs), (ins), "fence.sc.cluster;", []>,
+ Requires<[hasPTX<78>, hasSM<90>]>;
+def atomic_thread_fence_acq_rel_cluster :
+ NVPTXInst<(outs), (ins), "fence.acq_rel.cluster;", []>,
+ Requires<[hasPTX<78>, hasSM<90>]>;
+
+def atomic_thread_fence_seq_cst_cta :
+ NVPTXInst<(outs), (ins), "fence.sc.cta;", []>,
+ Requires<[hasPTX<60>, hasSM<70>]>;
+def atomic_thread_fence_acq_rel_cta :
+ NVPTXInst<(outs), (ins), "fence.acq_rel.cta;", []>,
+ Requires<[hasPTX<60>, hasSM<70>]>; \ No newline at end of file
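For reference, the immediates in the removed atomic_fence patterns follow llvm::AtomicOrdering and llvm::SyncScope, as the inline comments indicate:

    // (atomic_fence 4, 1) -> acquire, system scope
    // (atomic_fence 5, 1) -> release, system scope
    // (atomic_fence 6, 1) -> acq_rel, system scope
    // (atomic_fence 7, 1) -> seq_cst, system scope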
diff --git a/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td b/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td
index 656fc67..042b096 100644
--- a/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td
+++ b/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td
@@ -1578,20 +1578,6 @@ def : Pat<(int_nvvm_e5m2x2_to_f16x2_rn_relu Int16Regs:$a),
(CVT_f16x2_e5m2x2 Int16Regs:$a, CvtRN_RELU)>;
//
-// Bitcast
-//
-
-def INT_NVVM_BITCAST_F2I : F_MATH_1<"mov.b32 \t$dst, $src0;", Int32Regs,
- Float32Regs, int_nvvm_bitcast_f2i>;
-def INT_NVVM_BITCAST_I2F : F_MATH_1<"mov.b32 \t$dst, $src0;", Float32Regs,
- Int32Regs, int_nvvm_bitcast_i2f>;
-
-def INT_NVVM_BITCAST_LL2D : F_MATH_1<"mov.b64 \t$dst, $src0;", Float64Regs,
- Int64Regs, int_nvvm_bitcast_ll2d>;
-def INT_NVVM_BITCAST_D2LL : F_MATH_1<"mov.b64 \t$dst, $src0;", Int64Regs,
- Float64Regs, int_nvvm_bitcast_d2ll>;
-
-//
// FNS
//
@@ -2551,59 +2537,45 @@ defm INT_PTX_LDG_G_v4f32_ELE
: VLDG_G_ELE_V4<"v4.f32 \t{{$dst1, $dst2, $dst3, $dst4}}, [$src];", Float32Regs>;
-multiclass NG_TO_G<string Str, Intrinsic Intrin, Predicate ShortPtr> {
+multiclass NG_TO_G<string Str> {
def "" : NVPTXInst<(outs Int32Regs:$result), (ins Int32Regs:$src),
- !strconcat("cvta.", Str, ".u32 \t$result, $src;"),
- [(set Int32Regs:$result, (Intrin Int32Regs:$src))]>;
+ "cvta." # Str # ".u32 \t$result, $src;", []>;
def _64 : NVPTXInst<(outs Int64Regs:$result), (ins Int64Regs:$src),
- !strconcat("cvta.", Str, ".u64 \t$result, $src;"),
- [(set Int64Regs:$result, (Intrin Int64Regs:$src))]>;
- def _6432 : NVPTXInst<(outs Int64Regs:$result), (ins Int32Regs:$src),
- "{{ .reg .b64 %tmp;\n\t"
- #" cvt.u64.u32 \t%tmp, $src;\n\t"
- #" cvta." # Str # ".u64 \t$result, %tmp; }}",
- [(set Int64Regs:$result, (Intrin Int32Regs:$src))]>,
- Requires<[ShortPtr]>;
+ "cvta." # Str # ".u64 \t$result, $src;", []>;
}
-multiclass G_TO_NG<string Str, Intrinsic Intrin, Predicate ShortPtr> {
+multiclass G_TO_NG<string Str> {
def "" : NVPTXInst<(outs Int32Regs:$result), (ins Int32Regs:$src),
- !strconcat("cvta.to.", Str, ".u32 \t$result, $src;"),
- [(set Int32Regs:$result, (Intrin Int32Regs:$src))]>;
+ "cvta.to." # Str # ".u32 \t$result, $src;", []>;
def _64 : NVPTXInst<(outs Int64Regs:$result), (ins Int64Regs:$src),
- !strconcat("cvta.to.", Str, ".u64 \t$result, $src;"),
- [(set Int64Regs:$result, (Intrin Int64Regs:$src))]>;
- def _3264 : NVPTXInst<(outs Int32Regs:$result), (ins Int64Regs:$src),
- "{{ .reg .b64 %tmp;\n\t"
- #" cvta.to." # Str # ".u64 \t%tmp, $src;\n\t"
- #" cvt.u32.u64 \t$result, %tmp; }}",
- [(set Int32Regs:$result, (Intrin Int64Regs:$src))]>,
- Requires<[ShortPtr]>;
-}
-
-defm cvta_local : NG_TO_G<"local", int_nvvm_ptr_local_to_gen, useShortPtrLocal>;
-defm cvta_shared : NG_TO_G<"shared", int_nvvm_ptr_shared_to_gen, useShortPtrShared>;
-defm cvta_global : NG_TO_G<"global", int_nvvm_ptr_global_to_gen, False>;
-defm cvta_const : NG_TO_G<"const", int_nvvm_ptr_constant_to_gen, useShortPtrConst>;
-defm cvta_param : NG_TO_G<"param", int_nvvm_ptr_param_to_gen, False>;
-
-defm cvta_to_local : G_TO_NG<"local", int_nvvm_ptr_gen_to_local, useShortPtrLocal>;
-defm cvta_to_shared : G_TO_NG<"shared", int_nvvm_ptr_gen_to_shared, useShortPtrShared>;
-defm cvta_to_global : G_TO_NG<"global", int_nvvm_ptr_gen_to_global, False>;
-defm cvta_to_const : G_TO_NG<"const", int_nvvm_ptr_gen_to_constant, useShortPtrConst>;
+ "cvta.to." # Str # ".u64 \t$result, $src;", []>;
+}
+
+defm cvta_local : NG_TO_G<"local">;
+defm cvta_shared : NG_TO_G<"shared">;
+defm cvta_global : NG_TO_G<"global">;
+defm cvta_const : NG_TO_G<"const">;
+
+defm cvta_to_local : G_TO_NG<"local">;
+defm cvta_to_shared : G_TO_NG<"shared">;
+defm cvta_to_global : G_TO_NG<"global">;
+defm cvta_to_const : G_TO_NG<"const">;
+
+// nvvm.ptr.param.to.gen
+defm cvta_param : NG_TO_G<"param">;
+
+def : Pat<(int_nvvm_ptr_param_to_gen Int32Regs:$src),
+ (cvta_param Int32Regs:$src)>;
+
+def : Pat<(int_nvvm_ptr_param_to_gen Int64Regs:$src),
+ (cvta_param_64 Int64Regs:$src)>;
// nvvm.ptr.gen.to.param
-def nvvm_ptr_gen_to_param : NVPTXInst<(outs Int32Regs:$result),
- (ins Int32Regs:$src),
- "mov.u32 \t$result, $src;",
- [(set Int32Regs:$result,
- (int_nvvm_ptr_gen_to_param Int32Regs:$src))]>;
-def nvvm_ptr_gen_to_param_64 : NVPTXInst<(outs Int64Regs:$result),
- (ins Int64Regs:$src),
- "mov.u64 \t$result, $src;",
- [(set Int64Regs:$result,
- (int_nvvm_ptr_gen_to_param Int64Regs:$src))]>;
+def : Pat<(int_nvvm_ptr_gen_to_param Int32Regs:$src),
+ (IMOV32rr Int32Regs:$src)>;
+def : Pat<(int_nvvm_ptr_gen_to_param Int64Regs:$src),
+ (IMOV64rr Int64Regs:$src)>;
// nvvm.move intrinsic
def nvvm_move_i16 : NVPTXInst<(outs Int16Regs:$r), (ins Int16Regs:$s),
@@ -2646,24 +2618,6 @@ def nvvm_move_sym64 : NVPTXInst<(outs Int64Regs:$r), (ins imem:$s),
[(set Int64Regs:$r,
(int_nvvm_move_ptr texternalsym:$s))]>;*/
-
-// MoveParam %r1, param
-// ptr_local_to_gen %r2, %r1
-// ptr_gen_to_local %r3, %r2
-// ->
-// mov %r1, param
-
-// @TODO: Revisit this. There is a type
-// contradiction between iPTRAny and iPTR for the addr defs, so the move_sym
-// instructions are not currently defined. However, we can use the ptr
-// variants and the asm printer will do the right thing.
-def : Pat<(i64 (int_nvvm_ptr_gen_to_local (int_nvvm_ptr_local_to_gen
- (MoveParam texternalsym:$src)))),
- (nvvm_move_ptr64 texternalsym:$src)>;
-def : Pat<(i32 (int_nvvm_ptr_gen_to_local (int_nvvm_ptr_local_to_gen
- (MoveParam texternalsym:$src)))),
- (nvvm_move_ptr32 texternalsym:$src)>;
-
def texsurf_handles
: NVPTXInst<(outs Int64Regs:$result), (ins imem:$src),
"mov.u64 \t$result, $src;", []>;
@@ -2747,134 +2701,9 @@ def : Pat<(int_nvvm_read_ptx_sreg_envreg30), (MOV_SPECIAL ENVREG30)>;
def : Pat<(int_nvvm_read_ptx_sreg_envreg31), (MOV_SPECIAL ENVREG31)>;
-// rotate builtin support
-
-def ROTATE_B32_HW_IMM
- : NVPTXInst<(outs Int32Regs:$dst),
- (ins Int32Regs:$src, i32imm:$amt),
- "shf.l.wrap.b32 \t$dst, $src, $src, $amt;",
- [(set Int32Regs:$dst,
- (int_nvvm_rotate_b32 Int32Regs:$src, (i32 imm:$amt)))]>,
- Requires<[hasHWROT32]> ;
-
-def ROTATE_B32_HW_REG
- : NVPTXInst<(outs Int32Regs:$dst),
- (ins Int32Regs:$src, Int32Regs:$amt),
- "shf.l.wrap.b32 \t$dst, $src, $src, $amt;",
- [(set Int32Regs:$dst,
- (int_nvvm_rotate_b32 Int32Regs:$src, Int32Regs:$amt))]>,
- Requires<[hasHWROT32]> ;
-
-def : Pat<(int_nvvm_rotate_b32 Int32Regs:$src, (i32 imm:$amt)),
- (ROT32imm_sw Int32Regs:$src, imm:$amt, (SUB_FRM_32 node:$amt))>,
- Requires<[noHWROT32]> ;
-
-def : Pat<(int_nvvm_rotate_b32 Int32Regs:$src, Int32Regs:$amt),
- (ROTL32reg_sw Int32Regs:$src, Int32Regs:$amt)>,
- Requires<[noHWROT32]> ;
-
-let hasSideEffects = false in {
- def GET_LO_INT64 : NVPTXInst<(outs Int32Regs:$dst), (ins Int64Regs:$src),
- !strconcat("{{\n\t",
- ".reg .b32 %dummy;\n\t",
- "mov.b64 \t{$dst,%dummy}, $src;\n\t",
- "}}"),
- []> ;
-
- def GET_HI_INT64 : NVPTXInst<(outs Int32Regs:$dst), (ins Int64Regs:$src),
- !strconcat("{{\n\t",
- ".reg .b32 %dummy;\n\t",
- "mov.b64 \t{%dummy,$dst}, $src;\n\t",
- "}}"),
- []> ;
-}
-
-let hasSideEffects = false in {
- def PACK_TWO_INT32
- : NVPTXInst<(outs Int64Regs:$dst), (ins Int32Regs:$lo, Int32Regs:$hi),
- "mov.b64 \t$dst, {{$lo, $hi}};", []> ;
-}
-
def : Pat<(int_nvvm_swap_lo_hi_b64 Int64Regs:$src),
- (PACK_TWO_INT32 (GET_HI_INT64 Int64Regs:$src),
- (GET_LO_INT64 Int64Regs:$src))> ;
-
-// Funnel shift, requires >= sm_32. Does not trap if amt is out of range, so
-// no side effects.
-let hasSideEffects = false in {
- def SHF_L_WRAP_B32_IMM
- : NVPTXInst<(outs Int32Regs:$dst),
- (ins Int32Regs:$lo, Int32Regs:$hi, i32imm:$amt),
- "shf.l.wrap.b32 \t$dst, $lo, $hi, $amt;",[]>,
- Requires<[hasHWROT32]>;
-
- def SHF_L_WRAP_B32_REG
- : NVPTXInst<(outs Int32Regs:$dst),
- (ins Int32Regs:$lo, Int32Regs:$hi, Int32Regs:$amt),
- "shf.l.wrap.b32 \t$dst, $lo, $hi, $amt;",[]>,
- Requires<[hasHWROT32]>;
-
- def SHF_R_WRAP_B32_IMM
- : NVPTXInst<(outs Int32Regs:$dst),
- (ins Int32Regs:$lo, Int32Regs:$hi, i32imm:$amt),
- "shf.r.wrap.b32 \t$dst, $lo, $hi, $amt;",[]>,
- Requires<[hasHWROT32]>;
-
- def SHF_R_WRAP_B32_REG
- : NVPTXInst<(outs Int32Regs:$dst),
- (ins Int32Regs:$lo, Int32Regs:$hi, Int32Regs:$amt),
- "shf.r.wrap.b32 \t$dst, $lo, $hi, $amt;",[]>,
- Requires<[hasHWROT32]>;
-}
-
-// HW version of rotate 64
-def : Pat<(int_nvvm_rotate_b64 Int64Regs:$src, (i32 imm:$amt)),
- (PACK_TWO_INT32
- (SHF_L_WRAP_B32_IMM (GET_HI_INT64 Int64Regs:$src),
- (GET_LO_INT64 Int64Regs:$src), imm:$amt),
- (SHF_L_WRAP_B32_IMM (GET_LO_INT64 Int64Regs:$src),
- (GET_HI_INT64 Int64Regs:$src), imm:$amt))>,
- Requires<[hasHWROT32]>;
-
-def : Pat<(int_nvvm_rotate_b64 Int64Regs:$src, Int32Regs:$amt),
- (PACK_TWO_INT32
- (SHF_L_WRAP_B32_REG (GET_HI_INT64 Int64Regs:$src),
- (GET_LO_INT64 Int64Regs:$src), Int32Regs:$amt),
- (SHF_L_WRAP_B32_REG (GET_LO_INT64 Int64Regs:$src),
- (GET_HI_INT64 Int64Regs:$src), Int32Regs:$amt))>,
- Requires<[hasHWROT32]>;
-
-
-def : Pat<(int_nvvm_rotate_right_b64 Int64Regs:$src, (i32 imm:$amt)),
- (PACK_TWO_INT32
- (SHF_R_WRAP_B32_IMM (GET_LO_INT64 Int64Regs:$src),
- (GET_HI_INT64 Int64Regs:$src), imm:$amt),
- (SHF_R_WRAP_B32_IMM (GET_HI_INT64 Int64Regs:$src),
- (GET_LO_INT64 Int64Regs:$src), imm:$amt))>,
- Requires<[hasHWROT32]>;
-
-def : Pat<(int_nvvm_rotate_right_b64 Int64Regs:$src, Int32Regs:$amt),
- (PACK_TWO_INT32
- (SHF_R_WRAP_B32_REG (GET_LO_INT64 Int64Regs:$src),
- (GET_HI_INT64 Int64Regs:$src), Int32Regs:$amt),
- (SHF_R_WRAP_B32_REG (GET_HI_INT64 Int64Regs:$src),
- (GET_LO_INT64 Int64Regs:$src), Int32Regs:$amt))>,
- Requires<[hasHWROT32]>;
-
-// SW version of rotate 64
-def : Pat<(int_nvvm_rotate_b64 Int64Regs:$src, (i32 imm:$amt)),
- (ROT64imm_sw Int64Regs:$src, imm:$amt, (SUB_FRM_64 node:$amt))>,
- Requires<[noHWROT32]>;
-def : Pat<(int_nvvm_rotate_b64 Int64Regs:$src, Int32Regs:$amt),
- (ROTL64reg_sw Int64Regs:$src, Int32Regs:$amt)>,
- Requires<[noHWROT32]>;
-def : Pat<(int_nvvm_rotate_right_b64 Int64Regs:$src, (i32 imm:$amt)),
- (ROT64imm_sw Int64Regs:$src, (SUB_FRM_64 node:$amt), imm:$amt)>,
- Requires<[noHWROT32]>;
-def : Pat<(int_nvvm_rotate_right_b64 Int64Regs:$src, Int32Regs:$amt),
- (ROTR64reg_sw Int64Regs:$src, Int32Regs:$amt)>,
- Requires<[noHWROT32]>;
-
+ (V2I32toI64 (I64toI32H Int64Regs:$src),
+ (I64toI32L Int64Regs:$src))> ;
//-----------------------------------
// Texture Intrinsics
diff --git a/llvm/lib/Target/NVPTX/NVPTXLowerUnreachable.cpp b/llvm/lib/Target/NVPTX/NVPTXLowerUnreachable.cpp
index 92b90e2..a289d35 100644
--- a/llvm/lib/Target/NVPTX/NVPTXLowerUnreachable.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXLowerUnreachable.cpp
@@ -110,17 +110,24 @@ StringRef NVPTXLowerUnreachable::getPassName() const {
}
// =============================================================================
-// Returns whether a `trap` intrinsic should be emitted before I.
+// Returns whether a `trap` intrinsic would be emitted before I.
//
// This is a copy of the logic in SelectionDAGBuilder::visitUnreachable().
// =============================================================================
bool NVPTXLowerUnreachable::isLoweredToTrap(const UnreachableInst &I) const {
- if (!TrapUnreachable)
- return false;
- if (!NoTrapAfterNoreturn)
- return true;
- const CallInst *Call = dyn_cast_or_null<CallInst>(I.getPrevNode());
- return Call && Call->doesNotReturn();
+ if (const auto *Call = dyn_cast_or_null<CallInst>(I.getPrevNode())) {
+ // We've already emitted a non-continuable trap.
+ if (Call->isNonContinuableTrap())
+ return true;
+
+ // No traps are emitted for calls that do not return
+ // when this option is enabled.
+ if (NoTrapAfterNoreturn && Call->doesNotReturn())
+ return false;
+ }
+
+ // In all other cases, we will generate a trap if TrapUnreachable is set.
+ return TrapUnreachable;
}
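The rewritten predicate reduces to three cases:

    // previous instruction is a non-continuable trap call   -> true
    // NoTrapAfterNoreturn and the previous call is noreturn -> false
    // otherwise                                             -> TrapUnreachable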
// =============================================================================
diff --git a/llvm/lib/Target/NVPTX/NVPTXReplaceImageHandles.cpp b/llvm/lib/Target/NVPTX/NVPTXReplaceImageHandles.cpp
index f2515f9..f66504b 100644
--- a/llvm/lib/Target/NVPTX/NVPTXReplaceImageHandles.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXReplaceImageHandles.cpp
@@ -11,12 +11,11 @@
// to work reliably, inlining of all function calls must be performed.
//
//===----------------------------------------------------------------------===//
-
+#include "MCTargetDesc/NVPTXBaseInfo.h"
#include "NVPTX.h"
#include "NVPTXMachineFunctionInfo.h"
#include "NVPTXSubtarget.h"
#include "NVPTXTargetMachine.h"
-#include "MCTargetDesc/NVPTXBaseInfo.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
@@ -1820,8 +1819,8 @@ findIndexForHandle(MachineOperand &Op, MachineFunction &MF, unsigned &Idx) {
return false;
}
- assert(TexHandleDef.getOperand(6).isSymbol() && "Load is not a symbol!");
- StringRef Sym = TexHandleDef.getOperand(6).getSymbolName();
+ assert(TexHandleDef.getOperand(7).isSymbol() && "Load is not a symbol!");
+ StringRef Sym = TexHandleDef.getOperand(7).getSymbolName();
std::string ParamBaseName = std::string(MF.getName());
ParamBaseName += "_param_";
assert(Sym.starts_with(ParamBaseName) && "Invalid symbol reference");
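The operand index moves from 6 to 7 because the NVPTX load/store instruction definitions earlier in this patch gain an extra LdStCode:$scope operand ahead of the symbol operand.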
diff --git a/llvm/lib/Target/NVPTX/NVPTXSubtarget.cpp b/llvm/lib/Target/NVPTX/NVPTXSubtarget.cpp
index 4200655..0e6b75e 100644
--- a/llvm/lib/Target/NVPTX/NVPTXSubtarget.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXSubtarget.cpp
@@ -12,6 +12,8 @@
#include "NVPTXSubtarget.h"
#include "NVPTXTargetMachine.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/FormatVariadic.h"
using namespace llvm;
@@ -69,3 +71,14 @@ bool NVPTXSubtarget::hasImageHandles() const {
bool NVPTXSubtarget::allowFP16Math() const {
return hasFP16Math() && NoF16Math == false;
}
+
+void NVPTXSubtarget::failIfClustersUnsupported(
+ std::string const &FailureMessage) const {
+ if (hasClusters())
+ return;
+
+ report_fatal_error(formatv(
+ "NVPTX SM architecture \"{}\" and PTX version \"{}\" do not support {}. "
+ "Requires SM >= 90 and PTX >= 78.",
+ getFullSmVersion(), PTXVersion, FailureMessage));
+}
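A hypothetical call site, to illustrate the intended use (the function and the PTX string below are illustrative, not part of this patch):

    // Guard cluster-scoped lowering on subtargets without cluster support.
    void emitClusterFence(const NVPTXSubtarget &ST, raw_ostream &OS) {
      ST.failIfClustersUnsupported("cluster-scoped fences"); // fatal if SM/PTX too old
      OS << "fence.sc.cluster;\n";
    }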
diff --git a/llvm/lib/Target/NVPTX/NVPTXSubtarget.h b/llvm/lib/Target/NVPTX/NVPTXSubtarget.h
index 457f10f..8b9059b 100644
--- a/llvm/lib/Target/NVPTX/NVPTXSubtarget.h
+++ b/llvm/lib/Target/NVPTX/NVPTXSubtarget.h
@@ -78,6 +78,7 @@ public:
bool hasAtomBitwise64() const { return SmVersion >= 32; }
bool hasAtomMinMax64() const { return SmVersion >= 32; }
bool hasAtomCas16() const { return SmVersion >= 70 && PTXVersion >= 63; }
+ bool hasClusters() const { return SmVersion >= 90 && PTXVersion >= 78; }
bool hasLDG() const { return SmVersion >= 32; }
bool hasHWROT32() const { return SmVersion >= 32; }
bool hasImageHandles() const;
@@ -119,6 +120,8 @@ public:
NVPTXSubtarget &initializeSubtargetDependencies(StringRef CPU, StringRef FS);
void ParseSubtargetFeatures(StringRef CPU, StringRef TuneCPU, StringRef FS);
+
+ void failIfClustersUnsupported(std::string const &FailureMessage) const;
};
} // End llvm namespace
diff --git a/llvm/lib/Target/NVPTX/NVPTXUtilities.cpp b/llvm/lib/Target/NVPTX/NVPTXUtilities.cpp
index 8036174..be1c87d 100644
--- a/llvm/lib/Target/NVPTX/NVPTXUtilities.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXUtilities.cpp
@@ -13,6 +13,7 @@
#include "NVPTXUtilities.h"
#include "NVPTX.h"
#include "NVPTXTargetMachine.h"
+#include "llvm/ADT/StringRef.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
@@ -130,8 +131,8 @@ static void cacheAnnotationFromMD(const Module *m, const GlobalValue *gv) {
}
}
-bool findOneNVVMAnnotation(const GlobalValue *gv, const std::string &prop,
- unsigned &retval) {
+static std::optional<unsigned> findOneNVVMAnnotation(const GlobalValue *gv,
+ const std::string &prop) {
auto &AC = getAnnotationCache();
std::lock_guard<sys::Mutex> Guard(AC.Lock);
const Module *m = gv->getParent();
@@ -140,21 +141,13 @@ bool findOneNVVMAnnotation(const GlobalValue *gv, const std::string &prop,
else if (AC.Cache[m].find(gv) == AC.Cache[m].end())
cacheAnnotationFromMD(m, gv);
if (AC.Cache[m][gv].find(prop) == AC.Cache[m][gv].end())
- return false;
- retval = AC.Cache[m][gv][prop][0];
- return true;
-}
-
-static std::optional<unsigned>
-findOneNVVMAnnotation(const GlobalValue &GV, const std::string &PropName) {
- unsigned RetVal;
- if (findOneNVVMAnnotation(&GV, PropName, RetVal))
- return RetVal;
- return std::nullopt;
+ return std::nullopt;
+ return AC.Cache[m][gv][prop][0];
}
-bool findAllNVVMAnnotation(const GlobalValue *gv, const std::string &prop,
- std::vector<unsigned> &retval) {
+static bool findAllNVVMAnnotation(const GlobalValue *gv,
+ const std::string &prop,
+ std::vector<unsigned> &retval) {
auto &AC = getAnnotationCache();
std::lock_guard<sys::Mutex> Guard(AC.Lock);
const Module *m = gv->getParent();
@@ -168,25 +161,13 @@ bool findAllNVVMAnnotation(const GlobalValue *gv, const std::string &prop,
return true;
}
-bool isTexture(const Value &val) {
- if (const GlobalValue *gv = dyn_cast<GlobalValue>(&val)) {
- unsigned Annot;
- if (findOneNVVMAnnotation(gv, "texture", Annot)) {
- assert((Annot == 1) && "Unexpected annotation on a texture symbol");
+static bool globalHasNVVMAnnotation(const Value &V, const std::string &Prop) {
+ if (const auto *GV = dyn_cast<GlobalValue>(&V))
+ if (const auto Annot = findOneNVVMAnnotation(GV, Prop)) {
+ assert((*Annot == 1) && "Unexpected annotation on a symbol");
return true;
}
- }
- return false;
-}
-bool isSurface(const Value &val) {
- if (const GlobalValue *gv = dyn_cast<GlobalValue>(&val)) {
- unsigned Annot;
- if (findOneNVVMAnnotation(gv, "surface", Annot)) {
- assert((Annot == 1) && "Unexpected annotation on a surface symbol");
- return true;
- }
- }
return false;
}
@@ -220,71 +201,60 @@ bool isParamGridConstant(const Value &V) {
return false;
}
-bool isSampler(const Value &val) {
+bool isTexture(const Value &V) { return globalHasNVVMAnnotation(V, "texture"); }
+
+bool isSurface(const Value &V) { return globalHasNVVMAnnotation(V, "surface"); }
+
+bool isSampler(const Value &V) {
const char *AnnotationName = "sampler";
- if (const GlobalValue *gv = dyn_cast<GlobalValue>(&val)) {
- unsigned Annot;
- if (findOneNVVMAnnotation(gv, AnnotationName, Annot)) {
- assert((Annot == 1) && "Unexpected annotation on a sampler symbol");
- return true;
- }
- }
- return argHasNVVMAnnotation(val, AnnotationName);
+ return globalHasNVVMAnnotation(V, AnnotationName) ||
+ argHasNVVMAnnotation(V, AnnotationName);
}
-bool isImageReadOnly(const Value &val) {
- return argHasNVVMAnnotation(val, "rdoimage");
+bool isImageReadOnly(const Value &V) {
+ return argHasNVVMAnnotation(V, "rdoimage");
}
-bool isImageWriteOnly(const Value &val) {
- return argHasNVVMAnnotation(val, "wroimage");
+bool isImageWriteOnly(const Value &V) {
+ return argHasNVVMAnnotation(V, "wroimage");
}
-bool isImageReadWrite(const Value &val) {
- return argHasNVVMAnnotation(val, "rdwrimage");
+bool isImageReadWrite(const Value &V) {
+ return argHasNVVMAnnotation(V, "rdwrimage");
}
-bool isImage(const Value &val) {
- return isImageReadOnly(val) || isImageWriteOnly(val) || isImageReadWrite(val);
+bool isImage(const Value &V) {
+ return isImageReadOnly(V) || isImageWriteOnly(V) || isImageReadWrite(V);
}
-bool isManaged(const Value &val) {
- if(const GlobalValue *gv = dyn_cast<GlobalValue>(&val)) {
- unsigned Annot;
- if (findOneNVVMAnnotation(gv, "managed", Annot)) {
- assert((Annot == 1) && "Unexpected annotation on a managed symbol");
- return true;
- }
- }
- return false;
-}
+bool isManaged(const Value &V) { return globalHasNVVMAnnotation(V, "managed"); }
-std::string getTextureName(const Value &val) {
- assert(val.hasName() && "Found texture variable with no name");
- return std::string(val.getName());
+StringRef getTextureName(const Value &V) {
+ assert(V.hasName() && "Found texture variable with no name");
+ return V.getName();
}
-std::string getSurfaceName(const Value &val) {
- assert(val.hasName() && "Found surface variable with no name");
- return std::string(val.getName());
+StringRef getSurfaceName(const Value &V) {
+ assert(V.hasName() && "Found surface variable with no name");
+ return V.getName();
}
-std::string getSamplerName(const Value &val) {
- assert(val.hasName() && "Found sampler variable with no name");
- return std::string(val.getName());
+StringRef getSamplerName(const Value &V) {
+ assert(V.hasName() && "Found sampler variable with no name");
+ return V.getName();
}
std::optional<unsigned> getMaxNTIDx(const Function &F) {
- return findOneNVVMAnnotation(F, "maxntidx");
+ return findOneNVVMAnnotation(&F, "maxntidx");
}
std::optional<unsigned> getMaxNTIDy(const Function &F) {
- return findOneNVVMAnnotation(F, "maxntidy");
+ return findOneNVVMAnnotation(&F, "maxntidy");
}
std::optional<unsigned> getMaxNTIDz(const Function &F) {
- return findOneNVVMAnnotation(F, "maxntidz");
+ return findOneNVVMAnnotation(&F, "maxntidz");
}
std::optional<unsigned> getMaxNTID(const Function &F) {
@@ -302,20 +272,20 @@ std::optional<unsigned> getMaxNTID(const Function &F) {
return std::nullopt;
}
-bool getMaxClusterRank(const Function &F, unsigned &x) {
- return findOneNVVMAnnotation(&F, "maxclusterrank", x);
+std::optional<unsigned> getMaxClusterRank(const Function &F) {
+ return findOneNVVMAnnotation(&F, "maxclusterrank");
}
std::optional<unsigned> getReqNTIDx(const Function &F) {
- return findOneNVVMAnnotation(F, "reqntidx");
+ return findOneNVVMAnnotation(&F, "reqntidx");
}
std::optional<unsigned> getReqNTIDy(const Function &F) {
- return findOneNVVMAnnotation(F, "reqntidy");
+ return findOneNVVMAnnotation(&F, "reqntidy");
}
std::optional<unsigned> getReqNTIDz(const Function &F) {
- return findOneNVVMAnnotation(F, "reqntidz");
+ return findOneNVVMAnnotation(&F, "reqntidz");
}
std::optional<unsigned> getReqNTID(const Function &F) {
@@ -328,21 +298,20 @@ std::optional<unsigned> getReqNTID(const Function &F) {
return std::nullopt;
}
-bool getMinCTASm(const Function &F, unsigned &x) {
- return findOneNVVMAnnotation(&F, "minctasm", x);
+std::optional<unsigned> getMinCTASm(const Function &F) {
+ return findOneNVVMAnnotation(&F, "minctasm");
}
-bool getMaxNReg(const Function &F, unsigned &x) {
- return findOneNVVMAnnotation(&F, "maxnreg", x);
+std::optional<unsigned> getMaxNReg(const Function &F) {
+ return findOneNVVMAnnotation(&F, "maxnreg");
}
bool isKernelFunction(const Function &F) {
- unsigned x = 0;
- if (!findOneNVVMAnnotation(&F, "kernel", x)) {
- // There is no NVVM metadata, check the calling convention
- return F.getCallingConv() == CallingConv::PTX_Kernel;
- }
- return (x == 1);
+ if (const auto X = findOneNVVMAnnotation(&F, "kernel"))
+ return (*X == 1);
+
+ // There is no NVVM metadata, check the calling convention
+ return F.getCallingConv() == CallingConv::PTX_Kernel;
}
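With the out-parameter helpers gone, callers follow the optional-returning pattern already used by getMaxNTIDx and friends; a sketch (the consumer is hypothetical):

    if (std::optional<unsigned> MinCTAs = getMinCTASm(F))
      emitMinCTAsDirective(*MinCTAs); // e.g. lower to the .minnctapersm directive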
MaybeAlign getAlign(const Function &F, unsigned Index) {
diff --git a/llvm/lib/Target/NVPTX/NVPTXUtilities.h b/llvm/lib/Target/NVPTX/NVPTXUtilities.h
index eebd91f..cf15dff 100644
--- a/llvm/lib/Target/NVPTX/NVPTXUtilities.h
+++ b/llvm/lib/Target/NVPTX/NVPTXUtilities.h
@@ -20,6 +20,7 @@
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Alignment.h"
+#include "llvm/Support/FormatVariadic.h"
#include <cstdarg>
#include <set>
#include <string>
@@ -31,11 +32,6 @@ class TargetMachine;
void clearAnnotationCache(const Module *);
-bool findOneNVVMAnnotation(const GlobalValue *, const std::string &,
- unsigned &);
-bool findAllNVVMAnnotation(const GlobalValue *, const std::string &,
- std::vector<unsigned> &);
-
bool isTexture(const Value &);
bool isSurface(const Value &);
bool isSampler(const Value &);
@@ -45,23 +41,23 @@ bool isImageWriteOnly(const Value &);
bool isImageReadWrite(const Value &);
bool isManaged(const Value &);
-std::string getTextureName(const Value &);
-std::string getSurfaceName(const Value &);
-std::string getSamplerName(const Value &);
+StringRef getTextureName(const Value &);
+StringRef getSurfaceName(const Value &);
+StringRef getSamplerName(const Value &);
std::optional<unsigned> getMaxNTIDx(const Function &);
std::optional<unsigned> getMaxNTIDy(const Function &);
std::optional<unsigned> getMaxNTIDz(const Function &);
-std::optional<unsigned> getMaxNTID(const Function &F);
+std::optional<unsigned> getMaxNTID(const Function &);
std::optional<unsigned> getReqNTIDx(const Function &);
std::optional<unsigned> getReqNTIDy(const Function &);
std::optional<unsigned> getReqNTIDz(const Function &);
std::optional<unsigned> getReqNTID(const Function &);
-bool getMaxClusterRank(const Function &, unsigned &);
-bool getMinCTASm(const Function &, unsigned &);
-bool getMaxNReg(const Function &, unsigned &);
+std::optional<unsigned> getMaxClusterRank(const Function &);
+std::optional<unsigned> getMinCTASm(const Function &);
+std::optional<unsigned> getMaxNReg(const Function &);
bool isKernelFunction(const Function &);
bool isParamGridConstant(const Value &);
@@ -74,10 +70,9 @@ Function *getMaybeBitcastedCallee(const CallBase *CB);
inline unsigned promoteScalarArgumentSize(unsigned size) {
if (size <= 32)
return 32;
- else if (size <= 64)
+ if (size <= 64)
return 64;
- else
- return size;
+ return size;
}
bool shouldEmitPTXNoReturn(const Value *V, const TargetMachine &TM);
@@ -86,7 +81,7 @@ bool Isv2x16VT(EVT VT);
namespace NVPTX {
-inline std::string OrderingToCString(Ordering Order) {
+inline std::string OrderingToString(Ordering Order) {
switch (Order) {
case Ordering::NotAtomic:
return "NotAtomic";
@@ -96,7 +91,8 @@ inline std::string OrderingToCString(Ordering Order) {
return "Acquire";
case Ordering::Release:
return "Release";
- // case Ordering::AcquireRelease: return "AcquireRelease";
+ case Ordering::AcquireRelease:
+ return "AcquireRelease";
case Ordering::SequentiallyConsistent:
return "SequentiallyConsistent";
case Ordering::Volatile:
@@ -104,11 +100,58 @@ inline std::string OrderingToCString(Ordering Order) {
case Ordering::RelaxedMMIO:
return "RelaxedMMIO";
}
- report_fatal_error("unknown ordering");
+ report_fatal_error(formatv("Unknown NVPTX::Ordering \"{}\".",
+ static_cast<OrderingUnderlyingType>(Order)));
}
inline raw_ostream &operator<<(raw_ostream &O, Ordering Order) {
- O << OrderingToCString(Order);
+ O << OrderingToString(Order);
+ return O;
+}
+
+inline std::string ScopeToString(Scope S) {
+ switch (S) {
+ case Scope::Thread:
+ return "Thread";
+ case Scope::System:
+ return "System";
+ case Scope::Block:
+ return "Block";
+ case Scope::Cluster:
+ return "Cluster";
+ case Scope::Device:
+ return "Device";
+ }
+ report_fatal_error(formatv("Unknown NVPTX::Scope \"{}\".",
+ static_cast<ScopeUnderlyingType>(S)));
+}
+
+inline raw_ostream &operator<<(raw_ostream &O, Scope S) {
+ O << ScopeToString(S);
+ return O;
+}
+
+inline std::string AddressSpaceToString(AddressSpace A) {
+ switch (A) {
+ case AddressSpace::Generic:
+ return "generic";
+ case AddressSpace::Global:
+ return "global";
+ case AddressSpace::Const:
+ return "const";
+ case AddressSpace::Shared:
+ return "shared";
+ case AddressSpace::Param:
+ return "param";
+ case AddressSpace::Local:
+ return "local";
+ }
+ report_fatal_error(formatv("Unknown NVPTX::AddressSpace \"{}\".",
+ static_cast<AddressSpaceUnderlyingType>(A)));
+}
+
+inline raw_ostream &operator<<(raw_ostream &O, AddressSpace A) {
+ O << AddressSpaceToString(A);
return O;
}
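With operator<< now defined for all three enums, diagnostics compose directly; a sketch (the variables are assumed to be in scope):

    LLVM_DEBUG(dbgs() << "ordering=" << Order << ", scope=" << S
                      << ", addrspace=" << A << "\n");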
diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMCCodeEmitter.cpp b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMCCodeEmitter.cpp
index 7532363..eb21498 100644
--- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMCCodeEmitter.cpp
+++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMCCodeEmitter.cpp
@@ -283,13 +283,18 @@ void RISCVMCCodeEmitter::expandLongCondBr(const MCInst &MI,
Offset = 4;
}
+ // Save the number of fixups.
+ size_t FixupStartIndex = Fixups.size();
+
// Emit an unconditional jump to the destination.
MCInst TmpInst =
MCInstBuilder(RISCV::JAL).addReg(RISCV::X0).addOperand(SrcSymbol);
uint32_t Binary = getBinaryCodeForInstr(TmpInst, Fixups, STI);
support::endian::write(CB, Binary, llvm::endianness::little);
- Fixups.clear();
+ // Drop any fixups just added so we can add the correct one.
+ Fixups.resize(FixupStartIndex);
+
if (SrcSymbol.isExpr()) {
Fixups.push_back(MCFixup::create(Offset, SrcSymbol.getExpr(),
MCFixupKind(RISCV::fixup_riscv_jal),
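Fixups.clear() discarded every fixup recorded before this expansion ran, not just the one getBinaryCodeForInstr appends for SrcSymbol; resizing back to FixupStartIndex drops only the entries added while encoding the temporary JAL.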
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
index fcd46b5..05ba18b 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -2725,7 +2725,7 @@ bool RISCVDAGToDAGISel::SelectAddrRegImm(SDValue Addr, SDValue &Base,
}
/// Similar to SelectAddrRegImm, except that the least significant 5 bits of
-/// Offset shoule be all zeros.
+/// Offset should be all zeros.
bool RISCVDAGToDAGISel::SelectAddrRegImmLsb00000(SDValue Addr, SDValue &Base,
SDValue &Offset) {
if (SelectAddrFrameIndex(Addr, Base, Offset))
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index c4458b1..7b00b25 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -4540,7 +4540,7 @@ static SDValue lowerScalarInsert(SDValue Scalar, SDValue VL, MVT VT,
// t33: v8i8 = extract_subvector t11, Constant:i64<8>
// a) t35: v8i8 = vector_shuffle<0,2,4,6,8,10,12,14> t34, t33
// b) t35: v8i8 = vector_shuffle<1,3,5,7,9,11,13,15> t34, t33
-// Returns {Src Vector, Even Elements} om success
+// Returns {Src Vector, Even Elements} on success
static bool isDeinterleaveShuffle(MVT VT, MVT ContainerVT, SDValue V1,
SDValue V2, ArrayRef<int> Mask,
const RISCVSubtarget &Subtarget) {
@@ -10152,13 +10152,15 @@ SDValue RISCVTargetLowering::lowerINSERT_SUBVECTOR(SDValue Op,
unsigned OrigIdx = Op.getConstantOperandVal(2);
const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
+ if (OrigIdx == 0 && Vec.isUndef())
+ return Op;
+
// We don't have the ability to slide mask vectors up indexed by their i1
// elements; the smallest we can do is i8. Often we are able to bitcast to
// equivalent i8 vectors. Note that when inserting a fixed-length vector
// into a scalable one, we might not necessarily have enough scalable
// elements to safely divide by 8: nxv1i1 = insert nxv1i1, v4i1 is valid.
- if (SubVecVT.getVectorElementType() == MVT::i1 &&
- (OrigIdx != 0 || !Vec.isUndef())) {
+ if (SubVecVT.getVectorElementType() == MVT::i1) {
if (VecVT.getVectorMinNumElements() >= 8 &&
SubVecVT.getVectorMinNumElements() >= 8) {
assert(OrigIdx % 8 == 0 && "Invalid index");
@@ -10196,25 +10198,16 @@ SDValue RISCVTargetLowering::lowerINSERT_SUBVECTOR(SDValue Op,
// vector group up the full amount.
const auto VLen = Subtarget.getRealVLen();
if (SubVecVT.isFixedLengthVector() && !VLen) {
- if (OrigIdx == 0 && Vec.isUndef() && !VecVT.isFixedLengthVector())
- return Op;
MVT ContainerVT = VecVT;
if (VecVT.isFixedLengthVector()) {
ContainerVT = getContainerForFixedLengthVector(VecVT);
Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
}
- if (OrigIdx == 0 && Vec.isUndef() && VecVT.isFixedLengthVector()) {
- SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ContainerVT,
- DAG.getUNDEF(ContainerVT), SubVec,
- DAG.getVectorIdxConstant(0, DL));
- SubVec = convertFromScalableVector(VecVT, SubVec, DAG, Subtarget);
- return DAG.getBitcast(Op.getValueType(), SubVec);
- }
-
SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ContainerVT,
DAG.getUNDEF(ContainerVT), SubVec,
DAG.getVectorIdxConstant(0, DL));
+
SDValue Mask =
getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget).first;
// Set the vector length to only the number of elements we care about. Note
@@ -10306,8 +10299,12 @@ SDValue RISCVTargetLowering::lowerINSERT_SUBVECTOR(SDValue Op,
return Op;
}
+ // Use an insert_subvector that will resolve to an insert subreg.
+ assert(VLen);
+ unsigned Vscale = *VLen / RISCV::RVVBitsPerBlock;
SDValue Insert =
- DAG.getTargetInsertSubreg(SubRegIdx, DL, ContainerVecVT, Vec, SubVec);
+ DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ContainerVecVT, Vec, SubVec,
+ DAG.getConstant(OrigIdx / Vscale, DL, XLenVT));
if (VecVT.isFixedLengthVector())
Insert = convertFromScalableVector(VecVT, Insert, DAG, Subtarget);
return Insert;
@@ -10323,8 +10320,10 @@ SDValue RISCVTargetLowering::lowerINSERT_SUBVECTOR(SDValue Op,
MVT InterSubVT = ContainerVecVT;
SDValue AlignedExtract = Vec;
unsigned AlignedIdx = OrigIdx - RemIdx.getKnownMinValue();
- if (SubVecVT.isFixedLengthVector())
+ if (SubVecVT.isFixedLengthVector()) {
+ assert(VLen);
AlignedIdx /= *VLen / RISCV::RVVBitsPerBlock;
+ }
if (ContainerVecVT.bitsGT(getLMUL1VT(ContainerVecVT))) {
InterSubVT = getLMUL1VT(ContainerVecVT);
// Extract a subvector equal to the nearest full vector register type. This
@@ -10388,12 +10387,17 @@ SDValue RISCVTargetLowering::lowerEXTRACT_SUBVECTOR(SDValue Op,
unsigned OrigIdx = Op.getConstantOperandVal(1);
const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
+ // With an index of 0 this is a cast-like subvector, which can be performed
+ // with subregister operations.
+ if (OrigIdx == 0)
+ return Op;
+
// We don't have the ability to slide mask vectors down indexed by their i1
// elements; the smallest we can do is i8. Often we are able to bitcast to
// equivalent i8 vectors. Note that when extracting a fixed-length vector
// from a scalable one, we might not necessarily have enough scalable
// elements to safely divide by 8: v8i1 = extract nxv1i1 is valid.
- if (SubVecVT.getVectorElementType() == MVT::i1 && OrigIdx != 0) {
+ if (SubVecVT.getVectorElementType() == MVT::i1) {
if (VecVT.getVectorMinNumElements() >= 8 &&
SubVecVT.getVectorMinNumElements() >= 8) {
assert(OrigIdx % 8 == 0 && "Invalid index");
@@ -10425,11 +10429,6 @@ SDValue RISCVTargetLowering::lowerEXTRACT_SUBVECTOR(SDValue Op,
}
}
- // With an index of 0 this is a cast-like subvector, which can be performed
- // with subregister operations.
- if (OrigIdx == 0)
- return Op;
-
const auto VLen = Subtarget.getRealVLen();
// If the subvector vector is a fixed-length type and we don't know VLEN
@@ -10501,10 +10500,14 @@ SDValue RISCVTargetLowering::lowerEXTRACT_SUBVECTOR(SDValue Op,
// If the Idx has been completely eliminated then this is a subvector extract
// which naturally aligns to a vector register. These can easily be handled
- // using subregister manipulation.
+ // using subregister manipulation. We use an extract_subvector that will
+ // resolve to an extract subreg.
if (RemIdx.isZero()) {
if (SubVecVT.isFixedLengthVector()) {
- Vec = DAG.getTargetExtractSubreg(SubRegIdx, DL, ContainerSubVecVT, Vec);
+ assert(VLen);
+ unsigned Vscale = *VLen / RISCV::RVVBitsPerBlock;
+ Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ContainerSubVecVT, Vec,
+ DAG.getConstant(OrigIdx / Vscale, DL, XLenVT));
return convertFromScalableVector(SubVecVT, Vec, DAG, Subtarget);
}
return Op;
@@ -10522,9 +10525,17 @@ SDValue RISCVTargetLowering::lowerEXTRACT_SUBVECTOR(SDValue Op,
if (VecVT.bitsGT(getLMUL1VT(VecVT))) {
// If VecVT has an LMUL > 1, then SubVecVT should have a smaller LMUL, and
// we should have successfully decomposed the extract into a subregister.
+ // We use an extract_subvector that will resolve to a subreg extract.
assert(SubRegIdx != RISCV::NoSubRegister);
+ (void)SubRegIdx;
+ unsigned Idx = OrigIdx - RemIdx.getKnownMinValue();
+ if (SubVecVT.isFixedLengthVector()) {
+ assert(VLen);
+ Idx /= *VLen / RISCV::RVVBitsPerBlock;
+ }
InterSubVT = getLMUL1VT(VecVT);
- Vec = DAG.getTargetExtractSubreg(SubRegIdx, DL, InterSubVT, Vec);
+ Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InterSubVT, Vec,
+ DAG.getConstant(Idx, DL, XLenVT));
}
// Slide this vector register down by the desired number of elements in order
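A worked example of the index scaling used in these hunks (assuming VLEN=128): Vscale = VLEN / RVVBitsPerBlock = 128 / 64 = 2, so a fixed-vector element index of 8 becomes index 8 / 2 = 4 in the scalable units expected by INSERT_SUBVECTOR/EXTRACT_SUBVECTOR.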
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
index 41f93fd..b594531 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
@@ -736,8 +736,6 @@ MachineInstr *RISCVInstrInfo::foldMemoryOperandImpl(
MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops,
MachineBasicBlock::iterator InsertPt, int FrameIndex, LiveIntervals *LIS,
VirtRegMap *VRM) const {
- const MachineFrameInfo &MFI = MF.getFrameInfo();
-
// The below optimizations narrow the load so they are only valid for little
// endian.
// TODO: Support big endian by adding an offset into the frame object?
@@ -776,17 +774,11 @@ MachineInstr *RISCVInstrInfo::foldMemoryOperandImpl(
break;
}
- MachineMemOperand *MMO = MF.getMachineMemOperand(
- MachinePointerInfo::getFixedStack(MF, FrameIndex),
- MachineMemOperand::MOLoad, MFI.getObjectSize(FrameIndex),
- MFI.getObjectAlign(FrameIndex));
-
Register DstReg = MI.getOperand(0).getReg();
return BuildMI(*MI.getParent(), InsertPt, MI.getDebugLoc(), get(LoadOpc),
DstReg)
.addFrameIndex(FrameIndex)
- .addImm(0)
- .addMemOperand(MMO);
+ .addImm(0);
}
void RISCVInstrInfo::movImm(MachineBasicBlock &MBB,
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index fe7de9d..68182d2 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -232,7 +232,7 @@ class octuple_to_str<int octuple> {
def VLOpFrag : PatFrag<(ops), (XLenVT (VLOp (XLenVT AVL:$vl)))>;
// Output pattern for X0 used to represent VLMAX in the pseudo instructions.
-// We can't use X0 register becuase the AVL operands use GPRNoX0.
+// We can't use X0 register because the AVL operands use GPRNoX0.
// This must be kept in sync with RISCV::VLMaxSentinel.
def VLMax : OutPatFrag<(ops), (XLenVT -1)>;
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
index b54cdcb..0df0187d 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
@@ -33,11 +33,11 @@ multiclass VPatUSLoadStoreSDNode<ValueType type,
defvar load_instr = !cast<Instruction>("PseudoVLE"#sew#"_V_"#vlmul.MX);
defvar store_instr = !cast<Instruction>("PseudoVSE"#sew#"_V_"#vlmul.MX);
// Load
- def : Pat<(type (load GPR:$rs1)),
+ def : Pat<(type (load (XLenVT GPR:$rs1))),
(load_instr (type (IMPLICIT_DEF)), GPR:$rs1, avl,
log2sew, TA_MA)>;
// Store
- def : Pat<(store type:$rs2, GPR:$rs1),
+ def : Pat<(store type:$rs2, (XLenVT GPR:$rs1)),
(store_instr reg_class:$rs2, GPR:$rs1, avl, log2sew)>;
}
diff --git a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
index 595475f..768df717 100644
--- a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
@@ -635,8 +635,17 @@ InstructionCost RISCVTTIImpl::getScalarizationOverhead(
InstructionCost Cost = BaseT::getScalarizationOverhead(
Ty, DemandedElts, Insert, Extract, CostKind);
std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Ty);
- if (Insert && !Extract && LT.first.isValid() && LT.second.isVector() &&
- Ty->getScalarSizeInBits() != 1) {
+ if (Insert && !Extract && LT.first.isValid() && LT.second.isVector()) {
+ if (Ty->getScalarSizeInBits() == 1) {
+ auto *WideVecTy = cast<VectorType>(Ty->getWithNewBitWidth(8));
+ // Note: Implicit scalar anyextend is assumed to be free since the i1
+ // must be stored in a GPR.
+ return getScalarizationOverhead(WideVecTy, DemandedElts, Insert, Extract,
+ CostKind) +
+ getCastInstrCost(Instruction::Trunc, Ty, WideVecTy,
+ TTI::CastContextHint::None, CostKind, nullptr);
+ }
+
assert(LT.second.isFixedLengthVector());
MVT ContainerVT = TLI->getContainerForFixedLengthVector(LT.second);
if (isM1OrSmaller(ContainerVT)) {
@@ -1116,6 +1125,13 @@ RISCVTTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
*FOp, ICA.getArgTypes()[0], UI->getPointerAlignment(),
UI->getOperand(1)->getType()->getPointerAddressSpace(), CostKind);
}
+ case Intrinsic::vp_select: {
+ Intrinsic::ID IID = ICA.getID();
+ std::optional<unsigned> FOp = VPIntrinsic::getFunctionalOpcodeForVP(IID);
+ assert(FOp.has_value());
+ return getCmpSelInstrCost(*FOp, ICA.getReturnType(), ICA.getArgTypes()[0],
+ CmpInst::BAD_ICMP_PREDICATE, CostKind);
+ }
}
if (ST->hasVInstructions() && RetTy->isVectorTy()) {
diff --git a/llvm/lib/Target/SPIRV/SPIRVEmitIntrinsics.cpp b/llvm/lib/Target/SPIRV/SPIRVEmitIntrinsics.cpp
index 795ddf4..86be79c 100644
--- a/llvm/lib/Target/SPIRV/SPIRVEmitIntrinsics.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVEmitIntrinsics.cpp
@@ -1351,7 +1351,8 @@ Instruction *SPIRVEmitIntrinsics::visitAtomicCmpXchgInst(AtomicCmpXchgInst &I) {
SmallVector<Value *> Args;
for (auto &Op : I.operands())
Args.push_back(Op);
- Args.push_back(B.getInt32(I.getSyncScopeID()));
+ Args.push_back(B.getInt32(
+ static_cast<uint32_t>(getMemScope(I.getContext(), I.getSyncScopeID()))));
Args.push_back(B.getInt32(
static_cast<uint32_t>(getMemSemantics(I.getSuccessOrdering()))));
Args.push_back(B.getInt32(
diff --git a/llvm/lib/Target/SPIRV/SPIRVGlobalRegistry.cpp b/llvm/lib/Target/SPIRV/SPIRVGlobalRegistry.cpp
index ca3e47a..3e1873e 100644
--- a/llvm/lib/Target/SPIRV/SPIRVGlobalRegistry.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVGlobalRegistry.cpp
@@ -25,6 +25,7 @@
#include "llvm/IR/Type.h"
#include "llvm/Support/Casting.h"
#include <cassert>
+#include <functional>
using namespace llvm;
SPIRVGlobalRegistry::SPIRVGlobalRegistry(unsigned PointerSize)
@@ -83,8 +84,11 @@ inline Register createTypeVReg(MachineIRBuilder &MIRBuilder) {
}
SPIRVType *SPIRVGlobalRegistry::getOpTypeBool(MachineIRBuilder &MIRBuilder) {
- return MIRBuilder.buildInstr(SPIRV::OpTypeBool)
- .addDef(createTypeVReg(MIRBuilder));
+
+ return createOpType(MIRBuilder, [&](MachineIRBuilder &MIRBuilder) {
+ return MIRBuilder.buildInstr(SPIRV::OpTypeBool)
+ .addDef(createTypeVReg(MIRBuilder));
+ });
}
unsigned SPIRVGlobalRegistry::adjustOpTypeIntWidth(unsigned Width) const {
@@ -118,24 +122,53 @@ SPIRVType *SPIRVGlobalRegistry::getOpTypeInt(unsigned Width,
MIRBuilder.buildInstr(SPIRV::OpCapability)
.addImm(SPIRV::Capability::ArbitraryPrecisionIntegersINTEL);
}
- auto MIB = MIRBuilder.buildInstr(SPIRV::OpTypeInt)
- .addDef(createTypeVReg(MIRBuilder))
- .addImm(Width)
- .addImm(IsSigned ? 1 : 0);
- return MIB;
+ return createOpType(MIRBuilder, [&](MachineIRBuilder &MIRBuilder) {
+ return MIRBuilder.buildInstr(SPIRV::OpTypeInt)
+ .addDef(createTypeVReg(MIRBuilder))
+ .addImm(Width)
+ .addImm(IsSigned ? 1 : 0);
+ });
}
SPIRVType *SPIRVGlobalRegistry::getOpTypeFloat(uint32_t Width,
MachineIRBuilder &MIRBuilder) {
- auto MIB = MIRBuilder.buildInstr(SPIRV::OpTypeFloat)
- .addDef(createTypeVReg(MIRBuilder))
- .addImm(Width);
- return MIB;
+ return createOpType(MIRBuilder, [&](MachineIRBuilder &MIRBuilder) {
+ return MIRBuilder.buildInstr(SPIRV::OpTypeFloat)
+ .addDef(createTypeVReg(MIRBuilder))
+ .addImm(Width);
+ });
}
SPIRVType *SPIRVGlobalRegistry::getOpTypeVoid(MachineIRBuilder &MIRBuilder) {
- return MIRBuilder.buildInstr(SPIRV::OpTypeVoid)
- .addDef(createTypeVReg(MIRBuilder));
+ return createOpType(MIRBuilder, [&](MachineIRBuilder &MIRBuilder) {
+ return MIRBuilder.buildInstr(SPIRV::OpTypeVoid)
+ .addDef(createTypeVReg(MIRBuilder));
+ });
+}
+
+SPIRVType *SPIRVGlobalRegistry::createOpType(
+ MachineIRBuilder &MIRBuilder,
+ std::function<MachineInstr *(MachineIRBuilder &)> Op) {
+ auto oldInsertPoint = MIRBuilder.getInsertPt();
+ MachineBasicBlock *OldMBB = &MIRBuilder.getMBB();
+
+ auto LastInsertedType = LastInsertedTypeMap.find(CurMF);
+ if (LastInsertedType != LastInsertedTypeMap.end()) {
+ MIRBuilder.setInsertPt(*MIRBuilder.getMF().begin(),
+ LastInsertedType->second->getIterator());
+ } else {
+ MIRBuilder.setInsertPt(*MIRBuilder.getMF().begin(),
+ MIRBuilder.getMF().begin()->begin());
+ auto Result = LastInsertedTypeMap.try_emplace(CurMF, nullptr);
+ assert(Result.second);
+ LastInsertedType = Result.first;
+ }
+
+ MachineInstr *Type = Op(MIRBuilder);
+ LastInsertedType->second = Type;
+
+ MIRBuilder.setInsertPt(*OldMBB, oldInsertPoint);
+ return Type;
}
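The usage pattern mirrors getOpTypeBool above: the caller passes a lambda that builds the OpType instruction, and createOpType repositions the builder so the type lands at the top of the function, chained after the previously emitted type, before restoring the original insertion point:

    SPIRVType *BoolTy = createOpType(MIRBuilder, [&](MachineIRBuilder &B) {
      return B.buildInstr(SPIRV::OpTypeBool).addDef(createTypeVReg(B));
    });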
SPIRVType *SPIRVGlobalRegistry::getOpTypeVector(uint32_t NumElems,
@@ -147,11 +180,12 @@ SPIRVType *SPIRVGlobalRegistry::getOpTypeVector(uint32_t NumElems,
EleOpc == SPIRV::OpTypeBool) &&
"Invalid vector element type");
- auto MIB = MIRBuilder.buildInstr(SPIRV::OpTypeVector)
- .addDef(createTypeVReg(MIRBuilder))
- .addUse(getSPIRVTypeID(ElemType))
- .addImm(NumElems);
- return MIB;
+ return createOpType(MIRBuilder, [&](MachineIRBuilder &MIRBuilder) {
+ return MIRBuilder.buildInstr(SPIRV::OpTypeVector)
+ .addDef(createTypeVReg(MIRBuilder))
+ .addUse(getSPIRVTypeID(ElemType))
+ .addImm(NumElems);
+ });
}
std::tuple<Register, ConstantInt *, bool, unsigned>
@@ -688,11 +722,12 @@ SPIRVType *SPIRVGlobalRegistry::getOpTypeArray(uint32_t NumElems,
SPIRVType *SpvTypeInt32 = getOrCreateSPIRVIntegerType(32, MIRBuilder);
Register NumElementsVReg =
buildConstantInt(NumElems, MIRBuilder, SpvTypeInt32, EmitIR);
- auto MIB = MIRBuilder.buildInstr(SPIRV::OpTypeArray)
- .addDef(createTypeVReg(MIRBuilder))
- .addUse(getSPIRVTypeID(ElemType))
- .addUse(NumElementsVReg);
- return MIB;
+ return createOpType(MIRBuilder, [&](MachineIRBuilder &MIRBuilder) {
+ return MIRBuilder.buildInstr(SPIRV::OpTypeArray)
+ .addDef(createTypeVReg(MIRBuilder))
+ .addUse(getSPIRVTypeID(ElemType))
+ .addUse(NumElementsVReg);
+ });
}
SPIRVType *SPIRVGlobalRegistry::getOpTypeOpaque(const StructType *Ty,
@@ -700,10 +735,12 @@ SPIRVType *SPIRVGlobalRegistry::getOpTypeOpaque(const StructType *Ty,
assert(Ty->hasName());
const StringRef Name = Ty->hasName() ? Ty->getName() : "";
Register ResVReg = createTypeVReg(MIRBuilder);
- auto MIB = MIRBuilder.buildInstr(SPIRV::OpTypeOpaque).addDef(ResVReg);
- addStringImm(Name, MIB);
- buildOpName(ResVReg, Name, MIRBuilder);
- return MIB;
+ return createOpType(MIRBuilder, [&](MachineIRBuilder &MIRBuilder) {
+ auto MIB = MIRBuilder.buildInstr(SPIRV::OpTypeOpaque).addDef(ResVReg);
+ addStringImm(Name, MIB);
+ buildOpName(ResVReg, Name, MIRBuilder);
+ return MIB;
+ });
}
SPIRVType *SPIRVGlobalRegistry::getOpTypeStruct(const StructType *Ty,
@@ -717,14 +754,16 @@ SPIRVType *SPIRVGlobalRegistry::getOpTypeStruct(const StructType *Ty,
FieldTypes.push_back(getSPIRVTypeID(ElemTy));
}
Register ResVReg = createTypeVReg(MIRBuilder);
- auto MIB = MIRBuilder.buildInstr(SPIRV::OpTypeStruct).addDef(ResVReg);
- for (const auto &Ty : FieldTypes)
- MIB.addUse(Ty);
- if (Ty->hasName())
- buildOpName(ResVReg, Ty->getName(), MIRBuilder);
- if (Ty->isPacked())
- buildOpDecorate(ResVReg, MIRBuilder, SPIRV::Decoration::CPacked, {});
- return MIB;
+ return createOpType(MIRBuilder, [&](MachineIRBuilder &MIRBuilder) {
+ auto MIB = MIRBuilder.buildInstr(SPIRV::OpTypeStruct).addDef(ResVReg);
+ for (const auto &Ty : FieldTypes)
+ MIB.addUse(Ty);
+ if (Ty->hasName())
+ buildOpName(ResVReg, Ty->getName(), MIRBuilder);
+ if (Ty->isPacked())
+ buildOpDecorate(ResVReg, MIRBuilder, SPIRV::Decoration::CPacked, {});
+ return MIB;
+ });
}
SPIRVType *SPIRVGlobalRegistry::getOrCreateSpecialType(
@@ -739,17 +778,22 @@ SPIRVType *SPIRVGlobalRegistry::getOpTypePointer(
MachineIRBuilder &MIRBuilder, Register Reg) {
if (!Reg.isValid())
Reg = createTypeVReg(MIRBuilder);
- return MIRBuilder.buildInstr(SPIRV::OpTypePointer)
- .addDef(Reg)
- .addImm(static_cast<uint32_t>(SC))
- .addUse(getSPIRVTypeID(ElemType));
+
+ return createOpType(MIRBuilder, [&](MachineIRBuilder &MIRBuilder) {
+ return MIRBuilder.buildInstr(SPIRV::OpTypePointer)
+ .addDef(Reg)
+ .addImm(static_cast<uint32_t>(SC))
+ .addUse(getSPIRVTypeID(ElemType));
+ });
}
SPIRVType *SPIRVGlobalRegistry::getOpTypeForwardPointer(
SPIRV::StorageClass::StorageClass SC, MachineIRBuilder &MIRBuilder) {
- return MIRBuilder.buildInstr(SPIRV::OpTypeForwardPointer)
- .addUse(createTypeVReg(MIRBuilder))
- .addImm(static_cast<uint32_t>(SC));
+ return createOpType(MIRBuilder, [&](MachineIRBuilder &MIRBuilder) {
+ return MIRBuilder.buildInstr(SPIRV::OpTypeForwardPointer)
+ .addUse(createTypeVReg(MIRBuilder))
+ .addImm(static_cast<uint32_t>(SC));
+ });
}
SPIRVType *SPIRVGlobalRegistry::getOpTypeFunction(
diff --git a/llvm/lib/Target/SPIRV/SPIRVGlobalRegistry.h b/llvm/lib/Target/SPIRV/SPIRVGlobalRegistry.h
index ed9cfc0..cad2bf9 100644
--- a/llvm/lib/Target/SPIRV/SPIRVGlobalRegistry.h
+++ b/llvm/lib/Target/SPIRV/SPIRVGlobalRegistry.h
@@ -64,6 +64,10 @@ class SPIRVGlobalRegistry {
SmallPtrSet<const Type *, 4> TypesInProcessing;
DenseMap<const Type *, SPIRVType *> ForwardPointerTypes;
+ // Stores, for each function, the last inserted SPIR-V type.
+ // See: SPIRVGlobalRegistry::createOpType.
+ DenseMap<const MachineFunction *, MachineInstr *> LastInsertedTypeMap;
+
// if a function returns a pointer, this is to map it into TypedPointerType
DenseMap<const Function *, TypedPointerType *> FunResPointerTypes;
@@ -97,6 +101,13 @@ class SPIRVGlobalRegistry {
SPIRV::AccessQualifier::AccessQualifier AccessQual,
bool EmitIR);
+ // Internal function creating an OpType at the correct position in the
+ // function by tweaking the insertion point of the passed "MIRBuilder" and
+ // restoring it afterwards. "Op" should be the function creating the
+ // specific OpType you need, and should return the newly created instruction.
+ SPIRVType *createOpType(MachineIRBuilder &MIRBuilder,
+ std::function<MachineInstr *(MachineIRBuilder &)> Op);
+
public:
SPIRVGlobalRegistry(unsigned PointerSize);
diff --git a/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp b/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp
index 7af92b8..e475810 100644
--- a/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp
@@ -33,27 +33,6 @@
#include "llvm/IR/IntrinsicsSPIRV.h"
#include "llvm/Support/Debug.h"
-namespace {
-
-struct SyncScopeIDs {
- llvm::SyncScope::ID Work_ItemSSID;
- llvm::SyncScope::ID WorkGroupSSID;
- llvm::SyncScope::ID DeviceSSID;
- llvm::SyncScope::ID AllSVMDevicesSSID;
- llvm::SyncScope::ID SubGroupSSID;
-
- SyncScopeIDs() {}
- SyncScopeIDs(llvm::LLVMContext &Context) {
- Work_ItemSSID = Context.getOrInsertSyncScopeID("work_item");
- WorkGroupSSID = Context.getOrInsertSyncScopeID("workgroup");
- DeviceSSID = Context.getOrInsertSyncScopeID("device");
- AllSVMDevicesSSID = Context.getOrInsertSyncScopeID("all_svm_devices");
- SubGroupSSID = Context.getOrInsertSyncScopeID("sub_group");
- }
-};
-
-} // namespace
-
#define DEBUG_TYPE "spirv-isel"
using namespace llvm;
@@ -76,7 +55,6 @@ class SPIRVInstructionSelector : public InstructionSelector {
const RegisterBankInfo &RBI;
SPIRVGlobalRegistry &GR;
MachineRegisterInfo *MRI;
- SyncScopeIDs SSIDs;
MachineFunction *HasVRegsReset = nullptr;
/// We need to keep track of the number we give to anonymous global values to
@@ -305,7 +283,6 @@ void SPIRVInstructionSelector::setupMF(MachineFunction &MF, GISelKnownBits *KB,
CodeGenCoverage *CoverageInfo,
ProfileSummaryInfo *PSI,
BlockFrequencyInfo *BFI) {
- SSIDs = SyncScopeIDs(MF.getFunction().getContext());
MRI = &MF.getRegInfo();
GR.setCurrentFunc(MF);
InstructionSelector::setupMF(MF, KB, CoverageInfo, PSI, BFI);
@@ -845,29 +822,6 @@ bool SPIRVInstructionSelector::selectBitcast(Register ResVReg,
return selectUnOp(ResVReg, ResType, I, SPIRV::OpBitcast);
}
-static SPIRV::Scope::Scope getScope(SyncScope::ID Ord,
- const SyncScopeIDs &SSIDs) {
- if (Ord == SyncScope::SingleThread || Ord == SSIDs.Work_ItemSSID)
- return SPIRV::Scope::Invocation;
- else if (Ord == SyncScope::System || Ord == SSIDs.DeviceSSID)
- return SPIRV::Scope::Device;
- else if (Ord == SSIDs.WorkGroupSSID)
- return SPIRV::Scope::Workgroup;
- else if (Ord == SSIDs.AllSVMDevicesSSID)
- return SPIRV::Scope::CrossDevice;
- else if (Ord == SSIDs.SubGroupSSID)
- return SPIRV::Scope::Subgroup;
- else
- // OpenCL approach is: "The functions that do not have memory_scope argument
- // have the same semantics as the corresponding functions with the
- // memory_scope argument set to memory_scope_device." See ref.: //
- // https://registry.khronos.org/OpenCL/specs/3.0-unified/html/OpenCL_C.html#atomic-functions
- // In our case if the scope is unknown, assuming that SPIR-V code is to be
- // consumed in an OpenCL environment, we use the same approach and set the
- // scope to memory_scope_device.
- return SPIRV::Scope::Device;
-}
-
static void addMemoryOperands(MachineMemOperand *MemOp,
MachineInstrBuilder &MIB) {
uint32_t SpvMemOp = static_cast<uint32_t>(SPIRV::MemoryOperand::None);
@@ -1020,8 +974,8 @@ bool SPIRVInstructionSelector::selectAtomicRMW(Register ResVReg,
unsigned NegateOpcode) const {
assert(I.hasOneMemOperand());
const MachineMemOperand *MemOp = *I.memoperands_begin();
- uint32_t Scope =
- static_cast<uint32_t>(getScope(MemOp->getSyncScopeID(), SSIDs));
+ uint32_t Scope = static_cast<uint32_t>(getMemScope(
+ GR.CurMF->getFunction().getContext(), MemOp->getSyncScopeID()));
Register ScopeReg = buildI32Constant(Scope, I);
Register Ptr = I.getOperand(1).getReg();
@@ -1092,7 +1046,8 @@ bool SPIRVInstructionSelector::selectFence(MachineInstr &I) const {
uint32_t MemSem = static_cast<uint32_t>(getMemSemantics(AO));
Register MemSemReg = buildI32Constant(MemSem, I);
SyncScope::ID Ord = SyncScope::ID(I.getOperand(1).getImm());
- uint32_t Scope = static_cast<uint32_t>(getScope(Ord, SSIDs));
+ uint32_t Scope = static_cast<uint32_t>(
+ getMemScope(GR.CurMF->getFunction().getContext(), Ord));
Register ScopeReg = buildI32Constant(Scope, I);
MachineBasicBlock &BB = *I.getParent();
return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpMemoryBarrier))
@@ -1111,8 +1066,8 @@ bool SPIRVInstructionSelector::selectAtomicCmpXchg(Register ResVReg,
if (!isa<GIntrinsic>(I)) {
assert(I.hasOneMemOperand());
const MachineMemOperand *MemOp = *I.memoperands_begin();
- unsigned Scope =
- static_cast<uint32_t>(getScope(MemOp->getSyncScopeID(), SSIDs));
+ unsigned Scope = static_cast<uint32_t>(getMemScope(
+ GR.CurMF->getFunction().getContext(), MemOp->getSyncScopeID()));
ScopeReg = buildI32Constant(Scope, I);
unsigned ScSem = static_cast<uint32_t>(
diff --git a/llvm/lib/Target/SPIRV/SPIRVPreLegalizer.cpp b/llvm/lib/Target/SPIRV/SPIRVPreLegalizer.cpp
index f1b10e2..cd0aff1 100644
--- a/llvm/lib/Target/SPIRV/SPIRVPreLegalizer.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVPreLegalizer.cpp
@@ -389,9 +389,7 @@ void processInstr(MachineInstr &MI, MachineIRBuilder &MIB,
createNewIdReg(nullptr, MI.getOperand(0).getReg(), MRI, *GR).first;
AssignTypeInst.getOperand(1).setReg(NewReg);
MI.getOperand(0).setReg(NewReg);
- MIB.setInsertPt(*MI.getParent(),
- (MI.getNextNode() ? MI.getNextNode()->getIterator()
- : MI.getParent()->end()));
+ MIB.setInsertPt(*MI.getParent(), MI.getIterator());
for (auto &Op : MI.operands()) {
if (!Op.isReg() || Op.isDef())
continue;
diff --git a/llvm/lib/Target/SPIRV/SPIRVRegularizer.cpp b/llvm/lib/Target/SPIRV/SPIRVRegularizer.cpp
index 322e051..246eecd 100644
--- a/llvm/lib/Target/SPIRV/SPIRVRegularizer.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVRegularizer.cpp
@@ -127,7 +127,8 @@ void SPIRVRegularizer::runLowerConstExpr(Function &F) {
ReplList.push_back(Inst);
Repl = InsertElementInst::Create(
(Repl ? Repl : PoisonValue::get(Vec->getType())), V,
- ConstantInt::get(Type::getInt32Ty(Ctx), Idx++), "", InsPoint);
+ ConstantInt::get(Type::getInt32Ty(Ctx), Idx++), "",
+ InsPoint->getIterator());
}
WorkList.splice(WorkList.begin(), ReplList);
return Repl;
@@ -234,11 +235,12 @@ void SPIRVRegularizer::visitCallScalToVec(CallInst *CI, StringRef MangledName,
// %call = OpExtInst %v2uint %1 s_min %14 %11
auto ConstInt = ConstantInt::get(IntegerType::get(CI->getContext(), 32), 0);
PoisonValue *PVal = PoisonValue::get(Arg0Ty);
- Instruction *Inst =
- InsertElementInst::Create(PVal, CI->getOperand(1), ConstInt, "", CI);
+ Instruction *Inst = InsertElementInst::Create(
+ PVal, CI->getOperand(1), ConstInt, "", CI->getIterator());
ElementCount VecElemCount = cast<VectorType>(Arg0Ty)->getElementCount();
Constant *ConstVec = ConstantVector::getSplat(VecElemCount, ConstInt);
- Value *NewVec = new ShuffleVectorInst(Inst, PVal, ConstVec, "", CI);
+ Value *NewVec =
+ new ShuffleVectorInst(Inst, PVal, ConstVec, "", CI->getIterator());
CI->setOperand(1, NewVec);
CI->replaceUsesOfWith(OldF, NewF);
CI->mutateFunctionType(NewF->getFunctionType());
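Both hunks in this file follow the same LLVM API migration: instruction-creation routines now take a BasicBlock::iterator insertion position instead of a raw Instruction *, which keeps debug-info attachment points unambiguous. A minimal sketch of the two spellings (assuming an existing instruction CI and operands Vec, Elt, Idx):

// Old style: position passed as Instruction* (inserts before CI).
// Instruction *IE = InsertElementInst::Create(Vec, Elt, Idx, "", CI);
// New style: the same position expressed as an iterator.
Instruction *IE = InsertElementInst::Create(Vec, Elt, Idx, "", CI->getIterator());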
diff --git a/llvm/lib/Target/SPIRV/SPIRVStripConvergentIntrinsics.cpp b/llvm/lib/Target/SPIRV/SPIRVStripConvergentIntrinsics.cpp
index b632d78..c87048b 100644
--- a/llvm/lib/Target/SPIRV/SPIRVStripConvergentIntrinsics.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVStripConvergentIntrinsics.cpp
@@ -62,7 +62,7 @@ public:
return;
auto *NewCall = CallBase::removeOperandBundle(
- CI, LLVMContext::OB_convergencectrl, CI);
+ CI, LLVMContext::OB_convergencectrl, CI->getIterator());
NewCall->copyMetadata(*CI);
CI->replaceAllUsesWith(NewCall);
ToRemove.insert(CI);
diff --git a/llvm/lib/Target/SPIRV/SPIRVUtils.cpp b/llvm/lib/Target/SPIRV/SPIRVUtils.cpp
index 53601e40..aec144f 100644
--- a/llvm/lib/Target/SPIRV/SPIRVUtils.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVUtils.cpp
@@ -253,6 +253,31 @@ SPIRV::MemorySemantics::MemorySemantics getMemSemantics(AtomicOrdering Ord) {
llvm_unreachable(nullptr);
}
+SPIRV::Scope::Scope getMemScope(LLVMContext &Ctx, SyncScope::ID Id) {
+  // Named by
+  // https://registry.khronos.org/SPIR-V/specs/unified1/SPIRV.html#_scope_id.
+  // We don't need aliases for Invocation and CrossDevice, as we already have
+  // them covered by the "singlethread" and "" strings respectively (see the
+  // implementation of LLVMContext::LLVMContext()).
+  // Sync scope IDs are per-LLVMContext, so they are looked up on each call.
+  const struct {
+    llvm::SyncScope::ID SubGroup;
+    llvm::SyncScope::ID WorkGroup;
+    llvm::SyncScope::ID Device;
+  } SSIDs = {Ctx.getOrInsertSyncScopeID("subgroup"),
+             Ctx.getOrInsertSyncScopeID("workgroup"),
+             Ctx.getOrInsertSyncScopeID("device")};
+
+ if (Id == llvm::SyncScope::SingleThread)
+ return SPIRV::Scope::Invocation;
+ else if (Id == llvm::SyncScope::System)
+ return SPIRV::Scope::CrossDevice;
+ else if (Id == SSIDs.SubGroup)
+ return SPIRV::Scope::Subgroup;
+ else if (Id == SSIDs.WorkGroup)
+ return SPIRV::Scope::Workgroup;
+ else if (Id == SSIDs.Device)
+ return SPIRV::Scope::Device;
+ return SPIRV::Scope::CrossDevice;
+}
+
MachineInstr *getDefInstrMaybeConstant(Register &ConstReg,
const MachineRegisterInfo *MRI) {
MachineInstr *MI = MRI->getVRegDef(ConstReg);
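A minimal usage sketch for the new helper (illustrative only, not part of the patch; it relies on the headers this file already includes): named sync scopes resolve to their SPIR-V counterparts, and anything unrecognized conservatively widens to CrossDevice.

LLVMContext Ctx;
assert(getMemScope(Ctx, SyncScope::SingleThread) == SPIRV::Scope::Invocation);
assert(getMemScope(Ctx, SyncScope::System) == SPIRV::Scope::CrossDevice);
assert(getMemScope(Ctx, Ctx.getOrInsertSyncScopeID("workgroup")) ==
       SPIRV::Scope::Workgroup);
assert(getMemScope(Ctx, Ctx.getOrInsertSyncScopeID("subgroup")) ==
       SPIRV::Scope::Subgroup);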
diff --git a/llvm/lib/Target/SPIRV/SPIRVUtils.h b/llvm/lib/Target/SPIRV/SPIRVUtils.h
index 93d64a7..7c76160 100644
--- a/llvm/lib/Target/SPIRV/SPIRVUtils.h
+++ b/llvm/lib/Target/SPIRV/SPIRVUtils.h
@@ -145,6 +145,8 @@ getMemSemanticsForStorageClass(SPIRV::StorageClass::StorageClass SC);
SPIRV::MemorySemantics::MemorySemantics getMemSemantics(AtomicOrdering Ord);
+SPIRV::Scope::Scope getMemScope(LLVMContext &Ctx, SyncScope::ID Id);
+
// Find def instruction for the given ConstReg, walking through
// spv_track_constant and ASSIGN_TYPE instructions. Updates ConstReg by def
// of OpConstant instruction.
diff --git a/llvm/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmParser.cpp b/llvm/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmParser.cpp
index 8e8d08f..129fdaf 100644
--- a/llvm/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmParser.cpp
+++ b/llvm/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmParser.cpp
@@ -204,20 +204,20 @@ struct WebAssemblyOperand : public MCParsedAsmOperand {
};
// Perhaps this should go somewhere common.
-static wasm::WasmLimits DefaultLimits() {
+static wasm::WasmLimits defaultLimits() {
return {wasm::WASM_LIMITS_FLAG_NONE, 0, 0};
}
-static MCSymbolWasm *GetOrCreateFunctionTableSymbol(MCContext &Ctx,
+static MCSymbolWasm *getOrCreateFunctionTableSymbol(MCContext &Ctx,
const StringRef &Name,
- bool is64) {
+ bool Is64) {
MCSymbolWasm *Sym = cast_or_null<MCSymbolWasm>(Ctx.lookupSymbol(Name));
if (Sym) {
if (!Sym->isFunctionTable())
Ctx.reportError(SMLoc(), "symbol is not a wasm funcref table");
} else {
Sym = cast<MCSymbolWasm>(Ctx.getOrCreateSymbol(Name));
- Sym->setFunctionTable(is64);
+ Sym->setFunctionTable(Is64);
// The default function table is synthesized by the linker.
Sym->setUndefined();
}
@@ -265,7 +265,7 @@ class WebAssemblyAsmParser final : public MCTargetAsmParser {
MCSymbolWasm *DefaultFunctionTable = nullptr;
MCSymbol *LastFunctionLabel = nullptr;
- bool is64;
+ bool Is64;
WebAssemblyAsmTypeCheck TC;
// Don't type check if -no-type-check was set.
@@ -275,8 +275,8 @@ public:
WebAssemblyAsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
const MCInstrInfo &MII, const MCTargetOptions &Options)
: MCTargetAsmParser(Options, STI, MII), Parser(Parser),
- Lexer(Parser.getLexer()), is64(STI.getTargetTriple().isArch64Bit()),
- TC(Parser, MII, is64), SkipTypeCheck(Options.MCNoTypeCheck) {
+ Lexer(Parser.getLexer()), Is64(STI.getTargetTriple().isArch64Bit()),
+ TC(Parser, MII, Is64), SkipTypeCheck(Options.MCNoTypeCheck) {
setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
// Don't type check if this is inline asm, since that is a naked sequence of
// instructions without a function/locals decl.
@@ -290,8 +290,8 @@ public:
void Initialize(MCAsmParser &Parser) override {
MCAsmParserExtension::Initialize(Parser);
- DefaultFunctionTable = GetOrCreateFunctionTableSymbol(
- getContext(), "__indirect_function_table", is64);
+ DefaultFunctionTable = getOrCreateFunctionTableSymbol(
+ getContext(), "__indirect_function_table", Is64);
if (!STI->checkFeatures("+reference-types"))
DefaultFunctionTable->setOmitFromLinkingSection();
}
@@ -538,28 +538,26 @@ public:
auto &Tok = Lexer.getTok();
if (Tok.is(AsmToken::Identifier)) {
auto *Sym =
- GetOrCreateFunctionTableSymbol(getContext(), Tok.getString(), is64);
+ getOrCreateFunctionTableSymbol(getContext(), Tok.getString(), Is64);
const auto *Val = MCSymbolRefExpr::create(Sym, getContext());
*Op = std::make_unique<WebAssemblyOperand>(
Tok.getLoc(), Tok.getEndLoc(), WebAssemblyOperand::SymOp{Val});
Parser.Lex();
return expect(AsmToken::Comma, ",");
- } else {
- const auto *Val =
- MCSymbolRefExpr::create(DefaultFunctionTable, getContext());
- *Op = std::make_unique<WebAssemblyOperand>(
- SMLoc(), SMLoc(), WebAssemblyOperand::SymOp{Val});
- return false;
}
- } else {
- // For the MVP there is at most one table whose number is 0, but we can't
- // write a table symbol or issue relocations. Instead we just ensure the
- // table is live and write a zero.
- getStreamer().emitSymbolAttribute(DefaultFunctionTable, MCSA_NoDeadStrip);
- *Op = std::make_unique<WebAssemblyOperand>(SMLoc(), SMLoc(),
- WebAssemblyOperand::IntOp{0});
+ const auto *Val =
+ MCSymbolRefExpr::create(DefaultFunctionTable, getContext());
+ *Op = std::make_unique<WebAssemblyOperand>(
+ SMLoc(), SMLoc(), WebAssemblyOperand::SymOp{Val});
return false;
}
+ // For the MVP there is at most one table whose number is 0, but we can't
+ // write a table symbol or issue relocations. Instead we just ensure the
+ // table is live and write a zero.
+ getStreamer().emitSymbolAttribute(DefaultFunctionTable, MCSA_NoDeadStrip);
+ *Op = std::make_unique<WebAssemblyOperand>(SMLoc(), SMLoc(),
+ WebAssemblyOperand::IntOp{0});
+ return false;
}
bool parseInstruction(ParseInstructionInfo & /*Info*/, StringRef Name,
@@ -674,7 +672,7 @@ public:
// expects to be able to recreate the actual unique-ified type indices.
auto &Ctx = getContext();
auto Loc = Parser.getTok();
- auto Signature = Ctx.createWasmSignature();
+ auto *Signature = Ctx.createWasmSignature();
if (parseSignature(Signature))
return true;
// Got signature as block type, don't need more
@@ -879,9 +877,9 @@ public:
return false;
}
- bool CheckDataSection() {
+ bool checkDataSection() {
if (CurrentState != DataSection) {
- auto WS = cast<MCSectionWasm>(getStreamer().getCurrentSectionOnly());
+ auto *WS = cast<MCSectionWasm>(getStreamer().getCurrentSectionOnly());
if (WS && WS->isText())
return error("data directive must occur in a data segment: ",
Lexer.getTok());
@@ -929,7 +927,7 @@ public:
return error("Unknown type in .globaltype modifier: ", TypeTok);
}
// Now set this symbol with the correct type.
- auto WasmSym = cast<MCSymbolWasm>(Ctx.getOrCreateSymbol(SymName));
+ auto *WasmSym = cast<MCSymbolWasm>(Ctx.getOrCreateSymbol(SymName));
WasmSym->setType(wasm::WASM_SYMBOL_TYPE_GLOBAL);
WasmSym->setGlobalType(wasm::WasmGlobalType{uint8_t(*Type), Mutable});
// And emit the directive again.
@@ -954,15 +952,15 @@ public:
if (!ElemType)
return error("Unknown type in .tabletype directive: ", ElemTypeTok);
- wasm::WasmLimits Limits = DefaultLimits();
+ wasm::WasmLimits Limits = defaultLimits();
if (isNext(AsmToken::Comma) && parseLimits(&Limits))
return ParseStatus::Failure;
// Now that we have the name and table type, we can actually create the
// symbol
- auto WasmSym = cast<MCSymbolWasm>(Ctx.getOrCreateSymbol(SymName));
+ auto *WasmSym = cast<MCSymbolWasm>(Ctx.getOrCreateSymbol(SymName));
WasmSym->setType(wasm::WASM_SYMBOL_TYPE_TABLE);
- if (is64) {
+ if (Is64) {
Limits.Flags |= wasm::WASM_LIMITS_FLAG_IS_64;
}
wasm::WasmTableType Type = {*ElemType, Limits};
@@ -980,7 +978,7 @@ public:
auto SymName = expectIdent();
if (SymName.empty())
return ParseStatus::Failure;
- auto WasmSym = cast<MCSymbolWasm>(Ctx.getOrCreateSymbol(SymName));
+ auto *WasmSym = cast<MCSymbolWasm>(Ctx.getOrCreateSymbol(SymName));
if (WasmSym->isDefined()) {
// We push 'Function' either when a label is parsed or a .functype
// directive is parsed. The reason it is not easy to do this uniformly
@@ -1001,7 +999,7 @@ public:
CurrentState = FunctionStart;
LastFunctionLabel = WasmSym;
}
- auto Signature = Ctx.createWasmSignature();
+ auto *Signature = Ctx.createWasmSignature();
if (parseSignature(Signature))
return ParseStatus::Failure;
TC.funcDecl(*Signature);
@@ -1021,7 +1019,7 @@ public:
auto ExportName = expectIdent();
if (ExportName.empty())
return ParseStatus::Failure;
- auto WasmSym = cast<MCSymbolWasm>(Ctx.getOrCreateSymbol(SymName));
+ auto *WasmSym = cast<MCSymbolWasm>(Ctx.getOrCreateSymbol(SymName));
WasmSym->setExportName(Ctx.allocateString(ExportName));
TOut.emitExportName(WasmSym, ExportName);
return expect(AsmToken::EndOfStatement, "EOL");
@@ -1036,7 +1034,7 @@ public:
auto ImportModule = expectIdent();
if (ImportModule.empty())
return ParseStatus::Failure;
- auto WasmSym = cast<MCSymbolWasm>(Ctx.getOrCreateSymbol(SymName));
+ auto *WasmSym = cast<MCSymbolWasm>(Ctx.getOrCreateSymbol(SymName));
WasmSym->setImportModule(Ctx.allocateString(ImportModule));
TOut.emitImportModule(WasmSym, ImportModule);
return expect(AsmToken::EndOfStatement, "EOL");
@@ -1051,7 +1049,7 @@ public:
auto ImportName = expectIdent();
if (ImportName.empty())
return ParseStatus::Failure;
- auto WasmSym = cast<MCSymbolWasm>(Ctx.getOrCreateSymbol(SymName));
+ auto *WasmSym = cast<MCSymbolWasm>(Ctx.getOrCreateSymbol(SymName));
WasmSym->setImportName(Ctx.allocateString(ImportName));
TOut.emitImportName(WasmSym, ImportName);
return expect(AsmToken::EndOfStatement, "EOL");
@@ -1061,8 +1059,8 @@ public:
auto SymName = expectIdent();
if (SymName.empty())
return ParseStatus::Failure;
- auto WasmSym = cast<MCSymbolWasm>(Ctx.getOrCreateSymbol(SymName));
- auto Signature = Ctx.createWasmSignature();
+ auto *WasmSym = cast<MCSymbolWasm>(Ctx.getOrCreateSymbol(SymName));
+ auto *Signature = Ctx.createWasmSignature();
if (parseRegTypeList(Signature->Params))
return ParseStatus::Failure;
WasmSym->setSignature(Signature);
@@ -1089,7 +1087,7 @@ public:
DirectiveID.getString() == ".int16" ||
DirectiveID.getString() == ".int32" ||
DirectiveID.getString() == ".int64") {
- if (CheckDataSection())
+ if (checkDataSection())
return ParseStatus::Failure;
const MCExpr *Val;
SMLoc End;
@@ -1102,7 +1100,7 @@ public:
}
if (DirectiveID.getString() == ".asciz") {
- if (CheckDataSection())
+ if (checkDataSection())
return ParseStatus::Failure;
std::string S;
if (Parser.parseEscapedString(S))
@@ -1146,7 +1144,7 @@ public:
if (Op0.getImm() == -1)
Op0.setImm(Align);
}
- if (is64) {
+ if (Is64) {
// Upgrade 32-bit loads/stores to 64-bit. These mostly differ by having
// an offset64 arg instead of offset32, but to the assembler matcher
// they're both immediates so don't get selected for.
@@ -1171,9 +1169,9 @@ public:
SmallString<128> Message;
raw_svector_ostream OS(Message);
OS << "instruction requires:";
- for (unsigned i = 0, e = MissingFeatures.size(); i != e; ++i)
- if (MissingFeatures.test(i))
- OS << ' ' << getSubtargetFeatureName(i);
+ for (unsigned I = 0, E = MissingFeatures.size(); I != E; ++I)
+ if (MissingFeatures.test(I))
+ OS << ' ' << getSubtargetFeatureName(I);
return Parser.Error(IDLoc, Message);
}
case Match_MnemonicFail:
@@ -1198,11 +1196,11 @@ public:
void doBeforeLabelEmit(MCSymbol *Symbol, SMLoc IDLoc) override {
// Code below only applies to labels in text sections.
- auto CWS = cast<MCSectionWasm>(getStreamer().getCurrentSectionOnly());
+ auto *CWS = cast<MCSectionWasm>(getStreamer().getCurrentSectionOnly());
if (!CWS->isText())
return;
- auto WasmSym = cast<MCSymbolWasm>(Symbol);
+ auto *WasmSym = cast<MCSymbolWasm>(Symbol);
// Unlike other targets, we don't allow data in text sections (labels
// declared with .type @object).
if (WasmSym->getType() == wasm::WASM_SYMBOL_TYPE_DATA) {
@@ -1222,7 +1220,7 @@ public:
// its name when we create this one. It would be nice to honor their
// choice, while still ensuring that we create one if they forget.
// (that requires coordination with WasmAsmParser::parseSectionDirective)
- auto SecName = ".text." + SymName;
+ std::string SecName = (".text." + SymName).str();
auto *Group = CWS->getGroup();
// If the current section is a COMDAT, also set the flag on the symbol.
@@ -1259,7 +1257,7 @@ public:
if (!SkipTypeCheck)
TC.endOfFunction(ErrorLoc);
// Reset the type checker state.
- TC.Clear();
+ TC.clear();
}
void onEndOfFile() override { ensureEmptyNestingStack(); }
@@ -1277,7 +1275,7 @@ extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeWebAssemblyAsmParser() {
#define GET_MATCHER_IMPLEMENTATION
#include "WebAssemblyGenAsmMatcher.inc"
-StringRef GetMnemonic(unsigned Opc) {
+StringRef getMnemonic(unsigned Opc) {
// FIXME: linear search!
for (auto &ME : MatchTable0) {
if (ME.Opcode == Opc) {
diff --git a/llvm/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmTypeCheck.cpp b/llvm/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmTypeCheck.cpp
index 9f8f78a..8b1e1dc 100644
--- a/llvm/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmTypeCheck.cpp
+++ b/llvm/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmTypeCheck.cpp
@@ -38,14 +38,14 @@ using namespace llvm;
#define DEBUG_TYPE "wasm-asm-parser"
-extern StringRef GetMnemonic(unsigned Opc);
+extern StringRef getMnemonic(unsigned Opc);
namespace llvm {
WebAssemblyAsmTypeCheck::WebAssemblyAsmTypeCheck(MCAsmParser &Parser,
const MCInstrInfo &MII,
- bool is64)
- : Parser(Parser), MII(MII), is64(is64) {}
+ bool Is64)
+ : Parser(Parser), MII(MII), Is64(Is64) {}
void WebAssemblyAsmTypeCheck::funcDecl(const wasm::WasmSignature &Sig) {
LocalTypes.assign(Sig.Params.begin(), Sig.Params.end());
@@ -70,14 +70,9 @@ void WebAssemblyAsmTypeCheck::dumpTypeStack(Twine Msg) {
}
bool WebAssemblyAsmTypeCheck::typeError(SMLoc ErrorLoc, const Twine &Msg) {
- // Once you get one type error in a function, it will likely trigger more
- // which are mostly not helpful.
- if (TypeErrorThisFunction)
- return true;
// If we're currently in unreachable code, we suppress errors completely.
if (Unreachable)
return false;
- TypeErrorThisFunction = true;
dumpTypeStack("current stack: ");
return Parser.Error(ErrorLoc, Msg);
}
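Dropping TypeErrorThisFunction changes the checker's error model: instead of silencing every diagnostic after the first type error in a function, each check now reports independently and callers accumulate failures. A sketch of the accumulation idiom the rewrite below uses throughout (same helpers as this file):

bool Error = false;
Error |= popType(ErrorLoc, wasm::ValType::I32); // keeps checking after a failure
Error |= popType(ErrorLoc, Type);
return Error; // any single failure fails the instruction, but all are reported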
@@ -171,11 +166,11 @@ bool WebAssemblyAsmTypeCheck::checkEnd(SMLoc ErrorLoc, bool PopVals) {
bool WebAssemblyAsmTypeCheck::checkSig(SMLoc ErrorLoc,
const wasm::WasmSignature &Sig) {
+ bool Error = false;
for (auto VT : llvm::reverse(Sig.Params))
- if (popType(ErrorLoc, VT))
- return true;
+ Error |= popType(ErrorLoc, VT);
Stack.insert(Stack.end(), Sig.Returns.begin(), Sig.Returns.end());
- return false;
+ return Error;
}
bool WebAssemblyAsmTypeCheck::getSymRef(SMLoc ErrorLoc, const MCOperand &SymOp,
@@ -194,7 +189,7 @@ bool WebAssemblyAsmTypeCheck::getGlobal(SMLoc ErrorLoc,
const MCSymbolRefExpr *SymRef;
if (getSymRef(ErrorLoc, GlobalOp, SymRef))
return true;
- auto WasmSym = cast<MCSymbolWasm>(&SymRef->getSymbol());
+ const auto *WasmSym = cast<MCSymbolWasm>(&SymRef->getSymbol());
switch (WasmSym->getType().value_or(wasm::WASM_SYMBOL_TYPE_DATA)) {
case wasm::WASM_SYMBOL_TYPE_GLOBAL:
Type = static_cast<wasm::ValType>(WasmSym->getGlobalType().Type);
@@ -204,7 +199,7 @@ bool WebAssemblyAsmTypeCheck::getGlobal(SMLoc ErrorLoc,
switch (SymRef->getKind()) {
case MCSymbolRefExpr::VK_GOT:
case MCSymbolRefExpr::VK_WASM_GOT_TLS:
- Type = is64 ? wasm::ValType::I64 : wasm::ValType::I32;
+ Type = Is64 ? wasm::ValType::I64 : wasm::ValType::I32;
return false;
default:
break;
@@ -222,7 +217,7 @@ bool WebAssemblyAsmTypeCheck::getTable(SMLoc ErrorLoc, const MCOperand &TableOp,
const MCSymbolRefExpr *SymRef;
if (getSymRef(ErrorLoc, TableOp, SymRef))
return true;
- auto WasmSym = cast<MCSymbolWasm>(&SymRef->getSymbol());
+ const auto *WasmSym = cast<MCSymbolWasm>(&SymRef->getSymbol());
if (WasmSym->getType().value_or(wasm::WASM_SYMBOL_TYPE_DATA) !=
wasm::WASM_SYMBOL_TYPE_TABLE)
return typeError(ErrorLoc, StringRef("symbol ") + WasmSym->getName() +
@@ -260,198 +255,239 @@ bool WebAssemblyAsmTypeCheck::getSignature(SMLoc ErrorLoc,
}
bool WebAssemblyAsmTypeCheck::endOfFunction(SMLoc ErrorLoc) {
+ bool Error = false;
// Check the return types.
- for (auto RVT : llvm::reverse(ReturnTypes)) {
- if (popType(ErrorLoc, RVT))
- return true;
- }
+ for (auto RVT : llvm::reverse(ReturnTypes))
+ Error |= popType(ErrorLoc, RVT);
if (!Stack.empty()) {
return typeError(ErrorLoc, std::to_string(Stack.size()) +
" superfluous return values");
}
Unreachable = true;
- return false;
+ return Error;
}
bool WebAssemblyAsmTypeCheck::typeCheck(SMLoc ErrorLoc, const MCInst &Inst,
OperandVector &Operands) {
auto Opc = Inst.getOpcode();
- auto Name = GetMnemonic(Opc);
+ auto Name = getMnemonic(Opc);
dumpTypeStack("typechecking " + Name + ": ");
wasm::ValType Type;
+
if (Name == "local.get") {
- if (getLocal(Operands[1]->getStartLoc(), Inst.getOperand(0), Type))
- return true;
- Stack.push_back(Type);
- } else if (Name == "local.set") {
- if (getLocal(Operands[1]->getStartLoc(), Inst.getOperand(0), Type))
- return true;
- if (popType(ErrorLoc, Type))
- return true;
- } else if (Name == "local.tee") {
- if (getLocal(Operands[1]->getStartLoc(), Inst.getOperand(0), Type))
- return true;
- if (popType(ErrorLoc, Type))
- return true;
- Stack.push_back(Type);
- } else if (Name == "global.get") {
- if (getGlobal(Operands[1]->getStartLoc(), Inst.getOperand(0), Type))
- return true;
- Stack.push_back(Type);
- } else if (Name == "global.set") {
- if (getGlobal(Operands[1]->getStartLoc(), Inst.getOperand(0), Type))
- return true;
- if (popType(ErrorLoc, Type))
- return true;
- } else if (Name == "table.get") {
- if (getTable(Operands[1]->getStartLoc(), Inst.getOperand(0), Type))
- return true;
- if (popType(ErrorLoc, wasm::ValType::I32))
- return true;
- Stack.push_back(Type);
- } else if (Name == "table.set") {
- if (getTable(Operands[1]->getStartLoc(), Inst.getOperand(0), Type))
- return true;
- if (popType(ErrorLoc, Type))
- return true;
- if (popType(ErrorLoc, wasm::ValType::I32))
- return true;
- } else if (Name == "table.size") {
- if (getTable(Operands[1]->getStartLoc(), Inst.getOperand(0), Type))
- return true;
+ if (!getLocal(Operands[1]->getStartLoc(), Inst.getOperand(0), Type)) {
+ Stack.push_back(Type);
+ return false;
+ }
+ return true;
+ }
+
+ if (Name == "local.set") {
+ if (!getLocal(Operands[1]->getStartLoc(), Inst.getOperand(0), Type))
+ return popType(ErrorLoc, Type);
+ return true;
+ }
+
+ if (Name == "local.tee") {
+ if (!getLocal(Operands[1]->getStartLoc(), Inst.getOperand(0), Type)) {
+ bool Error = popType(ErrorLoc, Type);
+ Stack.push_back(Type);
+ return Error;
+ }
+ return true;
+ }
+
+ if (Name == "global.get") {
+ if (!getGlobal(Operands[1]->getStartLoc(), Inst.getOperand(0), Type)) {
+ Stack.push_back(Type);
+ return false;
+ }
+ return true;
+ }
+
+ if (Name == "global.set") {
+ if (!getGlobal(Operands[1]->getStartLoc(), Inst.getOperand(0), Type))
+ return popType(ErrorLoc, Type);
+ return true;
+ }
+
+ if (Name == "table.get") {
+ bool Error = popType(ErrorLoc, wasm::ValType::I32);
+ if (!getTable(Operands[1]->getStartLoc(), Inst.getOperand(0), Type)) {
+ Stack.push_back(Type);
+ return Error;
+ }
+ return true;
+ }
+
+ if (Name == "table.set") {
+ bool Error = false;
+ if (!getTable(Operands[1]->getStartLoc(), Inst.getOperand(0), Type))
+ Error |= popType(ErrorLoc, Type);
+ else
+ Error = true;
+ Error |= popType(ErrorLoc, wasm::ValType::I32);
+ return Error;
+ }
+
+ if (Name == "table.size") {
+ bool Error = getTable(Operands[1]->getStartLoc(), Inst.getOperand(0), Type);
Stack.push_back(wasm::ValType::I32);
- } else if (Name == "table.grow") {
- if (getTable(Operands[1]->getStartLoc(), Inst.getOperand(0), Type))
- return true;
- if (popType(ErrorLoc, wasm::ValType::I32))
- return true;
- if (popType(ErrorLoc, Type))
- return true;
+ return Error;
+ }
+
+ if (Name == "table.grow") {
+ bool Error = popType(ErrorLoc, wasm::ValType::I32);
+ if (!getTable(Operands[1]->getStartLoc(), Inst.getOperand(0), Type))
+ Error |= popType(ErrorLoc, Type);
+ else
+ Error = true;
Stack.push_back(wasm::ValType::I32);
- } else if (Name == "table.fill") {
- if (getTable(Operands[1]->getStartLoc(), Inst.getOperand(0), Type))
- return true;
- if (popType(ErrorLoc, wasm::ValType::I32))
- return true;
- if (popType(ErrorLoc, Type))
- return true;
- if (popType(ErrorLoc, wasm::ValType::I32))
- return true;
- } else if (Name == "memory.fill") {
- Type = is64 ? wasm::ValType::I64 : wasm::ValType::I32;
- if (popType(ErrorLoc, Type))
- return true;
- if (popType(ErrorLoc, wasm::ValType::I32))
- return true;
- if (popType(ErrorLoc, Type))
- return true;
- } else if (Name == "memory.copy") {
- Type = is64 ? wasm::ValType::I64 : wasm::ValType::I32;
- if (popType(ErrorLoc, Type))
- return true;
- if (popType(ErrorLoc, Type))
- return true;
- if (popType(ErrorLoc, Type))
- return true;
- } else if (Name == "memory.init") {
- Type = is64 ? wasm::ValType::I64 : wasm::ValType::I32;
- if (popType(ErrorLoc, wasm::ValType::I32))
- return true;
- if (popType(ErrorLoc, wasm::ValType::I32))
- return true;
- if (popType(ErrorLoc, Type))
- return true;
- } else if (Name == "drop") {
- if (popType(ErrorLoc, {}))
- return true;
- } else if (Name == "try" || Name == "block" || Name == "loop" ||
- Name == "if") {
- if (Name == "if" && popType(ErrorLoc, wasm::ValType::I32))
- return true;
+ return Error;
+ }
+
+ if (Name == "table.fill") {
+ bool Error = popType(ErrorLoc, wasm::ValType::I32);
+ if (!getTable(Operands[1]->getStartLoc(), Inst.getOperand(0), Type))
+ Error |= popType(ErrorLoc, Type);
+ else
+ Error = true;
+ Error |= popType(ErrorLoc, wasm::ValType::I32);
+ return Error;
+ }
+
+ if (Name == "memory.fill") {
+ Type = Is64 ? wasm::ValType::I64 : wasm::ValType::I32;
+ bool Error = popType(ErrorLoc, Type);
+ Error |= popType(ErrorLoc, wasm::ValType::I32);
+ Error |= popType(ErrorLoc, Type);
+ return Error;
+ }
+
+ if (Name == "memory.copy") {
+ Type = Is64 ? wasm::ValType::I64 : wasm::ValType::I32;
+ bool Error = popType(ErrorLoc, Type);
+ Error |= popType(ErrorLoc, Type);
+ Error |= popType(ErrorLoc, Type);
+ return Error;
+ }
+
+ if (Name == "memory.init") {
+ Type = Is64 ? wasm::ValType::I64 : wasm::ValType::I32;
+ bool Error = popType(ErrorLoc, wasm::ValType::I32);
+ Error |= popType(ErrorLoc, wasm::ValType::I32);
+ Error |= popType(ErrorLoc, Type);
+ return Error;
+ }
+
+ if (Name == "drop") {
+ return popType(ErrorLoc, {});
+ }
+
+ if (Name == "try" || Name == "block" || Name == "loop" || Name == "if") {
if (Name == "loop")
BrStack.emplace_back(LastSig.Params.begin(), LastSig.Params.end());
else
BrStack.emplace_back(LastSig.Returns.begin(), LastSig.Returns.end());
- } else if (Name == "end_block" || Name == "end_loop" || Name == "end_if" ||
- Name == "else" || Name == "end_try" || Name == "catch" ||
- Name == "catch_all" || Name == "delegate") {
- if (checkEnd(ErrorLoc,
- Name == "else" || Name == "catch" || Name == "catch_all"))
+ if (Name == "if" && popType(ErrorLoc, wasm::ValType::I32))
return true;
+ return false;
+ }
+
+ if (Name == "end_block" || Name == "end_loop" || Name == "end_if" ||
+ Name == "else" || Name == "end_try" || Name == "catch" ||
+ Name == "catch_all" || Name == "delegate") {
+ bool Error = checkEnd(ErrorLoc, Name == "else" || Name == "catch" ||
+ Name == "catch_all");
Unreachable = false;
if (Name == "catch") {
const wasm::WasmSignature *Sig = nullptr;
- if (getSignature(Operands[1]->getStartLoc(), Inst.getOperand(0),
- wasm::WASM_SYMBOL_TYPE_TAG, Sig))
- return true;
- // catch instruction pushes values whose types are specified in the tag's
- // "params" part
- Stack.insert(Stack.end(), Sig->Params.begin(), Sig->Params.end());
+ if (!getSignature(Operands[1]->getStartLoc(), Inst.getOperand(0),
+ wasm::WASM_SYMBOL_TYPE_TAG, Sig))
+ // catch instruction pushes values whose types are specified in the
+ // tag's "params" part
+ Stack.insert(Stack.end(), Sig->Params.begin(), Sig->Params.end());
+ else
+ Error = true;
}
- } else if (Name == "br") {
+ return Error;
+ }
+
+ if (Name == "br") {
const MCOperand &Operand = Inst.getOperand(0);
if (!Operand.isImm())
- return false;
- if (checkBr(ErrorLoc, static_cast<size_t>(Operand.getImm())))
return true;
- } else if (Name == "return") {
- if (endOfFunction(ErrorLoc))
- return true;
- } else if (Name == "call_indirect" || Name == "return_call_indirect") {
+ return checkBr(ErrorLoc, static_cast<size_t>(Operand.getImm()));
+ }
+
+ if (Name == "return") {
+ return endOfFunction(ErrorLoc);
+ }
+
+ if (Name == "call_indirect" || Name == "return_call_indirect") {
// Function value.
- if (popType(ErrorLoc, wasm::ValType::I32))
- return true;
- if (checkSig(ErrorLoc, LastSig))
- return true;
+ bool Error = popType(ErrorLoc, wasm::ValType::I32);
+ Error |= checkSig(ErrorLoc, LastSig);
if (Name == "return_call_indirect" && endOfFunction(ErrorLoc))
return true;
- } else if (Name == "call" || Name == "return_call") {
+ return Error;
+ }
+
+ if (Name == "call" || Name == "return_call") {
+ bool Error = false;
const wasm::WasmSignature *Sig = nullptr;
- if (getSignature(Operands[1]->getStartLoc(), Inst.getOperand(0),
- wasm::WASM_SYMBOL_TYPE_FUNCTION, Sig))
- return true;
- if (checkSig(ErrorLoc, *Sig))
- return true;
+ if (!getSignature(Operands[1]->getStartLoc(), Inst.getOperand(0),
+ wasm::WASM_SYMBOL_TYPE_FUNCTION, Sig))
+ Error |= checkSig(ErrorLoc, *Sig);
+ else
+ Error = true;
if (Name == "return_call" && endOfFunction(ErrorLoc))
return true;
- } else if (Name == "unreachable") {
+ return Error;
+ }
+
+ if (Name == "unreachable") {
Unreachable = true;
- } else if (Name == "ref.is_null") {
- if (popRefType(ErrorLoc))
- return true;
+ return false;
+ }
+
+ if (Name == "ref.is_null") {
+ bool Error = popRefType(ErrorLoc);
Stack.push_back(wasm::ValType::I32);
- } else if (Name == "throw") {
+ return Error;
+ }
+
+ if (Name == "throw") {
const wasm::WasmSignature *Sig = nullptr;
- if (getSignature(Operands[1]->getStartLoc(), Inst.getOperand(0),
- wasm::WASM_SYMBOL_TYPE_TAG, Sig))
- return true;
- if (checkSig(ErrorLoc, *Sig))
- return true;
- } else {
- // The current instruction is a stack instruction which doesn't have
- // explicit operands that indicate push/pop types, so we get those from
- // the register version of the same instruction.
- auto RegOpc = WebAssembly::getRegisterOpcode(Opc);
- assert(RegOpc != -1 && "Failed to get register version of MC instruction");
- const auto &II = MII.get(RegOpc);
- // First pop all the uses off the stack and check them.
- for (unsigned I = II.getNumOperands(); I > II.getNumDefs(); I--) {
- const auto &Op = II.operands()[I - 1];
- if (Op.OperandType == MCOI::OPERAND_REGISTER) {
- auto VT = WebAssembly::regClassToValType(Op.RegClass);
- if (popType(ErrorLoc, VT))
- return true;
- }
- }
- // Now push all the defs onto the stack.
- for (unsigned I = 0; I < II.getNumDefs(); I++) {
- const auto &Op = II.operands()[I];
- assert(Op.OperandType == MCOI::OPERAND_REGISTER && "Register expected");
+ if (!getSignature(Operands[1]->getStartLoc(), Inst.getOperand(0),
+ wasm::WASM_SYMBOL_TYPE_TAG, Sig))
+ return checkSig(ErrorLoc, *Sig);
+ return true;
+ }
+
+ // The current instruction is a stack instruction which doesn't have
+ // explicit operands that indicate push/pop types, so we get those from
+ // the register version of the same instruction.
+ auto RegOpc = WebAssembly::getRegisterOpcode(Opc);
+ assert(RegOpc != -1 && "Failed to get register version of MC instruction");
+ const auto &II = MII.get(RegOpc);
+ bool Error = false;
+ // First pop all the uses off the stack and check them.
+ for (unsigned I = II.getNumOperands(); I > II.getNumDefs(); I--) {
+ const auto &Op = II.operands()[I - 1];
+ if (Op.OperandType == MCOI::OPERAND_REGISTER) {
auto VT = WebAssembly::regClassToValType(Op.RegClass);
- Stack.push_back(VT);
+ Error |= popType(ErrorLoc, VT);
}
}
- return false;
+ // Now push all the defs onto the stack.
+ for (unsigned I = 0; I < II.getNumDefs(); I++) {
+ const auto &Op = II.operands()[I];
+ assert(Op.OperandType == MCOI::OPERAND_REGISTER && "Register expected");
+ auto VT = WebAssembly::regClassToValType(Op.RegClass);
+ Stack.push_back(VT);
+ }
+ return Error;
}
} // end namespace llvm
diff --git a/llvm/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmTypeCheck.h b/llvm/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmTypeCheck.h
index 9ba5693..972162d 100644
--- a/llvm/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmTypeCheck.h
+++ b/llvm/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmTypeCheck.h
@@ -33,9 +33,8 @@ class WebAssemblyAsmTypeCheck final {
SmallVector<wasm::ValType, 16> LocalTypes;
SmallVector<wasm::ValType, 4> ReturnTypes;
wasm::WasmSignature LastSig;
- bool TypeErrorThisFunction = false;
bool Unreachable = false;
- bool is64;
+ bool Is64;
void dumpTypeStack(Twine Msg);
bool typeError(SMLoc ErrorLoc, const Twine &Msg);
@@ -55,7 +54,7 @@ class WebAssemblyAsmTypeCheck final {
public:
WebAssemblyAsmTypeCheck(MCAsmParser &Parser, const MCInstrInfo &MII,
- bool is64);
+ bool Is64);
void funcDecl(const wasm::WasmSignature &Sig);
void localDecl(const SmallVectorImpl<wasm::ValType> &Locals);
@@ -63,12 +62,11 @@ public:
bool endOfFunction(SMLoc ErrorLoc);
bool typeCheck(SMLoc ErrorLoc, const MCInst &Inst, OperandVector &Operands);
- void Clear() {
+ void clear() {
Stack.clear();
BrStack.clear();
LocalTypes.clear();
ReturnTypes.clear();
- TypeErrorThisFunction = false;
Unreachable = false;
}
};
diff --git a/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp b/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp
index 1d08853..2f6b55b0 100644
--- a/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp
+++ b/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp
@@ -629,15 +629,19 @@ std::optional<MCFixupKind> X86AsmBackend::getFixupKind(StringRef Name) const {
const MCFixupKindInfo &X86AsmBackend::getFixupKindInfo(MCFixupKind Kind) const {
const static MCFixupKindInfo Infos[X86::NumTargetFixupKinds] = {
+ // clang-format off
{"reloc_riprel_4byte", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
{"reloc_riprel_4byte_movq_load", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
+ {"reloc_riprel_4byte_movq_load_rex2", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
{"reloc_riprel_4byte_relax", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
{"reloc_riprel_4byte_relax_rex", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
+ {"reloc_riprel_4byte_relax_rex2", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
{"reloc_signed_4byte", 0, 32, 0},
{"reloc_signed_4byte_relax", 0, 32, 0},
{"reloc_global_offset_table", 0, 32, 0},
{"reloc_global_offset_table8", 0, 64, 0},
{"reloc_branch_4byte_pcrel", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
+ // clang-format on
};
// Fixup kinds from .reloc directive are like R_386_NONE/R_X86_64_NONE. They
@@ -678,7 +682,9 @@ static unsigned getFixupKindSize(unsigned Kind) {
case X86::reloc_riprel_4byte:
case X86::reloc_riprel_4byte_relax:
case X86::reloc_riprel_4byte_relax_rex:
+ case X86::reloc_riprel_4byte_relax_rex2:
case X86::reloc_riprel_4byte_movq_load:
+ case X86::reloc_riprel_4byte_movq_load_rex2:
case X86::reloc_signed_4byte:
case X86::reloc_signed_4byte_relax:
case X86::reloc_global_offset_table:
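Note the ordering constraint the Infos table relies on: getFixupKindInfo indexes it by Kind - FirstTargetFixupKind, so every row must sit at the same offset as its X86::Fixups enumerator, and each new rex2 row is inserted at the position matching the new enum value in X86FixupKinds.h. Expressed as a compile-time check (an illustrative sketch, not part of the patch):

static_assert(X86::reloc_riprel_4byte_movq_load_rex2 ==
                  X86::reloc_riprel_4byte_movq_load + 1,
              "Infos[] rows must mirror the X86::Fixups enum order");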
diff --git a/llvm/lib/Target/X86/MCTargetDesc/X86ELFObjectWriter.cpp b/llvm/lib/Target/X86/MCTargetDesc/X86ELFObjectWriter.cpp
index 0b2efdf..9022227 100644
--- a/llvm/lib/Target/X86/MCTargetDesc/X86ELFObjectWriter.cpp
+++ b/llvm/lib/Target/X86/MCTargetDesc/X86ELFObjectWriter.cpp
@@ -74,7 +74,9 @@ static X86_64RelType getType64(MCFixupKind Kind,
case X86::reloc_riprel_4byte:
case X86::reloc_riprel_4byte_relax:
case X86::reloc_riprel_4byte_relax_rex:
+ case X86::reloc_riprel_4byte_relax_rex2:
case X86::reloc_riprel_4byte_movq_load:
+ case X86::reloc_riprel_4byte_movq_load_rex2:
return RT64_32;
case X86::reloc_branch_4byte_pcrel:
Modifier = MCSymbolRefExpr::VK_PLT;
@@ -205,7 +207,7 @@ static unsigned getRelocType64(MCContext &Ctx, SMLoc Loc,
case MCSymbolRefExpr::VK_GOTPCREL:
checkIs32(Ctx, Loc, Type);
// Older versions of ld.bfd/ld.gold/lld
- // do not support GOTPCRELX/REX_GOTPCRELX,
+ // do not support GOTPCRELX/REX_GOTPCRELX/REX2_GOTPCRELX,
// and we want to keep back-compatibility.
if (!Ctx.getTargetOptions()->X86RelaxRelocations)
return ELF::R_X86_64_GOTPCREL;
@@ -217,6 +219,9 @@ static unsigned getRelocType64(MCContext &Ctx, SMLoc Loc,
case X86::reloc_riprel_4byte_relax_rex:
case X86::reloc_riprel_4byte_movq_load:
return ELF::R_X86_64_REX_GOTPCRELX;
+ case X86::reloc_riprel_4byte_relax_rex2:
+ case X86::reloc_riprel_4byte_movq_load_rex2:
+ return ELF::R_X86_64_REX2_GOTPCRELX;
}
llvm_unreachable("unexpected relocation type!");
case MCSymbolRefExpr::VK_GOTPCREL_NORELAX:
diff --git a/llvm/lib/Target/X86/MCTargetDesc/X86FixupKinds.h b/llvm/lib/Target/X86/MCTargetDesc/X86FixupKinds.h
index 2d52171..29bb7ee 100644
--- a/llvm/lib/Target/X86/MCTargetDesc/X86FixupKinds.h
+++ b/llvm/lib/Target/X86/MCTargetDesc/X86FixupKinds.h
@@ -16,10 +16,14 @@ namespace X86 {
enum Fixups {
reloc_riprel_4byte = FirstTargetFixupKind, // 32-bit rip-relative
reloc_riprel_4byte_movq_load, // 32-bit rip-relative in movq
+ reloc_riprel_4byte_movq_load_rex2, // 32-bit rip-relative in movq
+ // with rex2 prefix
reloc_riprel_4byte_relax, // 32-bit rip-relative in relaxable
// instruction
reloc_riprel_4byte_relax_rex, // 32-bit rip-relative in relaxable
// instruction with rex prefix
+ reloc_riprel_4byte_relax_rex2, // 32-bit rip-relative in relaxable
+ // instruction with rex2 prefix
reloc_signed_4byte, // 32-bit signed. Unlike FK_Data_4
// this will be sign extended at
// runtime.
diff --git a/llvm/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp b/llvm/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp
index 71d4286..2064361 100644
--- a/llvm/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp
+++ b/llvm/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp
@@ -568,8 +568,10 @@ void X86MCCodeEmitter::emitImmediate(const MCOperand &DispOp, SMLoc Loc,
if (FixupKind == FK_PCRel_4 ||
FixupKind == MCFixupKind(X86::reloc_riprel_4byte) ||
FixupKind == MCFixupKind(X86::reloc_riprel_4byte_movq_load) ||
+ FixupKind == MCFixupKind(X86::reloc_riprel_4byte_movq_load_rex2) ||
FixupKind == MCFixupKind(X86::reloc_riprel_4byte_relax) ||
FixupKind == MCFixupKind(X86::reloc_riprel_4byte_relax_rex) ||
+ FixupKind == MCFixupKind(X86::reloc_riprel_4byte_relax_rex2) ||
FixupKind == MCFixupKind(X86::reloc_branch_4byte_pcrel)) {
ImmOffset -= 4;
// If this is a pc-relative load off _GLOBAL_OFFSET_TABLE_:
@@ -637,12 +639,11 @@ void X86MCCodeEmitter::emitMemModRMByte(
default:
return X86::reloc_riprel_4byte;
case X86::MOV64rm:
- // movq loads is a subset of reloc_riprel_4byte_relax_rex. It is a
+      // A movq load is a subset of reloc_riprel_4byte_relax_rex/rex2. It is a
// special case because COFF and Mach-O don't support ELF's more
- // flexible R_X86_64_REX_GOTPCRELX relaxation.
- // TODO: Support new relocation for REX2.
- assert(Kind == REX || Kind == REX2);
- return X86::reloc_riprel_4byte_movq_load;
+ // flexible R_X86_64_REX_GOTPCRELX/R_X86_64_REX2_GOTPCRELX relaxation.
+ return Kind == REX2 ? X86::reloc_riprel_4byte_movq_load_rex2
+ : X86::reloc_riprel_4byte_movq_load;
case X86::ADC32rm:
case X86::ADD32rm:
case X86::AND32rm:
@@ -665,11 +666,9 @@ void X86MCCodeEmitter::emitMemModRMByte(
case X86::SBB64rm:
case X86::SUB64rm:
case X86::XOR64rm:
- // We haven't support relocation for REX2 prefix, so temporarily use REX
- // relocation.
- // TODO: Support new relocation for REX2.
- return (Kind == REX || Kind == REX2) ? X86::reloc_riprel_4byte_relax_rex
- : X86::reloc_riprel_4byte_relax;
+ return Kind == REX2 ? X86::reloc_riprel_4byte_relax_rex2
+ : Kind == REX ? X86::reloc_riprel_4byte_relax_rex
+ : X86::reloc_riprel_4byte_relax;
}
}();
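A condensed restatement of the fixup selection after this hunk (an illustrative helper; PrefixKind and the REX/REX2 enumerators are the ones already used by this function):

unsigned pickRipRelReloc(bool IsMov64rm, PrefixKind Kind) {
  if (IsMov64rm) // movq keeps a dedicated fixup so COFF/Mach-O can match it
    return Kind == REX2 ? X86::reloc_riprel_4byte_movq_load_rex2
                        : X86::reloc_riprel_4byte_movq_load;
  return Kind == REX2  ? X86::reloc_riprel_4byte_relax_rex2
         : Kind == REX ? X86::reloc_riprel_4byte_relax_rex
                       : X86::reloc_riprel_4byte_relax;
}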
diff --git a/llvm/lib/Target/X86/MCTargetDesc/X86MachObjectWriter.cpp b/llvm/lib/Target/X86/MCTargetDesc/X86MachObjectWriter.cpp
index ec95b1f..41ce5c9 100644
--- a/llvm/lib/Target/X86/MCTargetDesc/X86MachObjectWriter.cpp
+++ b/llvm/lib/Target/X86/MCTargetDesc/X86MachObjectWriter.cpp
@@ -66,8 +66,10 @@ public:
static bool isFixupKindRIPRel(unsigned Kind) {
return Kind == X86::reloc_riprel_4byte ||
Kind == X86::reloc_riprel_4byte_movq_load ||
+ Kind == X86::reloc_riprel_4byte_movq_load_rex2 ||
Kind == X86::reloc_riprel_4byte_relax ||
- Kind == X86::reloc_riprel_4byte_relax_rex;
+ Kind == X86::reloc_riprel_4byte_relax_rex ||
+ Kind == X86::reloc_riprel_4byte_relax_rex2;
}
static unsigned getFixupKindLog2Size(unsigned Kind) {
@@ -83,7 +85,9 @@ static unsigned getFixupKindLog2Size(unsigned Kind) {
case X86::reloc_riprel_4byte:
case X86::reloc_riprel_4byte_relax:
case X86::reloc_riprel_4byte_relax_rex:
+ case X86::reloc_riprel_4byte_relax_rex2:
case X86::reloc_riprel_4byte_movq_load:
+ case X86::reloc_riprel_4byte_movq_load_rex2:
case X86::reloc_signed_4byte:
case X86::reloc_signed_4byte_relax:
case X86::reloc_branch_4byte_pcrel:
diff --git a/llvm/lib/Target/X86/MCTargetDesc/X86WinCOFFObjectWriter.cpp b/llvm/lib/Target/X86/MCTargetDesc/X86WinCOFFObjectWriter.cpp
index 10fc176..7740500 100644
--- a/llvm/lib/Target/X86/MCTargetDesc/X86WinCOFFObjectWriter.cpp
+++ b/llvm/lib/Target/X86/MCTargetDesc/X86WinCOFFObjectWriter.cpp
@@ -66,8 +66,10 @@ unsigned X86WinCOFFObjectWriter::getRelocType(MCContext &Ctx,
case FK_PCRel_4:
case X86::reloc_riprel_4byte:
case X86::reloc_riprel_4byte_movq_load:
+ case X86::reloc_riprel_4byte_movq_load_rex2:
case X86::reloc_riprel_4byte_relax:
case X86::reloc_riprel_4byte_relax_rex:
+ case X86::reloc_riprel_4byte_relax_rex2:
case X86::reloc_branch_4byte_pcrel:
return COFF::IMAGE_REL_AMD64_REL32;
case FK_Data_4:
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index c5dc3ea..d9eedfd 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -336,9 +336,11 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::FP_TO_UINT_SAT, VT, Custom);
setOperationAction(ISD::FP_TO_SINT_SAT, VT, Custom);
}
+ setOperationAction(ISD::FCANONICALIZE, MVT::f32, Custom);
if (Subtarget.is64Bit()) {
setOperationAction(ISD::FP_TO_UINT_SAT, MVT::i64, Custom);
setOperationAction(ISD::FP_TO_SINT_SAT, MVT::i64, Custom);
+ setOperationAction(ISD::FCANONICALIZE, MVT::f64, Custom);
}
}
if (Subtarget.hasAVX10_2()) {
@@ -358,6 +360,9 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
if (!Subtarget.hasSSE2()) {
setOperationAction(ISD::BITCAST , MVT::f32 , Expand);
setOperationAction(ISD::BITCAST , MVT::i32 , Expand);
+ setOperationAction(ISD::FCANONICALIZE, MVT::f32, Custom);
+ setOperationAction(ISD::FCANONICALIZE, MVT::f80, Custom);
+ setOperationAction(ISD::FCANONICALIZE, MVT::f64, Custom);
if (Subtarget.is64Bit()) {
setOperationAction(ISD::BITCAST , MVT::f64 , Expand);
// Without SSE, i64->f64 goes through memory.
@@ -721,6 +726,7 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::STRICT_FROUNDEVEN, MVT::f16, Promote);
setOperationAction(ISD::STRICT_FTRUNC, MVT::f16, Promote);
setOperationAction(ISD::STRICT_FP_ROUND, MVT::f16, Custom);
+ setOperationAction(ISD::FCANONICALIZE, MVT::f16, Custom);
setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f32, Custom);
setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f64, Custom);
@@ -937,6 +943,7 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
if (isTypeLegal(MVT::f80)) {
setOperationAction(ISD::FP_ROUND, MVT::f80, Custom);
setOperationAction(ISD::STRICT_FP_ROUND, MVT::f80, Custom);
+ setOperationAction(ISD::FCANONICALIZE, MVT::f80, Custom);
}
setOperationAction(ISD::SETCC, MVT::f128, Custom);
@@ -1070,9 +1077,11 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::VSELECT, MVT::v4f32, Custom);
setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
setOperationAction(ISD::SELECT, MVT::v4f32, Custom);
+ setOperationAction(ISD::FCANONICALIZE, MVT::v4f32, Custom);
setOperationAction(ISD::LOAD, MVT::v2f32, Custom);
setOperationAction(ISD::STORE, MVT::v2f32, Custom);
+ setOperationAction(ISD::FCANONICALIZE, MVT::v2f32, Custom);
setOperationAction(ISD::STRICT_FADD, MVT::v4f32, Legal);
setOperationAction(ISD::STRICT_FSUB, MVT::v4f32, Legal);
@@ -1133,6 +1142,7 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::UMULO, MVT::v2i32, Custom);
setOperationAction(ISD::FNEG, MVT::v2f64, Custom);
+ setOperationAction(ISD::FCANONICALIZE, MVT::v2f64, Custom);
setOperationAction(ISD::FABS, MVT::v2f64, Custom);
setOperationAction(ISD::FCOPYSIGN, MVT::v2f64, Custom);
@@ -1465,6 +1475,7 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::FMAXIMUM, VT, Custom);
setOperationAction(ISD::FMINIMUM, VT, Custom);
+ setOperationAction(ISD::FCANONICALIZE, VT, Custom);
}
setOperationAction(ISD::LRINT, MVT::v8f32, Custom);
@@ -1730,6 +1741,9 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::FP_TO_UINT, MVT::v2i1, Custom);
setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v2i1, Custom);
setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v2i1, Custom);
+ setOperationAction(ISD::FCANONICALIZE, MVT::v8f16, Custom);
+ setOperationAction(ISD::FCANONICALIZE, MVT::v16f16, Custom);
+ setOperationAction(ISD::FCANONICALIZE, MVT::v32f16, Custom);
// There is no byte sized k-register load or store without AVX512DQ.
if (!Subtarget.hasDQI()) {
@@ -1809,6 +1823,7 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::FMA, VT, Legal);
setOperationAction(ISD::STRICT_FMA, VT, Legal);
setOperationAction(ISD::FCOPYSIGN, VT, Custom);
+ setOperationAction(ISD::FCANONICALIZE, VT, Custom);
}
setOperationAction(ISD::LRINT, MVT::v16f32,
Subtarget.hasDQI() ? Legal : Custom);
@@ -32694,6 +32709,24 @@ static SDValue LowerPREFETCH(SDValue Op, const X86Subtarget &Subtarget,
return Op;
}
+static SDValue LowerFCanonicalize(SDValue Op, SelectionDAG &DAG) {
+ SDNode *N = Op.getNode();
+ SDValue Operand = N->getOperand(0);
+ EVT VT = Operand.getValueType();
+ SDLoc dl(N);
+
+ SDValue One = DAG.getConstantFP(1.0, dl, VT);
+
+  // TODO: Fix crash for bf16 when generating strict_fmul, as it
+  // leads to an error: SoftPromoteHalfResult #0: t11: bf16,ch = strict_fmul t0,
+ // ConstantFP:bf16<APFloat(16256)>, t5 LLVM ERROR: Do not know how to soft
+ // promote this operator's result!
+ SDValue Chain = DAG.getEntryNode();
+ SDValue StrictFmul = DAG.getNode(ISD::STRICT_FMUL, dl, {VT, MVT::Other},
+ {Chain, Operand, One});
+ return StrictFmul;
+}
+
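Why a strict multiply by 1.0 implements FCANONICALIZE (a reasoning sketch, not part of the patch): under IEEE-754, x * 1.0 returns x for finite, infinite, and quiet-NaN inputs, quiets signaling NaNs, and honors the current denormal mode, while the STRICT_FMUL node keeps the DAG combiner from folding the multiply away before instruction selection. The same identity in plain C++:

#include <cassert>
#include <cmath>
double canonicalize(double X) {
  volatile double One = 1.0; // volatile: stop the compiler from folding X * 1.0
  return X * One;            // identity on ordinary values, quiets sNaNs
}
int main() {
  assert(canonicalize(2.5) == 2.5);
  assert(std::isnan(canonicalize(std::nan(""))));
}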
static StringRef getInstrStrFromOpNo(const SmallVectorImpl<StringRef> &AsmStrs,
unsigned OpNo) {
const APInt Operand(32, OpNo);
@@ -32833,6 +32866,7 @@ SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
case ISD::SRL_PARTS: return LowerShiftParts(Op, DAG);
case ISD::FSHL:
case ISD::FSHR: return LowerFunnelShift(Op, Subtarget, DAG);
+ case ISD::FCANONICALIZE: return LowerFCanonicalize(Op, DAG);
case ISD::STRICT_SINT_TO_FP:
case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG);
case ISD::STRICT_UINT_TO_FP:
@@ -42866,9 +42900,8 @@ bool X86TargetLowering::SimplifyDemandedVectorEltsForTargetNode(
// Don't bother broadcasting if we just need the 0'th element.
if (DemandedElts == 1) {
if (!SrcVT.isVector())
- return TLO.CombineTo(
- Op, TLO.DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(Op), VT, Src));
- if (Src.getValueType() != VT)
+ Src = TLO.DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(Op), VT, Src);
+ else if (Src.getValueType() != VT)
Src = widenSubVector(VT.getSimpleVT(), Src, false, Subtarget, TLO.DAG,
SDLoc(Op));
return TLO.CombineTo(Op, Src);
@@ -43076,6 +43109,8 @@ bool X86TargetLowering::SimplifyDemandedVectorEltsForTargetNode(
case X86ISD::FMIN:
case X86ISD::FMAXC:
case X86ISD::FMINC:
+ case X86ISD::FRSQRT:
+ case X86ISD::FRCP:
// Horizontal Ops.
case X86ISD::HADD:
case X86ISD::HSUB:
@@ -52948,10 +52983,7 @@ static SDValue combineTruncatedArithmetic(SDNode *N, SelectionDAG &DAG,
// combiner.
static SDValue combinePMULH(SDValue Src, EVT VT, const SDLoc &DL,
SelectionDAG &DAG, const X86Subtarget &Subtarget) {
- // First instruction should be a right shift of a multiply.
- if (Src.getOpcode() != ISD::SRL ||
- Src.getOperand(0).getOpcode() != ISD::MUL)
- return SDValue();
+ using namespace llvm::SDPatternMatch;
if (!Subtarget.hasSSE2())
return SDValue();
@@ -52966,15 +52998,12 @@ static SDValue combinePMULH(SDValue Src, EVT VT, const SDLoc &DL,
if (InVT.getVectorElementType().getSizeInBits() < 32)
return SDValue();
- // Need a shift by 16.
- APInt ShiftAmt;
- if (!ISD::isConstantSplatVector(Src.getOperand(1).getNode(), ShiftAmt) ||
- ShiftAmt != 16)
+ // First instruction should be a right shift by 16 of a multiply.
+ SDValue LHS, RHS;
+ if (!sd_match(Src,
+ m_Srl(m_Mul(m_Value(LHS), m_Value(RHS)), m_SpecificInt(16))))
return SDValue();
- SDValue LHS = Src.getOperand(0).getOperand(0);
- SDValue RHS = Src.getOperand(0).getOperand(1);
-
// Count leading sign/zero bits on both inputs - if there are enough then
// truncation back to vXi16 will be cheap - either as a pack/shuffle
// sequence or using AVX512 truncations. If the inputs are sext/zext then the
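The rewrite swaps the hand-rolled opcode and splat-constant checks for the SDPatternMatch DSL; a sketch of the idiom with the same names as the hunk:

using namespace llvm::SDPatternMatch;
SDValue LHS, RHS;
// Matches (srl (mul LHS, RHS), 16); as used here, m_SpecificInt(16) also
// covers the splat-vector shift amount the old isConstantSplatVector code
// checked for.
if (sd_match(Src, m_Srl(m_Mul(m_Value(LHS), m_Value(RHS)), m_SpecificInt(16)))) {
  // LHS and RHS are now bound to the multiply's operands.
}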
@@ -52992,12 +53021,13 @@ static SDValue combinePMULH(SDValue Src, EVT VT, const SDLoc &DL,
return SDValue();
// Check if both inputs are extensions, which will be removed by truncation.
- bool IsTruncateFree = (LHS.getOpcode() == ISD::SIGN_EXTEND ||
- LHS.getOpcode() == ISD::ZERO_EXTEND) &&
- (RHS.getOpcode() == ISD::SIGN_EXTEND ||
- RHS.getOpcode() == ISD::ZERO_EXTEND) &&
- LHS.getOperand(0).getScalarValueSizeInBits() <= 16 &&
- RHS.getOperand(0).getScalarValueSizeInBits() <= 16;
+ auto isOpTruncateFree = [](SDValue Op) {
+ if (Op.getOpcode() == ISD::SIGN_EXTEND ||
+ Op.getOpcode() == ISD::ZERO_EXTEND)
+ return Op.getOperand(0).getScalarValueSizeInBits() <= 16;
+ return ISD::isBuildVectorOfConstantSDNodes(Op.getNode());
+ };
+ bool IsTruncateFree = isOpTruncateFree(LHS) && isOpTruncateFree(RHS);
// For AVX2+ targets, with the upper bits known zero, we can perform MULHU on
// the (bitcasted) inputs directly, and then cheaply pack/truncate the result
diff --git a/llvm/lib/Target/X86/X86InstrAVX512.td b/llvm/lib/Target/X86/X86InstrAVX512.td
index 417f31a..b9ff4a5 100644
--- a/llvm/lib/Target/X86/X86InstrAVX512.td
+++ b/llvm/lib/Target/X86/X86InstrAVX512.td
@@ -2617,20 +2617,19 @@ defm VFPCLASS : avx512_fp_fpclass_all<"vfpclass", 0x66, 0x67, SchedWriteFCmp>, E
multiclass avx512_mask_mov<bits<8> opc_kk, bits<8> opc_km, bits<8> opc_mk,
string OpcodeStr, RegisterClass KRC, ValueType vvt,
X86MemOperand x86memop, string Suffix = ""> {
- let explicitOpPrefix = !if(!eq(Suffix, ""), NoExplicitOpPrefix, ExplicitEVEX) in {
- let isMoveReg = 1, hasSideEffects = 0, SchedRW = [WriteMove] in
- def kk#Suffix : I<opc_kk, MRMSrcReg, (outs KRC:$dst), (ins KRC:$src),
- !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), []>,
- Sched<[WriteMove]>;
- def km#Suffix : I<opc_km, MRMSrcMem, (outs KRC:$dst), (ins x86memop:$src),
- !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
- [(set KRC:$dst, (vvt (load addr:$src)))]>,
- Sched<[WriteLoad]>, NoCD8;
- def mk#Suffix : I<opc_mk, MRMDestMem, (outs), (ins x86memop:$dst, KRC:$src),
- !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
- [(store KRC:$src, addr:$dst)]>,
- Sched<[WriteStore]>, NoCD8;
- }
+ let isMoveReg = 1, hasSideEffects = 0, SchedRW = [WriteMove],
+ explicitOpPrefix = !if(!eq(Suffix, ""), NoExplicitOpPrefix, ExplicitEVEX) in
+ def kk#Suffix : I<opc_kk, MRMSrcReg, (outs KRC:$dst), (ins KRC:$src),
+ !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), []>,
+ Sched<[WriteMove]>;
+ def km#Suffix : I<opc_km, MRMSrcMem, (outs KRC:$dst), (ins x86memop:$src),
+ !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
+ [(set KRC:$dst, (vvt (load addr:$src)))]>,
+ Sched<[WriteLoad]>, NoCD8;
+ def mk#Suffix : I<opc_mk, MRMDestMem, (outs), (ins x86memop:$dst, KRC:$src),
+ !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
+ [(store KRC:$src, addr:$dst)]>,
+ Sched<[WriteStore]>, NoCD8;
}
multiclass avx512_mask_mov_gpr<bits<8> opc_kr, bits<8> opc_rk,
diff --git a/llvm/lib/Transforms/AggressiveInstCombine/AggressiveInstCombine.cpp b/llvm/lib/Transforms/AggressiveInstCombine/AggressiveInstCombine.cpp
index 09ffc2d..01642b0 100644
--- a/llvm/lib/Transforms/AggressiveInstCombine/AggressiveInstCombine.cpp
+++ b/llvm/lib/Transforms/AggressiveInstCombine/AggressiveInstCombine.cpp
@@ -1052,6 +1052,13 @@ void StrNCmpInliner::inlineCompare(Value *LHS, StringRef RHS, uint64_t N,
bool Swapped) {
auto &Ctx = CI->getContext();
IRBuilder<> B(Ctx);
+ // We want these instructions to be recognized as inlined instructions for the
+ // compare call, but we don't have a source location for the definition of
+ // that function, since we're generating that code now. Because the generated
+ // code is a viable point for a memory access error, we make the pragmatic
+ // choice here to directly use CI's location so that we have useful
+ // attribution for the generated code.
+ B.SetCurrentDebugLocation(CI->getDebugLoc());
BasicBlock *BBCI = CI->getParent();
BasicBlock *BBTail =
diff --git a/llvm/lib/Transforms/IPO/ElimAvailExtern.cpp b/llvm/lib/Transforms/IPO/ElimAvailExtern.cpp
index 2b34d3b..d3d27de 100644
--- a/llvm/lib/Transforms/IPO/ElimAvailExtern.cpp
+++ b/llvm/lib/Transforms/IPO/ElimAvailExtern.cpp
@@ -14,6 +14,7 @@
#include "llvm/Transforms/IPO/ElimAvailExtern.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Statistic.h"
+#include "llvm/Analysis/CtxProfAnalysis.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/Function.h"
@@ -88,7 +89,7 @@ static void convertToLocalCopy(Module &M, Function &F) {
++NumConversions;
}
-static bool eliminateAvailableExternally(Module &M) {
+static bool eliminateAvailableExternally(Module &M, bool Convert) {
bool Changed = false;
// Drop initializers of available externally global variables.
@@ -112,7 +113,7 @@ static bool eliminateAvailableExternally(Module &M) {
if (F.isDeclaration() || !F.hasAvailableExternallyLinkage())
continue;
- if (ConvertToLocal)
+ if (Convert || ConvertToLocal)
convertToLocalCopy(M, F);
else
deleteFunction(F);
@@ -125,8 +126,16 @@ static bool eliminateAvailableExternally(Module &M) {
}
PreservedAnalyses
-EliminateAvailableExternallyPass::run(Module &M, ModuleAnalysisManager &) {
- if (!eliminateAvailableExternally(M))
- return PreservedAnalyses::all();
+EliminateAvailableExternallyPass::run(Module &M, ModuleAnalysisManager &MAM) {
+ auto *CtxProf = MAM.getCachedResult<CtxProfAnalysis>(M);
+ // Convert to local instead of eliding if we use contextual profiling in this
+ // module. This is because the IPO decisions performed with contextual
+  // information will likely differ from decisions made without. For a function
+  // that's imported, its optimizations will thus differ and be specialized for
+  // this contextual information. Eliding it in favor of the original would
+  // undo these optimizations.
+  if (!eliminateAvailableExternally(M, /*Convert=*/(CtxProf && !!(*CtxProf))))
+    return PreservedAnalyses::all();
return PreservedAnalyses::none();
}
diff --git a/llvm/lib/Transforms/IPO/MemProfContextDisambiguation.cpp b/llvm/lib/Transforms/IPO/MemProfContextDisambiguation.cpp
index 6927fe5..576a31f 100644
--- a/llvm/lib/Transforms/IPO/MemProfContextDisambiguation.cpp
+++ b/llvm/lib/Transforms/IPO/MemProfContextDisambiguation.cpp
@@ -1362,12 +1362,22 @@ void CallsiteContextGraph<DerivedCCG, FuncTy, CallTy>::
}
}
+#ifndef NDEBUG
// Find the node for the last stack id, which should be the same
// across all calls recorded for this id, and is this node's id.
uint64_t LastId = Node->OrigStackOrAllocId;
ContextNode *LastNode = getNodeForStackId(LastId);
// We should only have kept stack ids that had nodes.
assert(LastNode);
+ assert(LastNode == Node);
+#else
+ ContextNode *LastNode = Node;
+#endif
+
+ // Compute the last node's context ids once, as it is shared by all calls in
+ // this entry.
+ DenseSet<uint32_t> LastNodeContextIds = LastNode->getContextIds();
+ assert(!LastNodeContextIds.empty());
for (unsigned I = 0; I < Calls.size(); I++) {
auto &[Call, Ids, Func, SavedContextIds] = Calls[I];
@@ -1389,40 +1399,43 @@ void CallsiteContextGraph<DerivedCCG, FuncTy, CallTy>::
assert(LastId == Ids.back());
- ContextNode *FirstNode = getNodeForStackId(Ids[0]);
- assert(FirstNode);
-
// Recompute the context ids for this stack id sequence (the
// intersection of the context ids of the corresponding nodes).
// Start with the ids we saved in the map for this call, which could be
    // duplicated context ids. We have to recompute as we might have overlap
    // between the saved context ids for different last nodes, and removed them
    // already during the post order traversal.
- set_intersect(SavedContextIds, FirstNode->getContextIds());
- ContextNode *PrevNode = nullptr;
- for (auto Id : Ids) {
+ set_intersect(SavedContextIds, LastNodeContextIds);
+ ContextNode *PrevNode = LastNode;
+ bool Skip = false;
+ // Iterate backwards through the stack Ids, starting after the last Id
+ // in the list, which was handled once outside for all Calls.
+ for (auto IdIter = Ids.rbegin() + 1; IdIter != Ids.rend(); IdIter++) {
+ auto Id = *IdIter;
ContextNode *CurNode = getNodeForStackId(Id);
// We should only have kept stack ids that had nodes and weren't
// recursive.
assert(CurNode);
assert(!CurNode->Recursive);
- if (!PrevNode) {
- PrevNode = CurNode;
- continue;
- }
- auto *Edge = CurNode->findEdgeFromCallee(PrevNode);
+
+ auto *Edge = CurNode->findEdgeFromCaller(PrevNode);
if (!Edge) {
- SavedContextIds.clear();
+ Skip = true;
break;
}
PrevNode = CurNode;
+
+ // Update the context ids, which is the intersection of the ids along
+ // all edges in the sequence.
set_intersect(SavedContextIds, Edge->getContextIds());
// If we now have no context ids for clone, skip this call.
- if (SavedContextIds.empty())
+ if (SavedContextIds.empty()) {
+ Skip = true;
break;
+ }
}
- if (SavedContextIds.empty())
+ if (Skip)
continue;
// Create new context node.
@@ -1433,6 +1446,9 @@ void CallsiteContextGraph<DerivedCCG, FuncTy, CallTy>::
NonAllocationCallToContextNodeMap[Call] = NewNode;
NewNode->AllocTypes = computeAllocType(SavedContextIds);
+ ContextNode *FirstNode = getNodeForStackId(Ids[0]);
+ assert(FirstNode);
+
// Connect to callees of innermost stack frame in inlined call chain.
// This updates context ids for FirstNode's callee's to reflect those
// moved to NewNode.
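For reference, set_intersect (from llvm/ADT/SetOperations.h) mutates its first argument in place, which is why SavedContextIds can only shrink along the backwards walk; a minimal sketch:

#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/SetOperations.h"
llvm::DenseSet<uint32_t> A = {1, 2, 3};
llvm::DenseSet<uint32_t> B = {2, 3, 4};
llvm::set_intersect(A, B); // A becomes {2, 3}; an empty result skips the call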
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
index 56aac0e..80d3ade 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
@@ -4699,11 +4699,10 @@ Instruction *InstCombinerImpl::visitXor(BinaryOperator &I) {
// (X | Y) ^ M -> (Y ^ M) ^ X
if (match(&I, m_c_Xor(m_OneUse(m_DisjointOr(m_Value(X), m_Value(Y))),
m_Value(M)))) {
- if (Value *XorAC =
- simplifyBinOp(Instruction::Xor, X, M, SQ.getWithInstruction(&I)))
+ if (Value *XorAC = simplifyXorInst(X, M, SQ.getWithInstruction(&I)))
return BinaryOperator::CreateXor(XorAC, Y);
- if (Value *XorBC = simplifyBinOp(Instruction::Xor, Y, M, SQ))
+ if (Value *XorBC = simplifyXorInst(Y, M, SQ.getWithInstruction(&I)))
return BinaryOperator::CreateXor(XorBC, X);
}
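A concrete instance of the fold this hunk touches (illustrative values): when X | Y is disjoint, X | Y equals X ^ Y, so the xor by M can be re-associated onto either operand, and the fold fires when one partial xor simplifies.

#include <cassert>
int main() {
  unsigned X = 0xF0, Y = 0x0F, M = 0xF3;  // X and Y share no bits
  assert(((X | Y) ^ M) == ((X ^ M) ^ Y)); // the XorAC re-association
  assert(((X | Y) ^ M) == ((Y ^ M) ^ X)); // the XorBC re-association
}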
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
index 80d6cec..698abbb 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
@@ -6087,12 +6087,12 @@ Instruction *InstCombinerImpl::foldICmpWithCastOp(ICmpInst &ICmp) {
// Turn icmp (ptrtoint x), (ptrtoint/c) into a compare of the input if the
// integer type is the same size as the pointer type.
- auto CompatibleSizes = [&](Type *SrcTy, Type *DestTy) {
- if (isa<VectorType>(SrcTy)) {
- SrcTy = cast<VectorType>(SrcTy)->getElementType();
- DestTy = cast<VectorType>(DestTy)->getElementType();
+ auto CompatibleSizes = [&](Type *PtrTy, Type *IntTy) {
+ if (isa<VectorType>(PtrTy)) {
+ PtrTy = cast<VectorType>(PtrTy)->getElementType();
+ IntTy = cast<VectorType>(IntTy)->getElementType();
}
- return DL.getPointerTypeSizeInBits(SrcTy) == DestTy->getIntegerBitWidth();
+ return DL.getPointerTypeSizeInBits(PtrTy) == IntTy->getIntegerBitWidth();
};
if (CastOp0->getOpcode() == Instruction::PtrToInt &&
CompatibleSizes(SrcTy, DestTy)) {
@@ -6109,6 +6109,22 @@ Instruction *InstCombinerImpl::foldICmpWithCastOp(ICmpInst &ICmp) {
return new ICmpInst(ICmp.getPredicate(), Op0Src, NewOp1);
}
+ // Do the same in the other direction for icmp (inttoptr x), (inttoptr/c).
+ if (CastOp0->getOpcode() == Instruction::IntToPtr &&
+ CompatibleSizes(DestTy, SrcTy)) {
+ Value *NewOp1 = nullptr;
+ if (auto *IntToPtrOp1 = dyn_cast<IntToPtrInst>(ICmp.getOperand(1))) {
+ Value *IntSrc = IntToPtrOp1->getOperand(0);
+ if (IntSrc->getType() == Op0Src->getType())
+ NewOp1 = IntToPtrOp1->getOperand(0);
+ } else if (auto *RHSC = dyn_cast<Constant>(ICmp.getOperand(1))) {
+ NewOp1 = ConstantFoldConstant(ConstantExpr::getPtrToInt(RHSC, SrcTy), DL);
+ }
+
+ if (NewOp1)
+ return new ICmpInst(ICmp.getPredicate(), Op0Src, NewOp1);
+ }
+
if (Instruction *R = foldICmpWithTrunc(ICmp))
return R;
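
The new hunk is the inttoptr mirror of the existing ptrtoint fold: when the integer source is pointer-sized, comparing the inttoptr results is equivalent to comparing the integers directly. A plain-C++ illustration of the equivalence (not the InstCombine code itself):

#include <cassert>
#include <cstdint>

bool cmpViaPtrs(uintptr_t A, uintptr_t B) {
  // icmp eq (inttoptr A), (inttoptr B)
  return reinterpret_cast<void *>(A) == reinterpret_cast<void *>(B);
}

bool cmpFolded(uintptr_t A, uintptr_t B) {
  // folded form: compare the integer sources
  return A == B;
}

int main() {
  assert(cmpViaPtrs(42, 42) == cmpFolded(42, 42));
  assert(cmpViaPtrs(1, 2) == cmpFolded(1, 2));
}
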
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp b/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
index 66f7c45..7476db9e 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
@@ -3551,9 +3551,7 @@ Instruction *InstCombinerImpl::foldSelectToCmp(SelectInst &SI) {
Pred = ICmpInst::getSwappedPredicate(Pred);
std::swap(LHS, RHS);
}
-
- Intrinsic::ID IID =
- ICmpInst::isSigned(Pred) ? Intrinsic::scmp : Intrinsic::ucmp;
+ bool IsSigned = ICmpInst::isSigned(Pred);
bool Replace = false;
ICmpInst::Predicate ExtendedCmpPredicate;
@@ -3575,6 +3573,32 @@ Instruction *InstCombinerImpl::foldSelectToCmp(SelectInst &SI) {
ICmpInst::getSwappedPredicate(ExtendedCmpPredicate) == Pred))
Replace = true;
+ // (x == y) ? 0 : (x > y ? 1 : -1)
+ ICmpInst::Predicate FalseBranchSelectPredicate;
+ const APInt *InnerTV, *InnerFV;
+ if (Pred == ICmpInst::ICMP_EQ && match(TV, m_Zero()) &&
+ match(FV, m_Select(m_c_ICmp(FalseBranchSelectPredicate, m_Specific(LHS),
+ m_Specific(RHS)),
+ m_APInt(InnerTV), m_APInt(InnerFV)))) {
+ if (!ICmpInst::isGT(FalseBranchSelectPredicate)) {
+ FalseBranchSelectPredicate =
+ ICmpInst::getSwappedPredicate(FalseBranchSelectPredicate);
+ std::swap(LHS, RHS);
+ }
+
+ if (!InnerTV->isOne()) {
+ std::swap(InnerTV, InnerFV);
+ std::swap(LHS, RHS);
+ }
+
+ if (ICmpInst::isGT(FalseBranchSelectPredicate) && InnerTV->isOne() &&
+ InnerFV->isAllOnes()) {
+ IsSigned = ICmpInst::isSigned(FalseBranchSelectPredicate);
+ Replace = true;
+ }
+ }
+
+ Intrinsic::ID IID = IsSigned ? Intrinsic::scmp : Intrinsic::ucmp;
if (Replace)
return replaceInstUsesWith(
SI, Builder.CreateIntrinsic(SI.getType(), IID, {LHS, RHS}));
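
The select tree being matched is the classic hand-written three-way compare. A plain-C++ model showing that it agrees with the `llvm.scmp` semantics the fold produces:

#include <cassert>

int threeWaySelects(int X, int Y) {
  return (X == Y) ? 0 : (X > Y ? 1 : -1); // matched select pattern
}

int threeWayScmp(int X, int Y) {
  return (X > Y) - (X < Y); // llvm.scmp.* semantics for signed ints
}

int main() {
  for (int X = -2; X <= 2; ++X)
    for (int Y = -2; Y <= 2; ++Y)
      assert(threeWaySelects(X, Y) == threeWayScmp(X, Y));
}
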
diff --git a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
index f6a0f58..5740285 100644
--- a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -1826,11 +1826,8 @@ Instruction *InstCombinerImpl::foldOpIntoPhi(Instruction &I, PHINode *PN) {
// If the only use of phi is comparing it with a constant then we can
// put this comparison in the incoming BB directly after a ucmp/scmp call
// because we know that it will simplify to a single icmp.
- // NOTE: the single-use check here is not only to ensure that the
- // optimization is profitable, but also to avoid creating a potentially
- // invalid phi node when we have a multi-edge in the CFG.
const APInt *Ignored;
- if (isa<CmpIntrinsic>(InVal) && InVal->hasOneUse() &&
+ if (isa<CmpIntrinsic>(InVal) && InVal->hasOneUser() &&
match(&I, m_ICmp(m_Specific(PN), m_APInt(Ignored)))) {
OpsToMoveUseToIncomingBB.push_back(i);
NewPhiValues.push_back(nullptr);
@@ -1868,18 +1865,24 @@ Instruction *InstCombinerImpl::foldOpIntoPhi(Instruction &I, PHINode *PN) {
// Clone the instruction that uses the phi node and move it into the incoming
// BB because we know that the next iteration of InstCombine will simplify it.
+ SmallDenseMap<BasicBlock *, Instruction *> Clones;
for (auto OpIndex : OpsToMoveUseToIncomingBB) {
Value *Op = PN->getIncomingValue(OpIndex);
BasicBlock *OpBB = PN->getIncomingBlock(OpIndex);
- Instruction *Clone = I.clone();
- for (Use &U : Clone->operands()) {
- if (U == PN)
- U = Op;
- else
- U = U->DoPHITranslation(PN->getParent(), OpBB);
+ Instruction *Clone = Clones.lookup(OpBB);
+ if (!Clone) {
+ Clone = I.clone();
+ for (Use &U : Clone->operands()) {
+ if (U == PN)
+ U = Op;
+ else
+ U = U->DoPHITranslation(PN->getParent(), OpBB);
+ }
+ Clone = InsertNewInstBefore(Clone, OpBB->getTerminator()->getIterator());
+ Clones.insert({OpBB, Clone});
}
- Clone = InsertNewInstBefore(Clone, OpBB->getTerminator()->getIterator());
+
NewPhiValues[OpIndex] = Clone;
}
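
The fix caches one clone per incoming block, so a block that reaches the phi through multiple edges reuses a single clone instead of creating one per edge. A hedged sketch of the memoization shape, with hypothetical stand-in types:

#include <unordered_map>

struct Block; // stand-in for llvm::BasicBlock
struct Inst {}; // stand-in for llvm::Instruction

Inst *cloneForBlock(Block *BB, std::unordered_map<Block *, Inst *> &Clones) {
  auto It = Clones.find(BB);
  if (It != Clones.end())
    return It->second; // same predecessor reached via another edge: reuse
  Inst *Clone = new Inst(); // the real code clones I and remaps its operands
  Clones.emplace(BB, Clone);
  return Clone;
}
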
diff --git a/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
index 3c3cc25..577647c 100644
--- a/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
@@ -1077,17 +1077,16 @@ void DFSanFunction::addReachesFunctionCallbacksIfEnabled(IRBuilder<> &IRB,
if (dbgloc.get() == nullptr) {
CILine = llvm::ConstantInt::get(I.getContext(), llvm::APInt(32, 0));
- FilePathPtr = IRB.CreateGlobalStringPtr(
+ FilePathPtr = IRB.CreateGlobalString(
I.getFunction()->getParent()->getSourceFileName());
} else {
CILine = llvm::ConstantInt::get(I.getContext(),
llvm::APInt(32, dbgloc.getLine()));
- FilePathPtr =
- IRB.CreateGlobalStringPtr(dbgloc->getFilename());
+ FilePathPtr = IRB.CreateGlobalString(dbgloc->getFilename());
}
llvm::Value *FunctionNamePtr =
- IRB.CreateGlobalStringPtr(I.getFunction()->getName());
+ IRB.CreateGlobalString(I.getFunction()->getName());
CallInst *CB;
std::vector<Value *> args;
@@ -1293,7 +1292,7 @@ void DataFlowSanitizer::buildExternWeakCheckIfNeeded(IRBuilder<> &IRB,
if (GlobalValue::isExternalWeakLinkage(F->getLinkage())) {
std::vector<Value *> Args;
Args.push_back(F);
- Args.push_back(IRB.CreateGlobalStringPtr(F->getName()));
+ Args.push_back(IRB.CreateGlobalString(F->getName()));
IRB.CreateCall(DFSanWrapperExternWeakNullFn, Args);
}
}
@@ -1313,8 +1312,7 @@ DataFlowSanitizer::buildWrapperFunction(Function *F, StringRef NewFName,
if (F->isVarArg()) {
NewF->removeFnAttr("split-stack");
CallInst::Create(DFSanVarargWrapperFn,
- IRBuilder<>(BB).CreateGlobalStringPtr(F->getName()), "",
- BB);
+ IRBuilder<>(BB).CreateGlobalString(F->getName()), "", BB);
new UnreachableInst(*Ctx, BB);
} else {
auto ArgIt = pointer_iterator<Argument *>(NewF->arg_begin());
@@ -3086,7 +3084,7 @@ bool DFSanVisitor::visitWrappedCallBase(Function &F, CallBase &CB) {
case DataFlowSanitizer::WK_Warning:
CB.setCalledFunction(&F);
IRB.CreateCall(DFSF.DFS.DFSanUnimplementedFn,
- IRB.CreateGlobalStringPtr(F.getName()));
+ IRB.CreateGlobalString(F.getName()));
DFSF.DFS.buildExternWeakCheckIfNeeded(IRB, &F);
DFSF.setShadow(&CB, DFSF.DFS.getZeroShadow(&CB));
DFSF.setOrigin(&CB, DFSF.DFS.ZeroOrigin);
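
These are mechanical renames: with opaque pointers, `CreateGlobalStringPtr` and `CreateGlobalString` yield the same pointer value, so the former is redundant. A minimal usage sketch, assuming an existing builder and function:

#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"

// Emits a private constant global holding F's name and returns a pointer to
// it; with opaque pointers no GEP through the array type is needed.
llvm::Value *emitNamePtr(llvm::IRBuilder<> &IRB, llvm::Function &F) {
  return IRB.CreateGlobalString(F.getName());
}
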
diff --git a/llvm/lib/Transforms/Instrumentation/GCOVProfiling.cpp b/llvm/lib/Transforms/Instrumentation/GCOVProfiling.cpp
index 694b2e6..a409f61 100644
--- a/llvm/lib/Transforms/Instrumentation/GCOVProfiling.cpp
+++ b/llvm/lib/Transforms/Instrumentation/GCOVProfiling.cpp
@@ -1126,7 +1126,7 @@ Function *GCOVProfiler::insertCounterWriteout(
uint32_t CfgChecksum = FileChecksums.empty() ? 0 : FileChecksums[i];
auto *StartFileCallArgs = ConstantStruct::get(
StartFileCallArgsTy,
- {Builder.CreateGlobalStringPtr(FilenameGcda),
+ {Builder.CreateGlobalString(FilenameGcda),
Builder.getInt32(endian::read32be(Options.Version)),
Builder.getInt32(CfgChecksum)});
diff --git a/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp
index e386fa5..7d3db5c 100644
--- a/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp
@@ -64,7 +64,6 @@
#include "llvm/Transforms/Utils/MemoryTaggingSupport.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
-#include <cstdint>
#include <optional>
#include <random>
@@ -422,7 +421,7 @@ private:
void init(Triple &TargetTriple, bool InstrumentWithCalls);
Align getObjectAlignment() const { return Align(1ULL << Scale); }
bool isInGlobal() const { return Kind == OffsetKind::kGlobal; }
- bool isInifunc() const { return Kind == OffsetKind::kIfunc; }
+ bool isInIfunc() const { return Kind == OffsetKind::kIfunc; }
bool isInTls() const { return Kind == OffsetKind::kTls; }
bool isFixed() const { return Kind == OffsetKind::kFixed; }
uint8_t scale() const { return Scale; };
@@ -836,7 +835,7 @@ Value *HWAddressSanitizer::getShadowNonTls(IRBuilder<> &IRB) {
ConstantInt::get(IntptrTy, Mapping.offset()), PtrTy));
}
- if (Mapping.isInifunc())
+ if (Mapping.isInIfunc())
return getDynamicShadowIfunc(IRB);
Value *GlobalDynamicAddress =
diff --git a/llvm/lib/Transforms/Instrumentation/PGOCtxProfFlattening.cpp b/llvm/lib/Transforms/Instrumentation/PGOCtxProfFlattening.cpp
index 4bb505a..ca29d8b 100644
--- a/llvm/lib/Transforms/Instrumentation/PGOCtxProfFlattening.cpp
+++ b/llvm/lib/Transforms/Instrumentation/PGOCtxProfFlattening.cpp
@@ -154,6 +154,8 @@ class ProfileAnnotator final {
bool hasCount() const { return Count.has_value(); }
+ uint64_t getCount() const { return *Count; }
+
bool trySetSingleUnknownInEdgeCount() {
if (UnknownCountInEdges == 1) {
setSingleUnknownEdgeCount(InEdges);
@@ -266,6 +268,21 @@ class ProfileAnnotator final {
return HitExit;
}
+ bool allNonColdSelectsHaveProfile() const {
+ for (const auto &BB : F) {
+ if (getBBInfo(BB).getCount() > 0) {
+ for (const auto &I : BB) {
+ if (const auto *SI = dyn_cast<SelectInst>(&I)) {
+ if (!SI->getMetadata(LLVMContext::MD_prof)) {
+ return false;
+ }
+ }
+ }
+ }
+ }
+ return true;
+ }
+
public:
ProfileAnnotator(Function &F, const SmallVectorImpl<uint64_t> &Counters,
InstrProfSummaryBuilder &PB)
@@ -315,6 +332,32 @@ public:
"populated, because we need pointers to its contents to be stable");
}
+ void setProfileForSelectInstructions(BasicBlock &BB, const BBInfo &BBInfo) {
+ if (BBInfo.getCount() == 0)
+ return;
+
+ for (auto &I : BB) {
+ if (auto *SI = dyn_cast<SelectInst>(&I)) {
+ if (auto *Step = CtxProfAnalysis::getSelectInstrumentation(*SI)) {
+ auto Index = Step->getIndex()->getZExtValue();
+ assert(Index < Counters.size() &&
+ "The index of the step instruction must be inside the "
+ "counters vector by "
+ "construction - tripping this assertion indicates a bug in "
+ "how the contextual profile is managed by IPO transforms");
+ auto TotalCount = BBInfo.getCount();
+ auto TrueCount = Counters[Index];
+ auto FalseCount =
+ (TotalCount > TrueCount ? TotalCount - TrueCount : 0U);
+ setProfMetadata(F.getParent(), SI, {TrueCount, FalseCount},
+ std::max(TrueCount, FalseCount));
+ PB.addInternalCount(TrueCount);
+ PB.addInternalCount(FalseCount);
+ }
+ }
+ }
+ }
+
/// Assign branch weights and function entry count. Also update the PSI
/// builder.
void assignProfileData() {
@@ -324,12 +367,14 @@ public:
PB.addEntryCount(Counters[0]);
for (auto &BB : F) {
+ const auto &BBInfo = getBBInfo(BB);
+ setProfileForSelectInstructions(BB, BBInfo);
if (succ_size(&BB) < 2)
continue;
auto *Term = BB.getTerminator();
SmallVector<uint64_t, 2> EdgeCounts(Term->getNumSuccessors(), 0);
uint64_t MaxCount = 0;
- const auto &BBInfo = getBBInfo(BB);
+
for (unsigned SuccIdx = 0, Size = BBInfo.getNumOutEdges(); SuccIdx < Size;
++SuccIdx) {
uint64_t EdgeCount = BBInfo.getEdgeCount(SuccIdx);
@@ -343,12 +388,15 @@ public:
setProfMetadata(F.getParent(), Term, EdgeCounts, MaxCount);
}
assert(allCountersAreAssigned() &&
- "Expected all counters have been assigned.");
+ "[ctx-prof] Expected all counters have been assigned.");
assert(allTakenPathsExit() &&
"[ctx-prof] Encountered a BB with more than one successor, where "
"all outgoing edges have a 0 count. This occurs in non-exiting "
"functions (message pumps, usually) which are not supported in the "
"contextual profiling case");
+ assert(allNonColdSelectsHaveProfile() &&
+ "[ctx-prof] All non-cold select instructions were expected to have "
+ "a profile.");
}
};
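
The select weights set above come from plain counter arithmetic: the step counter records how many times the select condition was true, and the false count is the saturating remainder of the block count. A standalone model of that computation:

#include <algorithm>
#include <cassert>
#include <cstdint>

struct SelectWeights {
  uint64_t TrueCount, FalseCount, Max;
};

SelectWeights computeWeights(uint64_t TotalCount, uint64_t TrueCount) {
  // Saturate instead of underflowing if counters are slightly inconsistent.
  uint64_t FalseCount = TotalCount > TrueCount ? TotalCount - TrueCount : 0;
  return {TrueCount, FalseCount, std::max(TrueCount, FalseCount)};
}

int main() {
  SelectWeights W = computeWeights(/*TotalCount=*/10, /*TrueCount=*/7);
  assert(W.TrueCount == 7 && W.FalseCount == 3 && W.Max == 7);
}
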
diff --git a/llvm/lib/Transforms/Scalar/DFAJumpThreading.cpp b/llvm/lib/Transforms/Scalar/DFAJumpThreading.cpp
index ef9c264..0e2b5c9 100644
--- a/llvm/lib/Transforms/Scalar/DFAJumpThreading.cpp
+++ b/llvm/lib/Transforms/Scalar/DFAJumpThreading.cpp
@@ -194,7 +194,6 @@ void unfold(DomTreeUpdater *DTU, LoopInfo *LI, SelectInstToUnfold SIToUnfold,
SelectInst *SI = SIToUnfold.getInst();
PHINode *SIUse = SIToUnfold.getUse();
BasicBlock *StartBlock = SI->getParent();
- BasicBlock *EndBlock = SIUse->getParent();
BranchInst *StartBlockTerm =
dyn_cast<BranchInst>(StartBlock->getTerminator());
@@ -202,6 +201,7 @@ void unfold(DomTreeUpdater *DTU, LoopInfo *LI, SelectInstToUnfold SIToUnfold,
assert(SI->hasOneUse());
if (StartBlockTerm->isUnconditional()) {
+ BasicBlock *EndBlock = StartBlock->getUniqueSuccessor();
// Arbitrarily choose the 'false' side for a new input value to the PHI.
BasicBlock *NewBlock = BasicBlock::Create(
SI->getContext(), Twine(SI->getName(), ".si.unfold.false"),
@@ -223,32 +223,44 @@ void unfold(DomTreeUpdater *DTU, LoopInfo *LI, SelectInstToUnfold SIToUnfold,
NewBlock->getFirstInsertionPt());
NewPhi->addIncoming(SIOp2, StartBlock);
- if (auto *OpSi = dyn_cast<SelectInst>(SIOp1))
- NewSIsToUnfold->push_back(SelectInstToUnfold(OpSi, SIUse));
- if (auto *OpSi = dyn_cast<SelectInst>(SIOp2))
- NewSIsToUnfold->push_back(SelectInstToUnfold(OpSi, NewPhi));
-
- // Update the phi node of SI.
- for (unsigned Idx = 0; Idx < SIUse->getNumIncomingValues(); ++Idx) {
- if (SIUse->getIncomingBlock(Idx) == StartBlock)
- SIUse->setIncomingValue(Idx, SIOp1);
+ // Update any other PHI nodes in EndBlock.
+ for (PHINode &Phi : EndBlock->phis()) {
+ if (SIUse == &Phi)
+ continue;
+ Phi.addIncoming(Phi.getIncomingValueForBlock(StartBlock), NewBlock);
}
- SIUse->addIncoming(NewPhi, NewBlock);
- // Update any other PHI nodes in EndBlock.
- for (auto II = EndBlock->begin(); PHINode *Phi = dyn_cast<PHINode>(II);
- ++II) {
- if (Phi != SIUse)
- Phi->addIncoming(Phi->getIncomingValueForBlock(StartBlock), NewBlock);
+ // Update the phi node of SI, which is its only use.
+ if (EndBlock == SIUse->getParent()) {
+ SIUse->addIncoming(NewPhi, NewBlock);
+ SIUse->replaceUsesOfWith(SI, SIOp1);
+ } else {
+ PHINode *EndPhi = PHINode::Create(SIUse->getType(), pred_size(EndBlock),
+ Twine(SI->getName(), ".si.unfold.phi"),
+ EndBlock->getFirstInsertionPt());
+ for (BasicBlock *Pred : predecessors(EndBlock)) {
+ if (Pred != StartBlock && Pred != NewBlock)
+ EndPhi->addIncoming(EndPhi, Pred);
+ }
+
+ EndPhi->addIncoming(SIOp1, StartBlock);
+ EndPhi->addIncoming(NewPhi, NewBlock);
+ SIUse->replaceUsesOfWith(SI, EndPhi);
+ SIUse = EndPhi;
}
- StartBlockTerm->eraseFromParent();
+ if (auto *OpSi = dyn_cast<SelectInst>(SIOp1))
+ NewSIsToUnfold->push_back(SelectInstToUnfold(OpSi, SIUse));
+ if (auto *OpSi = dyn_cast<SelectInst>(SIOp2))
+ NewSIsToUnfold->push_back(SelectInstToUnfold(OpSi, NewPhi));
// Insert the real conditional branch based on the original condition.
+ StartBlockTerm->eraseFromParent();
BranchInst::Create(EndBlock, NewBlock, SI->getCondition(), StartBlock);
DTU->applyUpdates({{DominatorTree::Insert, StartBlock, EndBlock},
{DominatorTree::Insert, StartBlock, NewBlock}});
} else {
+ BasicBlock *EndBlock = SIUse->getParent();
BasicBlock *NewBlockT = BasicBlock::Create(
SI->getContext(), Twine(SI->getName(), ".si.unfold.true"),
EndBlock->getParent(), EndBlock);
diff --git a/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp b/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
index 1d67773..2f88b19 100644
--- a/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
+++ b/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
@@ -638,6 +638,7 @@ bool MemCpyOptPass::processStoreOfLoad(StoreInst *SI, LoadInst *LI,
if (!LI->isSimple() || !LI->hasOneUse() || LI->getParent() != SI->getParent())
return false;
+ BatchAAResults BAA(*AA);
auto *T = LI->getType();
// Don't introduce calls to memcpy/memmove intrinsics out of thin air if
// the corresponding libcalls are not available.
@@ -647,19 +648,17 @@ bool MemCpyOptPass::processStoreOfLoad(StoreInst *SI, LoadInst *LI,
(EnableMemCpyOptWithoutLibcalls ||
(TLI->has(LibFunc_memcpy) && TLI->has(LibFunc_memmove)))) {
MemoryLocation LoadLoc = MemoryLocation::get(LI);
-
- // We use alias analysis to check if an instruction may store to
- // the memory we load from in between the load and the store. If
- // such an instruction is found, we try to promote there instead
- // of at the store position.
- // TODO: Can use MSSA for this.
- Instruction *P = SI;
- for (auto &I : make_range(++LI->getIterator(), SI->getIterator())) {
- if (isModSet(AA->getModRefInfo(&I, LoadLoc))) {
- P = &I;
- break;
- }
- }
+ MemoryUseOrDef *LoadAccess = MSSA->getMemoryAccess(LI),
+ *StoreAccess = MSSA->getMemoryAccess(SI);
+
+ // We use MSSA to check if an instruction may store to the memory we load
+ // from in between the load and the store. If such an instruction is found,
+ // we try to promote there instead of at the store position.
+ auto *Clobber = MSSA->getWalker()->getClobberingMemoryAccess(
+ StoreAccess->getDefiningAccess(), LoadLoc, BAA);
+ Instruction *P = MSSA->dominates(LoadAccess, Clobber)
+ ? cast<MemoryUseOrDef>(Clobber)->getMemoryInst()
+ : SI;
// If we found an instruction that may write to the loaded memory,
// we can try to promote at this position instead of the store
@@ -707,7 +706,6 @@ bool MemCpyOptPass::processStoreOfLoad(StoreInst *SI, LoadInst *LI,
// Detect cases where we're performing call slot forwarding, but
// happen to be using a load-store pair to implement it, rather than
// a memcpy.
- BatchAAResults BAA(*AA);
auto GetCall = [&]() -> CallInst * {
// We defer this expensive clobber walk until the cheap checks
// have been done on the source inside performCallSlotOptzn.
diff --git a/llvm/lib/Transforms/Utils/InlineFunction.cpp b/llvm/lib/Transforms/Utils/InlineFunction.cpp
index 152b494..25a0155 100644
--- a/llvm/lib/Transforms/Utils/InlineFunction.cpp
+++ b/llvm/lib/Transforms/Utils/InlineFunction.cpp
@@ -2223,7 +2223,21 @@ remapIndices(Function &Caller, BasicBlock *StartBB,
}
for (auto &I : llvm::make_early_inc_range(*BB)) {
if (auto *Inc = dyn_cast<InstrProfIncrementInst>(&I)) {
- if (Inc != BBID) {
+ if (isa<InstrProfIncrementInstStep>(Inc)) {
+ // Step instrumentation is used for select instructions. Inlining may
+ // have propagated a constant resulting in the condition of the select
+ // being resolved, in which case function cloning resolves the
+ // value of the select and elides the select instruction. When that
+ // happens, the step parameter of the instrumentation reflects it,
+ // and we can delete the instrumentation.
+ if (isa<Constant>(Inc->getStep())) {
+ assert(!Inc->getNextNode() || !isa<SelectInst>(Inc->getNextNode()));
+ Inc->eraseFromParent();
+ } else {
+ assert(isa_and_nonnull<SelectInst>(Inc->getNextNode()));
+ RewriteInstrIfNeeded(*Inc);
+ }
+ } else if (Inc != BBID) {
// If we're here it means that the BB had more than one ID, presumably
// some coming from the callee. We "made up our mind" to keep the
// first one (which may or may not have been originally the caller's).
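
For context on why a constant step makes the instrumentation deletable: the step intrinsic bumps the counter by the select condition, so once inlining folds the condition (and cloning elides the select) the increment carries no information. Conceptually:

#include <cstdint>

// Conceptual semantics of select instrumentation with a step: the counter
// advances by the condition value, counting the "true" executions.
void stepCounter(uint64_t &Counter, bool SelectCond) {
  Counter += SelectCond ? 1 : 0;
}
// If SelectCond folds to a constant, the increment tells us nothing new and
// the instrumentation (and its select) can be removed.
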
diff --git a/llvm/lib/Transforms/Utils/LoopConstrainer.cpp b/llvm/lib/Transforms/Utils/LoopConstrainer.cpp
index 4ae2bac..8f10315 100644
--- a/llvm/lib/Transforms/Utils/LoopConstrainer.cpp
+++ b/llvm/lib/Transforms/Utils/LoopConstrainer.cpp
@@ -510,7 +510,7 @@ void LoopConstrainer::cloneLoop(LoopConstrainer::ClonedLoop &Result,
for (PHINode &PN : SBB->phis()) {
Value *OldIncoming = PN.getIncomingValueForBlock(OriginalBB);
PN.addIncoming(GetClonedValue(OldIncoming), ClonedBB);
- SE.forgetValue(&PN);
+ SE.forgetLcssaPhiWithNewPredecessor(&OriginalLoop, &PN);
}
}
}
diff --git a/llvm/lib/Transforms/Utils/LoopUnroll.cpp b/llvm/lib/Transforms/Utils/LoopUnroll.cpp
index a0406111..b90addc 100644
--- a/llvm/lib/Transforms/Utils/LoopUnroll.cpp
+++ b/llvm/lib/Transforms/Utils/LoopUnroll.cpp
@@ -770,7 +770,7 @@ llvm::UnrollLoop(Loop *L, UnrollLoopOptions ULO, LoopInfo *LI,
if (It != LastValueMap.end())
Incoming = It->second;
PHI.addIncoming(Incoming, New);
- SE->forgetValue(&PHI);
+ SE->forgetLcssaPhiWithNewPredecessor(L, &PHI);
}
}
// Keep track of new headers and latches as we create them, so that
diff --git a/llvm/lib/Transforms/Utils/LoopUnrollRuntime.cpp b/llvm/lib/Transforms/Utils/LoopUnrollRuntime.cpp
index 2d74b2b..5e2c153 100644
--- a/llvm/lib/Transforms/Utils/LoopUnrollRuntime.cpp
+++ b/llvm/lib/Transforms/Utils/LoopUnrollRuntime.cpp
@@ -146,7 +146,7 @@ static void ConnectProlog(Loop *L, Value *BECount, unsigned Count,
PN.setIncomingValueForBlock(NewPreHeader, NewPN);
else
PN.addIncoming(NewPN, PrologExit);
- SE.forgetValue(&PN);
+ SE.forgetLcssaPhiWithNewPredecessor(L, &PN);
}
}
diff --git a/llvm/lib/Transforms/Utils/LoopVersioning.cpp b/llvm/lib/Transforms/Utils/LoopVersioning.cpp
index af2ed62..8f8c40a 100644
--- a/llvm/lib/Transforms/Utils/LoopVersioning.cpp
+++ b/llvm/lib/Transforms/Utils/LoopVersioning.cpp
@@ -137,7 +137,7 @@ void LoopVersioning::addPHINodes(
// original loop.
for (auto I = PHIBlock->begin(); (PN = dyn_cast<PHINode>(I)); ++I) {
if (PN->getIncomingValue(0) == Inst) {
- SE->forgetValue(PN);
+ SE->forgetLcssaPhiWithNewPredecessor(VersionedLoop, PN);
break;
}
}
diff --git a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp b/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
index 03db86a..1f2c938 100644
--- a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
+++ b/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
@@ -2477,6 +2477,16 @@ static bool sinkCommonCodeFromPredecessors(BasicBlock *BB,
bool followedByDeoptOrUnreachable = IsBlockFollowedByDeoptOrUnreachable(BB);
if (!followedByDeoptOrUnreachable) {
+ // Check whether this is the pointer operand of a load/store.
+ auto IsMemOperand = [](Use &U) {
+ auto *I = cast<Instruction>(U.getUser());
+ if (isa<LoadInst>(I))
+ return U.getOperandNo() == LoadInst::getPointerOperandIndex();
+ if (isa<StoreInst>(I))
+ return U.getOperandNo() == StoreInst::getPointerOperandIndex();
+ return false;
+ };
+
// Okay, we *could* sink last ScanIdx instructions. But how many can we
// actually sink before encountering instruction that is unprofitable to
// sink?
@@ -2488,6 +2498,13 @@ static bool sinkCommonCodeFromPredecessors(BasicBlock *BB,
return InstructionsToSink.contains(V);
})) {
++NumPHIInsts;
+ // Do not separate a load/store from the gep producing the address.
+ // The gep can likely be folded into the load/store as an addressing
+ // mode. Additionally, a load of a gep is easier to analyze than a
+ // load of a phi.
+ if (IsMemOperand(U) &&
+ any_of(It->second, [](Value *V) { return isa<GEPOperator>(V); }))
+ return false;
// FIXME: this check is overly optimistic. We may end up not sinking
// said instruction, due to the very same profitability check.
// See @creating_too_many_phis in sink-common-code.ll.
@@ -6511,9 +6528,10 @@ SwitchLookupTable::SwitchLookupTable(
if (LinearMappingPossible) {
LinearOffset = cast<ConstantInt>(TableContents[0]);
LinearMultiplier = ConstantInt::get(M.getContext(), DistToPrev);
- bool MayWrap = false;
APInt M = LinearMultiplier->getValue();
- (void)M.smul_ov(APInt(M.getBitWidth(), TableSize - 1), MayWrap);
+ bool MayWrap = true;
+ if (isIntN(M.getBitWidth(), TableSize - 1))
+ (void)M.smul_ov(APInt(M.getBitWidth(), TableSize - 1), MayWrap);
LinearMapValWrapped = NonMonotonic || MayWrap;
Kind = LinearMapKind;
++NumLinearMaps;
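
The old code called `smul_ov` even when `TableSize - 1` did not fit the multiplier's bit width, which can assert; the fix only multiplies when the value fits and otherwise conservatively assumes wrapping. A hedged standalone sketch of the guard, mirroring the hunk:

#include "llvm/ADT/APInt.h"
#include "llvm/Support/MathExtras.h"

// True when Multiplier * (TableSize - 1) may wrap the signed range.
bool linearMapMayWrap(const llvm::APInt &Multiplier, uint64_t TableSize) {
  bool MayWrap = true; // conservative when TableSize - 1 doesn't fit
  if (llvm::isIntN(Multiplier.getBitWidth(), TableSize - 1))
    (void)Multiplier.smul_ov(
        llvm::APInt(Multiplier.getBitWidth(), TableSize - 1), MayWrap);
  return MayWrap;
}
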
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp
index a478748..e695902 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp
@@ -1334,11 +1334,17 @@ bool LoopVectorizationLegality::canVectorizeWithIfConvert() {
// we restrict this to loads; stores are more complicated due to
// concurrency restrictions.
ScalarEvolution &SE = *PSE.getSE();
+ SmallVector<const SCEVPredicate *, 4> Predicates;
for (Instruction &I : *BB) {
LoadInst *LI = dyn_cast<LoadInst>(&I);
+ // Pass the Predicates pointer to isDereferenceableAndAlignedInLoop so
+ // that it will consider loops that need guarding by SCEV checks. The
+ // vectoriser will generate these checks if we decide to vectorise.
if (LI && !LI->getType()->isVectorTy() && !mustSuppressSpeculation(*LI) &&
- isDereferenceableAndAlignedInLoop(LI, TheLoop, SE, *DT, AC))
+ isDereferenceableAndAlignedInLoop(LI, TheLoop, SE, *DT, AC,
+ &Predicates))
SafePointers.insert(LI->getPointerOperand());
+ Predicates.clear();
}
}
@@ -1467,13 +1473,13 @@ bool LoopVectorizationLegality::isVectorizableEarlyExitLoop() {
// Keep a record of all the exiting blocks.
SmallVector<const SCEVPredicate *, 4> Predicates;
- for (BasicBlock *BB1 : ExitingBlocks) {
+ for (BasicBlock *BB : ExitingBlocks) {
const SCEV *EC =
- PSE.getSE()->getPredicatedExitCount(TheLoop, BB1, &Predicates);
+ PSE.getSE()->getPredicatedExitCount(TheLoop, BB, &Predicates);
if (isa<SCEVCouldNotCompute>(EC)) {
- UncountableExitingBlocks.push_back(BB1);
+ UncountableExitingBlocks.push_back(BB);
- SmallVector<BasicBlock *, 2> Succs(successors(BB1));
+ SmallVector<BasicBlock *, 2> Succs(successors(BB));
if (Succs.size() != 2) {
reportVectorizationFailure(
"Early exiting block does not have exactly two successors",
@@ -1482,17 +1488,21 @@ bool LoopVectorizationLegality::isVectorizableEarlyExitLoop() {
return false;
}
- BasicBlock *BB2;
+ BasicBlock *ExitBlock;
if (!TheLoop->contains(Succs[0]))
- BB2 = Succs[0];
+ ExitBlock = Succs[0];
else {
assert(!TheLoop->contains(Succs[1]));
- BB2 = Succs[1];
+ ExitBlock = Succs[1];
}
- UncountableExitBlocks.push_back(BB2);
+ UncountableExitBlocks.push_back(ExitBlock);
} else
- CountableExitingBlocks.push_back(BB1);
+ CountableExitingBlocks.push_back(BB);
}
+ // We can safely ignore the predicates here because when vectorizing the loop
+ // the PredicatedScalarEvolution class will keep track of all predicates
+ // for each exiting block anyway. This happens when calling
+ // PSE.getSymbolicMaxBackedgeTakenCount() below.
Predicates.clear();
// We only support one uncountable early exit.
@@ -1507,13 +1517,25 @@ bool LoopVectorizationLegality::isVectorizableEarlyExitLoop() {
// The only supported early exit loops so far are ones where the early
// exiting block is a unique predecessor of the latch block.
BasicBlock *LatchPredBB = LatchBB->getUniquePredecessor();
- if (LatchPredBB != getSpeculativeEarlyExitingBlock()) {
+ if (LatchPredBB != getUncountableEarlyExitingBlock()) {
reportVectorizationFailure("Early exit is not the latch predecessor",
"Cannot vectorize early exit loop",
"EarlyExitNotLatchPredecessor", ORE, TheLoop);
return false;
}
+ // The latch block must have a countable exit.
+ if (isa<SCEVCouldNotCompute>(
+ PSE.getSE()->getPredicatedExitCount(TheLoop, LatchBB, &Predicates))) {
+ reportVectorizationFailure(
+ "Cannot determine exact exit count for latch block",
+ "Cannot vectorize early exit loop",
+ "UnknownLatchExitCountEarlyExitLoop", ORE, TheLoop);
+ return false;
+ }
+ assert(llvm::is_contained(CountableExitingBlocks, LatchBB) &&
+ "Latch block not found in list of countable exits!");
+
// Check to see if there are instructions that could potentially generate
// exceptions or have side-effects.
auto IsSafeOperation = [](Instruction *I) -> bool {
@@ -1549,22 +1571,14 @@ bool LoopVectorizationLegality::isVectorizableEarlyExitLoop() {
}
}
- // The latch block must have a countable exit.
- if (isa<SCEVCouldNotCompute>(
- PSE.getSE()->getPredicatedExitCount(TheLoop, LatchBB, &Predicates))) {
- reportVectorizationFailure(
- "Cannot determine exact exit count for latch block",
- "Cannot vectorize early exit loop",
- "UnknownLatchExitCountEarlyExitLoop", ORE, TheLoop);
- return false;
- }
-
// The vectoriser cannot handle loads that occur after the early exit block.
- assert(LatchBB->getUniquePredecessor() == getSpeculativeEarlyExitingBlock() &&
+ assert(LatchBB->getUniquePredecessor() == getUncountableEarlyExitingBlock() &&
"Expected latch predecessor to be the early exiting block");
// TODO: Handle loops that may fault.
- if (!isDereferenceableReadOnlyLoop(TheLoop, PSE.getSE(), DT, AC)) {
+ Predicates.clear();
+ if (!isDereferenceableReadOnlyLoop(TheLoop, PSE.getSE(), DT, AC,
+ &Predicates)) {
reportVectorizationFailure(
"Loop may fault",
"Cannot vectorize potentially faulting early exit loop",
@@ -1572,16 +1586,15 @@ bool LoopVectorizationLegality::isVectorizableEarlyExitLoop() {
return false;
}
- LLVM_DEBUG(
- dbgs()
- << "LV: Found an early exit. Retrying with speculative exit count.\n");
- [[maybe_unused]] const SCEV *SpecExitCount =
+ [[maybe_unused]] const SCEV *SymbolicMaxBTC =
PSE.getSymbolicMaxBackedgeTakenCount();
- assert(!isa<SCEVCouldNotCompute>(SpecExitCount) &&
+ // Since we have an exact exit count for the latch and the early exit
+ // dominates the latch, this should guarantee a computed SCEV value.
+ assert(!isa<SCEVCouldNotCompute>(SymbolicMaxBTC) &&
"Failed to get symbolic expression for backedge taken count");
-
- LLVM_DEBUG(dbgs() << "LV: Found speculative backedge taken count: "
- << *SpecExitCount << '\n');
+ LLVM_DEBUG(dbgs() << "LV: Found an early exit loop with symbolic max "
+ "backedge taken count: "
+ << *SymbolicMaxBTC << '\n');
return true;
}
@@ -1645,7 +1658,7 @@ bool LoopVectorizationLegality::canVectorize(bool UseVPlanNativePath) {
return false;
}
- HasSpeculativeEarlyExit = false;
+ HasUncountableEarlyExit = false;
if (isa<SCEVCouldNotCompute>(PSE.getBackedgeTakenCount())) {
if (!isVectorizableEarlyExitLoop()) {
if (DoExtraAnalysis)
@@ -1653,7 +1666,7 @@ bool LoopVectorizationLegality::canVectorize(bool UseVPlanNativePath) {
else
return false;
} else
- HasSpeculativeEarlyExit = true;
+ HasUncountableEarlyExit = true;
}
// Go over each instruction and look at memory deps.
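
A concrete shape of the loop these renames describe: the latch exit is countable (SCEV knows the trip count is at most N), while the search exit is uncountable because the match position is data-dependent:

// The `I < N` latch exit is countable; the `A[I] == X` exit is uncountable,
// since SCEV cannot compute when (or whether) the match occurs.
int findIndex(const int *A, int N, int X) {
  for (int I = 0; I < N; ++I)
    if (A[I] == X)
      return I; // uncountable early exit
  return -1;
}
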
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorizationPlanner.h b/llvm/lib/Transforms/Vectorize/LoopVectorizationPlanner.h
index 034fdf4..00eec0a 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorizationPlanner.h
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorizationPlanner.h
@@ -220,6 +220,11 @@ public:
new VPInstruction(Instruction::ICmp, Pred, A, B, DL, Name));
}
+ VPInstruction *createPtrAdd(VPValue *Ptr, VPValue *Offset, DebugLoc DL,
+ const Twine &Name = "") {
+ return createInstruction(VPInstruction::PtrAdd, {Ptr, Offset}, DL, Name);
+ }
+
VPDerivedIVRecipe *createDerivedIV(InductionDescriptor::InductionKind Kind,
FPMathOperator *FPBinOp, VPValue *Start,
VPCanonicalIVPHIRecipe *CanonicalIV,
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
index 9685e7d1..09e4d0f 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -538,12 +538,6 @@ protected:
/// A small list of PHINodes.
using PhiVector = SmallVector<PHINode *, 4>;
- /// A type for scalarized values in the new loop. Each value from the
- /// original loop, when scalarized, is represented by UF x VF scalar values
- /// in the new unrolled loop, where UF is the unroll factor and VF is the
- /// vectorization factor.
- using ScalarParts = SmallVector<SmallVector<Value *, 4>, 2>;
-
/// Set up the values of the IVs correctly when exiting the vector loop.
void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II,
Value *VectorTripCount, Value *EndValue,
@@ -3075,13 +3069,13 @@ void InnerLoopVectorizer::fixNonInductionPHIs(VPlan &Plan,
VPWidenPHIRecipe *VPPhi = dyn_cast<VPWidenPHIRecipe>(&P);
if (!VPPhi)
continue;
- PHINode *NewPhi = cast<PHINode>(State.get(VPPhi, 0));
+ PHINode *NewPhi = cast<PHINode>(State.get(VPPhi));
// Make sure the builder has a valid insert point.
Builder.SetInsertPoint(NewPhi);
for (unsigned Idx = 0; Idx < VPPhi->getNumOperands(); ++Idx) {
VPValue *Inc = VPPhi->getIncomingValue(Idx);
VPBasicBlock *VPBB = VPPhi->getIncomingBlock(Idx);
- NewPhi->addIncoming(State.get(Inc, 0), State.CFG.VPBB2IRBB[VPBB]);
+ NewPhi->addIncoming(State.get(Inc), State.CFG.VPBB2IRBB[VPBB]);
}
}
}
@@ -5207,7 +5201,9 @@ LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<ElementCount> VFs) {
const auto &TTICapture = TTI;
auto GetRegUsage = [&TTICapture](Type *Ty, ElementCount VF) -> unsigned {
- if (Ty->isTokenTy() || !VectorType::isValidElementType(Ty))
+ if (Ty->isTokenTy() || !VectorType::isValidElementType(Ty) ||
+ (VF.isScalable() &&
+ !TTICapture.isElementTypeLegalForScalableVector(Ty)))
return 0;
return TTICapture.getRegUsageForType(VectorType::get(Ty, VF));
};
@@ -9445,7 +9441,7 @@ void VPReplicateRecipe::execute(VPTransformState &State) {
assert(!State.VF.isScalable() && "VF is assumed to be non scalable.");
Value *Poison = PoisonValue::get(
VectorType::get(UI->getType(), State.VF));
- State.set(this, Poison, State.Instance->Part);
+ State.set(this, Poison);
}
State.packScalarIntoVectorValue(this, *State.Instance);
}
@@ -9467,7 +9463,7 @@ void VPReplicateRecipe::execute(VPTransformState &State) {
return;
}
- // Generate scalar instances for all VF lanes of all UF parts.
+ // Generate scalar instances for all VF lanes.
assert(!State.VF.isScalable() && "Can't scalarize a scalable vector");
const unsigned EndLane = State.VF.getKnownMinValue();
for (unsigned Lane = 0; Lane < EndLane; ++Lane)
@@ -9792,11 +9788,12 @@ bool LoopVectorizePass::processLoop(Loop *L) {
return false;
}
- if (LVL.hasSpeculativeEarlyExit()) {
- reportVectorizationFailure(
- "Auto-vectorization of early exit loops is not yet supported.",
- "Auto-vectorization of early exit loops is not yet supported.",
- "EarlyExitLoopsUnsupported", ORE, L);
+ if (LVL.hasUncountableEarlyExit()) {
+ reportVectorizationFailure("Auto-vectorization of loops with uncountable "
+ "early exit is not yet supported",
+ "Auto-vectorization of loops with uncountable "
+ "early exit is not yet supported",
+ "UncountableEarlyExitLoopsUnsupported", ORE, L);
return false;
}
diff --git a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
index a88702b..414c638 100644
--- a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -1930,30 +1930,38 @@ public:
/// elements in the lane, it will be vectorized with higher probability
/// after removing duplicates. Currently the SLP vectorizer supports only
/// vectorization of a power-of-2 number of unique scalars.
- int getSplatScore(unsigned Lane, unsigned OpIdx, unsigned Idx) const {
+ int getSplatScore(unsigned Lane, unsigned OpIdx, unsigned Idx,
+ const SmallBitVector &UsedLanes) const {
Value *IdxLaneV = getData(Idx, Lane).V;
- if (!isa<Instruction>(IdxLaneV) || IdxLaneV == getData(OpIdx, Lane).V)
+ if (!isa<Instruction>(IdxLaneV) || IdxLaneV == getData(OpIdx, Lane).V ||
+ isa<ExtractElementInst>(IdxLaneV))
return 0;
- SmallPtrSet<Value *, 4> Uniques;
- for (unsigned Ln = 0, E = getNumLanes(); Ln < E; ++Ln) {
+ SmallDenseMap<Value *, unsigned, 4> Uniques;
+ for (unsigned Ln : seq<unsigned>(getNumLanes())) {
if (Ln == Lane)
continue;
Value *OpIdxLnV = getData(OpIdx, Ln).V;
if (!isa<Instruction>(OpIdxLnV))
return 0;
- Uniques.insert(OpIdxLnV);
+ Uniques.try_emplace(OpIdxLnV, Ln);
}
- int UniquesCount = Uniques.size();
- int UniquesCntWithIdxLaneV =
- Uniques.contains(IdxLaneV) ? UniquesCount : UniquesCount + 1;
+ unsigned UniquesCount = Uniques.size();
+ auto IdxIt = Uniques.find(IdxLaneV);
+ unsigned UniquesCntWithIdxLaneV =
+ IdxIt != Uniques.end() ? UniquesCount : UniquesCount + 1;
Value *OpIdxLaneV = getData(OpIdx, Lane).V;
- int UniquesCntWithOpIdxLaneV =
- Uniques.contains(OpIdxLaneV) ? UniquesCount : UniquesCount + 1;
+ auto OpIdxIt = Uniques.find(OpIdxLaneV);
+ unsigned UniquesCntWithOpIdxLaneV =
+ OpIdxIt != Uniques.end() ? UniquesCount : UniquesCount + 1;
if (UniquesCntWithIdxLaneV == UniquesCntWithOpIdxLaneV)
return 0;
- return (PowerOf2Ceil(UniquesCntWithOpIdxLaneV) -
- UniquesCntWithOpIdxLaneV) -
- (PowerOf2Ceil(UniquesCntWithIdxLaneV) - UniquesCntWithIdxLaneV);
+ return std::min(bit_ceil(UniquesCntWithOpIdxLaneV) -
+ UniquesCntWithOpIdxLaneV,
+ UniquesCntWithOpIdxLaneV -
+ bit_floor(UniquesCntWithOpIdxLaneV)) -
+ ((IdxIt != Uniques.end() && UsedLanes.test(IdxIt->second))
+ ? UniquesCntWithIdxLaneV - bit_floor(UniquesCntWithIdxLaneV)
+ : bit_ceil(UniquesCntWithIdxLaneV) - UniquesCntWithIdxLaneV);
}
/// \param Lane lane of the operands under analysis.
@@ -1993,7 +2001,7 @@ public:
/// predecessors.
int getLookAheadScore(Value *LHS, Value *RHS, ArrayRef<Value *> MainAltOps,
int Lane, unsigned OpIdx, unsigned Idx,
- bool &IsUsed) {
+ bool &IsUsed, const SmallBitVector &UsedLanes) {
LookAheadHeuristics LookAhead(TLI, DL, SE, R, getNumLanes(),
LookAheadMaxDepth);
// Keep track of the instruction stack as we recurse into the operands
@@ -2002,11 +2010,10 @@ public:
LookAhead.getScoreAtLevelRec(LHS, RHS, /*U1=*/nullptr, /*U2=*/nullptr,
/*CurrLevel=*/1, MainAltOps);
if (Score) {
- int SplatScore = getSplatScore(Lane, OpIdx, Idx);
+ int SplatScore = getSplatScore(Lane, OpIdx, Idx, UsedLanes);
if (Score <= -SplatScore) {
- // Set the minimum score for splat-like sequence to avoid setting
- // failed state.
- Score = 1;
+ // Failed score.
+ Score = 0;
} else {
Score += SplatScore;
// Scale score to see the difference between different operands
@@ -2036,7 +2043,8 @@ public:
std::optional<unsigned>
getBestOperand(unsigned OpIdx, int Lane, int LastLane,
ArrayRef<ReorderingMode> ReorderingModes,
- ArrayRef<Value *> MainAltOps) {
+ ArrayRef<Value *> MainAltOps,
+ const SmallBitVector &UsedLanes) {
unsigned NumOperands = getNumOperands();
// The operand of the previous lane at OpIdx.
@@ -2092,7 +2100,7 @@ public:
Value *OpLeft = (LeftToRight) ? OpLastLane : Op;
Value *OpRight = (LeftToRight) ? Op : OpLastLane;
int Score = getLookAheadScore(OpLeft, OpRight, MainAltOps, Lane,
- OpIdx, Idx, IsUsed);
+ OpIdx, Idx, IsUsed, UsedLanes);
if (Score > static_cast<int>(BestOp.Score) ||
(Score > 0 && Score == static_cast<int>(BestOp.Score) &&
Idx == OpIdx)) {
@@ -2507,20 +2515,24 @@ public:
for (unsigned I = 0; I < NumOperands; ++I)
MainAltOps[I].push_back(getData(I, FirstLane).V);
+ SmallBitVector UsedLanes(NumLanes);
+ UsedLanes.set(FirstLane);
for (unsigned Distance = 1; Distance != NumLanes; ++Distance) {
// Visit the lane on the right and then the lane on the left.
for (int Direction : {+1, -1}) {
int Lane = FirstLane + Direction * Distance;
if (Lane < 0 || Lane >= (int)NumLanes)
continue;
+ UsedLanes.set(Lane);
int LastLane = Lane - Direction;
assert(LastLane >= 0 && LastLane < (int)NumLanes &&
"Out of bounds");
// Look for a good match for each operand.
for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) {
// Search for the operand that matches SortedOps[OpIdx][Lane-1].
- std::optional<unsigned> BestIdx = getBestOperand(
- OpIdx, Lane, LastLane, ReorderingModes, MainAltOps[OpIdx]);
+ std::optional<unsigned> BestIdx =
+ getBestOperand(OpIdx, Lane, LastLane, ReorderingModes,
+ MainAltOps[OpIdx], UsedLanes);
// By not selecting a value, we allow the operands that follow to
// select a better matching value. We will get a non-null value in
// the next run of getBestOperand().
@@ -9986,8 +9998,8 @@ public:
}
Cost += ::getShuffleCost(
TTI, TTI::SK_InsertSubvector,
- FixedVectorType::get(ScalarTy, CommonMask.size()), {}, CostKind,
- Idx, FixedVectorType::get(ScalarTy, E->getVectorFactor()));
+ getWidenedType(ScalarTy, CommonMask.size()), {}, CostKind, Idx,
+ getWidenedType(ScalarTy, E->getVectorFactor()));
if (!CommonMask.empty()) {
std::iota(std::next(CommonMask.begin(), Idx),
std::next(CommonMask.begin(), Idx + E->getVectorFactor()),
@@ -12326,8 +12338,7 @@ InstructionCost BoUpSLP::getGatherCost(ArrayRef<Value *> VL, bool ForPoisonSrc,
// Find the cost of inserting/extracting values from the vector.
// Check if the same elements are inserted several times and count them as
// shuffle candidates.
- unsigned ScalarTyNumElements = getNumElements(ScalarTy);
- APInt ShuffledElements = APInt::getZero(VecTy->getNumElements());
+ APInt ShuffledElements = APInt::getZero(VL.size());
DenseMap<Value *, unsigned> UniqueElements;
constexpr TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
InstructionCost Cost;
@@ -12347,8 +12358,7 @@ InstructionCost BoUpSLP::getGatherCost(ArrayRef<Value *> VL, bool ForPoisonSrc,
Value *V = VL[I];
// No need to shuffle duplicates for constants.
if ((ForPoisonSrc && isConstant(V)) || isa<UndefValue>(V)) {
- ShuffledElements.setBits(I * ScalarTyNumElements,
- I * ScalarTyNumElements + ScalarTyNumElements);
+ ShuffledElements.setBit(I);
ShuffleMask[I] = isa<PoisonValue>(V) ? PoisonMaskElem : I;
continue;
}
@@ -12361,14 +12371,27 @@ InstructionCost BoUpSLP::getGatherCost(ArrayRef<Value *> VL, bool ForPoisonSrc,
}
DuplicateNonConst = true;
- ShuffledElements.setBits(I * ScalarTyNumElements,
- I * ScalarTyNumElements + ScalarTyNumElements);
+ ShuffledElements.setBit(I);
ShuffleMask[I] = Res.first->second;
}
- if (ForPoisonSrc)
- Cost =
- TTI->getScalarizationOverhead(VecTy, ~ShuffledElements, /*Insert*/ true,
- /*Extract*/ false, CostKind);
+ if (ForPoisonSrc) {
+ if (isa<FixedVectorType>(ScalarTy)) {
+ assert(SLPReVec && "Only supported by REVEC.");
+ // We don't need to insert elements one by one. Instead, we can insert the
+ // entire vector into the destination.
+ Cost = 0;
+ unsigned ScalarTyNumElements = getNumElements(ScalarTy);
+ for (unsigned I : seq<unsigned>(VL.size()))
+ if (!ShuffledElements[I])
+ Cost += TTI->getShuffleCost(
+ TTI::SK_InsertSubvector, VecTy, std::nullopt, CostKind,
+ I * ScalarTyNumElements, cast<FixedVectorType>(ScalarTy));
+ } else {
+ Cost = TTI->getScalarizationOverhead(VecTy, ~ShuffledElements,
+ /*Insert*/ true,
+ /*Extract*/ false, CostKind);
+ }
+ }
if (DuplicateNonConst)
Cost += ::getShuffleCost(*TTI, TargetTransformInfo::SK_PermuteSingleSrc,
VecTy, ShuffleMask);
@@ -18668,6 +18691,14 @@ public:
// Vectorize a tree.
Value *VectorizedRoot =
V.vectorizeTree(LocalExternallyUsedValues, InsertPt);
+ // Update TrackedToOrig mapping, since the tracked values might be
+ // updated.
+ for (Value *RdxVal : Candidates) {
+ Value *OrigVal = TrackedToOrig.at(RdxVal);
+ Value *TransformedRdxVal = TrackedVals.at(OrigVal);
+ if (TransformedRdxVal != RdxVal)
+ TrackedToOrig.try_emplace(TransformedRdxVal, OrigVal);
+ }
Builder.SetInsertPoint(InsertPt);
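
The new splat score uses the distance from the unique-scalar count to the nearest power of two in either direction (SLP prefers power-of-2 unique-scalar counts), instead of only rounding up. A minimal model with C++20 bit operations:

#include <algorithm>
#include <bit>
#include <cassert>

// Distance from Count (> 0) to the nearest power of two, either direction.
unsigned distToPow2(unsigned Count) {
  return std::min(std::bit_ceil(Count) - Count, Count - std::bit_floor(Count));
}

int main() {
  assert(distToPow2(5) == 1); // 4 is closer than 8
  assert(distToPow2(7) == 1); // 8 is closer than 4
  assert(distToPow2(8) == 0); // already a power of two
}
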
diff --git a/llvm/lib/Transforms/Vectorize/SandboxVectorizer/Region.cpp b/llvm/lib/Transforms/Vectorize/SandboxVectorizer/Region.cpp
index 34aa9f3..5f2c284 100644
--- a/llvm/lib/Transforms/Vectorize/SandboxVectorizer/Region.cpp
+++ b/llvm/lib/Transforms/Vectorize/SandboxVectorizer/Region.cpp
@@ -11,15 +11,23 @@
namespace llvm::sandboxir {
Region::Region(Context &Ctx) : Ctx(Ctx) {
- static unsigned StaticRegionID;
- RegionID = StaticRegionID++;
+ LLVMContext &LLVMCtx = Ctx.LLVMCtx;
+ auto *RegionStrMD = MDString::get(LLVMCtx, RegionStr);
+ RegionMDN = MDNode::getDistinct(LLVMCtx, {RegionStrMD});
}
Region::~Region() {}
-void Region::add(Instruction *I) { Insts.insert(I); }
+void Region::add(Instruction *I) {
+ Insts.insert(I);
+ // TODO: Consider tagging instructions lazily.
+ cast<llvm::Instruction>(I->Val)->setMetadata(MDKind, RegionMDN);
+}
-void Region::remove(Instruction *I) { Insts.remove(I); }
+void Region::remove(Instruction *I) {
+ Insts.remove(I);
+ cast<llvm::Instruction>(I->Val)->setMetadata(MDKind, nullptr);
+}
#ifndef NDEBUG
bool Region::operator==(const Region &Other) const {
@@ -31,7 +39,6 @@ bool Region::operator==(const Region &Other) const {
}
void Region::dump(raw_ostream &OS) const {
- OS << "RegionID: " << getID() << "\n";
for (auto *I : Insts)
OS << *I << "\n";
}
@@ -42,4 +49,27 @@ void Region::dump() const {
}
#endif // NDEBUG
+SmallVector<std::unique_ptr<Region>> Region::createRegionsFromMD(Function &F) {
+ SmallVector<std::unique_ptr<Region>> Regions;
+ DenseMap<MDNode *, Region *> MDNToRegion;
+ auto &Ctx = F.getContext();
+ for (BasicBlock &BB : F) {
+ for (Instruction &Inst : BB) {
+ if (auto *MDN = cast<llvm::Instruction>(Inst.Val)->getMetadata(MDKind)) {
+ Region *R = nullptr;
+ auto It = MDNToRegion.find(MDN);
+ if (It == MDNToRegion.end()) {
+ Regions.push_back(std::make_unique<Region>(Ctx));
+ R = Regions.back().get();
+ MDNToRegion[MDN] = R;
+ } else {
+ R = It->second;
+ }
+ R->add(&Inst);
+ }
+ }
+ }
+ return Regions;
+}
+
} // namespace llvm::sandboxir
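
Since regions are now identified by a distinct metadata node on every member instruction, they survive in the IR and can be rebuilt later. A hedged usage sketch, assuming an existing sandboxir context, function, and instructions (the header path is assumed from the diff location):

#include "llvm/Transforms/Vectorize/SandboxVectorizer/Region.h"

// I0 and I1 are assumed to be sandboxir instructions obtained elsewhere.
void roundTrip(llvm::sandboxir::Context &Ctx, llvm::sandboxir::Function &F,
               llvm::sandboxir::Instruction *I0,
               llvm::sandboxir::Instruction *I1) {
  llvm::sandboxir::Region R(Ctx);
  R.add(I0); // attaches the region's distinct MDNode to I0
  R.add(I1);
  // Later, possibly in another pass: recover all regions from the metadata.
  auto Regions = llvm::sandboxir::Region::createRegionsFromMD(F);
  (void)Regions;
}
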
diff --git a/llvm/lib/Transforms/Vectorize/VPlan.cpp b/llvm/lib/Transforms/Vectorize/VPlan.cpp
index 9a0c353..5e4d487 100644
--- a/llvm/lib/Transforms/Vectorize/VPlan.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlan.cpp
@@ -233,17 +233,16 @@ Value *VPTransformState::get(VPValue *Def, const VPIteration &Instance) {
return Def->getLiveInIRValue();
if (hasScalarValue(Def, Instance)) {
- return Data
- .PerPartScalars[Def][Instance.Part][Instance.Lane.mapToCacheIndex(VF)];
+ return Data.VPV2Scalars[Def][Instance.Lane.mapToCacheIndex(VF)];
}
if (!Instance.Lane.isFirstLane() &&
vputils::isUniformAfterVectorization(Def) &&
hasScalarValue(Def, {Instance.Part, VPLane::getFirstLane()})) {
- return Data.PerPartScalars[Def][Instance.Part][0];
+ return Data.VPV2Scalars[Def][0];
}
- assert(hasVectorValue(Def, Instance.Part));
- auto *VecPart = Data.PerPartOutput[Def][Instance.Part];
+ assert(hasVectorValue(Def));
+ auto *VecPart = Data.VPV2Vector[Def];
if (!VecPart->getType()->isVectorTy()) {
assert(Instance.Lane.isFirstLane() && "cannot get lane > 0 for scalar");
return VecPart;
@@ -255,20 +254,20 @@ Value *VPTransformState::get(VPValue *Def, const VPIteration &Instance) {
return Extract;
}
-Value *VPTransformState::get(VPValue *Def, unsigned Part, bool NeedsScalar) {
+Value *VPTransformState::get(VPValue *Def, bool NeedsScalar) {
if (NeedsScalar) {
- assert((VF.isScalar() || Def->isLiveIn() || hasVectorValue(Def, Part) ||
+ assert((VF.isScalar() || Def->isLiveIn() || hasVectorValue(Def) ||
!vputils::onlyFirstLaneUsed(Def) ||
- (hasScalarValue(Def, VPIteration(Part, 0)) &&
- Data.PerPartScalars[Def][Part].size() == 1)) &&
+ (hasScalarValue(Def, VPIteration(0, 0)) &&
+ Data.VPV2Scalars[Def].size() == 1)) &&
"Trying to access a single scalar per part but has multiple scalars "
"per part.");
- return get(Def, VPIteration(Part, 0));
+ return get(Def, VPIteration(0, 0));
}
// If Values have been set for this Def return the one relevant for \p Part.
- if (hasVectorValue(Def, Part))
- return Data.PerPartOutput[Def][Part];
+ if (hasVectorValue(Def))
+ return Data.VPV2Vector[Def];
auto GetBroadcastInstrs = [this, Def](Value *V) {
bool SafeToHoist = Def->isDefinedOutsideLoopRegions();
@@ -290,21 +289,19 @@ Value *VPTransformState::get(VPValue *Def, unsigned Part, bool NeedsScalar) {
return Shuf;
};
- if (!hasScalarValue(Def, {Part, 0})) {
+ if (!hasScalarValue(Def, {0, 0})) {
assert(Def->isLiveIn() && "expected a live-in");
- if (Part != 0)
- return get(Def, 0);
Value *IRV = Def->getLiveInIRValue();
Value *B = GetBroadcastInstrs(IRV);
- set(Def, B, Part);
+ set(Def, B);
return B;
}
- Value *ScalarValue = get(Def, {Part, 0});
+ Value *ScalarValue = get(Def, {0, 0});
// If we aren't vectorizing, we can just copy the scalar map values over
// to the vector map.
if (VF.isScalar()) {
- set(Def, ScalarValue, Part);
+ set(Def, ScalarValue);
return ScalarValue;
}
@@ -312,7 +309,7 @@ Value *VPTransformState::get(VPValue *Def, unsigned Part, bool NeedsScalar) {
unsigned LastLane = IsUniform ? 0 : VF.getKnownMinValue() - 1;
// Check if there is a scalar value for the selected lane.
- if (!hasScalarValue(Def, {Part, LastLane})) {
+ if (!hasScalarValue(Def, {0, LastLane})) {
// At the moment, VPWidenIntOrFpInductionRecipes, VPScalarIVStepsRecipes and
// VPExpandSCEVRecipes can also be uniform.
assert((isa<VPWidenIntOrFpInductionRecipe>(Def->getDefiningRecipe()) ||
@@ -323,7 +320,7 @@ Value *VPTransformState::get(VPValue *Def, unsigned Part, bool NeedsScalar) {
LastLane = 0;
}
- auto *LastInst = cast<Instruction>(get(Def, {Part, LastLane}));
+ auto *LastInst = cast<Instruction>(get(Def, {0, LastLane}));
// Set the insert point after the last scalarized instruction or after the
// last PHI, if LastInst is a PHI. This ensures the insertelement sequence
// will directly follow the scalar definitions.
@@ -336,22 +333,22 @@ Value *VPTransformState::get(VPValue *Def, unsigned Part, bool NeedsScalar) {
// However, if we are vectorizing, we need to construct the vector values.
// If the value is known to be uniform after vectorization, we can just
- // broadcast the scalar value corresponding to lane zero for each unroll
- // iteration. Otherwise, we construct the vector values using
- // insertelement instructions. Since the resulting vectors are stored in
- // State, we will only generate the insertelements once.
+ // broadcast the scalar value corresponding to lane zero. Otherwise, we
+ // construct the vector values using insertelement instructions. Since the
+ // resulting vectors are stored in State, we will only generate the
+ // insertelements once.
Value *VectorValue = nullptr;
if (IsUniform) {
VectorValue = GetBroadcastInstrs(ScalarValue);
- set(Def, VectorValue, Part);
+ set(Def, VectorValue);
} else {
// Initialize packing with insertelements to start from undef.
assert(!VF.isScalable() && "VF is assumed to be non scalable.");
Value *Undef = PoisonValue::get(VectorType::get(LastInst->getType(), VF));
- set(Def, Undef, Part);
+ set(Def, Undef);
for (unsigned Lane = 0; Lane < VF.getKnownMinValue(); ++Lane)
- packScalarIntoVectorValue(Def, {Part, Lane});
- VectorValue = get(Def, Part);
+ packScalarIntoVectorValue(Def, {0, Lane});
+ VectorValue = get(Def);
}
Builder.restoreIP(OldIP);
return VectorValue;
@@ -406,10 +403,10 @@ void VPTransformState::setDebugLocFrom(DebugLoc DL) {
void VPTransformState::packScalarIntoVectorValue(VPValue *Def,
const VPIteration &Instance) {
Value *ScalarInst = get(Def, Instance);
- Value *VectorValue = get(Def, Instance.Part);
+ Value *VectorValue = get(Def);
VectorValue = Builder.CreateInsertElement(
VectorValue, ScalarInst, Instance.Lane.getAsRuntimeExpr(Builder, VF));
- set(Def, VectorValue, Instance.Part);
+ set(Def, VectorValue);
}
BasicBlock *
@@ -772,15 +769,15 @@ void VPRegionBlock::execute(VPTransformState *State) {
// Enter replicating mode.
State->Instance = VPIteration(0, 0);
- assert(!State->VF.isScalable() && "VF is assumed to be non scalable.");
- for (unsigned Lane = 0, VF = State->VF.getKnownMinValue(); Lane < VF;
- ++Lane) {
- State->Instance->Lane = VPLane(Lane, VPLane::Kind::First);
- // Visit the VPBlocks connected to \p this, starting from it.
- for (VPBlockBase *Block : RPOT) {
- LLVM_DEBUG(dbgs() << "LV: VPBlock in RPO " << Block->getName() << '\n');
- Block->execute(State);
- }
+ assert(!State->VF.isScalable() && "VF is assumed to be non scalable.");
+ for (unsigned Lane = 0, VF = State->VF.getKnownMinValue(); Lane < VF;
+ ++Lane) {
+ State->Instance->Lane = VPLane(Lane, VPLane::Kind::First);
+ // Visit the VPBlocks connected to \p this, starting from it.
+ for (VPBlockBase *Block : RPOT) {
+ LLVM_DEBUG(dbgs() << "LV: VPBlock in RPO " << Block->getName() << '\n');
+ Block->execute(State);
+ }
}
// Exit replicating mode.
@@ -1074,12 +1071,12 @@ void VPlan::execute(VPTransformState *State) {
isa<VPWidenIntOrFpInductionRecipe>(&R)) {
PHINode *Phi = nullptr;
if (isa<VPWidenIntOrFpInductionRecipe>(&R)) {
- Phi = cast<PHINode>(State->get(R.getVPSingleValue(), 0));
+ Phi = cast<PHINode>(State->get(R.getVPSingleValue()));
} else {
auto *WidenPhi = cast<VPWidenPointerInductionRecipe>(&R);
assert(!WidenPhi->onlyScalarsGenerated(State->VF.isScalable()) &&
"recipe generating only scalars should have been replaced");
- auto *GEP = cast<GetElementPtrInst>(State->get(WidenPhi, 0));
+ auto *GEP = cast<GetElementPtrInst>(State->get(WidenPhi));
Phi = cast<PHINode>(GEP->getPointerOperand());
}
@@ -1092,7 +1089,7 @@ void VPlan::execute(VPTransformState *State) {
// Use the steps for the last part as backedge value for the induction.
if (auto *IV = dyn_cast<VPWidenIntOrFpInductionRecipe>(&R))
- Inc->setOperand(0, State->get(IV->getLastUnrolledPartOperand(), 0));
+ Inc->setOperand(0, State->get(IV->getLastUnrolledPartOperand()));
continue;
}
@@ -1101,8 +1098,8 @@ void VPlan::execute(VPTransformState *State) {
isa<VPCanonicalIVPHIRecipe, VPEVLBasedIVPHIRecipe>(PhiR) ||
(isa<VPReductionPHIRecipe>(PhiR) &&
cast<VPReductionPHIRecipe>(PhiR)->isInLoop());
- Value *Phi = State->get(PhiR, 0, NeedsScalar);
- Value *Val = State->get(PhiR->getBackedgeValue(), 0, NeedsScalar);
+ Value *Phi = State->get(PhiR, NeedsScalar);
+ Value *Val = State->get(PhiR->getBackedgeValue(), NeedsScalar);
cast<PHINode>(Phi)->addIncoming(Val, VectorLatchBB);
}
diff --git a/llvm/lib/Transforms/Vectorize/VPlan.h b/llvm/lib/Transforms/Vectorize/VPlan.h
index fda0a89..c886a39 100644
--- a/llvm/lib/Transforms/Vectorize/VPlan.h
+++ b/llvm/lib/Transforms/Vectorize/VPlan.h
@@ -254,7 +254,7 @@ struct VPTransformState {
DominatorTree *DT, IRBuilderBase &Builder,
InnerLoopVectorizer *ILV, VPlan *Plan);
- /// The chosen Vectorization and Unroll Factors of the loop being vectorized.
+ /// The chosen Vectorization Factor of the loop being vectorized.
ElementCount VF;
/// Hold the indices to generate specific scalar instructions. Null indicates
@@ -263,72 +263,52 @@ struct VPTransformState {
std::optional<VPIteration> Instance;
struct DataState {
- /// A type for vectorized values in the new loop. Each value from the
- /// original loop, when vectorized, is represented by UF vector values in
- /// the new unrolled loop, where UF is the unroll factor.
- typedef SmallVector<Value *, 2> PerPartValuesTy;
+ // Each value from the original loop, when vectorized, is represented by a
+ // vector value in the map.
+ DenseMap<VPValue *, Value *> VPV2Vector;
- DenseMap<VPValue *, PerPartValuesTy> PerPartOutput;
-
- using ScalarsPerPartValuesTy = SmallVector<SmallVector<Value *, 4>, 2>;
- DenseMap<VPValue *, ScalarsPerPartValuesTy> PerPartScalars;
+ DenseMap<VPValue *, SmallVector<Value *, 4>> VPV2Scalars;
} Data;
- /// Get the generated vector Value for a given VPValue \p Def and a given \p
- /// Part if \p IsScalar is false, otherwise return the generated scalar
- /// for \p Part. \See set.
- Value *get(VPValue *Def, unsigned Part, bool IsScalar = false);
+ /// Get the generated vector Value for a given VPValue \p Def if \p IsScalar
+ /// is false, otherwise return the generated scalar. \See set.
+ Value *get(VPValue *Def, bool IsScalar = false);
/// Get the generated Value for a given VPValue and given Part and Lane.
Value *get(VPValue *Def, const VPIteration &Instance);
- bool hasVectorValue(VPValue *Def, unsigned Part) {
- auto I = Data.PerPartOutput.find(Def);
- return I != Data.PerPartOutput.end() && Part < I->second.size() &&
- I->second[Part];
- }
+ bool hasVectorValue(VPValue *Def) { return Data.VPV2Vector.contains(Def); }
bool hasScalarValue(VPValue *Def, VPIteration Instance) {
- auto I = Data.PerPartScalars.find(Def);
- if (I == Data.PerPartScalars.end())
+ auto I = Data.VPV2Scalars.find(Def);
+ if (I == Data.VPV2Scalars.end())
return false;
unsigned CacheIdx = Instance.Lane.mapToCacheIndex(VF);
- return Instance.Part < I->second.size() &&
- CacheIdx < I->second[Instance.Part].size() &&
- I->second[Instance.Part][CacheIdx];
+ return CacheIdx < I->second.size() && I->second[CacheIdx];
}
- /// Set the generated vector Value for a given VPValue and a given Part, if \p
- /// IsScalar is false. If \p IsScalar is true, set the scalar in (Part, 0).
- void set(VPValue *Def, Value *V, unsigned Part, bool IsScalar = false) {
+ /// Set the generated vector Value for a given VPValue, if \p
+ /// IsScalar is false. If \p IsScalar is true, set the scalar in lane 0.
+ void set(VPValue *Def, Value *V, bool IsScalar = false) {
if (IsScalar) {
- set(Def, V, VPIteration(Part, 0));
+ set(Def, V, VPIteration(0, 0));
return;
}
assert((VF.isScalar() || V->getType()->isVectorTy()) &&
- "scalar values must be stored as (Part, 0)");
- if (!Data.PerPartOutput.count(Def)) {
- DataState::PerPartValuesTy Entry(1);
- Data.PerPartOutput[Def] = Entry;
- }
- Data.PerPartOutput[Def][Part] = V;
+ "scalar values must be stored as (0, 0)");
+ Data.VPV2Vector[Def] = V;
}
/// Reset an existing vector value for \p Def and a given \p Part.
- void reset(VPValue *Def, Value *V, unsigned Part) {
- auto Iter = Data.PerPartOutput.find(Def);
- assert(Iter != Data.PerPartOutput.end() &&
- "need to overwrite existing value");
- Iter->second[Part] = V;
+ void reset(VPValue *Def, Value *V) {
+ assert(Data.VPV2Vector.contains(Def) && "need to overwrite existing value");
+ Data.VPV2Vector[Def] = V;
}
/// Set the generated scalar \p V for \p Def and the given \p Instance.
void set(VPValue *Def, Value *V, const VPIteration &Instance) {
- auto Iter = Data.PerPartScalars.insert({Def, {}});
- auto &PerPartVec = Iter.first->second;
- if (PerPartVec.size() <= Instance.Part)
- PerPartVec.resize(Instance.Part + 1);
- auto &Scalars = PerPartVec[Instance.Part];
+ auto Iter = Data.VPV2Scalars.insert({Def, {}});
+ auto &Scalars = Iter.first->second;
unsigned CacheIdx = Instance.Lane.mapToCacheIndex(VF);
if (Scalars.size() <= CacheIdx)
Scalars.resize(CacheIdx + 1);
@@ -338,15 +318,13 @@ struct VPTransformState {
/// Reset an existing scalar value for \p Def and a given \p Instance.
void reset(VPValue *Def, Value *V, const VPIteration &Instance) {
- auto Iter = Data.PerPartScalars.find(Def);
- assert(Iter != Data.PerPartScalars.end() &&
- "need to overwrite existing value");
- assert(Instance.Part < Iter->second.size() &&
+ auto Iter = Data.VPV2Scalars.find(Def);
+ assert(Iter != Data.VPV2Scalars.end() &&
"need to overwrite existing value");
unsigned CacheIdx = Instance.Lane.mapToCacheIndex(VF);
- assert(CacheIdx < Iter->second[Instance.Part].size() &&
+ assert(CacheIdx < Iter->second.size() &&
"need to overwrite existing value");
- Iter->second[Instance.Part][CacheIdx] = V;
+ Iter->second[CacheIdx] = V;
}
/// Add additional metadata to \p To that was not present on \p Orig.
@@ -1275,9 +1253,7 @@ public:
ComputeReductionResult,
- // Takes the VPValue to extract from as first operand and the lane or part
- // to extract as second operand, counting from the end starting with 1 for
+ // Takes the VPValue to extract from as first operand and the lane to
+ // extract as second operand, counting from the end starting with 1 for
- // last. The second operand must be a positive constant and <= VF when
- // extracting from a vector or <= UF when extracting from an unrolled
- // scalar.
+ // last. The second operand must be a positive constant and <= VF.
ExtractFromEnd,
LogicalAnd, // Non-poison propagating logical And.
// Add an offset in bytes (second operand) to a base pointer (first
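The VPlan.h hunks above collapse the per-unroll-part storage (UF generated vector values per VPValue) into a single generated value per VPValue, now that unrolling is modeled explicitly by recipes rather than inside the state. A minimal compilable sketch of the shape change, using plain STL stand-ins instead of the real llvm::DenseMap/VPValue types (all names below are illustrative, not part of the patch):

#include <cassert>
#include <unordered_map>
#include <vector>

struct Value {};
using VPValueID = int; // stand-in key for VPValue *

// Old shape: each VPValue maps to UF generated vectors, one per unroll part.
std::unordered_map<VPValueID, std::vector<Value *>> PerPartOutput;

Value *getOld(VPValueID Def, unsigned Part) {
  auto I = PerPartOutput.find(Def);
  assert(I != PerPartOutput.end() && Part < I->second.size());
  return I->second[Part];
}

// New shape: each VPValue maps to exactly one generated vector.
std::unordered_map<VPValueID, Value *> VPV2Vector;

Value *getNew(VPValueID Def) {
  auto I = VPV2Vector.find(Def);
  assert(I != VPV2Vector.end());
  return I->second;
}

int main() {
  Value V;
  PerPartOutput[1] = {&V, &V}; // UF = 2: two parts per value, old scheme
  VPV2Vector[1] = &V;          // single value, no part dimension
  assert(getOld(1, 1) == getNew(1));
  return 0;
}

With the part dimension gone, hasVectorValue() degenerates to a plain contains() check, exactly as in the hunk above.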
diff --git a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
index 8f4b2951..f33293e 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
@@ -404,8 +404,8 @@ Value *VPInstruction::generate(VPTransformState &State) {
if (Instruction::isBinaryOp(getOpcode())) {
bool OnlyFirstLaneUsed = vputils::onlyFirstLaneUsed(this);
- Value *A = State.get(getOperand(0), 0, OnlyFirstLaneUsed);
- Value *B = State.get(getOperand(1), 0, OnlyFirstLaneUsed);
+ Value *A = State.get(getOperand(0), OnlyFirstLaneUsed);
+ Value *B = State.get(getOperand(1), OnlyFirstLaneUsed);
auto *Res =
Builder.CreateBinOp((Instruction::BinaryOps)getOpcode(), A, B, Name);
if (auto *I = dyn_cast<Instruction>(Res))
@@ -415,19 +415,19 @@ Value *VPInstruction::generate(VPTransformState &State) {
switch (getOpcode()) {
case VPInstruction::Not: {
- Value *A = State.get(getOperand(0), 0);
+ Value *A = State.get(getOperand(0));
return Builder.CreateNot(A, Name);
}
case Instruction::ICmp: {
bool OnlyFirstLaneUsed = vputils::onlyFirstLaneUsed(this);
- Value *A = State.get(getOperand(0), 0, OnlyFirstLaneUsed);
- Value *B = State.get(getOperand(1), 0, OnlyFirstLaneUsed);
+ Value *A = State.get(getOperand(0), OnlyFirstLaneUsed);
+ Value *B = State.get(getOperand(1), OnlyFirstLaneUsed);
return Builder.CreateCmp(getPredicate(), A, B, Name);
}
case Instruction::Select: {
- Value *Cond = State.get(getOperand(0), 0);
- Value *Op1 = State.get(getOperand(1), 0);
- Value *Op2 = State.get(getOperand(2), 0);
+ Value *Cond = State.get(getOperand(0));
+ Value *Op1 = State.get(getOperand(1));
+ Value *Op2 = State.get(getOperand(2));
return Builder.CreateSelect(Cond, Op1, Op2, Name);
}
case VPInstruction::ActiveLaneMask: {
@@ -461,10 +461,10 @@ Value *VPInstruction::generate(VPTransformState &State) {
// v2 = a[i, i+1, i+2, i+3];
// v3 = vector(v1(3), v2(0, 1, 2))
- auto *V1 = State.get(getOperand(0), 0);
+ auto *V1 = State.get(getOperand(0));
if (!V1->getType()->isVectorTy())
return V1;
- Value *V2 = State.get(getOperand(1), 0);
+ Value *V2 = State.get(getOperand(1));
return Builder.CreateVectorSplice(V1, V2, -1, Name);
}
case VPInstruction::CalculateTripCountMinusVF: {
@@ -530,8 +530,8 @@ Value *VPInstruction::generate(VPTransformState &State) {
}
case VPInstruction::BranchOnCount: {
// First create the compare.
- Value *IV = State.get(getOperand(0), 0, /*IsScalar*/ true);
- Value *TC = State.get(getOperand(1), 0, /*IsScalar*/ true);
+ Value *IV = State.get(getOperand(0), /*IsScalar*/ true);
+ Value *TC = State.get(getOperand(1), /*IsScalar*/ true);
Value *Cond = Builder.CreateICmpEQ(IV, TC);
// Now create the branch.
@@ -566,7 +566,7 @@ Value *VPInstruction::generate(VPTransformState &State) {
unsigned UF = getNumOperands() - 1;
VectorParts RdxParts(UF);
for (unsigned Part = 0; Part < UF; ++Part)
- RdxParts[Part] = State.get(getOperand(1 + Part), 0, PhiR->isInLoop());
+ RdxParts[Part] = State.get(getOperand(1 + Part), PhiR->isInLoop());
// If the vector reduction can be performed in a smaller type, we truncate
// then extend the loop exit value to enable InstCombine to evaluate the
@@ -637,30 +637,29 @@ Value *VPInstruction::generate(VPTransformState &State) {
VPIteration(0, VPLane::getLaneFromEnd(State.VF, Offset)));
} else {
assert(Offset <= 1 && "invalid offset to extract from");
- // When loop is unrolled without vectorizing, retrieve UF - Offset.
- Res = State.get(getOperand(0), 1 - Offset);
+ Res = State.get(getOperand(0));
}
if (isa<ExtractElementInst>(Res))
Res->setName(Name);
return Res;
}
case VPInstruction::LogicalAnd: {
- Value *A = State.get(getOperand(0), 0);
- Value *B = State.get(getOperand(1), 0);
+ Value *A = State.get(getOperand(0));
+ Value *B = State.get(getOperand(1));
return Builder.CreateLogicalAnd(A, B, Name);
}
case VPInstruction::PtrAdd: {
assert(vputils::onlyFirstLaneUsed(this) &&
"can only generate first lane for PtrAdd");
- Value *Ptr = State.get(getOperand(0), 0, /* IsScalar */ true);
- Value *Addend = State.get(getOperand(1), 0, /* IsScalar */ true);
+ Value *Ptr = State.get(getOperand(0), /* IsScalar */ true);
+ Value *Addend = State.get(getOperand(1), /* IsScalar */ true);
return Builder.CreatePtrAdd(Ptr, Addend, Name);
}
case VPInstruction::ResumePhi: {
Value *IncomingFromVPlanPred =
- State.get(getOperand(0), 0, /* IsScalar */ true);
+ State.get(getOperand(0), /* IsScalar */ true);
Value *IncomingFromOtherPreds =
- State.get(getOperand(1), 0, /* IsScalar */ true);
+ State.get(getOperand(1), /* IsScalar */ true);
auto *NewPhi =
Builder.CreatePHI(IncomingFromOtherPreds->getType(), 2, Name);
BasicBlock *VPlanPred =
@@ -731,7 +730,7 @@ void VPInstruction::execute(VPTransformState &State) {
(GeneratedValue->getType()->isVectorTy() == !GeneratesPerFirstLaneOnly ||
State.VF.isScalar()) &&
"scalar value but not only first lane defined");
- State.set(this, GeneratedValue, 0,
+ State.set(this, GeneratedValue,
/*IsScalar*/ GeneratesPerFirstLaneOnly);
}
@@ -921,7 +920,7 @@ void VPWidenCallRecipe::execute(VPTransformState &State) {
else if (VFTy && !VFTy->getParamType(I.index())->isVectorTy())
Arg = State.get(I.value(), VPIteration(0, 0));
else
- Arg = State.get(I.value(), 0);
+ Arg = State.get(I.value());
if (UseIntrinsic &&
isVectorIntrinsicWithOverloadTypeAtArg(VectorIntrinsicID, I.index()))
TysForDecl.push_back(Arg->getType());
@@ -952,7 +951,7 @@ void VPWidenCallRecipe::execute(VPTransformState &State) {
V->copyFastMathFlags(CI);
if (!V->getType()->isVoidTy())
- State.set(this, V, 0);
+ State.set(this, V);
State.addMetadata(V, CI);
}
@@ -1056,11 +1055,11 @@ void VPWidenSelectRecipe::execute(VPTransformState &State) {
auto *InvarCond =
isInvariantCond() ? State.get(getCond(), VPIteration(0, 0)) : nullptr;
- Value *Cond = InvarCond ? InvarCond : State.get(getCond(), 0);
- Value *Op0 = State.get(getOperand(1), 0);
- Value *Op1 = State.get(getOperand(2), 0);
+ Value *Cond = InvarCond ? InvarCond : State.get(getCond());
+ Value *Op0 = State.get(getOperand(1));
+ Value *Op1 = State.get(getOperand(2));
Value *Sel = State.Builder.CreateSelect(Cond, Op0, Op1);
- State.set(this, Sel, 0);
+ State.set(this, Sel);
State.addMetadata(Sel, dyn_cast_or_null<Instruction>(getUnderlyingValue()));
}
@@ -1146,7 +1145,7 @@ void VPWidenRecipe::execute(VPTransformState &State) {
// Just widen unops and binops.
SmallVector<Value *, 2> Ops;
for (VPValue *VPOp : operands())
- Ops.push_back(State.get(VPOp, 0));
+ Ops.push_back(State.get(VPOp));
Value *V = Builder.CreateNAryOp(Opcode, Ops);
@@ -1154,23 +1153,23 @@ void VPWidenRecipe::execute(VPTransformState &State) {
setFlags(VecOp);
// Use this vector value for all users of the original instruction.
- State.set(this, V, 0);
+ State.set(this, V);
State.addMetadata(V, dyn_cast_or_null<Instruction>(getUnderlyingValue()));
break;
}
case Instruction::Freeze: {
- Value *Op = State.get(getOperand(0), 0);
+ Value *Op = State.get(getOperand(0));
Value *Freeze = Builder.CreateFreeze(Op);
- State.set(this, Freeze, 0);
+ State.set(this, Freeze);
break;
}
case Instruction::ICmp:
case Instruction::FCmp: {
// Widen compares. Generate vector compares.
bool FCmp = Opcode == Instruction::FCmp;
- Value *A = State.get(getOperand(0), 0);
- Value *B = State.get(getOperand(1), 0);
+ Value *A = State.get(getOperand(0));
+ Value *B = State.get(getOperand(1));
Value *C = nullptr;
if (FCmp) {
// Propagate fast math flags.
@@ -1181,7 +1180,7 @@ void VPWidenRecipe::execute(VPTransformState &State) {
} else {
C = Builder.CreateICmp(getPredicate(), A, B);
}
- State.set(this, C, 0);
+ State.set(this, C);
State.addMetadata(C, dyn_cast_or_null<Instruction>(getUnderlyingValue()));
break;
}
@@ -1196,7 +1195,7 @@ void VPWidenRecipe::execute(VPTransformState &State) {
// Verify that VPlan type inference results agree with the type of the
// generated values.
assert(VectorType::get(State.TypeAnalysis.inferScalarType(this), State.VF) ==
- State.get(this, 0)->getType() &&
+ State.get(this)->getType() &&
"inferred type and type from generated instructions do not match");
#endif
}
@@ -1283,11 +1282,11 @@ void VPWidenEVLRecipe::execute(VPTransformState &State) {
State.setDebugLocFrom(getDebugLoc());
- assert(State.get(getOperand(0), 0)->getType()->isVectorTy() &&
+ assert(State.get(getOperand(0))->getType()->isVectorTy() &&
"VPWidenEVLRecipe should not be used for scalars");
VPValue *EVL = getEVL();
- Value *EVLArg = State.get(EVL, 0, /*NeedsScalar=*/true);
+ Value *EVLArg = State.get(EVL, /*NeedsScalar=*/true);
IRBuilderBase &BuilderIR = State.Builder;
VectorBuilder Builder(BuilderIR);
Value *Mask = BuilderIR.CreateVectorSplat(State.VF, BuilderIR.getTrue());
@@ -1295,7 +1294,7 @@ void VPWidenEVLRecipe::execute(VPTransformState &State) {
SmallVector<Value *, 4> Ops;
for (unsigned I = 0, E = getNumOperands() - 1; I < E; ++I) {
VPValue *VPOp = getOperand(I);
- Ops.push_back(State.get(VPOp, 0));
+ Ops.push_back(State.get(VPOp));
}
Builder.setMask(Mask).setEVL(EVLArg);
@@ -1306,7 +1305,7 @@ void VPWidenEVLRecipe::execute(VPTransformState &State) {
if (isa<FPMathOperator>(VPInst))
setFlags(cast<Instruction>(VPInst));
- State.set(this, VPInst, 0);
+ State.set(this, VPInst);
State.addMetadata(VPInst,
dyn_cast_or_null<Instruction>(getUnderlyingValue()));
}
@@ -1338,9 +1337,9 @@ void VPWidenCastRecipe::execute(VPTransformState &State) {
assert(State.VF.isVector() && "Not vectorizing?");
Type *DestTy = VectorType::get(getResultType(), State.VF);
VPValue *Op = getOperand(0);
- Value *A = State.get(Op, 0);
+ Value *A = State.get(Op);
Value *Cast = Builder.CreateCast(Instruction::CastOps(Opcode), A, DestTy);
- State.set(this, Cast, 0);
+ State.set(this, Cast);
State.addMetadata(Cast, cast_or_null<Instruction>(getUnderlyingValue()));
}
@@ -1474,7 +1473,7 @@ void VPWidenIntOrFpInductionRecipe::execute(VPTransformState &State) {
if (VPValue *SplatVFOperand = getSplatVFValue()) {
// The recipe has been unrolled. In that case, fetch the splat value for the
// induction increment.
- SplatVF = State.get(SplatVFOperand, 0);
+ SplatVF = State.get(SplatVFOperand);
} else {
// Multiply the vectorization factor by the step using integer or
// floating-point arithmetic as appropriate.
@@ -1503,10 +1502,10 @@ void VPWidenIntOrFpInductionRecipe::execute(VPTransformState &State) {
PHINode *VecInd = PHINode::Create(SteppedStart->getType(), 2, "vec.ind");
VecInd->insertBefore(State.CFG.PrevBB->getFirstInsertionPt());
VecInd->setDebugLoc(EntryVal->getDebugLoc());
- State.set(this, VecInd, 0);
+ State.set(this, VecInd);
- Instruction *LastInduction = cast<Instruction>(Builder.CreateBinOp(
- AddOp, VecInd, SplatVF, "vec.ind.next", EntryVal->getDebugLoc()));
+ Instruction *LastInduction = cast<Instruction>(
+ Builder.CreateBinOp(AddOp, VecInd, SplatVF, "vec.ind.next"));
if (isa<TruncInst>(EntryVal))
State.addMetadata(LastInduction, EntryVal);
LastInduction->setDebugLoc(EntryVal->getDebugLoc());
@@ -1628,7 +1627,7 @@ void VPScalarIVStepsRecipe::execute(VPTransformState &State) {
InitVec = Builder.CreateSIToFP(InitVec, VecIVTy);
auto *Mul = Builder.CreateBinOp(MulOp, InitVec, SplatStep);
auto *Add = Builder.CreateBinOp(AddOp, SplatIV, Mul);
- State.set(this, Add, 0);
+ State.set(this, Add);
// It's useful to record the lane values too for the known minimum number
// of elements so we do those below. This improves the code quality when
// trying to extract the first element, for example.
@@ -1691,7 +1690,7 @@ void VPWidenGEPRecipe::execute(VPTransformState &State) {
State.Builder.CreateGEP(GEP->getSourceElementType(), Ops[0],
ArrayRef(Ops).drop_front(), "", isInBounds());
Value *Splat = State.Builder.CreateVectorSplat(State.VF, NewGEP);
- State.set(this, Splat, 0);
+ State.set(this, Splat);
State.addMetadata(Splat, GEP);
} else {
// If the GEP has at least one loop-varying operand, we are sure to
@@ -1700,7 +1699,7 @@ void VPWidenGEPRecipe::execute(VPTransformState &State) {
// won't broadcast it.
auto *Ptr = isPointerLoopInvariant()
? State.get(getOperand(0), VPIteration(0, 0))
- : State.get(getOperand(0), 0);
+ : State.get(getOperand(0));
// Collect all the indices for the new GEP. If any index is
// loop-invariant, we won't broadcast it.
@@ -1710,7 +1709,7 @@ void VPWidenGEPRecipe::execute(VPTransformState &State) {
if (isIndexLoopInvariant(I - 1))
Indices.push_back(State.get(Operand, VPIteration(0, 0)));
else
- Indices.push_back(State.get(Operand, 0));
+ Indices.push_back(State.get(Operand));
}
// Create the new GEP. Note that this GEP may be a scalar if VF == 1,
@@ -1719,7 +1718,7 @@ void VPWidenGEPRecipe::execute(VPTransformState &State) {
Indices, "", isInBounds());
assert((State.VF.isScalar() || NewGEP->getType()->isVectorTy()) &&
"NewGEP is not a pointer vector");
- State.set(this, NewGEP, 0);
+ State.set(this, NewGEP);
State.addMetadata(NewGEP, GEP);
}
}
@@ -1773,7 +1772,7 @@ void VPVectorPointerRecipe ::execute(VPTransformState &State) {
ResultPtr = Builder.CreateGEP(IndexedTy, Ptr, Increment, "", InBounds);
}
- State.set(this, ResultPtr, 0, /*IsScalar*/ true);
+ State.set(this, ResultPtr, /*IsScalar*/ true);
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
@@ -1813,17 +1812,17 @@ void VPBlendRecipe::execute(VPTransformState &State) {
for (unsigned In = 0; In < NumIncoming; ++In) {
// We might have single edge PHIs (blocks) - use an identity
// 'select' for the first PHI operand.
- Value *In0 = State.get(getIncomingValue(In), 0, OnlyFirstLaneUsed);
+ Value *In0 = State.get(getIncomingValue(In), OnlyFirstLaneUsed);
if (In == 0)
Result = In0; // Initialize with the first incoming value.
else {
// Select between the current value and the previous incoming edge
// based on the incoming mask.
- Value *Cond = State.get(getMask(In), 0, OnlyFirstLaneUsed);
+ Value *Cond = State.get(getMask(In), OnlyFirstLaneUsed);
Result = State.Builder.CreateSelect(Cond, In0, Result, "predphi");
}
}
- State.set(this, Result, 0, OnlyFirstLaneUsed);
+ State.set(this, Result, OnlyFirstLaneUsed);
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
@@ -1852,14 +1851,14 @@ void VPBlendRecipe::print(raw_ostream &O, const Twine &Indent,
void VPReductionRecipe::execute(VPTransformState &State) {
assert(!State.Instance && "Reduction being replicated.");
- Value *PrevInChain = State.get(getChainOp(), 0, /*IsScalar*/ true);
+ Value *PrevInChain = State.get(getChainOp(), /*IsScalar*/ true);
RecurKind Kind = RdxDesc.getRecurrenceKind();
// Propagate the fast-math flags carried by the underlying instruction.
IRBuilderBase::FastMathFlagGuard FMFGuard(State.Builder);
State.Builder.setFastMathFlags(RdxDesc.getFastMathFlags());
- Value *NewVecOp = State.get(getVecOp(), 0);
+ Value *NewVecOp = State.get(getVecOp());
if (VPValue *Cond = getCondOp()) {
- Value *NewCond = State.get(Cond, 0, State.VF.isScalar());
+ Value *NewCond = State.get(Cond, State.VF.isScalar());
VectorType *VecTy = dyn_cast<VectorType>(NewVecOp->getType());
Type *ElementTy = VecTy ? VecTy->getElementType() : NewVecOp->getType();
@@ -1888,7 +1887,7 @@ void VPReductionRecipe::execute(VPTransformState &State) {
PrevInChain = NewRed;
NextInChain = NewRed;
} else {
- PrevInChain = State.get(getChainOp(), 0, /*IsScalar*/ true);
+ PrevInChain = State.get(getChainOp(), /*IsScalar*/ true);
NewRed = createReduction(State.Builder, RdxDesc, NewVecOp);
if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind))
NextInChain = createMinMaxOp(State.Builder, RdxDesc.getRecurrenceKind(),
@@ -1897,7 +1896,7 @@ void VPReductionRecipe::execute(VPTransformState &State) {
NextInChain = State.Builder.CreateBinOp(
(Instruction::BinaryOps)RdxDesc.getOpcode(Kind), NewRed, PrevInChain);
}
- State.set(this, NextInChain, 0, /*IsScalar*/ true);
+ State.set(this, NextInChain, /*IsScalar*/ true);
}
void VPReductionEVLRecipe::execute(VPTransformState &State) {
@@ -1910,8 +1909,8 @@ void VPReductionEVLRecipe::execute(VPTransformState &State) {
Builder.setFastMathFlags(RdxDesc.getFastMathFlags());
RecurKind Kind = RdxDesc.getRecurrenceKind();
- Value *Prev = State.get(getChainOp(), 0, /*IsScalar*/ true);
- Value *VecOp = State.get(getVecOp(), 0);
+ Value *Prev = State.get(getChainOp(), /*IsScalar*/ true);
+ Value *VecOp = State.get(getVecOp());
Value *EVL = State.get(getEVL(), VPIteration(0, 0));
VectorBuilder VBuilder(Builder);
@@ -1919,7 +1918,7 @@ void VPReductionEVLRecipe::execute(VPTransformState &State) {
Value *Mask;
// TODO: move the all-true mask generation into VectorBuilder.
if (VPValue *CondOp = getCondOp())
- Mask = State.get(CondOp, 0);
+ Mask = State.get(CondOp);
else
Mask = Builder.CreateVectorSplat(State.VF, Builder.getTrue());
VBuilder.setMask(Mask);
@@ -1935,7 +1934,7 @@ void VPReductionEVLRecipe::execute(VPTransformState &State) {
NewRed = Builder.CreateBinOp(
(Instruction::BinaryOps)RdxDesc.getOpcode(Kind), NewRed, Prev);
}
- State.set(this, NewRed, 0, /*IsScalar*/ true);
+ State.set(this, NewRed, /*IsScalar*/ true);
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
@@ -2065,7 +2064,7 @@ void VPBranchOnMaskRecipe::execute(VPTransformState &State) {
Value *ConditionBit = nullptr;
VPValue *BlockInMask = getMask();
if (BlockInMask) {
- ConditionBit = State.get(BlockInMask, 0);
+ ConditionBit = State.get(BlockInMask);
if (ConditionBit->getType()->isVectorTy())
ConditionBit = State.Builder.CreateExtractElement(
ConditionBit, State.Builder.getInt32(Lane));
@@ -2098,19 +2097,19 @@ void VPPredInstPHIRecipe::execute(VPTransformState &State) {
// needed. In this case the recipe of the predicated instruction is marked to
// also do that packing, thereby "hoisting" the insert-element sequence.
// Otherwise, a phi node for the scalar value is needed.
- if (State.hasVectorValue(getOperand(0), 0)) {
- Value *VectorValue = State.get(getOperand(0), 0);
+ if (State.hasVectorValue(getOperand(0))) {
+ Value *VectorValue = State.get(getOperand(0));
InsertElementInst *IEI = cast<InsertElementInst>(VectorValue);
PHINode *VPhi = State.Builder.CreatePHI(IEI->getType(), 2);
VPhi->addIncoming(IEI->getOperand(0), PredicatingBB); // Unmodified vector.
VPhi->addIncoming(IEI, PredicatedBB); // New vector with inserted element.
- if (State.hasVectorValue(this, 0))
- State.reset(this, VPhi, 0);
+ if (State.hasVectorValue(this))
+ State.reset(this, VPhi);
else
- State.set(this, VPhi, 0);
+ State.set(this, VPhi);
// NOTE: Currently we need to update the value of the operand, so the next
// predicated iteration inserts its generated value in the correct vector.
- State.reset(getOperand(0), VPhi, 0);
+ State.reset(getOperand(0), VPhi);
} else {
Type *PredInstType = getOperand(0)->getUnderlyingValue()->getType();
PHINode *Phi = State.Builder.CreatePHI(PredInstType, 2);
@@ -2190,12 +2189,12 @@ void VPWidenLoadRecipe::execute(VPTransformState &State) {
if (auto *VPMask = getMask()) {
// Mask reversal is only needed for non-all-one (null) masks, as reverse
// of a null all-one mask is a null mask.
- Mask = State.get(VPMask, 0);
+ Mask = State.get(VPMask);
if (isReverse())
Mask = Builder.CreateVectorReverse(Mask, "reverse");
}
- Value *Addr = State.get(getAddr(), 0, /*IsScalar*/ !CreateGather);
+ Value *Addr = State.get(getAddr(), /*IsScalar*/ !CreateGather);
Value *NewLI;
if (CreateGather) {
NewLI = Builder.CreateMaskedGather(DataTy, Addr, Alignment, Mask, nullptr,
@@ -2211,7 +2210,7 @@ void VPWidenLoadRecipe::execute(VPTransformState &State) {
State.addMetadata(NewLI, LI);
if (Reverse)
NewLI = Builder.CreateVectorReverse(NewLI, "reverse");
- State.set(this, NewLI, 0);
+ State.set(this, NewLI);
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
@@ -2247,10 +2246,10 @@ void VPWidenLoadEVLRecipe::execute(VPTransformState &State) {
State.setDebugLocFrom(getDebugLoc());
CallInst *NewLI;
Value *EVL = State.get(getEVL(), VPIteration(0, 0));
- Value *Addr = State.get(getAddr(), 0, !CreateGather);
+ Value *Addr = State.get(getAddr(), !CreateGather);
Value *Mask = nullptr;
if (VPValue *VPMask = getMask()) {
- Mask = State.get(VPMask, 0);
+ Mask = State.get(VPMask);
if (isReverse())
Mask = createReverseEVL(Builder, Mask, EVL, "vp.reverse.mask");
} else {
@@ -2273,7 +2272,7 @@ void VPWidenLoadEVLRecipe::execute(VPTransformState &State) {
Instruction *Res = NewLI;
if (isReverse())
Res = createReverseEVL(Builder, Res, EVL, "vp.reverse");
- State.set(this, Res, 0);
+ State.set(this, Res);
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
@@ -2300,12 +2299,12 @@ void VPWidenStoreRecipe::execute(VPTransformState &State) {
if (auto *VPMask = getMask()) {
// Mask reversal is only needed for non-all-one (null) masks, as reverse
// of a null all-one mask is a null mask.
- Mask = State.get(VPMask, 0);
+ Mask = State.get(VPMask);
if (isReverse())
Mask = Builder.CreateVectorReverse(Mask, "reverse");
}
- Value *StoredVal = State.get(StoredVPValue, 0);
+ Value *StoredVal = State.get(StoredVPValue);
if (isReverse()) {
// If we store to reverse consecutive memory locations, then we need
// to reverse the order of elements in the stored value.
@@ -2313,7 +2312,7 @@ void VPWidenStoreRecipe::execute(VPTransformState &State) {
// We don't want to update the value in the map as it might be used in
// another expression. So don't call resetVectorValue(StoredVal).
}
- Value *Addr = State.get(getAddr(), 0, /*IsScalar*/ !CreateScatter);
+ Value *Addr = State.get(getAddr(), /*IsScalar*/ !CreateScatter);
Instruction *NewSI = nullptr;
if (CreateScatter)
NewSI = Builder.CreateMaskedScatter(StoredVal, Addr, Alignment, Mask);
@@ -2343,19 +2342,19 @@ void VPWidenStoreEVLRecipe::execute(VPTransformState &State) {
State.setDebugLocFrom(getDebugLoc());
CallInst *NewSI = nullptr;
- Value *StoredVal = State.get(StoredValue, 0);
+ Value *StoredVal = State.get(StoredValue);
Value *EVL = State.get(getEVL(), VPIteration(0, 0));
if (isReverse())
StoredVal = createReverseEVL(Builder, StoredVal, EVL, "vp.reverse");
Value *Mask = nullptr;
if (VPValue *VPMask = getMask()) {
- Mask = State.get(VPMask, 0);
+ Mask = State.get(VPMask);
if (isReverse())
Mask = createReverseEVL(Builder, Mask, EVL, "vp.reverse.mask");
} else {
Mask = Builder.CreateVectorSplat(State.VF, Builder.getTrue());
}
- Value *Addr = State.get(getAddr(), 0, !CreateScatter);
+ Value *Addr = State.get(getAddr(), !CreateScatter);
if (CreateScatter) {
NewSI = Builder.CreateIntrinsic(Type::getVoidTy(EVL->getContext()),
Intrinsic::vp_scatter,
@@ -2491,9 +2490,7 @@ void VPInterleaveRecipe::execute(VPTransformState &State) {
// If the group is reverse, adjust the index to refer to the last vector lane
// instead of the first. We adjust the index from the first vector lane,
// rather than directly getting the pointer for lane VF - 1, because the
- // pointer operand of the interleaved access is supposed to be uniform. For
- // uniform instructions, we're only required to generate a value for the
- // first vector lane in each unroll iteration.
+ // pointer operand of the interleaved access is supposed to be uniform.
if (Group->isReverse()) {
Value *RuntimeVF =
getRuntimeVF(State.Builder, State.Builder.getInt32Ty(), State.VF);
@@ -2536,7 +2533,7 @@ void VPInterleaveRecipe::execute(VPTransformState &State) {
assert(!MaskForGaps && "Interleaved groups with gaps are not supported.");
assert(InterleaveFactor == 2 &&
"Unsupported deinterleave factor for scalable vectors");
- auto *ResBlockInMask = State.get(BlockInMask, 0);
+ auto *ResBlockInMask = State.get(BlockInMask);
SmallVector<Value *, 2> Ops = {ResBlockInMask, ResBlockInMask};
auto *MaskTy = VectorType::get(State.Builder.getInt1Ty(),
State.VF.getKnownMinValue() * 2, true);
@@ -2548,7 +2545,7 @@ void VPInterleaveRecipe::execute(VPTransformState &State) {
if (!BlockInMask)
return MaskForGaps;
- Value *ResBlockInMask = State.get(BlockInMask, 0);
+ Value *ResBlockInMask = State.get(BlockInMask);
Value *ShuffledMask = State.Builder.CreateShuffleVector(
ResBlockInMask,
createReplicatedMask(InterleaveFactor, State.VF.getKnownMinValue()),
@@ -2608,7 +2605,7 @@ void VPInterleaveRecipe::execute(VPTransformState &State) {
if (Group->isReverse())
StridedVec = State.Builder.CreateVectorReverse(StridedVec, "reverse");
- State.set(VPDefs[J], StridedVec, 0);
+ State.set(VPDefs[J], StridedVec);
++J;
}
@@ -2641,7 +2638,7 @@ void VPInterleaveRecipe::execute(VPTransformState &State) {
if (Group->isReverse())
StridedVec = State.Builder.CreateVectorReverse(StridedVec, "reverse");
- State.set(VPDefs[J], StridedVec, 0);
+ State.set(VPDefs[J], StridedVec);
++J;
}
return;
@@ -2671,7 +2668,7 @@ void VPInterleaveRecipe::execute(VPTransformState &State) {
continue;
}
- Value *StoredVec = State.get(StoredValues[StoredIdx], 0);
+ Value *StoredVec = State.get(StoredValues[StoredIdx]);
++StoredIdx;
if (Group->isReverse())
@@ -2738,7 +2735,7 @@ void VPCanonicalIVPHIRecipe::execute(VPTransformState &State) {
BasicBlock *VectorPH = State.CFG.getPreheaderBBFor(this);
Phi->addIncoming(Start, VectorPH);
Phi->setDebugLoc(getDebugLoc());
- State.set(this, Phi, 0, /*IsScalar*/ true);
+ State.set(this, Phi, /*IsScalar*/ true);
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
@@ -2783,7 +2780,7 @@ void VPWidenPointerInductionRecipe::execute(VPTransformState &State) {
"Recipe should have been replaced");
auto *IVR = getParent()->getPlan()->getCanonicalIV();
- PHINode *CanonicalIV = cast<PHINode>(State.get(IVR, 0, /*IsScalar*/ true));
+ PHINode *CanonicalIV = cast<PHINode>(State.get(IVR, /*IsScalar*/ true));
unsigned CurrentPart = getUnrollPart(*this);
// Build a pointer phi
@@ -2800,7 +2797,7 @@ void VPWidenPointerInductionRecipe::execute(VPTransformState &State) {
// The recipe has been unrolled. In that case, fetch the single pointer phi
// shared among all unrolled parts of the recipe.
auto *GEP =
- cast<GetElementPtrInst>(State.get(getFirstUnrolledPartOperand(), 0));
+ cast<GetElementPtrInst>(State.get(getFirstUnrolledPartOperand()));
NewPointerPhi = cast<PHINode>(GEP->getPointerOperand());
}
@@ -2847,7 +2844,7 @@ void VPWidenPointerInductionRecipe::execute(VPTransformState &State) {
State.Builder.CreateMul(StartOffset, State.Builder.CreateVectorSplat(
State.VF, ScalarStepValue)),
"vector.gep");
- State.set(this, GEP, 0);
+ State.set(this, GEP);
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
@@ -2892,7 +2889,7 @@ void VPExpandSCEVRecipe::print(raw_ostream &O, const Twine &Indent,
#endif
void VPWidenCanonicalIVRecipe::execute(VPTransformState &State) {
- Value *CanonicalIV = State.get(getOperand(0), 0, /*IsScalar*/ true);
+ Value *CanonicalIV = State.get(getOperand(0), /*IsScalar*/ true);
Type *STy = CanonicalIV->getType();
IRBuilder<> Builder(State.CFG.PrevBB->getTerminator());
ElementCount VF = State.VF;
@@ -2906,7 +2903,7 @@ void VPWidenCanonicalIVRecipe::execute(VPTransformState &State) {
Builder.CreateAdd(VStep, Builder.CreateStepVector(VStep->getType()));
}
Value *CanonicalVectorIV = Builder.CreateAdd(VStart, VStep, "vec.iv");
- State.set(this, CanonicalVectorIV, 0);
+ State.set(this, CanonicalVectorIV);
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
@@ -2944,7 +2941,7 @@ void VPFirstOrderRecurrencePHIRecipe::execute(VPTransformState &State) {
PHINode *Phi = PHINode::Create(VecTy, 2, "vector.recur");
Phi->insertBefore(State.CFG.PrevBB->getFirstInsertionPt());
Phi->addIncoming(VectorInit, VectorPH);
- State.set(this, Phi, 0);
+ State.set(this, Phi);
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
@@ -2978,7 +2975,7 @@ void VPReductionPHIRecipe::execute(VPTransformState &State) {
"recipe must be in the vector loop header");
auto *Phi = PHINode::Create(VecTy, 2, "vec.phi");
Phi->insertBefore(HeaderBB->getFirstInsertionPt());
- State.set(this, Phi, 0, IsInLoop);
+ State.set(this, Phi, IsInLoop);
BasicBlock *VectorPH = State.CFG.getPreheaderBBFor(this);
@@ -2994,7 +2991,7 @@ void VPReductionPHIRecipe::execute(VPTransformState &State) {
} else {
IRBuilderBase::InsertPointGuard IPBuilder(Builder);
Builder.SetInsertPoint(VectorPH->getTerminator());
- StartV = Iden = State.get(StartVPV, 0);
+ StartV = Iden = State.get(StartVPV);
}
} else {
Iden = llvm::getRecurrenceIdentity(RK, VecTy->getScalarType(),
@@ -3016,7 +3013,7 @@ void VPReductionPHIRecipe::execute(VPTransformState &State) {
}
}
- Phi = cast<PHINode>(State.get(this, 0, IsInLoop));
+ Phi = cast<PHINode>(State.get(this, IsInLoop));
Value *StartVal = (CurrentPart == 0) ? StartV : Iden;
Phi->addIncoming(StartVal, VectorPH);
}
@@ -3036,10 +3033,10 @@ void VPWidenPHIRecipe::execute(VPTransformState &State) {
assert(EnableVPlanNativePath &&
"Non-native vplans are not expected to have VPWidenPHIRecipes.");
- Value *Op0 = State.get(getOperand(0), 0);
+ Value *Op0 = State.get(getOperand(0));
Type *VecTy = Op0->getType();
Value *VecPhi = State.Builder.CreatePHI(VecTy, 2, "vec.phi");
- State.set(this, VecPhi, 0);
+ State.set(this, VecPhi);
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
@@ -3067,12 +3064,12 @@ void VPWidenPHIRecipe::print(raw_ostream &O, const Twine &Indent,
// remove VPActiveLaneMaskPHIRecipe.
void VPActiveLaneMaskPHIRecipe::execute(VPTransformState &State) {
BasicBlock *VectorPH = State.CFG.getPreheaderBBFor(this);
- Value *StartMask = State.get(getOperand(0), 0);
+ Value *StartMask = State.get(getOperand(0));
PHINode *Phi =
State.Builder.CreatePHI(StartMask->getType(), 2, "active.lane.mask");
Phi->addIncoming(StartMask, VectorPH);
Phi->setDebugLoc(getDebugLoc());
- State.set(this, Phi, 0);
+ State.set(this, Phi);
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
@@ -3092,7 +3089,7 @@ void VPEVLBasedIVPHIRecipe::execute(VPTransformState &State) {
PHINode *Phi = State.Builder.CreatePHI(Start->getType(), 2, "evl.based.iv");
Phi->addIncoming(Start, VectorPH);
Phi->setDebugLoc(getDebugLoc());
- State.set(this, Phi, 0, /*IsScalar=*/true);
+ State.set(this, Phi, /*IsScalar=*/true);
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
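Every call-site change in VPlanRecipes.cpp follows the same mechanical pattern: the constant `0` part argument is dropped from State.get(), State.set(), and State.reset(). A compilable toy sketch of the new calling convention, where TransformState and VPValue are simplified stand-ins rather than the real classes:

#include <cassert>

struct Value {};
struct VPValue { Value *Gen = nullptr; };

// Simplified stand-in for VPTransformState after this patch: get/set take no
// part index because each VPValue owns a single generated value.
struct TransformState {
  Value *get(VPValue *Def, bool IsScalar = false) {
    (void)IsScalar; // scalar-lane handling elided in this sketch
    return Def->Gen;
  }
  void set(VPValue *Def, Value *V, bool IsScalar = false) {
    (void)IsScalar;
    Def->Gen = V;
  }
};

int main() {
  TransformState State;
  VPValue Def;
  Value V;
  State.set(&Def, &V);           // was: State.set(&Def, &V, /*Part=*/0);
  assert(State.get(&Def) == &V); // was: State.get(&Def, /*Part=*/0);
  return 0;
}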
diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
index b2893e8..3b37a1e 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
@@ -593,12 +593,10 @@ static void legalizeAndOptimizeInductions(VPlan &Plan) {
Plan, InductionDescriptor::IK_IntInduction, Instruction::Add, nullptr,
nullptr, StartV, StepV, Builder);
- auto *Recipe = new VPInstruction(VPInstruction::PtrAdd,
- {PtrIV->getStartValue(), Steps},
- PtrIV->getDebugLoc(), "next.gep");
+ VPValue *PtrAdd = Builder.createPtrAdd(PtrIV->getStartValue(), Steps,
+ PtrIV->getDebugLoc(), "next.gep");
- Recipe->insertAfter(Steps);
- PtrIV->replaceAllUsesWith(Recipe);
+ PtrIV->replaceAllUsesWith(PtrAdd);
continue;
}
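The VPlanTransforms.cpp hunk swaps manual recipe construction plus a separate insertAfter() call for a builder helper that creates the PtrAdd and inserts it at the builder's current insert point in one step. A rough stand-in illustrating why the explicit insertAfter() disappears (toy types, not the real VPBuilder API):

#include <list>
#include <string>

struct Recipe { std::string Name; };

// Toy builder: creating through the builder also inserts at the insert point,
// so callers no longer pair `new Recipe(...)` with a separate insertAfter().
struct ToyBuilder {
  std::list<Recipe> Block;
  std::list<Recipe>::iterator InsertPt = Block.end();
  Recipe *createPtrAdd(const std::string &Name) {
    return &*Block.insert(InsertPt, Recipe{Name}); // construct + insert
  }
};

int main() {
  ToyBuilder B;
  Recipe *R = B.createPtrAdd("next.gep"); // one call replaces two steps
  (void)R;
  return 0;
}

Routing creation through the builder also keeps insertion-point bookkeeping in one place, which is the usual motivation for this kind of convenience wrapper.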
diff --git a/llvm/test/Analysis/CostModel/AArch64/reduce-fadd.ll b/llvm/test/Analysis/CostModel/AArch64/reduce-fadd.ll
index 58cb8c2..a95542f 100644
--- a/llvm/test/Analysis/CostModel/AArch64/reduce-fadd.ll
+++ b/llvm/test/Analysis/CostModel/AArch64/reduce-fadd.ll
@@ -76,49 +76,49 @@ define void @fast_fp_reductions() {
; CHECK-NEXT: Cost Model: Found an estimated cost of 48 for instruction: %fadd_v16f16_reassoc = call reassoc half @llvm.vector.reduce.fadd.v16f16(half 0xH0000, <16 x half> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 38 for instruction: %fadd_v11f16 = call fast half @llvm.vector.reduce.fadd.v11f16(half 0xH0000, <11 x half> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 38 for instruction: %fadd_v13f16_reassoc = call reassoc half @llvm.vector.reduce.fadd.v13f16(half 0xH0000, <13 x half> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %fadd_v2f32 = call fast float @llvm.vector.reduce.fadd.v2f32(float 0.000000e+00, <2 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %fadd_v2f32_reassoc = call reassoc float @llvm.vector.reduce.fadd.v2f32(float 0.000000e+00, <2 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %fadd_v4f32 = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %fadd_v4f32_reassoc = call reassoc float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 15 for instruction: %fadd_v8f32 = call fast float @llvm.vector.reduce.fadd.v8f32(float 0.000000e+00, <8 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 15 for instruction: %fadd_v8f32_reassoc = call reassoc float @llvm.vector.reduce.fadd.v8f32(float 0.000000e+00, <8 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 25 for instruction: %fadd_v13f32 = call fast float @llvm.vector.reduce.fadd.v13f32(float 0.000000e+00, <13 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %fadd_v5f32_reassoc = call reassoc float @llvm.vector.reduce.fadd.v5f32(float 0.000000e+00, <5 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %fadd_v2f64 = call fast double @llvm.vector.reduce.fadd.v2f64(double 0.000000e+00, <2 x double> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %fadd_v2f64_reassoc = call reassoc double @llvm.vector.reduce.fadd.v2f64(double 0.000000e+00, <2 x double> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %fadd_v4f64 = call fast double @llvm.vector.reduce.fadd.v4f64(double 0.000000e+00, <4 x double> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %fadd_v4f64_reassoc = call reassoc double @llvm.vector.reduce.fadd.v4f64(double 0.000000e+00, <4 x double> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %fadd_v7f64 = call fast double @llvm.vector.reduce.fadd.v7f64(double 0.000000e+00, <7 x double> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 15 for instruction: %fadd_v9f64_reassoc = call reassoc double @llvm.vector.reduce.fadd.v9f64(double 0.000000e+00, <9 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %fadd_v2f32 = call fast float @llvm.vector.reduce.fadd.v2f32(float 0.000000e+00, <2 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %fadd_v2f32_reassoc = call reassoc float @llvm.vector.reduce.fadd.v2f32(float 0.000000e+00, <2 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %fadd_v4f32 = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %fadd_v4f32_reassoc = call reassoc float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %fadd_v8f32 = call fast float @llvm.vector.reduce.fadd.v8f32(float 0.000000e+00, <8 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %fadd_v8f32_reassoc = call reassoc float @llvm.vector.reduce.fadd.v8f32(float 0.000000e+00, <8 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %fadd_v13f32 = call fast float @llvm.vector.reduce.fadd.v13f32(float 0.000000e+00, <13 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %fadd_v5f32_reassoc = call reassoc float @llvm.vector.reduce.fadd.v5f32(float 0.000000e+00, <5 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %fadd_v2f64 = call fast double @llvm.vector.reduce.fadd.v2f64(double 0.000000e+00, <2 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %fadd_v2f64_reassoc = call reassoc double @llvm.vector.reduce.fadd.v2f64(double 0.000000e+00, <2 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %fadd_v4f64 = call fast double @llvm.vector.reduce.fadd.v4f64(double 0.000000e+00, <4 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %fadd_v4f64_reassoc = call reassoc double @llvm.vector.reduce.fadd.v4f64(double 0.000000e+00, <4 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %fadd_v7f64 = call fast double @llvm.vector.reduce.fadd.v7f64(double 0.000000e+00, <7 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %fadd_v9f64_reassoc = call reassoc double @llvm.vector.reduce.fadd.v9f64(double 0.000000e+00, <9 x double> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %fadd_v4f8 = call reassoc bfloat @llvm.vector.reduce.fadd.v4bf16(bfloat 0xR8000, <4 x bfloat> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %fadd_v4f128 = call reassoc fp128 @llvm.vector.reduce.fadd.v4f128(fp128 undef, <4 x fp128> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
;
; FP16-LABEL: 'fast_fp_reductions'
-; FP16-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %fadd_v2f16_fast = call fast half @llvm.vector.reduce.fadd.v2f16(half 0xH0000, <2 x half> undef)
-; FP16-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %fadd_v2f16_reassoc = call reassoc half @llvm.vector.reduce.fadd.v2f16(half 0xH0000, <2 x half> undef)
-; FP16-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %fadd_v4f16_fast = call fast half @llvm.vector.reduce.fadd.v4f16(half 0xH0000, <4 x half> undef)
-; FP16-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %fadd_v4f16_reassoc = call reassoc half @llvm.vector.reduce.fadd.v4f16(half 0xH0000, <4 x half> undef)
-; FP16-NEXT: Cost Model: Found an estimated cost of 27 for instruction: %fadd_v8f16 = call fast half @llvm.vector.reduce.fadd.v8f16(half 0xH0000, <8 x half> undef)
-; FP16-NEXT: Cost Model: Found an estimated cost of 27 for instruction: %fadd_v8f16_reassoc = call reassoc half @llvm.vector.reduce.fadd.v8f16(half 0xH0000, <8 x half> undef)
-; FP16-NEXT: Cost Model: Found an estimated cost of 44 for instruction: %fadd_v16f16 = call fast half @llvm.vector.reduce.fadd.v16f16(half 0xH0000, <16 x half> undef)
-; FP16-NEXT: Cost Model: Found an estimated cost of 44 for instruction: %fadd_v16f16_reassoc = call reassoc half @llvm.vector.reduce.fadd.v16f16(half 0xH0000, <16 x half> undef)
-; FP16-NEXT: Cost Model: Found an estimated cost of 35 for instruction: %fadd_v11f16 = call fast half @llvm.vector.reduce.fadd.v11f16(half 0xH0000, <11 x half> undef)
-; FP16-NEXT: Cost Model: Found an estimated cost of 35 for instruction: %fadd_v13f16_reassoc = call reassoc half @llvm.vector.reduce.fadd.v13f16(half 0xH0000, <13 x half> undef)
-; FP16-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %fadd_v2f32 = call fast float @llvm.vector.reduce.fadd.v2f32(float 0.000000e+00, <2 x float> undef)
-; FP16-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %fadd_v2f32_reassoc = call reassoc float @llvm.vector.reduce.fadd.v2f32(float 0.000000e+00, <2 x float> undef)
-; FP16-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %fadd_v4f32 = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> undef)
-; FP16-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %fadd_v4f32_reassoc = call reassoc float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> undef)
-; FP16-NEXT: Cost Model: Found an estimated cost of 15 for instruction: %fadd_v8f32 = call fast float @llvm.vector.reduce.fadd.v8f32(float 0.000000e+00, <8 x float> undef)
-; FP16-NEXT: Cost Model: Found an estimated cost of 15 for instruction: %fadd_v8f32_reassoc = call reassoc float @llvm.vector.reduce.fadd.v8f32(float 0.000000e+00, <8 x float> undef)
-; FP16-NEXT: Cost Model: Found an estimated cost of 25 for instruction: %fadd_v13f32 = call fast float @llvm.vector.reduce.fadd.v13f32(float 0.000000e+00, <13 x float> undef)
-; FP16-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %fadd_v5f32_reassoc = call reassoc float @llvm.vector.reduce.fadd.v5f32(float 0.000000e+00, <5 x float> undef)
-; FP16-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %fadd_v2f64 = call fast double @llvm.vector.reduce.fadd.v2f64(double 0.000000e+00, <2 x double> undef)
-; FP16-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %fadd_v2f64_reassoc = call reassoc double @llvm.vector.reduce.fadd.v2f64(double 0.000000e+00, <2 x double> undef)
-; FP16-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %fadd_v4f64 = call fast double @llvm.vector.reduce.fadd.v4f64(double 0.000000e+00, <4 x double> undef)
-; FP16-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %fadd_v4f64_reassoc = call reassoc double @llvm.vector.reduce.fadd.v4f64(double 0.000000e+00, <4 x double> undef)
-; FP16-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %fadd_v7f64 = call fast double @llvm.vector.reduce.fadd.v7f64(double 0.000000e+00, <7 x double> undef)
-; FP16-NEXT: Cost Model: Found an estimated cost of 15 for instruction: %fadd_v9f64_reassoc = call reassoc double @llvm.vector.reduce.fadd.v9f64(double 0.000000e+00, <9 x double> undef)
+; FP16-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %fadd_v2f16_fast = call fast half @llvm.vector.reduce.fadd.v2f16(half 0xH0000, <2 x half> undef)
+; FP16-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %fadd_v2f16_reassoc = call reassoc half @llvm.vector.reduce.fadd.v2f16(half 0xH0000, <2 x half> undef)
+; FP16-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %fadd_v4f16_fast = call fast half @llvm.vector.reduce.fadd.v4f16(half 0xH0000, <4 x half> undef)
+; FP16-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %fadd_v4f16_reassoc = call reassoc half @llvm.vector.reduce.fadd.v4f16(half 0xH0000, <4 x half> undef)
+; FP16-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %fadd_v8f16 = call fast half @llvm.vector.reduce.fadd.v8f16(half 0xH0000, <8 x half> undef)
+; FP16-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %fadd_v8f16_reassoc = call reassoc half @llvm.vector.reduce.fadd.v8f16(half 0xH0000, <8 x half> undef)
+; FP16-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %fadd_v16f16 = call fast half @llvm.vector.reduce.fadd.v16f16(half 0xH0000, <16 x half> undef)
+; FP16-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %fadd_v16f16_reassoc = call reassoc half @llvm.vector.reduce.fadd.v16f16(half 0xH0000, <16 x half> undef)
+; FP16-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %fadd_v11f16 = call fast half @llvm.vector.reduce.fadd.v11f16(half 0xH0000, <11 x half> undef)
+; FP16-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %fadd_v13f16_reassoc = call reassoc half @llvm.vector.reduce.fadd.v13f16(half 0xH0000, <13 x half> undef)
+; FP16-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %fadd_v2f32 = call fast float @llvm.vector.reduce.fadd.v2f32(float 0.000000e+00, <2 x float> undef)
+; FP16-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %fadd_v2f32_reassoc = call reassoc float @llvm.vector.reduce.fadd.v2f32(float 0.000000e+00, <2 x float> undef)
+; FP16-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %fadd_v4f32 = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> undef)
+; FP16-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %fadd_v4f32_reassoc = call reassoc float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> undef)
+; FP16-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %fadd_v8f32 = call fast float @llvm.vector.reduce.fadd.v8f32(float 0.000000e+00, <8 x float> undef)
+; FP16-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %fadd_v8f32_reassoc = call reassoc float @llvm.vector.reduce.fadd.v8f32(float 0.000000e+00, <8 x float> undef)
+; FP16-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %fadd_v13f32 = call fast float @llvm.vector.reduce.fadd.v13f32(float 0.000000e+00, <13 x float> undef)
+; FP16-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %fadd_v5f32_reassoc = call reassoc float @llvm.vector.reduce.fadd.v5f32(float 0.000000e+00, <5 x float> undef)
+; FP16-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %fadd_v2f64 = call fast double @llvm.vector.reduce.fadd.v2f64(double 0.000000e+00, <2 x double> undef)
+; FP16-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %fadd_v2f64_reassoc = call reassoc double @llvm.vector.reduce.fadd.v2f64(double 0.000000e+00, <2 x double> undef)
+; FP16-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %fadd_v4f64 = call fast double @llvm.vector.reduce.fadd.v4f64(double 0.000000e+00, <4 x double> undef)
+; FP16-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %fadd_v4f64_reassoc = call reassoc double @llvm.vector.reduce.fadd.v4f64(double 0.000000e+00, <4 x double> undef)
+; FP16-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %fadd_v7f64 = call fast double @llvm.vector.reduce.fadd.v7f64(double 0.000000e+00, <7 x double> undef)
+; FP16-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %fadd_v9f64_reassoc = call reassoc double @llvm.vector.reduce.fadd.v9f64(double 0.000000e+00, <9 x double> undef)
; FP16-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %fadd_v4f8 = call reassoc bfloat @llvm.vector.reduce.fadd.v4bf16(bfloat 0xR8000, <4 x bfloat> undef)
; FP16-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %fadd_v4f128 = call reassoc fp128 @llvm.vector.reduce.fadd.v4f128(fp128 undef, <4 x fp128> undef)
; FP16-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
@@ -134,20 +134,20 @@ define void @fast_fp_reductions() {
; BF16-NEXT: Cost Model: Found an estimated cost of 48 for instruction: %fadd_v16f16_reassoc = call reassoc half @llvm.vector.reduce.fadd.v16f16(half 0xH0000, <16 x half> undef)
; BF16-NEXT: Cost Model: Found an estimated cost of 38 for instruction: %fadd_v11f16 = call fast half @llvm.vector.reduce.fadd.v11f16(half 0xH0000, <11 x half> undef)
; BF16-NEXT: Cost Model: Found an estimated cost of 38 for instruction: %fadd_v13f16_reassoc = call reassoc half @llvm.vector.reduce.fadd.v13f16(half 0xH0000, <13 x half> undef)
-; BF16-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %fadd_v2f32 = call fast float @llvm.vector.reduce.fadd.v2f32(float 0.000000e+00, <2 x float> undef)
-; BF16-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %fadd_v2f32_reassoc = call reassoc float @llvm.vector.reduce.fadd.v2f32(float 0.000000e+00, <2 x float> undef)
-; BF16-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %fadd_v4f32 = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> undef)
-; BF16-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %fadd_v4f32_reassoc = call reassoc float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> undef)
-; BF16-NEXT: Cost Model: Found an estimated cost of 15 for instruction: %fadd_v8f32 = call fast float @llvm.vector.reduce.fadd.v8f32(float 0.000000e+00, <8 x float> undef)
-; BF16-NEXT: Cost Model: Found an estimated cost of 15 for instruction: %fadd_v8f32_reassoc = call reassoc float @llvm.vector.reduce.fadd.v8f32(float 0.000000e+00, <8 x float> undef)
-; BF16-NEXT: Cost Model: Found an estimated cost of 25 for instruction: %fadd_v13f32 = call fast float @llvm.vector.reduce.fadd.v13f32(float 0.000000e+00, <13 x float> undef)
-; BF16-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %fadd_v5f32_reassoc = call reassoc float @llvm.vector.reduce.fadd.v5f32(float 0.000000e+00, <5 x float> undef)
-; BF16-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %fadd_v2f64 = call fast double @llvm.vector.reduce.fadd.v2f64(double 0.000000e+00, <2 x double> undef)
-; BF16-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %fadd_v2f64_reassoc = call reassoc double @llvm.vector.reduce.fadd.v2f64(double 0.000000e+00, <2 x double> undef)
-; BF16-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %fadd_v4f64 = call fast double @llvm.vector.reduce.fadd.v4f64(double 0.000000e+00, <4 x double> undef)
-; BF16-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %fadd_v4f64_reassoc = call reassoc double @llvm.vector.reduce.fadd.v4f64(double 0.000000e+00, <4 x double> undef)
-; BF16-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %fadd_v7f64 = call fast double @llvm.vector.reduce.fadd.v7f64(double 0.000000e+00, <7 x double> undef)
-; BF16-NEXT: Cost Model: Found an estimated cost of 15 for instruction: %fadd_v9f64_reassoc = call reassoc double @llvm.vector.reduce.fadd.v9f64(double 0.000000e+00, <9 x double> undef)
+; BF16-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %fadd_v2f32 = call fast float @llvm.vector.reduce.fadd.v2f32(float 0.000000e+00, <2 x float> undef)
+; BF16-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %fadd_v2f32_reassoc = call reassoc float @llvm.vector.reduce.fadd.v2f32(float 0.000000e+00, <2 x float> undef)
+; BF16-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %fadd_v4f32 = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> undef)
+; BF16-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %fadd_v4f32_reassoc = call reassoc float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> undef)
+; BF16-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %fadd_v8f32 = call fast float @llvm.vector.reduce.fadd.v8f32(float 0.000000e+00, <8 x float> undef)
+; BF16-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %fadd_v8f32_reassoc = call reassoc float @llvm.vector.reduce.fadd.v8f32(float 0.000000e+00, <8 x float> undef)
+; BF16-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %fadd_v13f32 = call fast float @llvm.vector.reduce.fadd.v13f32(float 0.000000e+00, <13 x float> undef)
+; BF16-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %fadd_v5f32_reassoc = call reassoc float @llvm.vector.reduce.fadd.v5f32(float 0.000000e+00, <5 x float> undef)
+; BF16-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %fadd_v2f64 = call fast double @llvm.vector.reduce.fadd.v2f64(double 0.000000e+00, <2 x double> undef)
+; BF16-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %fadd_v2f64_reassoc = call reassoc double @llvm.vector.reduce.fadd.v2f64(double 0.000000e+00, <2 x double> undef)
+; BF16-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %fadd_v4f64 = call fast double @llvm.vector.reduce.fadd.v4f64(double 0.000000e+00, <4 x double> undef)
+; BF16-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %fadd_v4f64_reassoc = call reassoc double @llvm.vector.reduce.fadd.v4f64(double 0.000000e+00, <4 x double> undef)
+; BF16-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %fadd_v7f64 = call fast double @llvm.vector.reduce.fadd.v7f64(double 0.000000e+00, <7 x double> undef)
+; BF16-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %fadd_v9f64_reassoc = call reassoc double @llvm.vector.reduce.fadd.v9f64(double 0.000000e+00, <9 x double> undef)
; BF16-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %fadd_v4f8 = call reassoc bfloat @llvm.vector.reduce.fadd.v4bf16(bfloat 0xR8000, <4 x bfloat> undef)
; BF16-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %fadd_v4f128 = call reassoc fp128 @llvm.vector.reduce.fadd.v4f128(fp128 undef, <4 x fp128> undef)
; BF16-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
diff --git a/llvm/test/Analysis/CostModel/RISCV/arith-fp.ll b/llvm/test/Analysis/CostModel/RISCV/arith-fp.ll
index ff2609b..b96fdb01 100644
--- a/llvm/test/Analysis/CostModel/RISCV/arith-fp.ll
+++ b/llvm/test/Analysis/CostModel/RISCV/arith-fp.ll
@@ -1,13 +1,24 @@
; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py
-; RUN: opt -passes="print<cost-model>" 2>&1 -disable-output -mtriple=riscv64 -mattr=+v,+f,+d,+zfh,+zvfh < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
-; RUN: opt -passes="print<cost-model>" 2>&1 -disable-output -mtriple=riscv64 -mattr=+v,+f,+d,+zfh,+zvfhmin < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
+; RUN: opt -passes="print<cost-model>" 2>&1 -disable-output -mtriple=riscv64 -mattr=+v,+f,+d,+zfh,+zvfh,+zvfbfmin < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
+; RUN: opt -passes="print<cost-model>" 2>&1 -disable-output -mtriple=riscv64 -mattr=+v,+f,+d,+zfh,+zvfhmin,+zvfbfmin < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
; Check that we don't crash querying costs when vectors are not enabled.
; RUN: opt -passes="print<cost-model>" 2>&1 -disable-output -mtriple=riscv64
define void @fadd() {
; CHECK-LABEL: 'fadd'
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %BF16 = fadd bfloat undef, undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %F32 = fadd float undef, undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %F64 = fadd double undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1BF16 = fadd <1 x bfloat> undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2BF16 = fadd <2 x bfloat> undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4BF16 = fadd <4 x bfloat> undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V8BF16 = fadd <8 x bfloat> undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V16BF16 = fadd <16 x bfloat> undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV1BF16 = fadd <vscale x 1 x bfloat> undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV2BF16 = fadd <vscale x 2 x bfloat> undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV4BF16 = fadd <vscale x 4 x bfloat> undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %NXV8BF16 = fadd <vscale x 8 x bfloat> undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %NXV16BF16 = fadd <vscale x 16 x bfloat> undef, undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F32 = fadd <1 x float> undef, undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2F32 = fadd <2 x float> undef, undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4F32 = fadd <4 x float> undef, undef
@@ -28,9 +39,22 @@ define void @fadd() {
; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %NXV8F64 = fadd <vscale x 8 x double> undef, undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
;
+ %BF16 = fadd bfloat undef, undef
%F32 = fadd float undef, undef
%F64 = fadd double undef, undef
+ %V1BF16 = fadd <1 x bfloat> undef, undef
+ %V2BF16 = fadd <2 x bfloat> undef, undef
+ %V4BF16 = fadd <4 x bfloat> undef, undef
+ %V8BF16 = fadd <8 x bfloat> undef, undef
+ %V16BF16 = fadd <16 x bfloat> undef, undef
+
+ %NXV1BF16 = fadd <vscale x 1 x bfloat> undef, undef
+ %NXV2BF16 = fadd <vscale x 2 x bfloat> undef, undef
+ %NXV4BF16 = fadd <vscale x 4 x bfloat> undef, undef
+ %NXV8BF16 = fadd <vscale x 8 x bfloat> undef, undef
+ %NXV16BF16 = fadd <vscale x 16 x bfloat> undef, undef
+
%V1F32 = fadd <1 x float> undef, undef
%V2F32 = fadd <2 x float> undef, undef
%V4F32 = fadd <4 x float> undef, undef
@@ -94,8 +118,19 @@ define void @fadd_f16() {
define void @fsub() {
; CHECK-LABEL: 'fsub'
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %BF16 = fsub bfloat undef, undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %F32 = fsub float undef, undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %F64 = fsub double undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1BF16 = fsub <1 x bfloat> undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2BF16 = fsub <2 x bfloat> undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4BF16 = fsub <4 x bfloat> undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V8BF16 = fsub <8 x bfloat> undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V16BF16 = fsub <16 x bfloat> undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV1BF16 = fsub <vscale x 1 x bfloat> undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV2BF16 = fsub <vscale x 2 x bfloat> undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV4BF16 = fsub <vscale x 4 x bfloat> undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %NXV8BF16 = fsub <vscale x 8 x bfloat> undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %NXV16BF16 = fsub <vscale x 16 x bfloat> undef, undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F32 = fsub <1 x float> undef, undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2F32 = fsub <2 x float> undef, undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4F32 = fsub <4 x float> undef, undef
@@ -116,9 +151,22 @@ define void @fsub() {
; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %NXV8F64 = fsub <vscale x 8 x double> undef, undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
;
+ %BF16 = fsub bfloat undef, undef
%F32 = fsub float undef, undef
%F64 = fsub double undef, undef
+ %V1BF16 = fsub <1 x bfloat> undef, undef
+ %V2BF16 = fsub <2 x bfloat> undef, undef
+ %V4BF16 = fsub <4 x bfloat> undef, undef
+ %V8BF16 = fsub <8 x bfloat> undef, undef
+ %V16BF16 = fsub <16 x bfloat> undef, undef
+
+ %NXV1BF16 = fsub <vscale x 1 x bfloat> undef, undef
+ %NXV2BF16 = fsub <vscale x 2 x bfloat> undef, undef
+ %NXV4BF16 = fsub <vscale x 4 x bfloat> undef, undef
+ %NXV8BF16 = fsub <vscale x 8 x bfloat> undef, undef
+ %NXV16BF16 = fsub <vscale x 16 x bfloat> undef, undef
+
%V1F32 = fsub <1 x float> undef, undef
%V2F32 = fsub <2 x float> undef, undef
%V4F32 = fsub <4 x float> undef, undef
@@ -182,8 +230,19 @@ define void @fsub_f16() {
define void @fmul() {
; CHECK-LABEL: 'fmul'
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %BF16 = fmul bfloat undef, undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %F32 = fmul float undef, undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %F64 = fmul double undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1BF16 = fmul <1 x bfloat> undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2BF16 = fmul <2 x bfloat> undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4BF16 = fmul <4 x bfloat> undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V8BF16 = fmul <8 x bfloat> undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V16BF16 = fmul <16 x bfloat> undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV1BF16 = fmul <vscale x 1 x bfloat> undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV2BF16 = fmul <vscale x 2 x bfloat> undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV4BF16 = fmul <vscale x 4 x bfloat> undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %NXV8BF16 = fmul <vscale x 8 x bfloat> undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %NXV16BF16 = fmul <vscale x 16 x bfloat> undef, undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F32 = fmul <1 x float> undef, undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2F32 = fmul <2 x float> undef, undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4F32 = fmul <4 x float> undef, undef
@@ -204,9 +263,22 @@ define void @fmul() {
; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %NXV8F64 = fmul <vscale x 8 x double> undef, undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
;
+ %BF16 = fmul bfloat undef, undef
%F32 = fmul float undef, undef
%F64 = fmul double undef, undef
+ %V1BF16 = fmul <1 x bfloat> undef, undef
+ %V2BF16 = fmul <2 x bfloat> undef, undef
+ %V4BF16 = fmul <4 x bfloat> undef, undef
+ %V8BF16 = fmul <8 x bfloat> undef, undef
+ %V16BF16 = fmul <16 x bfloat> undef, undef
+
+ %NXV1BF16 = fmul <vscale x 1 x bfloat> undef, undef
+ %NXV2BF16 = fmul <vscale x 2 x bfloat> undef, undef
+ %NXV4BF16 = fmul <vscale x 4 x bfloat> undef, undef
+ %NXV8BF16 = fmul <vscale x 8 x bfloat> undef, undef
+ %NXV16BF16 = fmul <vscale x 16 x bfloat> undef, undef
+
%V1F32 = fmul <1 x float> undef, undef
%V2F32 = fmul <2 x float> undef, undef
%V4F32 = fmul <4 x float> undef, undef
@@ -270,8 +342,19 @@ define void @fmul_f16() {
define void @fdiv() {
; CHECK-LABEL: 'fdiv'
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %BF16 = fdiv bfloat undef, undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %F32 = fdiv float undef, undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %F64 = fdiv double undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1BF16 = fdiv <1 x bfloat> undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2BF16 = fdiv <2 x bfloat> undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4BF16 = fdiv <4 x bfloat> undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V8BF16 = fdiv <8 x bfloat> undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V16BF16 = fdiv <16 x bfloat> undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV1BF16 = fdiv <vscale x 1 x bfloat> undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV2BF16 = fdiv <vscale x 2 x bfloat> undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV4BF16 = fdiv <vscale x 4 x bfloat> undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %NXV8BF16 = fdiv <vscale x 8 x bfloat> undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %NXV16BF16 = fdiv <vscale x 16 x bfloat> undef, undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F32 = fdiv <1 x float> undef, undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2F32 = fdiv <2 x float> undef, undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4F32 = fdiv <4 x float> undef, undef
@@ -292,9 +375,22 @@ define void @fdiv() {
; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %NXV8F64 = fdiv <vscale x 8 x double> undef, undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
;
+ %BF16 = fdiv bfloat undef, undef
%F32 = fdiv float undef, undef
%F64 = fdiv double undef, undef
+ %V1BF16 = fdiv <1 x bfloat> undef, undef
+ %V2BF16 = fdiv <2 x bfloat> undef, undef
+ %V4BF16 = fdiv <4 x bfloat> undef, undef
+ %V8BF16 = fdiv <8 x bfloat> undef, undef
+ %V16BF16 = fdiv <16 x bfloat> undef, undef
+
+ %NXV1BF16 = fdiv <vscale x 1 x bfloat> undef, undef
+ %NXV2BF16 = fdiv <vscale x 2 x bfloat> undef, undef
+ %NXV4BF16 = fdiv <vscale x 4 x bfloat> undef, undef
+ %NXV8BF16 = fdiv <vscale x 8 x bfloat> undef, undef
+ %NXV16BF16 = fdiv <vscale x 16 x bfloat> undef, undef
+
%V1F32 = fdiv <1 x float> undef, undef
%V2F32 = fdiv <2 x float> undef, undef
%V4F32 = fdiv <4 x float> undef, undef
@@ -358,8 +454,19 @@ define void @fdiv_f16() {
define void @frem() {
; CHECK-LABEL: 'frem'
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %BF16 = frem bfloat undef, undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %F32 = frem float undef, undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %F64 = frem double undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1BF16 = frem <1 x bfloat> undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %V2BF16 = frem <2 x bfloat> undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %V4BF16 = frem <4 x bfloat> undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 24 for instruction: %V8BF16 = frem <8 x bfloat> undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 63 for instruction: %V16BF16 = frem <16 x bfloat> undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV1BF16 = frem <vscale x 1 x bfloat> undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV2BF16 = frem <vscale x 2 x bfloat> undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV4BF16 = frem <vscale x 4 x bfloat> undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV8BF16 = frem <vscale x 8 x bfloat> undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV16BF16 = frem <vscale x 16 x bfloat> undef, undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V1F32 = frem <1 x float> undef, undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %V2F32 = frem <2 x float> undef, undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %V4F32 = frem <4 x float> undef, undef
@@ -380,9 +487,22 @@ define void @frem() {
; CHECK-NEXT: Cost Model: Invalid cost for instruction: %NXV8F64 = frem <vscale x 8 x double> undef, undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
;
+ %BF16 = frem bfloat undef, undef
%F32 = frem float undef, undef
%F64 = frem double undef, undef
+ %V1BF16 = frem <1 x bfloat> undef, undef
+ %V2BF16 = frem <2 x bfloat> undef, undef
+ %V4BF16 = frem <4 x bfloat> undef, undef
+ %V8BF16 = frem <8 x bfloat> undef, undef
+ %V16BF16 = frem <16 x bfloat> undef, undef
+
+ %NXV1BF16 = frem <vscale x 1 x bfloat> undef, undef
+ %NXV2BF16 = frem <vscale x 2 x bfloat> undef, undef
+ %NXV4BF16 = frem <vscale x 4 x bfloat> undef, undef
+ %NXV8BF16 = frem <vscale x 8 x bfloat> undef, undef
+ %NXV16BF16 = frem <vscale x 16 x bfloat> undef, undef
+
%V1F32 = frem <1 x float> undef, undef
%V2F32 = frem <2 x float> undef, undef
%V4F32 = frem <4 x float> undef, undef
@@ -462,8 +582,19 @@ define void @frem_f16() {
define void @fneg() {
; CHECK-LABEL: 'fneg'
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %BF16 = fneg bfloat undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %F32 = fneg float undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %F64 = fneg double undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1BF16 = fneg <1 x bfloat> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2BF16 = fneg <2 x bfloat> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4BF16 = fneg <4 x bfloat> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V8BF16 = fneg <8 x bfloat> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V16BF16 = fneg <16 x bfloat> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV1BF16 = fneg <vscale x 1 x bfloat> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV2BF16 = fneg <vscale x 2 x bfloat> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV4BF16 = fneg <vscale x 4 x bfloat> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %NXV8BF16 = fneg <vscale x 8 x bfloat> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %NXV16BF16 = fneg <vscale x 16 x bfloat> undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F32 = fneg <1 x float> undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2F32 = fneg <2 x float> undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4F32 = fneg <4 x float> undef
@@ -484,9 +615,22 @@ define void @fneg() {
; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %NXV8F64 = fneg <vscale x 8 x double> undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
;
+ %BF16 = fneg bfloat undef
%F32 = fneg float undef
%F64 = fneg double undef
+ %V1BF16 = fneg <1 x bfloat> undef
+ %V2BF16 = fneg <2 x bfloat> undef
+ %V4BF16 = fneg <4 x bfloat> undef
+ %V8BF16 = fneg <8 x bfloat> undef
+ %V16BF16 = fneg <16 x bfloat> undef
+
+ %NXV1BF16 = fneg <vscale x 1 x bfloat> undef
+ %NXV2BF16 = fneg <vscale x 2 x bfloat> undef
+ %NXV4BF16 = fneg <vscale x 4 x bfloat> undef
+ %NXV8BF16 = fneg <vscale x 8 x bfloat> undef
+ %NXV16BF16 = fneg <vscale x 16 x bfloat> undef
+
%V1F32 = fneg <1 x float> undef
%V2F32 = fneg <2 x float> undef
%V4F32 = fneg <4 x float> undef
@@ -550,8 +694,19 @@ define void @fneg_f16() {
define void @fcopysign() {
; CHECK-LABEL: 'fcopysign'
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %BF16 = call bfloat @llvm.copysign.bf16(bfloat undef, bfloat undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %F32 = call float @llvm.copysign.f32(float undef, float undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %F64 = call double @llvm.copysign.f64(double undef, double undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V1BF16 = call <1 x bfloat> @llvm.copysign.v1bf16(<1 x bfloat> undef, <1 x bfloat> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V2BF16 = call <2 x bfloat> @llvm.copysign.v2bf16(<2 x bfloat> undef, <2 x bfloat> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V4BF16 = call <4 x bfloat> @llvm.copysign.v4bf16(<4 x bfloat> undef, <4 x bfloat> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V8BF16 = call <8 x bfloat> @llvm.copysign.v8bf16(<8 x bfloat> undef, <8 x bfloat> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 47 for instruction: %V16BF16 = call <16 x bfloat> @llvm.copysign.v16bf16(<16 x bfloat> undef, <16 x bfloat> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %NXV1BF16 = call <vscale x 1 x bfloat> @llvm.copysign.nxv1bf16(<vscale x 1 x bfloat> undef, <vscale x 1 x bfloat> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %NXV2BF16 = call <vscale x 2 x bfloat> @llvm.copysign.nxv2bf16(<vscale x 2 x bfloat> undef, <vscale x 2 x bfloat> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %NXV4BF16 = call <vscale x 4 x bfloat> @llvm.copysign.nxv4bf16(<vscale x 4 x bfloat> undef, <vscale x 4 x bfloat> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %NXV8BF16 = call <vscale x 8 x bfloat> @llvm.copysign.nxv8bf16(<vscale x 8 x bfloat> undef, <vscale x 8 x bfloat> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %NXV16BF16 = call <vscale x 16 x bfloat> @llvm.copysign.nxv16bf16(<vscale x 16 x bfloat> undef, <vscale x 16 x bfloat> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F32 = call <1 x float> @llvm.copysign.v1f32(<1 x float> undef, <1 x float> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2F32 = call <2 x float> @llvm.copysign.v2f32(<2 x float> undef, <2 x float> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4F32 = call <4 x float> @llvm.copysign.v4f32(<4 x float> undef, <4 x float> undef)
@@ -572,9 +727,22 @@ define void @fcopysign() {
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %NXV8F64 = call <vscale x 8 x double> @llvm.copysign.nxv8f64(<vscale x 8 x double> undef, <vscale x 8 x double> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
;
+ %BF16 = call bfloat @llvm.copysign.bf16(bfloat undef, bfloat undef)
%F32 = call float @llvm.copysign.f32(float undef, float undef)
%F64 = call double @llvm.copysign.f64(double undef, double undef)
+ %V1BF16 = call <1 x bfloat> @llvm.copysign.v1bf16(<1 x bfloat> undef, <1 x bfloat> undef)
+ %V2BF16 = call <2 x bfloat> @llvm.copysign.v2bf16(<2 x bfloat> undef, <2 x bfloat> undef)
+ %V4BF16 = call <4 x bfloat> @llvm.copysign.v4bf16(<4 x bfloat> undef, <4 x bfloat> undef)
+ %V8BF16 = call <8 x bfloat> @llvm.copysign.v8bf16(<8 x bfloat> undef, <8 x bfloat> undef)
+ %V16BF16 = call <16 x bfloat> @llvm.copysign.v16bf16(<16 x bfloat> undef, <16 x bfloat> undef)
+
+ %NXV1BF16 = call <vscale x 1 x bfloat> @llvm.copysign.nxv1bf16(<vscale x 1 x bfloat> undef, <vscale x 1 x bfloat> undef)
+ %NXV2BF16 = call <vscale x 2 x bfloat> @llvm.copysign.nxv2bf16(<vscale x 2 x bfloat> undef, <vscale x 2 x bfloat> undef)
+ %NXV4BF16 = call <vscale x 4 x bfloat> @llvm.copysign.nxv4bf16(<vscale x 4 x bfloat> undef, <vscale x 4 x bfloat> undef)
+ %NXV8BF16 = call <vscale x 8 x bfloat> @llvm.copysign.nxv8bf16(<vscale x 8 x bfloat> undef, <vscale x 8 x bfloat> undef)
+ %NXV16BF16 = call <vscale x 16 x bfloat> @llvm.copysign.nxv16bf16(<vscale x 16 x bfloat> undef, <vscale x 16 x bfloat> undef)
+
%V1F32 = call <1 x float> @llvm.copysign.v1f32(<1 x float> undef, <1 x float> undef)
%V2F32 = call <2 x float> @llvm.copysign.v2f32(<2 x float> undef, <2 x float> undef)
%V4F32 = call <4 x float> @llvm.copysign.v4f32(<4 x float> undef, <4 x float> undef)
@@ -654,8 +822,19 @@ define void @fcopysign_f16() {
define void @fma() {
; CHECK-LABEL: 'fma'
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %BF16 = call bfloat @llvm.fma.bf16(bfloat undef, bfloat undef, bfloat undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %F32 = call float @llvm.fma.f32(float undef, float undef, float undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %F64 = call double @llvm.fma.f64(double undef, double undef, double undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V1BF16 = call <1 x bfloat> @llvm.fma.v1bf16(<1 x bfloat> undef, <1 x bfloat> undef, <1 x bfloat> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V2BF16 = call <2 x bfloat> @llvm.fma.v2bf16(<2 x bfloat> undef, <2 x bfloat> undef, <2 x bfloat> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V4BF16 = call <4 x bfloat> @llvm.fma.v4bf16(<4 x bfloat> undef, <4 x bfloat> undef, <4 x bfloat> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V8BF16 = call <8 x bfloat> @llvm.fma.v8bf16(<8 x bfloat> undef, <8 x bfloat> undef, <8 x bfloat> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 47 for instruction: %V16BF16 = call <16 x bfloat> @llvm.fma.v16bf16(<16 x bfloat> undef, <16 x bfloat> undef, <16 x bfloat> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %NXV1BF16 = call <vscale x 1 x bfloat> @llvm.fma.nxv1bf16(<vscale x 1 x bfloat> undef, <vscale x 1 x bfloat> undef, <vscale x 1 x bfloat> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %NXV2BF16 = call <vscale x 2 x bfloat> @llvm.fma.nxv2bf16(<vscale x 2 x bfloat> undef, <vscale x 2 x bfloat> undef, <vscale x 2 x bfloat> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %NXV4BF16 = call <vscale x 4 x bfloat> @llvm.fma.nxv4bf16(<vscale x 4 x bfloat> undef, <vscale x 4 x bfloat> undef, <vscale x 4 x bfloat> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %NXV8BF16 = call <vscale x 8 x bfloat> @llvm.fma.nxv8bf16(<vscale x 8 x bfloat> undef, <vscale x 8 x bfloat> undef, <vscale x 8 x bfloat> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %NXV16BF16 = call <vscale x 16 x bfloat> @llvm.fma.nxv16bf16(<vscale x 16 x bfloat> undef, <vscale x 16 x bfloat> undef, <vscale x 16 x bfloat> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F32 = call <1 x float> @llvm.fma.v1f32(<1 x float> undef, <1 x float> undef, <1 x float> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2F32 = call <2 x float> @llvm.fma.v2f32(<2 x float> undef, <2 x float> undef, <2 x float> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4F32 = call <4 x float> @llvm.fma.v4f32(<4 x float> undef, <4 x float> undef, <4 x float> undef)
@@ -676,9 +855,22 @@ define void @fma() {
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %NXV8F64 = call <vscale x 8 x double> @llvm.fma.nxv8f64(<vscale x 8 x double> undef, <vscale x 8 x double> undef, <vscale x 8 x double> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
;
+ %BF16 = call bfloat @llvm.fma.bf16(bfloat undef, bfloat undef, bfloat undef)
%F32 = call float @llvm.fma.f32(float undef, float undef, float undef)
%F64 = call double @llvm.fma.f64(double undef, double undef, double undef)
+ %V1BF16 = call <1 x bfloat> @llvm.fma.v1bf16(<1 x bfloat> undef, <1 x bfloat> undef, <1 x bfloat> undef)
+ %V2BF16 = call <2 x bfloat> @llvm.fma.v2bf16(<2 x bfloat> undef, <2 x bfloat> undef, <2 x bfloat> undef)
+ %V4BF16 = call <4 x bfloat> @llvm.fma.v4bf16(<4 x bfloat> undef, <4 x bfloat> undef, <4 x bfloat> undef)
+ %V8BF16 = call <8 x bfloat> @llvm.fma.v8bf16(<8 x bfloat> undef, <8 x bfloat> undef, <8 x bfloat> undef)
+ %V16BF16 = call <16 x bfloat> @llvm.fma.v16bf16(<16 x bfloat> undef, <16 x bfloat> undef, <16 x bfloat> undef)
+
+ %NXV1BF16 = call <vscale x 1 x bfloat> @llvm.fma.nxv1bf16(<vscale x 1 x bfloat> undef, <vscale x 1 x bfloat> undef, <vscale x 1 x bfloat> undef)
+ %NXV2BF16 = call <vscale x 2 x bfloat> @llvm.fma.nxv2bf16(<vscale x 2 x bfloat> undef, <vscale x 2 x bfloat> undef, <vscale x 2 x bfloat> undef)
+ %NXV4BF16 = call <vscale x 4 x bfloat> @llvm.fma.nxv4bf16(<vscale x 4 x bfloat> undef, <vscale x 4 x bfloat> undef, <vscale x 4 x bfloat> undef)
+ %NXV8BF16 = call <vscale x 8 x bfloat> @llvm.fma.nxv8bf16(<vscale x 8 x bfloat> undef, <vscale x 8 x bfloat> undef, <vscale x 8 x bfloat> undef)
+ %NXV16BF16 = call <vscale x 16 x bfloat> @llvm.fma.nxv16bf16(<vscale x 16 x bfloat> undef, <vscale x 16 x bfloat> undef, <vscale x 16 x bfloat> undef)
+
%V1F32 = call <1 x float> @llvm.fma.v1f32(<1 x float> undef, <1 x float> undef, <1 x float> undef)
%V2F32 = call <2 x float> @llvm.fma.v2f32(<2 x float> undef, <2 x float> undef, <2 x float> undef)
%V4F32 = call <4 x float> @llvm.fma.v4f32(<4 x float> undef, <4 x float> undef, <4 x float> undef)
@@ -758,30 +950,45 @@ define void @fma_f16() {
define void @fmuladd() {
; CHECK-LABEL: 'fmuladd'
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %1 = call float @llvm.fmuladd.f32(float undef, float undef, float undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %2 = call double @llvm.fmuladd.f64(double undef, double undef, double undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %3 = call <2 x float> @llvm.fmuladd.v2f32(<2 x float> undef, <2 x float> undef, <2 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %4 = call <4 x float> @llvm.fmuladd.v4f32(<4 x float> undef, <4 x float> undef, <4 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %5 = call <8 x float> @llvm.fmuladd.v8f32(<8 x float> undef, <8 x float> undef, <8 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %6 = call <16 x float> @llvm.fmuladd.v16f32(<16 x float> undef, <16 x float> undef, <16 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %7 = call <2 x double> @llvm.fmuladd.v2f64(<2 x double> undef, <2 x double> undef, <2 x double> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %8 = call <4 x double> @llvm.fmuladd.v4f64(<4 x double> undef, <4 x double> undef, <4 x double> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %9 = call <8 x double> @llvm.fmuladd.v8f64(<8 x double> undef, <8 x double> undef, <8 x double> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %10 = call <16 x double> @llvm.fmuladd.v16f64(<16 x double> undef, <16 x double> undef, <16 x double> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %11 = call <vscale x 1 x float> @llvm.fmuladd.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %12 = call <vscale x 2 x float> @llvm.fmuladd.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %13 = call <vscale x 4 x float> @llvm.fmuladd.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %14 = call <vscale x 8 x float> @llvm.fmuladd.nxv8f32(<vscale x 8 x float> undef, <vscale x 8 x float> undef, <vscale x 8 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %15 = call <vscale x 16 x float> @llvm.fmuladd.nxv16f32(<vscale x 16 x float> undef, <vscale x 16 x float> undef, <vscale x 16 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %16 = call <vscale x 1 x double> @llvm.fmuladd.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %17 = call <vscale x 2 x double> @llvm.fmuladd.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %18 = call <vscale x 4 x double> @llvm.fmuladd.nxv4f64(<vscale x 4 x double> undef, <vscale x 4 x double> undef, <vscale x 4 x double> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %19 = call <vscale x 8 x double> @llvm.fmuladd.nxv8f64(<vscale x 8 x double> undef, <vscale x 8 x double> undef, <vscale x 8 x double> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %20 = call <vscale x 16 x double> @llvm.fmuladd.nxv16f64(<vscale x 16 x double> undef, <vscale x 16 x double> undef, <vscale x 16 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %1 = call bfloat @llvm.fmuladd.bf16(bfloat undef, bfloat undef, bfloat undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %2 = call float @llvm.fmuladd.f32(float undef, float undef, float undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %3 = call double @llvm.fmuladd.f64(double undef, double undef, double undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %4 = call <2 x bfloat> @llvm.fmuladd.v2bf16(<2 x bfloat> undef, <2 x bfloat> undef, <2 x bfloat> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %5 = call <4 x bfloat> @llvm.fmuladd.v4bf16(<4 x bfloat> undef, <4 x bfloat> undef, <4 x bfloat> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %6 = call <8 x bfloat> @llvm.fmuladd.v8bf16(<8 x bfloat> undef, <8 x bfloat> undef, <8 x bfloat> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %7 = call <16 x bfloat> @llvm.fmuladd.v16bf16(<16 x bfloat> undef, <16 x bfloat> undef, <16 x bfloat> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %8 = call <2 x float> @llvm.fmuladd.v2f32(<2 x float> undef, <2 x float> undef, <2 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %9 = call <4 x float> @llvm.fmuladd.v4f32(<4 x float> undef, <4 x float> undef, <4 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %10 = call <8 x float> @llvm.fmuladd.v8f32(<8 x float> undef, <8 x float> undef, <8 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %11 = call <16 x float> @llvm.fmuladd.v16f32(<16 x float> undef, <16 x float> undef, <16 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %12 = call <2 x double> @llvm.fmuladd.v2f64(<2 x double> undef, <2 x double> undef, <2 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %13 = call <4 x double> @llvm.fmuladd.v4f64(<4 x double> undef, <4 x double> undef, <4 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %14 = call <8 x double> @llvm.fmuladd.v8f64(<8 x double> undef, <8 x double> undef, <8 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %15 = call <16 x double> @llvm.fmuladd.v16f64(<16 x double> undef, <16 x double> undef, <16 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %16 = call <vscale x 1 x bfloat> @llvm.fmuladd.nxv1bf16(<vscale x 1 x bfloat> undef, <vscale x 1 x bfloat> undef, <vscale x 1 x bfloat> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %17 = call <vscale x 2 x bfloat> @llvm.fmuladd.nxv2bf16(<vscale x 2 x bfloat> undef, <vscale x 2 x bfloat> undef, <vscale x 2 x bfloat> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %18 = call <vscale x 4 x bfloat> @llvm.fmuladd.nxv4bf16(<vscale x 4 x bfloat> undef, <vscale x 4 x bfloat> undef, <vscale x 4 x bfloat> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %19 = call <vscale x 8 x bfloat> @llvm.fmuladd.nxv8bf16(<vscale x 8 x bfloat> undef, <vscale x 8 x bfloat> undef, <vscale x 8 x bfloat> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %20 = call <vscale x 16 x bfloat> @llvm.fmuladd.nxv16bf16(<vscale x 16 x bfloat> undef, <vscale x 16 x bfloat> undef, <vscale x 16 x bfloat> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %21 = call <vscale x 1 x float> @llvm.fmuladd.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %22 = call <vscale x 2 x float> @llvm.fmuladd.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %23 = call <vscale x 4 x float> @llvm.fmuladd.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %24 = call <vscale x 8 x float> @llvm.fmuladd.nxv8f32(<vscale x 8 x float> undef, <vscale x 8 x float> undef, <vscale x 8 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %25 = call <vscale x 16 x float> @llvm.fmuladd.nxv16f32(<vscale x 16 x float> undef, <vscale x 16 x float> undef, <vscale x 16 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %26 = call <vscale x 1 x double> @llvm.fmuladd.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %27 = call <vscale x 2 x double> @llvm.fmuladd.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %28 = call <vscale x 4 x double> @llvm.fmuladd.nxv4f64(<vscale x 4 x double> undef, <vscale x 4 x double> undef, <vscale x 4 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %29 = call <vscale x 8 x double> @llvm.fmuladd.nxv8f64(<vscale x 8 x double> undef, <vscale x 8 x double> undef, <vscale x 8 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %30 = call <vscale x 16 x double> @llvm.fmuladd.nxv16f64(<vscale x 16 x double> undef, <vscale x 16 x double> undef, <vscale x 16 x double> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
;
+ call bfloat @llvm.fmuladd.bf16(bfloat undef, bfloat undef, bfloat undef)
call float @llvm.fmuladd.f32(float undef, float undef, float undef)
call double @llvm.fmuladd.f64(double undef, double undef, double undef)
+ call <2 x bfloat> @llvm.fmuladd.v2bf16(<2 x bfloat> undef, <2 x bfloat> undef, <2 x bfloat> undef)
+ call <4 x bfloat> @llvm.fmuladd.v4bf16(<4 x bfloat> undef, <4 x bfloat> undef, <4 x bfloat> undef)
+ call <8 x bfloat> @llvm.fmuladd.v8bf16(<8 x bfloat> undef, <8 x bfloat> undef, <8 x bfloat> undef)
+ call <16 x bfloat> @llvm.fmuladd.v16bf16(<16 x bfloat> undef, <16 x bfloat> undef, <16 x bfloat> undef)
call <2 x float> @llvm.fmuladd.v2f32(<2 x float> undef, <2 x float> undef, <2 x float> undef)
call <4 x float> @llvm.fmuladd.v4f32(<4 x float> undef, <4 x float> undef, <4 x float> undef)
call <8 x float> @llvm.fmuladd.v8f32(<8 x float> undef, <8 x float> undef, <8 x float> undef)
@@ -790,6 +997,11 @@ define void @fmuladd() {
call <4 x double> @llvm.fmuladd.v4f64(<4 x double> undef, <4 x double> undef, <4 x double> undef)
call <8 x double> @llvm.fmuladd.v8f64(<8 x double> undef, <8 x double> undef, <8 x double> undef)
call <16 x double> @llvm.fmuladd.v16f64(<16 x double> undef, <16 x double> undef, <16 x double> undef)
+ call <vscale x 1 x bfloat> @llvm.fmuladd.nxv1bf16(<vscale x 1 x bfloat> undef, <vscale x 1 x bfloat> undef, <vscale x 1 x bfloat> undef)
+ call <vscale x 2 x bfloat> @llvm.fmuladd.nxv2bf16(<vscale x 2 x bfloat> undef, <vscale x 2 x bfloat> undef, <vscale x 2 x bfloat> undef)
+ call <vscale x 4 x bfloat> @llvm.fmuladd.nxv4bf16(<vscale x 4 x bfloat> undef, <vscale x 4 x bfloat> undef, <vscale x 4 x bfloat> undef)
+ call <vscale x 8 x bfloat> @llvm.fmuladd.nxv8bf16(<vscale x 8 x bfloat> undef, <vscale x 8 x bfloat> undef, <vscale x 8 x bfloat> undef)
+ call <vscale x 16 x bfloat> @llvm.fmuladd.nxv16bf16(<vscale x 16 x bfloat> undef, <vscale x 16 x bfloat> undef, <vscale x 16 x bfloat> undef)
call <vscale x 1 x float> @llvm.fmuladd.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef)
call <vscale x 2 x float> @llvm.fmuladd.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef)
call <vscale x 4 x float> @llvm.fmuladd.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef)
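For context on the renumbered fmuladd checks above: llvm.fmuladd.* computes a*b+c and may be lowered either to a single fused multiply-add or to separate fmul/fadd, which is what the cost model is estimating here. A minimal bf16 instance (illustrative only, not part of the patch):

    ; the intrinsic is overloaded per element type and vector length
    declare <4 x bfloat> @llvm.fmuladd.v4bf16(<4 x bfloat>, <4 x bfloat>, <4 x bfloat>)

    define <4 x bfloat> @mad_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b, <4 x bfloat> %c) {
      %r = call <4 x bfloat> @llvm.fmuladd.v4bf16(<4 x bfloat> %a, <4 x bfloat> %b, <4 x bfloat> %c)
      ret <4 x bfloat> %r
    }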
diff --git a/llvm/test/Analysis/CostModel/RISCV/rvv-intrinsics.ll b/llvm/test/Analysis/CostModel/RISCV/rvv-intrinsics.ll
index 8076577..bb98508 100644
--- a/llvm/test/Analysis/CostModel/RISCV/rvv-intrinsics.ll
+++ b/llvm/test/Analysis/CostModel/RISCV/rvv-intrinsics.ll
@@ -1029,10 +1029,10 @@ define void @store() {
define void @strided_load() {
; CHECK-LABEL: 'strided_load'
-; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %ti1_2 = call <2 x i1> @llvm.experimental.vp.strided.load.v2i1.p0.i64(ptr undef, i64 undef, <2 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 34 for instruction: %ti1_4 = call <4 x i1> @llvm.experimental.vp.strided.load.v4i1.p0.i64(ptr undef, i64 undef, <4 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 70 for instruction: %ti1_8 = call <8 x i1> @llvm.experimental.vp.strided.load.v8i1.p0.i64(ptr undef, i64 undef, <8 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 142 for instruction: %ti1_16 = call <16 x i1> @llvm.experimental.vp.strided.load.v16i1.p0.i64(ptr undef, i64 undef, <16 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %ti1_2 = call <2 x i1> @llvm.experimental.vp.strided.load.v2i1.p0.i64(ptr undef, i64 undef, <2 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %ti1_4 = call <4 x i1> @llvm.experimental.vp.strided.load.v4i1.p0.i64(ptr undef, i64 undef, <4 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 33 for instruction: %ti1_8 = call <8 x i1> @llvm.experimental.vp.strided.load.v8i1.p0.i64(ptr undef, i64 undef, <8 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 65 for instruction: %ti1_16 = call <16 x i1> @llvm.experimental.vp.strided.load.v16i1.p0.i64(ptr undef, i64 undef, <16 x i1> undef, i32 undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %t0 = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr undef, i64 undef, <2 x i1> undef, i32 undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %t2 = call <4 x i8> @llvm.experimental.vp.strided.load.v4i8.p0.i64(ptr undef, i64 undef, <4 x i1> undef, i32 undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %t4 = call <8 x i8> @llvm.experimental.vp.strided.load.v8i8.p0.i64(ptr undef, i64 undef, <8 x i1> undef, i32 undef)
@@ -1056,10 +1056,10 @@ define void @strided_load() {
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
;
; TYPEBASED-LABEL: 'strided_load'
-; TYPEBASED-NEXT: Cost Model: Found an estimated cost of 20 for instruction: %ti1_2 = call <2 x i1> @llvm.experimental.vp.strided.load.v2i1.p0.i64(ptr undef, i64 undef, <2 x i1> undef, i32 undef)
-; TYPEBASED-NEXT: Cost Model: Found an estimated cost of 42 for instruction: %ti1_4 = call <4 x i1> @llvm.experimental.vp.strided.load.v4i1.p0.i64(ptr undef, i64 undef, <4 x i1> undef, i32 undef)
-; TYPEBASED-NEXT: Cost Model: Found an estimated cost of 86 for instruction: %ti1_8 = call <8 x i1> @llvm.experimental.vp.strided.load.v8i1.p0.i64(ptr undef, i64 undef, <8 x i1> undef, i32 undef)
-; TYPEBASED-NEXT: Cost Model: Found an estimated cost of 174 for instruction: %ti1_16 = call <16 x i1> @llvm.experimental.vp.strided.load.v16i1.p0.i64(ptr undef, i64 undef, <16 x i1> undef, i32 undef)
+; TYPEBASED-NEXT: Cost Model: Found an estimated cost of 13 for instruction: %ti1_2 = call <2 x i1> @llvm.experimental.vp.strided.load.v2i1.p0.i64(ptr undef, i64 undef, <2 x i1> undef, i32 undef)
+; TYPEBASED-NEXT: Cost Model: Found an estimated cost of 25 for instruction: %ti1_4 = call <4 x i1> @llvm.experimental.vp.strided.load.v4i1.p0.i64(ptr undef, i64 undef, <4 x i1> undef, i32 undef)
+; TYPEBASED-NEXT: Cost Model: Found an estimated cost of 49 for instruction: %ti1_8 = call <8 x i1> @llvm.experimental.vp.strided.load.v8i1.p0.i64(ptr undef, i64 undef, <8 x i1> undef, i32 undef)
+; TYPEBASED-NEXT: Cost Model: Found an estimated cost of 97 for instruction: %ti1_16 = call <16 x i1> @llvm.experimental.vp.strided.load.v16i1.p0.i64(ptr undef, i64 undef, <16 x i1> undef, i32 undef)
; TYPEBASED-NEXT: Cost Model: Found an estimated cost of 11 for instruction: %t0 = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr undef, i64 undef, <2 x i1> undef, i32 undef)
; TYPEBASED-NEXT: Cost Model: Found an estimated cost of 23 for instruction: %t2 = call <4 x i8> @llvm.experimental.vp.strided.load.v4i8.p0.i64(ptr undef, i64 undef, <4 x i1> undef, i32 undef)
; TYPEBASED-NEXT: Cost Model: Found an estimated cost of 47 for instruction: %t4 = call <8 x i8> @llvm.experimental.vp.strided.load.v8i8.p0.i64(ptr undef, i64 undef, <8 x i1> undef, i32 undef)
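The rvv-intrinsics.ll hunks above only lower the estimated costs of the existing <N x i1> strided loads; the intrinsic itself (pointer, byte stride, mask, explicit vector length) is unchanged. For reference, a standalone instance of the shape being costed (illustrative only):

    declare <4 x i1> @llvm.experimental.vp.strided.load.v4i1.p0.i64(ptr, i64, <4 x i1>, i32)

    define <4 x i1> @strided_i1(ptr %p, i64 %stride, <4 x i1> %mask, i32 %evl) {
      ; loads up to %evl i1 elements, %stride bytes apart, under %mask
      %v = call <4 x i1> @llvm.experimental.vp.strided.load.v4i1.p0.i64(ptr %p, i64 %stride, <4 x i1> %mask, i32 %evl)
      ret <4 x i1> %v
    }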
diff --git a/llvm/test/Analysis/CostModel/RISCV/rvv-select.ll b/llvm/test/Analysis/CostModel/RISCV/rvv-select.ll
index a20d86c..9eadcac 100644
--- a/llvm/test/Analysis/CostModel/RISCV/rvv-select.ll
+++ b/llvm/test/Analysis/CostModel/RISCV/rvv-select.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py
; RUN: opt -passes="print<cost-model>" 2>&1 -disable-output -mtriple=riscv64 -mattr=+v,+f,+d,+zfh,+zvfh -riscv-v-fixed-length-vector-lmul-max=1 < %s | FileCheck %s
+; RUN: opt -passes="print<cost-model>" 2>&1 -disable-output -mtriple=riscv64 -mattr=+v,+f,+d,+zfh,+zvfh -riscv-v-fixed-length-vector-lmul-max=1 --type-based-intrinsic-cost=true < %s | FileCheck %s
; Check that we don't crash querying costs when vectors are not enabled.
; RUN: opt -passes="print<cost-model>" 2>&1 -disable-output -mtriple=riscv64
@@ -30,106 +31,166 @@ define void @select() {
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %23 = select <vscale x 8 x i1> undef, <vscale x 8 x i1> undef, <vscale x 8 x i1> undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %24 = select <vscale x 16 x i1> undef, <vscale x 16 x i1> undef, <vscale x 16 x i1> undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %25 = select <vscale x 32 x i1> undef, <vscale x 32 x i1> undef, <vscale x 32 x i1> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %26 = select i1 undef, i8 undef, i8 undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %27 = select i1 undef, <1 x i8> undef, <1 x i8> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %28 = select i1 undef, <2 x i8> undef, <2 x i8> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %29 = select i1 undef, <4 x i8> undef, <4 x i8> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %30 = select i1 undef, <8 x i8> undef, <8 x i8> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %31 = select i1 undef, <16 x i8> undef, <16 x i8> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %32 = select i1 undef, <32 x i8> undef, <32 x i8> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %33 = select <1 x i1> undef, <1 x i8> undef, <1 x i8> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %34 = select <2 x i1> undef, <2 x i8> undef, <2 x i8> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %35 = select <4 x i1> undef, <4 x i8> undef, <4 x i8> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %36 = select <8 x i1> undef, <8 x i8> undef, <8 x i8> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %37 = select <16 x i1> undef, <16 x i8> undef, <16 x i8> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %38 = select <32 x i1> undef, <32 x i8> undef, <32 x i8> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %39 = select i1 undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %40 = select i1 undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %41 = select i1 undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %42 = select i1 undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %43 = select i1 undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %44 = select i1 undef, <vscale x 32 x i8> undef, <vscale x 32 x i8> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %45 = select <vscale x 1 x i1> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %46 = select <vscale x 2 x i1> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %47 = select <vscale x 4 x i1> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %48 = select <vscale x 8 x i1> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %49 = select <vscale x 16 x i1> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %50 = select <vscale x 32 x i1> undef, <vscale x 32 x i8> undef, <vscale x 32 x i8> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %51 = select i1 undef, i16 undef, i16 undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %52 = select i1 undef, <1 x i16> undef, <1 x i16> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %53 = select i1 undef, <2 x i16> undef, <2 x i16> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %54 = select i1 undef, <4 x i16> undef, <4 x i16> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %55 = select i1 undef, <8 x i16> undef, <8 x i16> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %56 = select i1 undef, <16 x i16> undef, <16 x i16> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %57 = select i1 undef, <32 x i16> undef, <32 x i16> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %58 = select <1 x i1> undef, <1 x i16> undef, <1 x i16> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %59 = select <2 x i1> undef, <2 x i16> undef, <2 x i16> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %60 = select <4 x i1> undef, <4 x i16> undef, <4 x i16> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %61 = select <8 x i1> undef, <8 x i16> undef, <8 x i16> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %62 = select <16 x i1> undef, <16 x i16> undef, <16 x i16> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %63 = select <32 x i1> undef, <32 x i16> undef, <32 x i16> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %64 = select i1 undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %65 = select i1 undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %66 = select i1 undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %67 = select i1 undef, <vscale x 8 x i16> undef, <vscale x 8 x i16> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %68 = select i1 undef, <vscale x 16 x i16> undef, <vscale x 16 x i16> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 24 for instruction: %69 = select i1 undef, <vscale x 32 x i16> undef, <vscale x 32 x i16> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %70 = select <vscale x 1 x i1> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %71 = select <vscale x 2 x i1> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %72 = select <vscale x 4 x i1> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %73 = select <vscale x 8 x i1> undef, <vscale x 8 x i16> undef, <vscale x 8 x i16> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %74 = select <vscale x 16 x i1> undef, <vscale x 16 x i16> undef, <vscale x 16 x i16> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %75 = select <vscale x 32 x i1> undef, <vscale x 32 x i16> undef, <vscale x 32 x i16> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %76 = select i1 undef, i32 undef, i32 undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %77 = select i1 undef, <1 x i32> undef, <1 x i32> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %78 = select i1 undef, <2 x i32> undef, <2 x i32> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %79 = select i1 undef, <4 x i32> undef, <4 x i32> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %80 = select i1 undef, <8 x i32> undef, <8 x i32> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %81 = select i1 undef, <16 x i32> undef, <16 x i32> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 24 for instruction: %82 = select i1 undef, <32 x i32> undef, <32 x i32> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %83 = select <1 x i1> undef, <1 x i32> undef, <1 x i32> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %84 = select <2 x i1> undef, <2 x i32> undef, <2 x i32> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %85 = select <4 x i1> undef, <4 x i32> undef, <4 x i32> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %86 = select <8 x i1> undef, <8 x i32> undef, <8 x i32> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %87 = select <16 x i1> undef, <16 x i32> undef, <16 x i32> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %88 = select <32 x i1> undef, <32 x i32> undef, <32 x i32> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %89 = select i1 undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %90 = select i1 undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %91 = select i1 undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %92 = select i1 undef, <vscale x 8 x i32> undef, <vscale x 8 x i32> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 24 for instruction: %93 = select i1 undef, <vscale x 16 x i32> undef, <vscale x 16 x i32> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 48 for instruction: %94 = select i1 undef, <vscale x 32 x i32> undef, <vscale x 32 x i32> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %95 = select <vscale x 1 x i1> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %96 = select <vscale x 2 x i1> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %97 = select <vscale x 4 x i1> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %98 = select <vscale x 8 x i1> undef, <vscale x 8 x i32> undef, <vscale x 8 x i32> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %99 = select <vscale x 16 x i1> undef, <vscale x 16 x i32> undef, <vscale x 16 x i32> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %100 = select <vscale x 32 x i1> undef, <vscale x 32 x i32> undef, <vscale x 32 x i32> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %101 = select i1 undef, i64 undef, i64 undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %102 = select i1 undef, <1 x i64> undef, <1 x i64> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %103 = select i1 undef, <2 x i64> undef, <2 x i64> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %104 = select i1 undef, <4 x i64> undef, <4 x i64> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %105 = select i1 undef, <8 x i64> undef, <8 x i64> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 24 for instruction: %106 = select i1 undef, <16 x i64> undef, <16 x i64> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 48 for instruction: %107 = select i1 undef, <32 x i64> undef, <32 x i64> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %108 = select <1 x i1> undef, <1 x i64> undef, <1 x i64> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %109 = select <2 x i1> undef, <2 x i64> undef, <2 x i64> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %110 = select <4 x i1> undef, <4 x i64> undef, <4 x i64> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %111 = select <8 x i1> undef, <8 x i64> undef, <8 x i64> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %112 = select <16 x i1> undef, <16 x i64> undef, <16 x i64> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %113 = select <32 x i1> undef, <32 x i64> undef, <32 x i64> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %114 = select i1 undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %115 = select i1 undef, <vscale x 2 x i64> undef, <vscale x 2 x i64> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %116 = select i1 undef, <vscale x 4 x i64> undef, <vscale x 4 x i64> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 24 for instruction: %117 = select i1 undef, <vscale x 8 x i64> undef, <vscale x 8 x i64> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 48 for instruction: %118 = select i1 undef, <vscale x 16 x i64> undef, <vscale x 16 x i64> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 96 for instruction: %119 = select i1 undef, <vscale x 32 x i64> undef, <vscale x 32 x i64> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %120 = select <vscale x 1 x i1> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %121 = select <vscale x 2 x i1> undef, <vscale x 2 x i64> undef, <vscale x 2 x i64> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %122 = select <vscale x 4 x i1> undef, <vscale x 4 x i64> undef, <vscale x 4 x i64> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %123 = select <vscale x 8 x i1> undef, <vscale x 8 x i64> undef, <vscale x 8 x i64> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %124 = select <vscale x 16 x i1> undef, <vscale x 16 x i64> undef, <vscale x 16 x i64> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %125 = select <vscale x 32 x i1> undef, <vscale x 32 x i64> undef, <vscale x 32 x i64> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %26 = call <1 x i1> @llvm.vp.select.v1i1(<1 x i1> undef, <1 x i1> undef, <1 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %27 = call <2 x i1> @llvm.vp.select.v2i1(<2 x i1> undef, <2 x i1> undef, <2 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %28 = call <4 x i1> @llvm.vp.select.v4i1(<4 x i1> undef, <4 x i1> undef, <4 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %29 = call <8 x i1> @llvm.vp.select.v8i1(<8 x i1> undef, <8 x i1> undef, <8 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %30 = call <16 x i1> @llvm.vp.select.v16i1(<16 x i1> undef, <16 x i1> undef, <16 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %31 = call <32 x i1> @llvm.vp.select.v32i1(<32 x i1> undef, <32 x i1> undef, <32 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %32 = call <vscale x 1 x i1> @llvm.vp.select.nxv1i1(<vscale x 1 x i1> undef, <vscale x 1 x i1> undef, <vscale x 1 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %33 = call <vscale x 2 x i1> @llvm.vp.select.nxv2i1(<vscale x 2 x i1> undef, <vscale x 2 x i1> undef, <vscale x 2 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %34 = call <vscale x 4 x i1> @llvm.vp.select.nxv4i1(<vscale x 4 x i1> undef, <vscale x 4 x i1> undef, <vscale x 4 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %35 = call <vscale x 8 x i1> @llvm.vp.select.nxv8i1(<vscale x 8 x i1> undef, <vscale x 8 x i1> undef, <vscale x 8 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %36 = call <vscale x 16 x i1> @llvm.vp.select.nxv16i1(<vscale x 16 x i1> undef, <vscale x 16 x i1> undef, <vscale x 16 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %37 = call <vscale x 32 x i1> @llvm.vp.select.nxv32i1(<vscale x 32 x i1> undef, <vscale x 32 x i1> undef, <vscale x 32 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %38 = select i1 undef, i8 undef, i8 undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %39 = select i1 undef, <1 x i8> undef, <1 x i8> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %40 = select i1 undef, <2 x i8> undef, <2 x i8> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %41 = select i1 undef, <4 x i8> undef, <4 x i8> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %42 = select i1 undef, <8 x i8> undef, <8 x i8> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %43 = select i1 undef, <16 x i8> undef, <16 x i8> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %44 = select i1 undef, <32 x i8> undef, <32 x i8> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %45 = select <1 x i1> undef, <1 x i8> undef, <1 x i8> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %46 = select <2 x i1> undef, <2 x i8> undef, <2 x i8> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %47 = select <4 x i1> undef, <4 x i8> undef, <4 x i8> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %48 = select <8 x i1> undef, <8 x i8> undef, <8 x i8> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %49 = select <16 x i1> undef, <16 x i8> undef, <16 x i8> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %50 = select <32 x i1> undef, <32 x i8> undef, <32 x i8> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %51 = select i1 undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %52 = select i1 undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %53 = select i1 undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %54 = select i1 undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %55 = select i1 undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %56 = select i1 undef, <vscale x 32 x i8> undef, <vscale x 32 x i8> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %57 = select <vscale x 1 x i1> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %58 = select <vscale x 2 x i1> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %59 = select <vscale x 4 x i1> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %60 = select <vscale x 8 x i1> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %61 = select <vscale x 16 x i1> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %62 = select <vscale x 32 x i1> undef, <vscale x 32 x i8> undef, <vscale x 32 x i8> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %63 = call <1 x i8> @llvm.vp.select.v1i8(<1 x i1> undef, <1 x i8> undef, <1 x i8> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %64 = call <2 x i8> @llvm.vp.select.v2i8(<2 x i1> undef, <2 x i8> undef, <2 x i8> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %65 = call <4 x i8> @llvm.vp.select.v4i8(<4 x i1> undef, <4 x i8> undef, <4 x i8> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %66 = call <8 x i8> @llvm.vp.select.v8i8(<8 x i1> undef, <8 x i8> undef, <8 x i8> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %67 = call <16 x i8> @llvm.vp.select.v16i8(<16 x i1> undef, <16 x i8> undef, <16 x i8> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %68 = call <32 x i8> @llvm.vp.select.v32i8(<32 x i1> undef, <32 x i8> undef, <32 x i8> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %69 = call <vscale x 1 x i8> @llvm.vp.select.nxv1i8(<vscale x 1 x i1> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %70 = call <vscale x 2 x i8> @llvm.vp.select.nxv2i8(<vscale x 2 x i1> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %71 = call <vscale x 4 x i8> @llvm.vp.select.nxv4i8(<vscale x 4 x i1> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %72 = call <vscale x 8 x i8> @llvm.vp.select.nxv8i8(<vscale x 8 x i1> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %73 = call <vscale x 16 x i8> @llvm.vp.select.nxv16i8(<vscale x 16 x i1> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %74 = call <vscale x 32 x i8> @llvm.vp.select.nxv32i8(<vscale x 32 x i1> undef, <vscale x 32 x i8> undef, <vscale x 32 x i8> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %75 = select i1 undef, i16 undef, i16 undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %76 = select i1 undef, <1 x i16> undef, <1 x i16> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %77 = select i1 undef, <2 x i16> undef, <2 x i16> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %78 = select i1 undef, <4 x i16> undef, <4 x i16> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %79 = select i1 undef, <8 x i16> undef, <8 x i16> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %80 = select i1 undef, <16 x i16> undef, <16 x i16> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %81 = select i1 undef, <32 x i16> undef, <32 x i16> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %82 = select <1 x i1> undef, <1 x i16> undef, <1 x i16> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %83 = select <2 x i1> undef, <2 x i16> undef, <2 x i16> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %84 = select <4 x i1> undef, <4 x i16> undef, <4 x i16> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %85 = select <8 x i1> undef, <8 x i16> undef, <8 x i16> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %86 = select <16 x i1> undef, <16 x i16> undef, <16 x i16> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %87 = select <32 x i1> undef, <32 x i16> undef, <32 x i16> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %88 = select i1 undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %89 = select i1 undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %90 = select i1 undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %91 = select i1 undef, <vscale x 8 x i16> undef, <vscale x 8 x i16> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %92 = select i1 undef, <vscale x 16 x i16> undef, <vscale x 16 x i16> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 24 for instruction: %93 = select i1 undef, <vscale x 32 x i16> undef, <vscale x 32 x i16> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %94 = select <vscale x 1 x i1> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %95 = select <vscale x 2 x i1> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %96 = select <vscale x 4 x i1> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %97 = select <vscale x 8 x i1> undef, <vscale x 8 x i16> undef, <vscale x 8 x i16> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %98 = select <vscale x 16 x i1> undef, <vscale x 16 x i16> undef, <vscale x 16 x i16> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %99 = select <vscale x 32 x i1> undef, <vscale x 32 x i16> undef, <vscale x 32 x i16> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %100 = call <1 x i16> @llvm.vp.select.v1i16(<1 x i1> undef, <1 x i16> undef, <1 x i16> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %101 = call <2 x i16> @llvm.vp.select.v2i16(<2 x i1> undef, <2 x i16> undef, <2 x i16> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %102 = call <4 x i16> @llvm.vp.select.v4i16(<4 x i1> undef, <4 x i16> undef, <4 x i16> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %103 = call <8 x i16> @llvm.vp.select.v8i16(<8 x i1> undef, <8 x i16> undef, <8 x i16> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %104 = call <16 x i16> @llvm.vp.select.v16i16(<16 x i1> undef, <16 x i16> undef, <16 x i16> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %105 = call <32 x i16> @llvm.vp.select.v32i16(<32 x i1> undef, <32 x i16> undef, <32 x i16> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %106 = call <vscale x 1 x i16> @llvm.vp.select.nxv1i16(<vscale x 1 x i1> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %107 = call <vscale x 2 x i16> @llvm.vp.select.nxv2i16(<vscale x 2 x i1> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %108 = call <vscale x 4 x i16> @llvm.vp.select.nxv4i16(<vscale x 4 x i1> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %109 = call <vscale x 8 x i16> @llvm.vp.select.nxv8i16(<vscale x 8 x i1> undef, <vscale x 8 x i16> undef, <vscale x 8 x i16> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %110 = call <vscale x 16 x i16> @llvm.vp.select.nxv16i16(<vscale x 16 x i1> undef, <vscale x 16 x i16> undef, <vscale x 16 x i16> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %111 = call <vscale x 32 x i16> @llvm.vp.select.nxv32i16(<vscale x 32 x i1> undef, <vscale x 32 x i16> undef, <vscale x 32 x i16> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %112 = select i1 undef, i32 undef, i32 undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %113 = select i1 undef, <1 x i32> undef, <1 x i32> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %114 = select i1 undef, <2 x i32> undef, <2 x i32> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %115 = select i1 undef, <4 x i32> undef, <4 x i32> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %116 = select i1 undef, <8 x i32> undef, <8 x i32> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %117 = select i1 undef, <16 x i32> undef, <16 x i32> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 24 for instruction: %118 = select i1 undef, <32 x i32> undef, <32 x i32> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %119 = select <1 x i1> undef, <1 x i32> undef, <1 x i32> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %120 = select <2 x i1> undef, <2 x i32> undef, <2 x i32> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %121 = select <4 x i1> undef, <4 x i32> undef, <4 x i32> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %122 = select <8 x i1> undef, <8 x i32> undef, <8 x i32> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %123 = select <16 x i1> undef, <16 x i32> undef, <16 x i32> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %124 = select <32 x i1> undef, <32 x i32> undef, <32 x i32> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %125 = select i1 undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %126 = select i1 undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %127 = select i1 undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %128 = select i1 undef, <vscale x 8 x i32> undef, <vscale x 8 x i32> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 24 for instruction: %129 = select i1 undef, <vscale x 16 x i32> undef, <vscale x 16 x i32> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 48 for instruction: %130 = select i1 undef, <vscale x 32 x i32> undef, <vscale x 32 x i32> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %131 = select <vscale x 1 x i1> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %132 = select <vscale x 2 x i1> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %133 = select <vscale x 4 x i1> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %134 = select <vscale x 8 x i1> undef, <vscale x 8 x i32> undef, <vscale x 8 x i32> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %135 = select <vscale x 16 x i1> undef, <vscale x 16 x i32> undef, <vscale x 16 x i32> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %136 = select <vscale x 32 x i1> undef, <vscale x 32 x i32> undef, <vscale x 32 x i32> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %137 = call <1 x i32> @llvm.vp.select.v1i32(<1 x i1> undef, <1 x i32> undef, <1 x i32> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %138 = call <2 x i32> @llvm.vp.select.v2i32(<2 x i1> undef, <2 x i32> undef, <2 x i32> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %139 = call <4 x i32> @llvm.vp.select.v4i32(<4 x i1> undef, <4 x i32> undef, <4 x i32> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %140 = call <8 x i32> @llvm.vp.select.v8i32(<8 x i1> undef, <8 x i32> undef, <8 x i32> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %141 = call <16 x i32> @llvm.vp.select.v16i32(<16 x i1> undef, <16 x i32> undef, <16 x i32> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %142 = call <32 x i32> @llvm.vp.select.v32i32(<32 x i1> undef, <32 x i32> undef, <32 x i32> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %143 = call <vscale x 1 x i32> @llvm.vp.select.nxv1i32(<vscale x 1 x i1> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %144 = call <vscale x 2 x i32> @llvm.vp.select.nxv2i32(<vscale x 2 x i1> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %145 = call <vscale x 4 x i32> @llvm.vp.select.nxv4i32(<vscale x 4 x i1> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %146 = call <vscale x 8 x i32> @llvm.vp.select.nxv8i32(<vscale x 8 x i1> undef, <vscale x 8 x i32> undef, <vscale x 8 x i32> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %147 = call <vscale x 16 x i32> @llvm.vp.select.nxv16i32(<vscale x 16 x i1> undef, <vscale x 16 x i32> undef, <vscale x 16 x i32> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %148 = call <vscale x 32 x i32> @llvm.vp.select.nxv32i32(<vscale x 32 x i1> undef, <vscale x 32 x i32> undef, <vscale x 32 x i32> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %149 = select i1 undef, i64 undef, i64 undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %150 = select i1 undef, <1 x i64> undef, <1 x i64> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %151 = select i1 undef, <2 x i64> undef, <2 x i64> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %152 = select i1 undef, <4 x i64> undef, <4 x i64> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %153 = select i1 undef, <8 x i64> undef, <8 x i64> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 24 for instruction: %154 = select i1 undef, <16 x i64> undef, <16 x i64> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 48 for instruction: %155 = select i1 undef, <32 x i64> undef, <32 x i64> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %156 = select <1 x i1> undef, <1 x i64> undef, <1 x i64> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %157 = select <2 x i1> undef, <2 x i64> undef, <2 x i64> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %158 = select <4 x i1> undef, <4 x i64> undef, <4 x i64> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %159 = select <8 x i1> undef, <8 x i64> undef, <8 x i64> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %160 = select <16 x i1> undef, <16 x i64> undef, <16 x i64> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %161 = select <32 x i1> undef, <32 x i64> undef, <32 x i64> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %162 = select i1 undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %163 = select i1 undef, <vscale x 2 x i64> undef, <vscale x 2 x i64> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %164 = select i1 undef, <vscale x 4 x i64> undef, <vscale x 4 x i64> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 24 for instruction: %165 = select i1 undef, <vscale x 8 x i64> undef, <vscale x 8 x i64> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 48 for instruction: %166 = select i1 undef, <vscale x 16 x i64> undef, <vscale x 16 x i64> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 96 for instruction: %167 = select i1 undef, <vscale x 32 x i64> undef, <vscale x 32 x i64> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %168 = select <vscale x 1 x i1> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %169 = select <vscale x 2 x i1> undef, <vscale x 2 x i64> undef, <vscale x 2 x i64> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %170 = select <vscale x 4 x i1> undef, <vscale x 4 x i64> undef, <vscale x 4 x i64> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %171 = select <vscale x 8 x i1> undef, <vscale x 8 x i64> undef, <vscale x 8 x i64> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %172 = select <vscale x 16 x i1> undef, <vscale x 16 x i64> undef, <vscale x 16 x i64> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %173 = select <vscale x 32 x i1> undef, <vscale x 32 x i64> undef, <vscale x 32 x i64> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %174 = call <1 x i64> @llvm.vp.select.v1i64(<1 x i1> undef, <1 x i64> undef, <1 x i64> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %175 = call <2 x i64> @llvm.vp.select.v2i64(<2 x i1> undef, <2 x i64> undef, <2 x i64> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %176 = call <4 x i64> @llvm.vp.select.v4i64(<4 x i1> undef, <4 x i64> undef, <4 x i64> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %177 = call <8 x i64> @llvm.vp.select.v8i64(<8 x i1> undef, <8 x i64> undef, <8 x i64> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %178 = call <16 x i64> @llvm.vp.select.v16i64(<16 x i1> undef, <16 x i64> undef, <16 x i64> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %179 = call <32 x i64> @llvm.vp.select.v32i64(<32 x i1> undef, <32 x i64> undef, <32 x i64> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %180 = call <vscale x 1 x i64> @llvm.vp.select.nxv1i64(<vscale x 1 x i1> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %181 = call <vscale x 2 x i64> @llvm.vp.select.nxv2i64(<vscale x 2 x i1> undef, <vscale x 2 x i64> undef, <vscale x 2 x i64> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %182 = call <vscale x 4 x i64> @llvm.vp.select.nxv4i64(<vscale x 4 x i1> undef, <vscale x 4 x i64> undef, <vscale x 4 x i64> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %183 = call <vscale x 8 x i64> @llvm.vp.select.nxv8i64(<vscale x 8 x i1> undef, <vscale x 8 x i64> undef, <vscale x 8 x i64> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %184 = call <vscale x 16 x i64> @llvm.vp.select.nxv16i64(<vscale x 16 x i1> undef, <vscale x 16 x i64> undef, <vscale x 16 x i64> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %185 = call <vscale x 32 x i64> @llvm.vp.select.nxv32i64(<vscale x 32 x i1> undef, <vscale x 32 x i64> undef, <vscale x 32 x i64> undef, i32 undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
;
select i1 undef, i1 undef, i1 undef
@@ -158,6 +219,19 @@ define void @select() {
select <vscale x 16 x i1> undef, <vscale x 16 x i1> undef, <vscale x 16 x i1> undef
select <vscale x 32 x i1> undef, <vscale x 32 x i1> undef, <vscale x 32 x i1> undef
+ call <1 x i1> @llvm.vp.select.v1i1(<1 x i1> undef, <1 x i1> undef, <1 x i1> undef, i32 undef)
+ call <2 x i1> @llvm.vp.select.v2i1(<2 x i1> undef, <2 x i1> undef, <2 x i1> undef, i32 undef)
+ call <4 x i1> @llvm.vp.select.v4i1(<4 x i1> undef, <4 x i1> undef, <4 x i1> undef, i32 undef)
+ call <8 x i1> @llvm.vp.select.v8i1(<8 x i1> undef, <8 x i1> undef, <8 x i1> undef, i32 undef)
+ call <16 x i1> @llvm.vp.select.v16i1(<16 x i1> undef, <16 x i1> undef, <16 x i1> undef, i32 undef)
+ call <32 x i1> @llvm.vp.select.v32i1(<32 x i1> undef, <32 x i1> undef, <32 x i1> undef, i32 undef)
+ call <vscale x 1 x i1> @llvm.vp.select.nxv1i1(<vscale x 1 x i1> undef, <vscale x 1 x i1> undef, <vscale x 1 x i1> undef, i32 undef)
+ call <vscale x 2 x i1> @llvm.vp.select.nxv2i1(<vscale x 2 x i1> undef, <vscale x 2 x i1> undef, <vscale x 2 x i1> undef, i32 undef)
+ call <vscale x 4 x i1> @llvm.vp.select.nxv4i1(<vscale x 4 x i1> undef, <vscale x 4 x i1> undef, <vscale x 4 x i1> undef, i32 undef)
+ call <vscale x 8 x i1> @llvm.vp.select.nxv8i1(<vscale x 8 x i1> undef, <vscale x 8 x i1> undef, <vscale x 8 x i1> undef, i32 undef)
+ call <vscale x 16 x i1> @llvm.vp.select.nxv16i1(<vscale x 16 x i1> undef, <vscale x 16 x i1> undef, <vscale x 16 x i1> undef, i32 undef)
+ call <vscale x 32 x i1> @llvm.vp.select.nxv32i1(<vscale x 32 x i1> undef, <vscale x 32 x i1> undef, <vscale x 32 x i1> undef, i32 undef)
+
select i1 undef, i8 undef, i8 undef
select i1 undef, <1 x i8> undef, <1 x i8> undef
select i1 undef, <2 x i8> undef, <2 x i8> undef
@@ -184,6 +258,19 @@ define void @select() {
select <vscale x 16 x i1> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef
select <vscale x 32 x i1> undef, <vscale x 32 x i8> undef, <vscale x 32 x i8> undef
+ call <1 x i8> @llvm.vp.select.v1i8(<1 x i1> undef, <1 x i8> undef, <1 x i8> undef, i32 undef)
+ call <2 x i8> @llvm.vp.select.v2i8(<2 x i1> undef, <2 x i8> undef, <2 x i8> undef, i32 undef)
+ call <4 x i8> @llvm.vp.select.v4i8(<4 x i1> undef, <4 x i8> undef, <4 x i8> undef, i32 undef)
+ call <8 x i8> @llvm.vp.select.v8i8(<8 x i1> undef, <8 x i8> undef, <8 x i8> undef, i32 undef)
+ call <16 x i8> @llvm.vp.select.v16i8(<16 x i1> undef, <16 x i8> undef, <16 x i8> undef, i32 undef)
+ call <32 x i8> @llvm.vp.select.v32i8(<32 x i1> undef, <32 x i8> undef, <32 x i8> undef, i32 undef)
+ call <vscale x 1 x i8> @llvm.vp.select.nxv1i8(<vscale x 1 x i1> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, i32 undef)
+ call <vscale x 2 x i8> @llvm.vp.select.nxv2i8(<vscale x 2 x i1> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, i32 undef)
+ call <vscale x 4 x i8> @llvm.vp.select.nxv4i8(<vscale x 4 x i1> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, i32 undef)
+ call <vscale x 8 x i8> @llvm.vp.select.nxv8i8(<vscale x 8 x i1> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, i32 undef)
+ call <vscale x 16 x i8> @llvm.vp.select.nxv16i8(<vscale x 16 x i1> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, i32 undef)
+ call <vscale x 32 x i8> @llvm.vp.select.nxv32i8(<vscale x 32 x i1> undef, <vscale x 32 x i8> undef, <vscale x 32 x i8> undef, i32 undef)
+
select i1 undef, i16 undef, i16 undef
select i1 undef, <1 x i16> undef, <1 x i16> undef
select i1 undef, <2 x i16> undef, <2 x i16> undef
@@ -210,6 +297,19 @@ define void @select() {
select <vscale x 16 x i1> undef, <vscale x 16 x i16> undef, <vscale x 16 x i16> undef
select <vscale x 32 x i1> undef, <vscale x 32 x i16> undef, <vscale x 32 x i16> undef
+ call <1 x i16> @llvm.vp.select.v1i16(<1 x i1> undef, <1 x i16> undef, <1 x i16> undef, i32 undef)
+ call <2 x i16> @llvm.vp.select.v2i16(<2 x i1> undef, <2 x i16> undef, <2 x i16> undef, i32 undef)
+ call <4 x i16> @llvm.vp.select.v4i16(<4 x i1> undef, <4 x i16> undef, <4 x i16> undef, i32 undef)
+ call <8 x i16> @llvm.vp.select.v8i16(<8 x i1> undef, <8 x i16> undef, <8 x i16> undef, i32 undef)
+ call <16 x i16> @llvm.vp.select.v16i16(<16 x i1> undef, <16 x i16> undef, <16 x i16> undef, i32 undef)
+ call <32 x i16> @llvm.vp.select.v32i16(<32 x i1> undef, <32 x i16> undef, <32 x i16> undef, i32 undef)
+ call <vscale x 1 x i16> @llvm.vp.select.nxv1i16(<vscale x 1 x i1> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, i32 undef)
+ call <vscale x 2 x i16> @llvm.vp.select.nxv2i16(<vscale x 2 x i1> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, i32 undef)
+ call <vscale x 4 x i16> @llvm.vp.select.nxv4i16(<vscale x 4 x i1> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, i32 undef)
+ call <vscale x 8 x i16> @llvm.vp.select.nxv8i16(<vscale x 8 x i1> undef, <vscale x 8 x i16> undef, <vscale x 8 x i16> undef, i32 undef)
+ call <vscale x 16 x i16> @llvm.vp.select.nxv16i16(<vscale x 16 x i1> undef, <vscale x 16 x i16> undef, <vscale x 16 x i16> undef, i32 undef)
+ call <vscale x 32 x i16> @llvm.vp.select.nxv32i16(<vscale x 32 x i1> undef, <vscale x 32 x i16> undef, <vscale x 32 x i16> undef, i32 undef)
+
select i1 undef, i32 undef, i32 undef
select i1 undef, <1 x i32> undef, <1 x i32> undef
select i1 undef, <2 x i32> undef, <2 x i32> undef
@@ -236,6 +336,18 @@ define void @select() {
select <vscale x 16 x i1> undef, <vscale x 16 x i32> undef, <vscale x 16 x i32> undef
select <vscale x 32 x i1> undef, <vscale x 32 x i32> undef, <vscale x 32 x i32> undef
+ call <1 x i32> @llvm.vp.select.v1i32(<1 x i1> undef, <1 x i32> undef, <1 x i32> undef, i32 undef)
+ call <2 x i32> @llvm.vp.select.v2i32(<2 x i1> undef, <2 x i32> undef, <2 x i32> undef, i32 undef)
+ call <4 x i32> @llvm.vp.select.v4i32(<4 x i1> undef, <4 x i32> undef, <4 x i32> undef, i32 undef)
+ call <8 x i32> @llvm.vp.select.v8i32(<8 x i1> undef, <8 x i32> undef, <8 x i32> undef, i32 undef)
+ call <16 x i32> @llvm.vp.select.v16i32(<16 x i1> undef, <16 x i32> undef, <16 x i32> undef, i32 undef)
+ call <32 x i32> @llvm.vp.select.v32i32(<32 x i1> undef, <32 x i32> undef, <32 x i32> undef, i32 undef)
+ call <vscale x 1 x i32> @llvm.vp.select.nxv1i32(<vscale x 1 x i1> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, i32 undef)
+ call <vscale x 2 x i32> @llvm.vp.select.nxv2i32(<vscale x 2 x i1> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, i32 undef)
+ call <vscale x 4 x i32> @llvm.vp.select.nxv4i32(<vscale x 4 x i1> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> undef, i32 undef)
+ call <vscale x 8 x i32> @llvm.vp.select.nxv8i32(<vscale x 8 x i1> undef, <vscale x 8 x i32> undef, <vscale x 8 x i32> undef, i32 undef)
+ call <vscale x 16 x i32> @llvm.vp.select.nxv16i32(<vscale x 16 x i1> undef, <vscale x 16 x i32> undef, <vscale x 16 x i32> undef, i32 undef)
+ call <vscale x 32 x i32> @llvm.vp.select.nxv32i32(<vscale x 32 x i1> undef, <vscale x 32 x i32> undef, <vscale x 32 x i32> undef, i32 undef)
select i1 undef, i64 undef, i64 undef
select i1 undef, <1 x i64> undef, <1 x i64> undef
@@ -263,5 +375,43 @@ define void @select() {
select <vscale x 16 x i1> undef, <vscale x 16 x i64> undef, <vscale x 16 x i64> undef
select <vscale x 32 x i1> undef, <vscale x 32 x i64> undef, <vscale x 32 x i64> undef
+ call <1 x i64> @llvm.vp.select.v1i64(<1 x i1> undef, <1 x i64> undef, <1 x i64> undef, i32 undef)
+ call <2 x i64> @llvm.vp.select.v2i64(<2 x i1> undef, <2 x i64> undef, <2 x i64> undef, i32 undef)
+ call <4 x i64> @llvm.vp.select.v4i64(<4 x i1> undef, <4 x i64> undef, <4 x i64> undef, i32 undef)
+ call <8 x i64> @llvm.vp.select.v8i64(<8 x i1> undef, <8 x i64> undef, <8 x i64> undef, i32 undef)
+ call <16 x i64> @llvm.vp.select.v16i64(<16 x i1> undef, <16 x i64> undef, <16 x i64> undef, i32 undef)
+ call <32 x i64> @llvm.vp.select.v32i64(<32 x i1> undef, <32 x i64> undef, <32 x i64> undef, i32 undef)
+ call <vscale x 1 x i64> @llvm.vp.select.nxv1i64(<vscale x 1 x i1> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, i32 undef)
+ call <vscale x 2 x i64> @llvm.vp.select.nxv2i64(<vscale x 2 x i1> undef, <vscale x 2 x i64> undef, <vscale x 2 x i64> undef, i32 undef)
+ call <vscale x 4 x i64> @llvm.vp.select.nxv4i64(<vscale x 4 x i1> undef, <vscale x 4 x i64> undef, <vscale x 4 x i64> undef, i32 undef)
+ call <vscale x 8 x i64> @llvm.vp.select.nxv8i64(<vscale x 8 x i1> undef, <vscale x 8 x i64> undef, <vscale x 8 x i64> undef, i32 undef)
+ call <vscale x 16 x i64> @llvm.vp.select.nxv16i64(<vscale x 16 x i1> undef, <vscale x 16 x i64> undef, <vscale x 16 x i64> undef, i32 undef)
+ call <vscale x 32 x i64> @llvm.vp.select.nxv32i64(<vscale x 32 x i1> undef, <vscale x 32 x i64> undef, <vscale x 32 x i64> undef, i32 undef)
+
ret void
}
+
+define void @select_of_constants() {
+; CHECK-LABEL: 'select_of_constants'
+; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %1 = select i1 undef, <2 x i64> <i64 128, i64 128>, <2 x i64> zeroinitializer
+; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %2 = select i1 undef, <2 x i64> <i64 128, i64 127>, <2 x i64> zeroinitializer
+; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %3 = select i1 undef, <2 x i64> <i64 0, i64 1>, <2 x i64> zeroinitializer
+; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %4 = select i1 undef, <2 x i64> <i64 128, i64 533>, <2 x i64> <i64 0, i64 573>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %5 = select <4 x i1> undef, <4 x i32> <i32 524288, i32 262144, i32 131072, i32 65536>, <4 x i32> zeroinitializer
+; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+ ; Splat constants
+ select i1 undef, <2 x i64> <i64 128, i64 128>, <2 x i64> zeroinitializer
+  ; LHS is a VID pattern
+ select i1 undef, <2 x i64> <i64 128, i64 127>, <2 x i64> zeroinitializer
+ select i1 undef, <2 x i64> <i64 0, i64 1>, <2 x i64> zeroinitializer
+ ; 2x general (expensive) constants
+ select i1 undef, <2 x i64> <i64 128, i64 533>, <2 x i64> <i64 0, i64 573>
+
+ ; powers of two (still expensive)
+ select <4 x i1> undef, <4 x i32> <i32 524288, i32 262144, i32 131072, i32 65536>, <4 x i32> zeroinitializer
+
+ ret void
+}
+
diff --git a/llvm/test/Analysis/CtxProfAnalysis/handle-select.ll b/llvm/test/Analysis/CtxProfAnalysis/handle-select.ll
new file mode 100644
index 0000000..e740466
--- /dev/null
+++ b/llvm/test/Analysis/CtxProfAnalysis/handle-select.ll
@@ -0,0 +1,76 @@
+; Check that we handle `step` instrumentations. These adorn `select`s.
+; We don't want to confuse the `step`s with the normal increments we use for
+; BB ID-ing: we want to keep the `step`s after inlining, except if the
+; `select` is elided.
+;
+; RUN: split-file %s %t
+; RUN: llvm-ctxprof-util fromJSON --input=%t/profile.json --output=%t/profile.ctxprofdata
+;
+; RUN: opt -passes=ctx-instr-gen %t/example.ll -use-ctx-profile=%t/profile.ctxprofdata -S -o - | FileCheck %s --check-prefix=INSTR
+; RUN: opt -passes=ctx-instr-gen,module-inline %t/example.ll -use-ctx-profile=%t/profile.ctxprofdata -S -o - | FileCheck %s --check-prefix=POST-INL
+; RUN: opt -passes=ctx-instr-gen,module-inline,ctx-prof-flatten %t/example.ll -use-ctx-profile=%t/profile.ctxprofdata -S -o - | FileCheck %s --check-prefix=FLATTEN
+
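+; The three check prefixes cover successive pipeline stages: INSTR right after
+; instrumentation lowering, POST-INL after @bar is inlined into @foo, and
+; FLATTEN after the contextual profile is flattened into !prof metadata.
+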
+; INSTR-LABEL: yes:
+; INSTR-NEXT: call void @llvm.instrprof.increment(ptr @foo, i64 [[#]], i32 2, i32 1)
+; INSTR-NEXT: call void @llvm.instrprof.callsite(ptr @foo, i64 [[#]], i32 2, i32 0, ptr @bar)
+
+; INSTR-LABEL: no:
+; INSTR-NEXT: call void @llvm.instrprof.callsite(ptr @foo, i64 [[#]], i32 2, i32 1, ptr @bar)
+
+; INSTR-LABEL: define i32 @bar
+; INSTR-NEXT: call void @llvm.instrprof.increment(ptr @bar, i64 [[#]], i32 2, i32 0)
+; INSTR-NEXT: %inc =
+; INSTR: %test = icmp eq i32 %t, 0
+; INSTR-NEXT: %1 = zext i1 %test to i64
+; INSTR-NEXT: call void @llvm.instrprof.increment.step(ptr @bar, i64 [[#]], i32 2, i32 1, i64 %1)
+; INSTR-NEXT: %res = select
+
+; POST-INL-LABEL: yes:
+; POST-INL-NEXT: call void @llvm.instrprof.increment
+; POST-INL: call void @llvm.instrprof.increment.step
+; POST-INL-NEXT: %res.i = select
+
+; POST-INL-LABEL: no:
+; POST-INL-NEXT: call void @llvm.instrprof.increment
+; POST-INL-NEXT: br label
+
+; POST-INL-LABEL: exit:
+; POST-INL-NEXT: %res = phi i32 [ %res.i, %yes ], [ 1, %no ]
+
+; FLATTEN-LABEL: yes:
+; FLATTEN: %res.i = select i1 %test.i, i32 %inc.i, i32 %dec.i, !prof ![[SELPROF:[0-9]+]]
+; FLATTEN-LABEL: no:
+;
+; See the profile: in the "yes" case the step counter in @bar is set to 3,
+; while the total entry count of that BB is 4.
+; ![[SELPROF]] = !{!"branch_weights", i32 3, i32 1}
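+; That gives branch weights of 3 (select condition true) vs. 4 - 3 = 1 (false).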
+
+;--- example.ll
+define i32 @foo(i32 %t) !guid !0 {
+ %test = icmp slt i32 %t, 0
+ br i1 %test, label %yes, label %no
+yes:
+ %res1 = call i32 @bar(i32 %t) alwaysinline
+ br label %exit
+no:
+  ; Calling @bar with the constant 0 results in the select in @bar being
+  ; elided when inlined.
+ %res2 = call i32 @bar(i32 0) alwaysinline
+ br label %exit
+exit:
+ %res = phi i32 [%res1, %yes], [%res2, %no]
+ ret i32 %res
+}
+
+define i32 @bar(i32 %t) !guid !1 {
+ %inc = add i32 %t, 1
+ %dec = sub i32 %t, 1
+ %test = icmp eq i32 %t, 0
+ %res = select i1 %test, i32 %inc, i32 %dec
+ ret i32 %res
+}
+
+!0 = !{i64 1234}
+!1 = !{i64 5678}
+
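+; In the profile below, @foo (Guid 1234) is entered 10 times and takes the
+; "yes" branch 4 times; the @bar callsite on the "yes" path reports an entry
+; count of 4 with a step count of 3, the one on the "no" path 6 and 6.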
+;--- profile.json
+[{"Guid":1234, "Counters":[10, 4], "Callsites":[[{"Guid": 5678, "Counters":[4,3]}],[{"Guid": 5678, "Counters":[6,6]}]]}]
diff --git a/llvm/test/Analysis/ScalarEvolution/exit-count-non-strict.ll b/llvm/test/Analysis/ScalarEvolution/exit-count-non-strict.ll
index 6d64f76..f7a18c7 100644
--- a/llvm/test/Analysis/ScalarEvolution/exit-count-non-strict.ll
+++ b/llvm/test/Analysis/ScalarEvolution/exit-count-non-strict.ll
@@ -109,6 +109,9 @@ define void @ule_from_zero_no_nuw(i32 %M, i32 %N) {
; CHECK-NEXT: Loop %loop: Predicated backedge-taken count is ((zext i32 %N to i64) umin (1 + (zext i32 %M to i64))<nuw><nsw>)
; CHECK-NEXT: Predicates:
; CHECK-NEXT: {0,+,1}<%loop> Added Flags: <nusw>
+; CHECK-NEXT: Loop %loop: Predicated constant max backedge-taken count is i64 4294967295
+; CHECK-NEXT: Predicates:
+; CHECK-NEXT: {0,+,1}<%loop> Added Flags: <nusw>
; CHECK-NEXT: Loop %loop: Predicated symbolic max backedge-taken count is ((zext i32 %N to i64) umin (1 + (zext i32 %M to i64))<nuw><nsw>)
; CHECK-NEXT: Predicates:
; CHECK-NEXT: {0,+,1}<%loop> Added Flags: <nusw>
@@ -238,6 +241,9 @@ define void @sle_from_int_min_no_nsw(i32 %M, i32 %N) {
; CHECK-NEXT: Loop %loop: Predicated backedge-taken count is ((zext i32 (-2147483648 + %N) to i64) umin (2147483649 + (sext i32 %M to i64))<nsw>)
; CHECK-NEXT: Predicates:
; CHECK-NEXT: {-2147483648,+,1}<%loop> Added Flags: <nssw>
+; CHECK-NEXT: Loop %loop: Predicated constant max backedge-taken count is i64 4294967295
+; CHECK-NEXT: Predicates:
+; CHECK-NEXT: {-2147483648,+,1}<%loop> Added Flags: <nssw>
; CHECK-NEXT: Loop %loop: Predicated symbolic max backedge-taken count is ((zext i32 (-2147483648 + %N) to i64) umin (2147483649 + (sext i32 %M to i64))<nsw>)
; CHECK-NEXT: Predicates:
; CHECK-NEXT: {-2147483648,+,1}<%loop> Added Flags: <nssw>
diff --git a/llvm/test/Analysis/ScalarEvolution/finite-trip-count.ll b/llvm/test/Analysis/ScalarEvolution/finite-trip-count.ll
index 471954f..a1538fd 100644
--- a/llvm/test/Analysis/ScalarEvolution/finite-trip-count.ll
+++ b/llvm/test/Analysis/ScalarEvolution/finite-trip-count.ll
@@ -59,6 +59,9 @@ define void @sle_pre_inc_infinite(i32 %len) {
; CHECK-NEXT: Loop %for.body: Predicated backedge-taken count is (0 smax (1 + (sext i32 %len to i64))<nsw>)
; CHECK-NEXT: Predicates:
; CHECK-NEXT: {0,+,1}<%for.body> Added Flags: <nssw>
+; CHECK-NEXT: Loop %for.body: Predicated constant max backedge-taken count is i64 2147483648
+; CHECK-NEXT: Predicates:
+; CHECK-NEXT: {0,+,1}<%for.body> Added Flags: <nssw>
; CHECK-NEXT: Loop %for.body: Predicated symbolic max backedge-taken count is (0 smax (1 + (sext i32 %len to i64))<nsw>)
; CHECK-NEXT: Predicates:
; CHECK-NEXT: {0,+,1}<%for.body> Added Flags: <nssw>
@@ -130,6 +133,9 @@ define void @ule_pre_inc_infinite(i32 %len) {
; CHECK-NEXT: Loop %for.body: Predicated backedge-taken count is (1 + (zext i32 %len to i64))<nuw><nsw>
; CHECK-NEXT: Predicates:
; CHECK-NEXT: {0,+,1}<%for.body> Added Flags: <nusw>
+; CHECK-NEXT: Loop %for.body: Predicated constant max backedge-taken count is i64 4294967296
+; CHECK-NEXT: Predicates:
+; CHECK-NEXT: {0,+,1}<%for.body> Added Flags: <nusw>
; CHECK-NEXT: Loop %for.body: Predicated symbolic max backedge-taken count is (1 + (zext i32 %len to i64))<nuw><nsw>
; CHECK-NEXT: Predicates:
; CHECK-NEXT: {0,+,1}<%for.body> Added Flags: <nusw>
diff --git a/llvm/test/Analysis/ScalarEvolution/ne-overflow.ll b/llvm/test/Analysis/ScalarEvolution/ne-overflow.ll
index 49288c8..3022281 100644
--- a/llvm/test/Analysis/ScalarEvolution/ne-overflow.ll
+++ b/llvm/test/Analysis/ScalarEvolution/ne-overflow.ll
@@ -240,6 +240,9 @@ define void @test_zext(i64 %N) mustprogress {
; CHECK-NEXT: Loop %for.body: Predicated backedge-taken count is (%N /u 2)
; CHECK-NEXT: Predicates:
; CHECK-NEXT: {0,+,2}<nuw><%for.body> Added Flags: <nusw>
+; CHECK-NEXT: Loop %for.body: Predicated constant max backedge-taken count is i64 9223372036854775807
+; CHECK-NEXT: Predicates:
+; CHECK-NEXT: {0,+,2}<nuw><%for.body> Added Flags: <nusw>
; CHECK-NEXT: Loop %for.body: Predicated symbolic max backedge-taken count is (%N /u 2)
; CHECK-NEXT: Predicates:
; CHECK-NEXT: {0,+,2}<nuw><%for.body> Added Flags: <nusw>
diff --git a/llvm/test/Analysis/ScalarEvolution/predicated-exit-count.ll b/llvm/test/Analysis/ScalarEvolution/predicated-exit-count.ll
index de21418..3b398d4 100644
--- a/llvm/test/Analysis/ScalarEvolution/predicated-exit-count.ll
+++ b/llvm/test/Analysis/ScalarEvolution/predicated-exit-count.ll
@@ -30,6 +30,10 @@ define i32 @multiple_exits_with_predicates(ptr %src1, ptr readonly %src2, i32 %e
; CHECK-NEXT: Predicates:
; CHECK-NEXT: {1,+,1}<%for.body> Added Flags: <nusw>
; CHECK-EMPTY:
+; CHECK-NEXT: Loop %for.body: Predicated constant max backedge-taken count is i32 1023
+; CHECK-NEXT: Predicates:
+; CHECK-NEXT: {1,+,1}<%for.body> Added Flags: <nusw>
+; CHECK-NEXT: {1,+,1}<%for.body> Added Flags: <nusw>
; CHECK-NEXT: Loop %for.body: Predicated symbolic max backedge-taken count is (1023 umin (-1 + (1 umax %end)))
; CHECK-NEXT: Predicates:
; CHECK-NEXT: {1,+,1}<%for.body> Added Flags: <nusw>
diff --git a/llvm/test/Analysis/ScalarEvolution/predicated-symbolic-max-backedge-taken-count.ll b/llvm/test/Analysis/ScalarEvolution/predicated-symbolic-max-backedge-taken-count.ll
index 2ec6158..ee60526 100644
--- a/llvm/test/Analysis/ScalarEvolution/predicated-symbolic-max-backedge-taken-count.ll
+++ b/llvm/test/Analysis/ScalarEvolution/predicated-symbolic-max-backedge-taken-count.ll
@@ -20,6 +20,9 @@ define void @test1(i64 %x, ptr %a, ptr %b) {
; CHECK-NEXT: Predicates:
; CHECK-NEXT: {1,+,1}<%header> Added Flags: <nusw>
; CHECK-EMPTY:
+; CHECK-NEXT: Loop %header: Predicated constant max backedge-taken count is i64 -2
+; CHECK-NEXT: Predicates:
+; CHECK-NEXT: {1,+,1}<%header> Added Flags: <nusw>
; CHECK-NEXT: Loop %header: Predicated symbolic max backedge-taken count is (-1 + (1 umax %x))
; CHECK-NEXT: Predicates:
; CHECK-NEXT: {1,+,1}<%header> Added Flags: <nusw>
@@ -71,6 +74,9 @@ define void @test2(i64 %x, ptr %a) {
; CHECK-NEXT: Predicates:
; CHECK-NEXT: {1,+,1}<%header> Added Flags: <nusw>
; CHECK-EMPTY:
+; CHECK-NEXT: Loop %header: Predicated constant max backedge-taken count is i64 -2
+; CHECK-NEXT: Predicates:
+; CHECK-NEXT: {1,+,1}<%header> Added Flags: <nusw>
; CHECK-NEXT: Loop %header: Predicated symbolic max backedge-taken count is (-1 + (1 umax %x))
; CHECK-NEXT: Predicates:
; CHECK-NEXT: {1,+,1}<%header> Added Flags: <nusw>
diff --git a/llvm/test/Analysis/ScalarEvolution/trip-count-implied-addrec.ll b/llvm/test/Analysis/ScalarEvolution/trip-count-implied-addrec.ll
index b313842..2ee2ec5 100644
--- a/llvm/test/Analysis/ScalarEvolution/trip-count-implied-addrec.ll
+++ b/llvm/test/Analysis/ScalarEvolution/trip-count-implied-addrec.ll
@@ -61,6 +61,9 @@ define void @nw_implies_nsw(i16 %n) mustprogress {
; CHECK-NEXT: Loop %for.body: Predicated backedge-taken count is (128 + (-128 smax %n))
; CHECK-NEXT: Predicates:
; CHECK-NEXT: {-128,+,1}<%for.body> Added Flags: <nssw>
+; CHECK-NEXT: Loop %for.body: Predicated constant max backedge-taken count is i16 -32641
+; CHECK-NEXT: Predicates:
+; CHECK-NEXT: {-128,+,1}<%for.body> Added Flags: <nssw>
; CHECK-NEXT: Loop %for.body: Predicated symbolic max backedge-taken count is (128 + (-128 smax %n))
; CHECK-NEXT: Predicates:
; CHECK-NEXT: {-128,+,1}<%for.body> Added Flags: <nssw>
@@ -110,6 +113,9 @@ define void @actually_infinite() {
; CHECK-NEXT: Loop %for.body: Predicated backedge-taken count is i16 257
; CHECK-NEXT: Predicates:
; CHECK-NEXT: {0,+,1}<%for.body> Added Flags: <nusw>
+; CHECK-NEXT: Loop %for.body: Predicated constant max backedge-taken count is i16 257
+; CHECK-NEXT: Predicates:
+; CHECK-NEXT: {0,+,1}<%for.body> Added Flags: <nusw>
; CHECK-NEXT: Loop %for.body: Predicated symbolic max backedge-taken count is i16 257
; CHECK-NEXT: Predicates:
; CHECK-NEXT: {0,+,1}<%for.body> Added Flags: <nusw>
@@ -138,6 +144,9 @@ define void @rhs_mustexit_1(i16 %n.raw) mustprogress {
; CHECK-NEXT: Loop %for.body: Predicated backedge-taken count is (-1 + (1 umax (-1 + (zext i8 (trunc i16 %n.raw to i8) to i16))<nsw>))
; CHECK-NEXT: Predicates:
; CHECK-NEXT: {1,+,1}<nw><%for.body> Added Flags: <nusw>
+; CHECK-NEXT: Loop %for.body: Predicated constant max backedge-taken count is i16 -2
+; CHECK-NEXT: Predicates:
+; CHECK-NEXT: {1,+,1}<nw><%for.body> Added Flags: <nusw>
; CHECK-NEXT: Loop %for.body: Predicated symbolic max backedge-taken count is (-1 + (1 umax (-1 + (zext i8 (trunc i16 %n.raw to i8) to i16))<nsw>))
; CHECK-NEXT: Predicates:
; CHECK-NEXT: {1,+,1}<nw><%for.body> Added Flags: <nusw>
@@ -266,6 +275,9 @@ define void @neg_rhs_maybe_infinite(i16 %n.raw) {
; CHECK-NEXT: Loop %for.body: Predicated backedge-taken count is (-1 + (1 umax (-1 + (zext i8 (trunc i16 %n.raw to i8) to i16))<nsw>))
; CHECK-NEXT: Predicates:
; CHECK-NEXT: {1,+,1}<%for.body> Added Flags: <nusw>
+; CHECK-NEXT: Loop %for.body: Predicated constant max backedge-taken count is i16 -2
+; CHECK-NEXT: Predicates:
+; CHECK-NEXT: {1,+,1}<%for.body> Added Flags: <nusw>
; CHECK-NEXT: Loop %for.body: Predicated symbolic max backedge-taken count is (-1 + (1 umax (-1 + (zext i8 (trunc i16 %n.raw to i8) to i16))<nsw>))
; CHECK-NEXT: Predicates:
; CHECK-NEXT: {1,+,1}<%for.body> Added Flags: <nusw>
@@ -391,6 +403,9 @@ define void @ult_constant_rhs_stride2_neg(i16 %n.raw, i8 %start) {
; CHECK-NEXT: Loop %for.body: Predicated backedge-taken count is ((256 + (-1 * (zext i8 (2 + %start) to i16))<nsw>)<nsw> /u 2)
; CHECK-NEXT: Predicates:
; CHECK-NEXT: {(2 + %start),+,2}<%for.body> Added Flags: <nusw>
+; CHECK-NEXT: Loop %for.body: Predicated constant max backedge-taken count is i16 128
+; CHECK-NEXT: Predicates:
+; CHECK-NEXT: {(2 + %start),+,2}<%for.body> Added Flags: <nusw>
; CHECK-NEXT: Loop %for.body: Predicated symbolic max backedge-taken count is ((256 + (-1 * (zext i8 (2 + %start) to i16))<nsw>)<nsw> /u 2)
; CHECK-NEXT: Predicates:
; CHECK-NEXT: {(2 + %start),+,2}<%for.body> Added Flags: <nusw>
diff --git a/llvm/test/Assembler/auto_upgrade_nvvm_intrinsics.ll b/llvm/test/Assembler/auto_upgrade_nvvm_intrinsics.ll
index 1c11e12..584c0ef 100644
--- a/llvm/test/Assembler/auto_upgrade_nvvm_intrinsics.ll
+++ b/llvm/test/Assembler/auto_upgrade_nvvm_intrinsics.ll
@@ -26,6 +26,24 @@ declare i16 @llvm.nvvm.min.us(i16, i16)
declare i32 @llvm.nvvm.min.ui(i32, i32)
declare i64 @llvm.nvvm.min.ull(i64, i64)
+declare i32 @llvm.nvvm.bitcast.f2i(float)
+declare float @llvm.nvvm.bitcast.i2f(i32)
+declare i64 @llvm.nvvm.bitcast.d2ll(double)
+declare double @llvm.nvvm.bitcast.ll2d(i64)
+
+declare i32 @llvm.nvvm.rotate.b32(i32, i32)
+declare i64 @llvm.nvvm.rotate.right.b64(i64, i32)
+declare i64 @llvm.nvvm.rotate.b64(i64, i32)
+
+declare ptr addrspace(1) @llvm.nvvm.ptr.gen.to.global.p1.p0(ptr)
+declare ptr addrspace(3) @llvm.nvvm.ptr.gen.to.shared.p3.p0(ptr)
+declare ptr addrspace(4) @llvm.nvvm.ptr.gen.to.constant.p4.p0(ptr)
+declare ptr addrspace(5) @llvm.nvvm.ptr.gen.to.local.p5.p0(ptr)
+declare ptr @llvm.nvvm.ptr.global.to.gen.p0.p1(ptr addrspace(1))
+declare ptr @llvm.nvvm.ptr.shared.to.gen.p0.p3(ptr addrspace(3))
+declare ptr @llvm.nvvm.ptr.constant.to.gen.p0.p4(ptr addrspace(4))
+declare ptr @llvm.nvvm.ptr.local.to.gen.p0.p5(ptr addrspace(5))
+
; CHECK-LABEL: @simple_upgrade
define void @simple_upgrade(i32 %a, i64 %b, i16 %c) {
; CHECK: call i32 @llvm.bitreverse.i32(i32 %a)
@@ -120,3 +138,56 @@ define void @min_max(i16 %a1, i16 %a2, i32 %b1, i32 %b2, i64 %c1, i64 %c2) {
ret void
}
+
+; CHECK-LABEL: @bitcast
+define void @bitcast(i32 %a, i64 %b, float %c, double %d) {
+; CHECK: bitcast float %c to i32
+; CHECK: bitcast i32 %a to float
+; CHECK: bitcast double %d to i64
+; CHECK: bitcast i64 %b to double
+;
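+  ; Each legacy nvvm.bitcast.* intrinsic is auto-upgraded to a plain bitcast.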
+ %r1 = call i32 @llvm.nvvm.bitcast.f2i(float %c)
+ %r2 = call float @llvm.nvvm.bitcast.i2f(i32 %a)
+ %r3 = call i64 @llvm.nvvm.bitcast.d2ll(double %d)
+ %r4 = call double @llvm.nvvm.bitcast.ll2d(i64 %b)
+
+ ret void
+}
+
+; CHECK-LABEL: @rotate
+define void @rotate(i32 %a, i64 %b) {
+; CHECK: call i32 @llvm.fshl.i32(i32 %a, i32 %a, i32 6)
+; CHECK: call i64 @llvm.fshr.i64(i64 %b, i64 %b, i64 7)
+; CHECK: call i64 @llvm.fshl.i64(i64 %b, i64 %b, i64 8)
+;
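+  ; A rotate is a funnel shift with both operands equal:
+  ; rotl(x, n) == fshl(x, x, n) and rotr(x, n) == fshr(x, x, n).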
+ %r1 = call i32 @llvm.nvvm.rotate.b32(i32 %a, i32 6)
+ %r2 = call i64 @llvm.nvvm.rotate.right.b64(i64 %b, i32 7)
+ %r3 = call i64 @llvm.nvvm.rotate.b64(i64 %b, i32 8)
+ ret void
+}
+
+; CHECK-LABEL: @addrspacecast
+define void @addrspacecast(ptr %p0) {
+; CHECK: %1 = addrspacecast ptr %p0 to ptr addrspace(1)
+; CHECK: %2 = addrspacecast ptr addrspace(1) %1 to ptr
+; CHECK: %3 = addrspacecast ptr %2 to ptr addrspace(3)
+; CHECK: %4 = addrspacecast ptr addrspace(3) %3 to ptr
+; CHECK: %5 = addrspacecast ptr %4 to ptr addrspace(4)
+; CHECK: %6 = addrspacecast ptr addrspace(4) %5 to ptr
+; CHECK: %7 = addrspacecast ptr %6 to ptr addrspace(5)
+; CHECK: %8 = addrspacecast ptr addrspace(5) %7 to ptr
+;
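+  ; NVPTX address spaces: 1 = global, 3 = shared, 4 = constant, 5 = local.
+  ; Each legacy conversion intrinsic becomes a plain addrspacecast; every
+  ; pointer round-trips through the generic (default) address space.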
+ %p1 = call ptr addrspace(1) @llvm.nvvm.ptr.gen.to.global.p1.p0(ptr %p0)
+ %p2 = call ptr @llvm.nvvm.ptr.global.to.gen.p0.p1(ptr addrspace(1) %p1)
+
+ %p3 = call ptr addrspace(3) @llvm.nvvm.ptr.gen.to.shared.p3.p0(ptr %p2)
+ %p4 = call ptr @llvm.nvvm.ptr.shared.to.gen.p0.p3(ptr addrspace(3) %p3)
+
+ %p5 = call ptr addrspace(4) @llvm.nvvm.ptr.gen.to.constant.p4.p0(ptr %p4)
+ %p6 = call ptr @llvm.nvvm.ptr.constant.to.gen.p0.p4(ptr addrspace(4) %p5)
+
+ %p7 = call ptr addrspace(5) @llvm.nvvm.ptr.gen.to.local.p5.p0(ptr %p6)
+ %p8 = call ptr @llvm.nvvm.ptr.local.to.gen.p0.p5(ptr addrspace(5) %p7)
+
+ ret void
+}
diff --git a/llvm/test/Bindings/llvm-c/atomics.ll b/llvm/test/Bindings/llvm-c/atomics.ll
index 162368c..588bd24 100644
--- a/llvm/test/Bindings/llvm-c/atomics.ll
+++ b/llvm/test/Bindings/llvm-c/atomics.ll
@@ -58,6 +58,9 @@ define void @atomic_rmw_ops(ptr %p, i32 %i, float %f) {
%a.uinc_wrap = atomicrmw uinc_wrap ptr %p, i32 %i acq_rel, align 8
%a.udec_wrap = atomicrmw udec_wrap ptr %p, i32 %i acq_rel, align 8
+ %a.usub_sat = atomicrmw usub_sat ptr %p, i32 %i acq_rel, align 8
+ %a.usub_cond = atomicrmw usub_cond ptr %p, i32 %i acq_rel, align 8
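+  ; usub_sat subtracts with unsigned saturation (clamping at 0); usub_cond
+  ; performs the subtraction only if it would not wrap below 0.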
+
ret void
}
diff --git a/llvm/test/Bindings/llvm-c/debug_info_new_format.ll b/llvm/test/Bindings/llvm-c/debug_info_new_format.ll
index 50496cb..e7f537a 100644
--- a/llvm/test/Bindings/llvm-c/debug_info_new_format.ll
+++ b/llvm/test/Bindings/llvm-c/debug_info_new_format.ll
@@ -1,4 +1,4 @@
-; RUN: llvm-c-test --test-dibuilder-debuginfo-format | FileCheck %s
+; RUN: llvm-c-test --test-dibuilder | FileCheck %s
;; Duplicate of debug_info.ll using debug records instead of intrinsics.
; CHECK: ; ModuleID = 'debuginfo.c'
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-freeze.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-freeze.mir
index 3e768c4..03c28ef 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-freeze.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-freeze.mir
@@ -159,25 +159,13 @@ body: |
; CHECK-LABEL: name: test_freeze_v3s8
; CHECK: liveins: $q0
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
- ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[DEF]](<4 x s16>)
- ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[UV]](s16)
- ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s8) = G_TRUNC [[UV1]](s16)
- ; CHECK-NEXT: [[TRUNC2:%[0-9]+]]:_(s8) = G_TRUNC [[UV2]](s16)
- ; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(s8) = G_IMPLICIT_DEF
- ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s8>) = G_BUILD_VECTOR [[TRUNC]](s8), [[TRUNC1]](s8), [[TRUNC2]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8)
- ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(<8 x s16>) = G_ANYEXT [[BUILD_VECTOR]](<8 x s8>)
- ; CHECK-NEXT: [[UV4:%[0-9]+]]:_(<4 x s16>), [[UV5:%[0-9]+]]:_(<4 x s16>) = G_UNMERGE_VALUES [[ANYEXT]](<8 x s16>)
- ; CHECK-NEXT: [[FREEZE:%[0-9]+]]:_(<4 x s16>) = G_FREEZE [[UV4]]
- ; CHECK-NEXT: [[UV6:%[0-9]+]]:_(s16), [[UV7:%[0-9]+]]:_(s16), [[UV8:%[0-9]+]]:_(s16), [[UV9:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[FREEZE]](<4 x s16>)
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<4 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[FREEZE:%[0-9]+]]:_(<4 x s8>) = G_FREEZE [[DEF]]
+ ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s8), [[UV1:%[0-9]+]]:_(s8), [[UV2:%[0-9]+]]:_(s8), [[UV3:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[FREEZE]](<4 x s8>)
; CHECK-NEXT: %undef:_(s32) = G_IMPLICIT_DEF
- ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[UV6]](s16)
- ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
- ; CHECK-NEXT: %ext0:_(s32) = G_AND [[ANYEXT1]], [[C]]
- ; CHECK-NEXT: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[UV7]](s16)
- ; CHECK-NEXT: %ext1:_(s32) = G_AND [[ANYEXT2]], [[C]]
- ; CHECK-NEXT: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[UV8]](s16)
- ; CHECK-NEXT: %ext2:_(s32) = G_AND [[ANYEXT3]], [[C]]
+ ; CHECK-NEXT: %ext0:_(s32) = G_ZEXT [[UV]](s8)
+ ; CHECK-NEXT: %ext1:_(s32) = G_ZEXT [[UV1]](s8)
+ ; CHECK-NEXT: %ext2:_(s32) = G_ZEXT [[UV2]](s8)
; CHECK-NEXT: %res:_(<4 x s32>) = G_BUILD_VECTOR %ext0(s32), %ext1(s32), %ext2(s32), %undef(s32)
; CHECK-NEXT: $q0 = COPY %res(<4 x s32>)
%x:_(<3 x s8>) = G_IMPLICIT_DEF
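
The shorter check sequence reflects G_FREEZE now being legalized directly on
the <4 x s8> type instead of being widened through <4 x s16> and re-narrowed.
At the IR level the operation under test is just freeze of an undef vector;
a sketch (function name is illustrative):

  define <4 x i8> @freeze_small_vec() {
    ; freeze pins undef to one arbitrary but fixed value, so every use
    ; observes the same bits
    %f = freeze <4 x i8> undef
    ret <4 x i8> %f
  }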
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-insert-vector-elt.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-insert-vector-elt.mir
index 9a8697c..11c6c7f 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-insert-vector-elt.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-insert-vector-elt.mir
@@ -248,13 +248,10 @@ body: |
; CHECK-NEXT: [[TRUNC3:%[0-9]+]]:_(s8) = G_TRUNC [[UV2]](s16)
; CHECK-NEXT: [[TRUNC4:%[0-9]+]]:_(s8) = G_TRUNC [[UV3]](s16)
; CHECK-NEXT: [[TRUNC5:%[0-9]+]]:_(s8) = G_TRUNC [[UV4]](s16)
- ; CHECK-NEXT: [[DEF2:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
- ; CHECK-NEXT: [[UV6:%[0-9]+]]:_(s16), [[UV7:%[0-9]+]]:_(s16), [[UV8:%[0-9]+]]:_(s16), [[UV9:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[DEF2]](<4 x s16>)
- ; CHECK-NEXT: [[TRUNC6:%[0-9]+]]:_(s8) = G_TRUNC [[UV6]](s16)
- ; CHECK-NEXT: [[TRUNC7:%[0-9]+]]:_(s8) = G_TRUNC [[UV7]](s16)
- ; CHECK-NEXT: [[TRUNC8:%[0-9]+]]:_(s8) = G_TRUNC [[UV8]](s16)
- ; CHECK-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<16 x s8>) = G_BUILD_VECTOR [[TRUNC3]](s8), [[TRUNC4]](s8), [[TRUNC5]](s8), [[TRUNC6]](s8), [[TRUNC7]](s8), [[TRUNC8]](s8), [[TRUNC6]](s8), [[TRUNC7]](s8), [[TRUNC8]](s8), [[TRUNC6]](s8), [[TRUNC7]](s8), [[TRUNC8]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8)
- ; CHECK-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<16 x s8>) = G_BUILD_VECTOR [[C]](s8), [[DEF]](s8), [[DEF]](s8), [[TRUNC6]](s8), [[TRUNC7]](s8), [[TRUNC8]](s8), [[TRUNC6]](s8), [[TRUNC7]](s8), [[TRUNC8]](s8), [[TRUNC6]](s8), [[TRUNC7]](s8), [[TRUNC8]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8)
+ ; CHECK-NEXT: [[DEF2:%[0-9]+]]:_(<4 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[UV6:%[0-9]+]]:_(s8), [[UV7:%[0-9]+]]:_(s8), [[UV8:%[0-9]+]]:_(s8), [[UV9:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[DEF2]](<4 x s8>)
+ ; CHECK-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<16 x s8>) = G_BUILD_VECTOR [[TRUNC3]](s8), [[TRUNC4]](s8), [[TRUNC5]](s8), [[UV6]](s8), [[UV7]](s8), [[UV8]](s8), [[UV6]](s8), [[UV7]](s8), [[UV8]](s8), [[UV6]](s8), [[UV7]](s8), [[UV8]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8)
+ ; CHECK-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<16 x s8>) = G_BUILD_VECTOR [[C]](s8), [[DEF]](s8), [[DEF]](s8), [[UV6]](s8), [[UV7]](s8), [[UV8]](s8), [[UV6]](s8), [[UV7]](s8), [[UV8]](s8), [[UV6]](s8), [[UV7]](s8), [[UV8]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8)
; CHECK-NEXT: [[SHUF:%[0-9]+]]:_(<16 x s8>) = G_SHUFFLE_VECTOR [[BUILD_VECTOR1]](<16 x s8>), [[BUILD_VECTOR2]], shufflemask(0, 16, 16, 16, 1, 16, 16, 16, 2, 16, 16, 16, undef, undef, undef, undef)
; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<4 x s32>) = G_BITCAST [[SHUF]](<16 x s8>)
; CHECK-NEXT: [[UITOFP:%[0-9]+]]:_(<4 x s32>) = G_UITOFP [[BITCAST]](<4 x s32>)
diff --git a/llvm/test/CodeGen/AArch64/bswap.ll b/llvm/test/CodeGen/AArch64/bswap.ll
index e90014b..b14f1a4 100644
--- a/llvm/test/CodeGen/AArch64/bswap.ll
+++ b/llvm/test/CodeGen/AArch64/bswap.ll
@@ -177,9 +177,7 @@ define <2 x i16> @bswap_v2i16(<2 x i16> %a){
;
; CHECK-GI-LABEL: bswap_v2i16:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-GI-NEXT: mov w8, v0.s[1]
-; CHECK-GI-NEXT: mov v0.h[1], w8
+; CHECK-GI-NEXT: uzp1 v0.4h, v0.4h, v0.4h
; CHECK-GI-NEXT: rev16 v0.8b, v0.8b
; CHECK-GI-NEXT: mov h1, v0.h[1]
; CHECK-GI-NEXT: mov v0.h[1], v1.h[0]
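
This and several of the following CHECK updates replace a lane shuffle done
through general-purpose registers with a single uzp1, which keeps the
even-indexed elements of the concatenation of its two sources; for the
<2 x i16>-in-32-bit-lanes cases this compacts the low half of each 32-bit
lane into adjacent 16-bit lanes. The equivalent IR-level shuffle, as a
sketch (illustrative function name):

  define <4 x i16> @uzp1_equiv(<4 x i16> %a, <4 x i16> %b) {
    ; elements 0, 2, 4, 6 of the 8-element concatenation of %a and %b
    %r = shufflevector <4 x i16> %a, <4 x i16> %b, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
    ret <4 x i16> %r
  }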
diff --git a/llvm/test/CodeGen/AArch64/concat-vector.ll b/llvm/test/CodeGen/AArch64/concat-vector.ll
index 18570b2..eee917e 100644
--- a/llvm/test/CodeGen/AArch64/concat-vector.ll
+++ b/llvm/test/CodeGen/AArch64/concat-vector.ll
@@ -183,15 +183,12 @@ define <8 x i16> @concat_v8s16_v2s16(ptr %ptr) {
;
; CHECK-GI-LABEL: concat_v8s16_v2s16:
; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: ldr h1, [x0]
-; CHECK-GI-NEXT: ldr h2, [x0, #2]
-; CHECK-GI-NEXT: dup v0.4s, w8
-; CHECK-GI-NEXT: mov v1.s[1], v2.s[0]
-; CHECK-GI-NEXT: xtn v2.4h, v0.4s
-; CHECK-GI-NEXT: xtn v1.4h, v1.4s
-; CHECK-GI-NEXT: fmov w8, s1
+; CHECK-GI-NEXT: ldr h0, [x0]
+; CHECK-GI-NEXT: ldr h1, [x0, #2]
+; CHECK-GI-NEXT: mov v0.s[1], v1.s[0]
+; CHECK-GI-NEXT: xtn v0.4h, v0.4s
+; CHECK-GI-NEXT: fmov w8, s0
; CHECK-GI-NEXT: mov v0.s[0], w8
-; CHECK-GI-NEXT: fmov w8, s2
; CHECK-GI-NEXT: mov v0.s[1], w8
; CHECK-GI-NEXT: mov v0.s[2], w8
; CHECK-GI-NEXT: mov v0.s[3], w8
@@ -209,10 +206,7 @@ define <16 x i8> @concat_v16s8_v4s8(ptr %ptr) {
;
; CHECK-GI-LABEL: concat_v16s8_v4s8:
; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: dup v0.8h, w8
-; CHECK-GI-NEXT: xtn v1.8b, v0.8h
; CHECK-GI-NEXT: ldr s0, [x0]
-; CHECK-GI-NEXT: fmov w8, s1
; CHECK-GI-NEXT: mov v0.s[1], w8
; CHECK-GI-NEXT: mov v0.s[2], w8
; CHECK-GI-NEXT: mov v0.s[3], w8
diff --git a/llvm/test/CodeGen/AArch64/fixed-vector-interleave.ll b/llvm/test/CodeGen/AArch64/fixed-vector-interleave.ll
index aa20304..a9618fd 100644
--- a/llvm/test/CodeGen/AArch64/fixed-vector-interleave.ll
+++ b/llvm/test/CodeGen/AArch64/fixed-vector-interleave.ll
@@ -3,24 +3,10 @@
; RUN: llc -mtriple=aarch64-none-linux-gnu -global-isel %s -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI
define <4 x half> @interleave2_v4f16(<2 x half> %vec0, <2 x half> %vec1) {
-; CHECK-SD-LABEL: interleave2_v4f16:
-; CHECK-SD: // %bb.0:
-; CHECK-SD-NEXT: zip1 v0.4h, v0.4h, v1.4h
-; CHECK-SD-NEXT: ret
-;
-; CHECK-GI-LABEL: interleave2_v4f16:
-; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: dup v2.4s, w8
-; CHECK-GI-NEXT: fmov w8, s0
-; CHECK-GI-NEXT: fmov w9, s1
-; CHECK-GI-NEXT: xtn v0.4h, v2.4s
-; CHECK-GI-NEXT: mov v1.s[0], w8
-; CHECK-GI-NEXT: mov v2.s[0], w9
-; CHECK-GI-NEXT: fmov w8, s0
-; CHECK-GI-NEXT: mov v1.s[1], w8
-; CHECK-GI-NEXT: mov v2.s[1], w8
-; CHECK-GI-NEXT: zip1 v0.4h, v1.4h, v2.4h
-; CHECK-GI-NEXT: ret
+; CHECK-LABEL: interleave2_v4f16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: zip1 v0.4h, v0.4h, v1.4h
+; CHECK-NEXT: ret
%retval = call <4 x half> @llvm.vector.interleave2.v4f16(<2 x half> %vec0, <2 x half> %vec1)
ret <4 x half> %retval
}
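
With both selectors now emitting the same single zip1, the SD/GI prefixes
collapse into one CHECK block. llvm.vector.interleave2 zips its operands
lane by lane; an equivalent shufflevector form, as a sketch:

  define <4 x half> @interleave2_as_shuffle(<2 x half> %a, <2 x half> %b) {
    ; result lanes: a0, b0, a1, b1
    %r = shufflevector <2 x half> %a, <2 x half> %b, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
    ret <4 x half> %r
  }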
diff --git a/llvm/test/CodeGen/AArch64/fptoi.ll b/llvm/test/CodeGen/AArch64/fptoi.ll
index 20b5567..f72a49f 100644
--- a/llvm/test/CodeGen/AArch64/fptoi.ll
+++ b/llvm/test/CodeGen/AArch64/fptoi.ll
@@ -3172,42 +3172,22 @@ entry:
}
define <3 x i16> @fptos_v3f32_v3i16(<3 x float> %a) {
-; CHECK-SD-LABEL: fptos_v3f32_v3i16:
-; CHECK-SD: // %bb.0: // %entry
-; CHECK-SD-NEXT: fcvtzs v0.4s, v0.4s
-; CHECK-SD-NEXT: xtn v0.4h, v0.4s
-; CHECK-SD-NEXT: ret
-;
-; CHECK-GI-LABEL: fptos_v3f32_v3i16:
-; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: fcvtzs v0.4s, v0.4s
-; CHECK-GI-NEXT: mov w8, v0.s[1]
-; CHECK-GI-NEXT: mov w9, v0.s[2]
-; CHECK-GI-NEXT: mov v0.h[1], w8
-; CHECK-GI-NEXT: mov v0.h[2], w9
-; CHECK-GI-NEXT: // kill: def $d0 killed $d0 killed $q0
-; CHECK-GI-NEXT: ret
+; CHECK-LABEL: fptos_v3f32_v3i16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fcvtzs v0.4s, v0.4s
+; CHECK-NEXT: xtn v0.4h, v0.4s
+; CHECK-NEXT: ret
entry:
%c = fptosi <3 x float> %a to <3 x i16>
ret <3 x i16> %c
}
define <3 x i16> @fptou_v3f32_v3i16(<3 x float> %a) {
-; CHECK-SD-LABEL: fptou_v3f32_v3i16:
-; CHECK-SD: // %bb.0: // %entry
-; CHECK-SD-NEXT: fcvtzu v0.4s, v0.4s
-; CHECK-SD-NEXT: xtn v0.4h, v0.4s
-; CHECK-SD-NEXT: ret
-;
-; CHECK-GI-LABEL: fptou_v3f32_v3i16:
-; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: fcvtzu v0.4s, v0.4s
-; CHECK-GI-NEXT: mov w8, v0.s[1]
-; CHECK-GI-NEXT: mov w9, v0.s[2]
-; CHECK-GI-NEXT: mov v0.h[1], w8
-; CHECK-GI-NEXT: mov v0.h[2], w9
-; CHECK-GI-NEXT: // kill: def $d0 killed $d0 killed $q0
-; CHECK-GI-NEXT: ret
+; CHECK-LABEL: fptou_v3f32_v3i16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fcvtzu v0.4s, v0.4s
+; CHECK-NEXT: xtn v0.4h, v0.4s
+; CHECK-NEXT: ret
entry:
%c = fptoui <3 x float> %a to <3 x i16>
ret <3 x i16> %c
@@ -6077,11 +6057,7 @@ define <3 x i16> @fptos_v3f16_v3i16(<3 x half> %a) {
; CHECK-GI-NOFP16: // %bb.0: // %entry
; CHECK-GI-NOFP16-NEXT: fcvtl v0.4s, v0.4h
; CHECK-GI-NOFP16-NEXT: fcvtzs v0.4s, v0.4s
-; CHECK-GI-NOFP16-NEXT: mov w8, v0.s[1]
-; CHECK-GI-NOFP16-NEXT: mov w9, v0.s[2]
-; CHECK-GI-NOFP16-NEXT: mov v0.h[1], w8
-; CHECK-GI-NOFP16-NEXT: mov v0.h[2], w9
-; CHECK-GI-NOFP16-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-GI-NOFP16-NEXT: xtn v0.4h, v0.4s
; CHECK-GI-NOFP16-NEXT: ret
;
; CHECK-GI-FP16-LABEL: fptos_v3f16_v3i16:
@@ -6110,11 +6086,7 @@ define <3 x i16> @fptou_v3f16_v3i16(<3 x half> %a) {
; CHECK-GI-NOFP16: // %bb.0: // %entry
; CHECK-GI-NOFP16-NEXT: fcvtl v0.4s, v0.4h
; CHECK-GI-NOFP16-NEXT: fcvtzu v0.4s, v0.4s
-; CHECK-GI-NOFP16-NEXT: mov w8, v0.s[1]
-; CHECK-GI-NOFP16-NEXT: mov w9, v0.s[2]
-; CHECK-GI-NOFP16-NEXT: mov v0.h[1], w8
-; CHECK-GI-NOFP16-NEXT: mov v0.h[2], w9
-; CHECK-GI-NOFP16-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-GI-NOFP16-NEXT: xtn v0.4h, v0.4s
; CHECK-GI-NOFP16-NEXT: ret
;
; CHECK-GI-FP16-LABEL: fptou_v3f16_v3i16:
diff --git a/llvm/test/CodeGen/AArch64/itofp.ll b/llvm/test/CodeGen/AArch64/itofp.ll
index 4ac0479..f70ec0f 100644
--- a/llvm/test/CodeGen/AArch64/itofp.ll
+++ b/llvm/test/CodeGen/AArch64/itofp.ll
@@ -7450,9 +7450,7 @@ define <2 x half> @stofp_v2i16_v2f16(<2 x i16> %a) {
;
; CHECK-GI-FP16-LABEL: stofp_v2i16_v2f16:
; CHECK-GI-FP16: // %bb.0: // %entry
-; CHECK-GI-FP16-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-GI-FP16-NEXT: mov w8, v0.s[1]
-; CHECK-GI-FP16-NEXT: mov v0.h[1], w8
+; CHECK-GI-FP16-NEXT: uzp1 v0.4h, v0.4h, v0.4h
; CHECK-GI-FP16-NEXT: scvtf v0.4h, v0.4h
; CHECK-GI-FP16-NEXT: mov h1, v0.h[1]
; CHECK-GI-FP16-NEXT: mov v0.h[1], v1.h[0]
@@ -7493,9 +7491,7 @@ define <2 x half> @utofp_v2i16_v2f16(<2 x i16> %a) {
;
; CHECK-GI-FP16-LABEL: utofp_v2i16_v2f16:
; CHECK-GI-FP16: // %bb.0: // %entry
-; CHECK-GI-FP16-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-GI-FP16-NEXT: mov w8, v0.s[1]
-; CHECK-GI-FP16-NEXT: mov v0.h[1], w8
+; CHECK-GI-FP16-NEXT: uzp1 v0.4h, v0.4h, v0.4h
; CHECK-GI-FP16-NEXT: ucvtf v0.4h, v0.4h
; CHECK-GI-FP16-NEXT: mov h1, v0.h[1]
; CHECK-GI-FP16-NEXT: mov v0.h[1], v1.h[0]
@@ -8059,8 +8055,7 @@ define <2 x half> @utofp_v2i8_v2f16(<2 x i8> %a) {
; CHECK-GI-FP16-NEXT: movi d1, #0x0000ff000000ff
; CHECK-GI-FP16-NEXT: ushll v0.4s, v0.4h, #0
; CHECK-GI-FP16-NEXT: and v0.8b, v0.8b, v1.8b
-; CHECK-GI-FP16-NEXT: mov w8, v0.s[1]
-; CHECK-GI-FP16-NEXT: mov v0.h[1], w8
+; CHECK-GI-FP16-NEXT: uzp1 v0.4h, v0.4h, v0.4h
; CHECK-GI-FP16-NEXT: ucvtf v0.4h, v0.4h
; CHECK-GI-FP16-NEXT: mov h1, v0.h[1]
; CHECK-GI-FP16-NEXT: mov v0.h[1], v1.h[0]
diff --git a/llvm/test/CodeGen/AArch64/mlicm-stack-write-check.mir b/llvm/test/CodeGen/AArch64/mlicm-stack-write-check.mir
index 51bc77d..406025c 100644
--- a/llvm/test/CodeGen/AArch64/mlicm-stack-write-check.mir
+++ b/llvm/test/CodeGen/AArch64/mlicm-stack-write-check.mir
@@ -3,6 +3,7 @@
---
name: test
tracksRegLiveness: true
+isSSA: false
registers:
- { id: 0, class: gpr64 }
stack:
@@ -30,11 +31,11 @@ body: |
bb.2:
liveins: $x0
%0 = COPY $x0
- %0 = COPY $x0 ; Force isSSA = false.
...
---
name: test2
tracksRegLiveness: true
+isSSA: false
registers:
- { id: 0, class: gpr64 }
stack:
@@ -62,5 +63,4 @@ body: |
bb.2:
liveins: $x0
%0 = COPY $x0
- %0 = COPY $x0 ; Force isSSA = false.
...
diff --git a/llvm/test/CodeGen/AArch64/shift.ll b/llvm/test/CodeGen/AArch64/shift.ll
index 7014a4a..54f7887 100644
--- a/llvm/test/CodeGen/AArch64/shift.ll
+++ b/llvm/test/CodeGen/AArch64/shift.ll
@@ -531,26 +531,8 @@ define <4 x i8> @shl_v4i8(<4 x i8> %0, <4 x i8> %1){
;
; CHECK-GI-LABEL: shl_v4i8:
; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-GI-NEXT: // kill: def $d1 killed $d1 def $q1
-; CHECK-GI-NEXT: mov h2, v0.h[1]
-; CHECK-GI-NEXT: mov h3, v1.h[1]
-; CHECK-GI-NEXT: mov h4, v0.h[2]
-; CHECK-GI-NEXT: mov h5, v0.h[3]
-; CHECK-GI-NEXT: fmov w8, s2
-; CHECK-GI-NEXT: mov h2, v1.h[2]
-; CHECK-GI-NEXT: fmov w9, s3
-; CHECK-GI-NEXT: mov h3, v1.h[3]
-; CHECK-GI-NEXT: mov v0.b[1], w8
-; CHECK-GI-NEXT: mov v1.b[1], w9
-; CHECK-GI-NEXT: fmov w8, s4
-; CHECK-GI-NEXT: fmov w9, s2
-; CHECK-GI-NEXT: mov v0.b[2], w8
-; CHECK-GI-NEXT: mov v1.b[2], w9
-; CHECK-GI-NEXT: fmov w8, s5
-; CHECK-GI-NEXT: fmov w9, s3
-; CHECK-GI-NEXT: mov v0.b[3], w8
-; CHECK-GI-NEXT: mov v1.b[3], w9
+; CHECK-GI-NEXT: uzp1 v0.8b, v0.8b, v0.8b
+; CHECK-GI-NEXT: uzp1 v1.8b, v1.8b, v0.8b
; CHECK-GI-NEXT: ushl v0.8b, v0.8b, v1.8b
; CHECK-GI-NEXT: mov b1, v0.b[1]
; CHECK-GI-NEXT: mov v2.b[0], v0.b[0]
@@ -592,12 +574,8 @@ define <2 x i16> @shl_v2i16(<2 x i16> %0, <2 x i16> %1){
;
; CHECK-GI-LABEL: shl_v2i16:
; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-GI-NEXT: // kill: def $d1 killed $d1 def $q1
-; CHECK-GI-NEXT: mov w8, v0.s[1]
-; CHECK-GI-NEXT: mov w9, v1.s[1]
-; CHECK-GI-NEXT: mov v0.h[1], w8
-; CHECK-GI-NEXT: mov v1.h[1], w9
+; CHECK-GI-NEXT: uzp1 v0.4h, v0.4h, v0.4h
+; CHECK-GI-NEXT: uzp1 v1.4h, v1.4h, v0.4h
; CHECK-GI-NEXT: ushl v0.4h, v0.4h, v1.4h
; CHECK-GI-NEXT: mov h1, v0.h[1]
; CHECK-GI-NEXT: mov v0.h[1], v1.h[0]
@@ -741,26 +719,8 @@ define <4 x i8> @ashr_v4i8(<4 x i8> %0, <4 x i8> %1){
;
; CHECK-GI-LABEL: ashr_v4i8:
; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: // kill: def $d1 killed $d1 def $q1
-; CHECK-GI-NEXT: mov h2, v1.h[1]
-; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-GI-NEXT: mov h3, v0.h[1]
-; CHECK-GI-NEXT: mov h4, v1.h[2]
-; CHECK-GI-NEXT: fmov w8, s2
-; CHECK-GI-NEXT: mov h2, v1.h[3]
-; CHECK-GI-NEXT: fmov w9, s4
-; CHECK-GI-NEXT: mov h4, v0.h[3]
-; CHECK-GI-NEXT: mov v1.b[1], w8
-; CHECK-GI-NEXT: fmov w8, s3
-; CHECK-GI-NEXT: mov h3, v0.h[2]
-; CHECK-GI-NEXT: mov v0.b[1], w8
-; CHECK-GI-NEXT: fmov w8, s3
-; CHECK-GI-NEXT: mov v1.b[2], w9
-; CHECK-GI-NEXT: mov v0.b[2], w8
-; CHECK-GI-NEXT: fmov w8, s2
-; CHECK-GI-NEXT: mov v1.b[3], w8
-; CHECK-GI-NEXT: fmov w8, s4
-; CHECK-GI-NEXT: mov v0.b[3], w8
+; CHECK-GI-NEXT: uzp1 v1.8b, v1.8b, v0.8b
+; CHECK-GI-NEXT: uzp1 v0.8b, v0.8b, v0.8b
; CHECK-GI-NEXT: neg v1.8b, v1.8b
; CHECK-GI-NEXT: sshl v0.8b, v0.8b, v1.8b
; CHECK-GI-NEXT: mov b1, v0.b[1]
@@ -802,12 +762,8 @@ define <2 x i16> @ashr_v2i16(<2 x i16> %0, <2 x i16> %1){
;
; CHECK-GI-LABEL: ashr_v2i16:
; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: // kill: def $d1 killed $d1 def $q1
-; CHECK-GI-NEXT: mov w8, v1.s[1]
-; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-GI-NEXT: mov w9, v0.s[1]
-; CHECK-GI-NEXT: mov v1.h[1], w8
-; CHECK-GI-NEXT: mov v0.h[1], w9
+; CHECK-GI-NEXT: uzp1 v1.4h, v1.4h, v0.4h
+; CHECK-GI-NEXT: uzp1 v0.4h, v0.4h, v0.4h
; CHECK-GI-NEXT: neg v1.4h, v1.4h
; CHECK-GI-NEXT: sshl v0.4h, v0.4h, v1.4h
; CHECK-GI-NEXT: mov h1, v0.h[1]
@@ -946,26 +902,8 @@ define <4 x i8> @lshr_v4i8(<4 x i8> %0, <4 x i8> %1){
;
; CHECK-GI-LABEL: lshr_v4i8:
; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: // kill: def $d1 killed $d1 def $q1
-; CHECK-GI-NEXT: mov h2, v1.h[1]
-; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-GI-NEXT: mov h3, v0.h[1]
-; CHECK-GI-NEXT: mov h4, v1.h[2]
-; CHECK-GI-NEXT: fmov w8, s2
-; CHECK-GI-NEXT: mov h2, v1.h[3]
-; CHECK-GI-NEXT: fmov w9, s4
-; CHECK-GI-NEXT: mov h4, v0.h[3]
-; CHECK-GI-NEXT: mov v1.b[1], w8
-; CHECK-GI-NEXT: fmov w8, s3
-; CHECK-GI-NEXT: mov h3, v0.h[2]
-; CHECK-GI-NEXT: mov v0.b[1], w8
-; CHECK-GI-NEXT: fmov w8, s3
-; CHECK-GI-NEXT: mov v1.b[2], w9
-; CHECK-GI-NEXT: mov v0.b[2], w8
-; CHECK-GI-NEXT: fmov w8, s2
-; CHECK-GI-NEXT: mov v1.b[3], w8
-; CHECK-GI-NEXT: fmov w8, s4
-; CHECK-GI-NEXT: mov v0.b[3], w8
+; CHECK-GI-NEXT: uzp1 v1.8b, v1.8b, v0.8b
+; CHECK-GI-NEXT: uzp1 v0.8b, v0.8b, v0.8b
; CHECK-GI-NEXT: neg v1.8b, v1.8b
; CHECK-GI-NEXT: ushl v0.8b, v0.8b, v1.8b
; CHECK-GI-NEXT: mov b1, v0.b[1]
@@ -1006,12 +944,8 @@ define <2 x i16> @lshr_v2i16(<2 x i16> %0, <2 x i16> %1){
;
; CHECK-GI-LABEL: lshr_v2i16:
; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: // kill: def $d1 killed $d1 def $q1
-; CHECK-GI-NEXT: mov w8, v1.s[1]
-; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-GI-NEXT: mov w9, v0.s[1]
-; CHECK-GI-NEXT: mov v1.h[1], w8
-; CHECK-GI-NEXT: mov v0.h[1], w9
+; CHECK-GI-NEXT: uzp1 v1.4h, v1.4h, v0.4h
+; CHECK-GI-NEXT: uzp1 v0.4h, v0.4h, v0.4h
; CHECK-GI-NEXT: neg v1.4h, v1.4h
; CHECK-GI-NEXT: ushl v0.4h, v0.4h, v1.4h
; CHECK-GI-NEXT: mov h1, v0.h[1]
diff --git a/llvm/test/CodeGen/AArch64/shufflevector.ll b/llvm/test/CodeGen/AArch64/shufflevector.ll
index 954458e..5f4ff1e 100644
--- a/llvm/test/CodeGen/AArch64/shufflevector.ll
+++ b/llvm/test/CodeGen/AArch64/shufflevector.ll
@@ -209,27 +209,9 @@ define i32 @shufflevector_v4i8(<4 x i8> %a, <4 x i8> %b){
;
; CHECK-GI-LABEL: shufflevector_v4i8:
; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-GI-NEXT: // kill: def $d1 killed $d1 def $q1
-; CHECK-GI-NEXT: mov h2, v0.h[1]
-; CHECK-GI-NEXT: mov h3, v1.h[1]
-; CHECK-GI-NEXT: mov h4, v0.h[2]
-; CHECK-GI-NEXT: mov h5, v0.h[3]
-; CHECK-GI-NEXT: fmov w8, s2
-; CHECK-GI-NEXT: mov h2, v1.h[2]
-; CHECK-GI-NEXT: fmov w9, s3
-; CHECK-GI-NEXT: mov h3, v1.h[3]
-; CHECK-GI-NEXT: mov v0.b[1], w8
-; CHECK-GI-NEXT: mov v1.b[1], w9
-; CHECK-GI-NEXT: fmov w8, s4
-; CHECK-GI-NEXT: fmov w9, s2
-; CHECK-GI-NEXT: mov v0.b[2], w8
-; CHECK-GI-NEXT: mov v1.b[2], w9
-; CHECK-GI-NEXT: fmov w8, s5
-; CHECK-GI-NEXT: fmov w9, s3
-; CHECK-GI-NEXT: mov v0.b[3], w8
-; CHECK-GI-NEXT: mov v1.b[3], w9
+; CHECK-GI-NEXT: uzp1 v0.8b, v0.8b, v0.8b
; CHECK-GI-NEXT: adrp x8, .LCPI15_0
+; CHECK-GI-NEXT: uzp1 v1.8b, v1.8b, v0.8b
; CHECK-GI-NEXT: mov v0.d[1], v1.d[0]
; CHECK-GI-NEXT: ldr d1, [x8, :lo12:.LCPI15_0]
; CHECK-GI-NEXT: tbl v0.16b, { v0.16b }, v1.16b
@@ -284,13 +266,9 @@ define i32 @shufflevector_v2i16(<2 x i16> %a, <2 x i16> %b){
;
; CHECK-GI-LABEL: shufflevector_v2i16:
; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-GI-NEXT: // kill: def $d1 killed $d1 def $q1
-; CHECK-GI-NEXT: mov w8, v0.s[1]
-; CHECK-GI-NEXT: mov w9, v1.s[1]
-; CHECK-GI-NEXT: mov v0.h[1], w8
-; CHECK-GI-NEXT: mov v1.h[1], w9
+; CHECK-GI-NEXT: uzp1 v0.4h, v0.4h, v0.4h
; CHECK-GI-NEXT: adrp x8, .LCPI17_0
+; CHECK-GI-NEXT: uzp1 v1.4h, v1.4h, v0.4h
; CHECK-GI-NEXT: mov v0.d[1], v1.d[0]
; CHECK-GI-NEXT: ldr d1, [x8, :lo12:.LCPI17_0]
; CHECK-GI-NEXT: tbl v0.16b, { v0.16b }, v1.16b
@@ -403,16 +381,7 @@ define i32 @shufflevector_v4i8_zeroes(<4 x i8> %a, <4 x i8> %b){
;
; CHECK-GI-LABEL: shufflevector_v4i8_zeroes:
; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-GI-NEXT: mov h1, v0.h[1]
-; CHECK-GI-NEXT: mov h2, v0.h[2]
-; CHECK-GI-NEXT: fmov w8, s1
-; CHECK-GI-NEXT: mov h1, v0.h[3]
-; CHECK-GI-NEXT: mov v0.b[1], w8
-; CHECK-GI-NEXT: fmov w8, s2
-; CHECK-GI-NEXT: mov v0.b[2], w8
-; CHECK-GI-NEXT: fmov w8, s1
-; CHECK-GI-NEXT: mov v0.b[3], w8
+; CHECK-GI-NEXT: uzp1 v0.8b, v0.8b, v0.8b
; CHECK-GI-NEXT: dup v0.8b, v0.b[0]
; CHECK-GI-NEXT: fmov w0, s0
; CHECK-GI-NEXT: ret
@@ -448,9 +417,7 @@ define i32 @shufflevector_v2i16_zeroes(<2 x i16> %a, <2 x i16> %b){
;
; CHECK-GI-LABEL: shufflevector_v2i16_zeroes:
; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-GI-NEXT: mov w8, v0.s[1]
-; CHECK-GI-NEXT: mov v0.h[1], w8
+; CHECK-GI-NEXT: uzp1 v0.4h, v0.4h, v0.4h
; CHECK-GI-NEXT: dup v0.4h, v0.h[0]
; CHECK-GI-NEXT: fmov w0, s0
; CHECK-GI-NEXT: ret
diff --git a/llvm/test/CodeGen/AArch64/sincos-stack-slots.ll b/llvm/test/CodeGen/AArch64/sincos-stack-slots.ll
new file mode 100644
index 0000000..8ef8b5d
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sincos-stack-slots.ll
@@ -0,0 +1,255 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=aarch64-linux-gnu -o - %s | FileCheck %s
+
+; This file tests eliding stack slots when lowering the FSINCOS ISD node.
+
+define { float, float } @sincos_f32_value_return(float %x) {
+; CHECK-LABEL: sincos_f32_value_return:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: .cfi_offset w30, -16
+; CHECK-NEXT: add x0, sp, #12
+; CHECK-NEXT: add x1, sp, #8
+; CHECK-NEXT: bl sincosf
+; CHECK-NEXT: ldp s1, s0, [sp, #8]
+; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+entry:
+ %sin = tail call float @llvm.sin.f32(float %x)
+ %cos = tail call float @llvm.cos.f32(float %x)
+ %ret_0 = insertvalue { float, float } poison, float %sin, 0
+ %ret_1 = insertvalue { float, float } %ret_0, float %cos, 1
+ ret { float, float } %ret_1
+}
+
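
Conceptually, the lowering merges a sin/cos pair on the same operand into one
libcall; when the results feed ordinary non-aliasing stores, the stack
temporaries are elided and the destination pointers are passed straight to
the library function. A sketch of the rewritten form (not actual
SelectionDAG output):

  declare void @sincosf(float, ptr, ptr)

  define void @lowered_form(float %x, ptr noalias %out_sin, ptr noalias %out_cos) {
    call void @sincosf(float %x, ptr %out_sin, ptr %out_cos)
    ret void
  }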
+define void @sincos_f32_ptr_return(float %x, ptr noalias %out_sin, ptr noalias %out_cos) {
+; CHECK-LABEL: sincos_f32_ptr_return:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: .cfi_offset w30, -16
+; CHECK-NEXT: bl sincosf
+; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+entry:
+ %sin = tail call float @llvm.sin.f32(float %x)
+ %cos = tail call float @llvm.cos.f32(float %x)
+ store float %sin, ptr %out_sin, align 4
+ store float %cos, ptr %out_cos, align 4
+ ret void
+}
+
+define float @sincos_f32_mixed_return(float %x, ptr %out_sin) {
+; CHECK-LABEL: sincos_f32_mixed_return:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: .cfi_offset w30, -16
+; CHECK-NEXT: add x1, sp, #12
+; CHECK-NEXT: bl sincosf
+; CHECK-NEXT: ldr s0, [sp, #12]
+; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+entry:
+ %sin = tail call float @llvm.sin.f32(float %x)
+ %cos = tail call float @llvm.cos.f32(float %x)
+ store float %sin, ptr %out_sin, align 4
+ ret float %cos
+}
+
+define { double, double } @sincos_f64_value_return(double %x) {
+; CHECK-LABEL: sincos_f64_value_return:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: sub sp, sp, #32
+; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 32
+; CHECK-NEXT: .cfi_offset w30, -16
+; CHECK-NEXT: add x0, sp, #24
+; CHECK-NEXT: add x1, sp, #8
+; CHECK-NEXT: bl sincos
+; CHECK-NEXT: ldr d0, [sp, #24]
+; CHECK-NEXT: ldr d1, [sp, #8]
+; CHECK-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload
+; CHECK-NEXT: add sp, sp, #32
+; CHECK-NEXT: ret
+entry:
+ %sin = tail call double @llvm.sin.f64(double %x)
+ %cos = tail call double @llvm.cos.f64(double %x)
+ %ret_0 = insertvalue { double, double } poison, double %sin, 0
+ %ret_1 = insertvalue { double, double } %ret_0, double %cos, 1
+ ret { double, double } %ret_1
+}
+
+define void @sincos_f64_ptr_return(double %x, ptr noalias %out_sin, ptr noalias %out_cos) {
+; CHECK-LABEL: sincos_f64_ptr_return:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: .cfi_offset w30, -16
+; CHECK-NEXT: bl sincos
+; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+entry:
+ %sin = tail call double @llvm.sin.f64(double %x)
+ %cos = tail call double @llvm.cos.f64(double %x)
+ store double %sin, ptr %out_sin, align 8
+ store double %cos, ptr %out_cos, align 8
+ ret void
+}
+
+define double @sincos_f64_mixed_return(double %x, ptr %out_sin) {
+; CHECK-LABEL: sincos_f64_mixed_return:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: .cfi_offset w30, -16
+; CHECK-NEXT: add x1, sp, #8
+; CHECK-NEXT: bl sincos
+; CHECK-NEXT: ldr d0, [sp, #8]
+; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+entry:
+ %sin = tail call double @llvm.sin.f64(double %x)
+ %cos = tail call double @llvm.cos.f64(double %x)
+ store double %sin, ptr %out_sin, align 8
+ ret double %cos
+}
+
+; Here %out_sin and %out_cos may alias, so we can't replace both stores with
+; the call to sincosf (the order in which sincosf performs its two stores is
+; not defined).
+define void @sincos_may_alias(float %x, ptr %out_sin, ptr %out_cos) {
+; CHECK-LABEL: sincos_may_alias:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: sub sp, sp, #32
+; CHECK-NEXT: stp x30, x19, [sp, #16] // 16-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 32
+; CHECK-NEXT: .cfi_offset w19, -8
+; CHECK-NEXT: .cfi_offset w30, -16
+; CHECK-NEXT: mov x19, x1
+; CHECK-NEXT: add x1, sp, #12
+; CHECK-NEXT: bl sincosf
+; CHECK-NEXT: ldr s0, [sp, #12]
+; CHECK-NEXT: str s0, [x19]
+; CHECK-NEXT: ldp x30, x19, [sp, #16] // 16-byte Folded Reload
+; CHECK-NEXT: add sp, sp, #32
+; CHECK-NEXT: ret
+entry:
+ %sin = tail call float @llvm.sin.f32(float %x)
+ %cos = tail call float @llvm.cos.f32(float %x)
+ store float %sin, ptr %out_sin, align 4
+ store float %cos, ptr %out_cos, align 4
+ ret void
+}
+
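
A hypothetical caller showing why only one store may be folded in the
may-alias case: when both destinations are the same pointer, IR semantics
require the cos store to execute last, which the bare libcall could not
guarantee.

  define void @both_outputs_alias(float %x, ptr %p) {
    ; the final value at %p must be the cos result
    call void @sincos_may_alias(float %x, ptr %p, ptr %p)
    ret void
  }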
+; Here %out is used for both sin and cos (with the final value stored being cos).
+define float @sincos_multiple_uses(float %x, ptr %out) {
+; CHECK-LABEL: sincos_multiple_uses:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: .cfi_offset w30, -16
+; CHECK-NEXT: mov x1, x0
+; CHECK-NEXT: add x0, sp, #12
+; CHECK-NEXT: bl sincosf
+; CHECK-NEXT: ldr s0, [sp, #12]
+; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %sin = call float @llvm.sin.f32(float %x)
+ store float %sin, ptr %out, align 4
+ %reload = load float, ptr %out, align 4
+ %cos = call float @llvm.cos.f32(float %x)
+ store float %cos, ptr %out, align 4
+ ret float %reload
+}
+
+; Negative test. We can't fold volatile stores into the library call.
+define void @sincos_volatile_result_stores(float %x, ptr noalias %out_sin, ptr noalias %out_cos) {
+; CHECK-LABEL: sincos_volatile_result_stores:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: str x30, [sp, #-32]! // 8-byte Folded Spill
+; CHECK-NEXT: stp x20, x19, [sp, #16] // 16-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 32
+; CHECK-NEXT: .cfi_offset w19, -8
+; CHECK-NEXT: .cfi_offset w20, -16
+; CHECK-NEXT: .cfi_offset w30, -32
+; CHECK-NEXT: mov x19, x1
+; CHECK-NEXT: mov x20, x0
+; CHECK-NEXT: add x0, sp, #12
+; CHECK-NEXT: add x1, sp, #8
+; CHECK-NEXT: bl sincosf
+; CHECK-NEXT: ldp s1, s0, [sp, #8]
+; CHECK-NEXT: str s0, [x20]
+; CHECK-NEXT: str s1, [x19]
+; CHECK-NEXT: ldp x20, x19, [sp, #16] // 16-byte Folded Reload
+; CHECK-NEXT: ldr x30, [sp], #32 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+entry:
+ %sin = tail call float @llvm.sin.f32(float %x)
+ %cos = tail call float @llvm.cos.f32(float %x)
+ store volatile float %sin, ptr %out_sin, align 4
+ store volatile float %cos, ptr %out_cos, align 4
+ ret void
+}
+
+; Negative test. We can't fold atomic stores into the library call.
+define void @sincos_atomic_result_stores(float %x, ptr noalias %out_sin, ptr noalias %out_cos) {
+; CHECK-LABEL: sincos_atomic_result_stores:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: str x30, [sp, #-32]! // 8-byte Folded Spill
+; CHECK-NEXT: stp x20, x19, [sp, #16] // 16-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 32
+; CHECK-NEXT: .cfi_offset w19, -8
+; CHECK-NEXT: .cfi_offset w20, -16
+; CHECK-NEXT: .cfi_offset w30, -32
+; CHECK-NEXT: mov x19, x1
+; CHECK-NEXT: mov x20, x0
+; CHECK-NEXT: add x0, sp, #12
+; CHECK-NEXT: add x1, sp, #8
+; CHECK-NEXT: bl sincosf
+; CHECK-NEXT: ldr w8, [sp, #12]
+; CHECK-NEXT: str w8, [x20]
+; CHECK-NEXT: ldr w8, [sp, #8]
+; CHECK-NEXT: str w8, [x19]
+; CHECK-NEXT: ldp x20, x19, [sp, #16] // 16-byte Folded Reload
+; CHECK-NEXT: ldr x30, [sp], #32 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+entry:
+ %sin = tail call float @llvm.sin.f32(float %x)
+ %cos = tail call float @llvm.cos.f32(float %x)
+ store atomic float %sin, ptr %out_sin unordered, align 4
+ store atomic float %cos, ptr %out_cos unordered, align 4
+ ret void
+}
+
+; Negative test. We can't fold misaligned stores into the library call.
+define void @sincos_misaligned_result_stores(double %x, ptr noalias %out_sin, ptr noalias %out_cos) {
+; CHECK-LABEL: sincos_misaligned_result_stores:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: sub sp, sp, #48
+; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill
+; CHECK-NEXT: stp x20, x19, [sp, #32] // 16-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 48
+; CHECK-NEXT: .cfi_offset w19, -8
+; CHECK-NEXT: .cfi_offset w20, -16
+; CHECK-NEXT: .cfi_offset w30, -32
+; CHECK-NEXT: mov x19, x1
+; CHECK-NEXT: mov x20, x0
+; CHECK-NEXT: add x0, sp, #24
+; CHECK-NEXT: add x1, sp, #8
+; CHECK-NEXT: bl sincos
+; CHECK-NEXT: ldr d0, [sp, #24]
+; CHECK-NEXT: ldr d1, [sp, #8]
+; CHECK-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload
+; CHECK-NEXT: str d0, [x20]
+; CHECK-NEXT: str d1, [x19]
+; CHECK-NEXT: ldp x20, x19, [sp, #32] // 16-byte Folded Reload
+; CHECK-NEXT: add sp, sp, #48
+; CHECK-NEXT: ret
+entry:
+ %sin = tail call double @llvm.sin.f64(double %x)
+ %cos = tail call double @llvm.cos.f64(double %x)
+ store double %sin, ptr %out_sin, align 4
+ store double %cos, ptr %out_cos, align 4
+ ret void
+}
diff --git a/llvm/test/CodeGen/AArch64/sme-streaming-mode-changing-call-disable-stackslot-scavenging.ll b/llvm/test/CodeGen/AArch64/sme-streaming-mode-changing-call-disable-stackslot-scavenging.ll
index ac19bd5..803bb9f 100644
--- a/llvm/test/CodeGen/AArch64/sme-streaming-mode-changing-call-disable-stackslot-scavenging.ll
+++ b/llvm/test/CodeGen/AArch64/sme-streaming-mode-changing-call-disable-stackslot-scavenging.ll
@@ -45,6 +45,51 @@ define void @test_no_stackslot_scavenging(float %f) #0 {
ret void
}
+define void @test_no_stackslot_scavenging_with_fp(float %f, i64 %n) #0 "frame-pointer"="all" {
+; CHECK-LABEL: test_no_stackslot_scavenging_with_fp:
+; CHECK: // %bb.0:
+; CHECK-NEXT: stp d15, d14, [sp, #-128]! // 16-byte Folded Spill
+; CHECK-NEXT: cntd x9
+; CHECK-NEXT: stp d13, d12, [sp, #16] // 16-byte Folded Spill
+; CHECK-NEXT: stp d11, d10, [sp, #32] // 16-byte Folded Spill
+; CHECK-NEXT: stp d9, d8, [sp, #48] // 16-byte Folded Spill
+; CHECK-NEXT: stp x29, x30, [sp, #64] // 16-byte Folded Spill
+; CHECK-NEXT: add x29, sp, #64
+; CHECK-NEXT: str x9, [sp, #80] // 8-byte Folded Spill
+; CHECK-NEXT: stp x28, x25, [sp, #96] // 16-byte Folded Spill
+; CHECK-NEXT: stp x24, x19, [sp, #112] // 16-byte Folded Spill
+; CHECK-NEXT: addvl sp, sp, #-1
+; CHECK-NEXT: lsl x9, x0, #3
+; CHECK-NEXT: mov x8, sp
+; CHECK-NEXT: mov x19, sp
+; CHECK-NEXT: str s0, [x29, #28] // 4-byte Folded Spill
+; CHECK-NEXT: add x9, x9, #15
+; CHECK-NEXT: and x9, x9, #0xfffffffffffffff0
+; CHECK-NEXT: sub x8, x8, x9
+; CHECK-NEXT: mov sp, x8
+; CHECK-NEXT: //APP
+; CHECK-NEXT: //NO_APP
+; CHECK-NEXT: smstop sm
+; CHECK-NEXT: ldr s0, [x29, #28] // 4-byte Folded Reload
+; CHECK-NEXT: bl use_f
+; CHECK-NEXT: smstart sm
+; CHECK-NEXT: sub sp, x29, #64
+; CHECK-NEXT: ldp x24, x19, [sp, #112] // 16-byte Folded Reload
+; CHECK-NEXT: ldp x28, x25, [sp, #96] // 16-byte Folded Reload
+; CHECK-NEXT: ldp x29, x30, [sp, #64] // 16-byte Folded Reload
+; CHECK-NEXT: ldp d9, d8, [sp, #48] // 16-byte Folded Reload
+; CHECK-NEXT: ldp d11, d10, [sp, #32] // 16-byte Folded Reload
+; CHECK-NEXT: ldp d13, d12, [sp, #16] // 16-byte Folded Reload
+; CHECK-NEXT: ldp d15, d14, [sp], #128 // 16-byte Folded Reload
+; CHECK-NEXT: ret
+ %ptr2 = alloca i64, i64 %n, align 8
+ %ptr = alloca <vscale x 16 x i8>
+ call void asm sideeffect "", "~{x24},~{x25}"() nounwind
+ call void @use_f(float %f)
+ ret void
+}
+
declare void @use_f(float)
+declare void @use_f_and_ptr(float, ptr)
attributes #0 = { nounwind "target-features"="+sve,+sme" "aarch64_pstate_sm_enabled" }
diff --git a/llvm/test/CodeGen/AArch64/sve-bf16-converts.ll b/llvm/test/CodeGen/AArch64/sve-bf16-converts.ll
index d72f92c..d63f7e6 100644
--- a/llvm/test/CodeGen/AArch64/sve-bf16-converts.ll
+++ b/llvm/test/CodeGen/AArch64/sve-bf16-converts.ll
@@ -1,9 +1,15 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc -mattr=+sve < %s | FileCheck %s
-; RUN: llc -mattr=+sme -force-streaming < %s | FileCheck %s
+; RUN: llc -mattr=+sve < %s | FileCheck %s --check-prefixes=CHECK,NOBF16
+; RUN: llc -mattr=+sve --enable-no-nans-fp-math < %s | FileCheck %s --check-prefixes=CHECK,NOBF16NNAN
+; RUN: llc -mattr=+sve,+bf16 < %s | FileCheck %s --check-prefixes=CHECK,BF16
+; RUN: llc -mattr=+sme -force-streaming < %s | FileCheck %s --check-prefixes=CHECK,BF16
target triple = "aarch64-unknown-linux-gnu"
+; NOTE: "fptrunc <# x double> to <# x bfloat>" is not supported because SVE
+; lacks a down convert that rounds to odd. Such IR will trigger the usual
+; failure (crash) when attempting to unroll a scalable vector.
+
define <vscale x 2 x float> @fpext_nxv2bf16_to_nxv2f32(<vscale x 2 x bfloat> %a) {
; CHECK-LABEL: fpext_nxv2bf16_to_nxv2f32:
; CHECK: // %bb.0:
@@ -87,3 +93,122 @@ define <vscale x 8 x double> @fpext_nxv8bf16_to_nxv8f64(<vscale x 8 x bfloat> %a
%res = fpext <vscale x 8 x bfloat> %a to <vscale x 8 x double>
ret <vscale x 8 x double> %res
}
+
+define <vscale x 2 x bfloat> @fptrunc_nxv2f32_to_nxv2bf16(<vscale x 2 x float> %a) {
+; NOBF16-LABEL: fptrunc_nxv2f32_to_nxv2bf16:
+; NOBF16: // %bb.0:
+; NOBF16-NEXT: mov z1.s, #32767 // =0x7fff
+; NOBF16-NEXT: lsr z2.s, z0.s, #16
+; NOBF16-NEXT: ptrue p0.d
+; NOBF16-NEXT: fcmuo p0.s, p0/z, z0.s, z0.s
+; NOBF16-NEXT: and z2.s, z2.s, #0x1
+; NOBF16-NEXT: add z1.s, z0.s, z1.s
+; NOBF16-NEXT: orr z0.s, z0.s, #0x400000
+; NOBF16-NEXT: add z1.s, z2.s, z1.s
+; NOBF16-NEXT: sel z0.s, p0, z0.s, z1.s
+; NOBF16-NEXT: lsr z0.s, z0.s, #16
+; NOBF16-NEXT: ret
+;
+; NOBF16NNAN-LABEL: fptrunc_nxv2f32_to_nxv2bf16:
+; NOBF16NNAN: // %bb.0:
+; NOBF16NNAN-NEXT: mov z1.s, #32767 // =0x7fff
+; NOBF16NNAN-NEXT: lsr z2.s, z0.s, #16
+; NOBF16NNAN-NEXT: and z2.s, z2.s, #0x1
+; NOBF16NNAN-NEXT: add z0.s, z0.s, z1.s
+; NOBF16NNAN-NEXT: add z0.s, z2.s, z0.s
+; NOBF16NNAN-NEXT: lsr z0.s, z0.s, #16
+; NOBF16NNAN-NEXT: ret
+;
+; BF16-LABEL: fptrunc_nxv2f32_to_nxv2bf16:
+; BF16: // %bb.0:
+; BF16-NEXT: ptrue p0.d
+; BF16-NEXT: bfcvt z0.h, p0/m, z0.s
+; BF16-NEXT: ret
+ %res = fptrunc <vscale x 2 x float> %a to <vscale x 2 x bfloat>
+ ret <vscale x 2 x bfloat> %res
+}
+
+define <vscale x 4 x bfloat> @fptrunc_nxv4f32_to_nxv4bf16(<vscale x 4 x float> %a) {
+; NOBF16-LABEL: fptrunc_nxv4f32_to_nxv4bf16:
+; NOBF16: // %bb.0:
+; NOBF16-NEXT: mov z1.s, #32767 // =0x7fff
+; NOBF16-NEXT: lsr z2.s, z0.s, #16
+; NOBF16-NEXT: ptrue p0.s
+; NOBF16-NEXT: fcmuo p0.s, p0/z, z0.s, z0.s
+; NOBF16-NEXT: and z2.s, z2.s, #0x1
+; NOBF16-NEXT: add z1.s, z0.s, z1.s
+; NOBF16-NEXT: orr z0.s, z0.s, #0x400000
+; NOBF16-NEXT: add z1.s, z2.s, z1.s
+; NOBF16-NEXT: sel z0.s, p0, z0.s, z1.s
+; NOBF16-NEXT: lsr z0.s, z0.s, #16
+; NOBF16-NEXT: ret
+;
+; NOBF16NNAN-LABEL: fptrunc_nxv4f32_to_nxv4bf16:
+; NOBF16NNAN: // %bb.0:
+; NOBF16NNAN-NEXT: mov z1.s, #32767 // =0x7fff
+; NOBF16NNAN-NEXT: lsr z2.s, z0.s, #16
+; NOBF16NNAN-NEXT: and z2.s, z2.s, #0x1
+; NOBF16NNAN-NEXT: add z0.s, z0.s, z1.s
+; NOBF16NNAN-NEXT: add z0.s, z2.s, z0.s
+; NOBF16NNAN-NEXT: lsr z0.s, z0.s, #16
+; NOBF16NNAN-NEXT: ret
+;
+; BF16-LABEL: fptrunc_nxv4f32_to_nxv4bf16:
+; BF16: // %bb.0:
+; BF16-NEXT: ptrue p0.s
+; BF16-NEXT: bfcvt z0.h, p0/m, z0.s
+; BF16-NEXT: ret
+ %res = fptrunc <vscale x 4 x float> %a to <vscale x 4 x bfloat>
+ ret <vscale x 4 x bfloat> %res
+}
+
+define <vscale x 8 x bfloat> @fptrunc_nxv8f32_to_nxv8bf16(<vscale x 8 x float> %a) {
+; NOBF16-LABEL: fptrunc_nxv8f32_to_nxv8bf16:
+; NOBF16: // %bb.0:
+; NOBF16-NEXT: mov z2.s, #32767 // =0x7fff
+; NOBF16-NEXT: lsr z3.s, z1.s, #16
+; NOBF16-NEXT: lsr z4.s, z0.s, #16
+; NOBF16-NEXT: ptrue p0.s
+; NOBF16-NEXT: and z3.s, z3.s, #0x1
+; NOBF16-NEXT: and z4.s, z4.s, #0x1
+; NOBF16-NEXT: fcmuo p1.s, p0/z, z1.s, z1.s
+; NOBF16-NEXT: add z5.s, z1.s, z2.s
+; NOBF16-NEXT: add z2.s, z0.s, z2.s
+; NOBF16-NEXT: fcmuo p0.s, p0/z, z0.s, z0.s
+; NOBF16-NEXT: orr z1.s, z1.s, #0x400000
+; NOBF16-NEXT: orr z0.s, z0.s, #0x400000
+; NOBF16-NEXT: add z3.s, z3.s, z5.s
+; NOBF16-NEXT: add z2.s, z4.s, z2.s
+; NOBF16-NEXT: sel z1.s, p1, z1.s, z3.s
+; NOBF16-NEXT: sel z0.s, p0, z0.s, z2.s
+; NOBF16-NEXT: lsr z1.s, z1.s, #16
+; NOBF16-NEXT: lsr z0.s, z0.s, #16
+; NOBF16-NEXT: uzp1 z0.h, z0.h, z1.h
+; NOBF16-NEXT: ret
+;
+; NOBF16NNAN-LABEL: fptrunc_nxv8f32_to_nxv8bf16:
+; NOBF16NNAN: // %bb.0:
+; NOBF16NNAN-NEXT: mov z2.s, #32767 // =0x7fff
+; NOBF16NNAN-NEXT: lsr z3.s, z1.s, #16
+; NOBF16NNAN-NEXT: lsr z4.s, z0.s, #16
+; NOBF16NNAN-NEXT: and z3.s, z3.s, #0x1
+; NOBF16NNAN-NEXT: and z4.s, z4.s, #0x1
+; NOBF16NNAN-NEXT: add z1.s, z1.s, z2.s
+; NOBF16NNAN-NEXT: add z0.s, z0.s, z2.s
+; NOBF16NNAN-NEXT: add z1.s, z3.s, z1.s
+; NOBF16NNAN-NEXT: add z0.s, z4.s, z0.s
+; NOBF16NNAN-NEXT: lsr z1.s, z1.s, #16
+; NOBF16NNAN-NEXT: lsr z0.s, z0.s, #16
+; NOBF16NNAN-NEXT: uzp1 z0.h, z0.h, z1.h
+; NOBF16NNAN-NEXT: ret
+;
+; BF16-LABEL: fptrunc_nxv8f32_to_nxv8bf16:
+; BF16: // %bb.0:
+; BF16-NEXT: ptrue p0.s
+; BF16-NEXT: bfcvt z1.h, p0/m, z1.s
+; BF16-NEXT: bfcvt z0.h, p0/m, z0.s
+; BF16-NEXT: uzp1 z0.h, z0.h, z1.h
+; BF16-NEXT: ret
+ %res = fptrunc <vscale x 8 x float> %a to <vscale x 8 x bfloat>
+ ret <vscale x 8 x bfloat> %res
+}
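
The NOBF16 sequences above perform float-to-bfloat round-to-nearest-even in
integer arithmetic: add 0x7fff plus the low bit of the kept half, then take
the high 16 bits; the fcmuo/orr/sel steps in the non-NNAN variant
additionally quiet NaNs. A scalar sketch of the no-NaN path (illustrative
function name):

  define i16 @fptrunc_to_bf16_nnan(float %a) {
    %bits = bitcast float %a to i32
    %keep = lshr i32 %bits, 16
    %tie  = and i32 %keep, 1        ; low bit of the retained half
    %bias = add i32 %bits, 32767    ; 0x7fff
    %rnd  = add i32 %tie, %bias     ; round to nearest, ties to even
    %hi   = lshr i32 %rnd, 16
    %bf   = trunc i32 %hi to i16
    ret i16 %bf
  }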
diff --git a/llvm/test/CodeGen/AArch64/wide-scalar-shift-by-byte-multiple-legalization.ll b/llvm/test/CodeGen/AArch64/wide-scalar-shift-by-byte-multiple-legalization.ll
index e21015a..b02788a 100644
--- a/llvm/test/CodeGen/AArch64/wide-scalar-shift-by-byte-multiple-legalization.ll
+++ b/llvm/test/CodeGen/AArch64/wide-scalar-shift-by-byte-multiple-legalization.ll
@@ -186,10 +186,54 @@ define void @lshr_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; ALL-NEXT: ldr q1, [x0]
; ALL-NEXT: stp x9, x8, [sp, #16]
; ALL-NEXT: mov x8, sp
-; ALL-NEXT: and x9, x10, #0x1f
+; ALL-NEXT: and x9, x10, #0x18
; ALL-NEXT: str q1, [sp]
; ALL-NEXT: add x8, x8, x9
+; ALL-NEXT: lsl x9, x10, #3
; ALL-NEXT: stp q0, q0, [sp, #32]
+; ALL-NEXT: ldp x11, x10, [x8, #16]
+; ALL-NEXT: mvn w13, w9
+; ALL-NEXT: ldp x8, x12, [x8]
+; ALL-NEXT: and x9, x9, #0x38
+; ALL-NEXT: lsl x14, x10, #1
+; ALL-NEXT: lsl x15, x11, #1
+; ALL-NEXT: lsr x11, x11, x9
+; ALL-NEXT: lsl x16, x12, #1
+; ALL-NEXT: lsr x10, x10, x9
+; ALL-NEXT: lsr x12, x12, x9
+; ALL-NEXT: lsl x14, x14, x13
+; ALL-NEXT: lsr x8, x8, x9
+; ALL-NEXT: lsl x9, x16, x13
+; ALL-NEXT: lsl x13, x15, x13
+; ALL-NEXT: orr x11, x14, x11
+; ALL-NEXT: orr x8, x9, x8
+; ALL-NEXT: orr x9, x12, x13
+; ALL-NEXT: stp x11, x10, [x2, #16]
+; ALL-NEXT: stp x8, x9, [x2]
+; ALL-NEXT: add sp, sp, #64
+; ALL-NEXT: ret
+ %src = load i256, ptr %src.ptr, align 1
+ %byteOff = load i256, ptr %byteOff.ptr, align 1
+ %bitOff = shl i256 %byteOff, 3
+ %res = lshr i256 %src, %bitOff
+ store i256 %res, ptr %dst, align 1
+ ret void
+}
+
+define void @lshr_32bytes_dwordOff(ptr %src.ptr, ptr %dwordOff.ptr, ptr %dst) nounwind {
+; ALL-LABEL: lshr_32bytes_dwordOff:
+; ALL: // %bb.0:
+; ALL-NEXT: sub sp, sp, #64
+; ALL-NEXT: ldp x9, x8, [x0, #16]
+; ALL-NEXT: movi v0.2d, #0000000000000000
+; ALL-NEXT: ldr x10, [x1]
+; ALL-NEXT: ldr q1, [x0]
+; ALL-NEXT: stp x9, x8, [sp, #16]
+; ALL-NEXT: ubfiz x8, x10, #3, #2
+; ALL-NEXT: mov x9, sp
+; ALL-NEXT: str q1, [sp]
+; ALL-NEXT: stp q0, q0, [sp, #32]
+; ALL-NEXT: add x8, x9, x8
; ALL-NEXT: ldp x10, x9, [x8, #16]
; ALL-NEXT: ldr q0, [x8]
; ALL-NEXT: str q0, [x2]
@@ -197,12 +241,13 @@ define void @lshr_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; ALL-NEXT: add sp, sp, #64
; ALL-NEXT: ret
%src = load i256, ptr %src.ptr, align 1
- %byteOff = load i256, ptr %byteOff.ptr, align 1
- %bitOff = shl i256 %byteOff, 3
+ %dwordOff = load i256, ptr %dwordOff.ptr, align 1
+ %bitOff = shl i256 %dwordOff, 6
%res = lshr i256 %src, %bitOff
store i256 %res, ptr %dst, align 1
ret void
}
+
define void @shl_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; ALL-LABEL: shl_32bytes:
; ALL: // %bb.0:
@@ -213,11 +258,56 @@ define void @shl_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; ALL-NEXT: ldr q1, [x0]
; ALL-NEXT: stp x9, x8, [sp, #48]
; ALL-NEXT: mov x8, sp
-; ALL-NEXT: and x9, x10, #0x1f
+; ALL-NEXT: and x9, x10, #0x18
; ALL-NEXT: add x8, x8, #32
; ALL-NEXT: stp q0, q0, [sp]
; ALL-NEXT: str q1, [sp, #32]
; ALL-NEXT: sub x8, x8, x9
+; ALL-NEXT: lsl x9, x10, #3
+; ALL-NEXT: ldp x10, x11, [x8]
+; ALL-NEXT: ldp x12, x8, [x8, #16]
+; ALL-NEXT: mvn w13, w9
+; ALL-NEXT: and x9, x9, #0x38
+; ALL-NEXT: lsr x14, x10, #1
+; ALL-NEXT: lsr x15, x11, #1
+; ALL-NEXT: lsl x11, x11, x9
+; ALL-NEXT: lsr x16, x12, #1
+; ALL-NEXT: lsl x10, x10, x9
+; ALL-NEXT: lsl x12, x12, x9
+; ALL-NEXT: lsr x14, x14, x13
+; ALL-NEXT: lsl x8, x8, x9
+; ALL-NEXT: lsr x9, x16, x13
+; ALL-NEXT: lsr x13, x15, x13
+; ALL-NEXT: orr x11, x11, x14
+; ALL-NEXT: orr x8, x8, x9
+; ALL-NEXT: orr x9, x12, x13
+; ALL-NEXT: stp x10, x11, [x2]
+; ALL-NEXT: stp x9, x8, [x2, #16]
+; ALL-NEXT: add sp, sp, #64
+; ALL-NEXT: ret
+ %src = load i256, ptr %src.ptr, align 1
+ %byteOff = load i256, ptr %byteOff.ptr, align 1
+ %bitOff = shl i256 %byteOff, 3
+ %res = shl i256 %src, %bitOff
+ store i256 %res, ptr %dst, align 1
+ ret void
+}
+
+define void @shl_32bytes_dwordOff(ptr %src.ptr, ptr %dwordOff.ptr, ptr %dst) nounwind {
+; ALL-LABEL: shl_32bytes_dwordOff:
+; ALL: // %bb.0:
+; ALL-NEXT: sub sp, sp, #64
+; ALL-NEXT: ldp x9, x8, [x0, #16]
+; ALL-NEXT: movi v0.2d, #0000000000000000
+; ALL-NEXT: ldr x10, [x1]
+; ALL-NEXT: ldr q1, [x0]
+; ALL-NEXT: stp x9, x8, [sp, #48]
+; ALL-NEXT: mov x8, sp
+; ALL-NEXT: ubfiz x9, x10, #3, #2
+; ALL-NEXT: add x8, x8, #32
+; ALL-NEXT: stp q0, q1, [sp, #16]
+; ALL-NEXT: str q0, [sp]
+; ALL-NEXT: sub x8, x8, x9
; ALL-NEXT: ldp x9, x10, [x8, #16]
; ALL-NEXT: ldr q0, [x8]
; ALL-NEXT: str q0, [x2]
@@ -225,12 +315,13 @@ define void @shl_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; ALL-NEXT: add sp, sp, #64
; ALL-NEXT: ret
%src = load i256, ptr %src.ptr, align 1
- %byteOff = load i256, ptr %byteOff.ptr, align 1
- %bitOff = shl i256 %byteOff, 3
+ %dwordOff = load i256, ptr %dwordOff.ptr, align 1
+ %bitOff = shl i256 %dwordOff, 6
%res = shl i256 %src, %bitOff
store i256 %res, ptr %dst, align 1
ret void
}
+
define void @ashr_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; ALL-LABEL: ashr_32bytes:
; ALL: // %bb.0:
@@ -238,14 +329,59 @@ define void @ashr_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; ALL-NEXT: ldp x9, x8, [x0, #16]
; ALL-NEXT: ldr x10, [x1]
; ALL-NEXT: ldr q0, [x0]
-; ALL-NEXT: and x10, x10, #0x1f
+; ALL-NEXT: and x11, x10, #0x18
; ALL-NEXT: stp x9, x8, [sp, #16]
; ALL-NEXT: asr x8, x8, #63
; ALL-NEXT: mov x9, sp
; ALL-NEXT: str q0, [sp]
+; ALL-NEXT: add x9, x9, x11
+; ALL-NEXT: stp x8, x8, [sp, #48]
+; ALL-NEXT: stp x8, x8, [sp, #32]
+; ALL-NEXT: lsl x8, x10, #3
+; ALL-NEXT: ldp x11, x10, [x9, #16]
+; ALL-NEXT: ldp x9, x12, [x9]
+; ALL-NEXT: mvn w13, w8
+; ALL-NEXT: and x8, x8, #0x38
+; ALL-NEXT: lsl x14, x10, #1
+; ALL-NEXT: lsl x15, x11, #1
+; ALL-NEXT: lsr x11, x11, x8
+; ALL-NEXT: lsl x16, x12, #1
+; ALL-NEXT: asr x10, x10, x8
+; ALL-NEXT: lsr x12, x12, x8
+; ALL-NEXT: lsl x14, x14, x13
+; ALL-NEXT: lsr x8, x9, x8
+; ALL-NEXT: lsl x9, x16, x13
+; ALL-NEXT: lsl x13, x15, x13
+; ALL-NEXT: orr x11, x14, x11
+; ALL-NEXT: orr x8, x9, x8
+; ALL-NEXT: orr x9, x12, x13
+; ALL-NEXT: stp x11, x10, [x2, #16]
+; ALL-NEXT: stp x8, x9, [x2]
+; ALL-NEXT: add sp, sp, #64
+; ALL-NEXT: ret
+ %src = load i256, ptr %src.ptr, align 1
+ %byteOff = load i256, ptr %byteOff.ptr, align 1
+ %bitOff = shl i256 %byteOff, 3
+ %res = ashr i256 %src, %bitOff
+ store i256 %res, ptr %dst, align 1
+ ret void
+}
+
+define void @ashr_32bytes_dwordOff(ptr %src.ptr, ptr %dwordOff.ptr, ptr %dst) nounwind {
+; ALL-LABEL: ashr_32bytes_dwordOff:
+; ALL: // %bb.0:
+; ALL-NEXT: sub sp, sp, #64
+; ALL-NEXT: ldp x9, x8, [x0, #16]
+; ALL-NEXT: ldr x10, [x1]
+; ALL-NEXT: ldr q0, [x0]
+; ALL-NEXT: stp x9, x8, [sp, #16]
+; ALL-NEXT: asr x8, x8, #63
+; ALL-NEXT: ubfiz x9, x10, #3, #2
+; ALL-NEXT: mov x10, sp
+; ALL-NEXT: str q0, [sp]
; ALL-NEXT: stp x8, x8, [sp, #48]
; ALL-NEXT: stp x8, x8, [sp, #32]
-; ALL-NEXT: add x8, x9, x10
+; ALL-NEXT: add x8, x10, x9
; ALL-NEXT: ldp x10, x9, [x8, #16]
; ALL-NEXT: ldr q0, [x8]
; ALL-NEXT: str q0, [x2]
@@ -253,8 +389,8 @@ define void @ashr_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; ALL-NEXT: add sp, sp, #64
; ALL-NEXT: ret
%src = load i256, ptr %src.ptr, align 1
- %byteOff = load i256, ptr %byteOff.ptr, align 1
- %bitOff = shl i256 %byteOff, 3
+ %dwordOff = load i256, ptr %dwordOff.ptr, align 1
+ %bitOff = shl i256 %dwordOff, 6
%res = ashr i256 %src, %bitOff
store i256 %res, ptr %dst, align 1
ret void
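
The rewritten sequences split the variable shift amount into a qword-aligned
byte offset into the 64-byte stack copy (the and with #0x18, or ubfiz in the
dword-offset tests) plus a residual bit count (the and with #0x38) consumed
by the lsl/lsr/orr funnel. A scalar sketch of the decomposition for the
byte-offset case (illustrative function name):

  define { i64, i64 } @split_shift_amount(i64 %byteOff) {
    %aligned = and i64 %byteOff, 24   ; 0x18: offset rounded down to 8 bytes
    %bits    = shl i64 %byteOff, 3    ; byte offset -> bit offset
    %rem     = and i64 %bits, 56      ; 0x38: leftover bits within a qword
    %r0 = insertvalue { i64, i64 } poison, i64 %aligned, 0
    %r1 = insertvalue { i64, i64 } %r0, i64 %rem, 1
    ret { i64, i64 } %r1
  }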
diff --git a/llvm/test/CodeGen/AArch64/wide-scalar-shift-legalization.ll b/llvm/test/CodeGen/AArch64/wide-scalar-shift-legalization.ll
index a4da6db..531e0fa 100644
--- a/llvm/test/CodeGen/AArch64/wide-scalar-shift-legalization.ll
+++ b/llvm/test/CodeGen/AArch64/wide-scalar-shift-legalization.ll
@@ -160,30 +160,33 @@ define void @lshr_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; ALL-NEXT: ldr x10, [x1]
; ALL-NEXT: ldr q1, [x0]
; ALL-NEXT: stp x9, x8, [sp, #16]
-; ALL-NEXT: ubfx x8, x10, #3, #5
+; ALL-NEXT: lsr x8, x10, #3
; ALL-NEXT: mov x9, sp
; ALL-NEXT: str q1, [sp]
-; ALL-NEXT: and x10, x10, #0x7
+; ALL-NEXT: and x12, x10, #0x3f
+; ALL-NEXT: and x8, x8, #0x18
; ALL-NEXT: stp q0, q0, [sp, #32]
+; ALL-NEXT: eor x12, x12, #0x3f
; ALL-NEXT: add x8, x9, x8
-; ALL-NEXT: mvn w13, w10
-; ALL-NEXT: ldp x11, x9, [x8, #16]
-; ALL-NEXT: ldp x8, x12, [x8]
+; ALL-NEXT: ldp x13, x11, [x8]
+; ALL-NEXT: ldr x9, [x8, #24]
+; ALL-NEXT: ldr x8, [x8, #16]
; ALL-NEXT: lsl x14, x9, #1
+; ALL-NEXT: lsr x9, x9, x10
; ALL-NEXT: lsl x15, x11, #1
; ALL-NEXT: lsr x11, x11, x10
-; ALL-NEXT: lsl x16, x12, #1
-; ALL-NEXT: lsr x9, x9, x10
-; ALL-NEXT: lsr x12, x12, x10
-; ALL-NEXT: lsl x14, x14, x13
+; ALL-NEXT: lsr x13, x13, x10
+; ALL-NEXT: lsl x14, x14, x12
+; ALL-NEXT: lsl x12, x15, x12
+; ALL-NEXT: lsl x15, x8, #1
; ALL-NEXT: lsr x8, x8, x10
-; ALL-NEXT: lsl x10, x16, x13
-; ALL-NEXT: lsl x13, x15, x13
-; ALL-NEXT: orr x11, x14, x11
-; ALL-NEXT: stp x11, x9, [x2, #16]
-; ALL-NEXT: orr x8, x10, x8
+; ALL-NEXT: mvn w10, w10
+; ALL-NEXT: lsl x10, x15, x10
+; ALL-NEXT: orr x8, x14, x8
+; ALL-NEXT: stp x8, x9, [x2, #16]
; ALL-NEXT: orr x9, x12, x13
-; ALL-NEXT: stp x8, x9, [x2]
+; ALL-NEXT: orr x8, x11, x10
+; ALL-NEXT: stp x9, x8, [x2]
; ALL-NEXT: add sp, sp, #64
; ALL-NEXT: ret
%src = load i256, ptr %src.ptr, align 1
@@ -201,31 +204,34 @@ define void @shl_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; ALL-NEXT: ldr x10, [x1]
; ALL-NEXT: ldr q1, [x0]
; ALL-NEXT: stp x9, x8, [sp, #48]
-; ALL-NEXT: mov x8, sp
-; ALL-NEXT: ubfx x9, x10, #3, #5
-; ALL-NEXT: add x8, x8, #32
+; ALL-NEXT: lsr x8, x10, #3
+; ALL-NEXT: mov x9, sp
+; ALL-NEXT: add x9, x9, #32
; ALL-NEXT: stp q0, q1, [sp, #16]
-; ALL-NEXT: and x10, x10, #0x7
+; ALL-NEXT: and x12, x10, #0x3f
+; ALL-NEXT: and x8, x8, #0x18
; ALL-NEXT: str q0, [sp]
-; ALL-NEXT: sub x8, x8, x9
-; ALL-NEXT: mvn w13, w10
-; ALL-NEXT: ldp x9, x11, [x8]
-; ALL-NEXT: ldp x12, x8, [x8, #16]
-; ALL-NEXT: lsr x14, x9, #1
-; ALL-NEXT: lsr x15, x11, #1
-; ALL-NEXT: lsl x11, x11, x10
-; ALL-NEXT: lsr x16, x12, #1
+; ALL-NEXT: eor x12, x12, #0x3f
+; ALL-NEXT: sub x8, x9, x8
+; ALL-NEXT: ldp x11, x13, [x8, #16]
+; ALL-NEXT: ldr x9, [x8]
+; ALL-NEXT: ldr x8, [x8, #8]
+; ALL-NEXT: lsr x15, x9, #1
; ALL-NEXT: lsl x9, x9, x10
-; ALL-NEXT: lsl x12, x12, x10
-; ALL-NEXT: lsr x14, x14, x13
+; ALL-NEXT: lsr x14, x11, #1
+; ALL-NEXT: lsl x11, x11, x10
+; ALL-NEXT: lsl x13, x13, x10
+; ALL-NEXT: lsr x14, x14, x12
+; ALL-NEXT: lsr x12, x15, x12
+; ALL-NEXT: lsr x15, x8, #1
; ALL-NEXT: lsl x8, x8, x10
-; ALL-NEXT: lsr x10, x16, x13
-; ALL-NEXT: lsr x13, x15, x13
-; ALL-NEXT: orr x11, x11, x14
-; ALL-NEXT: stp x9, x11, [x2]
-; ALL-NEXT: orr x8, x8, x10
-; ALL-NEXT: orr x9, x12, x13
-; ALL-NEXT: stp x9, x8, [x2, #16]
+; ALL-NEXT: mvn w10, w10
+; ALL-NEXT: lsr x10, x15, x10
+; ALL-NEXT: orr x8, x8, x12
+; ALL-NEXT: stp x9, x8, [x2]
+; ALL-NEXT: orr x9, x13, x14
+; ALL-NEXT: orr x8, x11, x10
+; ALL-NEXT: stp x8, x9, [x2, #16]
; ALL-NEXT: add sp, sp, #64
; ALL-NEXT: ret
%src = load i256, ptr %src.ptr, align 1
@@ -243,31 +249,34 @@ define void @ashr_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; ALL-NEXT: ldr x10, [x1]
; ALL-NEXT: ldr q0, [x0]
; ALL-NEXT: stp x9, x8, [sp, #16]
+; ALL-NEXT: lsr x9, x10, #3
; ALL-NEXT: asr x8, x8, #63
-; ALL-NEXT: ubfx x9, x10, #3, #5
; ALL-NEXT: str q0, [sp]
-; ALL-NEXT: and x10, x10, #0x7
+; ALL-NEXT: and x12, x10, #0x3f
+; ALL-NEXT: and x9, x9, #0x18
; ALL-NEXT: stp x8, x8, [sp, #48]
-; ALL-NEXT: add x9, x11, x9
-; ALL-NEXT: mvn w13, w10
+; ALL-NEXT: eor x12, x12, #0x3f
; ALL-NEXT: stp x8, x8, [sp, #32]
-; ALL-NEXT: ldp x11, x8, [x9, #16]
-; ALL-NEXT: ldp x9, x12, [x9]
-; ALL-NEXT: lsl x14, x8, #1
+; ALL-NEXT: add x8, x11, x9
+; ALL-NEXT: ldp x13, x11, [x8]
+; ALL-NEXT: ldr x9, [x8, #24]
+; ALL-NEXT: ldr x8, [x8, #16]
+; ALL-NEXT: lsl x14, x9, #1
+; ALL-NEXT: asr x9, x9, x10
; ALL-NEXT: lsl x15, x11, #1
; ALL-NEXT: lsr x11, x11, x10
-; ALL-NEXT: lsl x16, x12, #1
-; ALL-NEXT: asr x8, x8, x10
-; ALL-NEXT: lsr x12, x12, x10
-; ALL-NEXT: lsl x14, x14, x13
-; ALL-NEXT: lsr x9, x9, x10
-; ALL-NEXT: lsl x10, x16, x13
-; ALL-NEXT: lsl x13, x15, x13
-; ALL-NEXT: orr x11, x14, x11
-; ALL-NEXT: stp x11, x8, [x2, #16]
-; ALL-NEXT: orr x8, x10, x9
+; ALL-NEXT: lsr x13, x13, x10
+; ALL-NEXT: lsl x14, x14, x12
+; ALL-NEXT: lsl x12, x15, x12
+; ALL-NEXT: lsl x15, x8, #1
+; ALL-NEXT: lsr x8, x8, x10
+; ALL-NEXT: mvn w10, w10
+; ALL-NEXT: lsl x10, x15, x10
+; ALL-NEXT: orr x8, x14, x8
+; ALL-NEXT: stp x8, x9, [x2, #16]
; ALL-NEXT: orr x9, x12, x13
-; ALL-NEXT: stp x8, x9, [x2]
+; ALL-NEXT: orr x8, x11, x10
+; ALL-NEXT: stp x9, x8, [x2]
; ALL-NEXT: add sp, sp, #64
; ALL-NEXT: ret
%src = load i256, ptr %src.ptr, align 1
diff --git a/llvm/test/CodeGen/AArch64/xtn.ll b/llvm/test/CodeGen/AArch64/xtn.ll
index ead7902..fb3f8eb 100644
--- a/llvm/test/CodeGen/AArch64/xtn.ll
+++ b/llvm/test/CodeGen/AArch64/xtn.ll
@@ -294,19 +294,10 @@ entry:
}
define <3 x i16> @xtn_v3i32_v3i16(<3 x i32> %a) {
-; CHECK-SD-LABEL: xtn_v3i32_v3i16:
-; CHECK-SD: // %bb.0: // %entry
-; CHECK-SD-NEXT: xtn v0.4h, v0.4s
-; CHECK-SD-NEXT: ret
-;
-; CHECK-GI-LABEL: xtn_v3i32_v3i16:
-; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: mov w8, v0.s[1]
-; CHECK-GI-NEXT: mov w9, v0.s[2]
-; CHECK-GI-NEXT: mov v0.h[1], w8
-; CHECK-GI-NEXT: mov v0.h[2], w9
-; CHECK-GI-NEXT: // kill: def $d0 killed $d0 killed $q0
-; CHECK-GI-NEXT: ret
+; CHECK-LABEL: xtn_v3i32_v3i16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: xtn v0.4h, v0.4s
+; CHECK-NEXT: ret
entry:
%arg1 = trunc <3 x i32> %a to <3 x i16>
ret <3 x i16> %arg1
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-divergent-i1-phis-no-lane-mask-merging.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-divergent-i1-phis-no-lane-mask-merging.ll
index bb7bc04..c5ded11 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-divergent-i1-phis-no-lane-mask-merging.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-divergent-i1-phis-no-lane-mask-merging.ll
@@ -167,8 +167,8 @@ define void @divergent_i1_phi_used_inside_loop_bigger_loop_body(float %val, floa
; GFX10-NEXT: s_cbranch_execz .LBB3_6
; GFX10-NEXT: .LBB3_2: ; %loop_start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX10-NEXT: v_cmp_ge_i32_e32 vcc_lo, 0x3e8, v8
; GFX10-NEXT: s_mov_b32 s7, 1
+; GFX10-NEXT: v_cmp_ge_i32_e32 vcc_lo, 0x3e8, v8
; GFX10-NEXT: s_cbranch_vccz .LBB3_4
; GFX10-NEXT: ; %bb.3: ; %else
; GFX10-NEXT: ; in Loop: Header=BB3_2 Depth=1
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-divergent-i1-used-outside-loop.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-divergent-i1-used-outside-loop.ll
index 49c2326..b27d8fd 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-divergent-i1-used-outside-loop.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-divergent-i1-used-outside-loop.ll
@@ -75,12 +75,12 @@ define void @divergent_i1_phi_used_outside_loop_larger_loop_body(float %val, ptr
; GFX10-NEXT: .LBB1_1: ; %loop.cond
; GFX10-NEXT: ; in Loop: Header=BB1_2 Depth=1
; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s4
-; GFX10-NEXT: v_add_nc_u32_e32 v0, 1, v0
; GFX10-NEXT: v_add_co_u32 v1, s4, v1, 4
+; GFX10-NEXT: v_add_nc_u32_e32 v0, 1, v0
; GFX10-NEXT: v_add_co_ci_u32_e64 v2, s4, 0, v2, s4
-; GFX10-NEXT: v_cmp_le_i32_e32 vcc_lo, 10, v0
; GFX10-NEXT: s_andn2_b32 s7, s5, exec_lo
; GFX10-NEXT: s_and_b32 s8, exec_lo, s6
+; GFX10-NEXT: v_cmp_le_i32_e32 vcc_lo, 10, v0
; GFX10-NEXT: s_or_b32 s4, s7, s8
; GFX10-NEXT: s_cbranch_vccz .LBB1_4
; GFX10-NEXT: .LBB1_2: ; %loop.start
@@ -191,9 +191,9 @@ define void @divergent_i1_xor_used_outside_loop_larger_loop_body(i32 %num.elts,
; GFX10-LABEL: divergent_i1_xor_used_outside_loop_larger_loop_body:
; GFX10: ; %bb.0: ; %entry
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX10-NEXT: s_mov_b32 s5, 0
; GFX10-NEXT: s_mov_b32 s6, -1
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX10-NEXT: s_and_saveexec_b32 s4, vcc_lo
; GFX10-NEXT: s_cbranch_execz .LBB3_6
; GFX10-NEXT: ; %bb.1: ; %loop.start.preheader
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/global-atomic-fadd.f32-no-rtn.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/global-atomic-fadd.f32-no-rtn.ll
index 89c3bbc..6064b17 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/global-atomic-fadd.f32-no-rtn.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/global-atomic-fadd.f32-no-rtn.ll
@@ -61,7 +61,7 @@ define amdgpu_ps void @global_atomic_fadd_f32_saddr_no_rtn_atomicrmw(ptr addrspa
; GFX908-NEXT: {{ $}}
; GFX908-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
; GFX908-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
- ; GFX908-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GFX908-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64_xexec_xnull = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
; GFX908-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; GFX908-NEXT: [[SI_PS_LIVE:%[0-9]+]]:sreg_64_xexec = SI_PS_LIVE
; GFX908-NEXT: [[SI_IF:%[0-9]+]]:sreg_64_xexec = SI_IF [[SI_PS_LIVE]], %bb.5, implicit-def $exec, implicit-def $scc, implicit $exec
@@ -136,7 +136,7 @@ define amdgpu_ps void @global_atomic_fadd_f32_saddr_no_rtn_atomicrmw(ptr addrspa
; GFX90A-NEXT: {{ $}}
; GFX90A-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
; GFX90A-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
- ; GFX90A-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GFX90A-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64_xexec_xnull = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
; GFX90A-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; GFX90A-NEXT: [[SI_PS_LIVE:%[0-9]+]]:sreg_64_xexec = SI_PS_LIVE
; GFX90A-NEXT: [[SI_IF:%[0-9]+]]:sreg_64_xexec = SI_IF [[SI_PS_LIVE]], %bb.5, implicit-def $exec, implicit-def $scc, implicit $exec
@@ -211,7 +211,7 @@ define amdgpu_ps void @global_atomic_fadd_f32_saddr_no_rtn_atomicrmw(ptr addrspa
; GFX940-NEXT: {{ $}}
; GFX940-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
; GFX940-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
- ; GFX940-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GFX940-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64_xexec_xnull = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
; GFX940-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; GFX940-NEXT: [[SI_PS_LIVE:%[0-9]+]]:sreg_64_xexec = SI_PS_LIVE
; GFX940-NEXT: [[SI_IF:%[0-9]+]]:sreg_64_xexec = SI_IF [[SI_PS_LIVE]], %bb.5, implicit-def $exec, implicit-def $scc, implicit $exec
@@ -286,7 +286,7 @@ define amdgpu_ps void @global_atomic_fadd_f32_saddr_no_rtn_atomicrmw(ptr addrspa
; GFX11-NEXT: {{ $}}
; GFX11-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
; GFX11-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
- ; GFX11-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GFX11-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64_xexec_xnull = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
; GFX11-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; GFX11-NEXT: [[DEF:%[0-9]+]]:sreg_32 = IMPLICIT_DEF
; GFX11-NEXT: [[SI_PS_LIVE:%[0-9]+]]:sreg_32_xm0_xexec = SI_PS_LIVE
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/global-atomic-fadd.f32-rtn.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/global-atomic-fadd.f32-rtn.ll
index 8eef3d4..07c9710 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/global-atomic-fadd.f32-rtn.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/global-atomic-fadd.f32-rtn.ll
@@ -52,7 +52,7 @@ define amdgpu_ps float @global_atomic_fadd_f32_saddr_rtn_atomicrmw(ptr addrspace
; GFX90A-NEXT: {{ $}}
; GFX90A-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
; GFX90A-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
- ; GFX90A-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GFX90A-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64_xexec_xnull = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
; GFX90A-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; GFX90A-NEXT: [[DEF:%[0-9]+]]:sreg_32 = IMPLICIT_DEF
; GFX90A-NEXT: [[SI_PS_LIVE:%[0-9]+]]:sreg_64_xexec = SI_PS_LIVE
@@ -146,7 +146,7 @@ define amdgpu_ps float @global_atomic_fadd_f32_saddr_rtn_atomicrmw(ptr addrspace
; GFX940-NEXT: {{ $}}
; GFX940-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
; GFX940-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
- ; GFX940-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GFX940-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64_xexec_xnull = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
; GFX940-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; GFX940-NEXT: [[DEF:%[0-9]+]]:sreg_32 = IMPLICIT_DEF
; GFX940-NEXT: [[SI_PS_LIVE:%[0-9]+]]:sreg_64_xexec = SI_PS_LIVE
@@ -240,7 +240,7 @@ define amdgpu_ps float @global_atomic_fadd_f32_saddr_rtn_atomicrmw(ptr addrspace
; GFX11-NEXT: {{ $}}
; GFX11-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
; GFX11-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
- ; GFX11-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GFX11-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64_xexec_xnull = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
; GFX11-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; GFX11-NEXT: [[DEF:%[0-9]+]]:sreg_32 = IMPLICIT_DEF
; GFX11-NEXT: [[SI_PS_LIVE:%[0-9]+]]:sreg_32_xm0_xexec = SI_PS_LIVE
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/global-atomic-fadd.f64.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/global-atomic-fadd.f64.ll
index 80fa244..49c5dc7 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/global-atomic-fadd.f64.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/global-atomic-fadd.f64.ll
@@ -120,7 +120,7 @@ define amdgpu_ps void @global_atomic_fadd_f64_saddr_no_rtn_atomicrmw(ptr addrspa
; GFX90A_GFX940-NEXT: {{ $}}
; GFX90A_GFX940-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
; GFX90A_GFX940-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
- ; GFX90A_GFX940-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GFX90A_GFX940-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64_xexec_xnull = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
; GFX90A_GFX940-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; GFX90A_GFX940-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr1
; GFX90A_GFX940-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1
@@ -138,7 +138,7 @@ define amdgpu_ps double @global_atomic_fadd_f64_saddr_rtn_atomicrmw(ptr addrspac
; GFX90A_GFX940-NEXT: {{ $}}
; GFX90A_GFX940-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
; GFX90A_GFX940-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
- ; GFX90A_GFX940-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GFX90A_GFX940-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64_xexec_xnull = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
; GFX90A_GFX940-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; GFX90A_GFX940-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr1
; GFX90A_GFX940-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/global-atomic-fadd.v2f16-no-rtn.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/global-atomic-fadd.v2f16-no-rtn.ll
index db508b5..1317770 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/global-atomic-fadd.v2f16-no-rtn.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/global-atomic-fadd.v2f16-no-rtn.ll
@@ -36,7 +36,7 @@ define amdgpu_ps void @global_atomic_fadd_v2f16_saddr_no_rtn(ptr addrspace(1) in
; GFX908-NEXT: {{ $}}
; GFX908-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
; GFX908-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
- ; GFX908-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GFX908-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64_xexec_xnull = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
; GFX908-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; GFX908-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
; GFX908-NEXT: GLOBAL_ATOMIC_PK_ADD_F16_SADDR [[V_MOV_B32_e32_]], [[COPY2]], [[REG_SEQUENCE]], 0, 0, implicit $exec :: (load store syncscope("agent") seq_cst (<2 x s16>) on %ir.ptr, addrspace 1)
@@ -48,7 +48,7 @@ define amdgpu_ps void @global_atomic_fadd_v2f16_saddr_no_rtn(ptr addrspace(1) in
; GFX90A_GFX940-NEXT: {{ $}}
; GFX90A_GFX940-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
; GFX90A_GFX940-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
- ; GFX90A_GFX940-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GFX90A_GFX940-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64_xexec_xnull = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
; GFX90A_GFX940-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; GFX90A_GFX940-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
; GFX90A_GFX940-NEXT: GLOBAL_ATOMIC_PK_ADD_F16_SADDR [[V_MOV_B32_e32_]], [[COPY2]], [[REG_SEQUENCE]], 0, 0, implicit $exec :: (load store syncscope("agent") seq_cst (<2 x s16>) on %ir.ptr, addrspace 1)
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/global-atomic-fadd.v2f16-rtn.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/global-atomic-fadd.v2f16-rtn.ll
index f11196b..a65fc6c 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/global-atomic-fadd.v2f16-rtn.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/global-atomic-fadd.v2f16-rtn.ll
@@ -25,7 +25,7 @@ define amdgpu_ps <2 x half> @global_atomic_fadd_v2f16_saddr_rtn(ptr addrspace(1)
; GFX90A_GFX940-NEXT: {{ $}}
; GFX90A_GFX940-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
; GFX90A_GFX940-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
- ; GFX90A_GFX940-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GFX90A_GFX940-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64_xexec_xnull = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
; GFX90A_GFX940-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; GFX90A_GFX940-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
; GFX90A_GFX940-NEXT: [[GLOBAL_ATOMIC_PK_ADD_F16_SADDR_RTN:%[0-9]+]]:vgpr_32 = GLOBAL_ATOMIC_PK_ADD_F16_SADDR_RTN [[V_MOV_B32_e32_]], [[COPY2]], [[REG_SEQUENCE]], 0, 1, implicit $exec :: (load store syncscope("agent") seq_cst (<2 x s16>) on %ir.ptr, addrspace 1)
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgpu-atomic-cmpxchg-global.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgpu-atomic-cmpxchg-global.mir
index 6f01083..09eb77f 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgpu-atomic-cmpxchg-global.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgpu-atomic-cmpxchg-global.mir
@@ -747,7 +747,7 @@ body: |
; GFX9-LABEL: name: amdgpu_atomic_cmpxchg_s32_global_sgpr_ptr
; GFX9: liveins: $sgpr0_sgpr1, $vgpr2, $vgpr3
; GFX9-NEXT: {{ $}}
- ; GFX9-NEXT: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
+ ; GFX9-NEXT: [[COPY:%[0-9]+]]:sreg_64_xexec_xnull = COPY $sgpr0_sgpr1
; GFX9-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr2
; GFX9-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr3
; GFX9-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1
@@ -758,7 +758,7 @@ body: |
; GFX10-LABEL: name: amdgpu_atomic_cmpxchg_s32_global_sgpr_ptr
; GFX10: liveins: $sgpr0_sgpr1, $vgpr2, $vgpr3
; GFX10-NEXT: {{ $}}
- ; GFX10-NEXT: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
+ ; GFX10-NEXT: [[COPY:%[0-9]+]]:sreg_64_xexec_xnull = COPY $sgpr0_sgpr1
; GFX10-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr2
; GFX10-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr3
; GFX10-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1
@@ -854,7 +854,7 @@ body: |
; GFX9-LABEL: name: amdgpu_atomic_cmpxchg_s32_global_sgpr_ptr_offset_4095
; GFX9: liveins: $sgpr0_sgpr1, $vgpr2, $vgpr3
; GFX9-NEXT: {{ $}}
- ; GFX9-NEXT: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
+ ; GFX9-NEXT: [[COPY:%[0-9]+]]:sreg_64_xexec_xnull = COPY $sgpr0_sgpr1
; GFX9-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr2
; GFX9-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr3
; GFX9-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1
@@ -865,7 +865,7 @@ body: |
; GFX10-LABEL: name: amdgpu_atomic_cmpxchg_s32_global_sgpr_ptr_offset_4095
; GFX10: liveins: $sgpr0_sgpr1, $vgpr2, $vgpr3
; GFX10-NEXT: {{ $}}
- ; GFX10-NEXT: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
+ ; GFX10-NEXT: [[COPY:%[0-9]+]]:sreg_64_xexec_xnull = COPY $sgpr0_sgpr1
; GFX10-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr2
; GFX10-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr3
; GFX10-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-copy.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-copy.mir
index 3428230..e07d635 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-copy.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-copy.mir
@@ -25,7 +25,7 @@ body: |
; WAVE32-LABEL: name: copy
; WAVE32: liveins: $sgpr2_sgpr3
; WAVE32-NEXT: {{ $}}
- ; WAVE32-NEXT: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr2_sgpr3
+ ; WAVE32-NEXT: [[COPY:%[0-9]+]]:sreg_64_xexec_xnull = COPY $sgpr2_sgpr3
; WAVE32-NEXT: [[DEF:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; WAVE32-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
; WAVE32-NEXT: GLOBAL_STORE_DWORD_SADDR [[V_MOV_B32_e32_]], [[DEF]], [[COPY]], 0, 0, implicit $exec :: (store (s32), addrspace 1)
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fract.f64.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fract.f64.mir
index 6a1e52c..52b1beb 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fract.f64.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fract.f64.mir
@@ -26,8 +26,9 @@ body: |
; GFX10-NEXT: [[V_ADD_F64_e64_:%[0-9]+]]:vreg_64 = nofpexcept V_ADD_F64_e64 0, [[COPY3]], 1, [[COPY4]], 0, 0, implicit $mode, implicit $exec
; GFX10-NEXT: [[V_FLOOR_F64_e64_:%[0-9]+]]:vreg_64 = nofpexcept V_FLOOR_F64_e64 0, [[V_ADD_F64_e64_]], 0, 0, implicit $mode, implicit $exec
; GFX10-NEXT: [[V_ADD_F64_e64_1:%[0-9]+]]:vreg_64 = nofpexcept V_ADD_F64_e64 0, [[V_ADD_F64_e64_]], 1, [[V_FLOOR_F64_e64_]], 0, 0, implicit $mode, implicit $exec
+ ; GFX10-NEXT: [[COPY5:%[0-9]+]]:sreg_64_xexec_xnull = COPY [[COPY1]]
; GFX10-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
- ; GFX10-NEXT: GLOBAL_STORE_DWORDX2_SADDR [[V_MOV_B32_e32_]], [[V_ADD_F64_e64_1]], [[COPY1]], 0, 0, implicit $exec :: (store (s64), addrspace 1)
+ ; GFX10-NEXT: GLOBAL_STORE_DWORDX2_SADDR [[V_MOV_B32_e32_]], [[V_ADD_F64_e64_1]], [[COPY5]], 0, 0, implicit $exec :: (store (s64), addrspace 1)
; GFX10-NEXT: S_ENDPGM 0
;
; GFX11-LABEL: name: fract_f64_neg
@@ -44,8 +45,9 @@ body: |
; GFX11-NEXT: [[V_ADD_F64_e64_:%[0-9]+]]:vreg_64 = nofpexcept V_ADD_F64_e64 0, [[COPY3]], 1, [[COPY4]], 0, 0, implicit $mode, implicit $exec
; GFX11-NEXT: [[V_FLOOR_F64_e64_:%[0-9]+]]:vreg_64 = nofpexcept V_FLOOR_F64_e64 0, [[V_ADD_F64_e64_]], 0, 0, implicit $mode, implicit $exec
; GFX11-NEXT: [[V_ADD_F64_e64_1:%[0-9]+]]:vreg_64 = nofpexcept V_ADD_F64_e64 0, [[V_ADD_F64_e64_]], 1, [[V_FLOOR_F64_e64_]], 0, 0, implicit $mode, implicit $exec
+ ; GFX11-NEXT: [[COPY5:%[0-9]+]]:sreg_64_xexec_xnull = COPY [[COPY1]]
; GFX11-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
- ; GFX11-NEXT: GLOBAL_STORE_DWORDX2_SADDR [[V_MOV_B32_e32_]], [[V_ADD_F64_e64_1]], [[COPY1]], 0, 0, implicit $exec :: (store (s64), addrspace 1)
+ ; GFX11-NEXT: GLOBAL_STORE_DWORDX2_SADDR [[V_MOV_B32_e32_]], [[V_ADD_F64_e64_1]], [[COPY5]], 0, 0, implicit $exec :: (store (s64), addrspace 1)
; GFX11-NEXT: S_ENDPGM 0
%2:sgpr(p4) = COPY $sgpr0_sgpr1
%7:sgpr(s64) = G_CONSTANT i64 36
@@ -92,8 +94,9 @@ body: |
; GFX10-NEXT: [[V_ADD_F64_e64_:%[0-9]+]]:vreg_64 = nofpexcept V_ADD_F64_e64 0, [[COPY3]], 3, [[COPY4]], 0, 0, implicit $mode, implicit $exec
; GFX10-NEXT: [[V_FLOOR_F64_e64_:%[0-9]+]]:vreg_64 = nofpexcept V_FLOOR_F64_e64 0, [[V_ADD_F64_e64_]], 0, 0, implicit $mode, implicit $exec
; GFX10-NEXT: [[V_ADD_F64_e64_1:%[0-9]+]]:vreg_64 = nofpexcept V_ADD_F64_e64 0, [[V_ADD_F64_e64_]], 1, [[V_FLOOR_F64_e64_]], 0, 0, implicit $mode, implicit $exec
+ ; GFX10-NEXT: [[COPY5:%[0-9]+]]:sreg_64_xexec_xnull = COPY [[COPY1]]
; GFX10-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
- ; GFX10-NEXT: GLOBAL_STORE_DWORDX2_SADDR [[V_MOV_B32_e32_]], [[V_ADD_F64_e64_1]], [[COPY1]], 0, 0, implicit $exec :: (store (s64), addrspace 1)
+ ; GFX10-NEXT: GLOBAL_STORE_DWORDX2_SADDR [[V_MOV_B32_e32_]], [[V_ADD_F64_e64_1]], [[COPY5]], 0, 0, implicit $exec :: (store (s64), addrspace 1)
; GFX10-NEXT: S_ENDPGM 0
;
; GFX11-LABEL: name: fract_f64_neg_abs
@@ -110,8 +113,9 @@ body: |
; GFX11-NEXT: [[V_ADD_F64_e64_:%[0-9]+]]:vreg_64 = nofpexcept V_ADD_F64_e64 0, [[COPY3]], 3, [[COPY4]], 0, 0, implicit $mode, implicit $exec
; GFX11-NEXT: [[V_FLOOR_F64_e64_:%[0-9]+]]:vreg_64 = nofpexcept V_FLOOR_F64_e64 0, [[V_ADD_F64_e64_]], 0, 0, implicit $mode, implicit $exec
; GFX11-NEXT: [[V_ADD_F64_e64_1:%[0-9]+]]:vreg_64 = nofpexcept V_ADD_F64_e64 0, [[V_ADD_F64_e64_]], 1, [[V_FLOOR_F64_e64_]], 0, 0, implicit $mode, implicit $exec
+ ; GFX11-NEXT: [[COPY5:%[0-9]+]]:sreg_64_xexec_xnull = COPY [[COPY1]]
; GFX11-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
- ; GFX11-NEXT: GLOBAL_STORE_DWORDX2_SADDR [[V_MOV_B32_e32_]], [[V_ADD_F64_e64_1]], [[COPY1]], 0, 0, implicit $exec :: (store (s64), addrspace 1)
+ ; GFX11-NEXT: GLOBAL_STORE_DWORDX2_SADDR [[V_MOV_B32_e32_]], [[V_ADD_F64_e64_1]], [[COPY5]], 0, 0, implicit $exec :: (store (s64), addrspace 1)
; GFX11-NEXT: S_ENDPGM 0
%2:sgpr(p4) = COPY $sgpr0_sgpr1
%7:sgpr(s64) = G_CONSTANT i64 36
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-load-global-saddr.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-load-global-saddr.mir
index cf4e6c8..65f6b88 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-load-global-saddr.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-load-global-saddr.mir
@@ -17,7 +17,7 @@ body: |
; GFX9-LABEL: name: load_global_s32_from_sgpr
; GFX9: liveins: $sgpr0_sgpr1
; GFX9-NEXT: {{ $}}
- ; GFX9-NEXT: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
+ ; GFX9-NEXT: [[COPY:%[0-9]+]]:sreg_64_xexec_xnull = COPY $sgpr0_sgpr1
; GFX9-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
; GFX9-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[COPY]], [[V_MOV_B32_e32_]], 0, 0, implicit $exec :: (load (s32), addrspace 1)
; GFX9-NEXT: $vgpr0 = COPY [[GLOBAL_LOAD_DWORD_SADDR]]
@@ -25,7 +25,7 @@ body: |
; GFX10-LABEL: name: load_global_s32_from_sgpr
; GFX10: liveins: $sgpr0_sgpr1
; GFX10-NEXT: {{ $}}
- ; GFX10-NEXT: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
+ ; GFX10-NEXT: [[COPY:%[0-9]+]]:sreg_64_xexec_xnull = COPY $sgpr0_sgpr1
; GFX10-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
; GFX10-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[COPY]], [[V_MOV_B32_e32_]], 0, 0, implicit $exec :: (load (s32), addrspace 1)
; GFX10-NEXT: $vgpr0 = COPY [[GLOBAL_LOAD_DWORD_SADDR]]
@@ -33,7 +33,7 @@ body: |
; GFX11-LABEL: name: load_global_s32_from_sgpr
; GFX11: liveins: $sgpr0_sgpr1
; GFX11-NEXT: {{ $}}
- ; GFX11-NEXT: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:sreg_64_xexec_xnull = COPY $sgpr0_sgpr1
; GFX11-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
; GFX11-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[COPY]], [[V_MOV_B32_e32_]], 0, 0, implicit $exec :: (load (s32), addrspace 1)
; GFX11-NEXT: $vgpr0 = COPY [[GLOBAL_LOAD_DWORD_SADDR]]
@@ -41,7 +41,7 @@ body: |
; GFX12-LABEL: name: load_global_s32_from_sgpr
; GFX12: liveins: $sgpr0_sgpr1
; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
+ ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_64_xexec_xnull = COPY $sgpr0_sgpr1
; GFX12-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
; GFX12-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[COPY]], [[V_MOV_B32_e32_]], 0, 0, implicit $exec :: (load (s32), addrspace 1)
; GFX12-NEXT: $vgpr0 = COPY [[GLOBAL_LOAD_DWORD_SADDR]]
@@ -67,7 +67,7 @@ body: |
; GFX9-LABEL: name: load_global_s32_from_sgpr_zext_vgpr
; GFX9: liveins: $sgpr0_sgpr1, $vgpr0
; GFX9-NEXT: {{ $}}
- ; GFX9-NEXT: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
+ ; GFX9-NEXT: [[COPY:%[0-9]+]]:sreg_64_xexec_xnull = COPY $sgpr0_sgpr1
; GFX9-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; GFX9-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[COPY]], [[COPY1]], 0, 0, implicit $exec :: (load (s32), addrspace 1)
; GFX9-NEXT: $vgpr0 = COPY [[GLOBAL_LOAD_DWORD_SADDR]]
@@ -75,7 +75,7 @@ body: |
; GFX10-LABEL: name: load_global_s32_from_sgpr_zext_vgpr
; GFX10: liveins: $sgpr0_sgpr1, $vgpr0
; GFX10-NEXT: {{ $}}
- ; GFX10-NEXT: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
+ ; GFX10-NEXT: [[COPY:%[0-9]+]]:sreg_64_xexec_xnull = COPY $sgpr0_sgpr1
; GFX10-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; GFX10-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[COPY]], [[COPY1]], 0, 0, implicit $exec :: (load (s32), addrspace 1)
; GFX10-NEXT: $vgpr0 = COPY [[GLOBAL_LOAD_DWORD_SADDR]]
@@ -83,7 +83,7 @@ body: |
; GFX11-LABEL: name: load_global_s32_from_sgpr_zext_vgpr
; GFX11: liveins: $sgpr0_sgpr1, $vgpr0
; GFX11-NEXT: {{ $}}
- ; GFX11-NEXT: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:sreg_64_xexec_xnull = COPY $sgpr0_sgpr1
; GFX11-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; GFX11-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[COPY]], [[COPY1]], 0, 0, implicit $exec :: (load (s32), addrspace 1)
; GFX11-NEXT: $vgpr0 = COPY [[GLOBAL_LOAD_DWORD_SADDR]]
@@ -91,7 +91,7 @@ body: |
; GFX12-LABEL: name: load_global_s32_from_sgpr_zext_vgpr
; GFX12: liveins: $sgpr0_sgpr1, $vgpr0
; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
+ ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_64_xexec_xnull = COPY $sgpr0_sgpr1
; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; GFX12-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[COPY]], [[COPY1]], 0, 0, implicit $exec :: (load (s32), addrspace 1)
; GFX12-NEXT: $vgpr0 = COPY [[GLOBAL_LOAD_DWORD_SADDR]]
@@ -120,7 +120,7 @@ body: |
; GFX9-LABEL: name: load_global_s32_from_sgpr_merge_zext_vgpr
; GFX9: liveins: $sgpr0_sgpr1, $vgpr0
; GFX9-NEXT: {{ $}}
- ; GFX9-NEXT: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
+ ; GFX9-NEXT: [[COPY:%[0-9]+]]:sreg_64_xexec_xnull = COPY $sgpr0_sgpr1
; GFX9-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; GFX9-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[COPY]], [[COPY1]], 0, 0, implicit $exec :: (load (s32), addrspace 1)
; GFX9-NEXT: $vgpr0 = COPY [[GLOBAL_LOAD_DWORD_SADDR]]
@@ -128,7 +128,7 @@ body: |
; GFX10-LABEL: name: load_global_s32_from_sgpr_merge_zext_vgpr
; GFX10: liveins: $sgpr0_sgpr1, $vgpr0
; GFX10-NEXT: {{ $}}
- ; GFX10-NEXT: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
+ ; GFX10-NEXT: [[COPY:%[0-9]+]]:sreg_64_xexec_xnull = COPY $sgpr0_sgpr1
; GFX10-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; GFX10-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[COPY]], [[COPY1]], 0, 0, implicit $exec :: (load (s32), addrspace 1)
; GFX10-NEXT: $vgpr0 = COPY [[GLOBAL_LOAD_DWORD_SADDR]]
@@ -136,7 +136,7 @@ body: |
; GFX11-LABEL: name: load_global_s32_from_sgpr_merge_zext_vgpr
; GFX11: liveins: $sgpr0_sgpr1, $vgpr0
; GFX11-NEXT: {{ $}}
- ; GFX11-NEXT: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:sreg_64_xexec_xnull = COPY $sgpr0_sgpr1
; GFX11-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; GFX11-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[COPY]], [[COPY1]], 0, 0, implicit $exec :: (load (s32), addrspace 1)
; GFX11-NEXT: $vgpr0 = COPY [[GLOBAL_LOAD_DWORD_SADDR]]
@@ -144,7 +144,7 @@ body: |
; GFX12-LABEL: name: load_global_s32_from_sgpr_merge_zext_vgpr
; GFX12: liveins: $sgpr0_sgpr1, $vgpr0
; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
+ ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_64_xexec_xnull = COPY $sgpr0_sgpr1
; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; GFX12-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[COPY]], [[COPY1]], 0, 0, implicit $exec :: (load (s32), addrspace 1)
; GFX12-NEXT: $vgpr0 = COPY [[GLOBAL_LOAD_DWORD_SADDR]]
@@ -266,7 +266,7 @@ body: |
; GFX9-LABEL: name: load_global_s32_from_sgpr_zext_vgpr_offset4095
; GFX9: liveins: $sgpr0_sgpr1, $vgpr0
; GFX9-NEXT: {{ $}}
- ; GFX9-NEXT: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
+ ; GFX9-NEXT: [[COPY:%[0-9]+]]:sreg_64_xexec_xnull = COPY $sgpr0_sgpr1
; GFX9-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; GFX9-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[COPY]], [[COPY1]], 4095, 0, implicit $exec :: (load (s32), addrspace 1)
; GFX9-NEXT: $vgpr0 = COPY [[GLOBAL_LOAD_DWORD_SADDR]]
@@ -300,7 +300,7 @@ body: |
; GFX11-LABEL: name: load_global_s32_from_sgpr_zext_vgpr_offset4095
; GFX11: liveins: $sgpr0_sgpr1, $vgpr0
; GFX11-NEXT: {{ $}}
- ; GFX11-NEXT: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:sreg_64_xexec_xnull = COPY $sgpr0_sgpr1
; GFX11-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; GFX11-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[COPY]], [[COPY1]], 4095, 0, implicit $exec :: (load (s32), addrspace 1)
; GFX11-NEXT: $vgpr0 = COPY [[GLOBAL_LOAD_DWORD_SADDR]]
@@ -308,7 +308,7 @@ body: |
; GFX12-LABEL: name: load_global_s32_from_sgpr_zext_vgpr_offset4095
; GFX12: liveins: $sgpr0_sgpr1, $vgpr0
; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
+ ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_64_xexec_xnull = COPY $sgpr0_sgpr1
; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; GFX12-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[COPY]], [[COPY1]], 4095, 0, implicit $exec :: (load (s32), addrspace 1)
; GFX12-NEXT: $vgpr0 = COPY [[GLOBAL_LOAD_DWORD_SADDR]]
@@ -339,7 +339,7 @@ body: |
; GFX9-LABEL: name: load_global_s32_from_sgpr_zext_vgpr_offset_neg4096
; GFX9: liveins: $sgpr0_sgpr1, $vgpr0
; GFX9-NEXT: {{ $}}
- ; GFX9-NEXT: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
+ ; GFX9-NEXT: [[COPY:%[0-9]+]]:sreg_64_xexec_xnull = COPY $sgpr0_sgpr1
; GFX9-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; GFX9-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[COPY]], [[COPY1]], -4096, 0, implicit $exec :: (load (s32), addrspace 1)
; GFX9-NEXT: $vgpr0 = COPY [[GLOBAL_LOAD_DWORD_SADDR]]
@@ -373,7 +373,7 @@ body: |
; GFX11-LABEL: name: load_global_s32_from_sgpr_zext_vgpr_offset_neg4096
; GFX11: liveins: $sgpr0_sgpr1, $vgpr0
; GFX11-NEXT: {{ $}}
- ; GFX11-NEXT: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:sreg_64_xexec_xnull = COPY $sgpr0_sgpr1
; GFX11-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; GFX11-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[COPY]], [[COPY1]], -4096, 0, implicit $exec :: (load (s32), addrspace 1)
; GFX11-NEXT: $vgpr0 = COPY [[GLOBAL_LOAD_DWORD_SADDR]]
@@ -381,7 +381,7 @@ body: |
; GFX12-LABEL: name: load_global_s32_from_sgpr_zext_vgpr_offset_neg4096
; GFX12: liveins: $sgpr0_sgpr1, $vgpr0
; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
+ ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_64_xexec_xnull = COPY $sgpr0_sgpr1
; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; GFX12-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[COPY]], [[COPY1]], -4096, 0, implicit $exec :: (load (s32), addrspace 1)
; GFX12-NEXT: $vgpr0 = COPY [[GLOBAL_LOAD_DWORD_SADDR]]
@@ -410,7 +410,7 @@ body: |
; GFX9-LABEL: name: load_global_s32_from_sgpr_base_offset_4096
; GFX9: liveins: $sgpr0_sgpr1
; GFX9-NEXT: {{ $}}
- ; GFX9-NEXT: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
+ ; GFX9-NEXT: [[COPY:%[0-9]+]]:sreg_64_xexec_xnull = COPY $sgpr0_sgpr1
; GFX9-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 4096, implicit $exec
; GFX9-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[COPY]], [[V_MOV_B32_e32_]], 0, 0, implicit $exec :: (load (s32), addrspace 1)
; GFX9-NEXT: $vgpr0 = COPY [[GLOBAL_LOAD_DWORD_SADDR]]
@@ -418,7 +418,7 @@ body: |
; GFX10-LABEL: name: load_global_s32_from_sgpr_base_offset_4096
; GFX10: liveins: $sgpr0_sgpr1
; GFX10-NEXT: {{ $}}
- ; GFX10-NEXT: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
+ ; GFX10-NEXT: [[COPY:%[0-9]+]]:sreg_64_xexec_xnull = COPY $sgpr0_sgpr1
; GFX10-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 4096, implicit $exec
; GFX10-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[COPY]], [[V_MOV_B32_e32_]], 0, 0, implicit $exec :: (load (s32), addrspace 1)
; GFX10-NEXT: $vgpr0 = COPY [[GLOBAL_LOAD_DWORD_SADDR]]
@@ -426,7 +426,7 @@ body: |
; GFX11-LABEL: name: load_global_s32_from_sgpr_base_offset_4096
; GFX11: liveins: $sgpr0_sgpr1
; GFX11-NEXT: {{ $}}
- ; GFX11-NEXT: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:sreg_64_xexec_xnull = COPY $sgpr0_sgpr1
; GFX11-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 4096, implicit $exec
; GFX11-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[COPY]], [[V_MOV_B32_e32_]], 0, 0, implicit $exec :: (load (s32), addrspace 1)
; GFX11-NEXT: $vgpr0 = COPY [[GLOBAL_LOAD_DWORD_SADDR]]
@@ -434,7 +434,7 @@ body: |
; GFX12-LABEL: name: load_global_s32_from_sgpr_base_offset_4096
; GFX12: liveins: $sgpr0_sgpr1
; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
+ ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_64_xexec_xnull = COPY $sgpr0_sgpr1
; GFX12-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
; GFX12-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[COPY]], [[V_MOV_B32_e32_]], 4096, 0, implicit $exec :: (load (s32), addrspace 1)
; GFX12-NEXT: $vgpr0 = COPY [[GLOBAL_LOAD_DWORD_SADDR]]
@@ -460,7 +460,7 @@ body: |
; GFX9-LABEL: name: load_global_s32_from_sgpr_base_offset_4097
; GFX9: liveins: $sgpr0_sgpr1
; GFX9-NEXT: {{ $}}
- ; GFX9-NEXT: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
+ ; GFX9-NEXT: [[COPY:%[0-9]+]]:sreg_64_xexec_xnull = COPY $sgpr0_sgpr1
; GFX9-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 4096, implicit $exec
; GFX9-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[COPY]], [[V_MOV_B32_e32_]], 1, 0, implicit $exec :: (load (s32), addrspace 1)
; GFX9-NEXT: $vgpr0 = COPY [[GLOBAL_LOAD_DWORD_SADDR]]
@@ -468,7 +468,7 @@ body: |
; GFX10-LABEL: name: load_global_s32_from_sgpr_base_offset_4097
; GFX10: liveins: $sgpr0_sgpr1
; GFX10-NEXT: {{ $}}
- ; GFX10-NEXT: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
+ ; GFX10-NEXT: [[COPY:%[0-9]+]]:sreg_64_xexec_xnull = COPY $sgpr0_sgpr1
; GFX10-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 4096, implicit $exec
; GFX10-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[COPY]], [[V_MOV_B32_e32_]], 1, 0, implicit $exec :: (load (s32), addrspace 1)
; GFX10-NEXT: $vgpr0 = COPY [[GLOBAL_LOAD_DWORD_SADDR]]
@@ -476,7 +476,7 @@ body: |
; GFX11-LABEL: name: load_global_s32_from_sgpr_base_offset_4097
; GFX11: liveins: $sgpr0_sgpr1
; GFX11-NEXT: {{ $}}
- ; GFX11-NEXT: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:sreg_64_xexec_xnull = COPY $sgpr0_sgpr1
; GFX11-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 4096, implicit $exec
; GFX11-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[COPY]], [[V_MOV_B32_e32_]], 1, 0, implicit $exec :: (load (s32), addrspace 1)
; GFX11-NEXT: $vgpr0 = COPY [[GLOBAL_LOAD_DWORD_SADDR]]
@@ -484,7 +484,7 @@ body: |
; GFX12-LABEL: name: load_global_s32_from_sgpr_base_offset_4097
; GFX12: liveins: $sgpr0_sgpr1
; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
+ ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_64_xexec_xnull = COPY $sgpr0_sgpr1
; GFX12-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
; GFX12-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[COPY]], [[V_MOV_B32_e32_]], 4097, 0, implicit $exec :: (load (s32), addrspace 1)
; GFX12-NEXT: $vgpr0 = COPY [[GLOBAL_LOAD_DWORD_SADDR]]
@@ -518,7 +518,7 @@ body: |
; GFX9-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY [[S_MOV_B]].sub1
; GFX9-NEXT: [[S_ADD_U32_:%[0-9]+]]:sreg_32 = S_ADD_U32 [[COPY1]], [[COPY2]], implicit-def $scc
; GFX9-NEXT: [[S_ADDC_U32_:%[0-9]+]]:sreg_32 = S_ADDC_U32 [[COPY3]], [[COPY4]], implicit-def dead $scc, implicit $scc
- ; GFX9-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64_xexec = REG_SEQUENCE [[S_ADD_U32_]], %subreg.sub0, [[S_ADDC_U32_]], %subreg.sub1
+ ; GFX9-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64_xexec_xnull = REG_SEQUENCE [[S_ADD_U32_]], %subreg.sub0, [[S_ADDC_U32_]], %subreg.sub1
; GFX9-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
; GFX9-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[REG_SEQUENCE]], [[V_MOV_B32_e32_]], 0, 0, implicit $exec :: (load (s32), addrspace 1)
; GFX9-NEXT: $vgpr0 = COPY [[GLOBAL_LOAD_DWORD_SADDR]]
@@ -558,7 +558,7 @@ body: |
; GFX12-LABEL: name: load_global_s32_from_sgpr_base_offset_neg4097
; GFX12: liveins: $sgpr0_sgpr1
; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
+ ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_64_xexec_xnull = COPY $sgpr0_sgpr1
; GFX12-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
; GFX12-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[COPY]], [[V_MOV_B32_e32_]], -4097, 0, implicit $exec :: (load (s32), addrspace 1)
; GFX12-NEXT: $vgpr0 = COPY [[GLOBAL_LOAD_DWORD_SADDR]]
@@ -584,7 +584,7 @@ body: |
; GFX9-LABEL: name: load_global_s32_from_sgpr_base_offset_2049
; GFX9: liveins: $sgpr0_sgpr1
; GFX9-NEXT: {{ $}}
- ; GFX9-NEXT: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
+ ; GFX9-NEXT: [[COPY:%[0-9]+]]:sreg_64_xexec_xnull = COPY $sgpr0_sgpr1
; GFX9-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
; GFX9-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[COPY]], [[V_MOV_B32_e32_]], 2049, 0, implicit $exec :: (load (s32), addrspace 1)
; GFX9-NEXT: $vgpr0 = COPY [[GLOBAL_LOAD_DWORD_SADDR]]
@@ -592,7 +592,7 @@ body: |
; GFX10-LABEL: name: load_global_s32_from_sgpr_base_offset_2049
; GFX10: liveins: $sgpr0_sgpr1
; GFX10-NEXT: {{ $}}
- ; GFX10-NEXT: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
+ ; GFX10-NEXT: [[COPY:%[0-9]+]]:sreg_64_xexec_xnull = COPY $sgpr0_sgpr1
; GFX10-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 2048, implicit $exec
; GFX10-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[COPY]], [[V_MOV_B32_e32_]], 1, 0, implicit $exec :: (load (s32), addrspace 1)
; GFX10-NEXT: $vgpr0 = COPY [[GLOBAL_LOAD_DWORD_SADDR]]
@@ -600,7 +600,7 @@ body: |
; GFX11-LABEL: name: load_global_s32_from_sgpr_base_offset_2049
; GFX11: liveins: $sgpr0_sgpr1
; GFX11-NEXT: {{ $}}
- ; GFX11-NEXT: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:sreg_64_xexec_xnull = COPY $sgpr0_sgpr1
; GFX11-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
; GFX11-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[COPY]], [[V_MOV_B32_e32_]], 2049, 0, implicit $exec :: (load (s32), addrspace 1)
; GFX11-NEXT: $vgpr0 = COPY [[GLOBAL_LOAD_DWORD_SADDR]]
@@ -608,7 +608,7 @@ body: |
; GFX12-LABEL: name: load_global_s32_from_sgpr_base_offset_2049
; GFX12: liveins: $sgpr0_sgpr1
; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
+ ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_64_xexec_xnull = COPY $sgpr0_sgpr1
; GFX12-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
; GFX12-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[COPY]], [[V_MOV_B32_e32_]], 2049, 0, implicit $exec :: (load (s32), addrspace 1)
; GFX12-NEXT: $vgpr0 = COPY [[GLOBAL_LOAD_DWORD_SADDR]]
@@ -634,7 +634,7 @@ body: |
; GFX9-LABEL: name: load_global_s32_from_sgpr_base_offset_neg2049
; GFX9: liveins: $sgpr0_sgpr1
; GFX9-NEXT: {{ $}}
- ; GFX9-NEXT: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
+ ; GFX9-NEXT: [[COPY:%[0-9]+]]:sreg_64_xexec_xnull = COPY $sgpr0_sgpr1
; GFX9-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
; GFX9-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[COPY]], [[V_MOV_B32_e32_]], -2049, 0, implicit $exec :: (load (s32), addrspace 1)
; GFX9-NEXT: $vgpr0 = COPY [[GLOBAL_LOAD_DWORD_SADDR]]
@@ -658,7 +658,7 @@ body: |
; GFX11-LABEL: name: load_global_s32_from_sgpr_base_offset_neg2049
; GFX11: liveins: $sgpr0_sgpr1
; GFX11-NEXT: {{ $}}
- ; GFX11-NEXT: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:sreg_64_xexec_xnull = COPY $sgpr0_sgpr1
; GFX11-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
; GFX11-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[COPY]], [[V_MOV_B32_e32_]], -2049, 0, implicit $exec :: (load (s32), addrspace 1)
; GFX11-NEXT: $vgpr0 = COPY [[GLOBAL_LOAD_DWORD_SADDR]]
@@ -666,7 +666,7 @@ body: |
; GFX12-LABEL: name: load_global_s32_from_sgpr_base_offset_neg2049
; GFX12: liveins: $sgpr0_sgpr1
; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
+ ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_64_xexec_xnull = COPY $sgpr0_sgpr1
; GFX12-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
; GFX12-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[COPY]], [[V_MOV_B32_e32_]], -2049, 0, implicit $exec :: (load (s32), addrspace 1)
; GFX12-NEXT: $vgpr0 = COPY [[GLOBAL_LOAD_DWORD_SADDR]]
@@ -691,7 +691,7 @@ body: |
; GFX9-LABEL: name: load_global_s32_from_sgpr_base_offset_4294967295
; GFX9: liveins: $sgpr0_sgpr1
; GFX9-NEXT: {{ $}}
- ; GFX9-NEXT: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
+ ; GFX9-NEXT: [[COPY:%[0-9]+]]:sreg_64_xexec_xnull = COPY $sgpr0_sgpr1
; GFX9-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 4294963200, implicit $exec
; GFX9-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[COPY]], [[V_MOV_B32_e32_]], 4095, 0, implicit $exec :: (load (s32), addrspace 1)
; GFX9-NEXT: $vgpr0 = COPY [[GLOBAL_LOAD_DWORD_SADDR]]
@@ -699,7 +699,7 @@ body: |
; GFX10-LABEL: name: load_global_s32_from_sgpr_base_offset_4294967295
; GFX10: liveins: $sgpr0_sgpr1
; GFX10-NEXT: {{ $}}
- ; GFX10-NEXT: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
+ ; GFX10-NEXT: [[COPY:%[0-9]+]]:sreg_64_xexec_xnull = COPY $sgpr0_sgpr1
; GFX10-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 4294965248, implicit $exec
; GFX10-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[COPY]], [[V_MOV_B32_e32_]], 2047, 0, implicit $exec :: (load (s32), addrspace 1)
; GFX10-NEXT: $vgpr0 = COPY [[GLOBAL_LOAD_DWORD_SADDR]]
@@ -707,7 +707,7 @@ body: |
; GFX11-LABEL: name: load_global_s32_from_sgpr_base_offset_4294967295
; GFX11: liveins: $sgpr0_sgpr1
; GFX11-NEXT: {{ $}}
- ; GFX11-NEXT: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:sreg_64_xexec_xnull = COPY $sgpr0_sgpr1
; GFX11-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 4294963200, implicit $exec
; GFX11-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[COPY]], [[V_MOV_B32_e32_]], 4095, 0, implicit $exec :: (load (s32), addrspace 1)
; GFX11-NEXT: $vgpr0 = COPY [[GLOBAL_LOAD_DWORD_SADDR]]
@@ -715,7 +715,7 @@ body: |
; GFX12-LABEL: name: load_global_s32_from_sgpr_base_offset_4294967295
; GFX12: liveins: $sgpr0_sgpr1
; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
+ ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_64_xexec_xnull = COPY $sgpr0_sgpr1
; GFX12-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 4286578688, implicit $exec
; GFX12-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[COPY]], [[V_MOV_B32_e32_]], 8388607, 0, implicit $exec :: (load (s32), addrspace 1)
; GFX12-NEXT: $vgpr0 = COPY [[GLOBAL_LOAD_DWORD_SADDR]]
@@ -830,7 +830,7 @@ body: |
; GFX9-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY [[S_MOV_B]].sub1
; GFX9-NEXT: [[S_ADD_U32_:%[0-9]+]]:sreg_32 = S_ADD_U32 [[COPY1]], [[COPY2]], implicit-def $scc
; GFX9-NEXT: [[S_ADDC_U32_:%[0-9]+]]:sreg_32 = S_ADDC_U32 [[COPY3]], [[COPY4]], implicit-def dead $scc, implicit $scc
- ; GFX9-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64_xexec = REG_SEQUENCE [[S_ADD_U32_]], %subreg.sub0, [[S_ADDC_U32_]], %subreg.sub1
+ ; GFX9-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64_xexec_xnull = REG_SEQUENCE [[S_ADD_U32_]], %subreg.sub0, [[S_ADDC_U32_]], %subreg.sub1
; GFX9-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
; GFX9-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[REG_SEQUENCE]], [[V_MOV_B32_e32_]], 0, 0, implicit $exec :: (load (s32), addrspace 1)
; GFX9-NEXT: $vgpr0 = COPY [[GLOBAL_LOAD_DWORD_SADDR]]
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/vni8-across-blocks.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/vni8-across-blocks.ll
index fff06e4..386e34f7 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/vni8-across-blocks.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/vni8-across-blocks.ll
@@ -7,33 +7,33 @@ define amdgpu_kernel void @v3i8_liveout(ptr addrspace(1) %src1, ptr addrspace(1)
; GFX906-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; GFX906-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x34
; GFX906-NEXT: v_lshlrev_b32_e32 v2, 2, v0
-; GFX906-NEXT: v_mov_b32_e32 v3, 8
+; GFX906-NEXT: v_mov_b32_e32 v4, 8
; GFX906-NEXT: v_mov_b32_e32 v5, 16
; GFX906-NEXT: s_waitcnt lgkmcnt(0)
-; GFX906-NEXT: global_load_dword v4, v2, s[4:5]
+; GFX906-NEXT: global_load_dword v3, v2, s[4:5]
; GFX906-NEXT: v_mov_b32_e32 v1, 0xff
; GFX906-NEXT: v_cmp_gt_u32_e32 vcc, 15, v0
; GFX906-NEXT: s_waitcnt vmcnt(0)
-; GFX906-NEXT: v_and_b32_e32 v6, 0xff, v4
-; GFX906-NEXT: v_lshlrev_b32_sdwa v7, v3, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1
-; GFX906-NEXT: v_lshlrev_b32_sdwa v4, v5, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2
-; GFX906-NEXT: v_or3_b32 v4, v6, v7, v4
+; GFX906-NEXT: v_and_b32_e32 v6, 0xff, v3
+; GFX906-NEXT: v_lshlrev_b32_sdwa v7, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1
+; GFX906-NEXT: v_lshlrev_b32_sdwa v3, v5, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2
+; GFX906-NEXT: v_or3_b32 v3, v6, v7, v3
; GFX906-NEXT: s_and_saveexec_b64 s[2:3], vcc
; GFX906-NEXT: s_cbranch_execz .LBB0_2
; GFX906-NEXT: ; %bb.1: ; %bb.1
; GFX906-NEXT: global_load_dword v0, v2, s[6:7]
; GFX906-NEXT: s_waitcnt vmcnt(0)
; GFX906-NEXT: v_and_b32_e32 v2, 0xff, v0
-; GFX906-NEXT: v_lshlrev_b32_sdwa v3, v3, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1
+; GFX906-NEXT: v_lshlrev_b32_sdwa v3, v4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1
; GFX906-NEXT: v_lshlrev_b32_sdwa v0, v5, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2
-; GFX906-NEXT: v_or3_b32 v4, v2, v3, v0
+; GFX906-NEXT: v_or3_b32 v3, v2, v3, v0
; GFX906-NEXT: .LBB0_2: ; %bb.2
; GFX906-NEXT: s_or_b64 exec, exec, s[2:3]
-; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v4
+; GFX906-NEXT: v_lshrrev_b32_e32 v0, 8, v3
; GFX906-NEXT: v_and_b32_e32 v0, 0xff, v0
; GFX906-NEXT: v_lshlrev_b16_e32 v0, 8, v0
-; GFX906-NEXT: v_or_b32_sdwa v0, v4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX906-NEXT: v_and_b32_sdwa v1, v4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; GFX906-NEXT: v_or_b32_sdwa v0, v3, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX906-NEXT: v_and_b32_sdwa v1, v3, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
; GFX906-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX906-NEXT: v_and_b32_e32 v1, 0xffff, v1
; GFX906-NEXT: v_lshl_or_b32 v0, v1, 16, v0
diff --git a/llvm/test/CodeGen/AMDGPU/accvgpr-copy.mir b/llvm/test/CodeGen/AMDGPU/accvgpr-copy.mir
index 63b8cb6..e9a8248 100644
--- a/llvm/test/CodeGen/AMDGPU/accvgpr-copy.mir
+++ b/llvm/test/CodeGen/AMDGPU/accvgpr-copy.mir
@@ -895,6 +895,42 @@ body: |
...
---
+name: a2_to_a2
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $agpr0_agpr1
+ ; GFX908-LABEL: name: a2_to_a2
+ ; GFX908: liveins: $agpr0_agpr1
+ ; GFX908-NEXT: {{ $}}
+ ; GFX908-NEXT: $vgpr1 = V_ACCVGPR_READ_B32_e64 $agpr1, implicit $exec, implicit $agpr0_agpr1
+ ; GFX908-NEXT: $agpr2 = V_ACCVGPR_WRITE_B32_e64 $vgpr1, implicit $exec, implicit-def $agpr1_agpr2
+ ; GFX908-NEXT: $vgpr0 = V_ACCVGPR_READ_B32_e64 $agpr0, implicit $exec, implicit $agpr0_agpr1
+ ; GFX908-NEXT: $agpr1 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr0, implicit $exec, implicit $exec
+ ; GFX908-NEXT: $agpr3 = V_ACCVGPR_WRITE_B32_e64 $vgpr1, implicit $exec
+ ; GFX908-NEXT: S_ENDPGM 0, implicit $agpr1, implicit $agpr2, implicit $agpr3
+ ;
+ ; GFX90A-LABEL: name: a2_to_a2
+ ; GFX90A: liveins: $agpr0_agpr1
+ ; GFX90A-NEXT: {{ $}}
+ ; GFX90A-NEXT: $agpr2 = V_ACCVGPR_MOV_B32 $agpr1, implicit $exec, implicit-def $agpr1_agpr2, implicit $agpr0_agpr1
+ ; GFX90A-NEXT: $agpr1 = V_ACCVGPR_MOV_B32 $agpr0, implicit $exec, implicit $agpr0_agpr1, implicit $exec
+ ; GFX90A-NEXT: $agpr3 = V_ACCVGPR_MOV_B32 $agpr2, implicit $exec
+ ; GFX90A-NEXT: S_ENDPGM 0, implicit $agpr1, implicit $agpr2, implicit $agpr3
+ ;
+ ; GFX940-LABEL: name: a2_to_a2
+ ; GFX940: liveins: $agpr0_agpr1
+ ; GFX940-NEXT: {{ $}}
+ ; GFX940-NEXT: $agpr2 = V_ACCVGPR_MOV_B32 $agpr1, implicit $exec, implicit-def $agpr1_agpr2, implicit $agpr0_agpr1
+ ; GFX940-NEXT: $agpr1 = V_ACCVGPR_MOV_B32 $agpr0, implicit $exec, implicit $agpr0_agpr1, implicit $exec
+ ; GFX940-NEXT: $agpr3 = V_ACCVGPR_MOV_B32 $agpr2, implicit $exec
+ ; GFX940-NEXT: S_ENDPGM 0, implicit $agpr1, implicit $agpr2, implicit $agpr3
+ $agpr1_agpr2 = COPY $agpr0_agpr1, implicit $exec
+ $agpr3 = COPY $agpr2
+ S_ENDPGM 0, implicit $agpr1, implicit $agpr2, implicit $agpr3
+...
+
+---
name: a2_to_a2_kill
tracksRegLiveness: true
body: |
diff --git a/llvm/test/CodeGen/AMDGPU/agpr-to-agpr-copy.mir b/llvm/test/CodeGen/AMDGPU/agpr-to-agpr-copy.mir
index ffa9e64..86a1a26 100644
--- a/llvm/test/CodeGen/AMDGPU/agpr-to-agpr-copy.mir
+++ b/llvm/test/CodeGen/AMDGPU/agpr-to-agpr-copy.mir
@@ -12,7 +12,7 @@ body: |
; GFX908: liveins: $sgpr0_sgpr1
; GFX908-NEXT: {{ $}}
; GFX908-NEXT: [[COPY:%[0-9]+]]:sgpr_64(p4) = COPY $sgpr0_sgpr1
- ; GFX908-NEXT: [[S_LOAD_DWORDX2_IMM:%[0-9]+]]:sreg_64_xexec = S_LOAD_DWORDX2_IMM [[COPY]](p4), 36, 0 :: (dereferenceable invariant load (s64), align 4, addrspace 4)
+ ; GFX908-NEXT: [[S_LOAD_DWORDX2_IMM:%[0-9]+]]:sreg_64_xexec_xnull = S_LOAD_DWORDX2_IMM [[COPY]](p4), 36, 0 :: (dereferenceable invariant load (s64), align 4, addrspace 4)
; GFX908-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
; GFX908-NEXT: [[V_MOV_B32_e32_1:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 1123418112, implicit $exec
; GFX908-NEXT: undef [[V_ACCVGPR_WRITE_B32_e64_:%[0-9]+]].sub0:areg_128 = V_ACCVGPR_WRITE_B32_e64 [[V_MOV_B32_e32_1]], implicit $exec
@@ -26,7 +26,7 @@ body: |
; GFX908-NEXT: GLOBAL_STORE_DWORDX4_SADDR [[V_MOV_B32_e32_]], [[COPY1]], [[S_LOAD_DWORDX2_IMM]], 0, 0, implicit $exec :: (store (s128), addrspace 1)
; GFX908-NEXT: S_ENDPGM 0
%1:sgpr_64(p4) = COPY $sgpr0_sgpr1
- %4:sreg_64_xexec = S_LOAD_DWORDX2_IMM %1:sgpr_64(p4), 36, 0 :: (dereferenceable invariant load (s64), align 4, addrspace 4)
+ %4:sreg_64_xexec_xnull = S_LOAD_DWORDX2_IMM %1:sgpr_64(p4), 36, 0 :: (dereferenceable invariant load (s64), align 4, addrspace 4)
%5:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
%13:vgpr_32 = V_MOV_B32_e32 1123418112, implicit $exec
undef %11.sub0:areg_128 = V_ACCVGPR_WRITE_B32_e64 %13:vgpr_32, implicit $exec
@@ -37,7 +37,7 @@ body: |
%9:vgpr_32 = V_MOV_B32_e32 1065353216, implicit $exec
%10:areg_128 = V_MFMA_F32_4X4X1F32_e64 %9:vgpr_32, %8:vgpr_32, %11:areg_128, 0, 0, 0, implicit $mode, implicit $exec
%12:vreg_128 = COPY %10:areg_128
- GLOBAL_STORE_DWORDX4_SADDR %5:vgpr_32, %12:vreg_128, %4:sreg_64_xexec, 0, 0, implicit $exec :: (store (s128), addrspace 1)
+ GLOBAL_STORE_DWORDX4_SADDR %5:vgpr_32, %12:vreg_128, %4:sreg_64_xexec_xnull, 0, 0, implicit $exec :: (store (s128), addrspace 1)
S_ENDPGM 0
...
---
@@ -51,7 +51,7 @@ body: |
; GFX908: liveins: $sgpr0_sgpr1
; GFX908-NEXT: {{ $}}
; GFX908-NEXT: [[COPY:%[0-9]+]]:sgpr_64(p4) = COPY $sgpr0_sgpr1
- ; GFX908-NEXT: [[S_LOAD_DWORDX2_IMM:%[0-9]+]]:sreg_64_xexec = S_LOAD_DWORDX2_IMM [[COPY]](p4), 36, 0 :: (dereferenceable invariant load (s64), align 4, addrspace 4)
+ ; GFX908-NEXT: [[S_LOAD_DWORDX2_IMM:%[0-9]+]]:sreg_64_xexec_xnull = S_LOAD_DWORDX2_IMM [[COPY]](p4), 36, 0 :: (dereferenceable invariant load (s64), align 4, addrspace 4)
; GFX908-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
; GFX908-NEXT: undef [[V_ACCVGPR_WRITE_B32_e64_:%[0-9]+]].sub0:areg_128 = V_ACCVGPR_WRITE_B32_e64 1073741824, implicit $exec
; GFX908-NEXT: [[V_ACCVGPR_WRITE_B32_e64_:%[0-9]+]].sub1:areg_128 = COPY [[V_ACCVGPR_WRITE_B32_e64_]].sub0
@@ -64,7 +64,7 @@ body: |
; GFX908-NEXT: GLOBAL_STORE_DWORDX4_SADDR [[V_MOV_B32_e32_]], [[COPY1]], [[S_LOAD_DWORDX2_IMM]], 0, 0, implicit $exec :: (store (s128), addrspace 1)
; GFX908-NEXT: S_ENDPGM 0
%1:sgpr_64(p4) = COPY $sgpr0_sgpr1
- %4:sreg_64_xexec = S_LOAD_DWORDX2_IMM %1:sgpr_64(p4), 36, 0 :: (dereferenceable invariant load (s64), align 4, addrspace 4)
+ %4:sreg_64_xexec_xnull = S_LOAD_DWORDX2_IMM %1:sgpr_64(p4), 36, 0 :: (dereferenceable invariant load (s64), align 4, addrspace 4)
%5:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
undef %11.sub0:areg_128 = V_ACCVGPR_WRITE_B32_e64 1073741824, implicit $exec
%11.sub1:areg_128 = COPY %11.sub0:areg_128
@@ -74,7 +74,7 @@ body: |
%9:vgpr_32 = V_MOV_B32_e32 1065353216, implicit $exec
%10:areg_128 = V_MFMA_F32_4X4X1F32_e64 %9:vgpr_32, %8:vgpr_32, %11:areg_128, 0, 0, 0, implicit $mode, implicit $exec
%12:vreg_128 = COPY %10:areg_128
- GLOBAL_STORE_DWORDX4_SADDR %5:vgpr_32, %12:vreg_128, %4:sreg_64_xexec, 0, 0, implicit $exec :: (store (s128), addrspace 1)
+ GLOBAL_STORE_DWORDX4_SADDR %5:vgpr_32, %12:vreg_128, %4:sreg_64_xexec_xnull, 0, 0, implicit $exec :: (store (s128), addrspace 1)
S_ENDPGM 0
...
---
diff --git a/llvm/test/CodeGen/AMDGPU/amdhsa-kernarg-preload-num-sgprs.ll b/llvm/test/CodeGen/AMDGPU/amdhsa-kernarg-preload-num-sgprs.ll
new file mode 100644
index 0000000..c8ba672
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/amdhsa-kernarg-preload-num-sgprs.ll
@@ -0,0 +1,73 @@
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx940 -filetype=obj < %s | llvm-objdump -s -j .rodata - | FileCheck --check-prefix=OBJDUMP %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx940 < %s | FileCheck --check-prefix=ASM %s
+
+; OBJDUMP: Contents of section .rodata:
+; OBJDUMP-NEXT: 0000 00000000 00000000 10010000 00000000 ................
+; OBJDUMP-NEXT: 0010 00000000 00000000 00000000 00000000 ................
+; OBJDUMP-NEXT: 0020 00000000 00000000 00000000 00000000 ................
+; OBJDUMP-NOT: 0030 0000af00 94130000 1a000400 00000000 ................
+; OBJDUMP-NEXT: 0030 4000af00 94130000 1a000400 00000000 @...............
+
+; ASM-LABEL: amdhsa_kernarg_preload_4_implicit_6:
+; ASM: .amdhsa_user_sgpr_count 10
+; ASM: .amdhsa_next_free_sgpr 10
+; ASM: ; NumSgprs: 16
+; ASM: ; NumSGPRsForWavesPerEU: 16
+
+; Test that preloaded SGPRs are included in the GRANULATED_WAVEFRONT_SGPR_COUNT
+; field even when they are not explicitly referenced in the kernel. This test
+; has 6 implicit user SGPRs enabled, 4 preloaded kernarg SGPRs, plus 6 extra
+; SGPRs allocated for flat scratch, etc. The total number of allocated SGPRs
+; encoded in the kernel descriptor should be 16. That's a 1 in the KD field,
+; since the granule size is 8 and the field holds NumGranules - 1; that encodes as '40'.
+
+define amdgpu_kernel void @amdhsa_kernarg_preload_4_implicit_6(i128 inreg) { ret void }
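The arithmetic in the comment above can be checked with a short standalone sketch. This is illustrative only: granulatedSgprField is a hypothetical helper mirroring the comment's description of the KD field, not the actual encoder in LLVM's AMDGPU backend.

#include <cassert>

// Round the total SGPR count up to the 8-register allocation granule,
// then store NumGranules - 1 in bits [9:6] of COMPUTE_PGM_RSRC1
// (the low byte of the RSRC1 word shown in the .rodata dumps).
unsigned granulatedSgprField(unsigned TotalSgprs) {
  const unsigned Granule = 8;
  unsigned NumGranules = (TotalSgprs + Granule - 1) / Granule;
  return (NumGranules - 1) << 6; // bits 9:6 of the low RSRC1 byte
}

int main() {
  // 4 preloaded + 6 implicit user + 6 extra = 16 SGPRs -> field value 1,
  // i.e. the 0x40 byte at offset 0x30 in the dump above.
  assert(granulatedSgprField(16) == 0x40);
  // 9 total SGPRs also round up to two granules, again encoding as 0x40.
  assert(granulatedSgprField(9) == 0x40);
  // 8 total SGPRs fit in one granule -> field value 0, the '00' encoding.
  assert(granulatedSgprField(8) == 0x00);
  return 0;
}

The same computation accounts for every RSRC1 byte checked by the OBJDUMP lines in this file, which is why tests below that differ in preloaded/implicit counts can still share an encoding.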
+
+; OBJDUMP-NEXT: 0040 00000000 00000000 20010000 00000000 ........ .......
+; OBJDUMP-NEXT: 0050 00000000 00000000 00000000 00000000 ................
+; OBJDUMP-NEXT: 0060 00000000 00000000 00000000 00000000 ................
+; OBJDUMP-NEXT: 0070 4000af00 94000000 08000800 00000000 @...............
+
+; ASM-LABEL: amdhsa_kernarg_preload_8_implicit_2:
+; ASM: .amdhsa_user_sgpr_count 10
+; ASM: .amdhsa_next_free_sgpr 10
+; ASM: ; NumSgprs: 16
+; ASM: ; NumSGPRsForWavesPerEU: 16
+
+; Only the kernarg_ptr is enabled, so we should have 8 preloaded kernarg SGPRs,
+; 2 implicit, and 6 extra.
+
+define amdgpu_kernel void @amdhsa_kernarg_preload_8_implicit_2(i256 inreg) #0 { ret void }
+
+; OBJDUMP-NEXT: 0080 00000000 00000000 08010000 00000000 ................
+; OBJDUMP-NEXT: 0090 00000000 00000000 00000000 00000000 ................
+; OBJDUMP-NEXT: 00a0 00000000 00000000 00000000 00000000 ................
+; OBJDUMP-NEXT: 00b0 4000af00 86000000 08000100 00000000 @...............
+
+; ASM-LABEL: amdhsa_kernarg_preload_1_implicit_2:
+; ASM: .amdhsa_user_sgpr_count 3
+; ASM: .amdhsa_next_free_sgpr 3
+; ASM: ; NumSgprs: 9
+; ASM: ; NumSGPRsForWavesPerEU: 9
+
+; 1 preloaded, 2 implicit, and 6 extra SGPRs. Rounds up to 16 SGPRs in the KD.
+
+define amdgpu_kernel void @amdhsa_kernarg_preload_1_implicit_2(i32 inreg) #0 { ret void }
+
+; OBJDUMP-NEXT: 00c0 00000000 00000000 08010000 00000000 ................
+; OBJDUMP-NEXT: 00d0 00000000 00000000 00000000 00000000 ................
+; OBJDUMP-NEXT: 00e0 00000000 00000000 00000000 00000000 ................
+; OBJDUMP-NEXT: 00f0 0000af00 84000000 08000000 00000000 ................
+
+; ASM-LABEL: amdhsa_kernarg_preload_0_implicit_2:
+; ASM: .amdhsa_user_sgpr_count 2
+; ASM: .amdhsa_next_free_sgpr 0
+; ASM: ; NumSgprs: 6
+; ASM: ; NumSGPRsForWavesPerEU: 6
+
+; 0 preload kernarg SGPRs, 2 implicit, 6 extra. Rounds up to 8 SGPRs in the KD.
+; Encoded like '00'.
+
+define amdgpu_kernel void @amdhsa_kernarg_preload_0_implicit_2(i32) #0 { ret void }
+
+attributes #0 = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
diff --git a/llvm/test/CodeGen/AMDGPU/atomic_optimizations_global_pointer.ll b/llvm/test/CodeGen/AMDGPU/atomic_optimizations_global_pointer.ll
index b17dfc7..ce608df 100644
--- a/llvm/test/CodeGen/AMDGPU/atomic_optimizations_global_pointer.ll
+++ b/llvm/test/CodeGen/AMDGPU/atomic_optimizations_global_pointer.ll
@@ -1323,9 +1323,9 @@ define amdgpu_kernel void @add_i32_varying(ptr addrspace(1) %out, ptr addrspace(
; GFX1032_DPP-NEXT: s_or_saveexec_b32 s0, -1
; GFX1032_DPP-NEXT: v_writelane_b32 v3, s1, 16
; GFX1032_DPP-NEXT: s_mov_b32 exec_lo, s0
-; GFX1032_DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX1032_DPP-NEXT: s_mov_b32 s0, s2
; GFX1032_DPP-NEXT: s_mov_b32 s2, -1
+; GFX1032_DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX1032_DPP-NEXT: ; implicit-def: $vgpr0
; GFX1032_DPP-NEXT: s_and_saveexec_b32 s8, vcc_lo
; GFX1032_DPP-NEXT: s_cbranch_execz .LBB2_2
@@ -1451,10 +1451,9 @@ define amdgpu_kernel void @add_i32_varying(ptr addrspace(1) %out, ptr addrspace(
; GFX1132_DPP-NEXT: s_or_saveexec_b32 s4, -1
; GFX1132_DPP-NEXT: v_writelane_b32 v3, s5, 16
; GFX1132_DPP-NEXT: s_mov_b32 exec_lo, s4
-; GFX1132_DPP-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX1132_DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX1132_DPP-NEXT: s_mov_b32 s4, s6
; GFX1132_DPP-NEXT: s_mov_b32 s6, -1
+; GFX1132_DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX1132_DPP-NEXT: ; implicit-def: $vgpr0
; GFX1132_DPP-NEXT: s_and_saveexec_b32 s8, vcc_lo
; GFX1132_DPP-NEXT: s_cbranch_execz .LBB2_2
@@ -1587,9 +1586,9 @@ define amdgpu_kernel void @add_i32_varying(ptr addrspace(1) %out, ptr addrspace(
; GFX1232_DPP-NEXT: v_writelane_b32 v3, s5, 16
; GFX1232_DPP-NEXT: s_wait_alu 0xfffe
; GFX1232_DPP-NEXT: s_mov_b32 exec_lo, s4
-; GFX1232_DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX1232_DPP-NEXT: s_mov_b32 s4, s6
; GFX1232_DPP-NEXT: s_mov_b32 s6, -1
+; GFX1232_DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX1232_DPP-NEXT: ; implicit-def: $vgpr0
; GFX1232_DPP-NEXT: s_and_saveexec_b32 s8, vcc_lo
; GFX1232_DPP-NEXT: s_cbranch_execz .LBB2_2
@@ -3228,8 +3227,8 @@ define amdgpu_kernel void @add_i64_varying(ptr addrspace(1) %out, ptr addrspace(
; GFX1032_DPP-NEXT: v_writelane_b32 v2, s8, 16
; GFX1032_DPP-NEXT: v_writelane_b32 v1, s3, 16
; GFX1032_DPP-NEXT: s_mov_b32 exec_lo, s2
-; GFX1032_DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX1032_DPP-NEXT: s_mov_b32 s2, -1
+; GFX1032_DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX1032_DPP-NEXT: ; implicit-def: $vgpr9_vgpr10
; GFX1032_DPP-NEXT: s_and_saveexec_b32 s8, vcc_lo
; GFX1032_DPP-NEXT: s_cbranch_execz .LBB5_2
@@ -4991,9 +4990,9 @@ define amdgpu_kernel void @sub_i32_varying(ptr addrspace(1) %out, ptr addrspace(
; GFX1032_DPP-NEXT: s_or_saveexec_b32 s0, -1
; GFX1032_DPP-NEXT: v_writelane_b32 v3, s1, 16
; GFX1032_DPP-NEXT: s_mov_b32 exec_lo, s0
-; GFX1032_DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX1032_DPP-NEXT: s_mov_b32 s0, s2
; GFX1032_DPP-NEXT: s_mov_b32 s2, -1
+; GFX1032_DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX1032_DPP-NEXT: ; implicit-def: $vgpr0
; GFX1032_DPP-NEXT: s_and_saveexec_b32 s8, vcc_lo
; GFX1032_DPP-NEXT: s_cbranch_execz .LBB8_2
@@ -5119,10 +5118,9 @@ define amdgpu_kernel void @sub_i32_varying(ptr addrspace(1) %out, ptr addrspace(
; GFX1132_DPP-NEXT: s_or_saveexec_b32 s4, -1
; GFX1132_DPP-NEXT: v_writelane_b32 v3, s5, 16
; GFX1132_DPP-NEXT: s_mov_b32 exec_lo, s4
-; GFX1132_DPP-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX1132_DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX1132_DPP-NEXT: s_mov_b32 s4, s6
; GFX1132_DPP-NEXT: s_mov_b32 s6, -1
+; GFX1132_DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX1132_DPP-NEXT: ; implicit-def: $vgpr0
; GFX1132_DPP-NEXT: s_and_saveexec_b32 s8, vcc_lo
; GFX1132_DPP-NEXT: s_cbranch_execz .LBB8_2
@@ -5255,9 +5253,9 @@ define amdgpu_kernel void @sub_i32_varying(ptr addrspace(1) %out, ptr addrspace(
; GFX1232_DPP-NEXT: v_writelane_b32 v3, s5, 16
; GFX1232_DPP-NEXT: s_wait_alu 0xfffe
; GFX1232_DPP-NEXT: s_mov_b32 exec_lo, s4
-; GFX1232_DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX1232_DPP-NEXT: s_mov_b32 s4, s6
; GFX1232_DPP-NEXT: s_mov_b32 s6, -1
+; GFX1232_DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX1232_DPP-NEXT: ; implicit-def: $vgpr0
; GFX1232_DPP-NEXT: s_and_saveexec_b32 s8, vcc_lo
; GFX1232_DPP-NEXT: s_cbranch_execz .LBB8_2
@@ -6938,8 +6936,8 @@ define amdgpu_kernel void @sub_i64_varying(ptr addrspace(1) %out, ptr addrspace(
; GFX1032_DPP-NEXT: v_writelane_b32 v2, s8, 16
; GFX1032_DPP-NEXT: v_writelane_b32 v1, s3, 16
; GFX1032_DPP-NEXT: s_mov_b32 exec_lo, s2
-; GFX1032_DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX1032_DPP-NEXT: s_mov_b32 s2, -1
+; GFX1032_DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX1032_DPP-NEXT: ; implicit-def: $vgpr9_vgpr10
; GFX1032_DPP-NEXT: s_and_saveexec_b32 s8, vcc_lo
; GFX1032_DPP-NEXT: s_cbranch_execz .LBB11_2
diff --git a/llvm/test/CodeGen/AMDGPU/atomic_optimizations_local_pointer.ll b/llvm/test/CodeGen/AMDGPU/atomic_optimizations_local_pointer.ll
index 988bc8e..ce90fbe 100644
--- a/llvm/test/CodeGen/AMDGPU/atomic_optimizations_local_pointer.ll
+++ b/llvm/test/CodeGen/AMDGPU/atomic_optimizations_local_pointer.ll
@@ -936,8 +936,8 @@ define amdgpu_kernel void @add_i32_varying(ptr addrspace(1) %out) {
; GFX1032_DPP-NEXT: s_or_saveexec_b32 s0, -1
; GFX1032_DPP-NEXT: v_writelane_b32 v3, s1, 16
; GFX1032_DPP-NEXT: s_mov_b32 exec_lo, s0
-; GFX1032_DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX1032_DPP-NEXT: s_mov_b32 s6, -1
+; GFX1032_DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX1032_DPP-NEXT: ; implicit-def: $vgpr0
; GFX1032_DPP-NEXT: s_and_saveexec_b32 s1, vcc_lo
; GFX1032_DPP-NEXT: s_cbranch_execz .LBB2_2
@@ -1047,8 +1047,8 @@ define amdgpu_kernel void @add_i32_varying(ptr addrspace(1) %out) {
; GFX1132_DPP-NEXT: s_or_saveexec_b32 s0, -1
; GFX1132_DPP-NEXT: v_writelane_b32 v3, s1, 16
; GFX1132_DPP-NEXT: s_mov_b32 exec_lo, s0
-; GFX1132_DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX1132_DPP-NEXT: s_mov_b32 s6, -1
+; GFX1132_DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX1132_DPP-NEXT: ; implicit-def: $vgpr0
; GFX1132_DPP-NEXT: s_and_saveexec_b32 s1, vcc_lo
; GFX1132_DPP-NEXT: s_cbranch_execz .LBB2_2
@@ -2684,8 +2684,8 @@ define amdgpu_kernel void @add_i64_varying(ptr addrspace(1) %out) {
; GFX1032_DPP-NEXT: v_writelane_b32 v2, s6, 16
; GFX1032_DPP-NEXT: v_writelane_b32 v1, s5, 16
; GFX1032_DPP-NEXT: s_mov_b32 exec_lo, s4
-; GFX1032_DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v9
; GFX1032_DPP-NEXT: s_mov_b32 s6, -1
+; GFX1032_DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v9
; GFX1032_DPP-NEXT: ; implicit-def: $vgpr9_vgpr10
; GFX1032_DPP-NEXT: s_and_saveexec_b32 s4, vcc_lo
; GFX1032_DPP-NEXT: s_cbranch_execz .LBB6_2
@@ -2874,8 +2874,8 @@ define amdgpu_kernel void @add_i64_varying(ptr addrspace(1) %out) {
; GFX1132_DPP-NEXT: v_writelane_b32 v1, s5, 16
; GFX1132_DPP-NEXT: v_writelane_b32 v2, s6, 16
; GFX1132_DPP-NEXT: s_mov_b32 exec_lo, s4
-; GFX1132_DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v8
; GFX1132_DPP-NEXT: s_mov_b32 s6, -1
+; GFX1132_DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v8
; GFX1132_DPP-NEXT: ; implicit-def: $vgpr8_vgpr9
; GFX1132_DPP-NEXT: s_and_saveexec_b32 s4, vcc_lo
; GFX1132_DPP-NEXT: s_cbranch_execz .LBB6_2
@@ -3383,8 +3383,8 @@ define amdgpu_kernel void @add_i64_varying_nouse() {
; GFX1032_DPP-NEXT: v_add_co_u32 v1, vcc_lo, v1, v3
; GFX1032_DPP-NEXT: v_add_co_ci_u32_e32 v2, vcc_lo, v2, v4, vcc_lo
; GFX1032_DPP-NEXT: s_mov_b32 exec_lo, s0
-; GFX1032_DPP-NEXT: v_mbcnt_lo_u32_b32 v11, exec_lo, 0
; GFX1032_DPP-NEXT: v_mov_b32_e32 v9, v1
+; GFX1032_DPP-NEXT: v_mbcnt_lo_u32_b32 v11, exec_lo, 0
; GFX1032_DPP-NEXT: v_mov_b32_e32 v0, 0
; GFX1032_DPP-NEXT: v_mov_b32_e32 v10, v2
; GFX1032_DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v11
@@ -4444,8 +4444,8 @@ define amdgpu_kernel void @sub_i32_varying(ptr addrspace(1) %out) {
; GFX1032_DPP-NEXT: s_or_saveexec_b32 s0, -1
; GFX1032_DPP-NEXT: v_writelane_b32 v3, s1, 16
; GFX1032_DPP-NEXT: s_mov_b32 exec_lo, s0
-; GFX1032_DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX1032_DPP-NEXT: s_mov_b32 s6, -1
+; GFX1032_DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX1032_DPP-NEXT: ; implicit-def: $vgpr0
; GFX1032_DPP-NEXT: s_and_saveexec_b32 s1, vcc_lo
; GFX1032_DPP-NEXT: s_cbranch_execz .LBB10_2
@@ -4555,8 +4555,8 @@ define amdgpu_kernel void @sub_i32_varying(ptr addrspace(1) %out) {
; GFX1132_DPP-NEXT: s_or_saveexec_b32 s0, -1
; GFX1132_DPP-NEXT: v_writelane_b32 v3, s1, 16
; GFX1132_DPP-NEXT: s_mov_b32 exec_lo, s0
-; GFX1132_DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX1132_DPP-NEXT: s_mov_b32 s6, -1
+; GFX1132_DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX1132_DPP-NEXT: ; implicit-def: $vgpr0
; GFX1132_DPP-NEXT: s_and_saveexec_b32 s1, vcc_lo
; GFX1132_DPP-NEXT: s_cbranch_execz .LBB10_2
@@ -6218,8 +6218,8 @@ define amdgpu_kernel void @sub_i64_varying(ptr addrspace(1) %out) {
; GFX1032_DPP-NEXT: v_writelane_b32 v2, s6, 16
; GFX1032_DPP-NEXT: v_writelane_b32 v1, s5, 16
; GFX1032_DPP-NEXT: s_mov_b32 exec_lo, s4
-; GFX1032_DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v9
; GFX1032_DPP-NEXT: s_mov_b32 s6, -1
+; GFX1032_DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v9
; GFX1032_DPP-NEXT: ; implicit-def: $vgpr9_vgpr10
; GFX1032_DPP-NEXT: s_and_saveexec_b32 s4, vcc_lo
; GFX1032_DPP-NEXT: s_cbranch_execz .LBB14_2
@@ -6408,8 +6408,8 @@ define amdgpu_kernel void @sub_i64_varying(ptr addrspace(1) %out) {
; GFX1132_DPP-NEXT: v_writelane_b32 v1, s5, 16
; GFX1132_DPP-NEXT: v_writelane_b32 v2, s6, 16
; GFX1132_DPP-NEXT: s_mov_b32 exec_lo, s4
-; GFX1132_DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v8
; GFX1132_DPP-NEXT: s_mov_b32 s6, -1
+; GFX1132_DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v8
; GFX1132_DPP-NEXT: ; implicit-def: $vgpr8_vgpr9
; GFX1132_DPP-NEXT: s_and_saveexec_b32 s4, vcc_lo
; GFX1132_DPP-NEXT: s_cbranch_execz .LBB14_2
@@ -6915,8 +6915,8 @@ define amdgpu_kernel void @and_i32_varying(ptr addrspace(1) %out) {
; GFX1032_DPP-NEXT: s_or_saveexec_b32 s0, -1
; GFX1032_DPP-NEXT: v_writelane_b32 v3, s1, 16
; GFX1032_DPP-NEXT: s_mov_b32 exec_lo, s0
-; GFX1032_DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX1032_DPP-NEXT: s_mov_b32 s6, -1
+; GFX1032_DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX1032_DPP-NEXT: ; implicit-def: $vgpr0
; GFX1032_DPP-NEXT: s_and_saveexec_b32 s1, vcc_lo
; GFX1032_DPP-NEXT: s_cbranch_execz .LBB15_2
@@ -7026,9 +7026,8 @@ define amdgpu_kernel void @and_i32_varying(ptr addrspace(1) %out) {
; GFX1132_DPP-NEXT: s_or_saveexec_b32 s0, -1
; GFX1132_DPP-NEXT: v_writelane_b32 v3, s1, 16
; GFX1132_DPP-NEXT: s_mov_b32 exec_lo, s0
-; GFX1132_DPP-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX1132_DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX1132_DPP-NEXT: s_mov_b32 s6, -1
+; GFX1132_DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX1132_DPP-NEXT: ; implicit-def: $vgpr0
; GFX1132_DPP-NEXT: s_and_saveexec_b32 s1, vcc_lo
; GFX1132_DPP-NEXT: s_cbranch_execz .LBB15_2
@@ -7627,8 +7626,8 @@ define amdgpu_kernel void @and_i64_varying(ptr addrspace(1) %out) {
; GFX1032_DPP-NEXT: v_writelane_b32 v6, s5, 16
; GFX1032_DPP-NEXT: v_writelane_b32 v5, s6, 16
; GFX1032_DPP-NEXT: s_mov_b32 exec_lo, s4
-; GFX1032_DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v7
; GFX1032_DPP-NEXT: s_mov_b32 s6, -1
+; GFX1032_DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v7
; GFX1032_DPP-NEXT: ; implicit-def: $vgpr7_vgpr8
; GFX1032_DPP-NEXT: s_and_saveexec_b32 s4, vcc_lo
; GFX1032_DPP-NEXT: s_cbranch_execz .LBB16_2
@@ -7786,8 +7785,8 @@ define amdgpu_kernel void @and_i64_varying(ptr addrspace(1) %out) {
; GFX1132_DPP-NEXT: v_writelane_b32 v6, s5, 16
; GFX1132_DPP-NEXT: v_writelane_b32 v5, s6, 16
; GFX1132_DPP-NEXT: s_mov_b32 exec_lo, s4
-; GFX1132_DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v7
; GFX1132_DPP-NEXT: s_mov_b32 s6, -1
+; GFX1132_DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v7
; GFX1132_DPP-NEXT: ; implicit-def: $vgpr7_vgpr8
; GFX1132_DPP-NEXT: s_and_saveexec_b32 s4, vcc_lo
; GFX1132_DPP-NEXT: s_cbranch_execz .LBB16_2
@@ -8294,8 +8293,8 @@ define amdgpu_kernel void @or_i32_varying(ptr addrspace(1) %out) {
; GFX1032_DPP-NEXT: s_or_saveexec_b32 s0, -1
; GFX1032_DPP-NEXT: v_writelane_b32 v3, s1, 16
; GFX1032_DPP-NEXT: s_mov_b32 exec_lo, s0
-; GFX1032_DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX1032_DPP-NEXT: s_mov_b32 s6, -1
+; GFX1032_DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX1032_DPP-NEXT: ; implicit-def: $vgpr0
; GFX1032_DPP-NEXT: s_and_saveexec_b32 s1, vcc_lo
; GFX1032_DPP-NEXT: s_cbranch_execz .LBB17_2
@@ -8405,8 +8404,8 @@ define amdgpu_kernel void @or_i32_varying(ptr addrspace(1) %out) {
; GFX1132_DPP-NEXT: s_or_saveexec_b32 s0, -1
; GFX1132_DPP-NEXT: v_writelane_b32 v3, s1, 16
; GFX1132_DPP-NEXT: s_mov_b32 exec_lo, s0
-; GFX1132_DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX1132_DPP-NEXT: s_mov_b32 s6, -1
+; GFX1132_DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX1132_DPP-NEXT: ; implicit-def: $vgpr0
; GFX1132_DPP-NEXT: s_and_saveexec_b32 s1, vcc_lo
; GFX1132_DPP-NEXT: s_cbranch_execz .LBB17_2
@@ -9006,8 +9005,8 @@ define amdgpu_kernel void @or_i64_varying(ptr addrspace(1) %out) {
; GFX1032_DPP-NEXT: v_writelane_b32 v6, s5, 16
; GFX1032_DPP-NEXT: v_writelane_b32 v5, s6, 16
; GFX1032_DPP-NEXT: s_mov_b32 exec_lo, s4
-; GFX1032_DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v7
; GFX1032_DPP-NEXT: s_mov_b32 s6, -1
+; GFX1032_DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v7
; GFX1032_DPP-NEXT: ; implicit-def: $vgpr7_vgpr8
; GFX1032_DPP-NEXT: s_and_saveexec_b32 s4, vcc_lo
; GFX1032_DPP-NEXT: s_cbranch_execz .LBB18_2
@@ -9165,8 +9164,8 @@ define amdgpu_kernel void @or_i64_varying(ptr addrspace(1) %out) {
; GFX1132_DPP-NEXT: v_writelane_b32 v6, s5, 16
; GFX1132_DPP-NEXT: v_writelane_b32 v5, s6, 16
; GFX1132_DPP-NEXT: s_mov_b32 exec_lo, s4
-; GFX1132_DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v7
; GFX1132_DPP-NEXT: s_mov_b32 s6, -1
+; GFX1132_DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v7
; GFX1132_DPP-NEXT: ; implicit-def: $vgpr7_vgpr8
; GFX1132_DPP-NEXT: s_and_saveexec_b32 s4, vcc_lo
; GFX1132_DPP-NEXT: s_cbranch_execz .LBB18_2
@@ -9673,8 +9672,8 @@ define amdgpu_kernel void @xor_i32_varying(ptr addrspace(1) %out) {
; GFX1032_DPP-NEXT: s_or_saveexec_b32 s0, -1
; GFX1032_DPP-NEXT: v_writelane_b32 v3, s1, 16
; GFX1032_DPP-NEXT: s_mov_b32 exec_lo, s0
-; GFX1032_DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX1032_DPP-NEXT: s_mov_b32 s6, -1
+; GFX1032_DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX1032_DPP-NEXT: ; implicit-def: $vgpr0
; GFX1032_DPP-NEXT: s_and_saveexec_b32 s1, vcc_lo
; GFX1032_DPP-NEXT: s_cbranch_execz .LBB19_2
@@ -9784,8 +9783,8 @@ define amdgpu_kernel void @xor_i32_varying(ptr addrspace(1) %out) {
; GFX1132_DPP-NEXT: s_or_saveexec_b32 s0, -1
; GFX1132_DPP-NEXT: v_writelane_b32 v3, s1, 16
; GFX1132_DPP-NEXT: s_mov_b32 exec_lo, s0
-; GFX1132_DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX1132_DPP-NEXT: s_mov_b32 s6, -1
+; GFX1132_DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX1132_DPP-NEXT: ; implicit-def: $vgpr0
; GFX1132_DPP-NEXT: s_and_saveexec_b32 s1, vcc_lo
; GFX1132_DPP-NEXT: s_cbranch_execz .LBB19_2
@@ -10385,8 +10384,8 @@ define amdgpu_kernel void @xor_i64_varying(ptr addrspace(1) %out) {
; GFX1032_DPP-NEXT: v_writelane_b32 v6, s5, 16
; GFX1032_DPP-NEXT: v_writelane_b32 v5, s6, 16
; GFX1032_DPP-NEXT: s_mov_b32 exec_lo, s4
-; GFX1032_DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v7
; GFX1032_DPP-NEXT: s_mov_b32 s6, -1
+; GFX1032_DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v7
; GFX1032_DPP-NEXT: ; implicit-def: $vgpr7_vgpr8
; GFX1032_DPP-NEXT: s_and_saveexec_b32 s4, vcc_lo
; GFX1032_DPP-NEXT: s_cbranch_execz .LBB20_2
@@ -10544,8 +10543,8 @@ define amdgpu_kernel void @xor_i64_varying(ptr addrspace(1) %out) {
; GFX1132_DPP-NEXT: v_writelane_b32 v6, s5, 16
; GFX1132_DPP-NEXT: v_writelane_b32 v5, s6, 16
; GFX1132_DPP-NEXT: s_mov_b32 exec_lo, s4
-; GFX1132_DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v7
; GFX1132_DPP-NEXT: s_mov_b32 s6, -1
+; GFX1132_DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v7
; GFX1132_DPP-NEXT: ; implicit-def: $vgpr7_vgpr8
; GFX1132_DPP-NEXT: s_and_saveexec_b32 s4, vcc_lo
; GFX1132_DPP-NEXT: s_cbranch_execz .LBB20_2
@@ -11051,8 +11050,8 @@ define amdgpu_kernel void @max_i32_varying(ptr addrspace(1) %out) {
; GFX1032_DPP-NEXT: s_or_saveexec_b32 s0, -1
; GFX1032_DPP-NEXT: v_writelane_b32 v3, s1, 16
; GFX1032_DPP-NEXT: s_mov_b32 exec_lo, s0
-; GFX1032_DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX1032_DPP-NEXT: s_mov_b32 s6, -1
+; GFX1032_DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX1032_DPP-NEXT: ; implicit-def: $vgpr0
; GFX1032_DPP-NEXT: s_and_saveexec_b32 s1, vcc_lo
; GFX1032_DPP-NEXT: s_cbranch_execz .LBB21_2
@@ -11162,9 +11161,8 @@ define amdgpu_kernel void @max_i32_varying(ptr addrspace(1) %out) {
; GFX1132_DPP-NEXT: s_or_saveexec_b32 s0, -1
; GFX1132_DPP-NEXT: v_writelane_b32 v3, s1, 16
; GFX1132_DPP-NEXT: s_mov_b32 exec_lo, s0
-; GFX1132_DPP-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX1132_DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX1132_DPP-NEXT: s_mov_b32 s6, -1
+; GFX1132_DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX1132_DPP-NEXT: ; implicit-def: $vgpr0
; GFX1132_DPP-NEXT: s_and_saveexec_b32 s1, vcc_lo
; GFX1132_DPP-NEXT: s_cbranch_execz .LBB21_2
@@ -12196,8 +12194,8 @@ define amdgpu_kernel void @max_i64_varying(ptr addrspace(1) %out) {
; GFX1032_DPP-NEXT: v_writelane_b32 v2, s5, 16
; GFX1032_DPP-NEXT: v_writelane_b32 v1, s6, 16
; GFX1032_DPP-NEXT: s_mov_b32 exec_lo, s4
-; GFX1032_DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v9
; GFX1032_DPP-NEXT: s_mov_b32 s6, -1
+; GFX1032_DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v9
; GFX1032_DPP-NEXT: ; implicit-def: $vgpr9_vgpr10
; GFX1032_DPP-NEXT: s_and_saveexec_b32 s4, vcc_lo
; GFX1032_DPP-NEXT: s_cbranch_execz .LBB23_2
@@ -12415,8 +12413,8 @@ define amdgpu_kernel void @max_i64_varying(ptr addrspace(1) %out) {
; GFX1132_DPP-NEXT: v_writelane_b32 v2, s5, 16
; GFX1132_DPP-NEXT: v_writelane_b32 v1, s6, 16
; GFX1132_DPP-NEXT: s_mov_b32 exec_lo, s4
-; GFX1132_DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v9
; GFX1132_DPP-NEXT: s_mov_b32 s6, -1
+; GFX1132_DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v9
; GFX1132_DPP-NEXT: ; implicit-def: $vgpr9_vgpr10
; GFX1132_DPP-NEXT: s_and_saveexec_b32 s4, vcc_lo
; GFX1132_DPP-NEXT: s_cbranch_execz .LBB23_2
@@ -12923,8 +12921,8 @@ define amdgpu_kernel void @min_i32_varying(ptr addrspace(1) %out) {
; GFX1032_DPP-NEXT: s_or_saveexec_b32 s0, -1
; GFX1032_DPP-NEXT: v_writelane_b32 v3, s1, 16
; GFX1032_DPP-NEXT: s_mov_b32 exec_lo, s0
-; GFX1032_DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX1032_DPP-NEXT: s_mov_b32 s6, -1
+; GFX1032_DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX1032_DPP-NEXT: ; implicit-def: $vgpr0
; GFX1032_DPP-NEXT: s_and_saveexec_b32 s1, vcc_lo
; GFX1032_DPP-NEXT: s_cbranch_execz .LBB24_2
@@ -13034,9 +13032,8 @@ define amdgpu_kernel void @min_i32_varying(ptr addrspace(1) %out) {
; GFX1132_DPP-NEXT: s_or_saveexec_b32 s0, -1
; GFX1132_DPP-NEXT: v_writelane_b32 v3, s1, 16
; GFX1132_DPP-NEXT: s_mov_b32 exec_lo, s0
-; GFX1132_DPP-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX1132_DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX1132_DPP-NEXT: s_mov_b32 s6, -1
+; GFX1132_DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX1132_DPP-NEXT: ; implicit-def: $vgpr0
; GFX1132_DPP-NEXT: s_and_saveexec_b32 s1, vcc_lo
; GFX1132_DPP-NEXT: s_cbranch_execz .LBB24_2
@@ -14788,8 +14785,8 @@ define amdgpu_kernel void @umax_i32_varying(ptr addrspace(1) %out) {
; GFX1032_DPP-NEXT: s_or_saveexec_b32 s0, -1
; GFX1032_DPP-NEXT: v_writelane_b32 v3, s1, 16
; GFX1032_DPP-NEXT: s_mov_b32 exec_lo, s0
-; GFX1032_DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX1032_DPP-NEXT: s_mov_b32 s6, -1
+; GFX1032_DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX1032_DPP-NEXT: ; implicit-def: $vgpr0
; GFX1032_DPP-NEXT: s_and_saveexec_b32 s1, vcc_lo
; GFX1032_DPP-NEXT: s_cbranch_execz .LBB27_2
@@ -14899,8 +14896,8 @@ define amdgpu_kernel void @umax_i32_varying(ptr addrspace(1) %out) {
; GFX1132_DPP-NEXT: s_or_saveexec_b32 s0, -1
; GFX1132_DPP-NEXT: v_writelane_b32 v3, s1, 16
; GFX1132_DPP-NEXT: s_mov_b32 exec_lo, s0
-; GFX1132_DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX1132_DPP-NEXT: s_mov_b32 s6, -1
+; GFX1132_DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX1132_DPP-NEXT: ; implicit-def: $vgpr0
; GFX1132_DPP-NEXT: s_and_saveexec_b32 s1, vcc_lo
; GFX1132_DPP-NEXT: s_cbranch_execz .LBB27_2
@@ -15909,8 +15906,8 @@ define amdgpu_kernel void @umax_i64_varying(ptr addrspace(1) %out) {
; GFX1032_DPP-NEXT: v_writelane_b32 v2, s5, 16
; GFX1032_DPP-NEXT: v_writelane_b32 v1, s6, 16
; GFX1032_DPP-NEXT: s_mov_b32 exec_lo, s4
-; GFX1032_DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v9
; GFX1032_DPP-NEXT: s_mov_b32 s6, -1
+; GFX1032_DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v9
; GFX1032_DPP-NEXT: ; implicit-def: $vgpr9_vgpr10
; GFX1032_DPP-NEXT: s_and_saveexec_b32 s4, vcc_lo
; GFX1032_DPP-NEXT: s_cbranch_execz .LBB29_2
@@ -16125,8 +16122,8 @@ define amdgpu_kernel void @umax_i64_varying(ptr addrspace(1) %out) {
; GFX1132_DPP-NEXT: v_writelane_b32 v2, s5, 16
; GFX1132_DPP-NEXT: v_writelane_b32 v1, s6, 16
; GFX1132_DPP-NEXT: s_mov_b32 exec_lo, s4
-; GFX1132_DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v9
; GFX1132_DPP-NEXT: s_mov_b32 s6, -1
+; GFX1132_DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v9
; GFX1132_DPP-NEXT: ; implicit-def: $vgpr9_vgpr10
; GFX1132_DPP-NEXT: s_and_saveexec_b32 s4, vcc_lo
; GFX1132_DPP-NEXT: s_cbranch_execz .LBB29_2
@@ -16633,8 +16630,8 @@ define amdgpu_kernel void @umin_i32_varying(ptr addrspace(1) %out) {
; GFX1032_DPP-NEXT: s_or_saveexec_b32 s0, -1
; GFX1032_DPP-NEXT: v_writelane_b32 v3, s1, 16
; GFX1032_DPP-NEXT: s_mov_b32 exec_lo, s0
-; GFX1032_DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX1032_DPP-NEXT: s_mov_b32 s6, -1
+; GFX1032_DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX1032_DPP-NEXT: ; implicit-def: $vgpr0
; GFX1032_DPP-NEXT: s_and_saveexec_b32 s1, vcc_lo
; GFX1032_DPP-NEXT: s_cbranch_execz .LBB30_2
@@ -16744,9 +16741,8 @@ define amdgpu_kernel void @umin_i32_varying(ptr addrspace(1) %out) {
; GFX1132_DPP-NEXT: s_or_saveexec_b32 s0, -1
; GFX1132_DPP-NEXT: v_writelane_b32 v3, s1, 16
; GFX1132_DPP-NEXT: s_mov_b32 exec_lo, s0
-; GFX1132_DPP-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX1132_DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX1132_DPP-NEXT: s_mov_b32 s6, -1
+; GFX1132_DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX1132_DPP-NEXT: ; implicit-def: $vgpr0
; GFX1132_DPP-NEXT: s_and_saveexec_b32 s1, vcc_lo
; GFX1132_DPP-NEXT: s_cbranch_execz .LBB30_2
@@ -17754,8 +17750,8 @@ define amdgpu_kernel void @umin_i64_varying(ptr addrspace(1) %out) {
; GFX1032_DPP-NEXT: v_writelane_b32 v2, s5, 16
; GFX1032_DPP-NEXT: v_writelane_b32 v1, s6, 16
; GFX1032_DPP-NEXT: s_mov_b32 exec_lo, s4
-; GFX1032_DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v9
; GFX1032_DPP-NEXT: s_mov_b32 s6, -1
+; GFX1032_DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v9
; GFX1032_DPP-NEXT: ; implicit-def: $vgpr9_vgpr10
; GFX1032_DPP-NEXT: s_and_saveexec_b32 s4, vcc_lo
; GFX1032_DPP-NEXT: s_cbranch_execz .LBB32_2
@@ -17970,8 +17966,8 @@ define amdgpu_kernel void @umin_i64_varying(ptr addrspace(1) %out) {
; GFX1132_DPP-NEXT: v_writelane_b32 v2, s5, 16
; GFX1132_DPP-NEXT: v_writelane_b32 v1, s6, 16
; GFX1132_DPP-NEXT: s_mov_b32 exec_lo, s4
-; GFX1132_DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v9
; GFX1132_DPP-NEXT: s_mov_b32 s6, -1
+; GFX1132_DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v9
; GFX1132_DPP-NEXT: ; implicit-def: $vgpr9_vgpr10
; GFX1132_DPP-NEXT: s_and_saveexec_b32 s4, vcc_lo
; GFX1132_DPP-NEXT: s_cbranch_execz .LBB32_2
diff --git a/llvm/test/CodeGen/AMDGPU/expand-si-indirect.mir b/llvm/test/CodeGen/AMDGPU/expand-si-indirect.mir
index 4f8255d..c85d9f4 100644
--- a/llvm/test/CodeGen/AMDGPU/expand-si-indirect.mir
+++ b/llvm/test/CodeGen/AMDGPU/expand-si-indirect.mir
@@ -24,7 +24,7 @@ body: |
%0:sgpr_64 = COPY killed $sgpr0_sgpr1
%1:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
- %2:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0(p4), 36, 0
+ %2:sreg_64_xexec_xnull = S_LOAD_DWORDX2_IMM %0(p4), 36, 0
%3:sreg_32_xm0_xexec = S_LOAD_DWORD_IMM killed %0(p4), 44, 0
%4:sreg_32 = S_ADD_I32 %3, 1, implicit-def dead $scc
%5:vgpr_32 = V_MOV_B32_e32 1065353216, implicit $exec
diff --git a/llvm/test/CodeGen/AMDGPU/global-atomic-fadd.f32-no-rtn.ll b/llvm/test/CodeGen/AMDGPU/global-atomic-fadd.f32-no-rtn.ll
index d62ff37..8a7762f 100644
--- a/llvm/test/CodeGen/AMDGPU/global-atomic-fadd.f32-no-rtn.ll
+++ b/llvm/test/CodeGen/AMDGPU/global-atomic-fadd.f32-no-rtn.ll
@@ -43,7 +43,7 @@ define amdgpu_ps void @global_atomic_fadd_f32_saddr_no_rtn_atomicrmw(ptr addrspa
; GFX908-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
; GFX908-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr0
; GFX908-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY1]], %subreg.sub1
- ; GFX908-NEXT: [[COPY3:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE]]
+ ; GFX908-NEXT: [[COPY3:%[0-9]+]]:sreg_64_xexec_xnull = COPY [[REG_SEQUENCE]]
; GFX908-NEXT: [[SI_PS_LIVE:%[0-9]+]]:sreg_64 = SI_PS_LIVE
; GFX908-NEXT: [[SI_IF:%[0-9]+]]:sreg_64 = SI_IF killed [[SI_PS_LIVE]], %bb.4, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
; GFX908-NEXT: S_BRANCH %bb.1
@@ -105,7 +105,7 @@ define amdgpu_ps void @global_atomic_fadd_f32_saddr_no_rtn_atomicrmw(ptr addrspa
; GFX90A_GFX940-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
; GFX90A_GFX940-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr0
; GFX90A_GFX940-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY1]], %subreg.sub1
- ; GFX90A_GFX940-NEXT: [[COPY3:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE]]
+ ; GFX90A_GFX940-NEXT: [[COPY3:%[0-9]+]]:sreg_64_xexec_xnull = COPY [[REG_SEQUENCE]]
; GFX90A_GFX940-NEXT: [[SI_PS_LIVE:%[0-9]+]]:sreg_64 = SI_PS_LIVE
; GFX90A_GFX940-NEXT: [[SI_IF:%[0-9]+]]:sreg_64 = SI_IF killed [[SI_PS_LIVE]], %bb.4, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
; GFX90A_GFX940-NEXT: S_BRANCH %bb.1
@@ -167,7 +167,7 @@ define amdgpu_ps void @global_atomic_fadd_f32_saddr_no_rtn_atomicrmw(ptr addrspa
; GFX11_GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
; GFX11_GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr0
; GFX11_GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY1]], %subreg.sub1
- ; GFX11_GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE]]
+ ; GFX11_GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_64_xexec_xnull = COPY [[REG_SEQUENCE]]
; GFX11_GFX12-NEXT: [[SI_PS_LIVE:%[0-9]+]]:sreg_32 = SI_PS_LIVE
; GFX11_GFX12-NEXT: [[SI_IF:%[0-9]+]]:sreg_32 = SI_IF killed [[SI_PS_LIVE]], %bb.4, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
; GFX11_GFX12-NEXT: S_BRANCH %bb.1
diff --git a/llvm/test/CodeGen/AMDGPU/global-atomic-fadd.f32-rtn.ll b/llvm/test/CodeGen/AMDGPU/global-atomic-fadd.f32-rtn.ll
index 946ee9e..1fb34ab 100644
--- a/llvm/test/CodeGen/AMDGPU/global-atomic-fadd.f32-rtn.ll
+++ b/llvm/test/CodeGen/AMDGPU/global-atomic-fadd.f32-rtn.ll
@@ -44,7 +44,7 @@ define amdgpu_ps float @global_atomic_fadd_f32_saddr_rtn_atomicrmw(ptr addrspace
; GFX90A-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
; GFX90A-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr0
; GFX90A-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY1]], %subreg.sub1
- ; GFX90A-NEXT: [[COPY3:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE]]
+ ; GFX90A-NEXT: [[COPY3:%[0-9]+]]:sreg_64_xexec_xnull = COPY [[REG_SEQUENCE]]
; GFX90A-NEXT: [[SI_PS_LIVE:%[0-9]+]]:sreg_64 = SI_PS_LIVE
; GFX90A-NEXT: [[DEF:%[0-9]+]]:sgpr_32 = IMPLICIT_DEF
; GFX90A-NEXT: [[SI_IF:%[0-9]+]]:sreg_64 = SI_IF killed [[SI_PS_LIVE]], %bb.3, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
@@ -126,7 +126,7 @@ define amdgpu_ps float @global_atomic_fadd_f32_saddr_rtn_atomicrmw(ptr addrspace
; GFX940-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
; GFX940-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr0
; GFX940-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY1]], %subreg.sub1
- ; GFX940-NEXT: [[COPY3:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE]]
+ ; GFX940-NEXT: [[COPY3:%[0-9]+]]:sreg_64_xexec_xnull = COPY [[REG_SEQUENCE]]
; GFX940-NEXT: [[SI_PS_LIVE:%[0-9]+]]:sreg_64 = SI_PS_LIVE
; GFX940-NEXT: [[DEF:%[0-9]+]]:sgpr_32 = IMPLICIT_DEF
; GFX940-NEXT: [[SI_IF:%[0-9]+]]:sreg_64 = SI_IF killed [[SI_PS_LIVE]], %bb.3, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
@@ -208,7 +208,7 @@ define amdgpu_ps float @global_atomic_fadd_f32_saddr_rtn_atomicrmw(ptr addrspace
; GFX11-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
; GFX11-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr0
; GFX11-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY1]], %subreg.sub1
- ; GFX11-NEXT: [[COPY3:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE]]
+ ; GFX11-NEXT: [[COPY3:%[0-9]+]]:sreg_64_xexec_xnull = COPY [[REG_SEQUENCE]]
; GFX11-NEXT: [[SI_PS_LIVE:%[0-9]+]]:sreg_32 = SI_PS_LIVE
; GFX11-NEXT: [[DEF:%[0-9]+]]:sgpr_32 = IMPLICIT_DEF
; GFX11-NEXT: [[SI_IF:%[0-9]+]]:sreg_32 = SI_IF killed [[SI_PS_LIVE]], %bb.3, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
diff --git a/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fadd.ll b/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fadd.ll
index 2b18f47..c3a197c 100644
--- a/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fadd.ll
+++ b/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fadd.ll
@@ -1263,16 +1263,16 @@ define amdgpu_kernel void @global_atomic_fadd_uni_address_uni_value_one_as_scope
;
; GFX1032-LABEL: global_atomic_fadd_uni_address_uni_value_one_as_scope_unsafe_structfp:
; GFX1032: ; %bb.0:
-; GFX1032-NEXT: s_mov_b32 s0, exec_lo
; GFX1032-NEXT: s_mov_b32 s12, SCRATCH_RSRC_DWORD0
-; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v0, s0, 0
+; GFX1032-NEXT: s_mov_b32 s0, exec_lo
; GFX1032-NEXT: s_mov_b32 s13, SCRATCH_RSRC_DWORD1
; GFX1032-NEXT: s_mov_b32 s14, -1
+; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v0, s0, 0
; GFX1032-NEXT: s_mov_b32 s15, 0x31c16000
; GFX1032-NEXT: s_add_u32 s12, s12, s9
-; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX1032-NEXT: s_addc_u32 s13, s13, 0
; GFX1032-NEXT: s_mov_b32 s4, 0
+; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX1032-NEXT: s_and_saveexec_b32 s1, vcc_lo
; GFX1032-NEXT: s_cbranch_execz .LBB2_3
; GFX1032-NEXT: ; %bb.1:
@@ -1483,16 +1483,16 @@ define amdgpu_kernel void @global_atomic_fadd_uni_address_uni_value_one_as_scope
;
; GFX1032-DPP-LABEL: global_atomic_fadd_uni_address_uni_value_one_as_scope_unsafe_structfp:
; GFX1032-DPP: ; %bb.0:
-; GFX1032-DPP-NEXT: s_mov_b32 s0, exec_lo
; GFX1032-DPP-NEXT: s_mov_b32 s12, SCRATCH_RSRC_DWORD0
-; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s0, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s0, exec_lo
; GFX1032-DPP-NEXT: s_mov_b32 s13, SCRATCH_RSRC_DWORD1
; GFX1032-DPP-NEXT: s_mov_b32 s14, -1
+; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s0, 0
; GFX1032-DPP-NEXT: s_mov_b32 s15, 0x31c16000
; GFX1032-DPP-NEXT: s_add_u32 s12, s12, s9
-; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX1032-DPP-NEXT: s_addc_u32 s13, s13, 0
; GFX1032-DPP-NEXT: s_mov_b32 s4, 0
+; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX1032-DPP-NEXT: s_and_saveexec_b32 s1, vcc_lo
; GFX1032-DPP-NEXT: s_cbranch_execz .LBB2_3
; GFX1032-DPP-NEXT: ; %bb.1:
@@ -2471,16 +2471,16 @@ define amdgpu_kernel void @global_atomic_fadd_uni_address_uni_value_agent_scope_
;
; GFX1032-LABEL: global_atomic_fadd_uni_address_uni_value_agent_scope_strictfp:
; GFX1032: ; %bb.0:
-; GFX1032-NEXT: s_mov_b32 s0, exec_lo
; GFX1032-NEXT: s_mov_b32 s12, SCRATCH_RSRC_DWORD0
-; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v0, s0, 0
+; GFX1032-NEXT: s_mov_b32 s0, exec_lo
; GFX1032-NEXT: s_mov_b32 s13, SCRATCH_RSRC_DWORD1
; GFX1032-NEXT: s_mov_b32 s14, -1
+; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v0, s0, 0
; GFX1032-NEXT: s_mov_b32 s15, 0x31c16000
; GFX1032-NEXT: s_add_u32 s12, s12, s9
-; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX1032-NEXT: s_addc_u32 s13, s13, 0
; GFX1032-NEXT: s_mov_b32 s4, 0
+; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX1032-NEXT: s_and_saveexec_b32 s1, vcc_lo
; GFX1032-NEXT: s_cbranch_execz .LBB4_3
; GFX1032-NEXT: ; %bb.1:
@@ -2721,16 +2721,16 @@ define amdgpu_kernel void @global_atomic_fadd_uni_address_uni_value_agent_scope_
;
; GFX1032-DPP-LABEL: global_atomic_fadd_uni_address_uni_value_agent_scope_strictfp:
; GFX1032-DPP: ; %bb.0:
-; GFX1032-DPP-NEXT: s_mov_b32 s0, exec_lo
; GFX1032-DPP-NEXT: s_mov_b32 s12, SCRATCH_RSRC_DWORD0
-; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s0, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s0, exec_lo
; GFX1032-DPP-NEXT: s_mov_b32 s13, SCRATCH_RSRC_DWORD1
; GFX1032-DPP-NEXT: s_mov_b32 s14, -1
+; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s0, 0
; GFX1032-DPP-NEXT: s_mov_b32 s15, 0x31c16000
; GFX1032-DPP-NEXT: s_add_u32 s12, s12, s9
-; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX1032-DPP-NEXT: s_addc_u32 s13, s13, 0
; GFX1032-DPP-NEXT: s_mov_b32 s4, 0
+; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX1032-DPP-NEXT: s_and_saveexec_b32 s1, vcc_lo
; GFX1032-DPP-NEXT: s_cbranch_execz .LBB4_3
; GFX1032-DPP-NEXT: ; %bb.1:
@@ -4503,16 +4503,16 @@ define amdgpu_kernel void @global_atomic_fadd_uni_address_uni_value_default_scop
;
; GFX1032-LABEL: global_atomic_fadd_uni_address_uni_value_default_scope_strictfp:
; GFX1032: ; %bb.0:
-; GFX1032-NEXT: s_mov_b32 s0, exec_lo
; GFX1032-NEXT: s_mov_b32 s12, SCRATCH_RSRC_DWORD0
-; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v0, s0, 0
+; GFX1032-NEXT: s_mov_b32 s0, exec_lo
; GFX1032-NEXT: s_mov_b32 s13, SCRATCH_RSRC_DWORD1
; GFX1032-NEXT: s_mov_b32 s14, -1
+; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v0, s0, 0
; GFX1032-NEXT: s_mov_b32 s15, 0x31c16000
; GFX1032-NEXT: s_add_u32 s12, s12, s9
-; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX1032-NEXT: s_addc_u32 s13, s13, 0
; GFX1032-NEXT: s_mov_b32 s4, 0
+; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX1032-NEXT: s_and_saveexec_b32 s1, vcc_lo
; GFX1032-NEXT: s_cbranch_execz .LBB7_3
; GFX1032-NEXT: ; %bb.1:
@@ -4753,16 +4753,16 @@ define amdgpu_kernel void @global_atomic_fadd_uni_address_uni_value_default_scop
;
; GFX1032-DPP-LABEL: global_atomic_fadd_uni_address_uni_value_default_scope_strictfp:
; GFX1032-DPP: ; %bb.0:
-; GFX1032-DPP-NEXT: s_mov_b32 s0, exec_lo
; GFX1032-DPP-NEXT: s_mov_b32 s12, SCRATCH_RSRC_DWORD0
-; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s0, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s0, exec_lo
; GFX1032-DPP-NEXT: s_mov_b32 s13, SCRATCH_RSRC_DWORD1
; GFX1032-DPP-NEXT: s_mov_b32 s14, -1
+; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s0, 0
; GFX1032-DPP-NEXT: s_mov_b32 s15, 0x31c16000
; GFX1032-DPP-NEXT: s_add_u32 s12, s12, s9
-; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX1032-DPP-NEXT: s_addc_u32 s13, s13, 0
; GFX1032-DPP-NEXT: s_mov_b32 s4, 0
+; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX1032-DPP-NEXT: s_and_saveexec_b32 s1, vcc_lo
; GFX1032-DPP-NEXT: s_cbranch_execz .LBB7_3
; GFX1032-DPP-NEXT: ; %bb.1:
@@ -5929,19 +5929,19 @@ define amdgpu_kernel void @global_atomic_fadd_double_uni_address_uni_value_agent
;
; GFX1032-LABEL: global_atomic_fadd_double_uni_address_uni_value_agent_scope_unsafe:
; GFX1032: ; %bb.0:
+; GFX1032-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
; GFX1032-NEXT: s_mov_b32 s33, s8
; GFX1032-NEXT: s_mov_b32 s8, exec_lo
-; GFX1032-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v3, s8, 0
; GFX1032-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
; GFX1032-NEXT: s_mov_b32 s50, -1
+; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v3, s8, 0
; GFX1032-NEXT: s_mov_b32 s51, 0x31c16000
; GFX1032-NEXT: s_add_u32 s48, s48, s9
-; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v3
; GFX1032-NEXT: s_addc_u32 s49, s49, 0
; GFX1032-NEXT: s_mov_b64 s[38:39], s[0:1]
; GFX1032-NEXT: s_mov_b32 s44, 0
; GFX1032-NEXT: s_movk_i32 s32, 0x400
+; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v3
; GFX1032-NEXT: s_and_saveexec_b32 s0, vcc_lo
; GFX1032-NEXT: s_cbranch_execz .LBB9_3
; GFX1032-NEXT: ; %bb.1:
@@ -6378,19 +6378,19 @@ define amdgpu_kernel void @global_atomic_fadd_double_uni_address_uni_value_agent
;
; GFX1032-DPP-LABEL: global_atomic_fadd_double_uni_address_uni_value_agent_scope_unsafe:
; GFX1032-DPP: ; %bb.0:
+; GFX1032-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
; GFX1032-DPP-NEXT: s_mov_b32 s33, s8
; GFX1032-DPP-NEXT: s_mov_b32 s8, exec_lo
-; GFX1032-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v3, s8, 0
; GFX1032-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
; GFX1032-DPP-NEXT: s_mov_b32 s50, -1
+; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v3, s8, 0
; GFX1032-DPP-NEXT: s_mov_b32 s51, 0x31c16000
; GFX1032-DPP-NEXT: s_add_u32 s48, s48, s9
-; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v3
; GFX1032-DPP-NEXT: s_addc_u32 s49, s49, 0
; GFX1032-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
; GFX1032-DPP-NEXT: s_mov_b32 s44, 0
; GFX1032-DPP-NEXT: s_movk_i32 s32, 0x400
+; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v3
; GFX1032-DPP-NEXT: s_and_saveexec_b32 s0, vcc_lo
; GFX1032-DPP-NEXT: s_cbranch_execz .LBB9_3
; GFX1032-DPP-NEXT: ; %bb.1:
@@ -7595,8 +7595,8 @@ define amdgpu_kernel void @global_atomic_fadd_double_uni_address_div_value_agent
; GFX1032-DPP-NEXT: v_permlanex16_b32 v10, v8, 0, 0
; GFX1032-DPP-NEXT: v_add_f64 v[8:9], v[8:9], v[10:11]
; GFX1032-DPP-NEXT: s_mov_b32 exec_lo, s0
-; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
; GFX1032-DPP-NEXT: v_mov_b32_e32 v41, v8
+; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
; GFX1032-DPP-NEXT: v_mov_b32_e32 v42, v9
; GFX1032-DPP-NEXT: s_mov_b32 s44, 0
; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
@@ -8020,16 +8020,16 @@ define amdgpu_kernel void @global_atomic_fadd_double_uni_address_uni_value_one_a
;
; GFX1032-LABEL: global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
; GFX1032: ; %bb.0:
-; GFX1032-NEXT: s_mov_b32 s0, exec_lo
; GFX1032-NEXT: s_mov_b32 s12, SCRATCH_RSRC_DWORD0
-; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v0, s0, 0
+; GFX1032-NEXT: s_mov_b32 s0, exec_lo
; GFX1032-NEXT: s_mov_b32 s13, SCRATCH_RSRC_DWORD1
; GFX1032-NEXT: s_mov_b32 s14, -1
+; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v0, s0, 0
; GFX1032-NEXT: s_mov_b32 s15, 0x31c16000
; GFX1032-NEXT: s_add_u32 s12, s12, s9
-; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX1032-NEXT: s_addc_u32 s13, s13, 0
; GFX1032-NEXT: s_mov_b32 s4, 0
+; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX1032-NEXT: s_and_saveexec_b32 s1, vcc_lo
; GFX1032-NEXT: s_cbranch_execz .LBB11_3
; GFX1032-NEXT: ; %bb.1:
@@ -8277,16 +8277,16 @@ define amdgpu_kernel void @global_atomic_fadd_double_uni_address_uni_value_one_a
;
; GFX1032-DPP-LABEL: global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
; GFX1032-DPP: ; %bb.0:
-; GFX1032-DPP-NEXT: s_mov_b32 s0, exec_lo
; GFX1032-DPP-NEXT: s_mov_b32 s12, SCRATCH_RSRC_DWORD0
-; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s0, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s0, exec_lo
; GFX1032-DPP-NEXT: s_mov_b32 s13, SCRATCH_RSRC_DWORD1
; GFX1032-DPP-NEXT: s_mov_b32 s14, -1
+; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s0, 0
; GFX1032-DPP-NEXT: s_mov_b32 s15, 0x31c16000
; GFX1032-DPP-NEXT: s_add_u32 s12, s12, s9
-; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX1032-DPP-NEXT: s_addc_u32 s13, s13, 0
; GFX1032-DPP-NEXT: s_mov_b32 s4, 0
+; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX1032-DPP-NEXT: s_and_saveexec_b32 s1, vcc_lo
; GFX1032-DPP-NEXT: s_cbranch_execz .LBB11_3
; GFX1032-DPP-NEXT: ; %bb.1:
@@ -9107,8 +9107,8 @@ define amdgpu_kernel void @global_atomic_fadd_double_uni_address_div_value_one_a
; GFX1032-DPP-NEXT: v_permlanex16_b32 v5, v3, 0, 0
; GFX1032-DPP-NEXT: v_add_f64 v[3:4], v[3:4], v[5:6]
; GFX1032-DPP-NEXT: s_mov_b32 exec_lo, s0
-; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
; GFX1032-DPP-NEXT: v_mov_b32_e32 v0, v3
+; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, v4
; GFX1032-DPP-NEXT: s_mov_b32 s2, 0
; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v2
@@ -9444,16 +9444,16 @@ define amdgpu_kernel void @global_atomic_fadd_double_uni_address_uni_value_agent
;
; GFX1032-LABEL: global_atomic_fadd_double_uni_address_uni_value_agent_scope_strictfp:
; GFX1032: ; %bb.0:
-; GFX1032-NEXT: s_mov_b32 s0, exec_lo
; GFX1032-NEXT: s_mov_b32 s12, SCRATCH_RSRC_DWORD0
-; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v0, s0, 0
+; GFX1032-NEXT: s_mov_b32 s0, exec_lo
; GFX1032-NEXT: s_mov_b32 s13, SCRATCH_RSRC_DWORD1
; GFX1032-NEXT: s_mov_b32 s14, -1
+; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v0, s0, 0
; GFX1032-NEXT: s_mov_b32 s15, 0x31c16000
; GFX1032-NEXT: s_add_u32 s12, s12, s9
-; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX1032-NEXT: s_addc_u32 s13, s13, 0
; GFX1032-NEXT: s_mov_b32 s4, 0
+; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX1032-NEXT: s_and_saveexec_b32 s1, vcc_lo
; GFX1032-NEXT: s_cbranch_execz .LBB13_3
; GFX1032-NEXT: ; %bb.1:
@@ -9701,16 +9701,16 @@ define amdgpu_kernel void @global_atomic_fadd_double_uni_address_uni_value_agent
;
; GFX1032-DPP-LABEL: global_atomic_fadd_double_uni_address_uni_value_agent_scope_strictfp:
; GFX1032-DPP: ; %bb.0:
-; GFX1032-DPP-NEXT: s_mov_b32 s0, exec_lo
; GFX1032-DPP-NEXT: s_mov_b32 s12, SCRATCH_RSRC_DWORD0
-; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s0, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s0, exec_lo
; GFX1032-DPP-NEXT: s_mov_b32 s13, SCRATCH_RSRC_DWORD1
; GFX1032-DPP-NEXT: s_mov_b32 s14, -1
+; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s0, 0
; GFX1032-DPP-NEXT: s_mov_b32 s15, 0x31c16000
; GFX1032-DPP-NEXT: s_add_u32 s12, s12, s9
-; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX1032-DPP-NEXT: s_addc_u32 s13, s13, 0
; GFX1032-DPP-NEXT: s_mov_b32 s4, 0
+; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX1032-DPP-NEXT: s_and_saveexec_b32 s1, vcc_lo
; GFX1032-DPP-NEXT: s_cbranch_execz .LBB13_3
; GFX1032-DPP-NEXT: ; %bb.1:
@@ -10531,8 +10531,8 @@ define amdgpu_kernel void @global_atomic_fadd_double_uni_address_div_value_agent
; GFX1032-DPP-NEXT: v_permlanex16_b32 v5, v3, 0, 0
; GFX1032-DPP-NEXT: v_add_f64 v[3:4], v[3:4], v[5:6]
; GFX1032-DPP-NEXT: s_mov_b32 exec_lo, s0
-; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
; GFX1032-DPP-NEXT: v_mov_b32_e32 v0, v3
+; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, v4
; GFX1032-DPP-NEXT: s_mov_b32 s2, 0
; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v2
@@ -11437,8 +11437,8 @@ define amdgpu_kernel void @global_atomic_fadd_double_uni_address_div_value_agent
; GFX1032-DPP-NEXT: v_permlanex16_b32 v5, v3, 0, 0
; GFX1032-DPP-NEXT: v_add_f64 v[3:4], v[3:4], v[5:6]
; GFX1032-DPP-NEXT: s_mov_b32 exec_lo, s0
-; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
; GFX1032-DPP-NEXT: v_mov_b32_e32 v0, v3
+; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, v4
; GFX1032-DPP-NEXT: s_mov_b32 s2, 0
; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v2
@@ -13574,8 +13574,8 @@ define amdgpu_kernel void @global_atomic_fadd_double_uni_address_div_value_defau
; GFX1032-DPP-NEXT: v_permlanex16_b32 v10, v8, 0, 0
; GFX1032-DPP-NEXT: v_add_f64 v[8:9], v[8:9], v[10:11]
; GFX1032-DPP-NEXT: s_mov_b32 exec_lo, s0
-; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
; GFX1032-DPP-NEXT: v_mov_b32_e32 v41, v8
+; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
; GFX1032-DPP-NEXT: v_mov_b32_e32 v42, v9
; GFX1032-DPP-NEXT: s_mov_b32 s44, 0
; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
diff --git a/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fmax.ll b/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fmax.ll
index e3144ae..69c6adf 100644
--- a/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fmax.ll
+++ b/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fmax.ll
@@ -3348,17 +3348,17 @@ define amdgpu_kernel void @global_atomic_fmax_double_uni_address_uni_value_agent
;
; GFX1032-LABEL: global_atomic_fmax_double_uni_address_uni_value_agent_scope_unsafe:
; GFX1032: ; %bb.0:
-; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v3, exec_lo, 0
; GFX1032-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
; GFX1032-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
; GFX1032-NEXT: s_mov_b32 s50, -1
+; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v3, exec_lo, 0
; GFX1032-NEXT: s_mov_b32 s51, 0x31c16000
-; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v3
; GFX1032-NEXT: s_add_u32 s48, s48, s9
; GFX1032-NEXT: s_addc_u32 s49, s49, 0
; GFX1032-NEXT: s_mov_b64 s[38:39], s[0:1]
; GFX1032-NEXT: s_mov_b32 s44, 0
; GFX1032-NEXT: s_movk_i32 s32, 0x400
+; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v3
; GFX1032-NEXT: s_and_saveexec_b32 s0, vcc_lo
; GFX1032-NEXT: s_cbranch_execz .LBB6_3
; GFX1032-NEXT: ; %bb.1:
@@ -3778,17 +3778,17 @@ define amdgpu_kernel void @global_atomic_fmax_double_uni_address_uni_value_agent
;
; GFX1032-DPP-LABEL: global_atomic_fmax_double_uni_address_uni_value_agent_scope_unsafe:
; GFX1032-DPP: ; %bb.0:
-; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v3, exec_lo, 0
; GFX1032-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
; GFX1032-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
; GFX1032-DPP-NEXT: s_mov_b32 s50, -1
+; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v3, exec_lo, 0
; GFX1032-DPP-NEXT: s_mov_b32 s51, 0x31c16000
-; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v3
; GFX1032-DPP-NEXT: s_add_u32 s48, s48, s9
; GFX1032-DPP-NEXT: s_addc_u32 s49, s49, 0
; GFX1032-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
; GFX1032-DPP-NEXT: s_mov_b32 s44, 0
; GFX1032-DPP-NEXT: s_movk_i32 s32, 0x400
+; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v3
; GFX1032-DPP-NEXT: s_and_saveexec_b32 s0, vcc_lo
; GFX1032-DPP-NEXT: s_cbranch_execz .LBB6_3
; GFX1032-DPP-NEXT: ; %bb.1:
@@ -5038,8 +5038,8 @@ define amdgpu_kernel void @global_atomic_fmax_double_uni_address_div_value_agent
; GFX1032-DPP-NEXT: v_max_f64 v[10:11], v[10:11], v[10:11]
; GFX1032-DPP-NEXT: v_max_f64 v[8:9], v[8:9], v[10:11]
; GFX1032-DPP-NEXT: s_mov_b32 exec_lo, s0
-; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, v8
+; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, v9
; GFX1032-DPP-NEXT: s_mov_b32 s44, 0
; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
@@ -6403,8 +6403,8 @@ define amdgpu_kernel void @global_atomic_fmax_double_uni_address_div_value_one_a
; GFX1032-DPP-NEXT: v_max_f64 v[5:6], v[5:6], v[5:6]
; GFX1032-DPP-NEXT: v_max_f64 v[3:4], v[3:4], v[5:6]
; GFX1032-DPP-NEXT: s_mov_b32 exec_lo, s0
-; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
; GFX1032-DPP-NEXT: v_mov_b32_e32 v0, v3
+; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, v4
; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v2
; GFX1032-DPP-NEXT: s_and_saveexec_b32 s0, vcc_lo
@@ -6844,17 +6844,17 @@ define amdgpu_kernel void @global_atomic_fmax_double_uni_address_uni_value_defau
;
; GFX1032-LABEL: global_atomic_fmax_double_uni_address_uni_value_default_scope_unsafe:
; GFX1032: ; %bb.0:
-; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v3, exec_lo, 0
; GFX1032-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
; GFX1032-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
; GFX1032-NEXT: s_mov_b32 s50, -1
+; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v3, exec_lo, 0
; GFX1032-NEXT: s_mov_b32 s51, 0x31c16000
-; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v3
; GFX1032-NEXT: s_add_u32 s48, s48, s9
; GFX1032-NEXT: s_addc_u32 s49, s49, 0
; GFX1032-NEXT: s_mov_b64 s[38:39], s[0:1]
; GFX1032-NEXT: s_mov_b32 s44, 0
; GFX1032-NEXT: s_movk_i32 s32, 0x400
+; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v3
; GFX1032-NEXT: s_and_saveexec_b32 s0, vcc_lo
; GFX1032-NEXT: s_cbranch_execz .LBB10_3
; GFX1032-NEXT: ; %bb.1:
@@ -7274,17 +7274,17 @@ define amdgpu_kernel void @global_atomic_fmax_double_uni_address_uni_value_defau
;
; GFX1032-DPP-LABEL: global_atomic_fmax_double_uni_address_uni_value_default_scope_unsafe:
; GFX1032-DPP: ; %bb.0:
-; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v3, exec_lo, 0
; GFX1032-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
; GFX1032-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
; GFX1032-DPP-NEXT: s_mov_b32 s50, -1
+; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v3, exec_lo, 0
; GFX1032-DPP-NEXT: s_mov_b32 s51, 0x31c16000
-; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v3
; GFX1032-DPP-NEXT: s_add_u32 s48, s48, s9
; GFX1032-DPP-NEXT: s_addc_u32 s49, s49, 0
; GFX1032-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
; GFX1032-DPP-NEXT: s_mov_b32 s44, 0
; GFX1032-DPP-NEXT: s_movk_i32 s32, 0x400
+; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v3
; GFX1032-DPP-NEXT: s_and_saveexec_b32 s0, vcc_lo
; GFX1032-DPP-NEXT: s_cbranch_execz .LBB10_3
; GFX1032-DPP-NEXT: ; %bb.1:
@@ -8534,8 +8534,8 @@ define amdgpu_kernel void @global_atomic_fmax_double_uni_address_div_value_defau
; GFX1032-DPP-NEXT: v_max_f64 v[10:11], v[10:11], v[10:11]
; GFX1032-DPP-NEXT: v_max_f64 v[8:9], v[8:9], v[10:11]
; GFX1032-DPP-NEXT: s_mov_b32 exec_lo, s0
-; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, v8
+; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, v9
; GFX1032-DPP-NEXT: s_mov_b32 s44, 0
; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
diff --git a/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fmin.ll b/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fmin.ll
index ddc1031..b7890f3 100644
--- a/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fmin.ll
+++ b/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fmin.ll
@@ -3348,17 +3348,17 @@ define amdgpu_kernel void @global_atomic_fmin_double_uni_address_uni_value_agent
;
; GFX1032-LABEL: global_atomic_fmin_double_uni_address_uni_value_agent_scope_unsafe:
; GFX1032: ; %bb.0:
-; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v3, exec_lo, 0
; GFX1032-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
; GFX1032-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
; GFX1032-NEXT: s_mov_b32 s50, -1
+; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v3, exec_lo, 0
; GFX1032-NEXT: s_mov_b32 s51, 0x31c16000
-; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v3
; GFX1032-NEXT: s_add_u32 s48, s48, s9
; GFX1032-NEXT: s_addc_u32 s49, s49, 0
; GFX1032-NEXT: s_mov_b64 s[38:39], s[0:1]
; GFX1032-NEXT: s_mov_b32 s44, 0
; GFX1032-NEXT: s_movk_i32 s32, 0x400
+; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v3
; GFX1032-NEXT: s_and_saveexec_b32 s0, vcc_lo
; GFX1032-NEXT: s_cbranch_execz .LBB6_3
; GFX1032-NEXT: ; %bb.1:
@@ -3778,17 +3778,17 @@ define amdgpu_kernel void @global_atomic_fmin_double_uni_address_uni_value_agent
;
; GFX1032-DPP-LABEL: global_atomic_fmin_double_uni_address_uni_value_agent_scope_unsafe:
; GFX1032-DPP: ; %bb.0:
-; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v3, exec_lo, 0
; GFX1032-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
; GFX1032-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
; GFX1032-DPP-NEXT: s_mov_b32 s50, -1
+; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v3, exec_lo, 0
; GFX1032-DPP-NEXT: s_mov_b32 s51, 0x31c16000
-; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v3
; GFX1032-DPP-NEXT: s_add_u32 s48, s48, s9
; GFX1032-DPP-NEXT: s_addc_u32 s49, s49, 0
; GFX1032-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
; GFX1032-DPP-NEXT: s_mov_b32 s44, 0
; GFX1032-DPP-NEXT: s_movk_i32 s32, 0x400
+; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v3
; GFX1032-DPP-NEXT: s_and_saveexec_b32 s0, vcc_lo
; GFX1032-DPP-NEXT: s_cbranch_execz .LBB6_3
; GFX1032-DPP-NEXT: ; %bb.1:
@@ -5038,8 +5038,8 @@ define amdgpu_kernel void @global_atomic_fmin_double_uni_address_div_value_agent
; GFX1032-DPP-NEXT: v_max_f64 v[10:11], v[10:11], v[10:11]
; GFX1032-DPP-NEXT: v_min_f64 v[8:9], v[8:9], v[10:11]
; GFX1032-DPP-NEXT: s_mov_b32 exec_lo, s0
-; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, v8
+; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, v9
; GFX1032-DPP-NEXT: s_mov_b32 s44, 0
; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
@@ -6403,8 +6403,8 @@ define amdgpu_kernel void @global_atomic_fmin_double_uni_address_div_value_one_a
; GFX1032-DPP-NEXT: v_max_f64 v[5:6], v[5:6], v[5:6]
; GFX1032-DPP-NEXT: v_min_f64 v[3:4], v[3:4], v[5:6]
; GFX1032-DPP-NEXT: s_mov_b32 exec_lo, s0
-; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
; GFX1032-DPP-NEXT: v_mov_b32_e32 v0, v3
+; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, v4
; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v2
; GFX1032-DPP-NEXT: s_and_saveexec_b32 s0, vcc_lo
@@ -6844,17 +6844,17 @@ define amdgpu_kernel void @global_atomic_fmin_double_uni_address_uni_value_defau
;
; GFX1032-LABEL: global_atomic_fmin_double_uni_address_uni_value_default_scope_unsafe:
; GFX1032: ; %bb.0:
-; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v3, exec_lo, 0
; GFX1032-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
; GFX1032-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
; GFX1032-NEXT: s_mov_b32 s50, -1
+; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v3, exec_lo, 0
; GFX1032-NEXT: s_mov_b32 s51, 0x31c16000
-; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v3
; GFX1032-NEXT: s_add_u32 s48, s48, s9
; GFX1032-NEXT: s_addc_u32 s49, s49, 0
; GFX1032-NEXT: s_mov_b64 s[38:39], s[0:1]
; GFX1032-NEXT: s_mov_b32 s44, 0
; GFX1032-NEXT: s_movk_i32 s32, 0x400
+; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v3
; GFX1032-NEXT: s_and_saveexec_b32 s0, vcc_lo
; GFX1032-NEXT: s_cbranch_execz .LBB10_3
; GFX1032-NEXT: ; %bb.1:
@@ -7274,17 +7274,17 @@ define amdgpu_kernel void @global_atomic_fmin_double_uni_address_uni_value_defau
;
; GFX1032-DPP-LABEL: global_atomic_fmin_double_uni_address_uni_value_default_scope_unsafe:
; GFX1032-DPP: ; %bb.0:
-; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v3, exec_lo, 0
; GFX1032-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
; GFX1032-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
; GFX1032-DPP-NEXT: s_mov_b32 s50, -1
+; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v3, exec_lo, 0
; GFX1032-DPP-NEXT: s_mov_b32 s51, 0x31c16000
-; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v3
; GFX1032-DPP-NEXT: s_add_u32 s48, s48, s9
; GFX1032-DPP-NEXT: s_addc_u32 s49, s49, 0
; GFX1032-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
; GFX1032-DPP-NEXT: s_mov_b32 s44, 0
; GFX1032-DPP-NEXT: s_movk_i32 s32, 0x400
+; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v3
; GFX1032-DPP-NEXT: s_and_saveexec_b32 s0, vcc_lo
; GFX1032-DPP-NEXT: s_cbranch_execz .LBB10_3
; GFX1032-DPP-NEXT: ; %bb.1:
@@ -8534,8 +8534,8 @@ define amdgpu_kernel void @global_atomic_fmin_double_uni_address_div_value_defau
; GFX1032-DPP-NEXT: v_max_f64 v[10:11], v[10:11], v[10:11]
; GFX1032-DPP-NEXT: v_min_f64 v[8:9], v[8:9], v[10:11]
; GFX1032-DPP-NEXT: s_mov_b32 exec_lo, s0
-; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, v8
+; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, v9
; GFX1032-DPP-NEXT: s_mov_b32 s44, 0
; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
diff --git a/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fsub.ll b/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fsub.ll
index f353edf..fcd5d0d 100644
--- a/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fsub.ll
+++ b/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fsub.ll
@@ -1367,16 +1367,16 @@ define amdgpu_kernel void @global_atomic_fsub_uni_address_uni_value_one_as_scope
;
; GFX1032-LABEL: global_atomic_fsub_uni_address_uni_value_one_as_scope_unsafe_structfp:
; GFX1032: ; %bb.0:
-; GFX1032-NEXT: s_mov_b32 s0, exec_lo
; GFX1032-NEXT: s_mov_b32 s12, SCRATCH_RSRC_DWORD0
-; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v0, s0, 0
+; GFX1032-NEXT: s_mov_b32 s0, exec_lo
; GFX1032-NEXT: s_mov_b32 s13, SCRATCH_RSRC_DWORD1
; GFX1032-NEXT: s_mov_b32 s14, -1
+; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v0, s0, 0
; GFX1032-NEXT: s_mov_b32 s15, 0x31c16000
; GFX1032-NEXT: s_add_u32 s12, s12, s9
-; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX1032-NEXT: s_addc_u32 s13, s13, 0
; GFX1032-NEXT: s_mov_b32 s4, 0
+; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX1032-NEXT: s_and_saveexec_b32 s1, vcc_lo
; GFX1032-NEXT: s_cbranch_execz .LBB2_3
; GFX1032-NEXT: ; %bb.1:
@@ -1617,16 +1617,16 @@ define amdgpu_kernel void @global_atomic_fsub_uni_address_uni_value_one_as_scope
;
; GFX1032-DPP-LABEL: global_atomic_fsub_uni_address_uni_value_one_as_scope_unsafe_structfp:
; GFX1032-DPP: ; %bb.0:
-; GFX1032-DPP-NEXT: s_mov_b32 s0, exec_lo
; GFX1032-DPP-NEXT: s_mov_b32 s12, SCRATCH_RSRC_DWORD0
-; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s0, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s0, exec_lo
; GFX1032-DPP-NEXT: s_mov_b32 s13, SCRATCH_RSRC_DWORD1
; GFX1032-DPP-NEXT: s_mov_b32 s14, -1
+; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s0, 0
; GFX1032-DPP-NEXT: s_mov_b32 s15, 0x31c16000
; GFX1032-DPP-NEXT: s_add_u32 s12, s12, s9
-; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX1032-DPP-NEXT: s_addc_u32 s13, s13, 0
; GFX1032-DPP-NEXT: s_mov_b32 s4, 0
+; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX1032-DPP-NEXT: s_and_saveexec_b32 s1, vcc_lo
; GFX1032-DPP-NEXT: s_cbranch_execz .LBB2_3
; GFX1032-DPP-NEXT: ; %bb.1:
@@ -2687,16 +2687,16 @@ define amdgpu_kernel void @global_atomic_fsub_uni_address_uni_value_agent_scope_
;
; GFX1032-LABEL: global_atomic_fsub_uni_address_uni_value_agent_scope_strictfp:
; GFX1032: ; %bb.0:
-; GFX1032-NEXT: s_mov_b32 s0, exec_lo
; GFX1032-NEXT: s_mov_b32 s12, SCRATCH_RSRC_DWORD0
-; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v0, s0, 0
+; GFX1032-NEXT: s_mov_b32 s0, exec_lo
; GFX1032-NEXT: s_mov_b32 s13, SCRATCH_RSRC_DWORD1
; GFX1032-NEXT: s_mov_b32 s14, -1
+; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v0, s0, 0
; GFX1032-NEXT: s_mov_b32 s15, 0x31c16000
; GFX1032-NEXT: s_add_u32 s12, s12, s9
-; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX1032-NEXT: s_addc_u32 s13, s13, 0
; GFX1032-NEXT: s_mov_b32 s4, 0
+; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX1032-NEXT: s_and_saveexec_b32 s1, vcc_lo
; GFX1032-NEXT: s_cbranch_execz .LBB4_3
; GFX1032-NEXT: ; %bb.1:
@@ -2937,16 +2937,16 @@ define amdgpu_kernel void @global_atomic_fsub_uni_address_uni_value_agent_scope_
;
; GFX1032-DPP-LABEL: global_atomic_fsub_uni_address_uni_value_agent_scope_strictfp:
; GFX1032-DPP: ; %bb.0:
-; GFX1032-DPP-NEXT: s_mov_b32 s0, exec_lo
; GFX1032-DPP-NEXT: s_mov_b32 s12, SCRATCH_RSRC_DWORD0
-; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s0, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s0, exec_lo
; GFX1032-DPP-NEXT: s_mov_b32 s13, SCRATCH_RSRC_DWORD1
; GFX1032-DPP-NEXT: s_mov_b32 s14, -1
+; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s0, 0
; GFX1032-DPP-NEXT: s_mov_b32 s15, 0x31c16000
; GFX1032-DPP-NEXT: s_add_u32 s12, s12, s9
-; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX1032-DPP-NEXT: s_addc_u32 s13, s13, 0
; GFX1032-DPP-NEXT: s_mov_b32 s4, 0
+; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX1032-DPP-NEXT: s_and_saveexec_b32 s1, vcc_lo
; GFX1032-DPP-NEXT: s_cbranch_execz .LBB4_3
; GFX1032-DPP-NEXT: ; %bb.1:
@@ -4823,16 +4823,16 @@ define amdgpu_kernel void @global_atomic_fsub_uni_address_uni_value_default_scop
;
; GFX1032-LABEL: global_atomic_fsub_uni_address_uni_value_default_scope_strictfp:
; GFX1032: ; %bb.0:
-; GFX1032-NEXT: s_mov_b32 s0, exec_lo
; GFX1032-NEXT: s_mov_b32 s12, SCRATCH_RSRC_DWORD0
-; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v0, s0, 0
+; GFX1032-NEXT: s_mov_b32 s0, exec_lo
; GFX1032-NEXT: s_mov_b32 s13, SCRATCH_RSRC_DWORD1
; GFX1032-NEXT: s_mov_b32 s14, -1
+; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v0, s0, 0
; GFX1032-NEXT: s_mov_b32 s15, 0x31c16000
; GFX1032-NEXT: s_add_u32 s12, s12, s9
-; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX1032-NEXT: s_addc_u32 s13, s13, 0
; GFX1032-NEXT: s_mov_b32 s4, 0
+; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX1032-NEXT: s_and_saveexec_b32 s1, vcc_lo
; GFX1032-NEXT: s_cbranch_execz .LBB7_3
; GFX1032-NEXT: ; %bb.1:
@@ -5073,16 +5073,16 @@ define amdgpu_kernel void @global_atomic_fsub_uni_address_uni_value_default_scop
;
; GFX1032-DPP-LABEL: global_atomic_fsub_uni_address_uni_value_default_scope_strictfp:
; GFX1032-DPP: ; %bb.0:
-; GFX1032-DPP-NEXT: s_mov_b32 s0, exec_lo
; GFX1032-DPP-NEXT: s_mov_b32 s12, SCRATCH_RSRC_DWORD0
-; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s0, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s0, exec_lo
; GFX1032-DPP-NEXT: s_mov_b32 s13, SCRATCH_RSRC_DWORD1
; GFX1032-DPP-NEXT: s_mov_b32 s14, -1
+; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s0, 0
; GFX1032-DPP-NEXT: s_mov_b32 s15, 0x31c16000
; GFX1032-DPP-NEXT: s_add_u32 s12, s12, s9
-; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX1032-DPP-NEXT: s_addc_u32 s13, s13, 0
; GFX1032-DPP-NEXT: s_mov_b32 s4, 0
+; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX1032-DPP-NEXT: s_and_saveexec_b32 s1, vcc_lo
; GFX1032-DPP-NEXT: s_cbranch_execz .LBB7_3
; GFX1032-DPP-NEXT: ; %bb.1:
@@ -6249,19 +6249,19 @@ define amdgpu_kernel void @global_atomic_fsub_double_uni_address_uni_value_agent
;
; GFX1032-LABEL: global_atomic_fsub_double_uni_address_uni_value_agent_scope_unsafe:
; GFX1032: ; %bb.0:
+; GFX1032-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
; GFX1032-NEXT: s_mov_b32 s33, s8
; GFX1032-NEXT: s_mov_b32 s8, exec_lo
-; GFX1032-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v3, s8, 0
; GFX1032-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
; GFX1032-NEXT: s_mov_b32 s50, -1
+; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v3, s8, 0
; GFX1032-NEXT: s_mov_b32 s51, 0x31c16000
; GFX1032-NEXT: s_add_u32 s48, s48, s9
-; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v3
; GFX1032-NEXT: s_addc_u32 s49, s49, 0
; GFX1032-NEXT: s_mov_b64 s[38:39], s[0:1]
; GFX1032-NEXT: s_mov_b32 s44, 0
; GFX1032-NEXT: s_movk_i32 s32, 0x400
+; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v3
; GFX1032-NEXT: s_and_saveexec_b32 s0, vcc_lo
; GFX1032-NEXT: s_cbranch_execz .LBB9_3
; GFX1032-NEXT: ; %bb.1:
@@ -6698,19 +6698,19 @@ define amdgpu_kernel void @global_atomic_fsub_double_uni_address_uni_value_agent
;
; GFX1032-DPP-LABEL: global_atomic_fsub_double_uni_address_uni_value_agent_scope_unsafe:
; GFX1032-DPP: ; %bb.0:
+; GFX1032-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
; GFX1032-DPP-NEXT: s_mov_b32 s33, s8
; GFX1032-DPP-NEXT: s_mov_b32 s8, exec_lo
-; GFX1032-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v3, s8, 0
; GFX1032-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
; GFX1032-DPP-NEXT: s_mov_b32 s50, -1
+; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v3, s8, 0
; GFX1032-DPP-NEXT: s_mov_b32 s51, 0x31c16000
; GFX1032-DPP-NEXT: s_add_u32 s48, s48, s9
-; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v3
; GFX1032-DPP-NEXT: s_addc_u32 s49, s49, 0
; GFX1032-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
; GFX1032-DPP-NEXT: s_mov_b32 s44, 0
; GFX1032-DPP-NEXT: s_movk_i32 s32, 0x400
+; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v3
; GFX1032-DPP-NEXT: s_and_saveexec_b32 s0, vcc_lo
; GFX1032-DPP-NEXT: s_cbranch_execz .LBB9_3
; GFX1032-DPP-NEXT: ; %bb.1:
@@ -7915,8 +7915,8 @@ define amdgpu_kernel void @global_atomic_fsub_double_uni_address_div_value_agent
; GFX1032-DPP-NEXT: v_permlanex16_b32 v10, v8, 0, 0
; GFX1032-DPP-NEXT: v_add_f64 v[8:9], v[8:9], v[10:11]
; GFX1032-DPP-NEXT: s_mov_b32 exec_lo, s0
-; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
; GFX1032-DPP-NEXT: v_mov_b32_e32 v41, v8
+; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
; GFX1032-DPP-NEXT: v_mov_b32_e32 v42, v9
; GFX1032-DPP-NEXT: s_mov_b32 s44, 0
; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
@@ -8340,16 +8340,16 @@ define amdgpu_kernel void @global_atomic_fsub_double_uni_address_uni_value_one_a
;
; GFX1032-LABEL: global_atomic_fsub_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
; GFX1032: ; %bb.0:
-; GFX1032-NEXT: s_mov_b32 s0, exec_lo
; GFX1032-NEXT: s_mov_b32 s12, SCRATCH_RSRC_DWORD0
-; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v0, s0, 0
+; GFX1032-NEXT: s_mov_b32 s0, exec_lo
; GFX1032-NEXT: s_mov_b32 s13, SCRATCH_RSRC_DWORD1
; GFX1032-NEXT: s_mov_b32 s14, -1
+; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v0, s0, 0
; GFX1032-NEXT: s_mov_b32 s15, 0x31c16000
; GFX1032-NEXT: s_add_u32 s12, s12, s9
-; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX1032-NEXT: s_addc_u32 s13, s13, 0
; GFX1032-NEXT: s_mov_b32 s4, 0
+; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX1032-NEXT: s_and_saveexec_b32 s1, vcc_lo
; GFX1032-NEXT: s_cbranch_execz .LBB11_3
; GFX1032-NEXT: ; %bb.1:
@@ -8597,16 +8597,16 @@ define amdgpu_kernel void @global_atomic_fsub_double_uni_address_uni_value_one_a
;
; GFX1032-DPP-LABEL: global_atomic_fsub_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
; GFX1032-DPP: ; %bb.0:
-; GFX1032-DPP-NEXT: s_mov_b32 s0, exec_lo
; GFX1032-DPP-NEXT: s_mov_b32 s12, SCRATCH_RSRC_DWORD0
-; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s0, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s0, exec_lo
; GFX1032-DPP-NEXT: s_mov_b32 s13, SCRATCH_RSRC_DWORD1
; GFX1032-DPP-NEXT: s_mov_b32 s14, -1
+; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s0, 0
; GFX1032-DPP-NEXT: s_mov_b32 s15, 0x31c16000
; GFX1032-DPP-NEXT: s_add_u32 s12, s12, s9
-; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX1032-DPP-NEXT: s_addc_u32 s13, s13, 0
; GFX1032-DPP-NEXT: s_mov_b32 s4, 0
+; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX1032-DPP-NEXT: s_and_saveexec_b32 s1, vcc_lo
; GFX1032-DPP-NEXT: s_cbranch_execz .LBB11_3
; GFX1032-DPP-NEXT: ; %bb.1:
@@ -9426,8 +9426,8 @@ define amdgpu_kernel void @global_atomic_fsub_double_uni_address_div_value_one_a
; GFX1032-DPP-NEXT: v_permlanex16_b32 v5, v3, 0, 0
; GFX1032-DPP-NEXT: v_add_f64 v[3:4], v[3:4], v[5:6]
; GFX1032-DPP-NEXT: s_mov_b32 exec_lo, s0
-; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
; GFX1032-DPP-NEXT: v_mov_b32_e32 v0, v3
+; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, v4
; GFX1032-DPP-NEXT: s_mov_b32 s2, 0
; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v2
@@ -9763,16 +9763,16 @@ define amdgpu_kernel void @global_atomic_fsub_double_uni_address_uni_value_agent
;
; GFX1032-LABEL: global_atomic_fsub_double_uni_address_uni_value_agent_scope_strictfp:
; GFX1032: ; %bb.0:
-; GFX1032-NEXT: s_mov_b32 s0, exec_lo
; GFX1032-NEXT: s_mov_b32 s12, SCRATCH_RSRC_DWORD0
-; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v0, s0, 0
+; GFX1032-NEXT: s_mov_b32 s0, exec_lo
; GFX1032-NEXT: s_mov_b32 s13, SCRATCH_RSRC_DWORD1
; GFX1032-NEXT: s_mov_b32 s14, -1
+; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v0, s0, 0
; GFX1032-NEXT: s_mov_b32 s15, 0x31c16000
; GFX1032-NEXT: s_add_u32 s12, s12, s9
-; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX1032-NEXT: s_addc_u32 s13, s13, 0
; GFX1032-NEXT: s_mov_b32 s4, 0
+; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX1032-NEXT: s_and_saveexec_b32 s1, vcc_lo
; GFX1032-NEXT: s_cbranch_execz .LBB13_3
; GFX1032-NEXT: ; %bb.1:
@@ -10020,16 +10020,16 @@ define amdgpu_kernel void @global_atomic_fsub_double_uni_address_uni_value_agent
;
; GFX1032-DPP-LABEL: global_atomic_fsub_double_uni_address_uni_value_agent_scope_strictfp:
; GFX1032-DPP: ; %bb.0:
-; GFX1032-DPP-NEXT: s_mov_b32 s0, exec_lo
; GFX1032-DPP-NEXT: s_mov_b32 s12, SCRATCH_RSRC_DWORD0
-; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s0, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s0, exec_lo
; GFX1032-DPP-NEXT: s_mov_b32 s13, SCRATCH_RSRC_DWORD1
; GFX1032-DPP-NEXT: s_mov_b32 s14, -1
+; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s0, 0
; GFX1032-DPP-NEXT: s_mov_b32 s15, 0x31c16000
; GFX1032-DPP-NEXT: s_add_u32 s12, s12, s9
-; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX1032-DPP-NEXT: s_addc_u32 s13, s13, 0
; GFX1032-DPP-NEXT: s_mov_b32 s4, 0
+; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX1032-DPP-NEXT: s_and_saveexec_b32 s1, vcc_lo
; GFX1032-DPP-NEXT: s_cbranch_execz .LBB13_3
; GFX1032-DPP-NEXT: ; %bb.1:
@@ -10850,8 +10850,8 @@ define amdgpu_kernel void @global_atomic_fsub_double_uni_address_div_value_agent
; GFX1032-DPP-NEXT: v_permlanex16_b32 v5, v3, 0, 0
; GFX1032-DPP-NEXT: v_add_f64 v[3:4], v[3:4], v[5:6]
; GFX1032-DPP-NEXT: s_mov_b32 exec_lo, s0
-; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
; GFX1032-DPP-NEXT: v_mov_b32_e32 v0, v3
+; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, v4
; GFX1032-DPP-NEXT: s_mov_b32 s2, 0
; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v2
@@ -11756,8 +11756,8 @@ define amdgpu_kernel void @global_atomic_fsub_double_uni_address_div_value_agent
; GFX1032-DPP-NEXT: v_permlanex16_b32 v5, v3, 0, 0
; GFX1032-DPP-NEXT: v_add_f64 v[3:4], v[3:4], v[5:6]
; GFX1032-DPP-NEXT: s_mov_b32 exec_lo, s0
-; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
; GFX1032-DPP-NEXT: v_mov_b32_e32 v0, v3
+; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, v4
; GFX1032-DPP-NEXT: s_mov_b32 s2, 0
; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v2
@@ -13892,8 +13892,8 @@ define amdgpu_kernel void @global_atomic_fsub_double_uni_address_div_value_defau
; GFX1032-DPP-NEXT: v_permlanex16_b32 v10, v8, 0, 0
; GFX1032-DPP-NEXT: v_add_f64 v[8:9], v[8:9], v[10:11]
; GFX1032-DPP-NEXT: s_mov_b32 exec_lo, s0
-; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
; GFX1032-DPP-NEXT: v_mov_b32_e32 v41, v8
+; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
; GFX1032-DPP-NEXT: v_mov_b32_e32 v42, v9
; GFX1032-DPP-NEXT: s_mov_b32 s44, 0
; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
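The hunks above only reorder independent scalar setup instructions (the s_mov_b32 writes of the SCRATCH_RSRC words plus the s_add_u32/s_addc_u32 pair) relative to v_mbcnt_lo_u32_b32 and v_cmp_eq_u32_e32; the computed values and the branch on vcc_lo are unchanged, so these are pure scheduling differences. Checks like these are normally regenerated rather than hand-edited; a sketch, assuming the functions live in llvm/test/CodeGen/AMDGPU/global_atomics_scan_fsub.ll (the file name is not visible in this excerpt) and a freshly built llc:

  llvm/utils/update_llc_test_checks.py --llc-binary=build/bin/llc \
      llvm/test/CodeGen/AMDGPU/global_atomics_scan_fsub.ll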
diff --git a/llvm/test/CodeGen/AMDGPU/high-RP-reschedule.mir b/llvm/test/CodeGen/AMDGPU/high-RP-reschedule.mir
index a5ddc9a..e9005e9 100644
--- a/llvm/test/CodeGen/AMDGPU/high-RP-reschedule.mir
+++ b/llvm/test/CodeGen/AMDGPU/high-RP-reschedule.mir
@@ -25,7 +25,7 @@ body: |
%8:vreg_128 = IMPLICIT_DEF
%9:vreg_128 = IMPLICIT_DEF
%10:vreg_128 = IMPLICIT_DEF
- %11:sreg_64_xexec = IMPLICIT_DEF
+ %11:sreg_64_xexec_xnull = IMPLICIT_DEF
%12:vreg_64 = IMPLICIT_DEF
bb.1:
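The single change above retypes %11 from sreg_64_xexec to sreg_64_xexec_xnull. This appears to track a newly added SGPR register class that additionally excludes the null register (for operands, such as SMEM base addresses, where null is not a legal encoding); the IMPLICIT_DEF must name the narrower class for the MIR to keep verifying.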
diff --git a/llvm/test/CodeGen/AMDGPU/inline-asm.i128.ll b/llvm/test/CodeGen/AMDGPU/inline-asm.i128.ll
index 4d62d30..292722c 100644
--- a/llvm/test/CodeGen/AMDGPU/inline-asm.i128.ll
+++ b/llvm/test/CodeGen/AMDGPU/inline-asm.i128.ll
@@ -8,16 +8,16 @@
define amdgpu_kernel void @s_input_output_i128() {
; GFX908-LABEL: name: s_input_output_i128
; GFX908: bb.0 (%ir-block.0):
- ; GFX908-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 7340042 /* regdef:SGPR_128 */, def %11
+ ; GFX908-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 7405578 /* regdef:SGPR_128 */, def %11
; GFX908-NEXT: [[COPY:%[0-9]+]]:sgpr_128 = COPY %11
- ; GFX908-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 7340041 /* reguse:SGPR_128 */, [[COPY]]
+ ; GFX908-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 7405577 /* reguse:SGPR_128 */, [[COPY]]
; GFX908-NEXT: S_ENDPGM 0
;
; GFX90A-LABEL: name: s_input_output_i128
; GFX90A: bb.0 (%ir-block.0):
- ; GFX90A-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 7340042 /* regdef:SGPR_128 */, def %9
+ ; GFX90A-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 7405578 /* regdef:SGPR_128 */, def %9
; GFX90A-NEXT: [[COPY:%[0-9]+]]:sgpr_128 = COPY %9
- ; GFX90A-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 7340041 /* reguse:SGPR_128 */, [[COPY]]
+ ; GFX90A-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 7405577 /* reguse:SGPR_128 */, [[COPY]]
; GFX90A-NEXT: S_ENDPGM 0
%val = tail call i128 asm sideeffect "; def $0", "=s"()
call void asm sideeffect "; use $0", "s"(i128 %val)
@@ -27,16 +27,16 @@ define amdgpu_kernel void @s_input_output_i128() {
define amdgpu_kernel void @v_input_output_i128() {
; GFX908-LABEL: name: v_input_output_i128
; GFX908: bb.0 (%ir-block.0):
- ; GFX908-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 6225930 /* regdef:VReg_128 */, def %11
+ ; GFX908-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 6291466 /* regdef:VReg_128 */, def %11
; GFX908-NEXT: [[COPY:%[0-9]+]]:vreg_128 = COPY %11
- ; GFX908-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 6225929 /* reguse:VReg_128 */, [[COPY]]
+ ; GFX908-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 6291465 /* reguse:VReg_128 */, [[COPY]]
; GFX908-NEXT: S_ENDPGM 0
;
; GFX90A-LABEL: name: v_input_output_i128
; GFX90A: bb.0 (%ir-block.0):
- ; GFX90A-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 6553610 /* regdef:VReg_128_Align2 */, def %9
+ ; GFX90A-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 6619146 /* regdef:VReg_128_Align2 */, def %9
; GFX90A-NEXT: [[COPY:%[0-9]+]]:vreg_128_align2 = COPY %9
- ; GFX90A-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 6553609 /* reguse:VReg_128_Align2 */, [[COPY]]
+ ; GFX90A-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 6619145 /* reguse:VReg_128_Align2 */, [[COPY]]
; GFX90A-NEXT: S_ENDPGM 0
%val = tail call i128 asm sideeffect "; def $0", "=v"()
call void asm sideeffect "; use $0", "v"(i128 %val)
@@ -46,16 +46,16 @@ define amdgpu_kernel void @v_input_output_i128() {
define amdgpu_kernel void @a_input_output_i128() {
; GFX908-LABEL: name: a_input_output_i128
; GFX908: bb.0 (%ir-block.0):
- ; GFX908-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 6160394 /* regdef:AReg_128 */, def %11
+ ; GFX908-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 6225930 /* regdef:AReg_128 */, def %11
; GFX908-NEXT: [[COPY:%[0-9]+]]:areg_128 = COPY %11
- ; GFX908-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 6160393 /* reguse:AReg_128 */, [[COPY]]
+ ; GFX908-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 6225929 /* reguse:AReg_128 */, [[COPY]]
; GFX908-NEXT: S_ENDPGM 0
;
; GFX90A-LABEL: name: a_input_output_i128
; GFX90A: bb.0 (%ir-block.0):
- ; GFX90A-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 6422538 /* regdef:AReg_128_Align2 */, def %9
+ ; GFX90A-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 6488074 /* regdef:AReg_128_Align2 */, def %9
; GFX90A-NEXT: [[COPY:%[0-9]+]]:areg_128_align2 = COPY %9
- ; GFX90A-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 6422537 /* reguse:AReg_128_Align2 */, [[COPY]]
+ ; GFX90A-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 6488073 /* reguse:AReg_128_Align2 */, [[COPY]]
; GFX90A-NEXT: S_ENDPGM 0
%val = call i128 asm sideeffect "; def $0", "=a"()
call void asm sideeffect "; use $0", "a"(i128 %val)
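Every hunk in this file changes only the immediate of the INLINEASM flag operand, and each delta is exactly 65536 (e.g. 7340042 -> 7405578, i.e. 0x70000A -> 0x71000A). The flag word packs the operand kind and count in its low half and the register-class ID (stored as ID + 1) starting at bit 16, so inserting one new register class, such as the sreg_64_xexec_xnull seen in the previous file, renumbers every later class ID by one while leaving the low half untouched; the unchanged /* regdef:SGPR_128 */ style comments confirm the classes themselves did not move.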
diff --git a/llvm/test/CodeGen/AMDGPU/insert-singleuse-vdst.mir b/llvm/test/CodeGen/AMDGPU/insert-singleuse-vdst.mir
deleted file mode 100644
index 9e65ce3..0000000
--- a/llvm/test/CodeGen/AMDGPU/insert-singleuse-vdst.mir
+++ /dev/null
@@ -1,1420 +0,0 @@
-# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 4
-# RUN: llc -mtriple=amdgcn -mcpu=gfx1150 -verify-machineinstrs -run-pass=amdgpu-insert-single-use-vdst %s -o - | FileCheck %s
-
-# One single-use producer.
----
-name: one_producer
-tracksRegLiveness: true
-body: |
- ; CHECK-LABEL: name: one_producer
- ; CHECK: bb.0:
- ; CHECK-NEXT: successors: %bb.1(0x80000000)
- ; CHECK-NEXT: liveins: $vgpr0
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: S_SINGLEUSE_VDST 1
- ; CHECK-NEXT: $vgpr1 = V_ADD_U32_e32 $vgpr0, $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr2 = V_ADD_U32_e32 $vgpr0, $vgpr1, implicit $exec
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: bb.1:
- ; CHECK-NEXT: liveins: $vgpr0, $vgpr2
- bb.0:
- liveins: $vgpr0
- $vgpr1 = V_ADD_U32_e32 $vgpr0, $vgpr0, implicit $exec
- $vgpr2 = V_ADD_U32_e32 $vgpr0, $vgpr1, implicit $exec
- bb.1:
- liveins: $vgpr0, $vgpr2
-...
-
-# One single-use producer of a 64-bit value.
----
-name: one_producer_64bit
-tracksRegLiveness: true
-body: |
- ; CHECK-LABEL: name: one_producer_64bit
- ; CHECK: bb.0:
- ; CHECK-NEXT: successors: %bb.1(0x80000000)
- ; CHECK-NEXT: liveins: $vgpr0_vgpr1
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: S_SINGLEUSE_VDST 1
- ; CHECK-NEXT: $vgpr2_vgpr3 = V_LSHLREV_B64_e64 0, $vgpr0_vgpr1, implicit $exec
- ; CHECK-NEXT: $vgpr4_vgpr5 = V_MOV_B64_e64 $vgpr2_vgpr3, implicit $exec
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: bb.1:
- ; CHECK-NEXT: liveins: $vgpr4_vgpr5
- bb.0:
- liveins: $vgpr0_vgpr1
- $vgpr2_vgpr3 = V_LSHLREV_B64_e64 0, $vgpr0_vgpr1, implicit $exec
- $vgpr4_vgpr5 = V_MOV_B64_e64 $vgpr2_vgpr3, implicit $exec
- bb.1:
- liveins: $vgpr4_vgpr5
-...
-
-# Two consecutive single-use producers.
----
-name: two_producers
-tracksRegLiveness: true
-body: |
- ; CHECK-LABEL: name: two_producers
- ; CHECK: bb.0:
- ; CHECK-NEXT: successors: %bb.1(0x80000000)
- ; CHECK-NEXT: liveins: $vgpr0
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: S_SINGLEUSE_VDST 2
- ; CHECK-NEXT: $vgpr1 = V_ADD_U32_e32 $vgpr0, $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr2 = V_ADD_U32_e32 $vgpr0, $vgpr1, implicit $exec
- ; CHECK-NEXT: $vgpr3 = V_ADD_U32_e32 $vgpr0, $vgpr2, implicit $exec
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: bb.1:
- ; CHECK-NEXT: liveins: $vgpr0, $vgpr3
- bb.0:
- liveins: $vgpr0
- $vgpr1 = V_ADD_U32_e32 $vgpr0, $vgpr0, implicit $exec
- $vgpr2 = V_ADD_U32_e32 $vgpr0, $vgpr1, implicit $exec
- $vgpr3 = V_ADD_U32_e32 $vgpr0, $vgpr2, implicit $exec
- bb.1:
- liveins: $vgpr0, $vgpr3
-...
-
-# Redefinitions of v0.
----
-name: redefinitions
-tracksRegLiveness: true
-body: |
- ; CHECK-LABEL: name: redefinitions
- ; CHECK: bb.0:
- ; CHECK-NEXT: successors: %bb.1(0x80000000)
- ; CHECK-NEXT: liveins: $vgpr0
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: S_SINGLEUSE_VDST 4
- ; CHECK-NEXT: $vgpr0 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr0 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr0 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr0 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: bb.1:
- bb.0:
- liveins: $vgpr0
- $vgpr0 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr0 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr0 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr0 = V_MOV_B32_e32 $vgpr0, implicit $exec
- bb.1:
-...
-
-# One producer with no consumers.
----
-name: no_consumer
-tracksRegLiveness: true
-body: |
- ; CHECK-LABEL: name: no_consumer
- ; CHECK: bb.0:
- ; CHECK-NEXT: successors: %bb.1(0x80000000)
- ; CHECK-NEXT: liveins: $vgpr0
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: S_SINGLEUSE_VDST 1
- ; CHECK-NEXT: $vgpr1 = V_ADD_U32_e32 $vgpr0, $vgpr0, implicit $exec
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: bb.1:
- bb.0:
- liveins: $vgpr0
- $vgpr1 = V_ADD_U32_e32 $vgpr0, $vgpr0, implicit $exec
- bb.1:
-...
-
-# One consumer with two uses of the same value.
----
-name: one_consumer_two_uses
-tracksRegLiveness: true
-body: |
- ; CHECK-LABEL: name: one_consumer_two_uses
- ; CHECK: bb.0:
- ; CHECK-NEXT: successors: %bb.1(0x80000000)
- ; CHECK-NEXT: liveins: $vgpr0
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: S_SINGLEUSE_VDST 1
- ; CHECK-NEXT: $vgpr1 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr2 = V_ADD_U32_e32 $vgpr1, $vgpr1, implicit $exec
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: bb.1:
- ; CHECK-NEXT: liveins: $vgpr0, $vgpr2
- bb.0:
- liveins: $vgpr0
- $vgpr1 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr2 = V_ADD_U32_e32 $vgpr1, $vgpr1, implicit $exec
- bb.1:
- liveins: $vgpr0, $vgpr2
-...
-
-# A longer example.
----
-name: longer_example
-tracksRegLiveness: true
-body: |
- ; CHECK-LABEL: name: longer_example
- ; CHECK: bb.0:
- ; CHECK-NEXT: successors: %bb.1(0x80000000)
- ; CHECK-NEXT: liveins: $vgpr3, $vgpr5, $sgpr0, $sgpr2, $sgpr4, $sgpr5, $sgpr16, $sgpr17, $sgpr18, $sgpr19
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: S_SINGLEUSE_VDST 274
- ; CHECK-NEXT: $vgpr14 = V_MUL_F32_e32 $sgpr4, $vgpr3, implicit $exec, implicit $mode
- ; CHECK-NEXT: $sgpr3 = S_MUL_F16 $sgpr0, $sgpr2, implicit $mode
- ; CHECK-NEXT: $vgpr15 = V_MUL_F32_e32 $sgpr5, $vgpr3, implicit $exec, implicit $mode
- ; CHECK-NEXT: $vgpr17 = V_FMA_F32_e64 0, $sgpr16, 0, $vgpr5, 0, $vgpr14, 0, 0, implicit $exec, implicit $mode
- ; CHECK-NEXT: $sgpr1 = S_ADD_F16 $sgpr0, 15360, implicit $mode
- ; CHECK-NEXT: $vgpr15 = V_FMA_F32_e64 0, $sgpr17, 0, $vgpr5, 0, $vgpr15, 0, 0, implicit $exec, implicit $mode
- ; CHECK-NEXT: $vgpr14 = V_FMA_F32_e64 0, $sgpr18, 0, $vgpr15, 0, $vgpr17, 0, 0, implicit $exec, implicit $mode
- ; CHECK-NEXT: $vgpr15 = V_FMA_F32_e64 0, $sgpr19, 0, $vgpr14, 0, $vgpr17, 0, 0, implicit $exec, implicit $mode
- ; CHECK-NEXT: $vgpr16 = V_LOG_F32_e32 $vgpr15, implicit $exec, implicit $mode
- ; CHECK-NEXT: $vgpr18 = V_EXP_F32_e32 $vgpr15, implicit $exec, implicit $mode
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: bb.1:
- ; CHECK-NEXT: liveins: $vgpr16, $vgpr18
- bb.0:
- liveins: $vgpr3, $vgpr5, $sgpr0, $sgpr2, $sgpr4, $sgpr5, $sgpr16, $sgpr17, $sgpr18, $sgpr19
- $vgpr14 = V_MUL_F32_e32 $sgpr4, $vgpr3, implicit $exec, implicit $mode
- $sgpr3 = S_MUL_F16 $sgpr0, $sgpr2, implicit $mode
- $vgpr15 = V_MUL_F32_e32 $sgpr5, $vgpr3, implicit $exec, implicit $mode
- $vgpr17 = V_FMA_F32_e64 0, $sgpr16, 0, $vgpr5, 0, $vgpr14, 0, 0, implicit $exec, implicit $mode
- $sgpr1 = S_ADD_F16 $sgpr0, 15360, implicit $mode
- $vgpr15 = V_FMA_F32_e64 0, $sgpr17, 0, $vgpr5, 0, $vgpr15, 0, 0, implicit $exec, implicit $mode
- $vgpr14 = V_FMA_F32_e64 0, $sgpr18, 0, $vgpr15, 0, $vgpr17, 0, 0, implicit $exec, implicit $mode
- $vgpr15 = V_FMA_F32_e64 0, $sgpr19, 0, $vgpr14, 0, $vgpr17, 0, 0, implicit $exec, implicit $mode
- $vgpr16 = V_LOG_F32_e32 $vgpr15, implicit $exec, implicit $mode
- $vgpr18 = V_EXP_F32_e32 $vgpr15, implicit $exec, implicit $mode
- bb.1:
- liveins: $vgpr16, $vgpr18
-...
-
-# Multiple uses of v0.
----
-name: multiple_uses_1
-tracksRegLiveness: true
-body: |
- ; CHECK-LABEL: name: multiple_uses_1
- ; CHECK: bb.0:
- ; CHECK-NEXT: successors: %bb.1(0x80000000)
- ; CHECK-NEXT: liveins: $vgpr0
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: $vgpr0 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr1 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr2 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: bb.1:
- ; CHECK-NEXT: liveins: $vgpr1, $vgpr2
- bb.0:
- liveins: $vgpr0
- $vgpr0 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr1 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr2 = V_MOV_B32_e32 $vgpr0, implicit $exec
- bb.1:
- liveins: $vgpr1, $vgpr2
-...
-
-# Multiple uses of v0 and redefinitions of v1 and v2.
----
-name: multiple_uses_2
-tracksRegLiveness: true
-body: |
- ; CHECK-LABEL: name: multiple_uses_2
- ; CHECK: bb.0:
- ; CHECK-NEXT: successors: %bb.1(0x80000000)
- ; CHECK-NEXT: liveins: $vgpr0
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: $vgpr0 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: S_SINGLEUSE_VDST 2
- ; CHECK-NEXT: $vgpr1 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr2 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr0 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr1 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr2 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: bb.1:
- ; CHECK-NEXT: liveins: $vgpr1, $vgpr2
- bb.0:
- liveins: $vgpr0
- $vgpr0 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr1 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr2 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr0 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr1 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr2 = V_MOV_B32_e32 $vgpr0, implicit $exec
- bb.1:
- liveins: $vgpr1, $vgpr2
-...
-
-# Multiple uses of all but v1.
----
-name: multiple_uses_3
-tracksRegLiveness: true
-body: |
- ; CHECK-LABEL: name: multiple_uses_3
- ; CHECK: bb.0:
- ; CHECK-NEXT: successors: %bb.1(0x80000000)
- ; CHECK-NEXT: liveins: $vgpr0
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: $vgpr0 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: S_SINGLEUSE_VDST 1
- ; CHECK-NEXT: $vgpr1 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr2 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr3 = V_MOV_B32_e32 $vgpr1, implicit $exec
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: bb.1:
- ; CHECK-NEXT: liveins: $vgpr2, $vgpr3
- bb.0:
- liveins: $vgpr0
- $vgpr0 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr1 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr2 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr3 = V_MOV_B32_e32 $vgpr1, implicit $exec
- bb.1:
- liveins: $vgpr2, $vgpr3
-...
-
-# Second use is an instruction that reads and writes v1.
----
-name: multiple_uses_4
-tracksRegLiveness: true
-body: |
- ; CHECK-LABEL: name: multiple_uses_4
- ; CHECK: bb.0:
- ; CHECK-NEXT: successors: %bb.1(0x80000000)
- ; CHECK-NEXT: liveins: $vgpr0
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: $vgpr1 = V_ADD_U32_e32 $vgpr0, $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr2 = V_ADD_U32_e32 $vgpr0, $vgpr1, implicit $exec
- ; CHECK-NEXT: $vgpr1 = V_ADD_U32_e32 $vgpr0, $vgpr1, implicit $exec
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: bb.1:
- ; CHECK-NEXT: liveins: $vgpr0, $vgpr1, $vgpr2
- bb.0:
- liveins: $vgpr0
- $vgpr1 = V_ADD_U32_e32 $vgpr0, $vgpr0, implicit $exec
- $vgpr2 = V_ADD_U32_e32 $vgpr0, $vgpr1, implicit $exec
- $vgpr1 = V_ADD_U32_e32 $vgpr0, $vgpr1, implicit $exec
- bb.1:
- liveins: $vgpr0, $vgpr1, $vgpr2
-...
-
-# Results are live-in to another basic block.
----
-name: basic_block_1
-tracksRegLiveness: true
-body: |
- ; CHECK-LABEL: name: basic_block_1
- ; CHECK: bb.0:
- ; CHECK-NEXT: successors: %bb.1(0x80000000)
- ; CHECK-NEXT: liveins: $vgpr0
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: $vgpr0 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr1 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr2 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: bb.1:
- ; CHECK-NEXT: successors: %bb.2(0x80000000)
- ; CHECK-NEXT: liveins: $vgpr0, $vgpr1, $vgpr2
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: $vgpr0 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr1 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr2 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: bb.2:
- ; CHECK-NEXT: liveins: $vgpr1, $vgpr2
- bb.0:
- liveins: $vgpr0
- $vgpr0 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr1 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr2 = V_MOV_B32_e32 $vgpr0, implicit $exec
- bb.1:
- liveins: $vgpr0, $vgpr1, $vgpr2
- $vgpr0 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr1 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr2 = V_MOV_B32_e32 $vgpr0, implicit $exec
- bb.2:
- liveins: $vgpr1, $vgpr2
-...
-
-# Result v2 has multiple uses in another basic block.
----
-name: basic_block_2
-tracksRegLiveness: true
-body: |
- ; CHECK-LABEL: name: basic_block_2
- ; CHECK: bb.0:
- ; CHECK-NEXT: successors: %bb.1(0x80000000)
- ; CHECK-NEXT: liveins: $vgpr0, $vgpr1
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: $vgpr2 = V_MOV_B32_e32 $vgpr1, implicit $exec
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: bb.1:
- ; CHECK-NEXT: successors: %bb.2(0x80000000)
- ; CHECK-NEXT: liveins: $vgpr2
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: S_SINGLEUSE_VDST 1
- ; CHECK-NEXT: $vgpr3 = V_MOV_B32_e32 $vgpr2, implicit $exec
- ; CHECK-NEXT: $vgpr3 = V_MOV_B32_e32 $vgpr2, implicit $exec
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: bb.2:
- ; CHECK-NEXT: liveins: $vgpr3
- bb.0:
- liveins: $vgpr0, $vgpr1
- $vgpr2 = V_MOV_B32_e32 $vgpr1, implicit $exec
- bb.1:
- liveins: $vgpr2
- $vgpr3 = V_MOV_B32_e32 $vgpr2, implicit $exec
- $vgpr3 = V_MOV_B32_e32 $vgpr2, implicit $exec
- bb.2:
- liveins: $vgpr3
-...
-
-# Results are redefined in another basic block.
----
-name: basic_block_3
-tracksRegLiveness: true
-body: |
- ; CHECK-LABEL: name: basic_block_3
- ; CHECK: bb.0:
- ; CHECK-NEXT: successors: %bb.1(0x80000000)
- ; CHECK-NEXT: liveins: $vgpr0
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: $vgpr0 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr1 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: S_SINGLEUSE_VDST 1
- ; CHECK-NEXT: $vgpr2 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: bb.1:
- ; CHECK-NEXT: successors: %bb.2(0x80000000)
- ; CHECK-NEXT: liveins: $vgpr0, $vgpr1
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: $vgpr0 = V_ADD_U32_e32 $vgpr0, $vgpr1, implicit $exec
- ; CHECK-NEXT: $vgpr1 = V_ADD_U32_e32 $vgpr0, $vgpr1, implicit $exec
- ; CHECK-NEXT: $vgpr2 = V_ADD_U32_e32 $vgpr0, $vgpr1, implicit $exec
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: bb.2:
- ; CHECK-NEXT: liveins: $vgpr0, $vgpr1, $vgpr2
- bb.0:
- liveins: $vgpr0
- $vgpr0 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr1 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr2 = V_MOV_B32_e32 $vgpr0, implicit $exec
- bb.1:
- liveins: $vgpr0, $vgpr1
- $vgpr0 = V_ADD_U32_e32 $vgpr0, $vgpr1, implicit $exec
- $vgpr1 = V_ADD_U32_e32 $vgpr0, $vgpr1, implicit $exec
- $vgpr2 = V_ADD_U32_e32 $vgpr0, $vgpr1, implicit $exec
- bb.2:
- liveins: $vgpr0, $vgpr1, $vgpr2
-...
-
-# Exec modified between producer and consumer.
----
-name: exec_mask
-tracksRegLiveness: true
-body: |
- ; CHECK-LABEL: name: exec_mask
- ; CHECK: bb.0:
- ; CHECK-NEXT: successors: %bb.1(0x80000000)
- ; CHECK-NEXT: liveins: $sgpr0_sgpr1
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: $vgpr0 = V_MOV_B32_e32 0, implicit $exec
- ; CHECK-NEXT: $exec = COPY $sgpr0_sgpr1
- ; CHECK-NEXT: $vgpr0 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: bb.1:
- ; CHECK-NEXT: liveins: $vgpr0
- bb.0:
- liveins: $sgpr0_sgpr1
- $vgpr0 = V_MOV_B32_e32 0, implicit $exec
- $exec = COPY $sgpr0_sgpr1
- $vgpr0 = V_MOV_B32_e32 $vgpr0, implicit $exec
- bb.1:
- liveins: $vgpr0
-...
-
-# Exec_lo modified between producer and consumer.
----
-name: exec_mask_lo
-tracksRegLiveness: true
-body: |
- ; CHECK-LABEL: name: exec_mask_lo
- ; CHECK: bb.0:
- ; CHECK-NEXT: successors: %bb.1(0x80000000)
- ; CHECK-NEXT: liveins: $sgpr0
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: $vgpr0 = V_MOV_B32_e32 0, implicit $exec
- ; CHECK-NEXT: $exec_lo = COPY $sgpr0
- ; CHECK-NEXT: $vgpr0 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: bb.1:
- ; CHECK-NEXT: liveins: $vgpr0
- bb.0:
- liveins: $sgpr0
- $vgpr0 = V_MOV_B32_e32 0, implicit $exec
- $exec_lo = COPY $sgpr0
- $vgpr0 = V_MOV_B32_e32 $vgpr0, implicit $exec
- bb.1:
- liveins: $vgpr0
-...
-
-# Exec_hi modified between producer and consumer.
----
-name: exec_mask_hi
-tracksRegLiveness: true
-body: |
- ; CHECK-LABEL: name: exec_mask_hi
- ; CHECK: bb.0:
- ; CHECK-NEXT: successors: %bb.1(0x80000000)
- ; CHECK-NEXT: liveins: $sgpr0
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: $vgpr0 = V_MOV_B32_e32 0, implicit $exec
- ; CHECK-NEXT: $exec_hi = COPY $sgpr0
- ; CHECK-NEXT: $vgpr0 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: bb.1:
- ; CHECK-NEXT: liveins: $vgpr0
- bb.0:
- liveins: $sgpr0
- $vgpr0 = V_MOV_B32_e32 0, implicit $exec
- $exec_hi = COPY $sgpr0
- $vgpr0 = V_MOV_B32_e32 $vgpr0, implicit $exec
- bb.1:
- liveins: $vgpr0
-...
-
-# Write 32-bit vgpr and then read from low 16 bits.
----
-name: write_full_read_lo
-tracksRegLiveness: true
-body: |
- ; CHECK-LABEL: name: write_full_read_lo
- ; CHECK: bb.0:
- ; CHECK-NEXT: successors: %bb.1(0x80000000)
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: S_SINGLEUSE_VDST 1
- ; CHECK-NEXT: $vgpr0 = V_MOV_B32_e32 0, implicit $exec
- ; CHECK-NEXT: $vgpr1_lo16 = V_MOV_B16_t16_e32 $vgpr0_lo16, implicit $exec
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: bb.1:
- ; CHECK-NEXT: liveins: $vgpr1_lo16
- bb.0:
- $vgpr0 = V_MOV_B32_e32 0, implicit $exec
- $vgpr1_lo16 = V_MOV_B16_t16_e32 $vgpr0_lo16, implicit $exec
- bb.1:
- liveins: $vgpr1_lo16
-...
-
-# Write 32-bit vgpr and then read from high 16 bits.
----
-name: write_full_read_hi
-tracksRegLiveness: true
-body: |
- ; CHECK-LABEL: name: write_full_read_hi
- ; CHECK: bb.0:
- ; CHECK-NEXT: successors: %bb.1(0x80000000)
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: S_SINGLEUSE_VDST 1
- ; CHECK-NEXT: $vgpr0 = V_MOV_B32_e32 0, implicit $exec
- ; CHECK-NEXT: $vgpr1_hi16 = V_MOV_B16_t16_e32 $vgpr0_hi16, implicit $exec
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: bb.1:
- ; CHECK-NEXT: liveins: $vgpr1_hi16
- bb.0:
- $vgpr0 = V_MOV_B32_e32 0, implicit $exec
- $vgpr1_hi16 = V_MOV_B16_t16_e32 $vgpr0_hi16, implicit $exec
- bb.1:
- liveins: $vgpr1_hi16
-...
-
-# Write 32-bit vgpr and then read from both halves.
----
-name: write_full_read_both
-tracksRegLiveness: true
-body: |
- ; CHECK-LABEL: name: write_full_read_both
- ; CHECK: bb.0:
- ; CHECK-NEXT: successors: %bb.1(0x80000000)
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: S_SINGLEUSE_VDST 1
- ; CHECK-NEXT: $vgpr0 = V_MOV_B32_e32 0, implicit $exec
- ; CHECK-NEXT: $vgpr1_lo16 = V_MOV_B16_t16_e32 $vgpr0_lo16, implicit $exec
- ; CHECK-NEXT: $vgpr1_hi16 = V_MOV_B16_t16_e32 $vgpr0_hi16, implicit $exec
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: bb.1:
- ; CHECK-NEXT: liveins: $vgpr1
- bb.0:
- $vgpr0 = V_MOV_B32_e32 0, implicit $exec
- $vgpr1_lo16 = V_MOV_B16_t16_e32 $vgpr0_lo16, implicit $exec
- $vgpr1_hi16 = V_MOV_B16_t16_e32 $vgpr0_hi16, implicit $exec
- bb.1:
- liveins: $vgpr1
-...
-
-# Write 32-bit vgpr and then read from both halves in the same instruction.
----
-name: write_full_read_both_same_instruction
-tracksRegLiveness: true
-body: |
- ; CHECK-LABEL: name: write_full_read_both_same_instruction
- ; CHECK: bb.0:
- ; CHECK-NEXT: successors: %bb.1(0x80000000)
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: S_SINGLEUSE_VDST 1
- ; CHECK-NEXT: $vgpr0 = V_MOV_B32_e32 0, implicit $exec
- ; CHECK-NEXT: $vgpr1_lo16 = V_ADD_F16_t16_e32 $vgpr0_lo16, $vgpr0_hi16, implicit $mode, implicit $exec
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: bb.1:
- ; CHECK-NEXT: liveins: $vgpr1_lo16
- bb.0:
- $vgpr0 = V_MOV_B32_e32 0, implicit $exec
- $vgpr1_lo16 = V_ADD_F16_t16_e32 $vgpr0_lo16, $vgpr0_hi16, implicit $mode, implicit $exec
- bb.1:
- liveins: $vgpr1_lo16
-...
-
-# Write low 16-bits and then read 32-bit vgpr.
----
-name: write_lo_read_full
-tracksRegLiveness: true
-body: |
- ; CHECK-LABEL: name: write_lo_read_full
- ; CHECK: bb.0:
- ; CHECK-NEXT: successors: %bb.1(0x80000000)
- ; CHECK-NEXT: liveins: $vgpr0
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: S_SINGLEUSE_VDST 1
- ; CHECK-NEXT: $vgpr0_lo16 = V_MOV_B16_t16_e32 0, implicit $exec
- ; CHECK-NEXT: $vgpr1 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: bb.1:
- ; CHECK-NEXT: liveins: $vgpr1
- bb.0:
- liveins: $vgpr0
- $vgpr0_lo16 = V_MOV_B16_t16_e32 0, implicit $exec
- $vgpr1 = V_MOV_B32_e32 $vgpr0, implicit $exec
- bb.1:
- liveins: $vgpr1
-...
-
-# Write low 16-bits and then read 32-bit vgpr twice.
----
-name: write_lo_read_full_twice
-tracksRegLiveness: true
-body: |
- ; CHECK-LABEL: name: write_lo_read_full_twice
- ; CHECK: bb.0:
- ; CHECK-NEXT: successors: %bb.1(0x80000000)
- ; CHECK-NEXT: liveins: $vgpr0
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: $vgpr0_lo16 = V_MOV_B16_t16_e32 0, implicit $exec
- ; CHECK-NEXT: $vgpr1 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr2 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: bb.1:
- ; CHECK-NEXT: liveins: $vgpr1, $vgpr2
- bb.0:
- liveins: $vgpr0
- $vgpr0_lo16 = V_MOV_B16_t16_e32 0, implicit $exec
- $vgpr1 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr2 = V_MOV_B32_e32 $vgpr0, implicit $exec
- bb.1:
- liveins: $vgpr1, $vgpr2
-...
-
-# Write high 16-bits and then read 32-bit vgpr.
----
-name: write_hi_read_full
-tracksRegLiveness: true
-body: |
- ; CHECK-LABEL: name: write_hi_read_full
- ; CHECK: bb.0:
- ; CHECK-NEXT: successors: %bb.1(0x80000000)
- ; CHECK-NEXT: liveins: $vgpr0
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: S_SINGLEUSE_VDST 1
- ; CHECK-NEXT: $vgpr0_hi16 = V_MOV_B16_t16_e32 0, implicit $exec
- ; CHECK-NEXT: $vgpr1 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: bb.1:
- ; CHECK-NEXT: liveins: $vgpr1
- bb.0:
- liveins: $vgpr0
- $vgpr0_hi16 = V_MOV_B16_t16_e32 0, implicit $exec
- $vgpr1 = V_MOV_B32_e32 $vgpr0, implicit $exec
- bb.1:
- liveins: $vgpr1
-...
-
-# Write high 16-bits and then read 32-bit vgpr twice.
----
-name: write_hi_read_full_twice
-tracksRegLiveness: true
-body: |
- ; CHECK-LABEL: name: write_hi_read_full_twice
- ; CHECK: bb.0:
- ; CHECK-NEXT: successors: %bb.1(0x80000000)
- ; CHECK-NEXT: liveins: $vgpr0
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: $vgpr0_hi16 = V_MOV_B16_t16_e32 0, implicit $exec
- ; CHECK-NEXT: $vgpr1 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr2 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: bb.1:
- ; CHECK-NEXT: liveins: $vgpr1, $vgpr2
- bb.0:
- liveins: $vgpr0
- $vgpr0_hi16 = V_MOV_B16_t16_e32 0, implicit $exec
- $vgpr1 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr2 = V_MOV_B32_e32 $vgpr0, implicit $exec
- bb.1:
- liveins: $vgpr1, $vgpr2
-...
-
-# Write low 16-bits and then write high 16-bits and then read 32-bit vgpr.
----
-name: write_both_read_full
-tracksRegLiveness: true
-body: |
- ; CHECK-LABEL: name: write_both_read_full
- ; CHECK: bb.0:
- ; CHECK-NEXT: successors: %bb.1(0x80000000)
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: S_SINGLEUSE_VDST 2
- ; CHECK-NEXT: $vgpr0_lo16 = V_MOV_B16_t16_e32 0, implicit $exec
- ; CHECK-NEXT: $vgpr0_hi16 = V_MOV_B16_t16_e32 0, implicit $exec
- ; CHECK-NEXT: $vgpr1 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: bb.1:
- ; CHECK-NEXT: liveins: $vgpr1
- bb.0:
- $vgpr0_lo16 = V_MOV_B16_t16_e32 0, implicit $exec
- $vgpr0_hi16 = V_MOV_B16_t16_e32 0, implicit $exec
- $vgpr1 = V_MOV_B32_e32 $vgpr0, implicit $exec
- bb.1:
- liveins: $vgpr1
-...
-
-# Write low 16-bits and then write high 16-bits and then read 32-bit vgpr twice.
----
-name: write_both_read_full_twice
-tracksRegLiveness: true
-body: |
- ; CHECK-LABEL: name: write_both_read_full_twice
- ; CHECK: bb.0:
- ; CHECK-NEXT: successors: %bb.1(0x80000000)
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: $vgpr0_lo16 = V_MOV_B16_t16_e32 0, implicit $exec
- ; CHECK-NEXT: $vgpr0_hi16 = V_MOV_B16_t16_e32 0, implicit $exec
- ; CHECK-NEXT: $vgpr1 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr2 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: bb.1:
- ; CHECK-NEXT: liveins: $vgpr1, $vgpr2
- bb.0:
- $vgpr0_lo16 = V_MOV_B16_t16_e32 0, implicit $exec
- $vgpr0_hi16 = V_MOV_B16_t16_e32 0, implicit $exec
- $vgpr1 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr2 = V_MOV_B32_e32 $vgpr0, implicit $exec
- bb.1:
- liveins: $vgpr1, $vgpr2
-...
-
-# Three single use producer instructions with non single use producer
-# instructions in between.
----
-name: three_producers_with_two_skips
-tracksRegLiveness: true
-body: |
- ; CHECK-LABEL: name: three_producers_with_two_skips
- ; CHECK: bb.0:
- ; CHECK-NEXT: successors: %bb.1(0x80000000)
- ; CHECK-NEXT: liveins: $vgpr0
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: S_SINGLEUSE_VDST 9361
- ; CHECK-NEXT: $vgpr1 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr2 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr3 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr4 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr5 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: bb.1:
- ; CHECK-NEXT: liveins: $vgpr2, $vgpr4
- bb.0:
- liveins: $vgpr0
- $vgpr1 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr2 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr3 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr4 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr5 = V_MOV_B32_e32 $vgpr0, implicit $exec
- bb.1:
- liveins: $vgpr2, $vgpr4
-...
-
-# Six single use producer instructions with non single use producer
-# instructions in between.
----
-name: six_producers_with_four_skips
-tracksRegLiveness: true
-body: |
- ; CHECK-LABEL: name: six_producers_with_four_skips
- ; CHECK: bb.0:
- ; CHECK-NEXT: successors: %bb.1(0x80000000)
- ; CHECK-NEXT: liveins: $vgpr0
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: S_SINGLEUSE_VDST 145
- ; CHECK-NEXT: $vgpr1 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr2 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr3 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr4 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: S_SINGLEUSE_VDST 9362
- ; CHECK-NEXT: $vgpr5 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr6 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr7 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr8 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr9 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr10 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: bb.1:
- ; CHECK-NEXT: liveins: $vgpr2, $vgpr4, $vgpr7, $vgpr9
- bb.0:
- liveins: $vgpr0
- $vgpr1 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr2 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr3 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr4 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr5 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr6 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr7 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr8 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr9 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr10 = V_MOV_B32_e32 $vgpr0, implicit $exec
- bb.1:
- liveins: $vgpr2, $vgpr4, $vgpr7, $vgpr9
-...
-
-# Five single use producer instructions, followed by
-# four non single use producers, followed by
-# three single use producer instructions, followed by
-# two non single use producers, followed by
-# one single use producer instructions.
----
-name: immediate_order
-tracksRegLiveness: true
-body: |
- ; CHECK-LABEL: name: immediate_order
- ; CHECK: bb.0:
- ; CHECK-NEXT: successors: %bb.1(0x80000000)
- ; CHECK-NEXT: liveins: $vgpr0
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: S_SINGLEUSE_VDST 10693
- ; CHECK-NEXT: $vgpr1 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr2 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr3 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr4 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr5 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr6 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr7 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr8 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr9 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr10 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr11 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr12 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr13 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr14 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr15 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: bb.1:
- ; CHECK-NEXT: liveins: $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr13, $vgpr14
- bb.0:
- liveins: $vgpr0
- $vgpr1 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr2 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr3 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr4 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr5 = V_MOV_B32_e32 $vgpr0, implicit $exec
-
- $vgpr6 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr7 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr8 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr9 = V_MOV_B32_e32 $vgpr0, implicit $exec
-
- $vgpr10 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr11 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr12 = V_MOV_B32_e32 $vgpr0, implicit $exec
-
- $vgpr13 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr14 = V_MOV_B32_e32 $vgpr0, implicit $exec
-
- $vgpr15 = V_MOV_B32_e32 $vgpr0, implicit $exec
- bb.1:
- liveins: $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr13, $vgpr14
-...
-
-# Maximum number of single use producers that can be encoded in a single
-# instruction.
----
-name: maximum_producers_single_instruction
-tracksRegLiveness: true
-body: |
- ; CHECK-LABEL: name: maximum_producers_single_instruction
- ; CHECK: bb.0:
- ; CHECK-NEXT: successors: %bb.1(0x80000000)
- ; CHECK-NEXT: liveins: $vgpr0
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: S_SINGLEUSE_VDST 58255
- ; CHECK-NEXT: $vgpr1 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr2 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr3 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr4 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr5 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr6 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr7 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr8 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr9 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr10 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr11 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr12 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr13 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr14 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr15 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr16 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr17 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr18 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr19 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr20 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr21 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr22 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr23 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr24 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr25 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr26 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr27 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr28 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr29 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: bb.1:
- bb.0:
- liveins: $vgpr0
- $vgpr1 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr2 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr3 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr4 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr5 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr6 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr7 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr8 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr9 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr10 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr11 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr12 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr13 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr14 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr15 = V_MOV_B32_e32 $vgpr0, implicit $exec
-
- $vgpr16 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr17 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr18 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr19 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr20 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr21 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr22 = V_MOV_B32_e32 $vgpr0, implicit $exec
-
- $vgpr23 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr24 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr25 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr26 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr27 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr28 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr29 = V_MOV_B32_e32 $vgpr0, implicit $exec
- bb.1:
-...
-
-# One more than the maximum number of single use producers that can be encoded
-# in a single instruction.
----
-name: too_many_producers_single_instruction
-tracksRegLiveness: true
-body: |
- ; CHECK-LABEL: name: too_many_producers_single_instruction
- ; CHECK: bb.0:
- ; CHECK-NEXT: successors: %bb.1(0x80000000)
- ; CHECK-NEXT: liveins: $vgpr0
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: S_SINGLEUSE_VDST 1
- ; CHECK-NEXT: $vgpr1 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: S_SINGLEUSE_VDST 58255
- ; CHECK-NEXT: $vgpr2 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr3 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr4 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr5 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr6 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr7 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr8 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr9 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr10 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr11 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr12 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr13 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr14 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr15 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr16 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr17 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr18 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr19 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr20 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr21 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr22 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr23 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr24 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr25 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr26 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr27 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr28 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr29 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr30 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: bb.1:
-
-
-
- bb.0:
- liveins: $vgpr0
- $vgpr1 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr2 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr3 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr4 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr5 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr6 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr7 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr8 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr9 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr10 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr11 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr12 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr13 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr14 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr15 = V_MOV_B32_e32 $vgpr0, implicit $exec
-
- $vgpr16 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr17 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr18 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr19 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr20 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr21 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr22 = V_MOV_B32_e32 $vgpr0, implicit $exec
-
- $vgpr23 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr24 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr25 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr26 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr27 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr28 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr29 = V_MOV_B32_e32 $vgpr0, implicit $exec
-
- $vgpr30 = V_MOV_B32_e32 $vgpr0, implicit $exec
- bb.1:
-...
-
-# Maximum distance between single use producers that can be encoded in a single
-# instruction.
----
-name: maximum_skips_single_instruction
-tracksRegLiveness: true
-body: |
- ; CHECK-LABEL: name: maximum_skips_single_instruction
- ; CHECK: bb.0:
- ; CHECK-NEXT: successors: %bb.1(0x80000000)
- ; CHECK-NEXT: liveins: $vgpr0
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: S_SINGLEUSE_VDST 15473
- ; CHECK-NEXT: $vgpr1 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr2 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr3 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr4 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr5 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr6 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr7 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr8 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr9 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr10 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr11 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr12 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr13 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr14 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr15 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr16 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: bb.1:
- ; CHECK-NEXT: liveins: $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15
- bb.0:
- liveins: $vgpr0
- $vgpr1 = V_MOV_B32_e32 $vgpr0, implicit $exec
-
- $vgpr2 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr3 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr4 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr5 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr6 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr7 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr8 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr9 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr10 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr11 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr12 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr13 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr14 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr15 = V_MOV_B32_e32 $vgpr0, implicit $exec
-
- $vgpr16 = V_MOV_B32_e32 $vgpr0, implicit $exec
- bb.1:
- liveins: $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15
-...
-
-# One more than the maximum distance between single use producers that can be
-# encoded in a single instruction.
----
-name: too_many_skips_single_instruction
-tracksRegLiveness: true
-body: |
- ; CHECK-LABEL: name: too_many_skips_single_instruction
- ; CHECK: bb.0:
- ; CHECK-NEXT: successors: %bb.1(0x80000000)
- ; CHECK-NEXT: liveins: $vgpr0
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: S_SINGLEUSE_VDST 1
- ; CHECK-NEXT: $vgpr1 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr2 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr3 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr4 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr5 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr6 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr7 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr8 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr9 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr10 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr11 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr12 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr13 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr14 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr15 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr16 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: S_SINGLEUSE_VDST 1
- ; CHECK-NEXT: $vgpr17 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: bb.1:
- ; CHECK-NEXT: liveins: $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15, $vgpr16
- bb.0:
- liveins: $vgpr0
- $vgpr1 = V_MOV_B32_e32 $vgpr0, implicit $exec
-
- $vgpr2 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr3 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr4 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr5 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr6 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr7 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr8 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr9 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr10 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr11 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr12 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr13 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr14 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr15 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr16 = V_MOV_B32_e32 $vgpr0, implicit $exec
-
- $vgpr17 = V_MOV_B32_e32 $vgpr0, implicit $exec
- bb.1:
- liveins: $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15, $vgpr16
-...
-
-
-# Maximum possible encoding value with all bits of the immediate set
----
-name: all_immediate_bits_set
-tracksRegLiveness: true
-body: |
- ; CHECK-LABEL: name: all_immediate_bits_set
- ; CHECK: bb.0:
- ; CHECK-NEXT: successors: %bb.1(0x80000000)
- ; CHECK-NEXT: liveins: $vgpr0
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: S_SINGLEUSE_VDST 65535
- ; CHECK-NEXT: $vgpr1 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr2 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr3 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr4 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr5 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr6 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr7 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr8 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr9 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr10 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr11 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr12 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr13 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr14 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr15 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr16 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr17 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr18 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr19 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr20 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr21 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr22 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr23 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr24 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr25 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr26 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr27 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr28 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr29 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr30 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr31 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr32 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr33 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr34 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr35 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr36 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr37 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr38 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr39 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr40 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr41 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr42 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr43 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: bb.1:
- ; CHECK-NEXT: liveins: $vgpr16, $vgpr17, $vgpr18, $vgpr19, $vgpr20, $vgpr21, $vgpr22, $vgpr30, $vgpr31, $vgpr31, $vgpr32, $vgpr33, $vgpr34, $vgpr35, $vgpr36
- bb.0:
- liveins: $vgpr0
- $vgpr1 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr2 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr3 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr4 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr5 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr6 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr7 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr8 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr9 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr10 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr11 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr12 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr13 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr14 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr15 = V_MOV_B32_e32 $vgpr0, implicit $exec
-
- $vgpr16 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr17 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr18 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr19 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr20 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr21 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr22 = V_MOV_B32_e32 $vgpr0, implicit $exec
-
- $vgpr23 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr24 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr25 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr26 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr27 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr28 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr29 = V_MOV_B32_e32 $vgpr0, implicit $exec
-
- $vgpr30 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr31 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr32 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr33 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr34 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr35 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr36 = V_MOV_B32_e32 $vgpr0, implicit $exec
-
- $vgpr37 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr38 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr39 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr40 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr41 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr42 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr43 = V_MOV_B32_e32 $vgpr0, implicit $exec
- bb.1:
- liveins: $vgpr16, $vgpr17, $vgpr18, $vgpr19, $vgpr20, $vgpr21, $vgpr22, $vgpr30, $vgpr31, $vgpr32, $vgpr33, $vgpr34, $vgpr35, $vgpr36
-
-...
-
-# Tests for multi-cycle instructions that are explicitly excluded.
-
-# Valid producers but invalid consumer opcodes.
----
-name: v_mul_hi_u32_e64
-tracksRegLiveness: true
-body: |
- ; CHECK-LABEL: name: v_mul_hi_u32_e64
- ; CHECK: bb.0:
- ; CHECK-NEXT: successors: %bb.1(0x80000000)
- ; CHECK-NEXT: liveins: $vgpr0
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: $vgpr1 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: S_SINGLEUSE_VDST 1
- ; CHECK-NEXT: $vgpr2 = V_MUL_HI_U32_e64 $vgpr0, $vgpr1, implicit $exec
- ; CHECK-NEXT: $vgpr3 = V_MOV_B32_e32 $vgpr2, implicit $exec
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: bb.1:
- ; CHECK-NEXT: liveins: $vgpr0, $vgpr3
- bb.0:
- liveins: $vgpr0
- $vgpr1 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr2 = V_MUL_HI_U32_e64 $vgpr0, $vgpr1, implicit $exec
- $vgpr3 = V_MOV_B32_e32 $vgpr2, implicit $exec
- bb.1:
- liveins: $vgpr0, $vgpr3
-...
-
----
-name: v_cmpx_t_u64_e64
-tracksRegLiveness: true
-body: |
- ; CHECK-LABEL: name: v_cmpx_t_u64_e64
- ; CHECK: bb.0:
- ; CHECK-NEXT: successors: %bb.1(0x80000000)
- ; CHECK-NEXT: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: $vgpr1 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: S_SINGLEUSE_VDST 1
- ; CHECK-NEXT: $sgpr0 = V_CMPX_EQ_U64_e64 $vgpr0_vgpr1, $vgpr2_vgpr3, implicit-def $exec, implicit $exec
- ; CHECK-NEXT: S_BRANCH %bb.1
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: bb.1:
- ; CHECK-NEXT: liveins: $vgpr0
- bb.0:
- liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3
- $vgpr1 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $sgpr0 = V_CMPX_EQ_U64_e64 $vgpr0_vgpr1, $vgpr2_vgpr3, implicit-def $exec, implicit $exec
- S_BRANCH %bb.1
- bb.1:
- liveins: $vgpr0
-...
-
----
-name: v_lshlrev_b64_e64
-tracksRegLiveness: true
-body: |
- ; CHECK-LABEL: name: v_lshlrev_b64_e64
- ; CHECK: bb.0:
- ; CHECK-NEXT: successors: %bb.1(0x80000000)
- ; CHECK-NEXT: liveins: $vgpr0_vgpr1
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: $vgpr2_vgpr3 = V_MOV_B64_e64 $vgpr0_vgpr1, implicit $exec
- ; CHECK-NEXT: $vgpr4_vgpr5 = V_LSHLREV_B64_e64 0, $vgpr2_vgpr3, implicit $exec
- ; CHECK-NEXT: S_SINGLEUSE_VDST 1
- ; CHECK-NEXT: $vgpr6_vgpr7 = V_LSHLREV_B64_e64 0, $vgpr4_vgpr5, implicit $exec
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: bb.1:
- ; CHECK-NEXT: liveins: $vgpr4_vgpr5
- bb.0:
- liveins: $vgpr0_vgpr1
- $vgpr2_vgpr3 = V_MOV_B64_e64 $vgpr0_vgpr1, implicit $exec
- $vgpr4_vgpr5 = V_LSHLREV_B64_e64 0, $vgpr2_vgpr3, implicit $exec
- $vgpr6_vgpr7 = V_LSHLREV_B64_e64 0, $vgpr4_vgpr5, implicit $exec
- bb.1:
- liveins: $vgpr4_vgpr5
-...
-
-# Invalid producers but valid consumer opcodes.
----
-name: v_movereld_b32_e32
-tracksRegLiveness: true
-body: |
- ; CHECK-LABEL: name: v_movereld_b32_e32
- ; CHECK: bb.0:
- ; CHECK-NEXT: successors: %bb.1(0x80000000)
- ; CHECK-NEXT: liveins: $vgpr0, $vgpr2
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: $m0 = S_MOV_B32 0
- ; CHECK-NEXT: S_SINGLEUSE_VDST 1
- ; CHECK-NEXT: $vgpr1 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: V_MOVRELD_B32_e32 $vgpr2, $vgpr1, implicit $m0, implicit $exec, implicit-def $vgpr1_vgpr2, implicit undef $vgpr1_vgpr2(tied-def 4)
- ; CHECK-NEXT: $vgpr3 = V_ADD_U32_e32 $vgpr2, $vgpr1, implicit $exec
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: bb.1:
- ; CHECK-NEXT: liveins: $vgpr3
- bb.0:
- liveins: $vgpr0, $vgpr2
- $m0 = S_MOV_B32 0
- $vgpr1 = V_MOV_B32_e32 $vgpr0, implicit $exec
- V_MOVRELD_B32_e32 $vgpr2, $vgpr1, implicit $m0, implicit $exec, implicit-def $vgpr1_vgpr2, implicit undef $vgpr1_vgpr2(tied-def 4)
- $vgpr3 = V_ADD_U32_e32 $vgpr2, $vgpr1, implicit $exec
- bb.1:
- liveins: $vgpr3
-...
-
-# Invalid producers and invalid consumer opcodes.
----
-name: v_writelane_b32
-tracksRegLiveness: true
-body: |
- ; CHECK-LABEL: name: v_writelane_b32
- ; CHECK: bb.0:
- ; CHECK-NEXT: successors: %bb.1(0x80000000)
- ; CHECK-NEXT: liveins: $vgpr0, $sgpr0
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: $vgpr1 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: $vgpr1 = V_WRITELANE_B32 $sgpr0, 0, $vgpr1
- ; CHECK-NEXT: S_SINGLEUSE_VDST 1
- ; CHECK-NEXT: $vgpr2 = V_MOV_B32_e32 $vgpr1, implicit $exec
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: bb.1:
- ; CHECK-NEXT: liveins: $vgpr0
- bb.0:
- liveins: $vgpr0, $sgpr0
- $vgpr1 = V_MOV_B32_e32 $vgpr0, implicit $exec
- $vgpr1 = V_WRITELANE_B32 $sgpr0, 0, $vgpr1
- $vgpr2 = V_MOV_B32_e32 $vgpr1, implicit $exec
- bb.1:
- liveins: $vgpr0
-...
-
-# DPP instructions cannot be single-use producers or consumers.
----
-name: V_ADD_NC_U32_dpp
-tracksRegLiveness: true
-body: |
- ; CHECK-LABEL: name: V_ADD_NC_U32_dpp
- ; CHECK: bb.0:
- ; CHECK-NEXT: successors: %bb.1(0x80000000)
- ; CHECK-NEXT: liveins: $vgpr0, $vcc
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: $vgpr0 = V_ADDC_U32_dpp $vgpr0, $vgpr0, $vgpr0, 1, 15, 15, 1, implicit-def $vcc_lo, implicit $vcc_lo, implicit $exec
- ; CHECK-NEXT: $vgpr0 = V_ADDC_U32_dpp $vgpr0, $vgpr0, $vgpr0, 1, 15, 15, 1, implicit-def $vcc_lo, implicit $vcc_lo, implicit $exec
- ; CHECK-NEXT: $vgpr0 = V_ADDC_U32_dpp $vgpr0, $vgpr0, $vgpr0, 1, 15, 15, 1, implicit-def $vcc_lo, implicit $vcc_lo, implicit $exec
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: bb.1:
- ; CHECK-NEXT: liveins: $vgpr0
- bb.0:
- liveins: $vgpr0, $vcc
- $vgpr0 = V_ADDC_U32_dpp $vgpr0, $vgpr0, $vgpr0, 1, 15, 15, 1, implicit-def $vcc, implicit $vcc, implicit $exec
- $vgpr0 = V_ADDC_U32_dpp $vgpr0, $vgpr0, $vgpr0, 1, 15, 15, 1, implicit-def $vcc, implicit $vcc, implicit $exec
- $vgpr0 = V_ADDC_U32_dpp $vgpr0, $vgpr0, $vgpr0, 1, 15, 15, 1, implicit-def $vcc, implicit $vcc, implicit $exec
- bb.1:
- liveins: $vgpr0
-...
-
-# Exception to the rule that DPP instructions
-# cannot be single-use producers or consumers.
----
-name: V_INTERP_MOV_F32
-tracksRegLiveness: true
-body: |
- ; CHECK-LABEL: name: V_INTERP_MOV_F32
- ; CHECK: bb.0:
- ; CHECK-NEXT: successors: %bb.1(0x80000000)
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: S_SINGLEUSE_VDST 1
- ; CHECK-NEXT: $vgpr0 = V_INTERP_MOV_F32 0, 0, 0, implicit $mode, implicit $m0, implicit $exec
- ; CHECK-NEXT: $vgpr1 = V_MOV_B32_e32 $vgpr0, implicit $exec
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: bb.1:
- ; CHECK-NEXT: liveins: $vgpr1
- bb.0:
- $vgpr0 = V_INTERP_MOV_F32 0, 0, 0, implicit $mode, implicit $m0, implicit $exec
- $vgpr1 = V_MOV_B32_e32 $vgpr0, implicit $exec
- bb.1:
- liveins: $vgpr1
-...
-
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.readfirstlane.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.readfirstlane.ll
index b061d53..39a3b1c 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.readfirstlane.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.readfirstlane.ll
@@ -2,11 +2,118 @@
; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=fiji -verify-machineinstrs < %s | FileCheck -check-prefix=CHECK-SDAG -enable-var-scope %s
; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=fiji -verify-machineinstrs -global-isel < %s | FileCheck -check-prefix=CHECK-GISEL -enable-var-scope %s
-declare i32 @llvm.amdgcn.readfirstlane(i32) #0
-declare i64 @llvm.amdgcn.readfirstlane.i64(i64) #0
-declare double @llvm.amdgcn.readfirstlane.f64(double) #0
+define void @test_readfirstlane_i1(ptr addrspace(1) %out, i1 %src) {
+; CHECK-SDAG-LABEL: test_readfirstlane_i1:
+; CHECK-SDAG: ; %bb.0:
+; CHECK-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-SDAG-NEXT: v_readfirstlane_b32 s4, v2
+; CHECK-SDAG-NEXT: s_and_b32 s4, s4, 1
+; CHECK-SDAG-NEXT: v_mov_b32_e32 v2, s4
+; CHECK-SDAG-NEXT: flat_store_byte v[0:1], v2
+; CHECK-SDAG-NEXT: s_waitcnt vmcnt(0)
+; CHECK-SDAG-NEXT: s_setpc_b64 s[30:31]
+;
+; CHECK-GISEL-LABEL: test_readfirstlane_i1:
+; CHECK-GISEL: ; %bb.0:
+; CHECK-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-GISEL-NEXT: v_readfirstlane_b32 s4, v2
+; CHECK-GISEL-NEXT: s_and_b32 s4, s4, 1
+; CHECK-GISEL-NEXT: v_mov_b32_e32 v2, s4
+; CHECK-GISEL-NEXT: flat_store_byte v[0:1], v2
+; CHECK-GISEL-NEXT: s_waitcnt vmcnt(0)
+; CHECK-GISEL-NEXT: s_setpc_b64 s[30:31]
+ %readfirstlane = call i1 @llvm.amdgcn.readfirstlane.i1(i1 %src)
+ store i1 %readfirstlane, ptr addrspace(1) %out, align 4
+ ret void
+}
+
+define void @test_readfirstlane_i1_inreg(ptr addrspace(1) %out, i1 inreg %src) {
+; CHECK-SDAG-LABEL: test_readfirstlane_i1_inreg:
+; CHECK-SDAG: ; %bb.0:
+; CHECK-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-SDAG-NEXT: s_and_b32 s4, s6, 1
+; CHECK-SDAG-NEXT: v_mov_b32_e32 v2, s4
+; CHECK-SDAG-NEXT: flat_store_byte v[0:1], v2
+; CHECK-SDAG-NEXT: s_waitcnt vmcnt(0)
+; CHECK-SDAG-NEXT: s_setpc_b64 s[30:31]
+;
+; CHECK-GISEL-LABEL: test_readfirstlane_i1_inreg:
+; CHECK-GISEL: ; %bb.0:
+; CHECK-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-GISEL-NEXT: s_and_b32 s4, s6, 1
+; CHECK-GISEL-NEXT: v_mov_b32_e32 v2, s4
+; CHECK-GISEL-NEXT: flat_store_byte v[0:1], v2
+; CHECK-GISEL-NEXT: s_waitcnt vmcnt(0)
+; CHECK-GISEL-NEXT: s_setpc_b64 s[30:31]
+ %readfirstlane = call i1 @llvm.amdgcn.readfirstlane.i1(i1 %src)
+ store i1 %readfirstlane, ptr addrspace(1) %out, align 4
+ ret void
+}
+
+define void @test_readfirstlane_i1_select(ptr addrspace(1) %out, i32 %src, i32 %src1) {
+; CHECK-SDAG-LABEL: test_readfirstlane_i1_select:
+; CHECK-SDAG: ; %bb.0:
+; CHECK-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-SDAG-NEXT: v_cmp_lt_u32_e32 vcc, 42, v2
+; CHECK-SDAG-NEXT: v_cndmask_b32_e64 v4, 0, 1, vcc
+; CHECK-SDAG-NEXT: v_readfirstlane_b32 s4, v4
+; CHECK-SDAG-NEXT: s_bitcmp1_b32 s4, 0
+; CHECK-SDAG-NEXT: s_cselect_b64 vcc, -1, 0
+; CHECK-SDAG-NEXT: v_cndmask_b32_e32 v2, v3, v2, vcc
+; CHECK-SDAG-NEXT: flat_store_dword v[0:1], v2
+; CHECK-SDAG-NEXT: s_waitcnt vmcnt(0)
+; CHECK-SDAG-NEXT: s_setpc_b64 s[30:31]
+;
+; CHECK-GISEL-LABEL: test_readfirstlane_i1_select:
+; CHECK-GISEL: ; %bb.0:
+; CHECK-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-GISEL-NEXT: v_cmp_lt_u32_e32 vcc, 42, v2
+; CHECK-GISEL-NEXT: v_cndmask_b32_e64 v4, 0, 1, vcc
+; CHECK-GISEL-NEXT: v_readfirstlane_b32 s4, v4
+; CHECK-GISEL-NEXT: s_and_b32 s4, 1, s4
+; CHECK-GISEL-NEXT: v_cmp_ne_u32_e64 vcc, 0, s4
+; CHECK-GISEL-NEXT: v_cndmask_b32_e32 v2, v3, v2, vcc
+; CHECK-GISEL-NEXT: flat_store_dword v[0:1], v2
+; CHECK-GISEL-NEXT: s_waitcnt vmcnt(0)
+; CHECK-GISEL-NEXT: s_setpc_b64 s[30:31]
+ %cmp = icmp ugt i32 %src, 42
+ %readfirstlane = call i1 @llvm.amdgcn.readfirstlane.i1(i1 %cmp)
+ %sel = select i1 %readfirstlane, i32 %src, i32 %src1
+ store i32 %sel, ptr addrspace(1) %out, align 4
+ ret void
+}
-define void @test_readfirstlane_i32(ptr addrspace(1) %out, i32 %src) #1 {
+define void @test_readfirstlane_i1_load(ptr addrspace(1) %out, ptr addrspace(1) %in) {
+; CHECK-SDAG-LABEL: test_readfirstlane_i1_load:
+; CHECK-SDAG: ; %bb.0:
+; CHECK-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-SDAG-NEXT: flat_load_ubyte v2, v[2:3]
+; CHECK-SDAG-NEXT: s_waitcnt vmcnt(0)
+; CHECK-SDAG-NEXT: v_readfirstlane_b32 s4, v2
+; CHECK-SDAG-NEXT: s_and_b32 s4, s4, 1
+; CHECK-SDAG-NEXT: v_mov_b32_e32 v2, s4
+; CHECK-SDAG-NEXT: flat_store_byte v[0:1], v2
+; CHECK-SDAG-NEXT: s_waitcnt vmcnt(0)
+; CHECK-SDAG-NEXT: s_setpc_b64 s[30:31]
+;
+; CHECK-GISEL-LABEL: test_readfirstlane_i1_load:
+; CHECK-GISEL: ; %bb.0:
+; CHECK-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-GISEL-NEXT: flat_load_ubyte v2, v[2:3]
+; CHECK-GISEL-NEXT: s_waitcnt vmcnt(0)
+; CHECK-GISEL-NEXT: v_readfirstlane_b32 s4, v2
+; CHECK-GISEL-NEXT: s_and_b32 s4, s4, 1
+; CHECK-GISEL-NEXT: v_mov_b32_e32 v2, s4
+; CHECK-GISEL-NEXT: flat_store_byte v[0:1], v2
+; CHECK-GISEL-NEXT: s_waitcnt vmcnt(0)
+; CHECK-GISEL-NEXT: s_setpc_b64 s[30:31]
+ %load = load i1, ptr addrspace(1) %in
+ %readfirstlane = call i1 @llvm.amdgcn.readfirstlane.i1(i1 %load)
+ store i1 %readfirstlane, ptr addrspace(1) %out, align 4
+ ret void
+}
+
+define void @test_readfirstlane_i32(ptr addrspace(1) %out, i32 %src) {
; CHECK-SDAG-LABEL: test_readfirstlane_i32:
; CHECK-SDAG: ; %bb.0:
; CHECK-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -29,7 +136,7 @@ define void @test_readfirstlane_i32(ptr addrspace(1) %out, i32 %src) #1 {
ret void
}
-define void @test_readfirstlane_i64(ptr addrspace(1) %out, i64 %src) #1 {
+define void @test_readfirstlane_i64(ptr addrspace(1) %out, i64 %src) {
; CHECK-SDAG-LABEL: test_readfirstlane_i64:
; CHECK-SDAG: ; %bb.0:
; CHECK-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -56,7 +163,7 @@ define void @test_readfirstlane_i64(ptr addrspace(1) %out, i64 %src) #1 {
ret void
}
-define void @test_readfirstlane_f64(ptr addrspace(1) %out, double %src) #1 {
+define void @test_readfirstlane_f64(ptr addrspace(1) %out, double %src) {
; CHECK-SDAG-LABEL: test_readfirstlane_f64:
; CHECK-SDAG: ; %bb.0:
; CHECK-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -83,7 +190,7 @@ define void @test_readfirstlane_f64(ptr addrspace(1) %out, double %src) #1 {
ret void
}
-define amdgpu_kernel void @test_readfirstlane_imm_i32(ptr addrspace(1) %out) #1 {
+define amdgpu_kernel void @test_readfirstlane_imm_i32(ptr addrspace(1) %out) {
; CHECK-SDAG-LABEL: test_readfirstlane_imm_i32:
; CHECK-SDAG: ; %bb.0:
; CHECK-SDAG-NEXT: s_mov_b32 s0, 32
@@ -104,7 +211,7 @@ define amdgpu_kernel void @test_readfirstlane_imm_i32(ptr addrspace(1) %out) #1
ret void
}
-define amdgpu_kernel void @test_readfirstlane_imm_i64(ptr addrspace(1) %out) #1 {
+define amdgpu_kernel void @test_readfirstlane_imm_i64(ptr addrspace(1) %out) {
; CHECK-SDAG-LABEL: test_readfirstlane_imm_i64:
; CHECK-SDAG: ; %bb.0:
; CHECK-SDAG-NEXT: s_mov_b64 s[0:1], 32
@@ -125,7 +232,7 @@ define amdgpu_kernel void @test_readfirstlane_imm_i64(ptr addrspace(1) %out) #1
ret void
}
-define amdgpu_kernel void @test_readfirstlane_imm_f64(ptr addrspace(1) %out) #1 {
+define amdgpu_kernel void @test_readfirstlane_imm_f64(ptr addrspace(1) %out) {
; CHECK-SDAG-LABEL: test_readfirstlane_imm_f64:
; CHECK-SDAG: ; %bb.0:
; CHECK-SDAG-NEXT: s_mov_b32 s0, 0
@@ -148,7 +255,7 @@ define amdgpu_kernel void @test_readfirstlane_imm_f64(ptr addrspace(1) %out) #1
ret void
}
-define amdgpu_kernel void @test_readfirstlane_imm_fold_i32(ptr addrspace(1) %out) #1 {
+define amdgpu_kernel void @test_readfirstlane_imm_fold_i32(ptr addrspace(1) %out) {
; CHECK-SDAG-LABEL: test_readfirstlane_imm_fold_i32:
; CHECK-SDAG: ; %bb.0:
; CHECK-SDAG-NEXT: s_load_dwordx2 s[0:1], s[6:7], 0x0
@@ -173,7 +280,7 @@ define amdgpu_kernel void @test_readfirstlane_imm_fold_i32(ptr addrspace(1) %out
ret void
}
-define amdgpu_kernel void @test_readfirstlane_imm_fold_i64(ptr addrspace(1) %out) #1 {
+define amdgpu_kernel void @test_readfirstlane_imm_fold_i64(ptr addrspace(1) %out) {
; CHECK-SDAG-LABEL: test_readfirstlane_imm_fold_i64:
; CHECK-SDAG: ; %bb.0:
; CHECK-SDAG-NEXT: s_load_dwordx2 s[0:1], s[6:7], 0x0
@@ -201,7 +308,7 @@ define amdgpu_kernel void @test_readfirstlane_imm_fold_i64(ptr addrspace(1) %out
ret void
}
-define amdgpu_kernel void @test_readfirstlane_imm_fold_f64(ptr addrspace(1) %out) #1 {
+define amdgpu_kernel void @test_readfirstlane_imm_fold_f64(ptr addrspace(1) %out) {
; CHECK-SDAG-LABEL: test_readfirstlane_imm_fold_f64:
; CHECK-SDAG: ; %bb.0:
; CHECK-SDAG-NEXT: s_load_dwordx2 s[0:1], s[6:7], 0x0
@@ -230,7 +337,7 @@ define amdgpu_kernel void @test_readfirstlane_imm_fold_f64(ptr addrspace(1) %out
ret void
}
-define amdgpu_kernel void @test_readfirstlane_m0(ptr addrspace(1) %out) #1 {
+define amdgpu_kernel void @test_readfirstlane_m0(ptr addrspace(1) %out) {
; CHECK-SDAG-LABEL: test_readfirstlane_m0:
; CHECK-SDAG: ; %bb.0:
; CHECK-SDAG-NEXT: s_load_dwordx2 s[0:1], s[6:7], 0x0
@@ -262,7 +369,7 @@ define amdgpu_kernel void @test_readfirstlane_m0(ptr addrspace(1) %out) #1 {
ret void
}
-define amdgpu_kernel void @test_readfirstlane_copy_from_sgpr_i32(ptr addrspace(1) %out) #1 {
+define amdgpu_kernel void @test_readfirstlane_copy_from_sgpr_i32(ptr addrspace(1) %out) {
; CHECK-SDAG-LABEL: test_readfirstlane_copy_from_sgpr_i32:
; CHECK-SDAG: ; %bb.0:
; CHECK-SDAG-NEXT: s_load_dwordx2 s[0:1], s[6:7], 0x0
@@ -294,7 +401,7 @@ define amdgpu_kernel void @test_readfirstlane_copy_from_sgpr_i32(ptr addrspace(1
ret void
}
-define amdgpu_kernel void @test_readfirstlane_copy_from_sgpr_i64(ptr addrspace(1) %out) #1 {
+define amdgpu_kernel void @test_readfirstlane_copy_from_sgpr_i64(ptr addrspace(1) %out) {
; CHECK-SDAG-LABEL: test_readfirstlane_copy_from_sgpr_i64:
; CHECK-SDAG: ; %bb.0:
; CHECK-SDAG-NEXT: s_load_dwordx2 s[0:1], s[6:7], 0x0
@@ -328,7 +435,7 @@ define amdgpu_kernel void @test_readfirstlane_copy_from_sgpr_i64(ptr addrspace(1
ret void
}
-define amdgpu_kernel void @test_readfirstlane_copy_from_sgpr_f64(ptr addrspace(1) %out) #1 {
+define amdgpu_kernel void @test_readfirstlane_copy_from_sgpr_f64(ptr addrspace(1) %out) {
; CHECK-SDAG-LABEL: test_readfirstlane_copy_from_sgpr_f64:
; CHECK-SDAG: ; %bb.0:
; CHECK-SDAG-NEXT: s_load_dwordx2 s[0:1], s[6:7], 0x0
@@ -362,7 +469,7 @@ define amdgpu_kernel void @test_readfirstlane_copy_from_sgpr_f64(ptr addrspace(1
ret void
}
-define amdgpu_kernel void @test_readfirstlane_fi(ptr addrspace(1) %out) #1 {
+define amdgpu_kernel void @test_readfirstlane_fi(ptr addrspace(1) %out) {
; CHECK-SDAG-LABEL: test_readfirstlane_fi:
; CHECK-SDAG: ; %bb.0:
; CHECK-SDAG-NEXT: s_add_u32 s0, s0, s15
@@ -593,6 +700,3 @@ define void @test_readfirstlane_v8i16(ptr addrspace(1) %out, <8 x i16> %src) {
call void asm sideeffect "; use $0", "s"(<8 x i16> %x)
ret void
}
-
-attributes #0 = { nounwind readnone convergent }
-attributes #1 = { nounwind }
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.s.barrier.wait.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.s.barrier.wait.ll
index 6e029f7..4fb28b3 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.s.barrier.wait.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.s.barrier.wait.ll
@@ -1,43 +1,43 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1200 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN %s
-; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1200 -verify-machineinstrs < %s | FileCheck -check-prefixes=GLOBAL-ISEL %s
+; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1200 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX12,GFX12-SDAG %s
+; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1200 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX12,GFX12-GISEL %s
define amdgpu_kernel void @test1_s_barrier_signal(ptr addrspace(1) %out) #0 {
-; GCN-LABEL: test1_s_barrier_signal:
-; GCN: ; %bb.0: ; %entry
-; GCN-NEXT: s_load_b64 s[0:1], s[2:3], 0x24
-; GCN-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GCN-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_lshlrev_b32 v3, 2, v0
-; GCN-NEXT: v_mul_u32_u24_e32 v1, v0, v0
-; GCN-NEXT: v_sub_nc_u32_e32 v0, v1, v0
-; GCN-NEXT: s_wait_kmcnt 0x0
-; GCN-NEXT: global_store_b32 v3, v2, s[0:1]
-; GCN-NEXT: s_wait_storecnt 0x0
-; GCN-NEXT: s_barrier_signal -1
-; GCN-NEXT: s_barrier_wait -1
-; GCN-NEXT: global_store_b32 v3, v0, s[0:1]
-; GCN-NEXT: s_nop 0
-; GCN-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
-; GCN-NEXT: s_endpgm
+; GFX12-SDAG-LABEL: test1_s_barrier_signal:
+; GFX12-SDAG: ; %bb.0: ; %entry
+; GFX12-SDAG-NEXT: s_load_b64 s[0:1], s[2:3], 0x24
+; GFX12-SDAG-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-SDAG-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_lshlrev_b32 v3, 2, v0
+; GFX12-SDAG-NEXT: v_mul_u32_u24_e32 v1, v0, v0
+; GFX12-SDAG-NEXT: v_sub_nc_u32_e32 v0, v1, v0
+; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0
+; GFX12-SDAG-NEXT: global_store_b32 v3, v2, s[0:1]
+; GFX12-SDAG-NEXT: s_wait_storecnt 0x0
+; GFX12-SDAG-NEXT: s_barrier_signal -1
+; GFX12-SDAG-NEXT: s_barrier_wait -1
+; GFX12-SDAG-NEXT: global_store_b32 v3, v0, s[0:1]
+; GFX12-SDAG-NEXT: s_nop 0
+; GFX12-SDAG-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-SDAG-NEXT: s_endpgm
;
-; GLOBAL-ISEL-LABEL: test1_s_barrier_signal:
-; GLOBAL-ISEL: ; %bb.0: ; %entry
-; GLOBAL-ISEL-NEXT: s_load_b64 s[0:1], s[2:3], 0x24
-; GLOBAL-ISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; GLOBAL-ISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GLOBAL-ISEL-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_lshlrev_b32 v3, 2, v0
-; GLOBAL-ISEL-NEXT: v_mul_lo_u32 v1, v0, v0
-; GLOBAL-ISEL-NEXT: v_sub_nc_u32_e32 v0, v1, v0
-; GLOBAL-ISEL-NEXT: s_wait_kmcnt 0x0
-; GLOBAL-ISEL-NEXT: global_store_b32 v3, v2, s[0:1]
-; GLOBAL-ISEL-NEXT: s_wait_storecnt 0x0
-; GLOBAL-ISEL-NEXT: s_barrier_signal -1
-; GLOBAL-ISEL-NEXT: s_barrier_wait -1
-; GLOBAL-ISEL-NEXT: global_store_b32 v3, v0, s[0:1]
-; GLOBAL-ISEL-NEXT: s_nop 0
-; GLOBAL-ISEL-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
-; GLOBAL-ISEL-NEXT: s_endpgm
+; GFX12-GISEL-LABEL: test1_s_barrier_signal:
+; GFX12-GISEL: ; %bb.0: ; %entry
+; GFX12-GISEL-NEXT: s_load_b64 s[0:1], s[2:3], 0x24
+; GFX12-GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-GISEL-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_lshlrev_b32 v3, 2, v0
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v1, v0, v0
+; GFX12-GISEL-NEXT: v_sub_nc_u32_e32 v0, v1, v0
+; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX12-GISEL-NEXT: global_store_b32 v3, v2, s[0:1]
+; GFX12-GISEL-NEXT: s_wait_storecnt 0x0
+; GFX12-GISEL-NEXT: s_barrier_signal -1
+; GFX12-GISEL-NEXT: s_barrier_wait -1
+; GFX12-GISEL-NEXT: global_store_b32 v3, v0, s[0:1]
+; GFX12-GISEL-NEXT: s_nop 0
+; GFX12-GISEL-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-GISEL-NEXT: s_endpgm
entry:
%tmp = call i32 @llvm.amdgcn.workitem.id.x()
%tmp1 = getelementptr i32, ptr addrspace(1) %out, i32 %tmp
@@ -51,41 +51,41 @@ entry:
}
define amdgpu_kernel void @test2_s_barrier_signal(ptr addrspace(1) %out) #0 {
-; GCN-LABEL: test2_s_barrier_signal:
-; GCN: ; %bb.0: ; %entry
-; GCN-NEXT: s_load_b64 s[0:1], s[2:3], 0x24
-; GCN-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GCN-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_lshlrev_b32 v3, 2, v0
-; GCN-NEXT: v_mul_u32_u24_e32 v1, v0, v0
-; GCN-NEXT: v_sub_nc_u32_e32 v0, v1, v0
-; GCN-NEXT: s_wait_kmcnt 0x0
-; GCN-NEXT: global_store_b32 v3, v2, s[0:1]
-; GCN-NEXT: s_wait_storecnt 0x0
-; GCN-NEXT: s_barrier_signal 1
-; GCN-NEXT: s_barrier_wait 1
-; GCN-NEXT: global_store_b32 v3, v0, s[0:1]
-; GCN-NEXT: s_nop 0
-; GCN-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
-; GCN-NEXT: s_endpgm
+; GFX12-SDAG-LABEL: test2_s_barrier_signal:
+; GFX12-SDAG: ; %bb.0: ; %entry
+; GFX12-SDAG-NEXT: s_load_b64 s[0:1], s[2:3], 0x24
+; GFX12-SDAG-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-SDAG-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_lshlrev_b32 v3, 2, v0
+; GFX12-SDAG-NEXT: v_mul_u32_u24_e32 v1, v0, v0
+; GFX12-SDAG-NEXT: v_sub_nc_u32_e32 v0, v1, v0
+; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0
+; GFX12-SDAG-NEXT: global_store_b32 v3, v2, s[0:1]
+; GFX12-SDAG-NEXT: s_wait_storecnt 0x0
+; GFX12-SDAG-NEXT: s_barrier_signal 1
+; GFX12-SDAG-NEXT: s_barrier_wait 1
+; GFX12-SDAG-NEXT: global_store_b32 v3, v0, s[0:1]
+; GFX12-SDAG-NEXT: s_nop 0
+; GFX12-SDAG-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-SDAG-NEXT: s_endpgm
;
-; GLOBAL-ISEL-LABEL: test2_s_barrier_signal:
-; GLOBAL-ISEL: ; %bb.0: ; %entry
-; GLOBAL-ISEL-NEXT: s_load_b64 s[0:1], s[2:3], 0x24
-; GLOBAL-ISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; GLOBAL-ISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GLOBAL-ISEL-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_lshlrev_b32 v3, 2, v0
-; GLOBAL-ISEL-NEXT: v_mul_lo_u32 v1, v0, v0
-; GLOBAL-ISEL-NEXT: v_sub_nc_u32_e32 v0, v1, v0
-; GLOBAL-ISEL-NEXT: s_wait_kmcnt 0x0
-; GLOBAL-ISEL-NEXT: global_store_b32 v3, v2, s[0:1]
-; GLOBAL-ISEL-NEXT: s_wait_storecnt 0x0
-; GLOBAL-ISEL-NEXT: s_barrier_signal 1
-; GLOBAL-ISEL-NEXT: s_barrier_wait 1
-; GLOBAL-ISEL-NEXT: global_store_b32 v3, v0, s[0:1]
-; GLOBAL-ISEL-NEXT: s_nop 0
-; GLOBAL-ISEL-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
-; GLOBAL-ISEL-NEXT: s_endpgm
+; GFX12-GISEL-LABEL: test2_s_barrier_signal:
+; GFX12-GISEL: ; %bb.0: ; %entry
+; GFX12-GISEL-NEXT: s_load_b64 s[0:1], s[2:3], 0x24
+; GFX12-GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-GISEL-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_lshlrev_b32 v3, 2, v0
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v1, v0, v0
+; GFX12-GISEL-NEXT: v_sub_nc_u32_e32 v0, v1, v0
+; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX12-GISEL-NEXT: global_store_b32 v3, v2, s[0:1]
+; GFX12-GISEL-NEXT: s_wait_storecnt 0x0
+; GFX12-GISEL-NEXT: s_barrier_signal 1
+; GFX12-GISEL-NEXT: s_barrier_wait 1
+; GFX12-GISEL-NEXT: global_store_b32 v3, v0, s[0:1]
+; GFX12-GISEL-NEXT: s_nop 0
+; GFX12-GISEL-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-GISEL-NEXT: s_endpgm
entry:
%tmp = call i32 @llvm.amdgcn.workitem.id.x()
%tmp1 = getelementptr i32, ptr addrspace(1) %out, i32 %tmp
@@ -99,41 +99,41 @@ entry:
}
define amdgpu_kernel void @test3_s_barrier_signal(ptr addrspace(1) %out) #0 {
-; GCN-LABEL: test3_s_barrier_signal:
-; GCN: ; %bb.0: ; %entry
-; GCN-NEXT: s_load_b64 s[0:1], s[2:3], 0x24
-; GCN-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GCN-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_lshlrev_b32 v3, 2, v0
-; GCN-NEXT: v_mul_u32_u24_e32 v1, v0, v0
-; GCN-NEXT: v_sub_nc_u32_e32 v0, v1, v0
-; GCN-NEXT: s_wait_kmcnt 0x0
-; GCN-NEXT: global_store_b32 v3, v2, s[0:1]
-; GCN-NEXT: s_wait_storecnt 0x0
-; GCN-NEXT: s_barrier_signal 0
-; GCN-NEXT: s_barrier_wait 0
-; GCN-NEXT: global_store_b32 v3, v0, s[0:1]
-; GCN-NEXT: s_nop 0
-; GCN-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
-; GCN-NEXT: s_endpgm
+; GFX12-SDAG-LABEL: test3_s_barrier_signal:
+; GFX12-SDAG: ; %bb.0: ; %entry
+; GFX12-SDAG-NEXT: s_load_b64 s[0:1], s[2:3], 0x24
+; GFX12-SDAG-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-SDAG-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_lshlrev_b32 v3, 2, v0
+; GFX12-SDAG-NEXT: v_mul_u32_u24_e32 v1, v0, v0
+; GFX12-SDAG-NEXT: v_sub_nc_u32_e32 v0, v1, v0
+; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0
+; GFX12-SDAG-NEXT: global_store_b32 v3, v2, s[0:1]
+; GFX12-SDAG-NEXT: s_wait_storecnt 0x0
+; GFX12-SDAG-NEXT: s_barrier_signal 0
+; GFX12-SDAG-NEXT: s_barrier_wait 0
+; GFX12-SDAG-NEXT: global_store_b32 v3, v0, s[0:1]
+; GFX12-SDAG-NEXT: s_nop 0
+; GFX12-SDAG-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-SDAG-NEXT: s_endpgm
;
-; GLOBAL-ISEL-LABEL: test3_s_barrier_signal:
-; GLOBAL-ISEL: ; %bb.0: ; %entry
-; GLOBAL-ISEL-NEXT: s_load_b64 s[0:1], s[2:3], 0x24
-; GLOBAL-ISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; GLOBAL-ISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GLOBAL-ISEL-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_lshlrev_b32 v3, 2, v0
-; GLOBAL-ISEL-NEXT: v_mul_lo_u32 v1, v0, v0
-; GLOBAL-ISEL-NEXT: v_sub_nc_u32_e32 v0, v1, v0
-; GLOBAL-ISEL-NEXT: s_wait_kmcnt 0x0
-; GLOBAL-ISEL-NEXT: global_store_b32 v3, v2, s[0:1]
-; GLOBAL-ISEL-NEXT: s_wait_storecnt 0x0
-; GLOBAL-ISEL-NEXT: s_barrier_signal 0
-; GLOBAL-ISEL-NEXT: s_barrier_wait 0
-; GLOBAL-ISEL-NEXT: global_store_b32 v3, v0, s[0:1]
-; GLOBAL-ISEL-NEXT: s_nop 0
-; GLOBAL-ISEL-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
-; GLOBAL-ISEL-NEXT: s_endpgm
+; GFX12-GISEL-LABEL: test3_s_barrier_signal:
+; GFX12-GISEL: ; %bb.0: ; %entry
+; GFX12-GISEL-NEXT: s_load_b64 s[0:1], s[2:3], 0x24
+; GFX12-GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-GISEL-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_lshlrev_b32 v3, 2, v0
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v1, v0, v0
+; GFX12-GISEL-NEXT: v_sub_nc_u32_e32 v0, v1, v0
+; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX12-GISEL-NEXT: global_store_b32 v3, v2, s[0:1]
+; GFX12-GISEL-NEXT: s_wait_storecnt 0x0
+; GFX12-GISEL-NEXT: s_barrier_signal 0
+; GFX12-GISEL-NEXT: s_barrier_wait 0
+; GFX12-GISEL-NEXT: global_store_b32 v3, v0, s[0:1]
+; GFX12-GISEL-NEXT: s_nop 0
+; GFX12-GISEL-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-GISEL-NEXT: s_endpgm
entry:
%tmp = call i32 @llvm.amdgcn.workitem.id.x()
%tmp1 = getelementptr i32, ptr addrspace(1) %out, i32 %tmp
@@ -147,44 +147,44 @@ entry:
}
define amdgpu_kernel void @test1_s_barrier_signal_var(ptr addrspace(1) %out) #0 {
-; GCN-LABEL: test1_s_barrier_signal_var:
-; GCN: ; %bb.0: ; %entry
-; GCN-NEXT: s_load_b64 s[0:1], s[2:3], 0x24
-; GCN-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_and_b32 v0, 0x3ff, v0
-; GCN-NEXT: s_mov_b32 m0, 1
-; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GCN-NEXT: v_mul_u32_u24_e32 v2, v0, v0
-; GCN-NEXT: v_lshlrev_b32_e32 v3, 2, v0
-; GCN-NEXT: v_sub_nc_u32_e32 v0, v2, v0
-; GCN-NEXT: s_wait_kmcnt 0x0
-; GCN-NEXT: global_store_b32 v3, v1, s[0:1]
-; GCN-NEXT: s_wait_storecnt 0x0
-; GCN-NEXT: s_barrier_signal m0
-; GCN-NEXT: s_barrier_wait 1
-; GCN-NEXT: global_store_b32 v3, v0, s[0:1]
-; GCN-NEXT: s_nop 0
-; GCN-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
-; GCN-NEXT: s_endpgm
+; GFX12-SDAG-LABEL: test1_s_barrier_signal_var:
+; GFX12-SDAG: ; %bb.0: ; %entry
+; GFX12-SDAG-NEXT: s_load_b64 s[0:1], s[2:3], 0x24
+; GFX12-SDAG-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_and_b32 v0, 0x3ff, v0
+; GFX12-SDAG-NEXT: s_mov_b32 m0, 1
+; GFX12-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-SDAG-NEXT: v_mul_u32_u24_e32 v2, v0, v0
+; GFX12-SDAG-NEXT: v_lshlrev_b32_e32 v3, 2, v0
+; GFX12-SDAG-NEXT: v_sub_nc_u32_e32 v0, v2, v0
+; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0
+; GFX12-SDAG-NEXT: global_store_b32 v3, v1, s[0:1]
+; GFX12-SDAG-NEXT: s_wait_storecnt 0x0
+; GFX12-SDAG-NEXT: s_barrier_signal m0
+; GFX12-SDAG-NEXT: s_barrier_wait 1
+; GFX12-SDAG-NEXT: global_store_b32 v3, v0, s[0:1]
+; GFX12-SDAG-NEXT: s_nop 0
+; GFX12-SDAG-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-SDAG-NEXT: s_endpgm
;
-; GLOBAL-ISEL-LABEL: test1_s_barrier_signal_var:
-; GLOBAL-ISEL: ; %bb.0: ; %entry
-; GLOBAL-ISEL-NEXT: s_load_b64 s[0:1], s[2:3], 0x24
-; GLOBAL-ISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; GLOBAL-ISEL-NEXT: v_mov_b32_e32 v2, 0
-; GLOBAL-ISEL-NEXT: s_mov_b32 m0, 1
-; GLOBAL-ISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GLOBAL-ISEL-NEXT: v_lshlrev_b32_e32 v3, 2, v0
-; GLOBAL-ISEL-NEXT: v_mul_lo_u32 v1, v0, v0
-; GLOBAL-ISEL-NEXT: v_sub_nc_u32_e32 v0, v1, v0
-; GLOBAL-ISEL-NEXT: s_wait_kmcnt 0x0
-; GLOBAL-ISEL-NEXT: global_store_b32 v3, v2, s[0:1]
-; GLOBAL-ISEL-NEXT: s_wait_storecnt 0x0
-; GLOBAL-ISEL-NEXT: s_barrier_signal m0
-; GLOBAL-ISEL-NEXT: s_barrier_wait 1
-; GLOBAL-ISEL-NEXT: global_store_b32 v3, v0, s[0:1]
-; GLOBAL-ISEL-NEXT: s_nop 0
-; GLOBAL-ISEL-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
-; GLOBAL-ISEL-NEXT: s_endpgm
+; GFX12-GISEL-LABEL: test1_s_barrier_signal_var:
+; GFX12-GISEL: ; %bb.0: ; %entry
+; GFX12-GISEL-NEXT: s_load_b64 s[0:1], s[2:3], 0x24
+; GFX12-GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX12-GISEL-NEXT: s_mov_b32 m0, 1
+; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-GISEL-NEXT: v_lshlrev_b32_e32 v3, 2, v0
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v1, v0, v0
+; GFX12-GISEL-NEXT: v_sub_nc_u32_e32 v0, v1, v0
+; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX12-GISEL-NEXT: global_store_b32 v3, v2, s[0:1]
+; GFX12-GISEL-NEXT: s_wait_storecnt 0x0
+; GFX12-GISEL-NEXT: s_barrier_signal m0
+; GFX12-GISEL-NEXT: s_barrier_wait 1
+; GFX12-GISEL-NEXT: global_store_b32 v3, v0, s[0:1]
+; GFX12-GISEL-NEXT: s_nop 0
+; GFX12-GISEL-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-GISEL-NEXT: s_endpgm
entry:
%tmp = call i32 @llvm.amdgcn.workitem.id.x()
%tmp1 = getelementptr i32, ptr addrspace(1) %out, i32 %tmp
@@ -198,83 +198,83 @@ entry:
}
define void @test2_s_barrier_signal_var(i32 %arg) {
-; GCN-LABEL: test2_s_barrier_signal_var:
-; GCN: ; %bb.0:
-; GCN-NEXT: s_wait_loadcnt_dscnt 0x0
-; GCN-NEXT: s_wait_expcnt 0x0
-; GCN-NEXT: s_wait_samplecnt 0x0
-; GCN-NEXT: s_wait_bvhcnt 0x0
-; GCN-NEXT: s_wait_kmcnt 0x0
-; GCN-NEXT: v_readfirstlane_b32 s0, v0
-; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GCN-NEXT: s_mov_b32 m0, s0
-; GCN-NEXT: s_wait_storecnt 0x0
-; GCN-NEXT: s_barrier_signal m0
-; GCN-NEXT: s_wait_alu 0xfffe
-; GCN-NEXT: s_setpc_b64 s[30:31]
+; GFX12-SDAG-LABEL: test2_s_barrier_signal_var:
+; GFX12-SDAG: ; %bb.0:
+; GFX12-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-SDAG-NEXT: s_wait_expcnt 0x0
+; GFX12-SDAG-NEXT: s_wait_samplecnt 0x0
+; GFX12-SDAG-NEXT: s_wait_bvhcnt 0x0
+; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0
+; GFX12-SDAG-NEXT: v_readfirstlane_b32 s0, v0
+; GFX12-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-SDAG-NEXT: s_mov_b32 m0, s0
+; GFX12-SDAG-NEXT: s_wait_storecnt 0x0
+; GFX12-SDAG-NEXT: s_barrier_signal m0
+; GFX12-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX12-SDAG-NEXT: s_setpc_b64 s[30:31]
;
-; GLOBAL-ISEL-LABEL: test2_s_barrier_signal_var:
-; GLOBAL-ISEL: ; %bb.0:
-; GLOBAL-ISEL-NEXT: s_wait_loadcnt_dscnt 0x0
-; GLOBAL-ISEL-NEXT: s_wait_expcnt 0x0
-; GLOBAL-ISEL-NEXT: s_wait_samplecnt 0x0
-; GLOBAL-ISEL-NEXT: s_wait_bvhcnt 0x0
-; GLOBAL-ISEL-NEXT: s_wait_kmcnt 0x0
-; GLOBAL-ISEL-NEXT: v_readfirstlane_b32 m0, v0
-; GLOBAL-ISEL-NEXT: s_wait_storecnt 0x0
-; GLOBAL-ISEL-NEXT: s_barrier_signal m0
-; GLOBAL-ISEL-NEXT: s_setpc_b64 s[30:31]
+; GFX12-GISEL-LABEL: test2_s_barrier_signal_var:
+; GFX12-GISEL: ; %bb.0:
+; GFX12-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-GISEL-NEXT: s_wait_expcnt 0x0
+; GFX12-GISEL-NEXT: s_wait_samplecnt 0x0
+; GFX12-GISEL-NEXT: s_wait_bvhcnt 0x0
+; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX12-GISEL-NEXT: v_readfirstlane_b32 m0, v0
+; GFX12-GISEL-NEXT: s_wait_storecnt 0x0
+; GFX12-GISEL-NEXT: s_barrier_signal m0
+; GFX12-GISEL-NEXT: s_setpc_b64 s[30:31]
call void @llvm.amdgcn.s.barrier.signal.var(i32 %arg)
ret void
}
define amdgpu_kernel void @test1_s_barrier_signal_isfirst(ptr addrspace(1) %a, ptr addrspace(1) %b, ptr addrspace(1) %c, ptr addrspace(1) %out) #0 {
-; GCN-LABEL: test1_s_barrier_signal_isfirst:
-; GCN: ; %bb.0: ; %entry
-; GCN-NEXT: s_load_b256 s[0:7], s[2:3], 0x24
-; GCN-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_and_b32 v0, 0x3ff, v0
-; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GCN-NEXT: v_lshlrev_b32_e32 v0, 2, v0
-; GCN-NEXT: s_wait_kmcnt 0x0
-; GCN-NEXT: global_store_b32 v0, v1, s[6:7]
-; GCN-NEXT: s_wait_storecnt 0x0
-; GCN-NEXT: s_barrier_signal_isfirst -1
-; GCN-NEXT: s_cselect_b32 s3, s3, s5
-; GCN-NEXT: s_cselect_b32 s2, s2, s4
-; GCN-NEXT: s_clause 0x1
-; GCN-NEXT: global_load_b32 v2, v1, s[0:1]
-; GCN-NEXT: global_load_b32 v1, v1, s[2:3]
-; GCN-NEXT: s_wait_loadcnt 0x0
-; GCN-NEXT: v_mul_lo_u32 v1, v1, v2
-; GCN-NEXT: global_store_b32 v0, v1, s[6:7]
-; GCN-NEXT: s_nop 0
-; GCN-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
-; GCN-NEXT: s_endpgm
+; GFX12-SDAG-LABEL: test1_s_barrier_signal_isfirst:
+; GFX12-SDAG: ; %bb.0: ; %entry
+; GFX12-SDAG-NEXT: s_load_b256 s[0:7], s[2:3], 0x24
+; GFX12-SDAG-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_and_b32 v0, 0x3ff, v0
+; GFX12-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-SDAG-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0
+; GFX12-SDAG-NEXT: global_store_b32 v0, v1, s[6:7]
+; GFX12-SDAG-NEXT: s_wait_storecnt 0x0
+; GFX12-SDAG-NEXT: s_barrier_signal_isfirst -1
+; GFX12-SDAG-NEXT: s_cselect_b32 s3, s3, s5
+; GFX12-SDAG-NEXT: s_cselect_b32 s2, s2, s4
+; GFX12-SDAG-NEXT: s_clause 0x1
+; GFX12-SDAG-NEXT: global_load_b32 v2, v1, s[0:1]
+; GFX12-SDAG-NEXT: global_load_b32 v1, v1, s[2:3]
+; GFX12-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX12-SDAG-NEXT: v_mul_lo_u32 v1, v1, v2
+; GFX12-SDAG-NEXT: global_store_b32 v0, v1, s[6:7]
+; GFX12-SDAG-NEXT: s_nop 0
+; GFX12-SDAG-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-SDAG-NEXT: s_endpgm
;
-; GLOBAL-ISEL-LABEL: test1_s_barrier_signal_isfirst:
-; GLOBAL-ISEL: ; %bb.0: ; %entry
-; GLOBAL-ISEL-NEXT: s_load_b256 s[0:7], s[2:3], 0x24
-; GLOBAL-ISEL-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_and_b32 v0, 0x3ff, v0
-; GLOBAL-ISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GLOBAL-ISEL-NEXT: v_lshlrev_b32_e32 v0, 2, v0
-; GLOBAL-ISEL-NEXT: s_wait_kmcnt 0x0
-; GLOBAL-ISEL-NEXT: global_store_b32 v0, v1, s[6:7]
-; GLOBAL-ISEL-NEXT: s_wait_storecnt 0x0
-; GLOBAL-ISEL-NEXT: s_barrier_signal_isfirst -1
-; GLOBAL-ISEL-NEXT: s_cselect_b32 s8, 1, 0
-; GLOBAL-ISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GLOBAL-ISEL-NEXT: s_and_b32 s8, s8, 1
-; GLOBAL-ISEL-NEXT: s_cmp_lg_u32 s8, 0
-; GLOBAL-ISEL-NEXT: s_cselect_b64 s[2:3], s[2:3], s[4:5]
-; GLOBAL-ISEL-NEXT: s_clause 0x1
-; GLOBAL-ISEL-NEXT: global_load_b32 v2, v1, s[0:1]
-; GLOBAL-ISEL-NEXT: global_load_b32 v1, v1, s[2:3]
-; GLOBAL-ISEL-NEXT: s_wait_loadcnt 0x0
-; GLOBAL-ISEL-NEXT: v_mul_lo_u32 v1, v1, v2
-; GLOBAL-ISEL-NEXT: global_store_b32 v0, v1, s[6:7]
-; GLOBAL-ISEL-NEXT: s_nop 0
-; GLOBAL-ISEL-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
-; GLOBAL-ISEL-NEXT: s_endpgm
+; GFX12-GISEL-LABEL: test1_s_barrier_signal_isfirst:
+; GFX12-GISEL: ; %bb.0: ; %entry
+; GFX12-GISEL-NEXT: s_load_b256 s[0:7], s[2:3], 0x24
+; GFX12-GISEL-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_and_b32 v0, 0x3ff, v0
+; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-GISEL-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX12-GISEL-NEXT: global_store_b32 v0, v1, s[6:7]
+; GFX12-GISEL-NEXT: s_wait_storecnt 0x0
+; GFX12-GISEL-NEXT: s_barrier_signal_isfirst -1
+; GFX12-GISEL-NEXT: s_cselect_b32 s8, 1, 0
+; GFX12-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX12-GISEL-NEXT: s_and_b32 s8, s8, 1
+; GFX12-GISEL-NEXT: s_cmp_lg_u32 s8, 0
+; GFX12-GISEL-NEXT: s_cselect_b64 s[2:3], s[2:3], s[4:5]
+; GFX12-GISEL-NEXT: s_clause 0x1
+; GFX12-GISEL-NEXT: global_load_b32 v2, v1, s[0:1]
+; GFX12-GISEL-NEXT: global_load_b32 v1, v1, s[2:3]
+; GFX12-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v1, v1, v2
+; GFX12-GISEL-NEXT: global_store_b32 v0, v1, s[6:7]
+; GFX12-GISEL-NEXT: s_nop 0
+; GFX12-GISEL-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-GISEL-NEXT: s_endpgm
entry:
%tmp = call i32 @llvm.amdgcn.workitem.id.x()
%tmp1 = getelementptr i32, ptr addrspace(1) %out, i32 %tmp
@@ -289,52 +289,52 @@ entry:
}
define amdgpu_kernel void @test2_s_barrier_signal_isfirst(ptr addrspace(1) %a, ptr addrspace(1) %b, ptr addrspace(1) %c, ptr addrspace(1) %out) #0 {
-; GCN-LABEL: test2_s_barrier_signal_isfirst:
-; GCN: ; %bb.0: ; %entry
-; GCN-NEXT: s_load_b256 s[0:7], s[2:3], 0x24
-; GCN-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_and_b32 v0, 0x3ff, v0
-; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GCN-NEXT: v_lshlrev_b32_e32 v0, 2, v0
-; GCN-NEXT: s_wait_kmcnt 0x0
-; GCN-NEXT: global_store_b32 v0, v1, s[6:7]
-; GCN-NEXT: s_wait_storecnt 0x0
-; GCN-NEXT: s_barrier_signal_isfirst 1
-; GCN-NEXT: s_cselect_b32 s3, s3, s5
-; GCN-NEXT: s_cselect_b32 s2, s2, s4
-; GCN-NEXT: s_clause 0x1
-; GCN-NEXT: global_load_b32 v2, v1, s[0:1]
-; GCN-NEXT: global_load_b32 v1, v1, s[2:3]
-; GCN-NEXT: s_wait_loadcnt 0x0
-; GCN-NEXT: v_mul_lo_u32 v1, v1, v2
-; GCN-NEXT: global_store_b32 v0, v1, s[6:7]
-; GCN-NEXT: s_nop 0
-; GCN-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
-; GCN-NEXT: s_endpgm
+; GFX12-SDAG-LABEL: test2_s_barrier_signal_isfirst:
+; GFX12-SDAG: ; %bb.0: ; %entry
+; GFX12-SDAG-NEXT: s_load_b256 s[0:7], s[2:3], 0x24
+; GFX12-SDAG-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_and_b32 v0, 0x3ff, v0
+; GFX12-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-SDAG-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0
+; GFX12-SDAG-NEXT: global_store_b32 v0, v1, s[6:7]
+; GFX12-SDAG-NEXT: s_wait_storecnt 0x0
+; GFX12-SDAG-NEXT: s_barrier_signal_isfirst 1
+; GFX12-SDAG-NEXT: s_cselect_b32 s3, s3, s5
+; GFX12-SDAG-NEXT: s_cselect_b32 s2, s2, s4
+; GFX12-SDAG-NEXT: s_clause 0x1
+; GFX12-SDAG-NEXT: global_load_b32 v2, v1, s[0:1]
+; GFX12-SDAG-NEXT: global_load_b32 v1, v1, s[2:3]
+; GFX12-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX12-SDAG-NEXT: v_mul_lo_u32 v1, v1, v2
+; GFX12-SDAG-NEXT: global_store_b32 v0, v1, s[6:7]
+; GFX12-SDAG-NEXT: s_nop 0
+; GFX12-SDAG-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-SDAG-NEXT: s_endpgm
;
-; GLOBAL-ISEL-LABEL: test2_s_barrier_signal_isfirst:
-; GLOBAL-ISEL: ; %bb.0: ; %entry
-; GLOBAL-ISEL-NEXT: s_load_b256 s[0:7], s[2:3], 0x24
-; GLOBAL-ISEL-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_and_b32 v0, 0x3ff, v0
-; GLOBAL-ISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GLOBAL-ISEL-NEXT: v_lshlrev_b32_e32 v0, 2, v0
-; GLOBAL-ISEL-NEXT: s_wait_kmcnt 0x0
-; GLOBAL-ISEL-NEXT: global_store_b32 v0, v1, s[6:7]
-; GLOBAL-ISEL-NEXT: s_wait_storecnt 0x0
-; GLOBAL-ISEL-NEXT: s_barrier_signal_isfirst 1
-; GLOBAL-ISEL-NEXT: s_cselect_b32 s8, 1, 0
-; GLOBAL-ISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GLOBAL-ISEL-NEXT: s_and_b32 s8, s8, 1
-; GLOBAL-ISEL-NEXT: s_cmp_lg_u32 s8, 0
-; GLOBAL-ISEL-NEXT: s_cselect_b64 s[2:3], s[2:3], s[4:5]
-; GLOBAL-ISEL-NEXT: s_clause 0x1
-; GLOBAL-ISEL-NEXT: global_load_b32 v2, v1, s[0:1]
-; GLOBAL-ISEL-NEXT: global_load_b32 v1, v1, s[2:3]
-; GLOBAL-ISEL-NEXT: s_wait_loadcnt 0x0
-; GLOBAL-ISEL-NEXT: v_mul_lo_u32 v1, v1, v2
-; GLOBAL-ISEL-NEXT: global_store_b32 v0, v1, s[6:7]
-; GLOBAL-ISEL-NEXT: s_nop 0
-; GLOBAL-ISEL-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
-; GLOBAL-ISEL-NEXT: s_endpgm
+; GFX12-GISEL-LABEL: test2_s_barrier_signal_isfirst:
+; GFX12-GISEL: ; %bb.0: ; %entry
+; GFX12-GISEL-NEXT: s_load_b256 s[0:7], s[2:3], 0x24
+; GFX12-GISEL-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_and_b32 v0, 0x3ff, v0
+; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-GISEL-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX12-GISEL-NEXT: global_store_b32 v0, v1, s[6:7]
+; GFX12-GISEL-NEXT: s_wait_storecnt 0x0
+; GFX12-GISEL-NEXT: s_barrier_signal_isfirst 1
+; GFX12-GISEL-NEXT: s_cselect_b32 s8, 1, 0
+; GFX12-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX12-GISEL-NEXT: s_and_b32 s8, s8, 1
+; GFX12-GISEL-NEXT: s_cmp_lg_u32 s8, 0
+; GFX12-GISEL-NEXT: s_cselect_b64 s[2:3], s[2:3], s[4:5]
+; GFX12-GISEL-NEXT: s_clause 0x1
+; GFX12-GISEL-NEXT: global_load_b32 v2, v1, s[0:1]
+; GFX12-GISEL-NEXT: global_load_b32 v1, v1, s[2:3]
+; GFX12-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v1, v1, v2
+; GFX12-GISEL-NEXT: global_store_b32 v0, v1, s[6:7]
+; GFX12-GISEL-NEXT: s_nop 0
+; GFX12-GISEL-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-GISEL-NEXT: s_endpgm
entry:
%tmp = call i32 @llvm.amdgcn.workitem.id.x()
%tmp1 = getelementptr i32, ptr addrspace(1) %out, i32 %tmp
@@ -349,52 +349,52 @@ entry:
}
define amdgpu_kernel void @test3_s_barrier_signal_isfirst(ptr addrspace(1) %a, ptr addrspace(1) %b, ptr addrspace(1) %c, ptr addrspace(1) %out) #0 {
-; GCN-LABEL: test3_s_barrier_signal_isfirst:
-; GCN: ; %bb.0: ; %entry
-; GCN-NEXT: s_load_b256 s[0:7], s[2:3], 0x24
-; GCN-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_and_b32 v0, 0x3ff, v0
-; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GCN-NEXT: v_lshlrev_b32_e32 v0, 2, v0
-; GCN-NEXT: s_wait_kmcnt 0x0
-; GCN-NEXT: global_store_b32 v0, v1, s[6:7]
-; GCN-NEXT: s_wait_storecnt 0x0
-; GCN-NEXT: s_barrier_signal_isfirst 1
-; GCN-NEXT: s_cselect_b32 s3, s3, s5
-; GCN-NEXT: s_cselect_b32 s2, s2, s4
-; GCN-NEXT: s_clause 0x1
-; GCN-NEXT: global_load_b32 v2, v1, s[0:1]
-; GCN-NEXT: global_load_b32 v1, v1, s[2:3]
-; GCN-NEXT: s_wait_loadcnt 0x0
-; GCN-NEXT: v_mul_lo_u32 v1, v1, v2
-; GCN-NEXT: global_store_b32 v0, v1, s[6:7]
-; GCN-NEXT: s_nop 0
-; GCN-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
-; GCN-NEXT: s_endpgm
+; GFX12-SDAG-LABEL: test3_s_barrier_signal_isfirst:
+; GFX12-SDAG: ; %bb.0: ; %entry
+; GFX12-SDAG-NEXT: s_load_b256 s[0:7], s[2:3], 0x24
+; GFX12-SDAG-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_and_b32 v0, 0x3ff, v0
+; GFX12-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-SDAG-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0
+; GFX12-SDAG-NEXT: global_store_b32 v0, v1, s[6:7]
+; GFX12-SDAG-NEXT: s_wait_storecnt 0x0
+; GFX12-SDAG-NEXT: s_barrier_signal_isfirst 1
+; GFX12-SDAG-NEXT: s_cselect_b32 s3, s3, s5
+; GFX12-SDAG-NEXT: s_cselect_b32 s2, s2, s4
+; GFX12-SDAG-NEXT: s_clause 0x1
+; GFX12-SDAG-NEXT: global_load_b32 v2, v1, s[0:1]
+; GFX12-SDAG-NEXT: global_load_b32 v1, v1, s[2:3]
+; GFX12-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX12-SDAG-NEXT: v_mul_lo_u32 v1, v1, v2
+; GFX12-SDAG-NEXT: global_store_b32 v0, v1, s[6:7]
+; GFX12-SDAG-NEXT: s_nop 0
+; GFX12-SDAG-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-SDAG-NEXT: s_endpgm
;
-; GLOBAL-ISEL-LABEL: test3_s_barrier_signal_isfirst:
-; GLOBAL-ISEL: ; %bb.0: ; %entry
-; GLOBAL-ISEL-NEXT: s_load_b256 s[0:7], s[2:3], 0x24
-; GLOBAL-ISEL-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_and_b32 v0, 0x3ff, v0
-; GLOBAL-ISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GLOBAL-ISEL-NEXT: v_lshlrev_b32_e32 v0, 2, v0
-; GLOBAL-ISEL-NEXT: s_wait_kmcnt 0x0
-; GLOBAL-ISEL-NEXT: global_store_b32 v0, v1, s[6:7]
-; GLOBAL-ISEL-NEXT: s_wait_storecnt 0x0
-; GLOBAL-ISEL-NEXT: s_barrier_signal_isfirst 1
-; GLOBAL-ISEL-NEXT: s_cselect_b32 s8, 1, 0
-; GLOBAL-ISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GLOBAL-ISEL-NEXT: s_and_b32 s8, s8, 1
-; GLOBAL-ISEL-NEXT: s_cmp_lg_u32 s8, 0
-; GLOBAL-ISEL-NEXT: s_cselect_b64 s[2:3], s[2:3], s[4:5]
-; GLOBAL-ISEL-NEXT: s_clause 0x1
-; GLOBAL-ISEL-NEXT: global_load_b32 v2, v1, s[0:1]
-; GLOBAL-ISEL-NEXT: global_load_b32 v1, v1, s[2:3]
-; GLOBAL-ISEL-NEXT: s_wait_loadcnt 0x0
-; GLOBAL-ISEL-NEXT: v_mul_lo_u32 v1, v1, v2
-; GLOBAL-ISEL-NEXT: global_store_b32 v0, v1, s[6:7]
-; GLOBAL-ISEL-NEXT: s_nop 0
-; GLOBAL-ISEL-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
-; GLOBAL-ISEL-NEXT: s_endpgm
+; GFX12-GISEL-LABEL: test3_s_barrier_signal_isfirst:
+; GFX12-GISEL: ; %bb.0: ; %entry
+; GFX12-GISEL-NEXT: s_load_b256 s[0:7], s[2:3], 0x24
+; GFX12-GISEL-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_and_b32 v0, 0x3ff, v0
+; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-GISEL-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX12-GISEL-NEXT: global_store_b32 v0, v1, s[6:7]
+; GFX12-GISEL-NEXT: s_wait_storecnt 0x0
+; GFX12-GISEL-NEXT: s_barrier_signal_isfirst 1
+; GFX12-GISEL-NEXT: s_cselect_b32 s8, 1, 0
+; GFX12-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX12-GISEL-NEXT: s_and_b32 s8, s8, 1
+; GFX12-GISEL-NEXT: s_cmp_lg_u32 s8, 0
+; GFX12-GISEL-NEXT: s_cselect_b64 s[2:3], s[2:3], s[4:5]
+; GFX12-GISEL-NEXT: s_clause 0x1
+; GFX12-GISEL-NEXT: global_load_b32 v2, v1, s[0:1]
+; GFX12-GISEL-NEXT: global_load_b32 v1, v1, s[2:3]
+; GFX12-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v1, v1, v2
+; GFX12-GISEL-NEXT: global_store_b32 v0, v1, s[6:7]
+; GFX12-GISEL-NEXT: s_nop 0
+; GFX12-GISEL-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-GISEL-NEXT: s_endpgm
entry:
%tmp = call i32 @llvm.amdgcn.workitem.id.x()
%tmp1 = getelementptr i32, ptr addrspace(1) %out, i32 %tmp
@@ -409,54 +409,54 @@ entry:
}
define amdgpu_kernel void @test1_s_barrier_signal_isfirst_var(ptr addrspace(1) %a, ptr addrspace(1) %b, ptr addrspace(1) %c, ptr addrspace(1) %out) #0 {
-; GCN-LABEL: test1_s_barrier_signal_isfirst_var:
-; GCN: ; %bb.0: ; %entry
-; GCN-NEXT: s_load_b256 s[0:7], s[2:3], 0x24
-; GCN-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_and_b32 v0, 0x3ff, v0
-; GCN-NEXT: s_mov_b32 m0, 1
-; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GCN-NEXT: v_lshlrev_b32_e32 v0, 2, v0
-; GCN-NEXT: s_wait_kmcnt 0x0
-; GCN-NEXT: global_store_b32 v0, v1, s[6:7]
-; GCN-NEXT: s_wait_storecnt 0x0
-; GCN-NEXT: s_barrier_signal_isfirst m0
-; GCN-NEXT: s_cselect_b32 s3, s3, s5
-; GCN-NEXT: s_cselect_b32 s2, s2, s4
-; GCN-NEXT: s_clause 0x1
-; GCN-NEXT: global_load_b32 v2, v1, s[0:1]
-; GCN-NEXT: global_load_b32 v1, v1, s[2:3]
-; GCN-NEXT: s_wait_loadcnt 0x0
-; GCN-NEXT: v_mul_lo_u32 v1, v1, v2
-; GCN-NEXT: global_store_b32 v0, v1, s[6:7]
-; GCN-NEXT: s_nop 0
-; GCN-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
-; GCN-NEXT: s_endpgm
+; GFX12-SDAG-LABEL: test1_s_barrier_signal_isfirst_var:
+; GFX12-SDAG: ; %bb.0: ; %entry
+; GFX12-SDAG-NEXT: s_load_b256 s[0:7], s[2:3], 0x24
+; GFX12-SDAG-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_and_b32 v0, 0x3ff, v0
+; GFX12-SDAG-NEXT: s_mov_b32 m0, 1
+; GFX12-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-SDAG-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0
+; GFX12-SDAG-NEXT: global_store_b32 v0, v1, s[6:7]
+; GFX12-SDAG-NEXT: s_wait_storecnt 0x0
+; GFX12-SDAG-NEXT: s_barrier_signal_isfirst m0
+; GFX12-SDAG-NEXT: s_cselect_b32 s3, s3, s5
+; GFX12-SDAG-NEXT: s_cselect_b32 s2, s2, s4
+; GFX12-SDAG-NEXT: s_clause 0x1
+; GFX12-SDAG-NEXT: global_load_b32 v2, v1, s[0:1]
+; GFX12-SDAG-NEXT: global_load_b32 v1, v1, s[2:3]
+; GFX12-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX12-SDAG-NEXT: v_mul_lo_u32 v1, v1, v2
+; GFX12-SDAG-NEXT: global_store_b32 v0, v1, s[6:7]
+; GFX12-SDAG-NEXT: s_nop 0
+; GFX12-SDAG-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-SDAG-NEXT: s_endpgm
;
-; GLOBAL-ISEL-LABEL: test1_s_barrier_signal_isfirst_var:
-; GLOBAL-ISEL: ; %bb.0: ; %entry
-; GLOBAL-ISEL-NEXT: s_load_b256 s[0:7], s[2:3], 0x24
-; GLOBAL-ISEL-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_and_b32 v0, 0x3ff, v0
-; GLOBAL-ISEL-NEXT: s_mov_b32 m0, 1
-; GLOBAL-ISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GLOBAL-ISEL-NEXT: v_lshlrev_b32_e32 v0, 2, v0
-; GLOBAL-ISEL-NEXT: s_wait_kmcnt 0x0
-; GLOBAL-ISEL-NEXT: global_store_b32 v0, v1, s[6:7]
-; GLOBAL-ISEL-NEXT: s_wait_storecnt 0x0
-; GLOBAL-ISEL-NEXT: s_barrier_signal_isfirst m0
-; GLOBAL-ISEL-NEXT: s_cselect_b32 s8, 1, 0
-; GLOBAL-ISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GLOBAL-ISEL-NEXT: s_and_b32 s8, s8, 1
-; GLOBAL-ISEL-NEXT: s_cmp_lg_u32 s8, 0
-; GLOBAL-ISEL-NEXT: s_cselect_b64 s[2:3], s[2:3], s[4:5]
-; GLOBAL-ISEL-NEXT: s_clause 0x1
-; GLOBAL-ISEL-NEXT: global_load_b32 v2, v1, s[0:1]
-; GLOBAL-ISEL-NEXT: global_load_b32 v1, v1, s[2:3]
-; GLOBAL-ISEL-NEXT: s_wait_loadcnt 0x0
-; GLOBAL-ISEL-NEXT: v_mul_lo_u32 v1, v1, v2
-; GLOBAL-ISEL-NEXT: global_store_b32 v0, v1, s[6:7]
-; GLOBAL-ISEL-NEXT: s_nop 0
-; GLOBAL-ISEL-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
-; GLOBAL-ISEL-NEXT: s_endpgm
+; GFX12-GISEL-LABEL: test1_s_barrier_signal_isfirst_var:
+; GFX12-GISEL: ; %bb.0: ; %entry
+; GFX12-GISEL-NEXT: s_load_b256 s[0:7], s[2:3], 0x24
+; GFX12-GISEL-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_and_b32 v0, 0x3ff, v0
+; GFX12-GISEL-NEXT: s_mov_b32 m0, 1
+; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-GISEL-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX12-GISEL-NEXT: global_store_b32 v0, v1, s[6:7]
+; GFX12-GISEL-NEXT: s_wait_storecnt 0x0
+; GFX12-GISEL-NEXT: s_barrier_signal_isfirst m0
+; GFX12-GISEL-NEXT: s_cselect_b32 s8, 1, 0
+; GFX12-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX12-GISEL-NEXT: s_and_b32 s8, s8, 1
+; GFX12-GISEL-NEXT: s_cmp_lg_u32 s8, 0
+; GFX12-GISEL-NEXT: s_cselect_b64 s[2:3], s[2:3], s[4:5]
+; GFX12-GISEL-NEXT: s_clause 0x1
+; GFX12-GISEL-NEXT: global_load_b32 v2, v1, s[0:1]
+; GFX12-GISEL-NEXT: global_load_b32 v1, v1, s[2:3]
+; GFX12-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v1, v1, v2
+; GFX12-GISEL-NEXT: global_store_b32 v0, v1, s[6:7]
+; GFX12-GISEL-NEXT: s_nop 0
+; GFX12-GISEL-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-GISEL-NEXT: s_endpgm
entry:
%tmp = call i32 @llvm.amdgcn.workitem.id.x()
%tmp1 = getelementptr i32, ptr addrspace(1) %out, i32 %tmp
@@ -471,65 +471,65 @@ entry:
}
define void @test2_s_barrier_signal_isfirst_var(ptr addrspace(1) %a, ptr addrspace(1) %b, ptr addrspace(1) %c, i32 %arg, ptr addrspace(1) %out) {
-; GCN-LABEL: test2_s_barrier_signal_isfirst_var:
-; GCN: ; %bb.0:
-; GCN-NEXT: s_wait_loadcnt_dscnt 0x0
-; GCN-NEXT: s_wait_expcnt 0x0
-; GCN-NEXT: s_wait_samplecnt 0x0
-; GCN-NEXT: s_wait_bvhcnt 0x0
-; GCN-NEXT: s_wait_kmcnt 0x0
-; GCN-NEXT: v_dual_mov_b32 v10, 0 :: v_dual_and_b32 v9, 0x3ff, v31
-; GCN-NEXT: v_readfirstlane_b32 s0, v6
-; GCN-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GCN-NEXT: v_lshlrev_b32_e32 v9, 2, v9
-; GCN-NEXT: s_mov_b32 m0, s0
-; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GCN-NEXT: v_add_co_u32 v7, vcc_lo, v7, v9
-; GCN-NEXT: v_add_co_ci_u32_e32 v8, vcc_lo, 0, v8, vcc_lo
-; GCN-NEXT: global_store_b32 v[7:8], v10, off
-; GCN-NEXT: s_wait_storecnt 0x0
-; GCN-NEXT: s_barrier_signal_isfirst m0
-; GCN-NEXT: s_cselect_b32 vcc_lo, -1, 0
-; GCN-NEXT: s_wait_alu 0xfffe
-; GCN-NEXT: v_dual_cndmask_b32 v2, v4, v2 :: v_dual_cndmask_b32 v3, v5, v3
-; GCN-NEXT: global_load_b32 v0, v[0:1], off
-; GCN-NEXT: global_load_b32 v1, v[2:3], off
-; GCN-NEXT: s_wait_loadcnt 0x0
-; GCN-NEXT: v_mul_lo_u32 v0, v1, v0
-; GCN-NEXT: global_store_b32 v[7:8], v0, off
-; GCN-NEXT: s_wait_kmcnt 0x0
-; GCN-NEXT: s_setpc_b64 s[30:31]
+; GFX12-SDAG-LABEL: test2_s_barrier_signal_isfirst_var:
+; GFX12-SDAG: ; %bb.0:
+; GFX12-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-SDAG-NEXT: s_wait_expcnt 0x0
+; GFX12-SDAG-NEXT: s_wait_samplecnt 0x0
+; GFX12-SDAG-NEXT: s_wait_bvhcnt 0x0
+; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0
+; GFX12-SDAG-NEXT: v_dual_mov_b32 v10, 0 :: v_dual_and_b32 v9, 0x3ff, v31
+; GFX12-SDAG-NEXT: v_readfirstlane_b32 s0, v6
+; GFX12-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-SDAG-NEXT: v_lshlrev_b32_e32 v9, 2, v9
+; GFX12-SDAG-NEXT: s_mov_b32 m0, s0
+; GFX12-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-SDAG-NEXT: v_add_co_u32 v7, vcc_lo, v7, v9
+; GFX12-SDAG-NEXT: v_add_co_ci_u32_e32 v8, vcc_lo, 0, v8, vcc_lo
+; GFX12-SDAG-NEXT: global_store_b32 v[7:8], v10, off
+; GFX12-SDAG-NEXT: s_wait_storecnt 0x0
+; GFX12-SDAG-NEXT: s_barrier_signal_isfirst m0
+; GFX12-SDAG-NEXT: s_cselect_b32 vcc_lo, -1, 0
+; GFX12-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX12-SDAG-NEXT: v_dual_cndmask_b32 v2, v4, v2 :: v_dual_cndmask_b32 v3, v5, v3
+; GFX12-SDAG-NEXT: global_load_b32 v0, v[0:1], off
+; GFX12-SDAG-NEXT: global_load_b32 v1, v[2:3], off
+; GFX12-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX12-SDAG-NEXT: v_mul_lo_u32 v0, v1, v0
+; GFX12-SDAG-NEXT: global_store_b32 v[7:8], v0, off
+; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0
+; GFX12-SDAG-NEXT: s_setpc_b64 s[30:31]
;
-; GLOBAL-ISEL-LABEL: test2_s_barrier_signal_isfirst_var:
-; GLOBAL-ISEL: ; %bb.0:
-; GLOBAL-ISEL-NEXT: s_wait_loadcnt_dscnt 0x0
-; GLOBAL-ISEL-NEXT: s_wait_expcnt 0x0
-; GLOBAL-ISEL-NEXT: s_wait_samplecnt 0x0
-; GLOBAL-ISEL-NEXT: s_wait_bvhcnt 0x0
-; GLOBAL-ISEL-NEXT: s_wait_kmcnt 0x0
-; GLOBAL-ISEL-NEXT: v_and_b32_e32 v9, 0x3ff, v31
-; GLOBAL-ISEL-NEXT: v_readfirstlane_b32 m0, v6
-; GLOBAL-ISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GLOBAL-ISEL-NEXT: v_lshlrev_b32_e32 v9, 2, v9
-; GLOBAL-ISEL-NEXT: v_add_co_u32 v7, vcc_lo, v7, v9
-; GLOBAL-ISEL-NEXT: v_add_co_ci_u32_e32 v8, vcc_lo, 0, v8, vcc_lo
-; GLOBAL-ISEL-NEXT: v_mov_b32_e32 v9, 0
-; GLOBAL-ISEL-NEXT: global_store_b32 v[7:8], v9, off
-; GLOBAL-ISEL-NEXT: s_wait_storecnt 0x0
-; GLOBAL-ISEL-NEXT: s_barrier_signal_isfirst m0
-; GLOBAL-ISEL-NEXT: s_cselect_b32 s0, 1, 0
-; GLOBAL-ISEL-NEXT: s_wait_alu 0xfffe
-; GLOBAL-ISEL-NEXT: s_and_b32 s0, 1, s0
-; GLOBAL-ISEL-NEXT: s_wait_alu 0xfffe
-; GLOBAL-ISEL-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s0
-; GLOBAL-ISEL-NEXT: v_dual_cndmask_b32 v2, v4, v2 :: v_dual_cndmask_b32 v3, v5, v3
-; GLOBAL-ISEL-NEXT: global_load_b32 v0, v[0:1], off
-; GLOBAL-ISEL-NEXT: global_load_b32 v1, v[2:3], off
-; GLOBAL-ISEL-NEXT: s_wait_loadcnt 0x0
-; GLOBAL-ISEL-NEXT: v_mul_lo_u32 v0, v1, v0
-; GLOBAL-ISEL-NEXT: global_store_b32 v[7:8], v0, off
-; GLOBAL-ISEL-NEXT: s_wait_kmcnt 0x0
-; GLOBAL-ISEL-NEXT: s_setpc_b64 s[30:31]
+; GFX12-GISEL-LABEL: test2_s_barrier_signal_isfirst_var:
+; GFX12-GISEL: ; %bb.0:
+; GFX12-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-GISEL-NEXT: s_wait_expcnt 0x0
+; GFX12-GISEL-NEXT: s_wait_samplecnt 0x0
+; GFX12-GISEL-NEXT: s_wait_bvhcnt 0x0
+; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX12-GISEL-NEXT: v_and_b32_e32 v9, 0x3ff, v31
+; GFX12-GISEL-NEXT: v_readfirstlane_b32 m0, v6
+; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-GISEL-NEXT: v_lshlrev_b32_e32 v9, 2, v9
+; GFX12-GISEL-NEXT: v_add_co_u32 v7, vcc_lo, v7, v9
+; GFX12-GISEL-NEXT: v_add_co_ci_u32_e32 v8, vcc_lo, 0, v8, vcc_lo
+; GFX12-GISEL-NEXT: v_mov_b32_e32 v9, 0
+; GFX12-GISEL-NEXT: global_store_b32 v[7:8], v9, off
+; GFX12-GISEL-NEXT: s_wait_storecnt 0x0
+; GFX12-GISEL-NEXT: s_barrier_signal_isfirst m0
+; GFX12-GISEL-NEXT: s_cselect_b32 s0, 1, 0
+; GFX12-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX12-GISEL-NEXT: s_and_b32 s0, 1, s0
+; GFX12-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX12-GISEL-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s0
+; GFX12-GISEL-NEXT: v_dual_cndmask_b32 v2, v4, v2 :: v_dual_cndmask_b32 v3, v5, v3
+; GFX12-GISEL-NEXT: global_load_b32 v0, v[0:1], off
+; GFX12-GISEL-NEXT: global_load_b32 v1, v[2:3], off
+; GFX12-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v0, v1, v0
+; GFX12-GISEL-NEXT: global_store_b32 v[7:8], v0, off
+; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX12-GISEL-NEXT: s_setpc_b64 s[30:31]
%tmp = call i32 @llvm.amdgcn.workitem.id.x()
%tmp1 = getelementptr i32, ptr addrspace(1) %out, i32 %tmp
store i32 0, ptr addrspace(1) %tmp1
@@ -543,40 +543,40 @@ define void @test2_s_barrier_signal_isfirst_var(ptr addrspace(1) %a, ptr addrspa
}
define amdgpu_kernel void @test1_s_barrier_init(ptr addrspace(1) %out, i32 %mbrCnt) #0 {
-; GCN-LABEL: test1_s_barrier_init:
-; GCN: ; %bb.0: ; %entry
-; GCN-NEXT: s_load_b96 s[0:2], s[2:3], 0x24
-; GCN-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GCN-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_lshlrev_b32 v3, 2, v0
-; GCN-NEXT: v_mul_u32_u24_e32 v1, v0, v0
-; GCN-NEXT: v_sub_nc_u32_e32 v0, v1, v0
-; GCN-NEXT: s_wait_kmcnt 0x0
-; GCN-NEXT: s_lshl_b32 s2, s2, 16
-; GCN-NEXT: global_store_b32 v3, v2, s[0:1]
-; GCN-NEXT: s_mov_b32 m0, s2
-; GCN-NEXT: s_barrier_init -1
-; GCN-NEXT: global_store_b32 v3, v0, s[0:1]
-; GCN-NEXT: s_nop 0
-; GCN-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
-; GCN-NEXT: s_endpgm
+; GFX12-SDAG-LABEL: test1_s_barrier_init:
+; GFX12-SDAG: ; %bb.0: ; %entry
+; GFX12-SDAG-NEXT: s_load_b96 s[0:2], s[2:3], 0x24
+; GFX12-SDAG-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-SDAG-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_lshlrev_b32 v3, 2, v0
+; GFX12-SDAG-NEXT: v_mul_u32_u24_e32 v1, v0, v0
+; GFX12-SDAG-NEXT: v_sub_nc_u32_e32 v0, v1, v0
+; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0
+; GFX12-SDAG-NEXT: s_lshl_b32 s2, s2, 16
+; GFX12-SDAG-NEXT: global_store_b32 v3, v2, s[0:1]
+; GFX12-SDAG-NEXT: s_mov_b32 m0, s2
+; GFX12-SDAG-NEXT: s_barrier_init -1
+; GFX12-SDAG-NEXT: global_store_b32 v3, v0, s[0:1]
+; GFX12-SDAG-NEXT: s_nop 0
+; GFX12-SDAG-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-SDAG-NEXT: s_endpgm
;
-; GLOBAL-ISEL-LABEL: test1_s_barrier_init:
-; GLOBAL-ISEL: ; %bb.0: ; %entry
-; GLOBAL-ISEL-NEXT: s_load_b96 s[0:2], s[2:3], 0x24
-; GLOBAL-ISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; GLOBAL-ISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GLOBAL-ISEL-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_lshlrev_b32 v3, 2, v0
-; GLOBAL-ISEL-NEXT: v_mul_lo_u32 v1, v0, v0
-; GLOBAL-ISEL-NEXT: v_sub_nc_u32_e32 v0, v1, v0
-; GLOBAL-ISEL-NEXT: s_wait_kmcnt 0x0
-; GLOBAL-ISEL-NEXT: s_lshl_b32 m0, 16, s2
-; GLOBAL-ISEL-NEXT: global_store_b32 v3, v2, s[0:1]
-; GLOBAL-ISEL-NEXT: s_barrier_init -1
-; GLOBAL-ISEL-NEXT: global_store_b32 v3, v0, s[0:1]
-; GLOBAL-ISEL-NEXT: s_nop 0
-; GLOBAL-ISEL-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
-; GLOBAL-ISEL-NEXT: s_endpgm
+; GFX12-GISEL-LABEL: test1_s_barrier_init:
+; GFX12-GISEL: ; %bb.0: ; %entry
+; GFX12-GISEL-NEXT: s_load_b96 s[0:2], s[2:3], 0x24
+; GFX12-GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-GISEL-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_lshlrev_b32 v3, 2, v0
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v1, v0, v0
+; GFX12-GISEL-NEXT: v_sub_nc_u32_e32 v0, v1, v0
+; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX12-GISEL-NEXT: s_lshl_b32 m0, 16, s2
+; GFX12-GISEL-NEXT: global_store_b32 v3, v2, s[0:1]
+; GFX12-GISEL-NEXT: s_barrier_init -1
+; GFX12-GISEL-NEXT: global_store_b32 v3, v0, s[0:1]
+; GFX12-GISEL-NEXT: s_nop 0
+; GFX12-GISEL-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-GISEL-NEXT: s_endpgm
entry:
%tmp = call i32 @llvm.amdgcn.workitem.id.x()
%tmp1 = getelementptr i32, ptr addrspace(1) %out, i32 %tmp
@@ -589,40 +589,40 @@ entry:
}
define amdgpu_kernel void @test2_s_barrier_init(ptr addrspace(1) %out, i32 %mbrCnt) #0 {
-; GCN-LABEL: test2_s_barrier_init:
-; GCN: ; %bb.0: ; %entry
-; GCN-NEXT: s_load_b96 s[0:2], s[2:3], 0x24
-; GCN-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GCN-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_lshlrev_b32 v3, 2, v0
-; GCN-NEXT: v_mul_u32_u24_e32 v1, v0, v0
-; GCN-NEXT: v_sub_nc_u32_e32 v0, v1, v0
-; GCN-NEXT: s_wait_kmcnt 0x0
-; GCN-NEXT: s_lshl_b32 s2, s2, 16
-; GCN-NEXT: global_store_b32 v3, v2, s[0:1]
-; GCN-NEXT: s_mov_b32 m0, s2
-; GCN-NEXT: s_barrier_init 1
-; GCN-NEXT: global_store_b32 v3, v0, s[0:1]
-; GCN-NEXT: s_nop 0
-; GCN-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
-; GCN-NEXT: s_endpgm
+; GFX12-SDAG-LABEL: test2_s_barrier_init:
+; GFX12-SDAG: ; %bb.0: ; %entry
+; GFX12-SDAG-NEXT: s_load_b96 s[0:2], s[2:3], 0x24
+; GFX12-SDAG-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-SDAG-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_lshlrev_b32 v3, 2, v0
+; GFX12-SDAG-NEXT: v_mul_u32_u24_e32 v1, v0, v0
+; GFX12-SDAG-NEXT: v_sub_nc_u32_e32 v0, v1, v0
+; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0
+; GFX12-SDAG-NEXT: s_lshl_b32 s2, s2, 16
+; GFX12-SDAG-NEXT: global_store_b32 v3, v2, s[0:1]
+; GFX12-SDAG-NEXT: s_mov_b32 m0, s2
+; GFX12-SDAG-NEXT: s_barrier_init 1
+; GFX12-SDAG-NEXT: global_store_b32 v3, v0, s[0:1]
+; GFX12-SDAG-NEXT: s_nop 0
+; GFX12-SDAG-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-SDAG-NEXT: s_endpgm
;
-; GLOBAL-ISEL-LABEL: test2_s_barrier_init:
-; GLOBAL-ISEL: ; %bb.0: ; %entry
-; GLOBAL-ISEL-NEXT: s_load_b96 s[0:2], s[2:3], 0x24
-; GLOBAL-ISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; GLOBAL-ISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GLOBAL-ISEL-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_lshlrev_b32 v3, 2, v0
-; GLOBAL-ISEL-NEXT: v_mul_lo_u32 v1, v0, v0
-; GLOBAL-ISEL-NEXT: v_sub_nc_u32_e32 v0, v1, v0
-; GLOBAL-ISEL-NEXT: s_wait_kmcnt 0x0
-; GLOBAL-ISEL-NEXT: s_lshl_b32 m0, 16, s2
-; GLOBAL-ISEL-NEXT: global_store_b32 v3, v2, s[0:1]
-; GLOBAL-ISEL-NEXT: s_barrier_init 1
-; GLOBAL-ISEL-NEXT: global_store_b32 v3, v0, s[0:1]
-; GLOBAL-ISEL-NEXT: s_nop 0
-; GLOBAL-ISEL-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
-; GLOBAL-ISEL-NEXT: s_endpgm
+; GFX12-GISEL-LABEL: test2_s_barrier_init:
+; GFX12-GISEL: ; %bb.0: ; %entry
+; GFX12-GISEL-NEXT: s_load_b96 s[0:2], s[2:3], 0x24
+; GFX12-GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-GISEL-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_lshlrev_b32 v3, 2, v0
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v1, v0, v0
+; GFX12-GISEL-NEXT: v_sub_nc_u32_e32 v0, v1, v0
+; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX12-GISEL-NEXT: s_lshl_b32 m0, 16, s2
+; GFX12-GISEL-NEXT: global_store_b32 v3, v2, s[0:1]
+; GFX12-GISEL-NEXT: s_barrier_init 1
+; GFX12-GISEL-NEXT: global_store_b32 v3, v0, s[0:1]
+; GFX12-GISEL-NEXT: s_nop 0
+; GFX12-GISEL-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-GISEL-NEXT: s_endpgm
entry:
%tmp = call i32 @llvm.amdgcn.workitem.id.x()
%tmp1 = getelementptr i32, ptr addrspace(1) %out, i32 %tmp
@@ -635,40 +635,40 @@ entry:
}
define amdgpu_kernel void @test3_s_barrier_init(ptr addrspace(1) %out, i32 %mbrCnt) #0 {
-; GCN-LABEL: test3_s_barrier_init:
-; GCN: ; %bb.0: ; %entry
-; GCN-NEXT: s_load_b96 s[0:2], s[2:3], 0x24
-; GCN-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GCN-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_lshlrev_b32 v3, 2, v0
-; GCN-NEXT: v_mul_u32_u24_e32 v1, v0, v0
-; GCN-NEXT: v_sub_nc_u32_e32 v0, v1, v0
-; GCN-NEXT: s_wait_kmcnt 0x0
-; GCN-NEXT: s_lshl_b32 s2, s2, 16
-; GCN-NEXT: global_store_b32 v3, v2, s[0:1]
-; GCN-NEXT: s_mov_b32 m0, s2
-; GCN-NEXT: s_barrier_init 0
-; GCN-NEXT: global_store_b32 v3, v0, s[0:1]
-; GCN-NEXT: s_nop 0
-; GCN-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
-; GCN-NEXT: s_endpgm
+; GFX12-SDAG-LABEL: test3_s_barrier_init:
+; GFX12-SDAG: ; %bb.0: ; %entry
+; GFX12-SDAG-NEXT: s_load_b96 s[0:2], s[2:3], 0x24
+; GFX12-SDAG-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-SDAG-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_lshlrev_b32 v3, 2, v0
+; GFX12-SDAG-NEXT: v_mul_u32_u24_e32 v1, v0, v0
+; GFX12-SDAG-NEXT: v_sub_nc_u32_e32 v0, v1, v0
+; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0
+; GFX12-SDAG-NEXT: s_lshl_b32 s2, s2, 16
+; GFX12-SDAG-NEXT: global_store_b32 v3, v2, s[0:1]
+; GFX12-SDAG-NEXT: s_mov_b32 m0, s2
+; GFX12-SDAG-NEXT: s_barrier_init 0
+; GFX12-SDAG-NEXT: global_store_b32 v3, v0, s[0:1]
+; GFX12-SDAG-NEXT: s_nop 0
+; GFX12-SDAG-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-SDAG-NEXT: s_endpgm
;
-; GLOBAL-ISEL-LABEL: test3_s_barrier_init:
-; GLOBAL-ISEL: ; %bb.0: ; %entry
-; GLOBAL-ISEL-NEXT: s_load_b96 s[0:2], s[2:3], 0x24
-; GLOBAL-ISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; GLOBAL-ISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GLOBAL-ISEL-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_lshlrev_b32 v3, 2, v0
-; GLOBAL-ISEL-NEXT: v_mul_lo_u32 v1, v0, v0
-; GLOBAL-ISEL-NEXT: v_sub_nc_u32_e32 v0, v1, v0
-; GLOBAL-ISEL-NEXT: s_wait_kmcnt 0x0
-; GLOBAL-ISEL-NEXT: s_lshl_b32 m0, 16, s2
-; GLOBAL-ISEL-NEXT: global_store_b32 v3, v2, s[0:1]
-; GLOBAL-ISEL-NEXT: s_barrier_init 0
-; GLOBAL-ISEL-NEXT: global_store_b32 v3, v0, s[0:1]
-; GLOBAL-ISEL-NEXT: s_nop 0
-; GLOBAL-ISEL-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
-; GLOBAL-ISEL-NEXT: s_endpgm
+; GFX12-GISEL-LABEL: test3_s_barrier_init:
+; GFX12-GISEL: ; %bb.0: ; %entry
+; GFX12-GISEL-NEXT: s_load_b96 s[0:2], s[2:3], 0x24
+; GFX12-GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-GISEL-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_lshlrev_b32 v3, 2, v0
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v1, v0, v0
+; GFX12-GISEL-NEXT: v_sub_nc_u32_e32 v0, v1, v0
+; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX12-GISEL-NEXT: s_lshl_b32 m0, 16, s2
+; GFX12-GISEL-NEXT: global_store_b32 v3, v2, s[0:1]
+; GFX12-GISEL-NEXT: s_barrier_init 0
+; GFX12-GISEL-NEXT: global_store_b32 v3, v0, s[0:1]
+; GFX12-GISEL-NEXT: s_nop 0
+; GFX12-GISEL-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-GISEL-NEXT: s_endpgm
entry:
%tmp = call i32 @llvm.amdgcn.workitem.id.x()
%tmp1 = getelementptr i32, ptr addrspace(1) %out, i32 %tmp
@@ -681,43 +681,43 @@ entry:
}
define amdgpu_kernel void @test4_s_barrier_init(ptr addrspace(1) %out, i32 %bar, i32 %mbrCnt) #0 {
-; GCN-LABEL: test4_s_barrier_init:
-; GCN: ; %bb.0: ; %entry
-; GCN-NEXT: s_load_b128 s[0:3], s[2:3], 0x24
-; GCN-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GCN-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_lshlrev_b32 v3, 2, v0
-; GCN-NEXT: v_mul_u32_u24_e32 v1, v0, v0
-; GCN-NEXT: v_sub_nc_u32_e32 v0, v1, v0
-; GCN-NEXT: s_wait_kmcnt 0x0
-; GCN-NEXT: s_lshl_b32 s3, s3, 16
-; GCN-NEXT: global_store_b32 v3, v2, s[0:1]
-; GCN-NEXT: s_or_b32 s2, s2, s3
-; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GCN-NEXT: s_mov_b32 m0, s2
-; GCN-NEXT: s_barrier_init m0
-; GCN-NEXT: global_store_b32 v3, v0, s[0:1]
-; GCN-NEXT: s_nop 0
-; GCN-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
-; GCN-NEXT: s_endpgm
+; GFX12-SDAG-LABEL: test4_s_barrier_init:
+; GFX12-SDAG: ; %bb.0: ; %entry
+; GFX12-SDAG-NEXT: s_load_b128 s[0:3], s[2:3], 0x24
+; GFX12-SDAG-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-SDAG-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_lshlrev_b32 v3, 2, v0
+; GFX12-SDAG-NEXT: v_mul_u32_u24_e32 v1, v0, v0
+; GFX12-SDAG-NEXT: v_sub_nc_u32_e32 v0, v1, v0
+; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0
+; GFX12-SDAG-NEXT: s_lshl_b32 s3, s3, 16
+; GFX12-SDAG-NEXT: global_store_b32 v3, v2, s[0:1]
+; GFX12-SDAG-NEXT: s_or_b32 s2, s2, s3
+; GFX12-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-SDAG-NEXT: s_mov_b32 m0, s2
+; GFX12-SDAG-NEXT: s_barrier_init m0
+; GFX12-SDAG-NEXT: global_store_b32 v3, v0, s[0:1]
+; GFX12-SDAG-NEXT: s_nop 0
+; GFX12-SDAG-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-SDAG-NEXT: s_endpgm
;
-; GLOBAL-ISEL-LABEL: test4_s_barrier_init:
-; GLOBAL-ISEL: ; %bb.0: ; %entry
-; GLOBAL-ISEL-NEXT: s_load_b128 s[0:3], s[2:3], 0x24
-; GLOBAL-ISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; GLOBAL-ISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GLOBAL-ISEL-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_lshlrev_b32 v3, 2, v0
-; GLOBAL-ISEL-NEXT: v_mul_lo_u32 v1, v0, v0
-; GLOBAL-ISEL-NEXT: v_sub_nc_u32_e32 v0, v1, v0
-; GLOBAL-ISEL-NEXT: s_wait_kmcnt 0x0
-; GLOBAL-ISEL-NEXT: s_lshl_b32 s3, 16, s3
-; GLOBAL-ISEL-NEXT: global_store_b32 v3, v2, s[0:1]
-; GLOBAL-ISEL-NEXT: s_or_b32 m0, s2, s3
-; GLOBAL-ISEL-NEXT: s_barrier_init m0
-; GLOBAL-ISEL-NEXT: global_store_b32 v3, v0, s[0:1]
-; GLOBAL-ISEL-NEXT: s_nop 0
-; GLOBAL-ISEL-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
-; GLOBAL-ISEL-NEXT: s_endpgm
+; GFX12-GISEL-LABEL: test4_s_barrier_init:
+; GFX12-GISEL: ; %bb.0: ; %entry
+; GFX12-GISEL-NEXT: s_load_b128 s[0:3], s[2:3], 0x24
+; GFX12-GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-GISEL-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_lshlrev_b32 v3, 2, v0
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v1, v0, v0
+; GFX12-GISEL-NEXT: v_sub_nc_u32_e32 v0, v1, v0
+; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX12-GISEL-NEXT: s_lshl_b32 s3, 16, s3
+; GFX12-GISEL-NEXT: global_store_b32 v3, v2, s[0:1]
+; GFX12-GISEL-NEXT: s_or_b32 m0, s2, s3
+; GFX12-GISEL-NEXT: s_barrier_init m0
+; GFX12-GISEL-NEXT: global_store_b32 v3, v0, s[0:1]
+; GFX12-GISEL-NEXT: s_nop 0
+; GFX12-GISEL-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-GISEL-NEXT: s_endpgm
entry:
%tmp = call i32 @llvm.amdgcn.workitem.id.x()
%tmp1 = getelementptr i32, ptr addrspace(1) %out, i32 %tmp
@@ -730,74 +730,76 @@ entry:
}
define void @test5_s_barrier_init_m0(i32 %arg1 ,i32 %arg2) {
-; GCN-LABEL: test5_s_barrier_init_m0:
-; GCN: ; %bb.0:
-; GCN-NEXT: s_wait_loadcnt_dscnt 0x0
-; GCN-NEXT: s_wait_expcnt 0x0
-; GCN-NEXT: s_wait_samplecnt 0x0
-; GCN-NEXT: s_wait_bvhcnt 0x0
-; GCN-NEXT: s_wait_kmcnt 0x0
-; GCN-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GCN-NEXT: v_or_b32_e32 v0, v0, v1
-; GCN-NEXT: v_readfirstlane_b32 s0, v0
-; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GCN-NEXT: s_mov_b32 m0, s0
-; GCN-NEXT: s_barrier_init m0
-; GCN-NEXT: s_wait_alu 0xfffe
-; GCN-NEXT: s_setpc_b64 s[30:31]
+; GFX12-SDAG-LABEL: test5_s_barrier_init_m0:
+; GFX12-SDAG: ; %bb.0:
+; GFX12-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-SDAG-NEXT: s_wait_expcnt 0x0
+; GFX12-SDAG-NEXT: s_wait_samplecnt 0x0
+; GFX12-SDAG-NEXT: s_wait_bvhcnt 0x0
+; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0
+; GFX12-SDAG-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX12-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-SDAG-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX12-SDAG-NEXT: v_readfirstlane_b32 s0, v0
+; GFX12-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-SDAG-NEXT: s_mov_b32 m0, s0
+; GFX12-SDAG-NEXT: s_barrier_init m0
+; GFX12-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX12-SDAG-NEXT: s_setpc_b64 s[30:31]
;
-; GLOBAL-ISEL-LABEL: test5_s_barrier_init_m0:
-; GLOBAL-ISEL: ; %bb.0:
-; GLOBAL-ISEL-NEXT: s_wait_loadcnt_dscnt 0x0
-; GLOBAL-ISEL-NEXT: s_wait_expcnt 0x0
-; GLOBAL-ISEL-NEXT: s_wait_samplecnt 0x0
-; GLOBAL-ISEL-NEXT: s_wait_bvhcnt 0x0
-; GLOBAL-ISEL-NEXT: s_wait_kmcnt 0x0
-; GLOBAL-ISEL-NEXT: v_readfirstlane_b32 s0, v1
-; GLOBAL-ISEL-NEXT: v_readfirstlane_b32 s1, v0
-; GLOBAL-ISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GLOBAL-ISEL-NEXT: s_lshl_b32 s0, 16, s0
-; GLOBAL-ISEL-NEXT: s_wait_alu 0xfffe
-; GLOBAL-ISEL-NEXT: s_or_b32 m0, s1, s0
-; GLOBAL-ISEL-NEXT: s_barrier_init m0
-; GLOBAL-ISEL-NEXT: s_wait_alu 0xfffe
-; GLOBAL-ISEL-NEXT: s_setpc_b64 s[30:31]
+; GFX12-GISEL-LABEL: test5_s_barrier_init_m0:
+; GFX12-GISEL: ; %bb.0:
+; GFX12-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-GISEL-NEXT: s_wait_expcnt 0x0
+; GFX12-GISEL-NEXT: s_wait_samplecnt 0x0
+; GFX12-GISEL-NEXT: s_wait_bvhcnt 0x0
+; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX12-GISEL-NEXT: v_readfirstlane_b32 s0, v1
+; GFX12-GISEL-NEXT: v_readfirstlane_b32 s1, v0
+; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-GISEL-NEXT: s_lshl_b32 s0, 16, s0
+; GFX12-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX12-GISEL-NEXT: s_or_b32 m0, s1, s0
+; GFX12-GISEL-NEXT: s_barrier_init m0
+; GFX12-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX12-GISEL-NEXT: s_setpc_b64 s[30:31]
call void @llvm.amdgcn.s.barrier.init(i32 %arg1, i32 %arg2)
ret void
}
define amdgpu_kernel void @test1_s_barrier_join(ptr addrspace(1) %out) #0 {
-; GCN-LABEL: test1_s_barrier_join:
-; GCN: ; %bb.0: ; %entry
-; GCN-NEXT: s_load_b64 s[0:1], s[2:3], 0x24
-; GCN-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; GCN-NEXT: s_barrier_join -1
-; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GCN-NEXT: v_mul_u32_u24_e32 v1, v0, v0
-; GCN-NEXT: v_lshlrev_b32_e32 v2, 2, v0
-; GCN-NEXT: v_sub_nc_u32_e32 v0, v1, v0
-; GCN-NEXT: s_wait_kmcnt 0x0
-; GCN-NEXT: global_store_b32 v2, v0, s[0:1]
-; GCN-NEXT: s_nop 0
-; GCN-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
-; GCN-NEXT: s_endpgm
;
-; GLOBAL-ISEL-LABEL: test1_s_barrier_join:
-; GLOBAL-ISEL: ; %bb.0: ; %entry
-; GLOBAL-ISEL-NEXT: s_load_b64 s[0:1], s[2:3], 0x24
-; GLOBAL-ISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; GLOBAL-ISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GLOBAL-ISEL-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_lshlrev_b32 v3, 2, v0
-; GLOBAL-ISEL-NEXT: v_mul_lo_u32 v1, v0, v0
-; GLOBAL-ISEL-NEXT: v_sub_nc_u32_e32 v0, v1, v0
-; GLOBAL-ISEL-NEXT: s_wait_kmcnt 0x0
-; GLOBAL-ISEL-NEXT: global_store_b32 v3, v2, s[0:1]
-; GLOBAL-ISEL-NEXT: s_barrier_join -1
-; GLOBAL-ISEL-NEXT: global_store_b32 v3, v0, s[0:1]
-; GLOBAL-ISEL-NEXT: s_nop 0
-; GLOBAL-ISEL-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
-; GLOBAL-ISEL-NEXT: s_endpgm
+; GFX12-SDAG-LABEL: test1_s_barrier_join:
+; GFX12-SDAG: ; %bb.0: ; %entry
+; GFX12-SDAG-NEXT: s_load_b64 s[0:1], s[2:3], 0x24
+; GFX12-SDAG-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-SDAG-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_lshlrev_b32 v3, 2, v0
+; GFX12-SDAG-NEXT: v_mul_u32_u24_e32 v1, v0, v0
+; GFX12-SDAG-NEXT: v_sub_nc_u32_e32 v0, v1, v0
+; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0
+; GFX12-SDAG-NEXT: global_store_b32 v3, v2, s[0:1]
+; GFX12-SDAG-NEXT: s_barrier_join -1
+; GFX12-SDAG-NEXT: global_store_b32 v3, v0, s[0:1]
+; GFX12-SDAG-NEXT: s_nop 0
+; GFX12-SDAG-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-SDAG-NEXT: s_endpgm
+;
+; GFX12-GISEL-LABEL: test1_s_barrier_join:
+; GFX12-GISEL: ; %bb.0: ; %entry
+; GFX12-GISEL-NEXT: s_load_b64 s[0:1], s[2:3], 0x24
+; GFX12-GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-GISEL-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_lshlrev_b32 v3, 2, v0
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v1, v0, v0
+; GFX12-GISEL-NEXT: v_sub_nc_u32_e32 v0, v1, v0
+; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX12-GISEL-NEXT: global_store_b32 v3, v2, s[0:1]
+; GFX12-GISEL-NEXT: s_barrier_join -1
+; GFX12-GISEL-NEXT: global_store_b32 v3, v0, s[0:1]
+; GFX12-GISEL-NEXT: s_nop 0
+; GFX12-GISEL-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-GISEL-NEXT: s_endpgm
entry:
%tmp = call i32 @llvm.amdgcn.workitem.id.x()
%tmp1 = getelementptr i32, ptr addrspace(1) %out, i32 %tmp
@@ -810,36 +812,38 @@ entry:
}
define amdgpu_kernel void @test2_s_barrier_join(ptr addrspace(1) %out) #0 {
-; GCN-LABEL: test2_s_barrier_join:
-; GCN: ; %bb.0: ; %entry
-; GCN-NEXT: s_load_b64 s[0:1], s[2:3], 0x24
-; GCN-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; GCN-NEXT: s_barrier_join 1
-; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GCN-NEXT: v_mul_u32_u24_e32 v1, v0, v0
-; GCN-NEXT: v_lshlrev_b32_e32 v2, 2, v0
-; GCN-NEXT: v_sub_nc_u32_e32 v0, v1, v0
-; GCN-NEXT: s_wait_kmcnt 0x0
-; GCN-NEXT: global_store_b32 v2, v0, s[0:1]
-; GCN-NEXT: s_nop 0
-; GCN-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
-; GCN-NEXT: s_endpgm
;
-; GLOBAL-ISEL-LABEL: test2_s_barrier_join:
-; GLOBAL-ISEL: ; %bb.0: ; %entry
-; GLOBAL-ISEL-NEXT: s_load_b64 s[0:1], s[2:3], 0x24
-; GLOBAL-ISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; GLOBAL-ISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GLOBAL-ISEL-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_lshlrev_b32 v3, 2, v0
-; GLOBAL-ISEL-NEXT: v_mul_lo_u32 v1, v0, v0
-; GLOBAL-ISEL-NEXT: v_sub_nc_u32_e32 v0, v1, v0
-; GLOBAL-ISEL-NEXT: s_wait_kmcnt 0x0
-; GLOBAL-ISEL-NEXT: global_store_b32 v3, v2, s[0:1]
-; GLOBAL-ISEL-NEXT: s_barrier_join 1
-; GLOBAL-ISEL-NEXT: global_store_b32 v3, v0, s[0:1]
-; GLOBAL-ISEL-NEXT: s_nop 0
-; GLOBAL-ISEL-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
-; GLOBAL-ISEL-NEXT: s_endpgm
+; GFX12-SDAG-LABEL: test2_s_barrier_join:
+; GFX12-SDAG: ; %bb.0: ; %entry
+; GFX12-SDAG-NEXT: s_load_b64 s[0:1], s[2:3], 0x24
+; GFX12-SDAG-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-SDAG-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_lshlrev_b32 v3, 2, v0
+; GFX12-SDAG-NEXT: v_mul_u32_u24_e32 v1, v0, v0
+; GFX12-SDAG-NEXT: v_sub_nc_u32_e32 v0, v1, v0
+; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0
+; GFX12-SDAG-NEXT: global_store_b32 v3, v2, s[0:1]
+; GFX12-SDAG-NEXT: s_barrier_join 1
+; GFX12-SDAG-NEXT: global_store_b32 v3, v0, s[0:1]
+; GFX12-SDAG-NEXT: s_nop 0
+; GFX12-SDAG-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-SDAG-NEXT: s_endpgm
+;
+; GFX12-GISEL-LABEL: test2_s_barrier_join:
+; GFX12-GISEL: ; %bb.0: ; %entry
+; GFX12-GISEL-NEXT: s_load_b64 s[0:1], s[2:3], 0x24
+; GFX12-GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-GISEL-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_lshlrev_b32 v3, 2, v0
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v1, v0, v0
+; GFX12-GISEL-NEXT: v_sub_nc_u32_e32 v0, v1, v0
+; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX12-GISEL-NEXT: global_store_b32 v3, v2, s[0:1]
+; GFX12-GISEL-NEXT: s_barrier_join 1
+; GFX12-GISEL-NEXT: global_store_b32 v3, v0, s[0:1]
+; GFX12-GISEL-NEXT: s_nop 0
+; GFX12-GISEL-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-GISEL-NEXT: s_endpgm
entry:
%tmp = call i32 @llvm.amdgcn.workitem.id.x()
%tmp1 = getelementptr i32, ptr addrspace(1) %out, i32 %tmp
@@ -852,36 +856,38 @@ entry:
}
define amdgpu_kernel void @test3_s_barrier_join(ptr addrspace(1) %out) #0 {
-; GCN-LABEL: test3_s_barrier_join:
-; GCN: ; %bb.0: ; %entry
-; GCN-NEXT: s_load_b64 s[0:1], s[2:3], 0x24
-; GCN-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; GCN-NEXT: s_barrier_join 0
-; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GCN-NEXT: v_mul_u32_u24_e32 v1, v0, v0
-; GCN-NEXT: v_lshlrev_b32_e32 v2, 2, v0
-; GCN-NEXT: v_sub_nc_u32_e32 v0, v1, v0
-; GCN-NEXT: s_wait_kmcnt 0x0
-; GCN-NEXT: global_store_b32 v2, v0, s[0:1]
-; GCN-NEXT: s_nop 0
-; GCN-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
-; GCN-NEXT: s_endpgm
;
-; GLOBAL-ISEL-LABEL: test3_s_barrier_join:
-; GLOBAL-ISEL: ; %bb.0: ; %entry
-; GLOBAL-ISEL-NEXT: s_load_b64 s[0:1], s[2:3], 0x24
-; GLOBAL-ISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; GLOBAL-ISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GLOBAL-ISEL-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_lshlrev_b32 v3, 2, v0
-; GLOBAL-ISEL-NEXT: v_mul_lo_u32 v1, v0, v0
-; GLOBAL-ISEL-NEXT: v_sub_nc_u32_e32 v0, v1, v0
-; GLOBAL-ISEL-NEXT: s_wait_kmcnt 0x0
-; GLOBAL-ISEL-NEXT: global_store_b32 v3, v2, s[0:1]
-; GLOBAL-ISEL-NEXT: s_barrier_join 0
-; GLOBAL-ISEL-NEXT: global_store_b32 v3, v0, s[0:1]
-; GLOBAL-ISEL-NEXT: s_nop 0
-; GLOBAL-ISEL-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
-; GLOBAL-ISEL-NEXT: s_endpgm
+; GFX12-SDAG-LABEL: test3_s_barrier_join:
+; GFX12-SDAG: ; %bb.0: ; %entry
+; GFX12-SDAG-NEXT: s_load_b64 s[0:1], s[2:3], 0x24
+; GFX12-SDAG-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-SDAG-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_lshlrev_b32 v3, 2, v0
+; GFX12-SDAG-NEXT: v_mul_u32_u24_e32 v1, v0, v0
+; GFX12-SDAG-NEXT: v_sub_nc_u32_e32 v0, v1, v0
+; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0
+; GFX12-SDAG-NEXT: global_store_b32 v3, v2, s[0:1]
+; GFX12-SDAG-NEXT: s_barrier_join 0
+; GFX12-SDAG-NEXT: global_store_b32 v3, v0, s[0:1]
+; GFX12-SDAG-NEXT: s_nop 0
+; GFX12-SDAG-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-SDAG-NEXT: s_endpgm
+;
+; GFX12-GISEL-LABEL: test3_s_barrier_join:
+; GFX12-GISEL: ; %bb.0: ; %entry
+; GFX12-GISEL-NEXT: s_load_b64 s[0:1], s[2:3], 0x24
+; GFX12-GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-GISEL-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_lshlrev_b32 v3, 2, v0
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v1, v0, v0
+; GFX12-GISEL-NEXT: v_sub_nc_u32_e32 v0, v1, v0
+; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX12-GISEL-NEXT: global_store_b32 v3, v2, s[0:1]
+; GFX12-GISEL-NEXT: s_barrier_join 0
+; GFX12-GISEL-NEXT: global_store_b32 v3, v0, s[0:1]
+; GFX12-GISEL-NEXT: s_nop 0
+; GFX12-GISEL-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-GISEL-NEXT: s_endpgm
entry:
%tmp = call i32 @llvm.amdgcn.workitem.id.x()
%tmp1 = getelementptr i32, ptr addrspace(1) %out, i32 %tmp
@@ -894,39 +900,39 @@ entry:
}
define amdgpu_kernel void @test4_s_barrier_join_m0(ptr addrspace(1) %out, i32 %bar) #0 {
-; GCN-LABEL: test4_s_barrier_join_m0:
-; GCN: ; %bb.0: ; %entry
-; GCN-NEXT: s_load_b96 s[0:2], s[2:3], 0x24
-; GCN-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_and_b32 v0, 0x3ff, v0
-; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GCN-NEXT: v_mul_u32_u24_e32 v2, v0, v0
-; GCN-NEXT: v_lshlrev_b32_e32 v3, 2, v0
-; GCN-NEXT: v_sub_nc_u32_e32 v0, v2, v0
-; GCN-NEXT: s_wait_kmcnt 0x0
-; GCN-NEXT: s_mov_b32 m0, s2
-; GCN-NEXT: global_store_b32 v3, v1, s[0:1]
-; GCN-NEXT: s_barrier_join m0
-; GCN-NEXT: global_store_b32 v3, v0, s[0:1]
-; GCN-NEXT: s_nop 0
-; GCN-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
-; GCN-NEXT: s_endpgm
+; GFX12-SDAG-LABEL: test4_s_barrier_join_m0:
+; GFX12-SDAG: ; %bb.0: ; %entry
+; GFX12-SDAG-NEXT: s_load_b96 s[0:2], s[2:3], 0x24
+; GFX12-SDAG-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_and_b32 v0, 0x3ff, v0
+; GFX12-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-SDAG-NEXT: v_mul_u32_u24_e32 v2, v0, v0
+; GFX12-SDAG-NEXT: v_lshlrev_b32_e32 v3, 2, v0
+; GFX12-SDAG-NEXT: v_sub_nc_u32_e32 v0, v2, v0
+; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0
+; GFX12-SDAG-NEXT: s_mov_b32 m0, s2
+; GFX12-SDAG-NEXT: global_store_b32 v3, v1, s[0:1]
+; GFX12-SDAG-NEXT: s_barrier_join m0
+; GFX12-SDAG-NEXT: global_store_b32 v3, v0, s[0:1]
+; GFX12-SDAG-NEXT: s_nop 0
+; GFX12-SDAG-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-SDAG-NEXT: s_endpgm
;
-; GLOBAL-ISEL-LABEL: test4_s_barrier_join_m0:
-; GLOBAL-ISEL: ; %bb.0: ; %entry
-; GLOBAL-ISEL-NEXT: s_load_b96 s[0:2], s[2:3], 0x24
-; GLOBAL-ISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; GLOBAL-ISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GLOBAL-ISEL-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_lshlrev_b32 v3, 2, v0
-; GLOBAL-ISEL-NEXT: v_mul_lo_u32 v1, v0, v0
-; GLOBAL-ISEL-NEXT: v_sub_nc_u32_e32 v0, v1, v0
-; GLOBAL-ISEL-NEXT: s_wait_kmcnt 0x0
-; GLOBAL-ISEL-NEXT: s_mov_b32 m0, s2
-; GLOBAL-ISEL-NEXT: global_store_b32 v3, v2, s[0:1]
-; GLOBAL-ISEL-NEXT: s_barrier_join m0
-; GLOBAL-ISEL-NEXT: global_store_b32 v3, v0, s[0:1]
-; GLOBAL-ISEL-NEXT: s_nop 0
-; GLOBAL-ISEL-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
-; GLOBAL-ISEL-NEXT: s_endpgm
+; GFX12-GISEL-LABEL: test4_s_barrier_join_m0:
+; GFX12-GISEL: ; %bb.0: ; %entry
+; GFX12-GISEL-NEXT: s_load_b96 s[0:2], s[2:3], 0x24
+; GFX12-GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-GISEL-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_lshlrev_b32 v3, 2, v0
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v1, v0, v0
+; GFX12-GISEL-NEXT: v_sub_nc_u32_e32 v0, v1, v0
+; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX12-GISEL-NEXT: s_mov_b32 m0, s2
+; GFX12-GISEL-NEXT: global_store_b32 v3, v2, s[0:1]
+; GFX12-GISEL-NEXT: s_barrier_join m0
+; GFX12-GISEL-NEXT: global_store_b32 v3, v0, s[0:1]
+; GFX12-GISEL-NEXT: s_nop 0
+; GFX12-GISEL-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-GISEL-NEXT: s_endpgm
entry:
%tmp = call i32 @llvm.amdgcn.workitem.id.x()
%tmp1 = getelementptr i32, ptr addrspace(1) %out, i32 %tmp
@@ -939,79 +945,93 @@ entry:
}
define void @test5_s_barrier_join_m0(i32 %arg) {
-; GCN-LABEL: test5_s_barrier_join_m0:
-; GCN: ; %bb.0:
-; GCN-NEXT: s_wait_loadcnt_dscnt 0x0
-; GCN-NEXT: s_wait_expcnt 0x0
-; GCN-NEXT: s_wait_samplecnt 0x0
-; GCN-NEXT: s_wait_bvhcnt 0x0
-; GCN-NEXT: s_wait_kmcnt 0x0
-; GCN-NEXT: v_readfirstlane_b32 s0, v0
-; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GCN-NEXT: s_mov_b32 m0, s0
-; GCN-NEXT: s_barrier_join m0
-; GCN-NEXT: s_wait_alu 0xfffe
-; GCN-NEXT: s_setpc_b64 s[30:31]
+; GFX12-SDAG-LABEL: test5_s_barrier_join_m0:
+; GFX12-SDAG: ; %bb.0:
+; GFX12-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-SDAG-NEXT: s_wait_expcnt 0x0
+; GFX12-SDAG-NEXT: s_wait_samplecnt 0x0
+; GFX12-SDAG-NEXT: s_wait_bvhcnt 0x0
+; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0
+; GFX12-SDAG-NEXT: v_readfirstlane_b32 s0, v0
+; GFX12-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-SDAG-NEXT: s_mov_b32 m0, s0
+; GFX12-SDAG-NEXT: s_barrier_join m0
+; GFX12-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX12-SDAG-NEXT: s_setpc_b64 s[30:31]
;
-; GLOBAL-ISEL-LABEL: test5_s_barrier_join_m0:
-; GLOBAL-ISEL: ; %bb.0:
-; GLOBAL-ISEL-NEXT: s_wait_loadcnt_dscnt 0x0
-; GLOBAL-ISEL-NEXT: s_wait_expcnt 0x0
-; GLOBAL-ISEL-NEXT: s_wait_samplecnt 0x0
-; GLOBAL-ISEL-NEXT: s_wait_bvhcnt 0x0
-; GLOBAL-ISEL-NEXT: s_wait_kmcnt 0x0
-; GLOBAL-ISEL-NEXT: v_readfirstlane_b32 m0, v0
-; GLOBAL-ISEL-NEXT: s_barrier_join m0
-; GLOBAL-ISEL-NEXT: s_setpc_b64 s[30:31]
+; GFX12-GISEL-LABEL: test5_s_barrier_join_m0:
+; GFX12-GISEL: ; %bb.0:
+; GFX12-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-GISEL-NEXT: s_wait_expcnt 0x0
+; GFX12-GISEL-NEXT: s_wait_samplecnt 0x0
+; GFX12-GISEL-NEXT: s_wait_bvhcnt 0x0
+; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX12-GISEL-NEXT: v_readfirstlane_b32 m0, v0
+; GFX12-GISEL-NEXT: s_barrier_join m0
+; GFX12-GISEL-NEXT: s_setpc_b64 s[30:31]
call void @llvm.amdgcn.s.barrier.join(i32 %arg)
ret void
}
+define void @test6_s_barrier_join_0() {
+; GFX12-LABEL: test6_s_barrier_join_0:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: s_barrier_join 0
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+ call void @llvm.amdgcn.s.barrier.join(i32 0)
+ ret void
+}
+
define amdgpu_kernel void @test1_s_barrier_leave(ptr addrspace(1) %a, ptr addrspace(1) %b, ptr addrspace(1) %c, ptr addrspace(1) %out) #0 {
-; GCN-LABEL: test1_s_barrier_leave:
-; GCN: ; %bb.0: ; %entry
-; GCN-NEXT: s_load_b256 s[0:7], s[2:3], 0x24
-; GCN-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_and_b32 v0, 0x3ff, v0
-; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GCN-NEXT: v_lshlrev_b32_e32 v0, 2, v0
-; GCN-NEXT: s_wait_kmcnt 0x0
-; GCN-NEXT: global_store_b32 v0, v1, s[6:7]
-; GCN-NEXT: s_barrier_leave
-; GCN-NEXT: s_cselect_b32 s3, s3, s5
-; GCN-NEXT: s_cselect_b32 s2, s2, s4
-; GCN-NEXT: s_clause 0x1
-; GCN-NEXT: global_load_b32 v2, v1, s[0:1]
-; GCN-NEXT: global_load_b32 v1, v1, s[2:3]
-; GCN-NEXT: s_wait_loadcnt 0x0
-; GCN-NEXT: v_mul_lo_u32 v1, v1, v2
-; GCN-NEXT: global_store_b32 v0, v1, s[6:7]
-; GCN-NEXT: s_nop 0
-; GCN-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
-; GCN-NEXT: s_endpgm
+; GFX12-SDAG-LABEL: test1_s_barrier_leave:
+; GFX12-SDAG: ; %bb.0: ; %entry
+; GFX12-SDAG-NEXT: s_load_b256 s[0:7], s[2:3], 0x24
+; GFX12-SDAG-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_and_b32 v0, 0x3ff, v0
+; GFX12-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-SDAG-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0
+; GFX12-SDAG-NEXT: global_store_b32 v0, v1, s[6:7]
+; GFX12-SDAG-NEXT: s_barrier_leave
+; GFX12-SDAG-NEXT: s_cselect_b32 s3, s3, s5
+; GFX12-SDAG-NEXT: s_cselect_b32 s2, s2, s4
+; GFX12-SDAG-NEXT: s_clause 0x1
+; GFX12-SDAG-NEXT: global_load_b32 v2, v1, s[0:1]
+; GFX12-SDAG-NEXT: global_load_b32 v1, v1, s[2:3]
+; GFX12-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX12-SDAG-NEXT: v_mul_lo_u32 v1, v1, v2
+; GFX12-SDAG-NEXT: global_store_b32 v0, v1, s[6:7]
+; GFX12-SDAG-NEXT: s_nop 0
+; GFX12-SDAG-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-SDAG-NEXT: s_endpgm
;
-; GLOBAL-ISEL-LABEL: test1_s_barrier_leave:
-; GLOBAL-ISEL: ; %bb.0: ; %entry
-; GLOBAL-ISEL-NEXT: s_load_b256 s[0:7], s[2:3], 0x24
-; GLOBAL-ISEL-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_and_b32 v0, 0x3ff, v0
-; GLOBAL-ISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
-; GLOBAL-ISEL-NEXT: v_lshlrev_b32_e32 v0, 2, v0
-; GLOBAL-ISEL-NEXT: s_wait_kmcnt 0x0
-; GLOBAL-ISEL-NEXT: global_store_b32 v0, v1, s[6:7]
-; GLOBAL-ISEL-NEXT: s_barrier_leave
-; GLOBAL-ISEL-NEXT: s_cselect_b32 s8, 1, 0
-; GLOBAL-ISEL-NEXT: s_and_b32 s8, s8, 1
-; GLOBAL-ISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GLOBAL-ISEL-NEXT: s_cmp_lg_u32 s8, 0
-; GLOBAL-ISEL-NEXT: s_cselect_b64 s[2:3], s[2:3], s[4:5]
-; GLOBAL-ISEL-NEXT: s_clause 0x1
-; GLOBAL-ISEL-NEXT: global_load_b32 v2, v1, s[0:1]
-; GLOBAL-ISEL-NEXT: global_load_b32 v1, v1, s[2:3]
-; GLOBAL-ISEL-NEXT: s_wait_loadcnt 0x0
-; GLOBAL-ISEL-NEXT: v_mul_lo_u32 v1, v1, v2
-; GLOBAL-ISEL-NEXT: global_store_b32 v0, v1, s[6:7]
-; GLOBAL-ISEL-NEXT: s_nop 0
-; GLOBAL-ISEL-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
-; GLOBAL-ISEL-NEXT: s_endpgm
+; GFX12-GISEL-LABEL: test1_s_barrier_leave:
+; GFX12-GISEL: ; %bb.0: ; %entry
+; GFX12-GISEL-NEXT: s_load_b256 s[0:7], s[2:3], 0x24
+; GFX12-GISEL-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_and_b32 v0, 0x3ff, v0
+; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
+; GFX12-GISEL-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX12-GISEL-NEXT: global_store_b32 v0, v1, s[6:7]
+; GFX12-GISEL-NEXT: s_barrier_leave
+; GFX12-GISEL-NEXT: s_cselect_b32 s8, 1, 0
+; GFX12-GISEL-NEXT: s_and_b32 s8, s8, 1
+; GFX12-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-GISEL-NEXT: s_cmp_lg_u32 s8, 0
+; GFX12-GISEL-NEXT: s_cselect_b64 s[2:3], s[2:3], s[4:5]
+; GFX12-GISEL-NEXT: s_clause 0x1
+; GFX12-GISEL-NEXT: global_load_b32 v2, v1, s[0:1]
+; GFX12-GISEL-NEXT: global_load_b32 v1, v1, s[2:3]
+; GFX12-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v1, v1, v2
+; GFX12-GISEL-NEXT: global_store_b32 v0, v1, s[6:7]
+; GFX12-GISEL-NEXT: s_nop 0
+; GFX12-GISEL-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-GISEL-NEXT: s_endpgm
entry:
%tmp = call i32 @llvm.amdgcn.workitem.id.x()
%tmp1 = getelementptr i32, ptr addrspace(1) %out, i32 %tmp
@@ -1026,36 +1046,38 @@ entry:
}
define amdgpu_kernel void @test1_s_wakeup_barrier(ptr addrspace(1) %out) #0 {
-; GCN-LABEL: test1_s_wakeup_barrier:
-; GCN: ; %bb.0: ; %entry
-; GCN-NEXT: s_load_b64 s[0:1], s[2:3], 0x24
-; GCN-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; GCN-NEXT: s_wakeup_barrier -1
-; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GCN-NEXT: v_mul_u32_u24_e32 v1, v0, v0
-; GCN-NEXT: v_lshlrev_b32_e32 v2, 2, v0
-; GCN-NEXT: v_sub_nc_u32_e32 v0, v1, v0
-; GCN-NEXT: s_wait_kmcnt 0x0
-; GCN-NEXT: global_store_b32 v2, v0, s[0:1]
-; GCN-NEXT: s_nop 0
-; GCN-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
-; GCN-NEXT: s_endpgm
;
-; GLOBAL-ISEL-LABEL: test1_s_wakeup_barrier:
-; GLOBAL-ISEL: ; %bb.0: ; %entry
-; GLOBAL-ISEL-NEXT: s_load_b64 s[0:1], s[2:3], 0x24
-; GLOBAL-ISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; GLOBAL-ISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GLOBAL-ISEL-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_lshlrev_b32 v3, 2, v0
-; GLOBAL-ISEL-NEXT: v_mul_lo_u32 v1, v0, v0
-; GLOBAL-ISEL-NEXT: v_sub_nc_u32_e32 v0, v1, v0
-; GLOBAL-ISEL-NEXT: s_wait_kmcnt 0x0
-; GLOBAL-ISEL-NEXT: global_store_b32 v3, v2, s[0:1]
-; GLOBAL-ISEL-NEXT: s_wakeup_barrier -1
-; GLOBAL-ISEL-NEXT: global_store_b32 v3, v0, s[0:1]
-; GLOBAL-ISEL-NEXT: s_nop 0
-; GLOBAL-ISEL-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
-; GLOBAL-ISEL-NEXT: s_endpgm
+; GFX12-SDAG-LABEL: test1_s_wakeup_barrier:
+; GFX12-SDAG: ; %bb.0: ; %entry
+; GFX12-SDAG-NEXT: s_load_b64 s[0:1], s[2:3], 0x24
+; GFX12-SDAG-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-SDAG-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_lshlrev_b32 v3, 2, v0
+; GFX12-SDAG-NEXT: v_mul_u32_u24_e32 v1, v0, v0
+; GFX12-SDAG-NEXT: v_sub_nc_u32_e32 v0, v1, v0
+; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0
+; GFX12-SDAG-NEXT: global_store_b32 v3, v2, s[0:1]
+; GFX12-SDAG-NEXT: s_wakeup_barrier -1
+; GFX12-SDAG-NEXT: global_store_b32 v3, v0, s[0:1]
+; GFX12-SDAG-NEXT: s_nop 0
+; GFX12-SDAG-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-SDAG-NEXT: s_endpgm
+;
+; GFX12-GISEL-LABEL: test1_s_wakeup_barrier:
+; GFX12-GISEL: ; %bb.0: ; %entry
+; GFX12-GISEL-NEXT: s_load_b64 s[0:1], s[2:3], 0x24
+; GFX12-GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-GISEL-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_lshlrev_b32 v3, 2, v0
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v1, v0, v0
+; GFX12-GISEL-NEXT: v_sub_nc_u32_e32 v0, v1, v0
+; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX12-GISEL-NEXT: global_store_b32 v3, v2, s[0:1]
+; GFX12-GISEL-NEXT: s_wakeup_barrier -1
+; GFX12-GISEL-NEXT: global_store_b32 v3, v0, s[0:1]
+; GFX12-GISEL-NEXT: s_nop 0
+; GFX12-GISEL-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-GISEL-NEXT: s_endpgm
entry:
%tmp = call i32 @llvm.amdgcn.workitem.id.x()
%tmp1 = getelementptr i32, ptr addrspace(1) %out, i32 %tmp
@@ -1068,36 +1090,38 @@ entry:
}
define amdgpu_kernel void @test2_s_wakeup_barrier(ptr addrspace(1) %out) #0 {
-; GCN-LABEL: test2_s_wakeup_barrier:
-; GCN: ; %bb.0: ; %entry
-; GCN-NEXT: s_load_b64 s[0:1], s[2:3], 0x24
-; GCN-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; GCN-NEXT: s_wakeup_barrier 1
-; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GCN-NEXT: v_mul_u32_u24_e32 v1, v0, v0
-; GCN-NEXT: v_lshlrev_b32_e32 v2, 2, v0
-; GCN-NEXT: v_sub_nc_u32_e32 v0, v1, v0
-; GCN-NEXT: s_wait_kmcnt 0x0
-; GCN-NEXT: global_store_b32 v2, v0, s[0:1]
-; GCN-NEXT: s_nop 0
-; GCN-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
-; GCN-NEXT: s_endpgm
;
-; GLOBAL-ISEL-LABEL: test2_s_wakeup_barrier:
-; GLOBAL-ISEL: ; %bb.0: ; %entry
-; GLOBAL-ISEL-NEXT: s_load_b64 s[0:1], s[2:3], 0x24
-; GLOBAL-ISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; GLOBAL-ISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GLOBAL-ISEL-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_lshlrev_b32 v3, 2, v0
-; GLOBAL-ISEL-NEXT: v_mul_lo_u32 v1, v0, v0
-; GLOBAL-ISEL-NEXT: v_sub_nc_u32_e32 v0, v1, v0
-; GLOBAL-ISEL-NEXT: s_wait_kmcnt 0x0
-; GLOBAL-ISEL-NEXT: global_store_b32 v3, v2, s[0:1]
-; GLOBAL-ISEL-NEXT: s_wakeup_barrier 1
-; GLOBAL-ISEL-NEXT: global_store_b32 v3, v0, s[0:1]
-; GLOBAL-ISEL-NEXT: s_nop 0
-; GLOBAL-ISEL-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
-; GLOBAL-ISEL-NEXT: s_endpgm
+; GFX12-SDAG-LABEL: test2_s_wakeup_barrier:
+; GFX12-SDAG: ; %bb.0: ; %entry
+; GFX12-SDAG-NEXT: s_load_b64 s[0:1], s[2:3], 0x24
+; GFX12-SDAG-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-SDAG-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_lshlrev_b32 v3, 2, v0
+; GFX12-SDAG-NEXT: v_mul_u32_u24_e32 v1, v0, v0
+; GFX12-SDAG-NEXT: v_sub_nc_u32_e32 v0, v1, v0
+; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0
+; GFX12-SDAG-NEXT: global_store_b32 v3, v2, s[0:1]
+; GFX12-SDAG-NEXT: s_wakeup_barrier 1
+; GFX12-SDAG-NEXT: global_store_b32 v3, v0, s[0:1]
+; GFX12-SDAG-NEXT: s_nop 0
+; GFX12-SDAG-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-SDAG-NEXT: s_endpgm
+;
+; GFX12-GISEL-LABEL: test2_s_wakeup_barrier:
+; GFX12-GISEL: ; %bb.0: ; %entry
+; GFX12-GISEL-NEXT: s_load_b64 s[0:1], s[2:3], 0x24
+; GFX12-GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-GISEL-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_lshlrev_b32 v3, 2, v0
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v1, v0, v0
+; GFX12-GISEL-NEXT: v_sub_nc_u32_e32 v0, v1, v0
+; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX12-GISEL-NEXT: global_store_b32 v3, v2, s[0:1]
+; GFX12-GISEL-NEXT: s_wakeup_barrier 1
+; GFX12-GISEL-NEXT: global_store_b32 v3, v0, s[0:1]
+; GFX12-GISEL-NEXT: s_nop 0
+; GFX12-GISEL-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-GISEL-NEXT: s_endpgm
entry:
%tmp = call i32 @llvm.amdgcn.workitem.id.x()
%tmp1 = getelementptr i32, ptr addrspace(1) %out, i32 %tmp
@@ -1110,36 +1134,38 @@ entry:
}
define amdgpu_kernel void @test3_s_wakeup_barrier(ptr addrspace(1) %out) #0 {
-; GCN-LABEL: test3_s_wakeup_barrier:
-; GCN: ; %bb.0: ; %entry
-; GCN-NEXT: s_load_b64 s[0:1], s[2:3], 0x24
-; GCN-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; GCN-NEXT: s_wakeup_barrier 0
-; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GCN-NEXT: v_mul_u32_u24_e32 v1, v0, v0
-; GCN-NEXT: v_lshlrev_b32_e32 v2, 2, v0
-; GCN-NEXT: v_sub_nc_u32_e32 v0, v1, v0
-; GCN-NEXT: s_wait_kmcnt 0x0
-; GCN-NEXT: global_store_b32 v2, v0, s[0:1]
-; GCN-NEXT: s_nop 0
-; GCN-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
-; GCN-NEXT: s_endpgm
;
-; GLOBAL-ISEL-LABEL: test3_s_wakeup_barrier:
-; GLOBAL-ISEL: ; %bb.0: ; %entry
-; GLOBAL-ISEL-NEXT: s_load_b64 s[0:1], s[2:3], 0x24
-; GLOBAL-ISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; GLOBAL-ISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GLOBAL-ISEL-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_lshlrev_b32 v3, 2, v0
-; GLOBAL-ISEL-NEXT: v_mul_lo_u32 v1, v0, v0
-; GLOBAL-ISEL-NEXT: v_sub_nc_u32_e32 v0, v1, v0
-; GLOBAL-ISEL-NEXT: s_wait_kmcnt 0x0
-; GLOBAL-ISEL-NEXT: global_store_b32 v3, v2, s[0:1]
-; GLOBAL-ISEL-NEXT: s_wakeup_barrier 0
-; GLOBAL-ISEL-NEXT: global_store_b32 v3, v0, s[0:1]
-; GLOBAL-ISEL-NEXT: s_nop 0
-; GLOBAL-ISEL-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
-; GLOBAL-ISEL-NEXT: s_endpgm
+; GFX12-SDAG-LABEL: test3_s_wakeup_barrier:
+; GFX12-SDAG: ; %bb.0: ; %entry
+; GFX12-SDAG-NEXT: s_load_b64 s[0:1], s[2:3], 0x24
+; GFX12-SDAG-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-SDAG-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_lshlrev_b32 v3, 2, v0
+; GFX12-SDAG-NEXT: v_mul_u32_u24_e32 v1, v0, v0
+; GFX12-SDAG-NEXT: v_sub_nc_u32_e32 v0, v1, v0
+; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0
+; GFX12-SDAG-NEXT: global_store_b32 v3, v2, s[0:1]
+; GFX12-SDAG-NEXT: s_wakeup_barrier 0
+; GFX12-SDAG-NEXT: global_store_b32 v3, v0, s[0:1]
+; GFX12-SDAG-NEXT: s_nop 0
+; GFX12-SDAG-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-SDAG-NEXT: s_endpgm
+;
+; GFX12-GISEL-LABEL: test3_s_wakeup_barrier:
+; GFX12-GISEL: ; %bb.0: ; %entry
+; GFX12-GISEL-NEXT: s_load_b64 s[0:1], s[2:3], 0x24
+; GFX12-GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-GISEL-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_lshlrev_b32 v3, 2, v0
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v1, v0, v0
+; GFX12-GISEL-NEXT: v_sub_nc_u32_e32 v0, v1, v0
+; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX12-GISEL-NEXT: global_store_b32 v3, v2, s[0:1]
+; GFX12-GISEL-NEXT: s_wakeup_barrier 0
+; GFX12-GISEL-NEXT: global_store_b32 v3, v0, s[0:1]
+; GFX12-GISEL-NEXT: s_nop 0
+; GFX12-GISEL-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-GISEL-NEXT: s_endpgm
entry:
%tmp = call i32 @llvm.amdgcn.workitem.id.x()
%tmp1 = getelementptr i32, ptr addrspace(1) %out, i32 %tmp
@@ -1152,39 +1178,39 @@ entry:
}
define amdgpu_kernel void @test4_s_wakeup_barrier_m0(ptr addrspace(1) %out, i32 %bar) #0 {
-; GCN-LABEL: test4_s_wakeup_barrier_m0:
-; GCN: ; %bb.0: ; %entry
-; GCN-NEXT: s_load_b96 s[0:2], s[2:3], 0x24
-; GCN-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_and_b32 v0, 0x3ff, v0
-; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GCN-NEXT: v_mul_u32_u24_e32 v2, v0, v0
-; GCN-NEXT: v_lshlrev_b32_e32 v3, 2, v0
-; GCN-NEXT: v_sub_nc_u32_e32 v0, v2, v0
-; GCN-NEXT: s_wait_kmcnt 0x0
-; GCN-NEXT: s_mov_b32 m0, s2
-; GCN-NEXT: global_store_b32 v3, v1, s[0:1]
-; GCN-NEXT: s_wakeup_barrier m0
-; GCN-NEXT: global_store_b32 v3, v0, s[0:1]
-; GCN-NEXT: s_nop 0
-; GCN-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
-; GCN-NEXT: s_endpgm
+; GFX12-SDAG-LABEL: test4_s_wakeup_barrier_m0:
+; GFX12-SDAG: ; %bb.0: ; %entry
+; GFX12-SDAG-NEXT: s_load_b96 s[0:2], s[2:3], 0x24
+; GFX12-SDAG-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_and_b32 v0, 0x3ff, v0
+; GFX12-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-SDAG-NEXT: v_mul_u32_u24_e32 v2, v0, v0
+; GFX12-SDAG-NEXT: v_lshlrev_b32_e32 v3, 2, v0
+; GFX12-SDAG-NEXT: v_sub_nc_u32_e32 v0, v2, v0
+; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0
+; GFX12-SDAG-NEXT: s_mov_b32 m0, s2
+; GFX12-SDAG-NEXT: global_store_b32 v3, v1, s[0:1]
+; GFX12-SDAG-NEXT: s_wakeup_barrier m0
+; GFX12-SDAG-NEXT: global_store_b32 v3, v0, s[0:1]
+; GFX12-SDAG-NEXT: s_nop 0
+; GFX12-SDAG-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-SDAG-NEXT: s_endpgm
;
-; GLOBAL-ISEL-LABEL: test4_s_wakeup_barrier_m0:
-; GLOBAL-ISEL: ; %bb.0: ; %entry
-; GLOBAL-ISEL-NEXT: s_load_b96 s[0:2], s[2:3], 0x24
-; GLOBAL-ISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; GLOBAL-ISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GLOBAL-ISEL-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_lshlrev_b32 v3, 2, v0
-; GLOBAL-ISEL-NEXT: v_mul_lo_u32 v1, v0, v0
-; GLOBAL-ISEL-NEXT: v_sub_nc_u32_e32 v0, v1, v0
-; GLOBAL-ISEL-NEXT: s_wait_kmcnt 0x0
-; GLOBAL-ISEL-NEXT: s_mov_b32 m0, s2
-; GLOBAL-ISEL-NEXT: global_store_b32 v3, v2, s[0:1]
-; GLOBAL-ISEL-NEXT: s_wakeup_barrier m0
-; GLOBAL-ISEL-NEXT: global_store_b32 v3, v0, s[0:1]
-; GLOBAL-ISEL-NEXT: s_nop 0
-; GLOBAL-ISEL-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
-; GLOBAL-ISEL-NEXT: s_endpgm
+; GFX12-GISEL-LABEL: test4_s_wakeup_barrier_m0:
+; GFX12-GISEL: ; %bb.0: ; %entry
+; GFX12-GISEL-NEXT: s_load_b96 s[0:2], s[2:3], 0x24
+; GFX12-GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-GISEL-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_lshlrev_b32 v3, 2, v0
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v1, v0, v0
+; GFX12-GISEL-NEXT: v_sub_nc_u32_e32 v0, v1, v0
+; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX12-GISEL-NEXT: s_mov_b32 m0, s2
+; GFX12-GISEL-NEXT: global_store_b32 v3, v2, s[0:1]
+; GFX12-GISEL-NEXT: s_wakeup_barrier m0
+; GFX12-GISEL-NEXT: global_store_b32 v3, v0, s[0:1]
+; GFX12-GISEL-NEXT: s_nop 0
+; GFX12-GISEL-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-GISEL-NEXT: s_endpgm
entry:
%tmp = call i32 @llvm.amdgcn.workitem.id.x()
%tmp1 = getelementptr i32, ptr addrspace(1) %out, i32 %tmp
@@ -1197,63 +1223,50 @@ entry:
}
define void @test5_s_wakeup_barrier_m0(i32 %arg) {
-; GCN-LABEL: test5_s_wakeup_barrier_m0:
-; GCN: ; %bb.0:
-; GCN-NEXT: s_wait_loadcnt_dscnt 0x0
-; GCN-NEXT: s_wait_expcnt 0x0
-; GCN-NEXT: s_wait_samplecnt 0x0
-; GCN-NEXT: s_wait_bvhcnt 0x0
-; GCN-NEXT: s_wait_kmcnt 0x0
-; GCN-NEXT: v_readfirstlane_b32 s0, v0
-; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GCN-NEXT: s_mov_b32 m0, s0
-; GCN-NEXT: s_wakeup_barrier m0
-; GCN-NEXT: s_wait_alu 0xfffe
-; GCN-NEXT: s_setpc_b64 s[30:31]
+; GFX12-SDAG-LABEL: test5_s_wakeup_barrier_m0:
+; GFX12-SDAG: ; %bb.0:
+; GFX12-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-SDAG-NEXT: s_wait_expcnt 0x0
+; GFX12-SDAG-NEXT: s_wait_samplecnt 0x0
+; GFX12-SDAG-NEXT: s_wait_bvhcnt 0x0
+; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0
+; GFX12-SDAG-NEXT: v_readfirstlane_b32 s0, v0
+; GFX12-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-SDAG-NEXT: s_mov_b32 m0, s0
+; GFX12-SDAG-NEXT: s_wakeup_barrier m0
+; GFX12-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX12-SDAG-NEXT: s_setpc_b64 s[30:31]
;
-; GLOBAL-ISEL-LABEL: test5_s_wakeup_barrier_m0:
-; GLOBAL-ISEL: ; %bb.0:
-; GLOBAL-ISEL-NEXT: s_wait_loadcnt_dscnt 0x0
-; GLOBAL-ISEL-NEXT: s_wait_expcnt 0x0
-; GLOBAL-ISEL-NEXT: s_wait_samplecnt 0x0
-; GLOBAL-ISEL-NEXT: s_wait_bvhcnt 0x0
-; GLOBAL-ISEL-NEXT: s_wait_kmcnt 0x0
-; GLOBAL-ISEL-NEXT: v_readfirstlane_b32 m0, v0
-; GLOBAL-ISEL-NEXT: s_wakeup_barrier m0
-; GLOBAL-ISEL-NEXT: s_setpc_b64 s[30:31]
+; GFX12-GISEL-LABEL: test5_s_wakeup_barrier_m0:
+; GFX12-GISEL: ; %bb.0:
+; GFX12-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-GISEL-NEXT: s_wait_expcnt 0x0
+; GFX12-GISEL-NEXT: s_wait_samplecnt 0x0
+; GFX12-GISEL-NEXT: s_wait_bvhcnt 0x0
+; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX12-GISEL-NEXT: v_readfirstlane_b32 m0, v0
+; GFX12-GISEL-NEXT: s_wakeup_barrier m0
+; GFX12-GISEL-NEXT: s_setpc_b64 s[30:31]
call void @llvm.amdgcn.s.wakeup.barrier(i32 %arg)
ret void
}
define amdgpu_kernel void @test1_s_get_barrier_state(ptr addrspace(1) %out) #0 {
-; GCN-LABEL: test1_s_get_barrier_state:
-; GCN: ; %bb.0: ; %entry
-; GCN-NEXT: s_get_barrier_state s4, -1
-; GCN-NEXT: s_load_b64 s[0:1], s[2:3], 0x24
-; GCN-NEXT: s_wait_kmcnt 0x0
-; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GCN-NEXT: v_dual_mov_b32 v1, s4 :: v_dual_and_b32 v0, 0x3ff, v0
-; GCN-NEXT: v_lshlrev_b32_e32 v0, 2, v0
-; GCN-NEXT: global_store_b32 v0, v1, s[0:1]
-; GCN-NEXT: s_nop 0
-; GCN-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
-; GCN-NEXT: s_endpgm
-;
-; GLOBAL-ISEL-LABEL: test1_s_get_barrier_state:
-; GLOBAL-ISEL: ; %bb.0: ; %entry
-; GLOBAL-ISEL-NEXT: s_load_b64 s[0:1], s[2:3], 0x24
-; GLOBAL-ISEL-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_and_b32 v0, 0x3ff, v0
-; GLOBAL-ISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_2)
-; GLOBAL-ISEL-NEXT: v_lshlrev_b32_e32 v0, 2, v0
-; GLOBAL-ISEL-NEXT: s_wait_kmcnt 0x0
-; GLOBAL-ISEL-NEXT: global_store_b32 v0, v1, s[0:1]
-; GLOBAL-ISEL-NEXT: s_get_barrier_state s2, -1
-; GLOBAL-ISEL-NEXT: s_wait_kmcnt 0x0
-; GLOBAL-ISEL-NEXT: v_mov_b32_e32 v1, s2
-; GLOBAL-ISEL-NEXT: global_store_b32 v0, v1, s[0:1]
-; GLOBAL-ISEL-NEXT: s_nop 0
-; GLOBAL-ISEL-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
-; GLOBAL-ISEL-NEXT: s_endpgm
+; GFX12-LABEL: test1_s_get_barrier_state:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_load_b64 s[0:1], s[2:3], 0x24
+; GFX12-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_and_b32 v0, 0x3ff, v0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_2)
+; GFX12-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: global_store_b32 v0, v1, s[0:1]
+; GFX12-NEXT: s_get_barrier_state s2, -1
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_mov_b32_e32 v1, s2
+; GFX12-NEXT: global_store_b32 v0, v1, s[0:1]
+; GFX12-NEXT: s_nop 0
+; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-NEXT: s_endpgm
entry:
%tmp = call i32 @llvm.amdgcn.workitem.id.x()
%tmp1 = getelementptr i32, ptr addrspace(1) %out, i32 %tmp
@@ -1264,34 +1277,21 @@ entry:
}
define amdgpu_kernel void @test2_s_get_barrier_state(ptr addrspace(1) %out) #0 {
-; GCN-LABEL: test2_s_get_barrier_state:
-; GCN: ; %bb.0: ; %entry
-; GCN-NEXT: s_get_barrier_state s4, 1
-; GCN-NEXT: s_load_b64 s[0:1], s[2:3], 0x24
-; GCN-NEXT: s_wait_kmcnt 0x0
-; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GCN-NEXT: v_dual_mov_b32 v1, s4 :: v_dual_and_b32 v0, 0x3ff, v0
-; GCN-NEXT: v_lshlrev_b32_e32 v0, 2, v0
-; GCN-NEXT: global_store_b32 v0, v1, s[0:1]
-; GCN-NEXT: s_nop 0
-; GCN-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
-; GCN-NEXT: s_endpgm
-;
-; GLOBAL-ISEL-LABEL: test2_s_get_barrier_state:
-; GLOBAL-ISEL: ; %bb.0: ; %entry
-; GLOBAL-ISEL-NEXT: s_load_b64 s[0:1], s[2:3], 0x24
-; GLOBAL-ISEL-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_and_b32 v0, 0x3ff, v0
-; GLOBAL-ISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_2)
-; GLOBAL-ISEL-NEXT: v_lshlrev_b32_e32 v0, 2, v0
-; GLOBAL-ISEL-NEXT: s_wait_kmcnt 0x0
-; GLOBAL-ISEL-NEXT: global_store_b32 v0, v1, s[0:1]
-; GLOBAL-ISEL-NEXT: s_get_barrier_state s2, 1
-; GLOBAL-ISEL-NEXT: s_wait_kmcnt 0x0
-; GLOBAL-ISEL-NEXT: v_mov_b32_e32 v1, s2
-; GLOBAL-ISEL-NEXT: global_store_b32 v0, v1, s[0:1]
-; GLOBAL-ISEL-NEXT: s_nop 0
-; GLOBAL-ISEL-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
-; GLOBAL-ISEL-NEXT: s_endpgm
+; GFX12-LABEL: test2_s_get_barrier_state:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_load_b64 s[0:1], s[2:3], 0x24
+; GFX12-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_and_b32 v0, 0x3ff, v0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_2)
+; GFX12-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: global_store_b32 v0, v1, s[0:1]
+; GFX12-NEXT: s_get_barrier_state s2, 1
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_mov_b32_e32 v1, s2
+; GFX12-NEXT: global_store_b32 v0, v1, s[0:1]
+; GFX12-NEXT: s_nop 0
+; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-NEXT: s_endpgm
entry:
%tmp = call i32 @llvm.amdgcn.workitem.id.x()
%tmp1 = getelementptr i32, ptr addrspace(1) %out, i32 %tmp
@@ -1302,34 +1302,21 @@ entry:
}
define amdgpu_kernel void @test3_s_get_barrier_state(ptr addrspace(1) %out) #0 {
-; GCN-LABEL: test3_s_get_barrier_state:
-; GCN: ; %bb.0: ; %entry
-; GCN-NEXT: s_get_barrier_state s4, 0
-; GCN-NEXT: s_load_b64 s[0:1], s[2:3], 0x24
-; GCN-NEXT: s_wait_kmcnt 0x0
-; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GCN-NEXT: v_dual_mov_b32 v1, s4 :: v_dual_and_b32 v0, 0x3ff, v0
-; GCN-NEXT: v_lshlrev_b32_e32 v0, 2, v0
-; GCN-NEXT: global_store_b32 v0, v1, s[0:1]
-; GCN-NEXT: s_nop 0
-; GCN-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
-; GCN-NEXT: s_endpgm
-;
-; GLOBAL-ISEL-LABEL: test3_s_get_barrier_state:
-; GLOBAL-ISEL: ; %bb.0: ; %entry
-; GLOBAL-ISEL-NEXT: s_load_b64 s[0:1], s[2:3], 0x24
-; GLOBAL-ISEL-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_and_b32 v0, 0x3ff, v0
-; GLOBAL-ISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_2)
-; GLOBAL-ISEL-NEXT: v_lshlrev_b32_e32 v0, 2, v0
-; GLOBAL-ISEL-NEXT: s_wait_kmcnt 0x0
-; GLOBAL-ISEL-NEXT: global_store_b32 v0, v1, s[0:1]
-; GLOBAL-ISEL-NEXT: s_get_barrier_state s2, 0
-; GLOBAL-ISEL-NEXT: s_wait_kmcnt 0x0
-; GLOBAL-ISEL-NEXT: v_mov_b32_e32 v1, s2
-; GLOBAL-ISEL-NEXT: global_store_b32 v0, v1, s[0:1]
-; GLOBAL-ISEL-NEXT: s_nop 0
-; GLOBAL-ISEL-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
-; GLOBAL-ISEL-NEXT: s_endpgm
+; GFX12-LABEL: test3_s_get_barrier_state:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_load_b64 s[0:1], s[2:3], 0x24
+; GFX12-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_and_b32 v0, 0x3ff, v0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_2)
+; GFX12-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: global_store_b32 v0, v1, s[0:1]
+; GFX12-NEXT: s_get_barrier_state s2, 0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_mov_b32_e32 v1, s2
+; GFX12-NEXT: global_store_b32 v0, v1, s[0:1]
+; GFX12-NEXT: s_nop 0
+; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-NEXT: s_endpgm
entry:
%tmp = call i32 @llvm.amdgcn.workitem.id.x()
%tmp1 = getelementptr i32, ptr addrspace(1) %out, i32 %tmp
@@ -1340,41 +1327,23 @@ entry:
}
define amdgpu_kernel void @test4_s_get_barrier_state_m0(ptr addrspace(1) %out, i32 %bar) #0 {
-; GCN-LABEL: test4_s_get_barrier_state_m0:
-; GCN: ; %bb.0: ; %entry
-; GCN-NEXT: s_load_b96 s[0:2], s[2:3], 0x24
-; GCN-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_and_b32 v0, 0x3ff, v0
-; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GCN-NEXT: v_lshlrev_b32_e32 v0, 2, v0
-; GCN-NEXT: s_wait_kmcnt 0x0
-; GCN-NEXT: s_mov_b32 m0, s2
-; GCN-NEXT: global_store_b32 v0, v1, s[0:1]
-; GCN-NEXT: s_get_barrier_state s2, m0
-; GCN-NEXT: s_wait_kmcnt 0x0
-; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_2)
-; GCN-NEXT: v_mov_b32_e32 v1, s2
-; GCN-NEXT: global_store_b32 v0, v1, s[0:1]
-; GCN-NEXT: s_nop 0
-; GCN-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
-; GCN-NEXT: s_endpgm
-;
-; GLOBAL-ISEL-LABEL: test4_s_get_barrier_state_m0:
-; GLOBAL-ISEL: ; %bb.0: ; %entry
-; GLOBAL-ISEL-NEXT: s_load_b96 s[0:2], s[2:3], 0x24
-; GLOBAL-ISEL-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_and_b32 v0, 0x3ff, v0
-; GLOBAL-ISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GLOBAL-ISEL-NEXT: v_lshlrev_b32_e32 v0, 2, v0
-; GLOBAL-ISEL-NEXT: s_wait_kmcnt 0x0
-; GLOBAL-ISEL-NEXT: s_mov_b32 m0, s2
-; GLOBAL-ISEL-NEXT: global_store_b32 v0, v1, s[0:1]
-; GLOBAL-ISEL-NEXT: s_get_barrier_state s2, m0
-; GLOBAL-ISEL-NEXT: s_wait_kmcnt 0x0
-; GLOBAL-ISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_2)
-; GLOBAL-ISEL-NEXT: v_mov_b32_e32 v1, s2
-; GLOBAL-ISEL-NEXT: global_store_b32 v0, v1, s[0:1]
-; GLOBAL-ISEL-NEXT: s_nop 0
-; GLOBAL-ISEL-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
-; GLOBAL-ISEL-NEXT: s_endpgm
+; GFX12-LABEL: test4_s_get_barrier_state_m0:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_load_b96 s[0:2], s[2:3], 0x24
+; GFX12-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_and_b32 v0, 0x3ff, v0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: s_mov_b32 m0, s2
+; GFX12-NEXT: global_store_b32 v0, v1, s[0:1]
+; GFX12-NEXT: s_get_barrier_state s2, m0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_2)
+; GFX12-NEXT: v_mov_b32_e32 v1, s2
+; GFX12-NEXT: global_store_b32 v0, v1, s[0:1]
+; GFX12-NEXT: s_nop 0
+; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-NEXT: s_endpgm
entry:
%tmp = call i32 @llvm.amdgcn.workitem.id.x()
%tmp1 = getelementptr i32, ptr addrspace(1) %out, i32 %tmp
@@ -1385,76 +1354,94 @@ entry:
}
define i32 @test5_s_get_barrier_state_m0(i32 %arg) {
-; GCN-LABEL: test5_s_get_barrier_state_m0:
-; GCN: ; %bb.0:
-; GCN-NEXT: s_wait_loadcnt_dscnt 0x0
-; GCN-NEXT: s_wait_expcnt 0x0
-; GCN-NEXT: s_wait_samplecnt 0x0
-; GCN-NEXT: s_wait_bvhcnt 0x0
-; GCN-NEXT: s_wait_kmcnt 0x0
-; GCN-NEXT: v_readfirstlane_b32 s0, v0
-; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(SALU_CYCLE_1)
-; GCN-NEXT: s_mov_b32 m0, s0
-; GCN-NEXT: s_get_barrier_state s0, m0
-; GCN-NEXT: s_wait_kmcnt 0x0
-; GCN-NEXT: s_wait_alu 0xfffe
-; GCN-NEXT: v_mov_b32_e32 v0, s0
-; GCN-NEXT: s_setpc_b64 s[30:31]
+; GFX12-SDAG-LABEL: test5_s_get_barrier_state_m0:
+; GFX12-SDAG: ; %bb.0:
+; GFX12-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-SDAG-NEXT: s_wait_expcnt 0x0
+; GFX12-SDAG-NEXT: s_wait_samplecnt 0x0
+; GFX12-SDAG-NEXT: s_wait_bvhcnt 0x0
+; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0
+; GFX12-SDAG-NEXT: v_readfirstlane_b32 s0, v0
+; GFX12-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(SALU_CYCLE_1)
+; GFX12-SDAG-NEXT: s_mov_b32 m0, s0
+; GFX12-SDAG-NEXT: s_get_barrier_state s0, m0
+; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0
+; GFX12-SDAG-NEXT: s_wait_alu 0xfffe
+; GFX12-SDAG-NEXT: v_mov_b32_e32 v0, s0
+; GFX12-SDAG-NEXT: s_setpc_b64 s[30:31]
;
-; GLOBAL-ISEL-LABEL: test5_s_get_barrier_state_m0:
-; GLOBAL-ISEL: ; %bb.0:
-; GLOBAL-ISEL-NEXT: s_wait_loadcnt_dscnt 0x0
-; GLOBAL-ISEL-NEXT: s_wait_expcnt 0x0
-; GLOBAL-ISEL-NEXT: s_wait_samplecnt 0x0
-; GLOBAL-ISEL-NEXT: s_wait_bvhcnt 0x0
-; GLOBAL-ISEL-NEXT: s_wait_kmcnt 0x0
-; GLOBAL-ISEL-NEXT: v_readfirstlane_b32 m0, v0
-; GLOBAL-ISEL-NEXT: s_get_barrier_state s0, m0
-; GLOBAL-ISEL-NEXT: s_wait_kmcnt 0x0
-; GLOBAL-ISEL-NEXT: s_wait_alu 0xfffe
-; GLOBAL-ISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GLOBAL-ISEL-NEXT: v_mov_b32_e32 v0, s0
-; GLOBAL-ISEL-NEXT: s_setpc_b64 s[30:31]
+; GFX12-GISEL-LABEL: test5_s_get_barrier_state_m0:
+; GFX12-GISEL: ; %bb.0:
+; GFX12-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-GISEL-NEXT: s_wait_expcnt 0x0
+; GFX12-GISEL-NEXT: s_wait_samplecnt 0x0
+; GFX12-GISEL-NEXT: s_wait_bvhcnt 0x0
+; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX12-GISEL-NEXT: v_readfirstlane_b32 m0, v0
+; GFX12-GISEL-NEXT: s_get_barrier_state s0, m0
+; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX12-GISEL-NEXT: s_wait_alu 0xfffe
+; GFX12-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-GISEL-NEXT: v_mov_b32_e32 v0, s0
+; GFX12-GISEL-NEXT: s_setpc_b64 s[30:31]
%state = call i32 @llvm.amdgcn.s.get.barrier.state(i32 %arg)
ret i32 %state
}
+define i32 @test6_s_get_barrier_state_0() {
+; GFX12-LABEL: test6_s_get_barrier_state_0:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: s_get_barrier_state s0, 0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: s_wait_alu 0xfffe
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: v_mov_b32_e32 v0, s0
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+ %state = call i32 @llvm.amdgcn.s.get.barrier.state(i32 0)
+ ret i32 %state
+}
+
define amdgpu_kernel void @test_barrier_convert(ptr addrspace(1) %out) #0 {
-; GCN-LABEL: test_barrier_convert:
-; GCN: ; %bb.0: ; %entry
-; GCN-NEXT: s_load_b64 s[0:1], s[2:3], 0x24
-; GCN-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GCN-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_lshlrev_b32 v3, 2, v0
-; GCN-NEXT: v_mul_u32_u24_e32 v1, v0, v0
-; GCN-NEXT: v_sub_nc_u32_e32 v0, v1, v0
-; GCN-NEXT: s_wait_kmcnt 0x0
-; GCN-NEXT: global_store_b32 v3, v2, s[0:1]
-; GCN-NEXT: s_wait_storecnt 0x0
-; GCN-NEXT: s_barrier_signal -1
-; GCN-NEXT: s_barrier_wait -1
-; GCN-NEXT: global_store_b32 v3, v0, s[0:1]
-; GCN-NEXT: s_nop 0
-; GCN-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
-; GCN-NEXT: s_endpgm
+; GFX12-SDAG-LABEL: test_barrier_convert:
+; GFX12-SDAG: ; %bb.0: ; %entry
+; GFX12-SDAG-NEXT: s_load_b64 s[0:1], s[2:3], 0x24
+; GFX12-SDAG-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-SDAG-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_lshlrev_b32 v3, 2, v0
+; GFX12-SDAG-NEXT: v_mul_u32_u24_e32 v1, v0, v0
+; GFX12-SDAG-NEXT: v_sub_nc_u32_e32 v0, v1, v0
+; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0
+; GFX12-SDAG-NEXT: global_store_b32 v3, v2, s[0:1]
+; GFX12-SDAG-NEXT: s_wait_storecnt 0x0
+; GFX12-SDAG-NEXT: s_barrier_signal -1
+; GFX12-SDAG-NEXT: s_barrier_wait -1
+; GFX12-SDAG-NEXT: global_store_b32 v3, v0, s[0:1]
+; GFX12-SDAG-NEXT: s_nop 0
+; GFX12-SDAG-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-SDAG-NEXT: s_endpgm
;
-; GLOBAL-ISEL-LABEL: test_barrier_convert:
-; GLOBAL-ISEL: ; %bb.0: ; %entry
-; GLOBAL-ISEL-NEXT: s_load_b64 s[0:1], s[2:3], 0x24
-; GLOBAL-ISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; GLOBAL-ISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GLOBAL-ISEL-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_lshlrev_b32 v3, 2, v0
-; GLOBAL-ISEL-NEXT: v_mul_lo_u32 v1, v0, v0
-; GLOBAL-ISEL-NEXT: v_sub_nc_u32_e32 v0, v1, v0
-; GLOBAL-ISEL-NEXT: s_wait_kmcnt 0x0
-; GLOBAL-ISEL-NEXT: global_store_b32 v3, v2, s[0:1]
-; GLOBAL-ISEL-NEXT: s_wait_storecnt 0x0
-; GLOBAL-ISEL-NEXT: s_barrier_signal -1
-; GLOBAL-ISEL-NEXT: s_barrier_wait -1
-; GLOBAL-ISEL-NEXT: global_store_b32 v3, v0, s[0:1]
-; GLOBAL-ISEL-NEXT: s_nop 0
-; GLOBAL-ISEL-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
-; GLOBAL-ISEL-NEXT: s_endpgm
+; GFX12-GISEL-LABEL: test_barrier_convert:
+; GFX12-GISEL: ; %bb.0: ; %entry
+; GFX12-GISEL-NEXT: s_load_b64 s[0:1], s[2:3], 0x24
+; GFX12-GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-GISEL-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_lshlrev_b32 v3, 2, v0
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v1, v0, v0
+; GFX12-GISEL-NEXT: v_sub_nc_u32_e32 v0, v1, v0
+; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX12-GISEL-NEXT: global_store_b32 v3, v2, s[0:1]
+; GFX12-GISEL-NEXT: s_wait_storecnt 0x0
+; GFX12-GISEL-NEXT: s_barrier_signal -1
+; GFX12-GISEL-NEXT: s_barrier_wait -1
+; GFX12-GISEL-NEXT: global_store_b32 v3, v0, s[0:1]
+; GFX12-GISEL-NEXT: s_nop 0
+; GFX12-GISEL-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-GISEL-NEXT: s_endpgm
entry:
%tmp = call i32 @llvm.amdgcn.workitem.id.x()
%tmp1 = getelementptr i32, ptr addrspace(1) %out, i32 %tmp
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.wave.reduce.umax.mir b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.wave.reduce.umax.mir
index c3ead8b..179c9f4 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.wave.reduce.umax.mir
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.wave.reduce.umax.mir
@@ -16,7 +16,7 @@ body: |
; GCN-NEXT: {{ $}}
; GCN-NEXT: [[COPY:%[0-9]+]]:sgpr_64(p4) = COPY $sgpr0_sgpr1
; GCN-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
- ; GCN-NEXT: [[S_LOAD_DWORDX2_IMM:%[0-9]+]]:sreg_64_xexec = S_LOAD_DWORDX2_IMM [[COPY]](p4), 36, 0
+ ; GCN-NEXT: [[S_LOAD_DWORDX2_IMM:%[0-9]+]]:sreg_64_xexec_xnull = S_LOAD_DWORDX2_IMM [[COPY]](p4), 36, 0
; GCN-NEXT: [[S_LOAD_DWORD_IMM:%[0-9]+]]:sreg_32_xm0_xexec = S_LOAD_DWORD_IMM [[COPY]](p4), 44, 0
; GCN-NEXT: [[S_MOV_B32_:%[0-9]+]]:sgpr_32 = S_MOV_B32 [[S_LOAD_DWORD_IMM]]
; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
@@ -24,7 +24,7 @@ body: |
; GCN-NEXT: S_ENDPGM 0
%1:sgpr_64(p4) = COPY $sgpr0_sgpr1
%4:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
- %5:sreg_64_xexec = S_LOAD_DWORDX2_IMM %1(p4), 36, 0
+ %5:sreg_64_xexec_xnull = S_LOAD_DWORDX2_IMM %1(p4), 36, 0
%6:sreg_32_xm0_xexec = S_LOAD_DWORD_IMM %1(p4), 44, 0
%7:sgpr_32 = WAVE_REDUCE_UMAX_PSEUDO_U32 killed %6, 1, implicit $exec
%8:vgpr_32 = COPY %7
@@ -46,7 +46,7 @@ body: |
; GCN-NEXT: {{ $}}
; GCN-NEXT: [[COPY:%[0-9]+]]:sgpr_64(p4) = COPY $sgpr0_sgpr1
; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GCN-NEXT: [[S_LOAD_DWORDX2_IMM:%[0-9]+]]:sreg_64_xexec = S_LOAD_DWORDX2_IMM [[COPY]](p4), 36, 0
+ ; GCN-NEXT: [[S_LOAD_DWORDX2_IMM:%[0-9]+]]:sreg_64_xexec_xnull = S_LOAD_DWORDX2_IMM [[COPY]](p4), 36, 0
; GCN-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
; GCN-NEXT: [[S_MOV_B64_:%[0-9]+]]:sreg_64_xexec = S_MOV_B64 $exec
; GCN-NEXT: [[S_MOV_B32_:%[0-9]+]]:sgpr_32 = S_MOV_B32 0
@@ -77,7 +77,7 @@ body: |
liveins: $vgpr0, $sgpr0_sgpr1
%1:sgpr_64(p4) = COPY $sgpr0_sgpr1
%0:vgpr_32 = COPY $vgpr0
- %4:sreg_64_xexec = S_LOAD_DWORDX2_IMM %1(p4), 36, 0
+ %4:sreg_64_xexec_xnull = S_LOAD_DWORDX2_IMM %1(p4), 36, 0
%5:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
%6:sgpr_32 = WAVE_REDUCE_UMAX_PSEUDO_U32 %0, 1, implicit $exec
%7:vgpr_32 = COPY %6
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.wave.reduce.umin.mir b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.wave.reduce.umin.mir
index 7664498..88c35a6 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.wave.reduce.umin.mir
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.wave.reduce.umin.mir
@@ -16,7 +16,7 @@ body: |
; GCN-NEXT: {{ $}}
; GCN-NEXT: [[COPY:%[0-9]+]]:sgpr_64(p4) = COPY $sgpr0_sgpr1
; GCN-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
- ; GCN-NEXT: [[S_LOAD_DWORDX2_IMM:%[0-9]+]]:sreg_64_xexec = S_LOAD_DWORDX2_IMM [[COPY]](p4), 36, 0
+ ; GCN-NEXT: [[S_LOAD_DWORDX2_IMM:%[0-9]+]]:sreg_64_xexec_xnull = S_LOAD_DWORDX2_IMM [[COPY]](p4), 36, 0
; GCN-NEXT: [[S_LOAD_DWORD_IMM:%[0-9]+]]:sreg_32_xm0_xexec = S_LOAD_DWORD_IMM [[COPY]](p4), 44, 0
; GCN-NEXT: [[S_MOV_B32_:%[0-9]+]]:sgpr_32 = S_MOV_B32 [[S_LOAD_DWORD_IMM]]
; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
@@ -24,7 +24,7 @@ body: |
; GCN-NEXT: S_ENDPGM 0
%1:sgpr_64(p4) = COPY $sgpr0_sgpr1
%4:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
- %5:sreg_64_xexec = S_LOAD_DWORDX2_IMM %1(p4), 36, 0
+ %5:sreg_64_xexec_xnull = S_LOAD_DWORDX2_IMM %1(p4), 36, 0
%6:sreg_32_xm0_xexec = S_LOAD_DWORD_IMM %1(p4), 44, 0
%7:sgpr_32 = WAVE_REDUCE_UMIN_PSEUDO_U32 killed %6, 1, implicit $exec
%8:vgpr_32 = COPY %7
@@ -46,7 +46,7 @@ body: |
; GCN-NEXT: {{ $}}
; GCN-NEXT: [[COPY:%[0-9]+]]:sgpr_64(p4) = COPY $sgpr0_sgpr1
; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GCN-NEXT: [[S_LOAD_DWORDX2_IMM:%[0-9]+]]:sreg_64_xexec = S_LOAD_DWORDX2_IMM [[COPY]](p4), 36, 0
+ ; GCN-NEXT: [[S_LOAD_DWORDX2_IMM:%[0-9]+]]:sreg_64_xexec_xnull = S_LOAD_DWORDX2_IMM [[COPY]](p4), 36, 0
; GCN-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
; GCN-NEXT: [[S_MOV_B64_:%[0-9]+]]:sreg_64_xexec = S_MOV_B64 $exec
; GCN-NEXT: [[S_MOV_B32_:%[0-9]+]]:sgpr_32 = S_MOV_B32 4294967295
@@ -77,7 +77,7 @@ body: |
liveins: $vgpr0, $sgpr0_sgpr1
%1:sgpr_64(p4) = COPY $sgpr0_sgpr1
%0:vgpr_32 = COPY $vgpr0
- %4:sreg_64_xexec = S_LOAD_DWORDX2_IMM %1(p4), 36, 0
+ %4:sreg_64_xexec_xnull = S_LOAD_DWORDX2_IMM %1(p4), 36, 0
%5:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
%6:sgpr_32 = WAVE_REDUCE_UMIN_PSEUDO_U32 %0, 1, implicit $exec
%7:vgpr_32 = COPY %6
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.wqm.demote.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.wqm.demote.ll
index 684ca3a..004a720 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.wqm.demote.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.wqm.demote.ll
@@ -216,8 +216,8 @@ define amdgpu_ps void @branch(float %arg0, float %arg1) {
; GFX10-32-NEXT: s_mov_b32 s1, exec_lo
; GFX10-32-NEXT: v_or_b32_e32 v0, v0, v1
; GFX10-32-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX10-32-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX10-32-NEXT: v_cmp_eq_u32_e64 s0, 1, v0
+; GFX10-32-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX10-32-NEXT: s_and_saveexec_b32 s2, s0
; GFX10-32-NEXT: s_xor_b32 s0, exec_lo, s2
; GFX10-32-NEXT: s_cbranch_execz .LBB2_3
diff --git a/llvm/test/CodeGen/AMDGPU/merge-flat-with-global-load-store.mir b/llvm/test/CodeGen/AMDGPU/merge-flat-with-global-load-store.mir
index 5c43cd2..1afc24d 100644
--- a/llvm/test/CodeGen/AMDGPU/merge-flat-with-global-load-store.mir
+++ b/llvm/test/CodeGen/AMDGPU/merge-flat-with-global-load-store.mir
@@ -136,14 +136,14 @@ body: |
; GCN-LABEL: name: no_merge_flat_global_load_dword_saddr
; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
- ; GCN-NEXT: [[DEF1:%[0-9]+]]:sreg_64_xexec = IMPLICIT_DEF
+ ; GCN-NEXT: [[DEF1:%[0-9]+]]:sreg_64_xexec_xnull = IMPLICIT_DEF
; GCN-NEXT: [[FLAT_LOAD_DWORD:%[0-9]+]]:vgpr_32 = FLAT_LOAD_DWORD [[DEF]], 0, 0, implicit $exec, implicit $flat_scr :: (load (s32) from `ptr undef`)
; GCN-NEXT: [[GLOBAL_LOAD_DWORDX2_SADDR:%[0-9]+]]:vreg_64_align2 = GLOBAL_LOAD_DWORDX2_SADDR [[DEF1]], [[DEF]].sub0, 4, 0, implicit $exec :: (load (s64) from `ptr addrspace(1) undef` + 4, align 4, addrspace 1)
; GCN-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY [[GLOBAL_LOAD_DWORDX2_SADDR]].sub0
; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY killed [[GLOBAL_LOAD_DWORDX2_SADDR]].sub1
; GCN-NEXT: S_NOP 0, implicit [[FLAT_LOAD_DWORD]], implicit [[COPY]], implicit [[COPY1]]
%0:vreg_64_align2 = IMPLICIT_DEF
- %1:sreg_64_xexec = IMPLICIT_DEF
+ %1:sreg_64_xexec_xnull = IMPLICIT_DEF
%2:vgpr_32 = FLAT_LOAD_DWORD %0, 0, 0, implicit $exec, implicit $flat_scr :: (load (s32) from `ptr undef`, basealign 4)
%3:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR %1, %0.sub0, 4, 0, implicit $exec :: (load (s32) from `ptr addrspace(1) undef` + 4, basealign 4, addrspace 1)
%4:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR %1, %0.sub0, 8, 0, implicit $exec :: (load (s32) from `ptr addrspace(1) undef` + 8, basealign 4, addrspace 1)
@@ -157,14 +157,14 @@ body: |
; GCN-LABEL: name: no_merge_global_saddr_flat_load_dword
; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
- ; GCN-NEXT: [[DEF1:%[0-9]+]]:sreg_64_xexec = IMPLICIT_DEF
+ ; GCN-NEXT: [[DEF1:%[0-9]+]]:sreg_64_xexec_xnull = IMPLICIT_DEF
; GCN-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF1]], [[DEF]].sub0, 0, 0, implicit $exec :: (load (s32) from `ptr addrspace(1) undef`, addrspace 1)
; GCN-NEXT: [[FLAT_LOAD_DWORDX2_:%[0-9]+]]:vreg_64_align2 = FLAT_LOAD_DWORDX2 [[DEF]], 4, 0, implicit $exec, implicit $flat_scr :: (load (s64) from `ptr undef` + 4, align 4)
; GCN-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY [[FLAT_LOAD_DWORDX2_]].sub0
; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY killed [[FLAT_LOAD_DWORDX2_]].sub1
; GCN-NEXT: S_NOP 0, implicit [[GLOBAL_LOAD_DWORD_SADDR]], implicit [[COPY]], implicit [[COPY1]]
%0:vreg_64_align2 = IMPLICIT_DEF
- %1:sreg_64_xexec = IMPLICIT_DEF
+ %1:sreg_64_xexec_xnull = IMPLICIT_DEF
%2:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR %1, %0.sub0, 0, 0, implicit $exec :: (load (s32) from `ptr addrspace(1) undef`, addrspace 1)
%3:vgpr_32 = FLAT_LOAD_DWORD %0, 4, 0, implicit $exec, implicit $flat_scr :: (load (s32) from `ptr undef` + 4)
%4:vgpr_32 = FLAT_LOAD_DWORD %0, 8, 0, implicit $exec, implicit $flat_scr :: (load (s32) from `ptr undef` + 8)
@@ -279,13 +279,13 @@ body: |
bb.0.entry:
; GCN-LABEL: name: no_merge_flat_global_store_dword_saddr
; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
- ; GCN-NEXT: [[DEF1:%[0-9]+]]:sreg_64_xexec = IMPLICIT_DEF
+ ; GCN-NEXT: [[DEF1:%[0-9]+]]:sreg_64_xexec_xnull = IMPLICIT_DEF
; GCN-NEXT: [[DEF2:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[DEF3:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: FLAT_STORE_DWORD [[DEF]], [[DEF2]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`)
; GCN-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF]].sub0, [[DEF3]], [[DEF1]], 4, 0, implicit $exec :: (store (s32) into `ptr addrspace(1) undef`, addrspace 1)
%0:vreg_64_align2 = IMPLICIT_DEF
- %1:sreg_64_xexec = IMPLICIT_DEF
+ %1:sreg_64_xexec_xnull = IMPLICIT_DEF
%2:vgpr_32 = IMPLICIT_DEF
%3:vgpr_32 = IMPLICIT_DEF
FLAT_STORE_DWORD %0, %2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`)
@@ -298,13 +298,13 @@ body: |
bb.0.entry:
; GCN-LABEL: name: no_merge_global_saddr_flat_store_dword
; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
- ; GCN-NEXT: [[DEF1:%[0-9]+]]:sreg_64_xexec = IMPLICIT_DEF
+ ; GCN-NEXT: [[DEF1:%[0-9]+]]:sreg_64_xexec_xnull = IMPLICIT_DEF
; GCN-NEXT: [[DEF2:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[DEF3:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF]].sub0, [[DEF2]], [[DEF1]], 0, 0, implicit $exec :: (store (s32) into `ptr addrspace(1) undef`, addrspace 1)
; GCN-NEXT: FLAT_STORE_DWORD [[DEF]], [[DEF3]], 4, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`)
%0:vreg_64_align2 = IMPLICIT_DEF
- %1:sreg_64_xexec = IMPLICIT_DEF
+ %1:sreg_64_xexec_xnull = IMPLICIT_DEF
%2:vgpr_32 = IMPLICIT_DEF
%3:vgpr_32 = IMPLICIT_DEF
GLOBAL_STORE_DWORD_SADDR %0.sub0, %2, %1, 0, 0, implicit $exec :: (store (s32) into `ptr addrspace(1) undef`, addrspace 1)
diff --git a/llvm/test/CodeGen/AMDGPU/merge-global-load-store.mir b/llvm/test/CodeGen/AMDGPU/merge-global-load-store.mir
index ffa250f..0b868c0 100644
--- a/llvm/test/CodeGen/AMDGPU/merge-global-load-store.mir
+++ b/llvm/test/CodeGen/AMDGPU/merge-global-load-store.mir
@@ -235,13 +235,13 @@ body: |
bb.0.entry:
; GCN-LABEL: name: merge_global_load_dword_saddr_2
- ; GCN: [[DEF:%[0-9]+]]:sreg_64_xexec = IMPLICIT_DEF
+ ; GCN: [[DEF:%[0-9]+]]:sreg_64_xexec_xnull = IMPLICIT_DEF
; GCN-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[GLOBAL_LOAD_DWORDX2_SADDR:%[0-9]+]]:vreg_64_align2 = GLOBAL_LOAD_DWORDX2_SADDR [[DEF]], [[DEF1]], 0, 0, implicit $exec :: (load (s64) from `ptr addrspace(1) undef`, align 4, addrspace 1)
; GCN-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY [[GLOBAL_LOAD_DWORDX2_SADDR]].sub0
; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY killed [[GLOBAL_LOAD_DWORDX2_SADDR]].sub1
; GCN-NEXT: S_NOP 0, implicit [[COPY]], implicit [[COPY1]]
- %0:sreg_64_xexec = IMPLICIT_DEF
+ %0:sreg_64_xexec_xnull = IMPLICIT_DEF
%1:vgpr_32 = IMPLICIT_DEF
%2:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR %0, %1, 0, 0, implicit $exec :: (load (s32) from `ptr addrspace(1) undef`, align 4, addrspace 1)
%3:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR %0, %1, 4, 0, implicit $exec :: (load (s32) from `ptr addrspace(1) undef`, align 4, addrspace 1)
@@ -254,7 +254,7 @@ body: |
bb.0.entry:
; GCN-LABEL: name: merge_global_load_dword_saddr_3
- ; GCN: [[DEF:%[0-9]+]]:sreg_64_xexec = IMPLICIT_DEF
+ ; GCN: [[DEF:%[0-9]+]]:sreg_64_xexec_xnull = IMPLICIT_DEF
; GCN-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[GLOBAL_LOAD_DWORDX3_SADDR:%[0-9]+]]:vreg_96_align2 = GLOBAL_LOAD_DWORDX3_SADDR [[DEF]], [[DEF1]], 0, 1, implicit $exec :: (load (s96) from `ptr addrspace(1) undef`, align 4, addrspace 1)
; GCN-NEXT: [[COPY:%[0-9]+]]:vreg_64_align2 = COPY [[GLOBAL_LOAD_DWORDX3_SADDR]].sub0_sub1
@@ -262,7 +262,7 @@ body: |
; GCN-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY [[COPY]].sub0
; GCN-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY killed [[COPY]].sub1
; GCN-NEXT: S_NOP 0, implicit [[COPY2]], implicit [[COPY3]], implicit [[COPY1]]
- %0:sreg_64_xexec = IMPLICIT_DEF
+ %0:sreg_64_xexec_xnull = IMPLICIT_DEF
%1:vgpr_32 = IMPLICIT_DEF
%2:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR %0, %1, 0, 1, implicit $exec :: (load (s32) from `ptr addrspace(1) undef`, align 4, addrspace 1)
%3:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR %0, %1, 4, 1, implicit $exec :: (load (s32) from `ptr addrspace(1) undef`, align 4, addrspace 1)
@@ -276,7 +276,7 @@ body: |
bb.0.entry:
; GCN-LABEL: name: merge_global_load_dword_saddr_4
- ; GCN: [[DEF:%[0-9]+]]:sreg_64_xexec = IMPLICIT_DEF
+ ; GCN: [[DEF:%[0-9]+]]:sreg_64_xexec_xnull = IMPLICIT_DEF
; GCN-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[GLOBAL_LOAD_DWORDX4_SADDR:%[0-9]+]]:vreg_128_align2 = GLOBAL_LOAD_DWORDX4_SADDR [[DEF]], [[DEF1]], 0, 2, implicit $exec :: (load (s128) from `ptr addrspace(1) undef`, align 4, addrspace 1)
; GCN-NEXT: [[COPY:%[0-9]+]]:vreg_96_align2 = COPY [[GLOBAL_LOAD_DWORDX4_SADDR]].sub0_sub1_sub2
@@ -286,7 +286,7 @@ body: |
; GCN-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY [[COPY2]].sub0
; GCN-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY killed [[COPY2]].sub1
; GCN-NEXT: S_NOP 0, implicit [[COPY4]], implicit [[COPY5]], implicit [[COPY3]], implicit [[COPY1]]
- %0:sreg_64_xexec = IMPLICIT_DEF
+ %0:sreg_64_xexec_xnull = IMPLICIT_DEF
%1:vgpr_32 = IMPLICIT_DEF
%2:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR %0, %1, 0, 2, implicit $exec :: (load (s32) from `ptr addrspace(1) undef`, align 4, addrspace 1)
%3:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR %0, %1, 4, 2, implicit $exec :: (load (s32) from `ptr addrspace(1) undef`, align 4, addrspace 1)
@@ -301,7 +301,7 @@ body: |
bb.0.entry:
; GCN-LABEL: name: merge_global_load_dword_saddr_6
- ; GCN: [[DEF:%[0-9]+]]:sreg_64_xexec = IMPLICIT_DEF
+ ; GCN: [[DEF:%[0-9]+]]:sreg_64_xexec_xnull = IMPLICIT_DEF
; GCN-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[GLOBAL_LOAD_DWORDX4_SADDR:%[0-9]+]]:vreg_128_align2 = GLOBAL_LOAD_DWORDX4_SADDR [[DEF]], [[DEF1]], 4, 3, implicit $exec :: (load (s128) from `ptr addrspace(1) undef`, align 4, addrspace 1)
; GCN-NEXT: [[COPY:%[0-9]+]]:vreg_96_align2 = COPY [[GLOBAL_LOAD_DWORDX4_SADDR]].sub0_sub1_sub2
@@ -314,7 +314,7 @@ body: |
; GCN-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[GLOBAL_LOAD_DWORDX2_SADDR]].sub0
; GCN-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY killed [[GLOBAL_LOAD_DWORDX2_SADDR]].sub1
; GCN-NEXT: S_NOP 0, implicit [[COPY4]], implicit [[COPY5]], implicit [[COPY3]], implicit [[COPY1]], implicit [[COPY6]], implicit [[COPY7]]
- %0:sreg_64_xexec = IMPLICIT_DEF
+ %0:sreg_64_xexec_xnull = IMPLICIT_DEF
%1:vgpr_32 = IMPLICIT_DEF
%2:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR %0, %1, 4, 3, implicit $exec :: (load (s32) from `ptr addrspace(1) undef`, align 4, addrspace 1)
%3:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR %0, %1, 8, 3, implicit $exec :: (load (s32) from `ptr addrspace(1) undef`, align 4, addrspace 1)
@@ -331,13 +331,13 @@ body: |
bb.0.entry:
; GCN-LABEL: name: merge_global_load_dwordx2_saddr
- ; GCN: [[DEF:%[0-9]+]]:sreg_64_xexec = IMPLICIT_DEF
+ ; GCN: [[DEF:%[0-9]+]]:sreg_64_xexec_xnull = IMPLICIT_DEF
; GCN-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[GLOBAL_LOAD_DWORDX4_SADDR:%[0-9]+]]:vreg_128_align2 = GLOBAL_LOAD_DWORDX4_SADDR [[DEF]], [[DEF1]], 0, 0, implicit $exec :: (load (s128) from `ptr addrspace(1) undef`, align 4, addrspace 1)
; GCN-NEXT: [[COPY:%[0-9]+]]:vreg_64_align2 = COPY [[GLOBAL_LOAD_DWORDX4_SADDR]].sub0_sub1
; GCN-NEXT: [[COPY1:%[0-9]+]]:vreg_64_align2 = COPY killed [[GLOBAL_LOAD_DWORDX4_SADDR]].sub2_sub3
; GCN-NEXT: S_NOP 0, implicit [[COPY]], implicit [[COPY1]]
- %0:sreg_64_xexec = IMPLICIT_DEF
+ %0:sreg_64_xexec_xnull = IMPLICIT_DEF
%1:vgpr_32 = IMPLICIT_DEF
%2:vreg_64_align2 = GLOBAL_LOAD_DWORDX2_SADDR %0, %1, 0, 0, implicit $exec :: (load (s64) from `ptr addrspace(1) undef`, align 4, addrspace 1)
%3:vreg_64_align2 = GLOBAL_LOAD_DWORDX2_SADDR %0, %1, 8, 0, implicit $exec :: (load (s64) from `ptr addrspace(1) undef`, align 4, addrspace 1)
@@ -350,12 +350,12 @@ body: |
bb.0.entry:
; GCN-LABEL: name: no_merge_global_load_dword_and_global_load_dword_saddr
- ; GCN: [[DEF:%[0-9]+]]:sreg_64_xexec = IMPLICIT_DEF
+ ; GCN: [[DEF:%[0-9]+]]:sreg_64_xexec_xnull = IMPLICIT_DEF
; GCN-NEXT: [[DEF1:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
; GCN-NEXT: [[GLOBAL_LOAD_DWORD:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD [[DEF1]], 0, 0, implicit $exec :: (load (s32) from `ptr addrspace(1) undef`, addrspace 1)
; GCN-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]].sub0, 4, 0, implicit $exec :: (load (s32) from `ptr addrspace(1) undef`, addrspace 1)
; GCN-NEXT: S_NOP 0, implicit [[GLOBAL_LOAD_DWORD]], implicit [[GLOBAL_LOAD_DWORD_SADDR]]
- %0:sreg_64_xexec = IMPLICIT_DEF
+ %0:sreg_64_xexec_xnull = IMPLICIT_DEF
%1:vreg_64_align2 = IMPLICIT_DEF
%2:vgpr_32 = GLOBAL_LOAD_DWORD %1, 0, 0, implicit $exec :: (load (s32) from `ptr addrspace(1) undef`, align 4, addrspace 1)
%3:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR %0, %1.sub0, 4, 0, implicit $exec :: (load (s32) from `ptr addrspace(1) undef`, align 4, addrspace 1)
@@ -386,12 +386,12 @@ body: |
bb.0.entry:
; GCN-LABEL: name: no_merge_global_load_dword_saddr_different_vaddr
- ; GCN: [[DEF:%[0-9]+]]:sreg_64_xexec = IMPLICIT_DEF
+ ; GCN: [[DEF:%[0-9]+]]:sreg_64_xexec_xnull = IMPLICIT_DEF
; GCN-NEXT: [[DEF1:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
; GCN-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]].sub0, 0, 0, implicit $exec :: (load (s32) from `ptr addrspace(1) undef`, addrspace 1)
; GCN-NEXT: [[GLOBAL_LOAD_DWORD_SADDR1:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]].sub1, 4, 0, implicit $exec :: (load (s32) from `ptr addrspace(1) undef`, addrspace 1)
; GCN-NEXT: S_NOP 0, implicit [[GLOBAL_LOAD_DWORD_SADDR]], implicit [[GLOBAL_LOAD_DWORD_SADDR1]]
- %0:sreg_64_xexec = IMPLICIT_DEF
+ %0:sreg_64_xexec_xnull = IMPLICIT_DEF
%1:vreg_64_align2 = IMPLICIT_DEF
%2:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR %0, %1.sub0, 0, 0, implicit $exec :: (load (s32) from `ptr addrspace(1) undef`, align 4, addrspace 1)
%3:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR %0, %1.sub1, 4, 0, implicit $exec :: (load (s32) from `ptr addrspace(1) undef`, align 4, addrspace 1)
@@ -691,13 +691,13 @@ body: |
bb.0.entry:
; GCN-LABEL: name: merge_global_store_dword_saddr_2
- ; GCN: [[DEF:%[0-9]+]]:sreg_64_xexec = IMPLICIT_DEF
+ ; GCN: [[DEF:%[0-9]+]]:sreg_64_xexec_xnull = IMPLICIT_DEF
; GCN-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[DEF2:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[DEF3:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[DEF2]], %subreg.sub0, [[DEF3]], %subreg.sub1
; GCN-NEXT: GLOBAL_STORE_DWORDX2_SADDR [[DEF1]], killed [[REG_SEQUENCE]], [[DEF]], 0, 0, implicit $exec :: (store (s64) into `ptr addrspace(1) undef`, align 4, addrspace 1)
- %0:sreg_64_xexec = IMPLICIT_DEF
+ %0:sreg_64_xexec_xnull = IMPLICIT_DEF
%1:vgpr_32 = IMPLICIT_DEF
%2:vgpr_32 = IMPLICIT_DEF
%3:vgpr_32 = IMPLICIT_DEF
@@ -711,7 +711,7 @@ body: |
bb.0.entry:
; GCN-LABEL: name: merge_global_store_dword_saddr_3
- ; GCN: [[DEF:%[0-9]+]]:sreg_64_xexec = IMPLICIT_DEF
+ ; GCN: [[DEF:%[0-9]+]]:sreg_64_xexec_xnull = IMPLICIT_DEF
; GCN-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[DEF2:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[DEF3:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
@@ -719,7 +719,7 @@ body: |
; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[DEF2]], %subreg.sub0, [[DEF3]], %subreg.sub1
; GCN-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_96_align2 = REG_SEQUENCE killed [[REG_SEQUENCE]], %subreg.sub0_sub1, [[DEF4]], %subreg.sub2
; GCN-NEXT: GLOBAL_STORE_DWORDX3_SADDR [[DEF1]], killed [[REG_SEQUENCE1]], [[DEF]], 4, 1, implicit $exec :: (store (s96) into `ptr addrspace(1) undef`, align 4, addrspace 1)
- %0:sreg_64_xexec = IMPLICIT_DEF
+ %0:sreg_64_xexec_xnull = IMPLICIT_DEF
%1:vgpr_32 = IMPLICIT_DEF
%2:vgpr_32 = IMPLICIT_DEF
%3:vgpr_32 = IMPLICIT_DEF
@@ -735,7 +735,7 @@ body: |
bb.0.entry:
; GCN-LABEL: name: merge_global_store_dword_saddr_4
- ; GCN: [[DEF:%[0-9]+]]:sreg_64_xexec = IMPLICIT_DEF
+ ; GCN: [[DEF:%[0-9]+]]:sreg_64_xexec_xnull = IMPLICIT_DEF
; GCN-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[DEF2:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[DEF3:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
@@ -745,7 +745,7 @@ body: |
; GCN-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_96_align2 = REG_SEQUENCE killed [[REG_SEQUENCE]], %subreg.sub0_sub1, [[DEF4]], %subreg.sub2
; GCN-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE killed [[REG_SEQUENCE1]], %subreg.sub0_sub1_sub2, [[DEF5]], %subreg.sub3
; GCN-NEXT: GLOBAL_STORE_DWORDX4_SADDR [[DEF1]], killed [[REG_SEQUENCE2]], [[DEF]], 4, 2, implicit $exec :: (store (s128) into `ptr addrspace(1) undef`, align 4, addrspace 1)
- %0:sreg_64_xexec = IMPLICIT_DEF
+ %0:sreg_64_xexec_xnull = IMPLICIT_DEF
%1:vgpr_32 = IMPLICIT_DEF
%2:vgpr_32 = IMPLICIT_DEF
%3:vgpr_32 = IMPLICIT_DEF
@@ -763,7 +763,7 @@ body: |
bb.0.entry:
; GCN-LABEL: name: merge_global_store_dword_saddr_6
- ; GCN: [[DEF:%[0-9]+]]:sreg_64_xexec = IMPLICIT_DEF
+ ; GCN: [[DEF:%[0-9]+]]:sreg_64_xexec_xnull = IMPLICIT_DEF
; GCN-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[DEF2:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[DEF3:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
@@ -777,7 +777,7 @@ body: |
; GCN-NEXT: GLOBAL_STORE_DWORDX4_SADDR [[DEF1]], killed [[REG_SEQUENCE2]], [[DEF]], 4, 3, implicit $exec :: (store (s128) into `ptr addrspace(1) undef`, align 4, addrspace 1)
; GCN-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[DEF6]], %subreg.sub0, [[DEF7]], %subreg.sub1
; GCN-NEXT: GLOBAL_STORE_DWORDX2_SADDR [[DEF1]], killed [[REG_SEQUENCE3]], [[DEF]], 20, 3, implicit $exec :: (store (s64) into `ptr addrspace(1) undef`, align 4, addrspace 1)
- %0:sreg_64_xexec = IMPLICIT_DEF
+ %0:sreg_64_xexec_xnull = IMPLICIT_DEF
%1:vgpr_32 = IMPLICIT_DEF
%2:vgpr_32 = IMPLICIT_DEF
%3:vgpr_32 = IMPLICIT_DEF
@@ -799,13 +799,13 @@ body: |
bb.0.entry:
; GCN-LABEL: name: no_merge_global_store_dword_saddr_with_global_store_dword
- ; GCN: [[DEF:%[0-9]+]]:sreg_64_xexec = IMPLICIT_DEF
+ ; GCN: [[DEF:%[0-9]+]]:sreg_64_xexec_xnull = IMPLICIT_DEF
; GCN-NEXT: [[DEF1:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
; GCN-NEXT: [[DEF2:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[DEF3:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]].sub0, [[DEF2]], [[DEF]], 0, 0, implicit $exec :: (store (s32) into `ptr addrspace(1) undef`, addrspace 1)
; GCN-NEXT: GLOBAL_STORE_DWORD [[DEF1]], [[DEF3]], 4, 0, implicit $exec :: (store (s32) into `ptr addrspace(1) undef`, addrspace 1)
- %0:sreg_64_xexec = IMPLICIT_DEF
+ %0:sreg_64_xexec_xnull = IMPLICIT_DEF
%1:vreg_64_align2 = IMPLICIT_DEF
%2:vgpr_32 = IMPLICIT_DEF
%3:vgpr_32 = IMPLICIT_DEF
@@ -819,13 +819,13 @@ body: |
bb.0.entry:
; GCN-LABEL: name: no_merge_global_store_dword_saddr_different_vaddr
- ; GCN: [[DEF:%[0-9]+]]:sreg_64_xexec = IMPLICIT_DEF
+ ; GCN: [[DEF:%[0-9]+]]:sreg_64_xexec_xnull = IMPLICIT_DEF
; GCN-NEXT: [[DEF1:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
; GCN-NEXT: [[DEF2:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[DEF3:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]].sub0, [[DEF2]], [[DEF]], 0, 0, implicit $exec :: (store (s32) into `ptr addrspace(1) undef`, addrspace 1)
; GCN-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]].sub1, [[DEF3]], [[DEF]], 4, 0, implicit $exec :: (store (s32) into `ptr addrspace(1) undef`, addrspace 1)
- %0:sreg_64_xexec = IMPLICIT_DEF
+ %0:sreg_64_xexec_xnull = IMPLICIT_DEF
%1:vreg_64_align2 = IMPLICIT_DEF
%2:vgpr_32 = IMPLICIT_DEF
%3:vgpr_32 = IMPLICIT_DEF
diff --git a/llvm/test/CodeGen/AMDGPU/move-load-addr-to-valu.mir b/llvm/test/CodeGen/AMDGPU/move-load-addr-to-valu.mir
index 502a1c5..c1c5afc 100644
--- a/llvm/test/CodeGen/AMDGPU/move-load-addr-to-valu.mir
+++ b/llvm/test/CodeGen/AMDGPU/move-load-addr-to-valu.mir
@@ -34,7 +34,7 @@ body: |
%0:sreg_64 = COPY $vgpr0_vgpr1
bb.1:
- %1:sreg_64 = PHI %0, %bb.0, %2, %bb.1
+ %1:sreg_64_xexec_xnull = PHI %0, %bb.0, %2, %bb.1
%3:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
%4:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR %1, %3, 0, 0, implicit $exec
%2:sreg_64 = S_AND_B64 %1, 1, implicit-def $scc
@@ -63,7 +63,7 @@ body: |
; GCN-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 1, implicit $exec
; GCN-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[PHI]].sub0, implicit $exec
; GCN-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[PHI]].sub1, implicit $exec
- ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64_xexec_xnull = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1
; GCN-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[REG_SEQUENCE]], [[V_MOV_B32_e32_]], 0, 0, implicit $exec
; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY [[PHI]].sub0
; GCN-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY [[PHI]].sub1
@@ -82,10 +82,10 @@ body: |
%0:sreg_64 = COPY $vgpr0_vgpr1
bb.1:
- %1:sreg_64 = PHI %0, %bb.0, %2, %bb.1
+ %1:sreg_64_xexec_xnull = PHI %0, %bb.0, %2, %bb.1
%3:vgpr_32 = V_MOV_B32_e32 1, implicit $exec
%4:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR %1, %3, 0, 0, implicit $exec
- %2:sreg_64 = S_AND_B64 %1, 1, implicit-def $scc
+ %2:sreg_64_xexec_xnull = S_AND_B64 %1, 1, implicit-def $scc
S_CMP_LG_U64 %2, 0, implicit-def $scc
S_CBRANCH_SCC1 %bb.1, implicit $scc
@@ -111,7 +111,7 @@ body: |
; GCN-NEXT: [[PHI:%[0-9]+]]:vreg_64 = PHI [[COPY]], %bb.0, %7, %bb.1
; GCN-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[PHI]].sub0, implicit $exec
; GCN-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[PHI]].sub1, implicit $exec
- ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64_xexec_xnull = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1
; GCN-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[REG_SEQUENCE]], undef %4:vgpr_32, 0, 0, implicit $exec
; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY [[PHI]].sub0
; GCN-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY [[PHI]].sub1
@@ -130,7 +130,7 @@ body: |
%0:sreg_64 = COPY $vgpr0_vgpr1
bb.1:
- %1:sreg_64 = PHI %0, %bb.0, %2, %bb.1
+ %1:sreg_64_xexec_xnull = PHI %0, %bb.0, %2, %bb.1
%4:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR %1, undef %3:vgpr_32, 0, 0, implicit $exec
%2:sreg_64 = S_AND_B64 %1, 1, implicit-def $scc
S_CMP_LG_U64 %2, 0, implicit-def $scc
@@ -174,7 +174,7 @@ body: |
%0:sreg_64 = COPY $vgpr0_vgpr1
bb.1:
- %1:sreg_64 = PHI %0, %bb.0, %2, %bb.1
+ %1:sreg_64_xexec_xnull = PHI %0, %bb.0, %2, %bb.1
%3:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
%4:vgpr_32 = IMPLICIT_DEF
GLOBAL_STORE_DWORD_SADDR %3, %4, %1, 0, 0, implicit $exec
@@ -314,7 +314,7 @@ body: |
%0:sreg_64 = COPY $vgpr0_vgpr1
bb.1:
- %1:sreg_64 = PHI %0, %bb.0, %2, %bb.1
+ %1:sreg_64_xexec_xnull = PHI %0, %bb.0, %2, %bb.1
%3:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
GLOBAL_ATOMIC_ADD_SADDR %3, %3, %1, 0, 0, implicit $exec
%2:sreg_64 = S_AND_B64 %1, 1, implicit-def $scc
@@ -359,7 +359,7 @@ body: |
%0:sreg_64 = COPY $vgpr0_vgpr1
bb.1:
- %1:sreg_64 = PHI %0, %bb.0, %2, %bb.1
+ %1:sreg_64_xexec_xnull = PHI %0, %bb.0, %2, %bb.1
%3:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
%4:vgpr_32 = GLOBAL_ATOMIC_ADD_SADDR_RTN %3, %3, %1, 0, 0, implicit $exec
%2:sreg_64 = S_AND_B64 %1, 1, implicit-def $scc
diff --git a/llvm/test/CodeGen/AMDGPU/move-to-valu-addsubu64.ll b/llvm/test/CodeGen/AMDGPU/move-to-valu-addsubu64.ll
index 1c38f8f..d4c66f0 100644
--- a/llvm/test/CodeGen/AMDGPU/move-to-valu-addsubu64.ll
+++ b/llvm/test/CodeGen/AMDGPU/move-to-valu-addsubu64.ll
@@ -7,7 +7,7 @@ define amdgpu_kernel void @add_reg_imm(ptr addrspace(1) %ptr) {
; CHECK-NEXT: liveins: $sgpr2_sgpr3
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_64(p4) = COPY $sgpr2_sgpr3
- ; CHECK-NEXT: [[S_LOAD_DWORDX2_IMM:%[0-9]+]]:sreg_64_xexec = S_LOAD_DWORDX2_IMM [[COPY]](p4), 36, 0 :: (dereferenceable invariant load (s64) from %ir.ptr.kernarg.offset, align 4, addrspace 4)
+ ; CHECK-NEXT: [[S_LOAD_DWORDX2_IMM:%[0-9]+]]:sreg_64_xexec_xnull = S_LOAD_DWORDX2_IMM [[COPY]](p4), 36, 0 :: (dereferenceable invariant load (s64) from %ir.ptr.kernarg.offset, align 4, addrspace 4)
; CHECK-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
; CHECK-NEXT: [[GLOBAL_LOAD_DWORDX2_SADDR:%[0-9]+]]:vreg_64 = GLOBAL_LOAD_DWORDX2_SADDR [[S_LOAD_DWORDX2_IMM]], [[V_MOV_B32_e32_]], 0, 0, implicit $exec :: (volatile "amdgpu-noclobber" load (s64) from %ir.ptr.load, addrspace 1)
; CHECK-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 28744523
@@ -30,7 +30,7 @@ define amdgpu_kernel void @add_reg_reg(ptr addrspace(1) %ptr) {
; CHECK-NEXT: liveins: $sgpr2_sgpr3
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_64(p4) = COPY $sgpr2_sgpr3
- ; CHECK-NEXT: [[S_LOAD_DWORDX2_IMM:%[0-9]+]]:sreg_64_xexec = S_LOAD_DWORDX2_IMM [[COPY]](p4), 36, 0 :: (dereferenceable invariant load (s64) from %ir.ptr.kernarg.offset, align 4, addrspace 4)
+ ; CHECK-NEXT: [[S_LOAD_DWORDX2_IMM:%[0-9]+]]:sreg_64_xexec_xnull = S_LOAD_DWORDX2_IMM [[COPY]](p4), 36, 0 :: (dereferenceable invariant load (s64) from %ir.ptr.kernarg.offset, align 4, addrspace 4)
; CHECK-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
; CHECK-NEXT: [[GLOBAL_LOAD_DWORDX2_SADDR:%[0-9]+]]:vreg_64 = GLOBAL_LOAD_DWORDX2_SADDR [[S_LOAD_DWORDX2_IMM]], [[V_MOV_B32_e32_]], 0, 0, implicit $exec :: (volatile "amdgpu-noclobber" load (s64) from %ir.ptr.load, addrspace 1)
; CHECK-NEXT: [[GLOBAL_LOAD_DWORDX2_SADDR1:%[0-9]+]]:vreg_64 = GLOBAL_LOAD_DWORDX2_SADDR [[S_LOAD_DWORDX2_IMM]], [[V_MOV_B32_e32_]], 0, 0, implicit $exec :: (volatile load (s64) from %ir.ptr.load, addrspace 1)
@@ -53,7 +53,7 @@ define amdgpu_kernel void @sub_reg_imm(ptr addrspace(1) %ptr) {
; CHECK-NEXT: liveins: $sgpr2_sgpr3
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_64(p4) = COPY $sgpr2_sgpr3
- ; CHECK-NEXT: [[S_LOAD_DWORDX2_IMM:%[0-9]+]]:sreg_64_xexec = S_LOAD_DWORDX2_IMM [[COPY]](p4), 36, 0 :: (dereferenceable invariant load (s64) from %ir.ptr.kernarg.offset, align 4, addrspace 4)
+ ; CHECK-NEXT: [[S_LOAD_DWORDX2_IMM:%[0-9]+]]:sreg_64_xexec_xnull = S_LOAD_DWORDX2_IMM [[COPY]](p4), 36, 0 :: (dereferenceable invariant load (s64) from %ir.ptr.kernarg.offset, align 4, addrspace 4)
; CHECK-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
; CHECK-NEXT: [[GLOBAL_LOAD_DWORDX2_SADDR:%[0-9]+]]:vreg_64 = GLOBAL_LOAD_DWORDX2_SADDR [[S_LOAD_DWORDX2_IMM]], [[V_MOV_B32_e32_]], 0, 0, implicit $exec :: (volatile "amdgpu-noclobber" load (s64) from %ir.ptr.load, addrspace 1)
; CHECK-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 -28744524
@@ -76,7 +76,7 @@ define amdgpu_kernel void @sub_imm_reg(ptr addrspace(1) %ptr) {
; CHECK-NEXT: liveins: $sgpr2_sgpr3
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_64(p4) = COPY $sgpr2_sgpr3
- ; CHECK-NEXT: [[S_LOAD_DWORDX2_IMM:%[0-9]+]]:sreg_64_xexec = S_LOAD_DWORDX2_IMM [[COPY]](p4), 36, 0 :: (dereferenceable invariant load (s64) from %ir.ptr.kernarg.offset, align 4, addrspace 4)
+ ; CHECK-NEXT: [[S_LOAD_DWORDX2_IMM:%[0-9]+]]:sreg_64_xexec_xnull = S_LOAD_DWORDX2_IMM [[COPY]](p4), 36, 0 :: (dereferenceable invariant load (s64) from %ir.ptr.kernarg.offset, align 4, addrspace 4)
; CHECK-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
; CHECK-NEXT: [[GLOBAL_LOAD_DWORDX2_SADDR:%[0-9]+]]:vreg_64 = GLOBAL_LOAD_DWORDX2_SADDR [[S_LOAD_DWORDX2_IMM]], [[V_MOV_B32_e32_]], 0, 0, implicit $exec :: (volatile "amdgpu-noclobber" load (s64) from %ir.ptr.load, addrspace 1)
; CHECK-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 28744523
@@ -99,7 +99,7 @@ define amdgpu_kernel void @sub_reg_reg(ptr addrspace(1) %ptr) {
; CHECK-NEXT: liveins: $sgpr2_sgpr3
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_64(p4) = COPY $sgpr2_sgpr3
- ; CHECK-NEXT: [[S_LOAD_DWORDX2_IMM:%[0-9]+]]:sreg_64_xexec = S_LOAD_DWORDX2_IMM [[COPY]](p4), 36, 0 :: (dereferenceable invariant load (s64) from %ir.ptr.kernarg.offset, align 4, addrspace 4)
+ ; CHECK-NEXT: [[S_LOAD_DWORDX2_IMM:%[0-9]+]]:sreg_64_xexec_xnull = S_LOAD_DWORDX2_IMM [[COPY]](p4), 36, 0 :: (dereferenceable invariant load (s64) from %ir.ptr.kernarg.offset, align 4, addrspace 4)
; CHECK-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
; CHECK-NEXT: [[GLOBAL_LOAD_DWORDX2_SADDR:%[0-9]+]]:vreg_64 = GLOBAL_LOAD_DWORDX2_SADDR [[S_LOAD_DWORDX2_IMM]], [[V_MOV_B32_e32_]], 0, 0, implicit $exec :: (volatile "amdgpu-noclobber" load (s64) from %ir.ptr.load, addrspace 1)
; CHECK-NEXT: [[GLOBAL_LOAD_DWORDX2_SADDR1:%[0-9]+]]:vreg_64 = GLOBAL_LOAD_DWORDX2_SADDR [[S_LOAD_DWORDX2_IMM]], [[V_MOV_B32_e32_]], 0, 0, implicit $exec :: (volatile load (s64) from %ir.ptr.load, addrspace 1)
diff --git a/llvm/test/CodeGen/AMDGPU/move-to-valu-pseudo-scalar-trans.ll b/llvm/test/CodeGen/AMDGPU/move-to-valu-pseudo-scalar-trans.ll
index 4630b0d..57f7ceb 100644
--- a/llvm/test/CodeGen/AMDGPU/move-to-valu-pseudo-scalar-trans.ll
+++ b/llvm/test/CodeGen/AMDGPU/move-to-valu-pseudo-scalar-trans.ll
@@ -7,7 +7,7 @@ define amdgpu_kernel void @exp_f32(ptr addrspace(1) %ptr) {
; CHECK-NEXT: liveins: $sgpr2_sgpr3
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_64(p4) = COPY $sgpr2_sgpr3
- ; CHECK-NEXT: [[S_LOAD_DWORDX2_IMM:%[0-9]+]]:sreg_64_xexec = S_LOAD_DWORDX2_IMM [[COPY]](p4), 36, 0 :: (dereferenceable invariant load (s64) from %ir.ptr.kernarg.offset, align 4, addrspace 4)
+ ; CHECK-NEXT: [[S_LOAD_DWORDX2_IMM:%[0-9]+]]:sreg_64_xexec_xnull = S_LOAD_DWORDX2_IMM [[COPY]](p4), 36, 0 :: (dereferenceable invariant load (s64) from %ir.ptr.kernarg.offset, align 4, addrspace 4)
; CHECK-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
; CHECK-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[S_LOAD_DWORDX2_IMM]], [[V_MOV_B32_e32_]], 0, 0, implicit $exec :: (volatile "amdgpu-noclobber" load (s32) from %ir.ptr.load, addrspace 1)
; CHECK-NEXT: [[DEF:%[0-9]+]]:sreg_32 = IMPLICIT_DEF
@@ -27,7 +27,7 @@ define amdgpu_kernel void @exp_f16(ptr addrspace(1) %ptr) {
; CHECK-NEXT: liveins: $sgpr2_sgpr3
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_64(p4) = COPY $sgpr2_sgpr3
- ; CHECK-NEXT: [[S_LOAD_DWORDX2_IMM:%[0-9]+]]:sreg_64_xexec = S_LOAD_DWORDX2_IMM [[COPY]](p4), 36, 0 :: (dereferenceable invariant load (s64) from %ir.ptr.kernarg.offset, align 4, addrspace 4)
+ ; CHECK-NEXT: [[S_LOAD_DWORDX2_IMM:%[0-9]+]]:sreg_64_xexec_xnull = S_LOAD_DWORDX2_IMM [[COPY]](p4), 36, 0 :: (dereferenceable invariant load (s64) from %ir.ptr.kernarg.offset, align 4, addrspace 4)
; CHECK-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
; CHECK-NEXT: [[GLOBAL_LOAD_USHORT_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_USHORT_SADDR [[S_LOAD_DWORDX2_IMM]], [[V_MOV_B32_e32_]], 0, 0, implicit $exec :: (volatile "amdgpu-noclobber" load (s16) from %ir.ptr.load, addrspace 1)
; CHECK-NEXT: [[DEF:%[0-9]+]]:sreg_32 = IMPLICIT_DEF
@@ -48,7 +48,7 @@ define amdgpu_kernel void @log_f32(ptr addrspace(1) %ptr) {
; CHECK-NEXT: liveins: $sgpr2_sgpr3
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_64(p4) = COPY $sgpr2_sgpr3
- ; CHECK-NEXT: [[S_LOAD_DWORDX2_IMM:%[0-9]+]]:sreg_64_xexec = S_LOAD_DWORDX2_IMM [[COPY]](p4), 36, 0 :: (dereferenceable invariant load (s64) from %ir.ptr.kernarg.offset, align 4, addrspace 4)
+ ; CHECK-NEXT: [[S_LOAD_DWORDX2_IMM:%[0-9]+]]:sreg_64_xexec_xnull = S_LOAD_DWORDX2_IMM [[COPY]](p4), 36, 0 :: (dereferenceable invariant load (s64) from %ir.ptr.kernarg.offset, align 4, addrspace 4)
; CHECK-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
; CHECK-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[S_LOAD_DWORDX2_IMM]], [[V_MOV_B32_e32_]], 0, 0, implicit $exec :: (volatile "amdgpu-noclobber" load (s32) from %ir.ptr.load, addrspace 1)
; CHECK-NEXT: [[DEF:%[0-9]+]]:sreg_32 = IMPLICIT_DEF
@@ -68,7 +68,7 @@ define amdgpu_kernel void @log_f16(ptr addrspace(1) %ptr) {
; CHECK-NEXT: liveins: $sgpr2_sgpr3
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_64(p4) = COPY $sgpr2_sgpr3
- ; CHECK-NEXT: [[S_LOAD_DWORDX2_IMM:%[0-9]+]]:sreg_64_xexec = S_LOAD_DWORDX2_IMM [[COPY]](p4), 36, 0 :: (dereferenceable invariant load (s64) from %ir.ptr.kernarg.offset, align 4, addrspace 4)
+ ; CHECK-NEXT: [[S_LOAD_DWORDX2_IMM:%[0-9]+]]:sreg_64_xexec_xnull = S_LOAD_DWORDX2_IMM [[COPY]](p4), 36, 0 :: (dereferenceable invariant load (s64) from %ir.ptr.kernarg.offset, align 4, addrspace 4)
; CHECK-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
; CHECK-NEXT: [[GLOBAL_LOAD_USHORT_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_USHORT_SADDR [[S_LOAD_DWORDX2_IMM]], [[V_MOV_B32_e32_]], 0, 0, implicit $exec :: (volatile "amdgpu-noclobber" load (s16) from %ir.ptr.load, addrspace 1)
; CHECK-NEXT: [[DEF:%[0-9]+]]:sreg_32 = IMPLICIT_DEF
@@ -89,7 +89,7 @@ define amdgpu_kernel void @rcp_f32(ptr addrspace(1) %ptr) {
; CHECK-NEXT: liveins: $sgpr2_sgpr3
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_64(p4) = COPY $sgpr2_sgpr3
- ; CHECK-NEXT: [[S_LOAD_DWORDX2_IMM:%[0-9]+]]:sreg_64_xexec = S_LOAD_DWORDX2_IMM [[COPY]](p4), 36, 0 :: (dereferenceable invariant load (s64) from %ir.ptr.kernarg.offset, align 4, addrspace 4)
+ ; CHECK-NEXT: [[S_LOAD_DWORDX2_IMM:%[0-9]+]]:sreg_64_xexec_xnull = S_LOAD_DWORDX2_IMM [[COPY]](p4), 36, 0 :: (dereferenceable invariant load (s64) from %ir.ptr.kernarg.offset, align 4, addrspace 4)
; CHECK-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
; CHECK-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[S_LOAD_DWORDX2_IMM]], [[V_MOV_B32_e32_]], 0, 0, implicit $exec :: (volatile "amdgpu-noclobber" load (s32) from %ir.ptr.load, addrspace 1)
; CHECK-NEXT: [[DEF:%[0-9]+]]:sreg_32 = IMPLICIT_DEF
@@ -109,7 +109,7 @@ define amdgpu_kernel void @rcp_f16(ptr addrspace(1) %ptr) {
; CHECK-NEXT: liveins: $sgpr2_sgpr3
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_64(p4) = COPY $sgpr2_sgpr3
- ; CHECK-NEXT: [[S_LOAD_DWORDX2_IMM:%[0-9]+]]:sreg_64_xexec = S_LOAD_DWORDX2_IMM [[COPY]](p4), 36, 0 :: (dereferenceable invariant load (s64) from %ir.ptr.kernarg.offset, align 4, addrspace 4)
+ ; CHECK-NEXT: [[S_LOAD_DWORDX2_IMM:%[0-9]+]]:sreg_64_xexec_xnull = S_LOAD_DWORDX2_IMM [[COPY]](p4), 36, 0 :: (dereferenceable invariant load (s64) from %ir.ptr.kernarg.offset, align 4, addrspace 4)
; CHECK-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
; CHECK-NEXT: [[GLOBAL_LOAD_USHORT_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_USHORT_SADDR [[S_LOAD_DWORDX2_IMM]], [[V_MOV_B32_e32_]], 0, 0, implicit $exec :: (volatile "amdgpu-noclobber" load (s16) from %ir.ptr.load, addrspace 1)
; CHECK-NEXT: [[DEF:%[0-9]+]]:sreg_32 = IMPLICIT_DEF
@@ -130,7 +130,7 @@ define amdgpu_kernel void @rsq_f32(ptr addrspace(1) %ptr) {
; CHECK-NEXT: liveins: $sgpr2_sgpr3
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_64(p4) = COPY $sgpr2_sgpr3
- ; CHECK-NEXT: [[S_LOAD_DWORDX2_IMM:%[0-9]+]]:sreg_64_xexec = S_LOAD_DWORDX2_IMM [[COPY]](p4), 36, 0 :: (dereferenceable invariant load (s64) from %ir.ptr.kernarg.offset, align 4, addrspace 4)
+ ; CHECK-NEXT: [[S_LOAD_DWORDX2_IMM:%[0-9]+]]:sreg_64_xexec_xnull = S_LOAD_DWORDX2_IMM [[COPY]](p4), 36, 0 :: (dereferenceable invariant load (s64) from %ir.ptr.kernarg.offset, align 4, addrspace 4)
; CHECK-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
; CHECK-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[S_LOAD_DWORDX2_IMM]], [[V_MOV_B32_e32_]], 0, 0, implicit $exec :: (volatile "amdgpu-noclobber" load (s32) from %ir.ptr.load, addrspace 1)
; CHECK-NEXT: [[DEF:%[0-9]+]]:sreg_32 = IMPLICIT_DEF
@@ -150,7 +150,7 @@ define amdgpu_kernel void @rsq_f16(ptr addrspace(1) %ptr) {
; CHECK-NEXT: liveins: $sgpr2_sgpr3
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_64(p4) = COPY $sgpr2_sgpr3
- ; CHECK-NEXT: [[S_LOAD_DWORDX2_IMM:%[0-9]+]]:sreg_64_xexec = S_LOAD_DWORDX2_IMM [[COPY]](p4), 36, 0 :: (dereferenceable invariant load (s64) from %ir.ptr.kernarg.offset, align 4, addrspace 4)
+ ; CHECK-NEXT: [[S_LOAD_DWORDX2_IMM:%[0-9]+]]:sreg_64_xexec_xnull = S_LOAD_DWORDX2_IMM [[COPY]](p4), 36, 0 :: (dereferenceable invariant load (s64) from %ir.ptr.kernarg.offset, align 4, addrspace 4)
; CHECK-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
; CHECK-NEXT: [[GLOBAL_LOAD_USHORT_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_USHORT_SADDR [[S_LOAD_DWORDX2_IMM]], [[V_MOV_B32_e32_]], 0, 0, implicit $exec :: (volatile "amdgpu-noclobber" load (s16) from %ir.ptr.load, addrspace 1)
; CHECK-NEXT: [[DEF:%[0-9]+]]:sreg_32 = IMPLICIT_DEF
@@ -171,7 +171,7 @@ define amdgpu_kernel void @sqrt_f32(ptr addrspace(1) %ptr) {
; CHECK-NEXT: liveins: $sgpr2_sgpr3
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_64(p4) = COPY $sgpr2_sgpr3
- ; CHECK-NEXT: [[S_LOAD_DWORDX2_IMM:%[0-9]+]]:sreg_64_xexec = S_LOAD_DWORDX2_IMM [[COPY]](p4), 36, 0 :: (dereferenceable invariant load (s64) from %ir.ptr.kernarg.offset, align 4, addrspace 4)
+ ; CHECK-NEXT: [[S_LOAD_DWORDX2_IMM:%[0-9]+]]:sreg_64_xexec_xnull = S_LOAD_DWORDX2_IMM [[COPY]](p4), 36, 0 :: (dereferenceable invariant load (s64) from %ir.ptr.kernarg.offset, align 4, addrspace 4)
; CHECK-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
; CHECK-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[S_LOAD_DWORDX2_IMM]], [[V_MOV_B32_e32_]], 0, 0, implicit $exec :: (volatile "amdgpu-noclobber" load (s32) from %ir.ptr.load, addrspace 1)
; CHECK-NEXT: [[DEF:%[0-9]+]]:sreg_32 = IMPLICIT_DEF
@@ -191,7 +191,7 @@ define amdgpu_kernel void @sqrt_f16(ptr addrspace(1) %ptr) {
; CHECK-NEXT: liveins: $sgpr2_sgpr3
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_64(p4) = COPY $sgpr2_sgpr3
- ; CHECK-NEXT: [[S_LOAD_DWORDX2_IMM:%[0-9]+]]:sreg_64_xexec = S_LOAD_DWORDX2_IMM [[COPY]](p4), 36, 0 :: (dereferenceable invariant load (s64) from %ir.ptr.kernarg.offset, align 4, addrspace 4)
+ ; CHECK-NEXT: [[S_LOAD_DWORDX2_IMM:%[0-9]+]]:sreg_64_xexec_xnull = S_LOAD_DWORDX2_IMM [[COPY]](p4), 36, 0 :: (dereferenceable invariant load (s64) from %ir.ptr.kernarg.offset, align 4, addrspace 4)
; CHECK-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
; CHECK-NEXT: [[GLOBAL_LOAD_USHORT_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_USHORT_SADDR [[S_LOAD_DWORDX2_IMM]], [[V_MOV_B32_e32_]], 0, 0, implicit $exec :: (volatile "amdgpu-noclobber" load (s16) from %ir.ptr.load, addrspace 1)
; CHECK-NEXT: [[DEF:%[0-9]+]]:sreg_32 = IMPLICIT_DEF
diff --git a/llvm/test/CodeGen/AMDGPU/optimize-exec-mask-pre-ra-non-empty-but-used-interval.mir b/llvm/test/CodeGen/AMDGPU/optimize-exec-mask-pre-ra-non-empty-but-used-interval.mir
index 9607889..63ee27e 100644
--- a/llvm/test/CodeGen/AMDGPU/optimize-exec-mask-pre-ra-non-empty-but-used-interval.mir
+++ b/llvm/test/CodeGen/AMDGPU/optimize-exec-mask-pre-ra-non-empty-but-used-interval.mir
@@ -10,7 +10,7 @@ body: |
bb.0:
%0:sreg_32 = IMPLICIT_DEF
%1:sreg_32_xm0_xexec = IMPLICIT_DEF
- %2:sreg_64_xexec = IMPLICIT_DEF
+ %2:sreg_64_xexec_xnull = IMPLICIT_DEF
%3:sgpr_32 = IMPLICIT_DEF
%4:sreg_32_xexec_hi = IMPLICIT_DEF
%5:sreg_32 = IMPLICIT_DEF
@@ -21,7 +21,7 @@ body: |
%10:sreg_32 = IMPLICIT_DEF
%11:sreg_32 = IMPLICIT_DEF
%12:sreg_64_xexec = IMPLICIT_DEF
- %13:sreg_64_xexec = IMPLICIT_DEF
+ %13:sreg_64_xexec_xnull = IMPLICIT_DEF
%14:sreg_32 = IMPLICIT_DEF
%15:sreg_32 = IMPLICIT_DEF
%16:sreg_32 = IMPLICIT_DEF
diff --git a/llvm/test/CodeGen/AMDGPU/partial-regcopy-and-spill-missed-at-regalloc.ll b/llvm/test/CodeGen/AMDGPU/partial-regcopy-and-spill-missed-at-regalloc.ll
index 58b6151..72aafca 100644
--- a/llvm/test/CodeGen/AMDGPU/partial-regcopy-and-spill-missed-at-regalloc.ll
+++ b/llvm/test/CodeGen/AMDGPU/partial-regcopy-and-spill-missed-at-regalloc.ll
@@ -11,7 +11,7 @@ define amdgpu_kernel void @partial_copy(<4 x i32> %arg) #0 {
; REGALLOC-GFX908-NEXT: liveins: $sgpr4_sgpr5
; REGALLOC-GFX908-NEXT: {{ $}}
; REGALLOC-GFX908-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 2162697 /* reguse:AGPR_32 */, undef %5:agpr_32
- ; REGALLOC-GFX908-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 6225930 /* regdef:VReg_128 */, def %26
+ ; REGALLOC-GFX908-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 6291466 /* regdef:VReg_128 */, def %26
; REGALLOC-GFX908-NEXT: [[COPY:%[0-9]+]]:av_128 = COPY %26
; REGALLOC-GFX908-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 3538954 /* regdef:VReg_64 */, def %23
; REGALLOC-GFX908-NEXT: SI_SPILL_V64_SAVE %23, %stack.0, $sgpr32, 0, implicit $exec :: (store (s64) into %stack.0, align 4, addrspace 5)
@@ -36,7 +36,7 @@ define amdgpu_kernel void @partial_copy(<4 x i32> %arg) #0 {
; PEI-GFX908-NEXT: $sgpr8 = S_ADD_U32 $sgpr8, $sgpr7, implicit-def $scc, implicit-def $sgpr8_sgpr9_sgpr10_sgpr11
; PEI-GFX908-NEXT: $sgpr9 = S_ADDC_U32 $sgpr9, 0, implicit-def dead $scc, implicit $scc, implicit-def $sgpr8_sgpr9_sgpr10_sgpr11
; PEI-GFX908-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 2162697 /* reguse:AGPR_32 */, undef renamable $agpr0
- ; PEI-GFX908-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 6225930 /* regdef:VReg_128 */, def renamable $vgpr0_vgpr1_vgpr2_vgpr3
+ ; PEI-GFX908-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 6291466 /* regdef:VReg_128 */, def renamable $vgpr0_vgpr1_vgpr2_vgpr3
; PEI-GFX908-NEXT: renamable $agpr0_agpr1_agpr2_agpr3 = COPY killed renamable $vgpr0_vgpr1_vgpr2_vgpr3, implicit $exec
; PEI-GFX908-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 3538954 /* regdef:VReg_64 */, def renamable $vgpr0_vgpr1
; PEI-GFX908-NEXT: BUFFER_STORE_DWORD_OFFSET killed $vgpr0, $sgpr8_sgpr9_sgpr10_sgpr11, 0, 0, 0, 0, implicit $exec, implicit-def $vgpr0_vgpr1, implicit $vgpr0_vgpr1 :: (store (s32) into %stack.0, addrspace 5)
@@ -60,7 +60,7 @@ define amdgpu_kernel void @partial_copy(<4 x i32> %arg) #0 {
; REGALLOC-GFX90A-NEXT: liveins: $sgpr4_sgpr5
; REGALLOC-GFX90A-NEXT: {{ $}}
; REGALLOC-GFX90A-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 2162697 /* reguse:AGPR_32 */, undef %5:agpr_32
- ; REGALLOC-GFX90A-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 6553610 /* regdef:VReg_128_Align2 */, def %25
+ ; REGALLOC-GFX90A-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 6619146 /* regdef:VReg_128_Align2 */, def %25
; REGALLOC-GFX90A-NEXT: [[COPY:%[0-9]+]]:av_128_align2 = COPY %25
; REGALLOC-GFX90A-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 3866634 /* regdef:VReg_64_Align2 */, def %23
; REGALLOC-GFX90A-NEXT: SI_SPILL_V64_SAVE %23, %stack.0, $sgpr32, 0, implicit $exec :: (store (s64) into %stack.0, align 4, addrspace 5)
@@ -83,7 +83,7 @@ define amdgpu_kernel void @partial_copy(<4 x i32> %arg) #0 {
; PEI-GFX90A-NEXT: $sgpr8 = S_ADD_U32 $sgpr8, $sgpr7, implicit-def $scc, implicit-def $sgpr8_sgpr9_sgpr10_sgpr11
; PEI-GFX90A-NEXT: $sgpr9 = S_ADDC_U32 $sgpr9, 0, implicit-def dead $scc, implicit $scc, implicit-def $sgpr8_sgpr9_sgpr10_sgpr11
; PEI-GFX90A-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 2162697 /* reguse:AGPR_32 */, undef renamable $agpr0
- ; PEI-GFX90A-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 6553610 /* regdef:VReg_128_Align2 */, def renamable $vgpr0_vgpr1_vgpr2_vgpr3
+ ; PEI-GFX90A-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 6619146 /* regdef:VReg_128_Align2 */, def renamable $vgpr0_vgpr1_vgpr2_vgpr3
; PEI-GFX90A-NEXT: renamable $agpr0_agpr1_agpr2_agpr3 = COPY killed renamable $vgpr0_vgpr1_vgpr2_vgpr3, implicit $exec
; PEI-GFX90A-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 3866634 /* regdef:VReg_64_Align2 */, def renamable $vgpr0_vgpr1
; PEI-GFX90A-NEXT: BUFFER_STORE_DWORD_OFFSET killed $vgpr0, $sgpr8_sgpr9_sgpr10_sgpr11, 0, 0, 0, 0, implicit $exec, implicit-def $vgpr0_vgpr1, implicit $vgpr0_vgpr1 :: (store (s32) into %stack.0, addrspace 5)
diff --git a/llvm/test/CodeGen/AMDGPU/postra-sink-update-dependency.mir b/llvm/test/CodeGen/AMDGPU/postra-sink-update-dependency.mir
new file mode 100644
index 0000000..14617e0
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/postra-sink-update-dependency.mir
@@ -0,0 +1,66 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
+# RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -run-pass=postra-machine-sink -verify-machineinstrs -o - %s | FileCheck %s
+#
+# In the example, the `$sgpr4 = COPY $sgpr2` was incorrectly sunk into bb.3. This happened because we did not update
+# register uses when we found that `$sgpr2 = COPY $sgpr3` should not be sunk because of a conflict with the successor's
+# prologue instructions.
+---
+name: update_dependency_correctly
+tracksRegLiveness: true
+body: |
+ ; CHECK-LABEL: name: update_dependency_correctly
+ ; CHECK: bb.0:
+ ; CHECK-NEXT: successors: %bb.2(0x40000000), %bb.1(0x40000000)
+ ; CHECK-NEXT: liveins: $sgpr0, $sgpr3, $sgpr2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $vgpr1 = IMPLICIT_DEF
+ ; CHECK-NEXT: renamable $sgpr4 = COPY $sgpr2
+ ; CHECK-NEXT: renamable $sgpr2 = COPY $sgpr3
+ ; CHECK-NEXT: $vgpr1 = SI_SPILL_S32_TO_VGPR $sgpr0, 0, $vgpr1
+ ; CHECK-NEXT: $sgpr1 = S_AND_SAVEEXEC_B32 $sgpr0, implicit-def $exec, implicit-def $scc, implicit $exec
+ ; CHECK-NEXT: S_CBRANCH_EXECZ %bb.1, implicit $exec
+ ; CHECK-NEXT: S_BRANCH %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1:
+ ; CHECK-NEXT: S_ENDPGM 0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2:
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: liveins: $sgpr0, $sgpr2, $sgpr4, $vgpr1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $sgpr3 = SI_RESTORE_S32_FROM_VGPR $vgpr1, 0
+ ; CHECK-NEXT: renamable $sgpr0_sgpr1 = S_GETPC_B64_pseudo
+ ; CHECK-NEXT: renamable $sgpr5 = COPY $sgpr1
+ ; CHECK-NEXT: renamable $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM renamable $sgpr4_sgpr5, 32, 0
+ ; CHECK-NEXT: S_BRANCH %bb.1
+ bb.0:
+ successors: %bb.3(0x40000000), %bb.2(0x40000000)
+ liveins: $sgpr0, $sgpr3, $sgpr2
+
+ $vgpr1 = IMPLICIT_DEF
+
+ renamable $sgpr4 = COPY $sgpr2
+ renamable $sgpr2 = COPY $sgpr3
+
+ $vgpr1 = SI_SPILL_S32_TO_VGPR $sgpr0, 0, $vgpr1
+
+ $sgpr1 = S_AND_SAVEEXEC_B32 $sgpr0, implicit-def $exec, implicit-def $scc, implicit $exec
+ S_CBRANCH_EXECZ %bb.2, implicit $exec
+ S_BRANCH %bb.3
+
+ bb.2:
+ S_ENDPGM 0
+
+ bb.3:
+ successors: %bb.2(0x40000000)
+ liveins: $sgpr0, $sgpr2, $sgpr4, $vgpr1
+
+ $sgpr3 = SI_RESTORE_S32_FROM_VGPR $vgpr1, 0
+
+ renamable $sgpr0_sgpr1 = S_GETPC_B64_pseudo
+ renamable $sgpr5 = COPY $sgpr1
+ renamable $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM renamable $sgpr4_sgpr5, 32, 0
+
+ S_BRANCH %bb.2
+
+...
diff --git a/llvm/test/CodeGen/AMDGPU/ran-out-of-sgprs-allocation-failure.mir b/llvm/test/CodeGen/AMDGPU/ran-out-of-sgprs-allocation-failure.mir
index fdfc9b0..c6ee557 100644
--- a/llvm/test/CodeGen/AMDGPU/ran-out-of-sgprs-allocation-failure.mir
+++ b/llvm/test/CodeGen/AMDGPU/ran-out-of-sgprs-allocation-failure.mir
@@ -306,7 +306,7 @@ body: |
bb.3:
ADJCALLSTACKUP 0, 0, implicit-def dead $scc, implicit-def $sgpr32, implicit $sgpr32
- dead $sgpr30_sgpr31 = SI_CALL undef %24:sreg_64_xexec, 0, CustomRegMask($sgpr60,$sgpr62)
+ dead $sgpr30_sgpr31 = SI_CALL undef %24:sreg_64_xexec_xnull, 0, CustomRegMask($sgpr60,$sgpr62)
ADJCALLSTACKDOWN 0, 0, implicit-def dead $scc, implicit-def $sgpr32, implicit $sgpr32
bb.4:
@@ -338,7 +338,7 @@ body: |
bb.9:
%31:vreg_64_align2 = COPY %19.sub16_sub17, implicit $exec
- GLOBAL_STORE_DWORDX2_SADDR undef %18:vgpr_32, %31, undef %24:sreg_64_xexec, 0, 0, implicit $exec :: (store (s64), addrspace 1)
+ GLOBAL_STORE_DWORDX2_SADDR undef %18:vgpr_32, %31, undef %24:sreg_64_xexec_xnull, 0, 0, implicit $exec :: (store (s64), addrspace 1)
%32:vgpr_32 = V_CNDMASK_B32_e64 0, 0, 0, 1, %5, implicit $exec
dead %33:sreg_64_xexec = V_CMP_NE_U32_e64 1, %32, implicit $exec
undef %34.sub0:sreg_64 = S_ADD_U32 %15.sub0, 32, implicit-def dead $scc
diff --git a/llvm/test/CodeGen/AMDGPU/sched-barrier-hang-weak-dep.mir b/llvm/test/CodeGen/AMDGPU/sched-barrier-hang-weak-dep.mir
index 9c1b4af..3fdb0c7 100644
--- a/llvm/test/CodeGen/AMDGPU/sched-barrier-hang-weak-dep.mir
+++ b/llvm/test/CodeGen/AMDGPU/sched-barrier-hang-weak-dep.mir
@@ -12,7 +12,7 @@ body: |
; CHECK: bb.0:
; CHECK-NEXT: successors: %bb.1(0x80000000)
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:sreg_64_xexec_xnull = IMPLICIT_DEF
; CHECK-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.1:
@@ -26,7 +26,7 @@ body: |
; CHECK-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]], [[V_MUL_LO_U32_e64_1]], [[DEF]], 0, 0, implicit $exec :: (store (s32))
; CHECK-NEXT: S_ENDPGM 0
bb.0:
- %0:sreg_64 = IMPLICIT_DEF
+ %0:sreg_64_xexec_xnull = IMPLICIT_DEF
%1:vgpr_32 = IMPLICIT_DEF
bb.1:
diff --git a/llvm/test/CodeGen/AMDGPU/sched-barrier-pre-RA.mir b/llvm/test/CodeGen/AMDGPU/sched-barrier-pre-RA.mir
index 6fa1e2b..bdfc822 100644
--- a/llvm/test/CodeGen/AMDGPU/sched-barrier-pre-RA.mir
+++ b/llvm/test/CodeGen/AMDGPU/sched-barrier-pre-RA.mir
@@ -28,7 +28,7 @@ tracksRegLiveness: true
body: |
bb.0:
; CHECK-LABEL: name: no_sched_barrier
- ; CHECK: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
+ ; CHECK: [[DEF:%[0-9]+]]:sreg_64_xexec_xnull = IMPLICIT_DEF
; CHECK-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; CHECK-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]], 0, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1)
; CHECK-NEXT: [[V_MUL_LO_U32_e64_:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[GLOBAL_LOAD_DWORD_SADDR]], implicit $exec
@@ -38,7 +38,7 @@ body: |
; CHECK-NEXT: [[V_MUL_LO_U32_e64_1:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR1]], [[GLOBAL_LOAD_DWORD_SADDR1]], implicit $exec
; CHECK-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]], [[V_MUL_LO_U32_e64_1]], [[DEF]], 512, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1)
; CHECK-NEXT: S_ENDPGM 0
- %0:sreg_64 = IMPLICIT_DEF
+ %0:sreg_64_xexec_xnull = IMPLICIT_DEF
%1:vgpr_32 = IMPLICIT_DEF
%3:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR %0, %1, 0, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1)
%4:vgpr_32 = nsw V_MUL_LO_U32_e64 %3, %3, implicit $exec
@@ -58,7 +58,7 @@ tracksRegLiveness: true
body: |
bb.0:
; CHECK-LABEL: name: sched_barrier_mask_0
- ; CHECK: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
+ ; CHECK: [[DEF:%[0-9]+]]:sreg_64_xexec_xnull = IMPLICIT_DEF
; CHECK-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; CHECK-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]], 0, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1)
; CHECK-NEXT: [[V_MUL_LO_U32_e64_:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[GLOBAL_LOAD_DWORD_SADDR]], implicit $exec
@@ -69,7 +69,7 @@ body: |
; CHECK-NEXT: [[V_MUL_LO_U32_e64_1:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR1]], [[GLOBAL_LOAD_DWORD_SADDR1]], implicit $exec
; CHECK-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]], [[V_MUL_LO_U32_e64_1]], [[DEF]], 512, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1)
; CHECK-NEXT: S_ENDPGM 0
- %0:sreg_64 = IMPLICIT_DEF
+ %0:sreg_64_xexec_xnull = IMPLICIT_DEF
%1:vgpr_32 = IMPLICIT_DEF
%3:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR %0, %1, 0, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1)
%4:vgpr_32 = nsw V_MUL_LO_U32_e64 %3, %3, implicit $exec
@@ -91,7 +91,7 @@ tracksRegLiveness: true
body: |
bb.0:
; CHECK-LABEL: name: sched_barrier_mask_1
- ; CHECK: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
+ ; CHECK: [[DEF:%[0-9]+]]:sreg_64_xexec_xnull = IMPLICIT_DEF
; CHECK-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; CHECK-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]], 0, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1)
; CHECK-NEXT: [[V_MUL_LO_U32_e64_:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[GLOBAL_LOAD_DWORD_SADDR]], implicit $exec
@@ -102,7 +102,7 @@ body: |
; CHECK-NEXT: S_NOP 0
; CHECK-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]], [[V_MUL_LO_U32_e64_1]], [[DEF]], 512, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1)
; CHECK-NEXT: S_ENDPGM 0
- %0:sreg_64 = IMPLICIT_DEF
+ %0:sreg_64_xexec_xnull = IMPLICIT_DEF
%1:vgpr_32 = IMPLICIT_DEF
%3:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR %0, %1, 0, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1)
%4:vgpr_32 = nsw V_MUL_LO_U32_e64 %3, %3, implicit $exec
@@ -123,7 +123,7 @@ tracksRegLiveness: true
body: |
bb.0:
; CHECK-LABEL: name: sched_barrier_mask_2
- ; CHECK: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
+ ; CHECK: [[DEF:%[0-9]+]]:sreg_64_xexec_xnull = IMPLICIT_DEF
; CHECK-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; CHECK-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]], 0, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1)
; CHECK-NEXT: [[V_MUL_LO_U32_e64_:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[GLOBAL_LOAD_DWORD_SADDR]], implicit $exec
@@ -136,7 +136,7 @@ body: |
; CHECK-NEXT: [[V_MUL_LO_U32_e64_3:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR1]], [[GLOBAL_LOAD_DWORD_SADDR1]], implicit $exec
; CHECK-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]], [[V_MUL_LO_U32_e64_3]], [[DEF]], 512, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1)
; CHECK-NEXT: S_ENDPGM 0, implicit [[V_MUL_LO_U32_e64_1]], implicit [[V_MUL_LO_U32_e64_2]]
- %0:sreg_64 = IMPLICIT_DEF
+ %0:sreg_64_xexec_xnull = IMPLICIT_DEF
%1:vgpr_32 = IMPLICIT_DEF
%2:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR %0, %1, 0, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1)
%3:vgpr_32 = nsw V_MUL_LO_U32_e64 %2, %2, implicit $exec
@@ -159,7 +159,7 @@ tracksRegLiveness: true
body: |
bb.0:
; CHECK-LABEL: name: sched_barrier_mask_4
- ; CHECK: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
+ ; CHECK: [[DEF:%[0-9]+]]:sreg_64_xexec_xnull = IMPLICIT_DEF
; CHECK-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; CHECK-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]], 0, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1)
; CHECK-NEXT: [[DEF2:%[0-9]+]]:areg_128 = IMPLICIT_DEF
@@ -178,7 +178,7 @@ body: |
; CHECK-NEXT: S_NOP 0
; CHECK-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]], [[V_MUL_LO_U32_e64_3]], [[DEF]], 512, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1)
; CHECK-NEXT: S_ENDPGM 0, implicit [[V_MUL_LO_U32_e64_1]], implicit [[V_MUL_LO_U32_e64_2]], implicit [[V_MFMA_F32_4X4X1F32_e64_4]]
- %0:sreg_64 = IMPLICIT_DEF
+ %0:sreg_64_xexec_xnull = IMPLICIT_DEF
%1:vgpr_32 = IMPLICIT_DEF
%2:areg_128 = IMPLICIT_DEF
%3:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR %0, %1, 0, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1)
@@ -207,7 +207,7 @@ tracksRegLiveness: true
body: |
bb.0:
; CHECK-LABEL: name: sched_barrier_mask_8
- ; CHECK: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
+ ; CHECK: [[DEF:%[0-9]+]]:sreg_64_xexec_xnull = IMPLICIT_DEF
; CHECK-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; CHECK-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]], 0, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1)
; CHECK-NEXT: [[DEF2:%[0-9]+]]:areg_128 = IMPLICIT_DEF
@@ -226,7 +226,7 @@ body: |
; CHECK-NEXT: [[V_MFMA_F32_4X4X1F32_e64_4:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_3]], 0, 0, 0, implicit $mode, implicit $exec
; CHECK-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]], [[V_MUL_LO_U32_e64_3]], [[DEF]], 512, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1)
; CHECK-NEXT: S_ENDPGM 0, implicit [[V_MUL_LO_U32_e64_1]], implicit [[V_MUL_LO_U32_e64_2]], implicit [[V_MFMA_F32_4X4X1F32_e64_4]]
- %0:sreg_64 = IMPLICIT_DEF
+ %0:sreg_64_xexec_xnull = IMPLICIT_DEF
%1:vgpr_32 = IMPLICIT_DEF
%2:areg_128 = IMPLICIT_DEF
%3:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR %0, %1, 0, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1)
@@ -255,7 +255,7 @@ tracksRegLiveness: true
body: |
bb.0:
; CHECK-LABEL: name: sched_barrier_mask_16
- ; CHECK: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
+ ; CHECK: [[DEF:%[0-9]+]]:sreg_64_xexec_xnull = IMPLICIT_DEF
; CHECK-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; CHECK-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]], 0, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1)
; CHECK-NEXT: [[GLOBAL_LOAD_DWORD_SADDR1:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]], 512, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1)
@@ -266,7 +266,7 @@ body: |
; CHECK-NEXT: [[V_MUL_LO_U32_e64_1:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR1]], [[GLOBAL_LOAD_DWORD_SADDR1]], implicit $exec
; CHECK-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]], [[V_MUL_LO_U32_e64_1]], [[DEF]], 512, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1)
; CHECK-NEXT: S_ENDPGM 0
- %0:sreg_64 = IMPLICIT_DEF
+ %0:sreg_64_xexec_xnull = IMPLICIT_DEF
%1:vgpr_32 = IMPLICIT_DEF
%3:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR %0, %1, 0, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1)
%4:vgpr_32 = nsw V_MUL_LO_U32_e64 %3, %3, implicit $exec
@@ -287,7 +287,7 @@ tracksRegLiveness: true
body: |
bb.0:
; CHECK-LABEL: name: sched_barrier_mask_32
- ; CHECK: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
+ ; CHECK: [[DEF:%[0-9]+]]:sreg_64_xexec_xnull = IMPLICIT_DEF
; CHECK-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; CHECK-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]], 0, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1)
; CHECK-NEXT: [[GLOBAL_LOAD_DWORD_SADDR1:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]], 512, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1)
@@ -298,7 +298,7 @@ body: |
; CHECK-NEXT: [[V_MUL_LO_U32_e64_1:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR1]], [[GLOBAL_LOAD_DWORD_SADDR1]], implicit $exec
; CHECK-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]], [[V_MUL_LO_U32_e64_1]], [[DEF]], 512, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1)
; CHECK-NEXT: S_ENDPGM 0
- %0:sreg_64 = IMPLICIT_DEF
+ %0:sreg_64_xexec_xnull = IMPLICIT_DEF
%1:vgpr_32 = IMPLICIT_DEF
%3:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR %0, %1, 0, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1)
%4:vgpr_32 = nsw V_MUL_LO_U32_e64 %3, %3, implicit $exec
@@ -319,7 +319,7 @@ tracksRegLiveness: true
body: |
bb.0:
; CHECK-LABEL: name: sched_barrier_mask_64
- ; CHECK: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
+ ; CHECK: [[DEF:%[0-9]+]]:sreg_64_xexec_xnull = IMPLICIT_DEF
; CHECK-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; CHECK-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]], 0, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1)
; CHECK-NEXT: [[V_MUL_LO_U32_e64_:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[GLOBAL_LOAD_DWORD_SADDR]], implicit $exec
@@ -330,7 +330,7 @@ body: |
; CHECK-NEXT: [[V_MUL_LO_U32_e64_1:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR1]], [[GLOBAL_LOAD_DWORD_SADDR1]], implicit $exec
; CHECK-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]], [[V_MUL_LO_U32_e64_1]], [[DEF]], 512, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1)
; CHECK-NEXT: S_ENDPGM 0
- %0:sreg_64 = IMPLICIT_DEF
+ %0:sreg_64_xexec_xnull = IMPLICIT_DEF
%1:vgpr_32 = IMPLICIT_DEF
%3:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR %0, %1, 0, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1)
%4:vgpr_32 = nsw V_MUL_LO_U32_e64 %3, %3, implicit $exec
@@ -449,7 +449,7 @@ tracksRegLiveness: true
body: |
bb.0:
; CHECK-LABEL: name: sched_barrier_masks_8_12
- ; CHECK: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
+ ; CHECK: [[DEF:%[0-9]+]]:sreg_64_xexec_xnull = IMPLICIT_DEF
; CHECK-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; CHECK-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]], 0, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1)
; CHECK-NEXT: [[DEF2:%[0-9]+]]:areg_128 = IMPLICIT_DEF
@@ -470,7 +470,7 @@ body: |
; CHECK-NEXT: [[V_MFMA_F32_4X4X1F32_e64_4:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_3]], 0, 0, 0, implicit $mode, implicit $exec
; CHECK-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]], [[V_MUL_LO_U32_e64_3]], [[DEF]], 512, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1)
; CHECK-NEXT: S_ENDPGM 0, implicit [[V_MUL_LO_U32_e64_1]], implicit [[V_MUL_LO_U32_e64_2]], implicit [[V_MFMA_F32_4X4X1F32_e64_4]]
- %0:sreg_64 = IMPLICIT_DEF
+ %0:sreg_64_xexec_xnull = IMPLICIT_DEF
%1:vgpr_32 = IMPLICIT_DEF
%2:areg_128 = IMPLICIT_DEF
%3:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR %0, %1, 0, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1)
@@ -501,7 +501,7 @@ tracksRegLiveness: true
body: |
bb.0:
; CHECK-LABEL: name: sched_barrier_mask_4_bundle
- ; CHECK: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
+ ; CHECK: [[DEF:%[0-9]+]]:sreg_64_xexec_xnull = IMPLICIT_DEF
; CHECK-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; CHECK-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]], 512, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1)
; CHECK-NEXT: [[GLOBAL_LOAD_DWORD_SADDR1:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]], 0, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1)
@@ -515,7 +515,7 @@ body: |
; CHECK-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]], [[V_MUL_LO_U32_e64_]], [[DEF]], 512, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1)
; CHECK-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]], [[V_MUL_LO_U32_e64_1]], [[DEF]], 0, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1)
; CHECK-NEXT: S_ENDPGM 0
- %0:sreg_64 = IMPLICIT_DEF
+ %0:sreg_64_xexec_xnull = IMPLICIT_DEF
%1:vgpr_32 = IMPLICIT_DEF
%3:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR %0, %1, 0, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1)
%5:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR %0, %1, 512, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1)
@@ -539,7 +539,7 @@ tracksRegLiveness: true
body: |
bb.0:
; CHECK-LABEL: name: sched_barrier_mask_0_bundle
- ; CHECK: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
+ ; CHECK: [[DEF:%[0-9]+]]:sreg_64_xexec_xnull = IMPLICIT_DEF
; CHECK-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; CHECK-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]], 0, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1)
; CHECK-NEXT: [[GLOBAL_LOAD_DWORD_SADDR1:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]], 512, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1)
@@ -553,7 +553,7 @@ body: |
; CHECK-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]], [[V_MUL_LO_U32_e64_1]], [[DEF]], 512, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1)
; CHECK-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]], [[V_MUL_LO_U32_e64_]], [[DEF]], 0, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1)
; CHECK-NEXT: S_ENDPGM 0
- %0:sreg_64 = IMPLICIT_DEF
+ %0:sreg_64_xexec_xnull = IMPLICIT_DEF
%1:vgpr_32 = IMPLICIT_DEF
%3:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR %0, %1, 0, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1)
%5:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR %0, %1, 512, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1)
diff --git a/llvm/test/CodeGen/AMDGPU/sched-group-barrier-pipeline-solver.mir b/llvm/test/CodeGen/AMDGPU/sched-group-barrier-pipeline-solver.mir
index d6d89a6..d6774bb 100644
--- a/llvm/test/CodeGen/AMDGPU/sched-group-barrier-pipeline-solver.mir
+++ b/llvm/test/CodeGen/AMDGPU/sched-group-barrier-pipeline-solver.mir
@@ -18,7 +18,7 @@ tracksRegLiveness: true
body: |
bb.0:
; GREEDY-LABEL: name: sched_group_barrier_2_VMEM_10_ALU_5_MFMA_2_VMEM_WRITE
- ; GREEDY: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
+ ; GREEDY: [[DEF:%[0-9]+]]:sreg_64_xexec_xnull = IMPLICIT_DEF
; GREEDY-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GREEDY-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]], 0, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1)
; GREEDY-NEXT: [[V_MUL_LO_U32_e64_:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[GLOBAL_LOAD_DWORD_SADDR]], implicit $exec
@@ -42,7 +42,7 @@ body: |
; GREEDY-NEXT: S_ENDPGM 0, implicit [[V_MUL_LO_U32_e64_1]], implicit [[V_MUL_LO_U32_e64_2]], implicit [[V_MFMA_F32_4X4X1F32_e64_4]]
;
; EXACT-LABEL: name: sched_group_barrier_2_VMEM_10_ALU_5_MFMA_2_VMEM_WRITE
- ; EXACT: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
+ ; EXACT: [[DEF:%[0-9]+]]:sreg_64_xexec_xnull = IMPLICIT_DEF
; EXACT-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; EXACT-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]], 0, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1)
; EXACT-NEXT: [[V_MUL_LO_U32_e64_:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[GLOBAL_LOAD_DWORD_SADDR]], implicit $exec
@@ -64,7 +64,7 @@ body: |
; EXACT-NEXT: SCHED_GROUP_BARRIER 8, 5, 0
; EXACT-NEXT: SCHED_GROUP_BARRIER 64, 2, 0
; EXACT-NEXT: S_ENDPGM 0, implicit [[V_MUL_LO_U32_e64_1]], implicit [[V_MUL_LO_U32_e64_2]], implicit [[V_MFMA_F32_4X4X1F32_e64_4]]
- %0:sreg_64 = IMPLICIT_DEF
+ %0:sreg_64_xexec_xnull = IMPLICIT_DEF
%1:vgpr_32 = IMPLICIT_DEF
%2:areg_128 = IMPLICIT_DEF
%3:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR %0, %1, 0, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1)
@@ -98,7 +98,7 @@ tracksRegLiveness: true
body: |
bb.0:
; GREEDY-LABEL: name: sched_group_barrier_MFMA_VALU_and_SALU_alternating
- ; GREEDY: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
+ ; GREEDY: [[DEF:%[0-9]+]]:sreg_64_xexec_xnull = IMPLICIT_DEF
; GREEDY-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GREEDY-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]], 0, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1)
; GREEDY-NEXT: [[V_MUL_LO_U32_e64_:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[GLOBAL_LOAD_DWORD_SADDR]], implicit $exec
@@ -130,7 +130,7 @@ body: |
; GREEDY-NEXT: S_ENDPGM 0, implicit [[V_MUL_LO_U32_e64_1]], implicit [[V_MUL_LO_U32_e64_2]], implicit [[V_MFMA_F32_4X4X1F32_e64_4]]
;
; EXACT-LABEL: name: sched_group_barrier_MFMA_VALU_and_SALU_alternating
- ; EXACT: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
+ ; EXACT: [[DEF:%[0-9]+]]:sreg_64_xexec_xnull = IMPLICIT_DEF
; EXACT-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; EXACT-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]], 0, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1)
; EXACT-NEXT: [[V_MUL_LO_U32_e64_:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[GLOBAL_LOAD_DWORD_SADDR]], implicit $exec
@@ -160,7 +160,7 @@ body: |
; EXACT-NEXT: SCHED_GROUP_BARRIER 6, 1, 0
; EXACT-NEXT: SCHED_GROUP_BARRIER 64, 2, 0
; EXACT-NEXT: S_ENDPGM 0, implicit [[V_MUL_LO_U32_e64_1]], implicit [[V_MUL_LO_U32_e64_2]], implicit [[V_MFMA_F32_4X4X1F32_e64_4]]
- %0:sreg_64 = IMPLICIT_DEF
+ %0:sreg_64_xexec_xnull = IMPLICIT_DEF
%1:vgpr_32 = IMPLICIT_DEF
%2:areg_128 = IMPLICIT_DEF
%3:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR %0, %1, 0, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1)
@@ -210,7 +210,7 @@ tracksRegLiveness: true
body: |
bb.0:
; GREEDY-LABEL: name: sched_group_barrier_2_separate_pipes
- ; GREEDY: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
+ ; GREEDY: [[DEF:%[0-9]+]]:sreg_64_xexec_xnull = IMPLICIT_DEF
; GREEDY-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GREEDY-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]], 0, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1)
; GREEDY-NEXT: [[DEF2:%[0-9]+]]:areg_128 = IMPLICIT_DEF
@@ -236,7 +236,7 @@ body: |
; GREEDY-NEXT: S_ENDPGM 0, implicit [[V_MUL_LO_U32_e64_2]], implicit [[V_MUL_LO_U32_e64_3]], implicit [[V_MFMA_F32_4X4X1F32_e64_4]]
;
; EXACT-LABEL: name: sched_group_barrier_2_separate_pipes
- ; EXACT: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
+ ; EXACT: [[DEF:%[0-9]+]]:sreg_64_xexec_xnull = IMPLICIT_DEF
; EXACT-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; EXACT-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]], 0, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1)
; EXACT-NEXT: [[DEF2:%[0-9]+]]:areg_128 = IMPLICIT_DEF
@@ -260,7 +260,7 @@ body: |
; EXACT-NEXT: SCHED_GROUP_BARRIER 64, 2, 2
; EXACT-NEXT: SCHED_GROUP_BARRIER 8, 2, 2
; EXACT-NEXT: S_ENDPGM 0, implicit [[V_MUL_LO_U32_e64_2]], implicit [[V_MUL_LO_U32_e64_3]], implicit [[V_MFMA_F32_4X4X1F32_e64_4]]
- %0:sreg_64 = IMPLICIT_DEF
+ %0:sreg_64_xexec_xnull = IMPLICIT_DEF
%1:vgpr_32 = IMPLICIT_DEF
%2:areg_128 = IMPLICIT_DEF
%3:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR %0, %1, 0, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1)
@@ -298,7 +298,7 @@ tracksRegLiveness: true
body: |
bb.0:
; GREEDY-LABEL: name: sched_group_barrier_3_separate_pipes
- ; GREEDY: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
+ ; GREEDY: [[DEF:%[0-9]+]]:sreg_64_xexec_xnull = IMPLICIT_DEF
; GREEDY-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GREEDY-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]], 0, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1)
; GREEDY-NEXT: [[V_MUL_LO_U32_e64_:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[GLOBAL_LOAD_DWORD_SADDR]], implicit $exec
@@ -328,7 +328,7 @@ body: |
; GREEDY-NEXT: S_ENDPGM 0, implicit [[V_MUL_LO_U32_e64_1]], implicit [[V_MUL_LO_U32_e64_2]], implicit [[V_MFMA_F32_4X4X1F32_e64_4]]
;
; EXACT-LABEL: name: sched_group_barrier_3_separate_pipes
- ; EXACT: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
+ ; EXACT: [[DEF:%[0-9]+]]:sreg_64_xexec_xnull = IMPLICIT_DEF
; EXACT-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; EXACT-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]], 0, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1)
; EXACT-NEXT: [[DEF2:%[0-9]+]]:areg_128 = IMPLICIT_DEF
@@ -356,7 +356,7 @@ body: |
; EXACT-NEXT: SCHED_GROUP_BARRIER 8, 1, 1
; EXACT-NEXT: SCHED_GROUP_BARRIER 16, 1, 1
; EXACT-NEXT: S_ENDPGM 0, implicit [[V_MUL_LO_U32_e64_1]], implicit [[V_MUL_LO_U32_e64_3]], implicit [[V_MFMA_F32_4X4X1F32_e64_4]]
- %0:sreg_64 = IMPLICIT_DEF
+ %0:sreg_64_xexec_xnull = IMPLICIT_DEF
%1:vgpr_32 = IMPLICIT_DEF
%2:areg_128 = IMPLICIT_DEF
%3:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR %0, %1, 0, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1)
diff --git a/llvm/test/CodeGen/AMDGPU/sched-group-barrier-pre-RA.mir b/llvm/test/CodeGen/AMDGPU/sched-group-barrier-pre-RA.mir
index 372e0fe..4f84476 100644
--- a/llvm/test/CodeGen/AMDGPU/sched-group-barrier-pre-RA.mir
+++ b/llvm/test/CodeGen/AMDGPU/sched-group-barrier-pre-RA.mir
@@ -18,7 +18,7 @@ tracksRegLiveness: true
body: |
bb.0:
; CHECK-LABEL: name: no_sched_group_barrier
- ; CHECK: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
+ ; CHECK: [[DEF:%[0-9]+]]:sreg_64_xexec_xnull = IMPLICIT_DEF
; CHECK-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; CHECK-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]], 0, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1)
; CHECK-NEXT: [[V_MUL_LO_U32_e64_:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[GLOBAL_LOAD_DWORD_SADDR]], implicit $exec
@@ -38,7 +38,7 @@ body: |
; CHECK-NEXT: S_ENDPGM 0, implicit [[V_MUL_LO_U32_e64_1]], implicit [[V_MUL_LO_U32_e64_2]], implicit [[V_MFMA_F32_4X4X1F32_e64_4]]
;
; EXACT-LABEL: name: no_sched_group_barrier
- ; EXACT: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
+ ; EXACT: [[DEF:%[0-9]+]]:sreg_64_xexec_xnull = IMPLICIT_DEF
; EXACT-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; EXACT-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]], 0, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1)
; EXACT-NEXT: [[V_MUL_LO_U32_e64_:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[GLOBAL_LOAD_DWORD_SADDR]], implicit $exec
@@ -56,7 +56,7 @@ body: |
; EXACT-NEXT: [[V_MFMA_F32_4X4X1F32_e64_4:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_3]], 0, 0, 0, implicit $mode, implicit $exec
; EXACT-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]], [[V_MUL_LO_U32_e64_3]], [[DEF]], 512, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1)
; EXACT-NEXT: S_ENDPGM 0, implicit [[V_MUL_LO_U32_e64_1]], implicit [[V_MUL_LO_U32_e64_2]], implicit [[V_MFMA_F32_4X4X1F32_e64_4]]
- %0:sreg_64 = IMPLICIT_DEF
+ %0:sreg_64_xexec_xnull = IMPLICIT_DEF
%1:vgpr_32 = IMPLICIT_DEF
%2:areg_128 = IMPLICIT_DEF
%3:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR %0, %1, 0, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1)
@@ -82,7 +82,7 @@ tracksRegLiveness: true
body: |
bb.0:
; CHECK-LABEL: name: sched_group_barrier_1_VMEM_READ_1_VALU_5_MFMA_1_VMEM_READ_3_VALU_2_VMEM_WRITE
- ; CHECK: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
+ ; CHECK: [[DEF:%[0-9]+]]:sreg_64_xexec_xnull = IMPLICIT_DEF
; CHECK-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; CHECK-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]], 0, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1)
; CHECK-NEXT: [[DEF2:%[0-9]+]]:areg_128 = IMPLICIT_DEF
@@ -108,7 +108,7 @@ body: |
; CHECK-NEXT: S_ENDPGM 0, implicit [[V_MUL_LO_U32_e64_1]], implicit [[V_MUL_LO_U32_e64_2]], implicit [[V_MFMA_F32_4X4X1F32_e64_4]]
;
; EXACT-LABEL: name: sched_group_barrier_1_VMEM_READ_1_VALU_5_MFMA_1_VMEM_READ_3_VALU_2_VMEM_WRITE
- ; EXACT: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
+ ; EXACT: [[DEF:%[0-9]+]]:sreg_64_xexec_xnull = IMPLICIT_DEF
; EXACT-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; EXACT-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]], 0, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1)
; EXACT-NEXT: [[DEF2:%[0-9]+]]:areg_128 = IMPLICIT_DEF
@@ -132,7 +132,7 @@ body: |
; EXACT-NEXT: SCHED_GROUP_BARRIER 2, 3, 0
; EXACT-NEXT: SCHED_GROUP_BARRIER 64, 2, 0
; EXACT-NEXT: S_ENDPGM 0, implicit [[V_MUL_LO_U32_e64_1]], implicit [[V_MUL_LO_U32_e64_2]], implicit [[V_MFMA_F32_4X4X1F32_e64_4]]
- %0:sreg_64 = IMPLICIT_DEF
+ %0:sreg_64_xexec_xnull = IMPLICIT_DEF
%1:vgpr_32 = IMPLICIT_DEF
%2:areg_128 = IMPLICIT_DEF
%3:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR %0, %1, 0, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1)
@@ -170,7 +170,7 @@ tracksRegLiveness: true
body: |
bb.0:
; CHECK-LABEL: name: sched_group_barrier_2_VMEM_1000_ALU_5_MFMA_2_VMEM_WRITE
- ; CHECK: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
+ ; CHECK: [[DEF:%[0-9]+]]:sreg_64_xexec_xnull = IMPLICIT_DEF
; CHECK-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; CHECK-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]], 0, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1)
; CHECK-NEXT: [[V_MUL_LO_U32_e64_:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[GLOBAL_LOAD_DWORD_SADDR]], implicit $exec
@@ -194,7 +194,7 @@ body: |
; CHECK-NEXT: S_ENDPGM 0, implicit [[V_MUL_LO_U32_e64_1]], implicit [[V_MUL_LO_U32_e64_2]], implicit [[V_MFMA_F32_4X4X1F32_e64_4]]
;
; EXACT-LABEL: name: sched_group_barrier_2_VMEM_1000_ALU_5_MFMA_2_VMEM_WRITE
- ; EXACT: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
+ ; EXACT: [[DEF:%[0-9]+]]:sreg_64_xexec_xnull = IMPLICIT_DEF
; EXACT-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; EXACT-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]], 0, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1)
; EXACT-NEXT: [[V_MUL_LO_U32_e64_:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[GLOBAL_LOAD_DWORD_SADDR]], implicit $exec
@@ -216,7 +216,7 @@ body: |
; EXACT-NEXT: SCHED_GROUP_BARRIER 8, 5, 0
; EXACT-NEXT: SCHED_GROUP_BARRIER 64, 2, 0
; EXACT-NEXT: S_ENDPGM 0, implicit [[V_MUL_LO_U32_e64_1]], implicit [[V_MUL_LO_U32_e64_2]], implicit [[V_MFMA_F32_4X4X1F32_e64_4]]
- %0:sreg_64 = IMPLICIT_DEF
+ %0:sreg_64_xexec_xnull = IMPLICIT_DEF
%1:vgpr_32 = IMPLICIT_DEF
%2:areg_128 = IMPLICIT_DEF
%3:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR %0, %1, 0, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1)
@@ -250,7 +250,7 @@ tracksRegLiveness: true
body: |
bb.0:
; CHECK-LABEL: name: sched_group_barrier_MFMA_VALU_and_SALU_alternating
- ; CHECK: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
+ ; CHECK: [[DEF:%[0-9]+]]:sreg_64_xexec_xnull = IMPLICIT_DEF
; CHECK-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; CHECK-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]], 0, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1)
; CHECK-NEXT: [[V_MUL_LO_U32_e64_:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[GLOBAL_LOAD_DWORD_SADDR]], implicit $exec
@@ -282,7 +282,7 @@ body: |
; CHECK-NEXT: S_ENDPGM 0, implicit [[V_MUL_LO_U32_e64_1]], implicit [[V_MUL_LO_U32_e64_2]], implicit [[V_MFMA_F32_4X4X1F32_e64_4]]
;
; EXACT-LABEL: name: sched_group_barrier_MFMA_VALU_and_SALU_alternating
- ; EXACT: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
+ ; EXACT: [[DEF:%[0-9]+]]:sreg_64_xexec_xnull = IMPLICIT_DEF
; EXACT-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; EXACT-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]], 0, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1)
; EXACT-NEXT: [[V_MUL_LO_U32_e64_:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[GLOBAL_LOAD_DWORD_SADDR]], implicit $exec
@@ -312,7 +312,7 @@ body: |
; EXACT-NEXT: SCHED_GROUP_BARRIER 6, 1, 0
; EXACT-NEXT: SCHED_GROUP_BARRIER 64, 2, 0
; EXACT-NEXT: S_ENDPGM 0, implicit [[V_MUL_LO_U32_e64_1]], implicit [[V_MUL_LO_U32_e64_2]], implicit [[V_MFMA_F32_4X4X1F32_e64_4]]
- %0:sreg_64 = IMPLICIT_DEF
+ %0:sreg_64_xexec_xnull = IMPLICIT_DEF
%1:vgpr_32 = IMPLICIT_DEF
%2:areg_128 = IMPLICIT_DEF
%3:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR %0, %1, 0, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1)
diff --git a/llvm/test/CodeGen/AMDGPU/set-inactive-wwm-overwrite.ll b/llvm/test/CodeGen/AMDGPU/set-inactive-wwm-overwrite.ll
index f60786c..6f841c8 100644
--- a/llvm/test/CodeGen/AMDGPU/set-inactive-wwm-overwrite.ll
+++ b/llvm/test/CodeGen/AMDGPU/set-inactive-wwm-overwrite.ll
@@ -4,8 +4,8 @@
define amdgpu_cs void @if_then(ptr addrspace(8) inreg %input, ptr addrspace(8) inreg %output, <3 x i32> %LocalInvocationId) {
; GCN-LABEL: if_then:
; GCN: ; %bb.0: ; %.entry
-; GCN-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
; GCN-NEXT: v_mov_b32_e32 v3, 0
+; GCN-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
; GCN-NEXT: s_and_saveexec_b32 s0, vcc_lo
; GCN-NEXT: ; %bb.1: ; %.bb0
; GCN-NEXT: v_mov_b32_e32 v3, 1
@@ -60,8 +60,8 @@ define amdgpu_cs void @if_then(ptr addrspace(8) inreg %input, ptr addrspace(8) i
define amdgpu_cs void @if_else_vgpr_opt(ptr addrspace(8) inreg %input, ptr addrspace(8) inreg %output, <3 x i32> %LocalInvocationId) {
; GCN-LABEL: if_else_vgpr_opt:
; GCN: ; %bb.0: ; %.entry
-; GCN-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
; GCN-NEXT: v_mov_b32_e32 v3, 0
+; GCN-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
; GCN-NEXT: s_and_saveexec_b32 s0, vcc_lo
; GCN-NEXT: ; %bb.1: ; %.bb0
; GCN-NEXT: v_mov_b32_e32 v3, 1
diff --git a/llvm/test/CodeGen/AMDGPU/sgpr-spill-fi-skip-processing-stack-arg-dbg-value-list.mir b/llvm/test/CodeGen/AMDGPU/sgpr-spill-fi-skip-processing-stack-arg-dbg-value-list.mir
new file mode 100644
index 0000000..cdf2b41
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/sgpr-spill-fi-skip-processing-stack-arg-dbg-value-list.mir
@@ -0,0 +1,53 @@
+# RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx908 -amdgpu-spill-sgpr-to-vgpr=true -run-pass=si-lower-sgpr-spills -o - %s | FileCheck %s
+
+--- |
+ define amdgpu_kernel void @test() { ret void }
+
+ !0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !4, producer: "llvm", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !4, retainedTypes: !4)
+ !1 = !DILocalVariable(name: "a", scope: !2, file: !4, line: 126, type: !6)
+ !2 = distinct !DISubprogram(name: "test", scope: !4, file: !4, line: 1, type: !3, isLocal: false, isDefinition: true, scopeLine: 2, flags: DIFlagPrototyped, isOptimized: true, unit: !0, retainedNodes: !5)
+ !3 = !DISubroutineType(types: !4)
+ !4 = !DIFile(filename: "dummy", directory: "/")
+ !5 = !{!1}
+ !6 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !7, size: 64, align: 32)
+ !7 = !DIBasicType(name: "int", size: 32, align: 32, encoding: DW_ATE_signed)
+ !8 = !DIExpression()
+ !9 = !DILocation(line: 10, column: 9, scope: !2)
+
+...
+---
+name: test
+tracksRegLiveness: true
+frameInfo:
+ maxAlignment: 4
+fixedStack:
+ - { id: 0, type: default, offset: 4, size: 4, alignment: 4, stack-id: default }
+stack:
+ - { id: 0, type: spill-slot, size: 4, alignment: 4, stack-id: sgpr-spill }
+machineFunctionInfo:
+ maxKernArgAlign: 4
+ isEntryFunction: true
+ waveLimiter: true
+ scratchRSrcReg: '$sgpr96_sgpr97_sgpr98_sgpr99'
+ stackPtrOffsetReg: '$sgpr32'
+ frameOffsetReg: '$sgpr33'
+ hasSpilledSGPRs: true
+ argumentInfo:
+ privateSegmentBuffer: { reg: '$sgpr0_sgpr1_sgpr2_sgpr3' }
+ dispatchPtr: { reg: '$sgpr4_sgpr5' }
+ kernargSegmentPtr: { reg: '$sgpr6_sgpr7' }
+ workGroupIDX: { reg: '$sgpr8' }
+ privateSegmentWaveByteOffset: { reg: '$sgpr9' }
+body: |
+ ; CHECK-LABEL: name: test
+ ; CHECK: bb.0:
+ ; CHECK: DBG_VALUE_LIST <{{.*}}>, !DIExpression(), $noreg, 0, debug-location !DILocation(line: 10, column: 9, scope: <{{.*}}>)
+
+ bb.0:
+ renamable $sgpr10 = IMPLICIT_DEF
+ SI_SPILL_S32_SAVE killed $sgpr10, %stack.0, implicit $exec, implicit $sgpr96_sgpr97_sgpr98_sgpr99, implicit $sgpr32
+ DBG_VALUE_LIST !1, !8, %stack.0, 0, debug-location !9
+
+ bb.1:
+ renamable $sgpr10 = SI_SPILL_S32_RESTORE %stack.0, implicit $exec, implicit $sgpr96_sgpr97_sgpr98_sgpr99, implicit $sgpr32
+ S_ENDPGM 0
diff --git a/llvm/test/CodeGen/AMDGPU/should-not-hoist-set-inactive.ll b/llvm/test/CodeGen/AMDGPU/should-not-hoist-set-inactive.ll
index 90b32e2..3519bef 100644
--- a/llvm/test/CodeGen/AMDGPU/should-not-hoist-set-inactive.ll
+++ b/llvm/test/CodeGen/AMDGPU/should-not-hoist-set-inactive.ll
@@ -4,10 +4,10 @@
define amdgpu_cs void @should_not_hoist_set_inactive(<4 x i32> inreg %i14, i32 inreg %v, i32 %lane, i32 %f, i32 %f2) #0 {
; GCN-LABEL: should_not_hoist_set_inactive:
; GCN: ; %bb.0: ; %.entry
-; GCN-NEXT: v_cmp_gt_i32_e32 vcc_lo, 3, v1
; GCN-NEXT: v_cmp_eq_u32_e64 s5, 0, v0
; GCN-NEXT: v_cmp_ne_u32_e64 s6, 0, v2
; GCN-NEXT: s_mov_b32 s7, 0
+; GCN-NEXT: v_cmp_gt_i32_e32 vcc_lo, 3, v1
; GCN-NEXT: s_branch .LBB0_2
; GCN-NEXT: .LBB0_1: ; %bb4
; GCN-NEXT: ; in Loop: Header=BB0_2 Depth=1
diff --git a/llvm/test/CodeGen/AMDGPU/shrink-true16.mir b/llvm/test/CodeGen/AMDGPU/shrink-true16.mir
index 1a7ec5d..be75904 100644
--- a/llvm/test/CodeGen/AMDGPU/shrink-true16.mir
+++ b/llvm/test/CodeGen/AMDGPU/shrink-true16.mir
@@ -11,7 +11,7 @@ body: |
; GFX1100-LABEL: name: 16bit_lo128_shrink
; GFX1100: liveins: $vgpr127
; GFX1100-NEXT: {{ $}}
- ; GFX1100-NEXT: V_CMP_EQ_U16_t16_e32 0, $vgpr127, implicit-def $vcc, implicit $exec, implicit $exec
+ ; GFX1100-NEXT: V_CMP_EQ_U16_t16_e32 0, $vgpr127, implicit-def $vcc_lo, implicit $exec, implicit $exec
$vcc_lo = V_CMP_EQ_U16_t16_e64 0, $vgpr127, implicit-def $vcc, implicit $exec
...
diff --git a/llvm/test/CodeGen/AMDGPU/shrink-v-cmp-wave32-dead-vcc-lo.mir b/llvm/test/CodeGen/AMDGPU/shrink-v-cmp-wave32-dead-vcc-lo.mir
new file mode 100644
index 0000000..73c5526
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/shrink-v-cmp-wave32-dead-vcc-lo.mir
@@ -0,0 +1,55 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
+# RUN: llc -mtriple=amdgcn-amd-amdhsa -run-pass=si-shrink-instructions -mcpu=gfx1100 -o - %s | FileCheck %s
+
+# Make sure there's no crash when shrinking a v_cmp on a wave32 target
+# where the def is dead. Previously the vcc implicit def wasn't properly
+# replaced with vcc_lo, so the expected implicit operand was not found in
+# the shrunk instruction.
+
+---
+name: shrink_v_cmp_vcc_lo_dead
+tracksRegLiveness: true
+tracksDebugUserValues: true
+frameInfo:
+ maxAlignment: 1
+ maxCallFrameSize: 0
+ isCalleeSavedInfoValid: true
+machineFunctionInfo:
+ maxKernArgAlign: 1
+ stackPtrOffsetReg: '$sgpr32'
+body: |
+ bb.0:
+ liveins: $vgpr0, $vgpr1
+ ; CHECK-LABEL: name: shrink_v_cmp_vcc_lo_dead
+ ; CHECK: liveins: $vgpr0, $vgpr1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: V_CMP_LT_U32_e32 $vgpr0, $vgpr1, implicit-def dead $vcc_lo, implicit $exec
+ ; CHECK-NEXT: S_SETPC_B64_return undef $sgpr30_sgpr31
+ dead renamable $vcc_lo = V_CMP_LT_U32_e64 $vgpr0, $vgpr1, implicit $exec
+ S_SETPC_B64_return undef $sgpr30_sgpr31
+
+...
+
+---
+name: shrink_v_cmp_vcc_lo_live
+tracksRegLiveness: true
+tracksDebugUserValues: true
+frameInfo:
+ maxAlignment: 1
+ maxCallFrameSize: 0
+ isCalleeSavedInfoValid: true
+machineFunctionInfo:
+ maxKernArgAlign: 1
+ stackPtrOffsetReg: '$sgpr32'
+body: |
+ bb.0:
+ liveins: $vgpr0, $vgpr1
+ ; CHECK-LABEL: name: shrink_v_cmp_vcc_lo_live
+ ; CHECK: liveins: $vgpr0, $vgpr1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: V_CMP_LT_U32_e32 $vgpr0, $vgpr1, implicit-def $vcc_lo, implicit $exec
+ ; CHECK-NEXT: S_SETPC_B64_return undef $sgpr30_sgpr31, implicit $vcc_lo
+ renamable $vcc_lo = V_CMP_LT_U32_e64 $vgpr0, $vgpr1, implicit $exec
+ S_SETPC_B64_return undef $sgpr30_sgpr31, implicit $vcc_lo
+
+...
diff --git a/llvm/test/CodeGen/AMDGPU/skip-if-dead.ll b/llvm/test/CodeGen/AMDGPU/skip-if-dead.ll
index eebd32c..8e0a836 100644
--- a/llvm/test/CodeGen/AMDGPU/skip-if-dead.ll
+++ b/llvm/test/CodeGen/AMDGPU/skip-if-dead.ll
@@ -1027,8 +1027,8 @@ define amdgpu_ps void @test_kill_divergent_loop(i32 %arg) #0 {
;
; GFX10-WAVE32-LABEL: test_kill_divergent_loop:
; GFX10-WAVE32: ; %bb.0: ; %entry
-; GFX10-WAVE32-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX10-WAVE32-NEXT: s_mov_b32 s0, exec_lo
+; GFX10-WAVE32-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX10-WAVE32-NEXT: s_and_saveexec_b32 s1, vcc_lo
; GFX10-WAVE32-NEXT: s_xor_b32 s1, exec_lo, s1
; GFX10-WAVE32-NEXT: s_cbranch_execz .LBB10_3
diff --git a/llvm/test/CodeGen/AMDGPU/valu-mask-write-hazard.mir b/llvm/test/CodeGen/AMDGPU/valu-mask-write-hazard.mir
index ab222f4..c936c13 100644
--- a/llvm/test/CodeGen/AMDGPU/valu-mask-write-hazard.mir
+++ b/llvm/test/CodeGen/AMDGPU/valu-mask-write-hazard.mir
@@ -12,7 +12,6 @@
define amdgpu_gs void @mask_hazard_cndmask_dpp1() { ret void }
define amdgpu_gs void @mask_hazard_cndmask_dpp2() { ret void }
define amdgpu_gs void @mask_hazard_cndmask_dpp3() { ret void }
- define amdgpu_gs void @mask_hazard_cndmask_dpp4() { ret void }
define amdgpu_gs void @mask_hazard_addc1() { ret void }
define amdgpu_gs void @mask_hazard_addc2() { ret void }
define amdgpu_gs void @mask_hazard_addc3() { ret void }
@@ -156,16 +155,16 @@ body: |
...
---
-name: mask_hazard_cndmask_dpp4
+name: mask_hazard_cndmask_dpp3
body: |
bb.0:
- ; GFX11-LABEL: name: mask_hazard_cndmask_dpp4
+ ; GFX11-LABEL: name: mask_hazard_cndmask_dpp3
; GFX11: $vgpr0 = V_CNDMASK_B16_e64_dpp $vgpr0, 0, $vgpr1, 0, $vgpr2, $sgpr2_sgpr3, 1, 15, 15, 1, implicit $exec
; GFX11-NEXT: $sgpr2_sgpr3 = S_CSELECT_B64 -1, 0, implicit $scc
; GFX11-NEXT: S_WAITCNT_DEPCTR 65534
; GFX11-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: mask_hazard_cndmask_dpp4
+ ; GFX12-LABEL: name: mask_hazard_cndmask_dpp3
; GFX12: $vgpr0 = V_CNDMASK_B16_e64_dpp $vgpr0, 0, $vgpr1, 0, $vgpr2, $sgpr2_sgpr3, 1, 15, 15, 1, implicit $exec
; GFX12-NEXT: $sgpr2_sgpr3 = S_CSELECT_B64 -1, 0, implicit $scc
; GFX12-NEXT: S_ENDPGM 0
diff --git a/llvm/test/CodeGen/AMDGPU/vgpr-liverange-ir.ll b/llvm/test/CodeGen/AMDGPU/vgpr-liverange-ir.ll
index 2a280bc..00baea8 100644
--- a/llvm/test/CodeGen/AMDGPU/vgpr-liverange-ir.ll
+++ b/llvm/test/CodeGen/AMDGPU/vgpr-liverange-ir.ll
@@ -570,7 +570,7 @@ define protected amdgpu_kernel void @nested_waterfalls(ptr addrspace(1) %tex.coe
; SI-NEXT: bb.1.if.then:
; SI-NEXT: successors: %bb.2(0x80000000)
; SI-NEXT: {{ $}}
- ; SI-NEXT: early-clobber %10:sreg_64_xexec = S_LOAD_DWORDX2_IMM_ec killed [[COPY]](p4), 36, 0 :: (dereferenceable invariant load (s64) from %ir.tex.coerce.kernarg.offset, align 4, addrspace 4)
+ ; SI-NEXT: early-clobber %10:sreg_64_xexec_xnull = S_LOAD_DWORDX2_IMM_ec killed [[COPY]](p4), 36, 0 :: (dereferenceable invariant load (s64) from %ir.tex.coerce.kernarg.offset, align 4, addrspace 4)
; SI-NEXT: [[V_LSHLREV_B32_e64_:%[0-9]+]]:vgpr_32 = nuw nsw V_LSHLREV_B32_e64 3, killed [[COPY1]](s32), implicit $exec
; SI-NEXT: [[GLOBAL_LOAD_DWORDX2_SADDR:%[0-9]+]]:vreg_64 = GLOBAL_LOAD_DWORDX2_SADDR killed %10, killed [[V_LSHLREV_B32_e64_]], 0, 0, implicit $exec :: (load (s64) from %ir.idx, addrspace 1)
; SI-NEXT: [[GLOBAL_LOAD_DWORDX4_:%[0-9]+]]:vreg_128 = GLOBAL_LOAD_DWORDX4 [[GLOBAL_LOAD_DWORDX2_SADDR]], 16, 0, implicit $exec :: (invariant load (s128) from %ir.3 + 16, addrspace 4)
diff --git a/llvm/test/CodeGen/AMDGPU/vgpr-liverange.ll b/llvm/test/CodeGen/AMDGPU/vgpr-liverange.ll
index 25d8300..a0bce34 100644
--- a/llvm/test/CodeGen/AMDGPU/vgpr-liverange.ll
+++ b/llvm/test/CodeGen/AMDGPU/vgpr-liverange.ll
@@ -86,8 +86,8 @@ end:
define amdgpu_ps float @else3(i32 %z, float %v, i32 inreg %bound, i32 %x0) #0 {
; SI-LABEL: else3:
; SI: ; %bb.0: ; %entry
-; SI-NEXT: v_cmp_gt_i32_e32 vcc_lo, 6, v0
; SI-NEXT: s_mov_b32 s1, 0
+; SI-NEXT: v_cmp_gt_i32_e32 vcc_lo, 6, v0
; SI-NEXT: s_branch .LBB2_2
; SI-NEXT: .LBB2_1: ; %if.end
; SI-NEXT: ; in Loop: Header=BB2_2 Depth=1
@@ -161,16 +161,16 @@ for.end:
define amdgpu_ps float @loop(i32 %z, float %v, i32 inreg %bound, ptr %extern_func, ptr %extern_func2) #0 {
; SI-LABEL: loop:
; SI: ; %bb.0: ; %main_body
-; SI-NEXT: v_mov_b32_e32 v6, v0
; SI-NEXT: s_mov_b32 s12, SCRATCH_RSRC_DWORD0
; SI-NEXT: s_mov_b32 s13, SCRATCH_RSRC_DWORD1
; SI-NEXT: s_mov_b32 s14, -1
+; SI-NEXT: v_mov_b32_e32 v6, v0
; SI-NEXT: v_mov_b32_e32 v0, v1
-; SI-NEXT: v_cmp_gt_i32_e32 vcc_lo, 6, v6
; SI-NEXT: s_mov_b32 s15, 0x31c16000
; SI-NEXT: s_add_u32 s12, s12, s1
; SI-NEXT: s_addc_u32 s13, s13, 0
; SI-NEXT: s_mov_b32 s32, 0
+; SI-NEXT: v_cmp_gt_i32_e32 vcc_lo, 6, v6
; SI-NEXT: ; implicit-def: $vgpr1
; SI-NEXT: s_and_saveexec_b32 s0, vcc_lo
; SI-NEXT: s_xor_b32 s6, exec_lo, s0
@@ -243,11 +243,11 @@ define amdgpu_ps float @loop_with_use(i32 %z, float %v, i32 inreg %bound, ptr %e
; SI-NEXT: s_mov_b32 s13, SCRATCH_RSRC_DWORD1
; SI-NEXT: s_mov_b32 s14, -1
; SI-NEXT: v_mov_b32_e32 v40, v1
-; SI-NEXT: v_cmp_gt_i32_e32 vcc_lo, 6, v0
; SI-NEXT: s_mov_b32 s15, 0x31c16000
; SI-NEXT: s_add_u32 s12, s12, s1
; SI-NEXT: s_addc_u32 s13, s13, 0
; SI-NEXT: s_mov_b32 s32, 0
+; SI-NEXT: v_cmp_gt_i32_e32 vcc_lo, 6, v0
; SI-NEXT: ; implicit-def: $vgpr0
; SI-NEXT: s_and_saveexec_b32 s0, vcc_lo
; SI-NEXT: s_xor_b32 s6, exec_lo, s0
diff --git a/llvm/test/CodeGen/AMDGPU/vgpr-spill-fi-skip-processing-stack-arg-dbg-value-list.mir b/llvm/test/CodeGen/AMDGPU/vgpr-spill-fi-skip-processing-stack-arg-dbg-value-list.mir
new file mode 100644
index 0000000..53629cd
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/vgpr-spill-fi-skip-processing-stack-arg-dbg-value-list.mir
@@ -0,0 +1,52 @@
+# RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx908 -amdgpu-spill-vgpr-to-agpr=true -run-pass=prologepilog -o - %s | FileCheck %s
+
+--- |
+ define amdgpu_kernel void @test() { ret void }
+
+ !0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !4, producer: "llvm", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !4, retainedTypes: !4)
+ !1 = !DILocalVariable(name: "a", scope: !2, file: !4, line: 126, type: !6)
+ !2 = distinct !DISubprogram(name: "test", scope: !4, file: !4, line: 1, type: !3, isLocal: false, isDefinition: true, scopeLine: 2, flags: DIFlagPrototyped, isOptimized: true, unit: !0, retainedNodes: !5)
+ !3 = !DISubroutineType(types: !4)
+ !4 = !DIFile(filename: "dummy", directory: "/")
+ !5 = !{!1}
+ !6 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !7, size: 64, align: 32)
+ !7 = !DIBasicType(name: "int", size: 32, align: 32, encoding: DW_ATE_signed)
+ !8 = !DIExpression()
+ !9 = !DILocation(line: 10, column: 9, scope: !2)
+
+...
+---
+name: test
+tracksRegLiveness: true
+frameInfo:
+ maxAlignment: 4
+fixedStack:
+ - { id: 0, type: default, offset: 4, size: 4, alignment: 4, stack-id: default }
+stack:
+ - { id: 0, type: spill-slot, size: 4, alignment: 4 }
+machineFunctionInfo:
+ maxKernArgAlign: 4
+ isEntryFunction: true
+ waveLimiter: true
+ scratchRSrcReg: '$sgpr96_sgpr97_sgpr98_sgpr99'
+ stackPtrOffsetReg: '$sgpr32'
+ frameOffsetReg: '$sgpr33'
+ hasSpilledVGPRs: true
+ argumentInfo:
+ privateSegmentBuffer: { reg: '$sgpr0_sgpr1_sgpr2_sgpr3' }
+ dispatchPtr: { reg: '$sgpr4_sgpr5' }
+ kernargSegmentPtr: { reg: '$sgpr6_sgpr7' }
+ workGroupIDX: { reg: '$sgpr8' }
+ privateSegmentWaveByteOffset: { reg: '$sgpr9' }
+body: |
+ ; CHECK-LABEL: name: test
+ ; CHECK: bb.0:
+ ; CHECK: DBG_VALUE_LIST <{{.*}}>, !DIExpression(), $noreg, 0, debug-location !DILocation(line: 10, column: 9, scope: <{{.*}}>)
+ bb.0:
+ $vgpr2 = IMPLICIT_DEF
+ SI_SPILL_V32_SAVE $vgpr2, %stack.0, $sgpr32, 0, implicit $exec :: (store (s32) into %stack.0, align 4, addrspace 5)
+ DBG_VALUE_LIST !1, !8, %stack.0, 0, debug-location !9
+
+ bb.1:
+ renamable $vgpr2 = SI_SPILL_V32_RESTORE %stack.0, $sgpr32, 0, implicit $exec :: (load (s32) from %stack.0, align 4, addrspace 5)
+ S_ENDPGM 0
diff --git a/llvm/test/CodeGen/AMDGPU/wave32.ll b/llvm/test/CodeGen/AMDGPU/wave32.ll
index 92117e0..4576d82 100644
--- a/llvm/test/CodeGen/AMDGPU/wave32.ll
+++ b/llvm/test/CodeGen/AMDGPU/wave32.ll
@@ -372,8 +372,8 @@ define amdgpu_kernel void @test_loop_with_if(ptr addrspace(1) %arg) #0 {
; GFX1032-NEXT: .LBB10_2: ; %bb2
; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1032-NEXT: v_cmp_ge_i32_e64 s4, v1, v0
-; GFX1032-NEXT: v_cmp_lt_i32_e32 vcc_lo, v1, v0
; GFX1032-NEXT: s_mov_b32 s3, 0
+; GFX1032-NEXT: v_cmp_lt_i32_e32 vcc_lo, v1, v0
; GFX1032-NEXT: s_and_saveexec_b32 s5, vcc_lo
; GFX1032-NEXT: s_cbranch_execz .LBB10_4
; GFX1032-NEXT: ; %bb.3: ; %bb5
@@ -515,8 +515,8 @@ bb13:
define amdgpu_kernel void @test_loop_with_if_else_break(ptr addrspace(1) %arg) #0 {
; GFX1032-LABEL: test_loop_with_if_else_break:
; GFX1032: ; %bb.0: ; %bb
-; GFX1032-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
; GFX1032-NEXT: s_mov_b32 s4, 0
+; GFX1032-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
; GFX1032-NEXT: s_and_saveexec_b32 s0, vcc_lo
; GFX1032-NEXT: s_cbranch_execz .LBB11_6
; GFX1032-NEXT: ; %bb.1: ; %.preheader
diff --git a/llvm/test/CodeGen/ARM/expand-pseudos.mir b/llvm/test/CodeGen/ARM/expand-pseudos.mir
index 8aada54..bafcce2 100644
--- a/llvm/test/CodeGen/ARM/expand-pseudos.mir
+++ b/llvm/test/CodeGen/ARM/expand-pseudos.mir
@@ -24,6 +24,9 @@
entry:
unreachable
}
+ define i32 @vbsl_kill_flags(i32 %x) {
+ unreachable
+ }
...
---
name: test1
@@ -141,3 +144,21 @@ body: |
BX_RET 14, $noreg, implicit $r0
...
+---
+name: vbsl_kill_flags
+alignment: 4
+tracksRegLiveness: true
+body: |
+ bb.0 (%ir-block.0):
+ liveins: $d1
+
+ ; CHECK-LABEL: name: vbsl_kill_flags
+ ; CHECK: liveins: $d1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $d0 = VORRd renamable $d1, renamable $d1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $d0 = VBSLd killed renamable $d0, renamable $d1, renamable $d1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: BX_RET 14 /* CC::al */, $noreg, implicit $d0
+ renamable $d0 = VBSPd killed renamable $d1, renamable $d1, renamable $d1, 14 /* CC::al */, $noreg
+ BX_RET 14 /* CC::al */, $noreg, implicit $d0
+
+...
diff --git a/llvm/test/CodeGen/ARM/preferred-function-alignment.ll b/llvm/test/CodeGen/ARM/preferred-function-alignment.ll
index afe64a2..f3a227c 100644
--- a/llvm/test/CodeGen/ARM/preferred-function-alignment.ll
+++ b/llvm/test/CodeGen/ARM/preferred-function-alignment.ll
@@ -1,14 +1,15 @@
-; RUN: llc -mtriple=arm-none-eabi -mcpu=cortex-m85 < %s | FileCheck --check-prefixes=CHECK,ALIGN-16,ALIGN-CS-16 %s
+; RUN: llc -mtriple=arm-none-eabi -mcpu=cortex-m85 < %s | FileCheck --check-prefixes=CHECK,ALIGN-64,ALIGN-CS-16 %s
; RUN: llc -mtriple=arm-none-eabi -mcpu=cortex-m23 < %s | FileCheck --check-prefixes=CHECK,ALIGN-16,ALIGN-CS-16 %s
; RUN: llc -mtriple=arm-none-eabi -mcpu=cortex-a5 < %s | FileCheck --check-prefixes=CHECK,ALIGN-32,ALIGN-CS-32 %s
; RUN: llc -mtriple=arm-none-eabi -mcpu=cortex-m33 < %s | FileCheck --check-prefixes=CHECK,ALIGN-32,ALIGN-CS-16 %s
; RUN: llc -mtriple=arm-none-eabi -mcpu=cortex-m55 < %s | FileCheck --check-prefixes=CHECK,ALIGN-32,ALIGN-CS-16 %s
-
+; RUN: llc -mtriple=arm-none-eabi -mcpu=cortex-m7 < %s | FileCheck --check-prefixes=CHECK,ALIGN-64,ALIGN-CS-16 %s
; CHECK-LABEL: test
; ALIGN-16: .p2align 1
; ALIGN-32: .p2align 2
+; ALIGN-64: .p2align 3
define void @test() {
ret void
diff --git a/llvm/test/CodeGen/ARM/vbsl.ll b/llvm/test/CodeGen/ARM/vbsl.ll
index 735fa51..d5aaf3e 100644
--- a/llvm/test/CodeGen/ARM/vbsl.ll
+++ b/llvm/test/CodeGen/ARM/vbsl.ll
@@ -1,17 +1,15 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=arm-eabi -mattr=+neon %s -o - | FileCheck %s
-
-; rdar://12471808
+; RUN: llc -mtriple=armv7-eabihf -mattr=+neon -verify-machineinstrs %s -o - | FileCheck %s
define <8 x i8> @v_bsli8(ptr %A, ptr %B, ptr %C) nounwind {
; CHECK-LABEL: v_bsli8:
; CHECK: @ %bb.0:
; CHECK-NEXT: vldr d18, [r0]
; CHECK-NEXT: vldr d16, [r2]
+; CHECK-NEXT: vorr d0, d18, d18
; CHECK-NEXT: vldr d17, [r1]
-; CHECK-NEXT: vbit d16, d17, d18
-; CHECK-NEXT: vmov r0, r1, d16
-; CHECK-NEXT: mov pc, lr
+; CHECK-NEXT: vbsl d0, d17, d16
+; CHECK-NEXT: bx lr
%tmp1 = load <8 x i8>, ptr %A
%tmp2 = load <8 x i8>, ptr %B
%tmp3 = load <8 x i8>, ptr %C
@@ -27,10 +25,10 @@ define <4 x i16> @v_bsli16(ptr %A, ptr %B, ptr %C) nounwind {
; CHECK: @ %bb.0:
; CHECK-NEXT: vldr d18, [r0]
; CHECK-NEXT: vldr d16, [r2]
+; CHECK-NEXT: vorr d0, d18, d18
; CHECK-NEXT: vldr d17, [r1]
-; CHECK-NEXT: vbit d16, d17, d18
-; CHECK-NEXT: vmov r0, r1, d16
-; CHECK-NEXT: mov pc, lr
+; CHECK-NEXT: vbsl d0, d17, d16
+; CHECK-NEXT: bx lr
%tmp1 = load <4 x i16>, ptr %A
%tmp2 = load <4 x i16>, ptr %B
%tmp3 = load <4 x i16>, ptr %C
@@ -46,10 +44,10 @@ define <2 x i32> @v_bsli32(ptr %A, ptr %B, ptr %C) nounwind {
; CHECK: @ %bb.0:
; CHECK-NEXT: vldr d18, [r0]
; CHECK-NEXT: vldr d16, [r2]
+; CHECK-NEXT: vorr d0, d18, d18
; CHECK-NEXT: vldr d17, [r1]
-; CHECK-NEXT: vbit d16, d17, d18
-; CHECK-NEXT: vmov r0, r1, d16
-; CHECK-NEXT: mov pc, lr
+; CHECK-NEXT: vbsl d0, d17, d16
+; CHECK-NEXT: bx lr
%tmp1 = load <2 x i32>, ptr %A
%tmp2 = load <2 x i32>, ptr %B
%tmp3 = load <2 x i32>, ptr %C
@@ -65,10 +63,10 @@ define <1 x i64> @v_bsli64(ptr %A, ptr %B, ptr %C) nounwind {
; CHECK: @ %bb.0:
; CHECK-NEXT: vldr d18, [r0]
; CHECK-NEXT: vldr d16, [r2]
+; CHECK-NEXT: vorr d0, d18, d18
; CHECK-NEXT: vldr d17, [r1]
-; CHECK-NEXT: vbit d16, d17, d18
-; CHECK-NEXT: vmov r0, r1, d16
-; CHECK-NEXT: mov pc, lr
+; CHECK-NEXT: vbsl d0, d17, d16
+; CHECK-NEXT: bx lr
%tmp1 = load <1 x i64>, ptr %A
%tmp2 = load <1 x i64>, ptr %B
%tmp3 = load <1 x i64>, ptr %C
@@ -83,12 +81,11 @@ define <16 x i8> @v_bslQi8(ptr %A, ptr %B, ptr %C) nounwind {
; CHECK-LABEL: v_bslQi8:
; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.64 {d20, d21}, [r0]
+; CHECK-NEXT: vorr q0, q10, q10
; CHECK-NEXT: vld1.64 {d16, d17}, [r2]
; CHECK-NEXT: vld1.64 {d18, d19}, [r1]
-; CHECK-NEXT: vbit q8, q9, q10
-; CHECK-NEXT: vmov r0, r1, d16
-; CHECK-NEXT: vmov r2, r3, d17
-; CHECK-NEXT: mov pc, lr
+; CHECK-NEXT: vbsl q0, q9, q8
+; CHECK-NEXT: bx lr
%tmp1 = load <16 x i8>, ptr %A
%tmp2 = load <16 x i8>, ptr %B
%tmp3 = load <16 x i8>, ptr %C
@@ -103,12 +100,11 @@ define <8 x i16> @v_bslQi16(ptr %A, ptr %B, ptr %C) nounwind {
; CHECK-LABEL: v_bslQi16:
; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.64 {d20, d21}, [r0]
+; CHECK-NEXT: vorr q0, q10, q10
; CHECK-NEXT: vld1.64 {d16, d17}, [r2]
; CHECK-NEXT: vld1.64 {d18, d19}, [r1]
-; CHECK-NEXT: vbit q8, q9, q10
-; CHECK-NEXT: vmov r0, r1, d16
-; CHECK-NEXT: vmov r2, r3, d17
-; CHECK-NEXT: mov pc, lr
+; CHECK-NEXT: vbsl q0, q9, q8
+; CHECK-NEXT: bx lr
%tmp1 = load <8 x i16>, ptr %A
%tmp2 = load <8 x i16>, ptr %B
%tmp3 = load <8 x i16>, ptr %C
@@ -123,12 +119,11 @@ define <4 x i32> @v_bslQi32(ptr %A, ptr %B, ptr %C) nounwind {
; CHECK-LABEL: v_bslQi32:
; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.64 {d20, d21}, [r0]
+; CHECK-NEXT: vorr q0, q10, q10
; CHECK-NEXT: vld1.64 {d16, d17}, [r2]
; CHECK-NEXT: vld1.64 {d18, d19}, [r1]
-; CHECK-NEXT: vbit q8, q9, q10
-; CHECK-NEXT: vmov r0, r1, d16
-; CHECK-NEXT: vmov r2, r3, d17
-; CHECK-NEXT: mov pc, lr
+; CHECK-NEXT: vbsl q0, q9, q8
+; CHECK-NEXT: bx lr
%tmp1 = load <4 x i32>, ptr %A
%tmp2 = load <4 x i32>, ptr %B
%tmp3 = load <4 x i32>, ptr %C
@@ -143,12 +138,11 @@ define <2 x i64> @v_bslQi64(ptr %A, ptr %B, ptr %C) nounwind {
; CHECK-LABEL: v_bslQi64:
; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.64 {d20, d21}, [r0]
+; CHECK-NEXT: vorr q0, q10, q10
; CHECK-NEXT: vld1.64 {d16, d17}, [r2]
; CHECK-NEXT: vld1.64 {d18, d19}, [r1]
-; CHECK-NEXT: vbit q8, q9, q10
-; CHECK-NEXT: vmov r0, r1, d16
-; CHECK-NEXT: vmov r2, r3, d17
-; CHECK-NEXT: mov pc, lr
+; CHECK-NEXT: vbsl q0, q9, q8
+; CHECK-NEXT: bx lr
%tmp1 = load <2 x i64>, ptr %A
%tmp2 = load <2 x i64>, ptr %B
%tmp3 = load <2 x i64>, ptr %C
@@ -162,12 +156,8 @@ define <2 x i64> @v_bslQi64(ptr %A, ptr %B, ptr %C) nounwind {
define <8 x i8> @f1(<8 x i8> %a, <8 x i8> %b, <8 x i8> %c) nounwind readnone optsize ssp {
; CHECK-LABEL: f1:
; CHECK: @ %bb.0:
-; CHECK-NEXT: vldr d16, [sp]
-; CHECK-NEXT: vmov d17, r2, r3
-; CHECK-NEXT: vmov d18, r0, r1
-; CHECK-NEXT: vbit d16, d17, d18
-; CHECK-NEXT: vmov r0, r1, d16
-; CHECK-NEXT: mov pc, lr
+; CHECK-NEXT: vbsl d0, d1, d2
+; CHECK-NEXT: bx lr
%vbsl.i = tail call <8 x i8> @llvm.arm.neon.vbsl.v8i8(<8 x i8> %a, <8 x i8> %b, <8 x i8> %c) nounwind
ret <8 x i8> %vbsl.i
}
@@ -175,12 +165,8 @@ define <8 x i8> @f1(<8 x i8> %a, <8 x i8> %b, <8 x i8> %c) nounwind readnone opt
define <4 x i16> @f2(<4 x i16> %a, <4 x i16> %b, <4 x i16> %c) nounwind readnone optsize ssp {
; CHECK-LABEL: f2:
; CHECK: @ %bb.0:
-; CHECK-NEXT: vldr d16, [sp]
-; CHECK-NEXT: vmov d17, r2, r3
-; CHECK-NEXT: vmov d18, r0, r1
-; CHECK-NEXT: vbit d16, d17, d18
-; CHECK-NEXT: vmov r0, r1, d16
-; CHECK-NEXT: mov pc, lr
+; CHECK-NEXT: vbsl d0, d1, d2
+; CHECK-NEXT: bx lr
%vbsl3.i = tail call <4 x i16> @llvm.arm.neon.vbsl.v4i16(<4 x i16> %a, <4 x i16> %b, <4 x i16> %c) nounwind
ret <4 x i16> %vbsl3.i
}
@@ -188,12 +174,8 @@ define <4 x i16> @f2(<4 x i16> %a, <4 x i16> %b, <4 x i16> %c) nounwind readnone
define <2 x i32> @f3(<2 x i32> %a, <2 x i32> %b, <2 x i32> %c) nounwind readnone optsize ssp {
; CHECK-LABEL: f3:
; CHECK: @ %bb.0:
-; CHECK-NEXT: vldr d16, [sp]
-; CHECK-NEXT: vmov d17, r2, r3
-; CHECK-NEXT: vmov d18, r0, r1
-; CHECK-NEXT: vbit d16, d17, d18
-; CHECK-NEXT: vmov r0, r1, d16
-; CHECK-NEXT: mov pc, lr
+; CHECK-NEXT: vbsl d0, d1, d2
+; CHECK-NEXT: bx lr
%vbsl3.i = tail call <2 x i32> @llvm.arm.neon.vbsl.v2i32(<2 x i32> %a, <2 x i32> %b, <2 x i32> %c) nounwind
ret <2 x i32> %vbsl3.i
}
@@ -201,12 +183,8 @@ define <2 x i32> @f3(<2 x i32> %a, <2 x i32> %b, <2 x i32> %c) nounwind readnone
define <2 x float> @f4(<2 x float> %a, <2 x float> %b, <2 x float> %c) nounwind readnone optsize ssp {
; CHECK-LABEL: f4:
; CHECK: @ %bb.0:
-; CHECK-NEXT: vldr d16, [sp]
-; CHECK-NEXT: vmov d17, r2, r3
-; CHECK-NEXT: vmov d18, r0, r1
-; CHECK-NEXT: vbit d16, d17, d18
-; CHECK-NEXT: vmov r0, r1, d16
-; CHECK-NEXT: mov pc, lr
+; CHECK-NEXT: vbsl d0, d1, d2
+; CHECK-NEXT: bx lr
%vbsl4.i = tail call <2 x float> @llvm.arm.neon.vbsl.v2f32(<2 x float> %a, <2 x float> %b, <2 x float> %c) nounwind
ret <2 x float> %vbsl4.i
}
@@ -214,16 +192,8 @@ define <2 x float> @f4(<2 x float> %a, <2 x float> %b, <2 x float> %c) nounwind
define <16 x i8> @g1(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c) nounwind readnone optsize ssp {
; CHECK-LABEL: g1:
; CHECK: @ %bb.0:
-; CHECK-NEXT: vmov d19, r2, r3
-; CHECK-NEXT: add r12, sp, #16
-; CHECK-NEXT: vmov d18, r0, r1
-; CHECK-NEXT: mov r0, sp
-; CHECK-NEXT: vld1.64 {d16, d17}, [r12]
-; CHECK-NEXT: vld1.64 {d20, d21}, [r0]
-; CHECK-NEXT: vbit q8, q10, q9
-; CHECK-NEXT: vmov r0, r1, d16
-; CHECK-NEXT: vmov r2, r3, d17
-; CHECK-NEXT: mov pc, lr
+; CHECK-NEXT: vbsl q0, q1, q2
+; CHECK-NEXT: bx lr
%vbsl.i = tail call <16 x i8> @llvm.arm.neon.vbsl.v16i8(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c) nounwind
ret <16 x i8> %vbsl.i
}
@@ -231,16 +201,8 @@ define <16 x i8> @g1(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c) nounwind readnone
define <8 x i16> @g2(<8 x i16> %a, <8 x i16> %b, <8 x i16> %c) nounwind readnone optsize ssp {
; CHECK-LABEL: g2:
; CHECK: @ %bb.0:
-; CHECK-NEXT: vmov d19, r2, r3
-; CHECK-NEXT: add r12, sp, #16
-; CHECK-NEXT: vmov d18, r0, r1
-; CHECK-NEXT: mov r0, sp
-; CHECK-NEXT: vld1.64 {d16, d17}, [r12]
-; CHECK-NEXT: vld1.64 {d20, d21}, [r0]
-; CHECK-NEXT: vbit q8, q10, q9
-; CHECK-NEXT: vmov r0, r1, d16
-; CHECK-NEXT: vmov r2, r3, d17
-; CHECK-NEXT: mov pc, lr
+; CHECK-NEXT: vbsl q0, q1, q2
+; CHECK-NEXT: bx lr
%vbsl3.i = tail call <8 x i16> @llvm.arm.neon.vbsl.v8i16(<8 x i16> %a, <8 x i16> %b, <8 x i16> %c) nounwind
ret <8 x i16> %vbsl3.i
}
@@ -248,16 +210,8 @@ define <8 x i16> @g2(<8 x i16> %a, <8 x i16> %b, <8 x i16> %c) nounwind readnone
define <4 x i32> @g3(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) nounwind readnone optsize ssp {
; CHECK-LABEL: g3:
; CHECK: @ %bb.0:
-; CHECK-NEXT: vmov d19, r2, r3
-; CHECK-NEXT: add r12, sp, #16
-; CHECK-NEXT: vmov d18, r0, r1
-; CHECK-NEXT: mov r0, sp
-; CHECK-NEXT: vld1.64 {d16, d17}, [r12]
-; CHECK-NEXT: vld1.64 {d20, d21}, [r0]
-; CHECK-NEXT: vbit q8, q10, q9
-; CHECK-NEXT: vmov r0, r1, d16
-; CHECK-NEXT: vmov r2, r3, d17
-; CHECK-NEXT: mov pc, lr
+; CHECK-NEXT: vbsl q0, q1, q2
+; CHECK-NEXT: bx lr
%vbsl3.i = tail call <4 x i32> @llvm.arm.neon.vbsl.v4i32(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) nounwind
ret <4 x i32> %vbsl3.i
}
@@ -265,16 +219,8 @@ define <4 x i32> @g3(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) nounwind readnone
define <4 x float> @g4(<4 x float> %a, <4 x float> %b, <4 x float> %c) nounwind readnone optsize ssp {
; CHECK-LABEL: g4:
; CHECK: @ %bb.0:
-; CHECK-NEXT: vmov d19, r2, r3
-; CHECK-NEXT: add r12, sp, #16
-; CHECK-NEXT: vmov d18, r0, r1
-; CHECK-NEXT: mov r0, sp
-; CHECK-NEXT: vld1.64 {d16, d17}, [r12]
-; CHECK-NEXT: vld1.64 {d20, d21}, [r0]
-; CHECK-NEXT: vbit q8, q10, q9
-; CHECK-NEXT: vmov r0, r1, d16
-; CHECK-NEXT: vmov r2, r3, d17
-; CHECK-NEXT: mov pc, lr
+; CHECK-NEXT: vbsl q0, q1, q2
+; CHECK-NEXT: bx lr
%vbsl4.i = tail call <4 x float> @llvm.arm.neon.vbsl.v4f32(<4 x float> %a, <4 x float> %b, <4 x float> %c) nounwind
ret <4 x float> %vbsl4.i
}
@@ -282,12 +228,8 @@ define <4 x float> @g4(<4 x float> %a, <4 x float> %b, <4 x float> %c) nounwind
define <1 x i64> @test_vbsl_s64(<1 x i64> %a, <1 x i64> %b, <1 x i64> %c) nounwind readnone optsize ssp {
; CHECK-LABEL: test_vbsl_s64:
; CHECK: @ %bb.0:
-; CHECK-NEXT: vldr d16, [sp]
-; CHECK-NEXT: vmov d17, r2, r3
-; CHECK-NEXT: vmov d18, r0, r1
-; CHECK-NEXT: vbit d16, d17, d18
-; CHECK-NEXT: vmov r0, r1, d16
-; CHECK-NEXT: mov pc, lr
+; CHECK-NEXT: vbsl d0, d1, d2
+; CHECK-NEXT: bx lr
%vbsl3.i = tail call <1 x i64> @llvm.arm.neon.vbsl.v1i64(<1 x i64> %a, <1 x i64> %b, <1 x i64> %c) nounwind
ret <1 x i64> %vbsl3.i
}
@@ -295,12 +237,8 @@ define <1 x i64> @test_vbsl_s64(<1 x i64> %a, <1 x i64> %b, <1 x i64> %c) nounwi
define <1 x i64> @test_vbsl_u64(<1 x i64> %a, <1 x i64> %b, <1 x i64> %c) nounwind readnone optsize ssp {
; CHECK-LABEL: test_vbsl_u64:
; CHECK: @ %bb.0:
-; CHECK-NEXT: vldr d16, [sp]
-; CHECK-NEXT: vmov d17, r2, r3
-; CHECK-NEXT: vmov d18, r0, r1
-; CHECK-NEXT: vbit d16, d17, d18
-; CHECK-NEXT: vmov r0, r1, d16
-; CHECK-NEXT: mov pc, lr
+; CHECK-NEXT: vbsl d0, d1, d2
+; CHECK-NEXT: bx lr
%vbsl3.i = tail call <1 x i64> @llvm.arm.neon.vbsl.v1i64(<1 x i64> %a, <1 x i64> %b, <1 x i64> %c) nounwind
ret <1 x i64> %vbsl3.i
}
@@ -308,16 +246,8 @@ define <1 x i64> @test_vbsl_u64(<1 x i64> %a, <1 x i64> %b, <1 x i64> %c) nounwi
define <2 x i64> @test_vbslq_s64(<2 x i64> %a, <2 x i64> %b, <2 x i64> %c) nounwind readnone optsize ssp {
; CHECK-LABEL: test_vbslq_s64:
; CHECK: @ %bb.0:
-; CHECK-NEXT: vmov d19, r2, r3
-; CHECK-NEXT: add r12, sp, #16
-; CHECK-NEXT: vmov d18, r0, r1
-; CHECK-NEXT: mov r0, sp
-; CHECK-NEXT: vld1.64 {d16, d17}, [r12]
-; CHECK-NEXT: vld1.64 {d20, d21}, [r0]
-; CHECK-NEXT: vbit q8, q10, q9
-; CHECK-NEXT: vmov r0, r1, d16
-; CHECK-NEXT: vmov r2, r3, d17
-; CHECK-NEXT: mov pc, lr
+; CHECK-NEXT: vbsl q0, q1, q2
+; CHECK-NEXT: bx lr
%vbsl3.i = tail call <2 x i64> @llvm.arm.neon.vbsl.v2i64(<2 x i64> %a, <2 x i64> %b, <2 x i64> %c) nounwind
ret <2 x i64> %vbsl3.i
}
@@ -325,20 +255,40 @@ define <2 x i64> @test_vbslq_s64(<2 x i64> %a, <2 x i64> %b, <2 x i64> %c) nounw
define <2 x i64> @test_vbslq_u64(<2 x i64> %a, <2 x i64> %b, <2 x i64> %c) nounwind readnone optsize ssp {
; CHECK-LABEL: test_vbslq_u64:
; CHECK: @ %bb.0:
-; CHECK-NEXT: vmov d19, r2, r3
-; CHECK-NEXT: add r12, sp, #16
-; CHECK-NEXT: vmov d18, r0, r1
-; CHECK-NEXT: mov r0, sp
-; CHECK-NEXT: vld1.64 {d16, d17}, [r12]
-; CHECK-NEXT: vld1.64 {d20, d21}, [r0]
-; CHECK-NEXT: vbit q8, q10, q9
-; CHECK-NEXT: vmov r0, r1, d16
-; CHECK-NEXT: vmov r2, r3, d17
-; CHECK-NEXT: mov pc, lr
+; CHECK-NEXT: vbsl q0, q1, q2
+; CHECK-NEXT: bx lr
%vbsl3.i = tail call <2 x i64> @llvm.arm.neon.vbsl.v2i64(<2 x i64> %a, <2 x i64> %b, <2 x i64> %c) nounwind
ret <2 x i64> %vbsl3.i
}
+define <8 x i8> @same_param_all(<8 x i8> %a, <8 x i8> %b) {
+; CHECK-LABEL: same_param_all:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vorr d0, d1, d1
+; CHECK-NEXT: vbsl d0, d1, d1
+; CHECK-NEXT: bx lr
+ %vbsl.i = tail call <8 x i8> @llvm.arm.neon.vbsl.v8i8(<8 x i8> %b, <8 x i8> %b, <8 x i8> %b)
+ ret <8 x i8> %vbsl.i
+}
+
+define <8 x i8> @same_param_12(<8 x i8> %a, <8 x i8> %b) {
+; CHECK-LABEL: same_param_12:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vbsl d0, d1, d1
+; CHECK-NEXT: bx lr
+ %vbsl.i = tail call <8 x i8> @llvm.arm.neon.vbsl.v8i8(<8 x i8> %a, <8 x i8> %b, <8 x i8> %b)
+ ret <8 x i8> %vbsl.i
+}
+
+define <8 x i8> @same_param_01(<8 x i8> %a, <8 x i8> %b) {
+; CHECK-LABEL: same_param_01:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vbif d0, d1, d0
+; CHECK-NEXT: bx lr
+ %vbsl.i = tail call <8 x i8> @llvm.arm.neon.vbsl.v8i8(<8 x i8> %a, <8 x i8> %a, <8 x i8> %b)
+ ret <8 x i8> %vbsl.i
+}
+
declare <4 x i32> @llvm.arm.neon.vbsl.v4i32(<4 x i32>, <4 x i32>, <4 x i32>) nounwind readnone
declare <8 x i16> @llvm.arm.neon.vbsl.v8i16(<8 x i16>, <8 x i16>, <8 x i16>) nounwind readnone
declare <16 x i8> @llvm.arm.neon.vbsl.v16i8(<16 x i8>, <16 x i8>, <16 x i8>) nounwind readnone
diff --git a/llvm/test/CodeGen/BPF/BTF/atomics.ll b/llvm/test/CodeGen/BPF/BTF/atomics.ll
new file mode 100644
index 0000000..2c02110
--- /dev/null
+++ b/llvm/test/CodeGen/BPF/BTF/atomics.ll
@@ -0,0 +1,151 @@
+; RUN: llc -march=bpfel -mcpu=v3 -filetype=obj -o %t1 %s
+; RUN: llvm-objcopy --dump-section='.BTF'=%t2 %t1
+; RUN: %python %p/print_btf.py %t2 | FileCheck -check-prefixes=CHECK %s
+;
+; Source:
+; #include <stdatomic.h>
+; struct gstruct_t {
+; _Atomic int a;
+; } gstruct;
+; extern _Atomic int ext;
+; _Atomic int gbl;
+; _Atomic int *pgbl;
+; volatile _Atomic int vvar;
+; _Atomic int __attribute__((btf_type_tag("foo"))) *tagptr1;
+; volatile __attribute__((btf_type_tag("foo"))) _Atomic int *tagptr2;
+; _Atomic int foo(_Atomic int a1, _Atomic int *p1) {
+; (void)__c11_atomic_fetch_add(&gstruct.a, 1, memory_order_relaxed);
+; (void)__c11_atomic_fetch_add(&ext, 1, memory_order_relaxed);
+; (void)__c11_atomic_fetch_add(&gbl, 1, memory_order_relaxed);
+; (void)__c11_atomic_fetch_add(pgbl, 1, memory_order_relaxed);
+; (void)__c11_atomic_fetch_add(&vvar, 1, memory_order_relaxed);
+; (void)__c11_atomic_fetch_add(p1, 1, memory_order_relaxed);
+;
+; return a1;
+; }
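+;
+; Note that _Atomic itself leaves no trace in the BTF below: atomic
+; variables are encoded with their underlying types (e.g. VAR 'gbl' points
+; straight at the 'int' entry), while the volatile and btf_type_tag
+; modifiers are preserved.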
+
+target triple = "bpf"
+
+%struct.gstruct_t = type { i32 }
+
+@gstruct = dso_local global %struct.gstruct_t zeroinitializer, align 4, !dbg !0
+@ext = external dso_local global i32, align 4, !dbg !34
+@gbl = dso_local global i32 0, align 4, !dbg !16
+@pgbl = dso_local local_unnamed_addr global ptr null, align 8, !dbg !20
+@vvar = dso_local global i32 0, align 4, !dbg !23
+@tagptr1 = dso_local local_unnamed_addr global ptr null, align 8, !dbg !26
+@tagptr2 = dso_local local_unnamed_addr global ptr null, align 8, !dbg !31
+
+; Function Attrs: mustprogress nofree norecurse nounwind willreturn
+define dso_local i32 @foo(i32 returned %a1, ptr nocapture noundef %p1) local_unnamed_addr #0 !dbg !45 {
+entry:
+ #dbg_value(i32 %a1, !49, !DIExpression(), !51)
+ #dbg_value(ptr %p1, !50, !DIExpression(), !51)
+ %0 = atomicrmw add ptr @gstruct, i32 1 monotonic, align 4, !dbg !52
+ %1 = atomicrmw add ptr @ext, i32 1 monotonic, align 4, !dbg !53
+ %2 = atomicrmw add ptr @gbl, i32 1 monotonic, align 4, !dbg !54
+ %3 = load ptr, ptr @pgbl, align 8, !dbg !55, !tbaa !56
+ %4 = atomicrmw add ptr %3, i32 1 monotonic, align 4, !dbg !60
+ %5 = atomicrmw volatile add ptr @vvar, i32 1 monotonic, align 4, !dbg !61
+ %6 = atomicrmw add ptr %p1, i32 1 monotonic, align 4, !dbg !62
+ ret i32 %a1, !dbg !63
+}
+
+; CHECK: [1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED
+; CHECK-NEXT: [2] PTR '(anon)' type_id=1
+; CHECK-NEXT: [3] FUNC_PROTO '(anon)' ret_type_id=1 vlen=2
+; CHECK-NEXT: 'a1' type_id=1
+; CHECK-NEXT: 'p1' type_id=2
+; CHECK-NEXT: [4] FUNC 'foo' type_id=3 linkage=global
+; CHECK-NEXT: [5] STRUCT 'gstruct_t' size=4 vlen=1
+; CHECK-NEXT: 'a' type_id=1 bits_offset=0
+; CHECK-NEXT: [6] VAR 'gstruct' type_id=5, linkage=global
+; CHECK-NEXT: [7] VAR 'ext' type_id=1, linkage=extern
+; CHECK-NEXT: [8] VAR 'gbl' type_id=1, linkage=global
+; CHECK-NEXT: [9] VAR 'pgbl' type_id=2, linkage=global
+; CHECK-NEXT: [10] VOLATILE '(anon)' type_id=1
+; CHECK-NEXT: [11] VAR 'vvar' type_id=10, linkage=global
+; CHECK-NEXT: [12] TYPE_TAG 'foo' type_id=1
+; CHECK-NEXT: [13] PTR '(anon)' type_id=12
+; CHECK-NEXT: [14] VAR 'tagptr1' type_id=13, linkage=global
+; CHECK-NEXT: [15] TYPE_TAG 'foo' type_id=10
+; CHECK-NEXT: [16] PTR '(anon)' type_id=15
+; CHECK-NEXT: [17] VAR 'tagptr2' type_id=16, linkage=global
+; CHECK-NEXT: [18] DATASEC '.bss' size=0 vlen=6
+; CHECK-NEXT: type_id=6 offset=0 size=4
+; CHECK-NEXT: type_id=8 offset=0 size=4
+; CHECK-NEXT: type_id=9 offset=0 size=8
+; CHECK-NEXT: type_id=11 offset=0 size=4
+; CHECK-NEXT: type_id=14 offset=0 size=8
+; CHECK-NEXT: type_id=17 offset=0 size=8
+
+attributes #0 = { mustprogress nofree norecurse nounwind willreturn "frame-pointer"="all" "no-trapping-math"="true" "stack-protector-buffer-size"="8" }
+
+!llvm.dbg.cu = !{!2}
+!llvm.module.flags = !{!39, !40, !41, !42, !43}
+!llvm.ident = !{!44}
+
+!0 = !DIGlobalVariableExpression(var: !1, expr: !DIExpression())
+!1 = distinct !DIGlobalVariable(name: "gstruct", scope: !2, file: !3, line: 4, type: !36, isLocal: false, isDefinition: true)
+!2 = distinct !DICompileUnit(language: DW_LANG_C11, file: !3, producer: "clang version 20.0.0git (git@github.com:yonghong-song/llvm-project.git 96b5b6e527c024bea84f07ea11d4b3ff63468c22)", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !4, globals: !15, splitDebugInlining: false, nameTableKind: None)
+!3 = !DIFile(filename: "test6.c", directory: "/tmp/home/yhs/tmp3", checksumkind: CSK_MD5, checksum: "e743f2985da6027dcc5e048bd1dcccca")
+!4 = !{!5}
+!5 = !DICompositeType(tag: DW_TAG_enumeration_type, name: "memory_order", file: !6, line: 68, baseType: !7, size: 32, elements: !8)
+!6 = !DIFile(filename: "work/yhs/llvm-project/llvm/build/install/lib/clang/20/include/stdatomic.h", directory: "/home/yhs", checksumkind: CSK_MD5, checksum: "f17199a988fe91afffaf0f943ef87096")
+!7 = !DIBasicType(name: "unsigned int", size: 32, encoding: DW_ATE_unsigned)
+!8 = !{!9, !10, !11, !12, !13, !14}
+!9 = !DIEnumerator(name: "memory_order_relaxed", value: 0)
+!10 = !DIEnumerator(name: "memory_order_consume", value: 1)
+!11 = !DIEnumerator(name: "memory_order_acquire", value: 2)
+!12 = !DIEnumerator(name: "memory_order_release", value: 3)
+!13 = !DIEnumerator(name: "memory_order_acq_rel", value: 4)
+!14 = !DIEnumerator(name: "memory_order_seq_cst", value: 5)
+!15 = !{!0, !16, !20, !23, !26, !31, !34}
+!16 = !DIGlobalVariableExpression(var: !17, expr: !DIExpression())
+!17 = distinct !DIGlobalVariable(name: "gbl", scope: !2, file: !3, line: 6, type: !18, isLocal: false, isDefinition: true)
+!18 = !DIDerivedType(tag: DW_TAG_atomic_type, baseType: !19)
+!19 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
+!20 = !DIGlobalVariableExpression(var: !21, expr: !DIExpression())
+!21 = distinct !DIGlobalVariable(name: "pgbl", scope: !2, file: !3, line: 7, type: !22, isLocal: false, isDefinition: true)
+!22 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !18, size: 64)
+!23 = !DIGlobalVariableExpression(var: !24, expr: !DIExpression())
+!24 = distinct !DIGlobalVariable(name: "vvar", scope: !2, file: !3, line: 8, type: !25, isLocal: false, isDefinition: true)
+!25 = !DIDerivedType(tag: DW_TAG_volatile_type, baseType: !18)
+!26 = !DIGlobalVariableExpression(var: !27, expr: !DIExpression())
+!27 = distinct !DIGlobalVariable(name: "tagptr1", scope: !2, file: !3, line: 9, type: !28, isLocal: false, isDefinition: true)
+!28 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !18, size: 64, annotations: !29)
+!29 = !{!30}
+!30 = !{!"btf_type_tag", !"foo"}
+!31 = !DIGlobalVariableExpression(var: !32, expr: !DIExpression())
+!32 = distinct !DIGlobalVariable(name: "tagptr2", scope: !2, file: !3, line: 10, type: !33, isLocal: false, isDefinition: true)
+!33 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !25, size: 64, annotations: !29)
+!34 = !DIGlobalVariableExpression(var: !35, expr: !DIExpression())
+!35 = distinct !DIGlobalVariable(name: "ext", scope: !2, file: !3, line: 5, type: !18, isLocal: false, isDefinition: false)
+!36 = distinct !DICompositeType(tag: DW_TAG_structure_type, name: "gstruct_t", file: !3, line: 2, size: 32, elements: !37)
+!37 = !{!38}
+!38 = !DIDerivedType(tag: DW_TAG_member, name: "a", scope: !36, file: !3, line: 3, baseType: !18, size: 32)
+!39 = !{i32 7, !"Dwarf Version", i32 5}
+!40 = !{i32 2, !"Debug Info Version", i32 3}
+!41 = !{i32 1, !"wchar_size", i32 4}
+!42 = !{i32 7, !"frame-pointer", i32 2}
+!43 = !{i32 7, !"debug-info-assignment-tracking", i1 true}
+!44 = !{!"clang version 20.0.0git (git@github.com:yonghong-song/llvm-project.git 96b5b6e527c024bea84f07ea11d4b3ff63468c22)"}
+!45 = distinct !DISubprogram(name: "foo", scope: !3, file: !3, line: 11, type: !46, scopeLine: 11, flags: DIFlagPrototyped | DIFlagAllCallsDescribed, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !2, retainedNodes: !48)
+!46 = !DISubroutineType(types: !47)
+!47 = !{!18, !18, !22}
+!48 = !{!49, !50}
+!49 = !DILocalVariable(name: "a1", arg: 1, scope: !45, file: !3, line: 11, type: !18)
+!50 = !DILocalVariable(name: "p1", arg: 2, scope: !45, file: !3, line: 11, type: !22)
+!51 = !DILocation(line: 0, scope: !45)
+!52 = !DILocation(line: 12, column: 9, scope: !45)
+!53 = !DILocation(line: 13, column: 9, scope: !45)
+!54 = !DILocation(line: 14, column: 9, scope: !45)
+!55 = !DILocation(line: 15, column: 32, scope: !45)
+!56 = !{!57, !57, i64 0}
+!57 = !{!"any pointer", !58, i64 0}
+!58 = !{!"omnipotent char", !59, i64 0}
+!59 = !{!"Simple C/C++ TBAA"}
+!60 = !DILocation(line: 15, column: 9, scope: !45)
+!61 = !DILocation(line: 16, column: 9, scope: !45)
+!62 = !DILocation(line: 17, column: 9, scope: !45)
+!63 = !DILocation(line: 19, column: 3, scope: !45)
diff --git a/llvm/test/CodeGen/BPF/BTF/print_btf.py b/llvm/test/CodeGen/BPF/BTF/print_btf.py
new file mode 100644
index 0000000..6ce08b7
--- /dev/null
+++ b/llvm/test/CodeGen/BPF/BTF/print_btf.py
@@ -0,0 +1,295 @@
+#!/usr/bin/env python3
+
+# Ad-hoc script to print a BTF file in a readable format.
+# Follows the same printing conventions as bpftool with format 'raw'.
+# Usage:
+#
+# ./print_btf.py <btf_file>
+#
+# Parameters:
+#
+# <btf_file> :: a file name or '-' to read from stdin.
+#
+# Intended usage:
+#
+# llvm-objcopy --dump-section .BTF=- <input> | ./print_btf.py -
+#
+# Kernel documentation contains detailed format description:
+# https://www.kernel.org/doc/html/latest/bpf/btf.html
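+#
+# For reference, the fixed-size header parsed below is, in struct notation
+# (assuming a little-endian file):
+#
+#   magic, version, flags, hdr_len, \
+#       type_off, type_len, str_off, str_len = struct.unpack_from("<HBBIIIII", buf)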
+
+import struct
+import ctypes
+import sys
+
+
+class SafeDict(dict):
+ def __getitem__(self, key):
+ try:
+ return dict.__getitem__(self, key)
+ except KeyError:
+ return f"<BAD_KEY: {key}>"
+
+
+KINDS = SafeDict(
+ {
+ 0: "UNKN",
+ 1: "INT",
+ 2: "PTR",
+ 3: "ARRAY",
+ 4: "STRUCT",
+ 5: "UNION",
+ 6: "ENUM",
+ 7: "FWD",
+ 8: "TYPEDEF",
+ 9: "VOLATILE",
+ 10: "CONST",
+ 11: "RESTRICT",
+ 12: "FUNC",
+ 13: "FUNC_PROTO",
+ 14: "VAR",
+ 15: "DATASEC",
+ 16: "FLOAT",
+ 17: "DECL_TAG",
+ 18: "TYPE_TAG",
+ 19: "ENUM64",
+ }
+)
+
+INT_ENCODING = SafeDict(
+ {0 << 0: "(none)", 1 << 0: "SIGNED", 1 << 1: "CHAR", 1 << 2: "BOOL"}
+)
+
+ENUM_ENCODING = SafeDict({0: "UNSIGNED", 1: "SIGNED"})
+
+FUNC_LINKAGE = SafeDict({0: "static", 1: "global", 2: "extern"})
+
+VAR_LINKAGE = SafeDict({0: "static", 1: "global", 2: "extern"})
+
+FWD_KIND = SafeDict(
+ {
+ 0: "struct",
+ 1: "union",
+ }
+)
+
+for val, name in KINDS.items():
+ globals()["BTF_KIND_" + name] = val
+
+
+def warn(message):
+ print(message, file=sys.stderr)
+
+
+def print_btf(filename):
+ if filename == "-":
+ buf = sys.stdin.buffer.read()
+ else:
+ with open(filename, "rb") as file:
+ buf = file.read()
+
+ fmt_cache = {}
+ endian_pfx = ""
+ off = 0
+
+ def unpack(fmt):
+ nonlocal off, endian_pfx
+ fmt = endian_pfx + fmt
+ if fmt not in fmt_cache:
+ fmt_cache[fmt] = struct.Struct(fmt)
+ st = fmt_cache[fmt]
+ r = st.unpack_from(buf, off)
+ off += st.size
+ return r
+
+ # Use magic number at the header start to determine endianness
+ (magic,) = unpack("H")
+ if magic == 0xEB9F:
+ endian_pfx = "<"
+ elif magic == 0x9FEB:
+ endian_pfx = ">"
+ else:
+ warn(f"Unexpected BTF magic: {magic:02x}")
+ return
+
+ # Rest of the header
+ version, flags, hdr_len = unpack("BBI")
+ type_off, type_len, str_off, str_len = unpack("IIII")
+
+    # Offsets in the header are relative to the end of the header.
+ type_off += hdr_len
+ str_off += hdr_len
+ off = hdr_len
+ type_end = type_off + type_len
+
+ def string(rel_off):
+ try:
+ start = str_off + rel_off
+ end = buf.index(b"\0", start)
+ if start == end:
+ return "(anon)"
+ return buf[start:end].decode("utf8")
+ except ValueError as e:
+ warn(f"Can't get string at offset {str_off} + {rel_off}: {e}")
+ return f"<BAD_STRING {rel_off}>"
+
+ idx = 1
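+    # Each type record begins with the common (name_off, info, size/type)
+    # triple; kind-specific extra data follows immediately after it.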
+ while off < type_end:
+ name_off, info, size = unpack("III")
+ kind = (info >> 24) & 0x1F
+ vlen = info & 0xFFFF
+ kflag = info >> 31
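+        # The info word packs vlen in bits 0-15, the kind in bits 24-28,
+        # and kflag in bit 31.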
+ kind_name = KINDS[kind]
+ name = string(name_off)
+
+ def warn_nonzero(val, name):
+ nonlocal idx
+ if val != 0:
+ warn(f"<{idx}> {name} should be 0 but is {val}")
+
+ if kind == BTF_KIND_INT:
+ (info,) = unpack("I")
+ encoding = (info & 0x0F000000) >> 24
+ offset = (info & 0x00FF0000) >> 16
+ bits = info & 0x000000FF
+ enc_name = INT_ENCODING[encoding]
+ print(
+ f"[{idx}] {kind_name} '{name}' size={size} "
+ f"bits_offset={offset} "
+ f"nr_bits={bits} encoding={enc_name}"
+ )
+ warn_nonzero(kflag, "kflag")
+ warn_nonzero(vlen, "vlen")
+
+ elif kind in [
+ BTF_KIND_PTR,
+ BTF_KIND_CONST,
+ BTF_KIND_VOLATILE,
+ BTF_KIND_RESTRICT,
+ ]:
+ print(f"[{idx}] {kind_name} '{name}' type_id={size}")
+ warn_nonzero(name_off, "name_off")
+ warn_nonzero(kflag, "kflag")
+ warn_nonzero(vlen, "vlen")
+
+ elif kind == BTF_KIND_ARRAY:
+ warn_nonzero(name_off, "name_off")
+ warn_nonzero(kflag, "kflag")
+ warn_nonzero(vlen, "vlen")
+ warn_nonzero(size, "size")
+ type, index_type, nelems = unpack("III")
+ print(
+ f"[{idx}] {kind_name} '{name}' type_id={type} "
+ f"index_type_id={index_type} nr_elems={nelems}"
+ )
+
+ elif kind in [BTF_KIND_STRUCT, BTF_KIND_UNION]:
+ print(f"[{idx}] {kind_name} '{name}' size={size} vlen={vlen}")
+ if kflag not in [0, 1]:
+ warn(f"<{idx}> kflag should 0 or 1: {kflag}")
+ for _ in range(0, vlen):
+ name_off, type, offset = unpack("III")
+ if kflag == 0:
+ print(
+ f"\t'{string(name_off)}' type_id={type} "
+ f"bits_offset={offset}"
+ )
+ else:
+ bits_offset = offset & 0xFFFFFF
+ bitfield_size = offset >> 24
+ print(
+ f"\t'{string(name_off)}' type_id={type} "
+ f"bits_offset={bits_offset} "
+ f"bitfield_size={bitfield_size}"
+ )
+
+ elif kind == BTF_KIND_ENUM:
+ encoding = ENUM_ENCODING[kflag]
+ print(
+ f"[{idx}] {kind_name} '{name}' encoding={encoding} "
+ f"size={size} vlen={vlen}"
+ )
+ for _ in range(0, vlen):
+ (name_off,) = unpack("I")
+ (val,) = unpack("i" if kflag == 1 else "I")
+ print(f"\t'{string(name_off)}' val={val}")
+
+ elif kind == BTF_KIND_ENUM64:
+ encoding = ENUM_ENCODING[kflag]
+ print(
+ f"[{idx}] {kind_name} '{name}' encoding={encoding} "
+ f"size={size} vlen={vlen}"
+ )
+ for _ in range(0, vlen):
+ name_off, lo, hi = unpack("III")
+ val = hi << 32 | lo
+ if kflag == 1:
+                    # Reinterpret as signed 64-bit; c_int64 is portable
+                    # (c_long is only 32 bits on some platforms).
+                    val = ctypes.c_int64(val).value
+ print(f"\t'{string(name_off)}' val={val}LL")
+
+ elif kind == BTF_KIND_FWD:
+ print(f"[{idx}] {kind_name} '{name}' fwd_kind={FWD_KIND[kflag]}")
+ warn_nonzero(vlen, "vlen")
+ warn_nonzero(size, "size")
+
+ elif kind in [BTF_KIND_TYPEDEF, BTF_KIND_TYPE_TAG]:
+ print(f"[{idx}] {kind_name} '{name}' type_id={size}")
+ warn_nonzero(kflag, "kflag")
+ warn_nonzero(kflag, "vlen")
+
+ elif kind == BTF_KIND_FUNC:
+ linkage = FUNC_LINKAGE[vlen]
+ print(f"[{idx}] {kind_name} '{name}' type_id={size} " f"linkage={linkage}")
+ warn_nonzero(kflag, "kflag")
+
+ elif kind == BTF_KIND_FUNC_PROTO:
+ print(f"[{idx}] {kind_name} '{name}' ret_type_id={size} " f"vlen={vlen}")
+ warn_nonzero(name_off, "name_off")
+ warn_nonzero(kflag, "kflag")
+ for _ in range(0, vlen):
+ name_off, type = unpack("II")
+ print(f"\t'{string(name_off)}' type_id={type}")
+
+ elif kind == BTF_KIND_VAR:
+ (linkage,) = unpack("I")
+ linkage = VAR_LINKAGE[linkage]
+ print(f"[{idx}] {kind_name} '{name}' type_id={size}, " f"linkage={linkage}")
+ warn_nonzero(kflag, "kflag")
+ warn_nonzero(vlen, "vlen")
+
+ elif kind == BTF_KIND_DATASEC:
+ print(f"[{idx}] {kind_name} '{name}' size={size} vlen={vlen}")
+ warn_nonzero(kflag, "kflag")
+ warn_nonzero(size, "size")
+ for _ in range(0, vlen):
+ type, offset, size = unpack("III")
+ print(f"\ttype_id={type} offset={offset} size={size}")
+
+ elif kind == BTF_KIND_FLOAT:
+ print(f"[{idx}] {kind_name} '{name}' size={size}")
+ warn_nonzero(kflag, "kflag")
+ warn_nonzero(vlen, "vlen")
+
+ elif kind == BTF_KIND_DECL_TAG:
+ (component_idx,) = unpack("i")
+ print(
+ f"[{idx}] {kind_name} '{name}' type_id={size} "
+ + f"component_idx={component_idx}"
+ )
+ warn_nonzero(kflag, "kflag")
+ warn_nonzero(vlen, "vlen")
+
+ else:
+ warn(
+ f"<{idx}> Unexpected entry: kind={kind_name} "
+ f"name_off={name_off} "
+ f"vlen={vlen} kflag={kflag} size={size}"
+ )
+
+ idx += 1
+
+
+if __name__ == "__main__":
+ if len(sys.argv) != 2:
+ warn("Usage: {sys.argv[0]} <btf_file>")
+ sys.exit(1)
+ print_btf(sys.argv[1])
diff --git a/llvm/test/CodeGen/BPF/atomics_mem_order_v1.ll b/llvm/test/CodeGen/BPF/atomics_mem_order_v1.ll
new file mode 100644
index 0000000..3108158
--- /dev/null
+++ b/llvm/test/CodeGen/BPF/atomics_mem_order_v1.ll
@@ -0,0 +1,385 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -march=bpfel -mcpu=v1 -filetype=asm < %s | FileCheck %s
+;
+; Source:
+; $ cat atomics_mem_order_v1.c
+; #include <stdatomic.h>
+;
+; void test_fetch_add_32_noret(int _Atomic *i) {
+; (void)__c11_atomic_fetch_add(i, 10, memory_order_relaxed);
+; (void)__c11_atomic_fetch_add(i, 10, memory_order_acquire);
+; (void)__c11_atomic_fetch_add(i, 10, memory_order_release);
+; (void)__c11_atomic_fetch_add(i, 10, memory_order_acq_rel);
+; (void)__c11_atomic_fetch_add(i, 10, memory_order_seq_cst);
+; }
+;
+; void test_fetch_add_64_noret(long _Atomic *i) {
+; (void)__c11_atomic_fetch_add(i, 10, memory_order_relaxed);
+; (void)__c11_atomic_fetch_add(i, 10, memory_order_acquire);
+; (void)__c11_atomic_fetch_add(i, 10, memory_order_release);
+; (void)__c11_atomic_fetch_add(i, 10, memory_order_acq_rel);
+; (void)__c11_atomic_fetch_add(i, 10, memory_order_seq_cst);
+; }
+;
+; void test_fetch_sub_64_noret(long _Atomic *i) {
+; (void)__c11_atomic_fetch_sub(i, 10, memory_order_relaxed);
+; (void)__c11_atomic_fetch_sub(i, 10, memory_order_acquire);
+; (void)__c11_atomic_fetch_sub(i, 10, memory_order_release);
+; (void)__c11_atomic_fetch_sub(i, 10, memory_order_acq_rel);
+; (void)__c11_atomic_fetch_sub(i, 10, memory_order_seq_cst);
+; }
+;
+; long test_fetch_sub_64_ret(long _Atomic *i) {
+; return __c11_atomic_fetch_sub(i, 10, memory_order_acquire) +
+; __c11_atomic_fetch_sub(i, 10, memory_order_release) +
+; __c11_atomic_fetch_sub(i, 10, memory_order_acq_rel) +
+; __c11_atomic_fetch_sub(i, 10, memory_order_seq_cst);
+; }
+;
+; void test_fetch_and_64_noret(long _Atomic *i) {
+; (void)__c11_atomic_fetch_and(i, 10, memory_order_relaxed);
+; (void)__c11_atomic_fetch_and(i, 10, memory_order_acquire);
+; (void)__c11_atomic_fetch_and(i, 10, memory_order_release);
+; (void)__c11_atomic_fetch_and(i, 10, memory_order_acq_rel);
+; (void)__c11_atomic_fetch_and(i, 10, memory_order_seq_cst);
+; }
+;
+; long test_fetch_and_64_ret(long _Atomic *i) {
+; return __c11_atomic_fetch_and(i, 10, memory_order_relaxed) +
+; __c11_atomic_fetch_and(i, 10, memory_order_acquire) +
+; __c11_atomic_fetch_and(i, 10, memory_order_release) +
+; __c11_atomic_fetch_and(i, 10, memory_order_acq_rel) +
+; __c11_atomic_fetch_and(i, 10, memory_order_seq_cst);
+; }
+;
+; void test_fetch_or_64_noret(long _Atomic *i) {
+; (void)__c11_atomic_fetch_or(i, 10, memory_order_relaxed);
+; (void)__c11_atomic_fetch_or(i, 10, memory_order_acquire);
+; (void)__c11_atomic_fetch_or(i, 10, memory_order_release);
+; (void)__c11_atomic_fetch_or(i, 10, memory_order_acq_rel);
+; (void)__c11_atomic_fetch_or(i, 10, memory_order_seq_cst);
+; }
+;
+; long test_fetch_or_64_ret(long _Atomic *i) {
+; return __c11_atomic_fetch_or(i, 10, memory_order_relaxed) +
+; __c11_atomic_fetch_or(i, 10, memory_order_acquire) +
+; __c11_atomic_fetch_or(i, 10, memory_order_release) +
+; __c11_atomic_fetch_or(i, 10, memory_order_acq_rel) +
+; __c11_atomic_fetch_or(i, 10, memory_order_seq_cst);
+; }
+;
+; void test_fetch_xor_64_noret(long _Atomic *i) {
+; (void)__c11_atomic_fetch_xor(i, 10, memory_order_relaxed);
+; (void)__c11_atomic_fetch_xor(i, 10, memory_order_acquire);
+; (void)__c11_atomic_fetch_xor(i, 10, memory_order_release);
+; (void)__c11_atomic_fetch_xor(i, 10, memory_order_acq_rel);
+; (void)__c11_atomic_fetch_xor(i, 10, memory_order_seq_cst);
+; }
+;
+; long test_fetch_xor_64_ret(long _Atomic *i) {
+; return __c11_atomic_fetch_xor(i, 10, memory_order_relaxed) +
+; __c11_atomic_fetch_xor(i, 10, memory_order_acquire) +
+; __c11_atomic_fetch_xor(i, 10, memory_order_release) +
+; __c11_atomic_fetch_xor(i, 10, memory_order_acq_rel) +
+; __c11_atomic_fetch_xor(i, 10, memory_order_seq_cst);
+; }
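+;
+; Note the pattern in the checks below: no-return fetch_add lowers to the
+; non-returning "lock" form for every ordering, whereas for sub/and/or/xor
+; only the relaxed (monotonic) access uses "lock"; every stronger ordering
+; is lowered to the value-returning atomic_fetch_* form (fetch_sub becomes
+; atomic_fetch_add of the negated operand).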
+
+target triple = "bpf"
+
+; Function Attrs: mustprogress nofree norecurse nounwind willreturn memory(argmem: readwrite)
+define dso_local void @test_fetch_add_32_noret(ptr nocapture noundef %i) local_unnamed_addr #0 {
+; CHECK-LABEL: test_fetch_add_32_noret:
+; CHECK: .Ltest_fetch_add_32_noret$local:
+; CHECK-NEXT: .type .Ltest_fetch_add_32_noret$local,@function
+; CHECK-NEXT: # %bb.0: # %entry
+; CHECK-NEXT: r2 = 10
+; CHECK-NEXT: r3 = 10
+; CHECK-NEXT: lock *(u32 *)(r1 + 0) += r3
+; CHECK-NEXT: r3 = 10
+; CHECK-NEXT: lock *(u32 *)(r1 + 0) += r3
+; CHECK-NEXT: r3 = 10
+; CHECK-NEXT: lock *(u32 *)(r1 + 0) += r3
+; CHECK-NEXT: r3 = 10
+; CHECK-NEXT: lock *(u32 *)(r1 + 0) += r3
+; CHECK-NEXT: lock *(u32 *)(r1 + 0) += r2
+; CHECK-NEXT: exit
+entry:
+ %0 = atomicrmw add ptr %i, i32 10 monotonic, align 4
+ %1 = atomicrmw add ptr %i, i32 10 acquire, align 4
+ %2 = atomicrmw add ptr %i, i32 10 release, align 4
+ %3 = atomicrmw add ptr %i, i32 10 acq_rel, align 4
+ %4 = atomicrmw add ptr %i, i32 10 seq_cst, align 4
+ ret void
+}
+
+; Function Attrs: mustprogress nofree norecurse nounwind willreturn memory(argmem: readwrite)
+define dso_local void @test_fetch_add_64_noret(ptr nocapture noundef %i) local_unnamed_addr #0 {
+; CHECK-LABEL: test_fetch_add_64_noret:
+; CHECK: .Ltest_fetch_add_64_noret$local:
+; CHECK-NEXT: .type .Ltest_fetch_add_64_noret$local,@function
+; CHECK-NEXT: # %bb.0: # %entry
+; CHECK-NEXT: r2 = 10
+; CHECK-NEXT: r3 = 10
+; CHECK-NEXT: lock *(u64 *)(r1 + 0) += r3
+; CHECK-NEXT: r3 = 10
+; CHECK-NEXT: lock *(u64 *)(r1 + 0) += r3
+; CHECK-NEXT: r3 = 10
+; CHECK-NEXT: lock *(u64 *)(r1 + 0) += r3
+; CHECK-NEXT: r3 = 10
+; CHECK-NEXT: lock *(u64 *)(r1 + 0) += r3
+; CHECK-NEXT: lock *(u64 *)(r1 + 0) += r2
+; CHECK-NEXT: exit
+entry:
+ %0 = atomicrmw add ptr %i, i64 10 monotonic, align 8
+ %1 = atomicrmw add ptr %i, i64 10 acquire, align 8
+ %2 = atomicrmw add ptr %i, i64 10 release, align 8
+ %3 = atomicrmw add ptr %i, i64 10 acq_rel, align 8
+ %4 = atomicrmw add ptr %i, i64 10 seq_cst, align 8
+ ret void
+}
+
+; Function Attrs: mustprogress nofree norecurse nounwind willreturn memory(argmem: readwrite)
+define dso_local void @test_fetch_sub_64_noret(ptr nocapture noundef %i) local_unnamed_addr #0 {
+; CHECK-LABEL: test_fetch_sub_64_noret:
+; CHECK: .Ltest_fetch_sub_64_noret$local:
+; CHECK-NEXT: .type .Ltest_fetch_sub_64_noret$local,@function
+; CHECK-NEXT: # %bb.0: # %entry
+; CHECK-NEXT: r2 = 10
+; CHECK-NEXT: r2 = -r2
+; CHECK-NEXT: r3 = r2
+; CHECK-NEXT: lock *(u64 *)(r1 + 0) += r3
+; CHECK-NEXT: r3 = r2
+; CHECK-NEXT: r3 = atomic_fetch_add((u64 *)(r1 + 0), r3)
+; CHECK-NEXT: r3 = r2
+; CHECK-NEXT: r3 = atomic_fetch_add((u64 *)(r1 + 0), r3)
+; CHECK-NEXT: r3 = r2
+; CHECK-NEXT: r3 = atomic_fetch_add((u64 *)(r1 + 0), r3)
+; CHECK-NEXT: r2 = atomic_fetch_add((u64 *)(r1 + 0), r2)
+; CHECK-NEXT: exit
+entry:
+ %0 = atomicrmw sub ptr %i, i64 10 monotonic, align 8
+ %1 = atomicrmw sub ptr %i, i64 10 acquire, align 8
+ %2 = atomicrmw sub ptr %i, i64 10 release, align 8
+ %3 = atomicrmw sub ptr %i, i64 10 acq_rel, align 8
+ %4 = atomicrmw sub ptr %i, i64 10 seq_cst, align 8
+ ret void
+}
+
+; Function Attrs: mustprogress nofree norecurse nounwind willreturn memory(argmem: readwrite)
+define dso_local i64 @test_fetch_sub_64_ret(ptr nocapture noundef %i) local_unnamed_addr #0 {
+; CHECK-LABEL: test_fetch_sub_64_ret:
+; CHECK: .Ltest_fetch_sub_64_ret$local:
+; CHECK-NEXT: .type .Ltest_fetch_sub_64_ret$local,@function
+; CHECK-NEXT: # %bb.0: # %entry
+; CHECK-NEXT: r2 = 10
+; CHECK-NEXT: r2 = -r2
+; CHECK-NEXT: r3 = r2
+; CHECK-NEXT: r3 = atomic_fetch_add((u64 *)(r1 + 0), r3)
+; CHECK-NEXT: r0 = r2
+; CHECK-NEXT: r0 = atomic_fetch_add((u64 *)(r1 + 0), r0)
+; CHECK-NEXT: r0 += r3
+; CHECK-NEXT: r3 = r2
+; CHECK-NEXT: r3 = atomic_fetch_add((u64 *)(r1 + 0), r3)
+; CHECK-NEXT: r0 += r3
+; CHECK-NEXT: r2 = atomic_fetch_add((u64 *)(r1 + 0), r2)
+; CHECK-NEXT: r0 += r2
+; CHECK-NEXT: exit
+entry:
+ %0 = atomicrmw sub ptr %i, i64 10 acquire, align 8
+ %1 = atomicrmw sub ptr %i, i64 10 release, align 8
+ %add = add nsw i64 %1, %0
+ %2 = atomicrmw sub ptr %i, i64 10 acq_rel, align 8
+ %add5 = add nsw i64 %add, %2
+ %3 = atomicrmw sub ptr %i, i64 10 seq_cst, align 8
+ %add8 = add nsw i64 %add5, %3
+ ret i64 %add8
+}
+
+; Function Attrs: mustprogress nofree norecurse nounwind willreturn memory(argmem: readwrite)
+define dso_local void @test_fetch_and_64_noret(ptr nocapture noundef %i) local_unnamed_addr #0 {
+; CHECK-LABEL: test_fetch_and_64_noret:
+; CHECK: .Ltest_fetch_and_64_noret$local:
+; CHECK-NEXT: .type .Ltest_fetch_and_64_noret$local,@function
+; CHECK-NEXT: # %bb.0: # %entry
+; CHECK-NEXT: r2 = 10
+; CHECK-NEXT: r3 = 10
+; CHECK-NEXT: lock *(u64 *)(r1 + 0) &= r3
+; CHECK-NEXT: r3 = 10
+; CHECK-NEXT: r3 = atomic_fetch_and((u64 *)(r1 + 0), r3)
+; CHECK-NEXT: r3 = 10
+; CHECK-NEXT: r3 = atomic_fetch_and((u64 *)(r1 + 0), r3)
+; CHECK-NEXT: r3 = 10
+; CHECK-NEXT: r3 = atomic_fetch_and((u64 *)(r1 + 0), r3)
+; CHECK-NEXT: r2 = atomic_fetch_and((u64 *)(r1 + 0), r2)
+; CHECK-NEXT: exit
+entry:
+ %0 = atomicrmw and ptr %i, i64 10 monotonic, align 8
+ %1 = atomicrmw and ptr %i, i64 10 acquire, align 8
+ %2 = atomicrmw and ptr %i, i64 10 release, align 8
+ %3 = atomicrmw and ptr %i, i64 10 acq_rel, align 8
+ %4 = atomicrmw and ptr %i, i64 10 seq_cst, align 8
+ ret void
+}
+
+; Function Attrs: mustprogress nofree norecurse nounwind willreturn memory(argmem: readwrite)
+define dso_local i64 @test_fetch_and_64_ret(ptr nocapture noundef %i) local_unnamed_addr #0 {
+; CHECK-LABEL: test_fetch_and_64_ret:
+; CHECK: .Ltest_fetch_and_64_ret$local:
+; CHECK-NEXT: .type .Ltest_fetch_and_64_ret$local,@function
+; CHECK-NEXT: # %bb.0: # %entry
+; CHECK-NEXT: r2 = 10
+; CHECK-NEXT: r3 = 10
+; CHECK-NEXT: r3 = atomic_fetch_and((u64 *)(r1 + 0), r3)
+; CHECK-NEXT: r0 = 10
+; CHECK-NEXT: r0 = atomic_fetch_and((u64 *)(r1 + 0), r0)
+; CHECK-NEXT: r0 += r3
+; CHECK-NEXT: r3 = 10
+; CHECK-NEXT: r3 = atomic_fetch_and((u64 *)(r1 + 0), r3)
+; CHECK-NEXT: r0 += r3
+; CHECK-NEXT: r3 = 10
+; CHECK-NEXT: r3 = atomic_fetch_and((u64 *)(r1 + 0), r3)
+; CHECK-NEXT: r0 += r3
+; CHECK-NEXT: r2 = atomic_fetch_and((u64 *)(r1 + 0), r2)
+; CHECK-NEXT: r0 += r2
+; CHECK-NEXT: exit
+entry:
+ %0 = atomicrmw and ptr %i, i64 10 monotonic, align 8
+ %1 = atomicrmw and ptr %i, i64 10 acquire, align 8
+ %add = add nsw i64 %1, %0
+ %2 = atomicrmw and ptr %i, i64 10 release, align 8
+ %add5 = add nsw i64 %add, %2
+ %3 = atomicrmw and ptr %i, i64 10 acq_rel, align 8
+ %add8 = add nsw i64 %add5, %3
+ %4 = atomicrmw and ptr %i, i64 10 seq_cst, align 8
+ %add11 = add nsw i64 %add8, %4
+ ret i64 %add11
+}
+
+; Function Attrs: mustprogress nofree norecurse nounwind willreturn memory(argmem: readwrite)
+define dso_local void @test_fetch_or_64_noret(ptr nocapture noundef %i) local_unnamed_addr #0 {
+; CHECK-LABEL: test_fetch_or_64_noret:
+; CHECK: .Ltest_fetch_or_64_noret$local:
+; CHECK-NEXT: .type .Ltest_fetch_or_64_noret$local,@function
+; CHECK-NEXT: # %bb.0: # %entry
+; CHECK-NEXT: r2 = 10
+; CHECK-NEXT: r3 = 10
+; CHECK-NEXT: lock *(u64 *)(r1 + 0) |= r3
+; CHECK-NEXT: r3 = 10
+; CHECK-NEXT: r3 = atomic_fetch_or((u64 *)(r1 + 0), r3)
+; CHECK-NEXT: r3 = 10
+; CHECK-NEXT: r3 = atomic_fetch_or((u64 *)(r1 + 0), r3)
+; CHECK-NEXT: r3 = 10
+; CHECK-NEXT: r3 = atomic_fetch_or((u64 *)(r1 + 0), r3)
+; CHECK-NEXT: r2 = atomic_fetch_or((u64 *)(r1 + 0), r2)
+; CHECK-NEXT: exit
+entry:
+ %0 = atomicrmw or ptr %i, i64 10 monotonic, align 8
+ %1 = atomicrmw or ptr %i, i64 10 acquire, align 8
+ %2 = atomicrmw or ptr %i, i64 10 release, align 8
+ %3 = atomicrmw or ptr %i, i64 10 acq_rel, align 8
+ %4 = atomicrmw or ptr %i, i64 10 seq_cst, align 8
+ ret void
+}
+
+; Function Attrs: mustprogress nofree norecurse nounwind willreturn memory(argmem: readwrite)
+define dso_local i64 @test_fetch_or_64_ret(ptr nocapture noundef %i) local_unnamed_addr #0 {
+; CHECK-LABEL: test_fetch_or_64_ret:
+; CHECK: .Ltest_fetch_or_64_ret$local:
+; CHECK-NEXT: .type .Ltest_fetch_or_64_ret$local,@function
+; CHECK-NEXT: # %bb.0: # %entry
+; CHECK-NEXT: r2 = 10
+; CHECK-NEXT: r3 = 10
+; CHECK-NEXT: r3 = atomic_fetch_or((u64 *)(r1 + 0), r3)
+; CHECK-NEXT: r0 = 10
+; CHECK-NEXT: r0 = atomic_fetch_or((u64 *)(r1 + 0), r0)
+; CHECK-NEXT: r0 += r3
+; CHECK-NEXT: r3 = 10
+; CHECK-NEXT: r3 = atomic_fetch_or((u64 *)(r1 + 0), r3)
+; CHECK-NEXT: r0 += r3
+; CHECK-NEXT: r3 = 10
+; CHECK-NEXT: r3 = atomic_fetch_or((u64 *)(r1 + 0), r3)
+; CHECK-NEXT: r0 += r3
+; CHECK-NEXT: r2 = atomic_fetch_or((u64 *)(r1 + 0), r2)
+; CHECK-NEXT: r0 += r2
+; CHECK-NEXT: exit
+entry:
+ %0 = atomicrmw or ptr %i, i64 10 monotonic, align 8
+ %1 = atomicrmw or ptr %i, i64 10 acquire, align 8
+ %add = add nsw i64 %1, %0
+ %2 = atomicrmw or ptr %i, i64 10 release, align 8
+ %add5 = add nsw i64 %add, %2
+ %3 = atomicrmw or ptr %i, i64 10 acq_rel, align 8
+ %add8 = add nsw i64 %add5, %3
+ %4 = atomicrmw or ptr %i, i64 10 seq_cst, align 8
+ %add11 = add nsw i64 %add8, %4
+ ret i64 %add11
+}
+
+; Function Attrs: mustprogress nofree norecurse nounwind willreturn memory(argmem: readwrite)
+define dso_local void @test_fetch_xor_64_noret(ptr nocapture noundef %i) local_unnamed_addr #0 {
+; CHECK-LABEL: test_fetch_xor_64_noret:
+; CHECK: .Ltest_fetch_xor_64_noret$local:
+; CHECK-NEXT: .type .Ltest_fetch_xor_64_noret$local,@function
+; CHECK-NEXT: # %bb.0: # %entry
+; CHECK-NEXT: r2 = 10
+; CHECK-NEXT: r3 = 10
+; CHECK-NEXT: lock *(u64 *)(r1 + 0) ^= r3
+; CHECK-NEXT: r3 = 10
+; CHECK-NEXT: r3 = atomic_fetch_xor((u64 *)(r1 + 0), r3)
+; CHECK-NEXT: r3 = 10
+; CHECK-NEXT: r3 = atomic_fetch_xor((u64 *)(r1 + 0), r3)
+; CHECK-NEXT: r3 = 10
+; CHECK-NEXT: r3 = atomic_fetch_xor((u64 *)(r1 + 0), r3)
+; CHECK-NEXT: r2 = atomic_fetch_xor((u64 *)(r1 + 0), r2)
+; CHECK-NEXT: exit
+entry:
+ %0 = atomicrmw xor ptr %i, i64 10 monotonic, align 8
+ %1 = atomicrmw xor ptr %i, i64 10 acquire, align 8
+ %2 = atomicrmw xor ptr %i, i64 10 release, align 8
+ %3 = atomicrmw xor ptr %i, i64 10 acq_rel, align 8
+ %4 = atomicrmw xor ptr %i, i64 10 seq_cst, align 8
+ ret void
+}
+
+; Function Attrs: mustprogress nofree norecurse nounwind willreturn memory(argmem: readwrite)
+define dso_local i64 @test_fetch_xor_64_ret(ptr nocapture noundef %i) local_unnamed_addr #0 {
+; CHECK-LABEL: test_fetch_xor_64_ret:
+; CHECK: .Ltest_fetch_xor_64_ret$local:
+; CHECK-NEXT: .type .Ltest_fetch_xor_64_ret$local,@function
+; CHECK-NEXT: # %bb.0: # %entry
+; CHECK-NEXT: r2 = 10
+; CHECK-NEXT: r3 = 10
+; CHECK-NEXT: r3 = atomic_fetch_xor((u64 *)(r1 + 0), r3)
+; CHECK-NEXT: r0 = 10
+; CHECK-NEXT: r0 = atomic_fetch_xor((u64 *)(r1 + 0), r0)
+; CHECK-NEXT: r0 += r3
+; CHECK-NEXT: r3 = 10
+; CHECK-NEXT: r3 = atomic_fetch_xor((u64 *)(r1 + 0), r3)
+; CHECK-NEXT: r0 += r3
+; CHECK-NEXT: r3 = 10
+; CHECK-NEXT: r3 = atomic_fetch_xor((u64 *)(r1 + 0), r3)
+; CHECK-NEXT: r0 += r3
+; CHECK-NEXT: r2 = atomic_fetch_xor((u64 *)(r1 + 0), r2)
+; CHECK-NEXT: r0 += r2
+; CHECK-NEXT: exit
+entry:
+ %0 = atomicrmw xor ptr %i, i64 10 monotonic, align 8
+ %1 = atomicrmw xor ptr %i, i64 10 acquire, align 8
+ %add = add nsw i64 %1, %0
+ %2 = atomicrmw xor ptr %i, i64 10 release, align 8
+ %add5 = add nsw i64 %add, %2
+ %3 = atomicrmw xor ptr %i, i64 10 acq_rel, align 8
+ %add8 = add nsw i64 %add5, %3
+ %4 = atomicrmw xor ptr %i, i64 10 seq_cst, align 8
+ %add11 = add nsw i64 %add8, %4
+ ret i64 %add11
+}
+
+attributes #0 = { mustprogress nofree norecurse nounwind willreturn memory(argmem: readwrite) "frame-pointer"="all" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="v1" }
+
+!llvm.module.flags = !{!0, !1}
+!llvm.ident = !{!2}
+
+!0 = !{i32 1, !"wchar_size", i32 4}
+!1 = !{i32 7, !"frame-pointer", i32 2}
+!2 = !{!"clang version 20.0.0git (git@github.com:yonghong-song/llvm-project.git 6f71e34e194dab5a52cb2211af575c6067e9e504)"}
diff --git a/llvm/test/CodeGen/BPF/atomics_mem_order_v3.ll b/llvm/test/CodeGen/BPF/atomics_mem_order_v3.ll
new file mode 100644
index 0000000..20b9ebc
--- /dev/null
+++ b/llvm/test/CodeGen/BPF/atomics_mem_order_v3.ll
@@ -0,0 +1,781 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -march=bpfel -mcpu=v3 -filetype=asm < %s | FileCheck %s
+;
+; Source:
+; $ cat atomics_mem_order_v3.c
+; #include <stdatomic.h>
+;
+; void test_fetch_add_32_noret(int _Atomic *i) {
+; (void)__c11_atomic_fetch_add(i, 10, memory_order_relaxed);
+; (void)__c11_atomic_fetch_add(i, 10, memory_order_acquire);
+; (void)__c11_atomic_fetch_add(i, 10, memory_order_release);
+; (void)__c11_atomic_fetch_add(i, 10, memory_order_acq_rel);
+; (void)__c11_atomic_fetch_add(i, 10, memory_order_seq_cst);
+; }
+;
+; int test_fetch_add_32_ret(int _Atomic *i) {
+; return __c11_atomic_fetch_add(i, 10, memory_order_relaxed) +
+; __c11_atomic_fetch_add(i, 10, memory_order_acquire) +
+; __c11_atomic_fetch_add(i, 10, memory_order_release) +
+; __c11_atomic_fetch_add(i, 10, memory_order_acq_rel) +
+; __c11_atomic_fetch_add(i, 10, memory_order_seq_cst);
+; }
+;
+; void test_fetch_add_64_noret(long _Atomic *i) {
+; (void)__c11_atomic_fetch_add(i, 10, memory_order_relaxed);
+; (void)__c11_atomic_fetch_add(i, 10, memory_order_acquire);
+; (void)__c11_atomic_fetch_add(i, 10, memory_order_release);
+; (void)__c11_atomic_fetch_add(i, 10, memory_order_acq_rel);
+; (void)__c11_atomic_fetch_add(i, 10, memory_order_seq_cst);
+; }
+;
+; long test_fetch_add_64_ret(long _Atomic *i) {
+; return __c11_atomic_fetch_add(i, 10, memory_order_relaxed) +
+; __c11_atomic_fetch_add(i, 10, memory_order_acquire) +
+; __c11_atomic_fetch_add(i, 10, memory_order_release) +
+; __c11_atomic_fetch_add(i, 10, memory_order_acq_rel) +
+; __c11_atomic_fetch_add(i, 10, memory_order_seq_cst);
+; }
+;
+; void test_fetch_sub_32_noret(int _Atomic *i) {
+; (void)__c11_atomic_fetch_sub(i, 10, memory_order_relaxed);
+; (void)__c11_atomic_fetch_sub(i, 10, memory_order_acquire);
+; (void)__c11_atomic_fetch_sub(i, 10, memory_order_release);
+; (void)__c11_atomic_fetch_sub(i, 10, memory_order_acq_rel);
+; (void)__c11_atomic_fetch_sub(i, 10, memory_order_seq_cst);
+; }
+;
+; int test_fetch_sub_32_ret(int _Atomic *i) {
+; return __c11_atomic_fetch_sub(i, 10, memory_order_relaxed) +
+; __c11_atomic_fetch_sub(i, 10, memory_order_acquire) +
+; __c11_atomic_fetch_sub(i, 10, memory_order_release) +
+; __c11_atomic_fetch_sub(i, 10, memory_order_acq_rel) +
+; __c11_atomic_fetch_sub(i, 10, memory_order_seq_cst);
+; }
+;
+; void test_fetch_sub_64_noret(long _Atomic *i) {
+; (void)__c11_atomic_fetch_sub(i, 10, memory_order_relaxed);
+; (void)__c11_atomic_fetch_sub(i, 10, memory_order_acquire);
+; (void)__c11_atomic_fetch_sub(i, 10, memory_order_release);
+; (void)__c11_atomic_fetch_sub(i, 10, memory_order_acq_rel);
+; (void)__c11_atomic_fetch_sub(i, 10, memory_order_seq_cst);
+; }
+;
+; long test_fetch_sub_64_ret(long _Atomic *i) {
+; return __c11_atomic_fetch_sub(i, 10, memory_order_relaxed) +
+; __c11_atomic_fetch_sub(i, 10, memory_order_acquire) +
+; __c11_atomic_fetch_sub(i, 10, memory_order_release) +
+; __c11_atomic_fetch_sub(i, 10, memory_order_acq_rel) +
+; __c11_atomic_fetch_sub(i, 10, memory_order_seq_cst);
+; }
+;
+; void test_fetch_and_32_noret(int _Atomic *i) {
+; (void)__c11_atomic_fetch_and(i, 10, memory_order_relaxed);
+; (void)__c11_atomic_fetch_and(i, 10, memory_order_acquire);
+; (void)__c11_atomic_fetch_and(i, 10, memory_order_release);
+; (void)__c11_atomic_fetch_and(i, 10, memory_order_acq_rel);
+; (void)__c11_atomic_fetch_and(i, 10, memory_order_seq_cst);
+; }
+;
+; int test_fetch_and_32_ret(int _Atomic *i) {
+; return __c11_atomic_fetch_and(i, 10, memory_order_relaxed) +
+; __c11_atomic_fetch_and(i, 10, memory_order_acquire) +
+; __c11_atomic_fetch_and(i, 10, memory_order_release) +
+; __c11_atomic_fetch_and(i, 10, memory_order_acq_rel) +
+; __c11_atomic_fetch_and(i, 10, memory_order_seq_cst);
+; }
+;
+; void test_fetch_and_64_noret(long _Atomic *i) {
+; (void)__c11_atomic_fetch_and(i, 10, memory_order_relaxed);
+; (void)__c11_atomic_fetch_and(i, 10, memory_order_acquire);
+; (void)__c11_atomic_fetch_and(i, 10, memory_order_release);
+; (void)__c11_atomic_fetch_and(i, 10, memory_order_acq_rel);
+; (void)__c11_atomic_fetch_and(i, 10, memory_order_seq_cst);
+; }
+;
+; long test_fetch_and_64_ret(long _Atomic *i) {
+; return __c11_atomic_fetch_and(i, 10, memory_order_relaxed) +
+; __c11_atomic_fetch_and(i, 10, memory_order_acquire) +
+; __c11_atomic_fetch_and(i, 10, memory_order_release) +
+; __c11_atomic_fetch_and(i, 10, memory_order_acq_rel) +
+; __c11_atomic_fetch_and(i, 10, memory_order_seq_cst);
+; }
+;
+; void test_fetch_or_32_noret(int _Atomic *i) {
+; (void)__c11_atomic_fetch_or(i, 10, memory_order_relaxed);
+; (void)__c11_atomic_fetch_or(i, 10, memory_order_acquire);
+; (void)__c11_atomic_fetch_or(i, 10, memory_order_release);
+; (void)__c11_atomic_fetch_or(i, 10, memory_order_acq_rel);
+; (void)__c11_atomic_fetch_or(i, 10, memory_order_seq_cst);
+; }
+;
+; int test_fetch_or_32_ret(int _Atomic *i) {
+; return __c11_atomic_fetch_or(i, 10, memory_order_relaxed) +
+; __c11_atomic_fetch_or(i, 10, memory_order_acquire) +
+; __c11_atomic_fetch_or(i, 10, memory_order_release) +
+; __c11_atomic_fetch_or(i, 10, memory_order_acq_rel) +
+; __c11_atomic_fetch_or(i, 10, memory_order_seq_cst);
+; }
+;
+; void test_fetch_or_64_noret(long _Atomic *i) {
+; (void)__c11_atomic_fetch_or(i, 10, memory_order_relaxed);
+; (void)__c11_atomic_fetch_or(i, 10, memory_order_acquire);
+; (void)__c11_atomic_fetch_or(i, 10, memory_order_release);
+; (void)__c11_atomic_fetch_or(i, 10, memory_order_acq_rel);
+; (void)__c11_atomic_fetch_or(i, 10, memory_order_seq_cst);
+; }
+;
+; long test_fetch_or_64_ret(long _Atomic *i) {
+; return __c11_atomic_fetch_or(i, 10, memory_order_relaxed) +
+; __c11_atomic_fetch_or(i, 10, memory_order_acquire) +
+; __c11_atomic_fetch_or(i, 10, memory_order_release) +
+; __c11_atomic_fetch_or(i, 10, memory_order_acq_rel) +
+; __c11_atomic_fetch_or(i, 10, memory_order_seq_cst);
+; }
+;
+; void test_fetch_xor_32_noret(int _Atomic *i) {
+; (void)__c11_atomic_fetch_xor(i, 10, memory_order_relaxed);
+; (void)__c11_atomic_fetch_xor(i, 10, memory_order_acquire);
+; (void)__c11_atomic_fetch_xor(i, 10, memory_order_release);
+; (void)__c11_atomic_fetch_xor(i, 10, memory_order_acq_rel);
+; (void)__c11_atomic_fetch_xor(i, 10, memory_order_seq_cst);
+; }
+;
+; int test_fetch_xor_32_ret(int _Atomic *i) {
+; return __c11_atomic_fetch_xor(i, 10, memory_order_relaxed) +
+; __c11_atomic_fetch_xor(i, 10, memory_order_acquire) +
+; __c11_atomic_fetch_xor(i, 10, memory_order_release) +
+; __c11_atomic_fetch_xor(i, 10, memory_order_acq_rel) +
+; __c11_atomic_fetch_xor(i, 10, memory_order_seq_cst);
+; }
+;
+; void test_fetch_xor_64_noret(long _Atomic *i) {
+; (void)__c11_atomic_fetch_xor(i, 10, memory_order_relaxed);
+; (void)__c11_atomic_fetch_xor(i, 10, memory_order_acquire);
+; (void)__c11_atomic_fetch_xor(i, 10, memory_order_release);
+; (void)__c11_atomic_fetch_xor(i, 10, memory_order_acq_rel);
+; (void)__c11_atomic_fetch_xor(i, 10, memory_order_seq_cst);
+; }
+;
+; long test_fetch_xor_64_ret(long _Atomic *i) {
+; return __c11_atomic_fetch_xor(i, 10, memory_order_relaxed) +
+; __c11_atomic_fetch_xor(i, 10, memory_order_acquire) +
+; __c11_atomic_fetch_xor(i, 10, memory_order_release) +
+; __c11_atomic_fetch_xor(i, 10, memory_order_acq_rel) +
+; __c11_atomic_fetch_xor(i, 10, memory_order_seq_cst);
+; }
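+;
+; With -mcpu=v3 only the relaxed (monotonic) operations use the
+; non-returning "lock" form; every stronger ordering is lowered to the
+; value-returning atomic_fetch_* form, for 32- and 64-bit widths alike.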
+
+target triple = "bpf"
+
+; Function Attrs: mustprogress nofree norecurse nounwind willreturn memory(argmem: readwrite)
+define dso_local void @test_fetch_add_32_noret(ptr nocapture noundef %i) local_unnamed_addr #0 {
+; CHECK-LABEL: test_fetch_add_32_noret:
+; CHECK: .Ltest_fetch_add_32_noret$local:
+; CHECK-NEXT: .type .Ltest_fetch_add_32_noret$local,@function
+; CHECK-NEXT: # %bb.0: # %entry
+; CHECK-NEXT: w2 = 10
+; CHECK-NEXT: w3 = 10
+; CHECK-NEXT: lock *(u32 *)(r1 + 0) += w3
+; CHECK-NEXT: w3 = 10
+; CHECK-NEXT: w3 = atomic_fetch_add((u32 *)(r1 + 0), w3)
+; CHECK-NEXT: w3 = 10
+; CHECK-NEXT: w3 = atomic_fetch_add((u32 *)(r1 + 0), w3)
+; CHECK-NEXT: w3 = 10
+; CHECK-NEXT: w3 = atomic_fetch_add((u32 *)(r1 + 0), w3)
+; CHECK-NEXT: w2 = atomic_fetch_add((u32 *)(r1 + 0), w2)
+; CHECK-NEXT: exit
+entry:
+ %0 = atomicrmw add ptr %i, i32 10 monotonic, align 4
+ %1 = atomicrmw add ptr %i, i32 10 acquire, align 4
+ %2 = atomicrmw add ptr %i, i32 10 release, align 4
+ %3 = atomicrmw add ptr %i, i32 10 acq_rel, align 4
+ %4 = atomicrmw add ptr %i, i32 10 seq_cst, align 4
+ ret void
+}
+
+; Function Attrs: mustprogress nofree norecurse nounwind willreturn memory(argmem: readwrite)
+define dso_local i32 @test_fetch_add_32_ret(ptr nocapture noundef %i) local_unnamed_addr #0 {
+; CHECK-LABEL: test_fetch_add_32_ret:
+; CHECK: .Ltest_fetch_add_32_ret$local:
+; CHECK-NEXT: .type .Ltest_fetch_add_32_ret$local,@function
+; CHECK-NEXT: # %bb.0: # %entry
+; CHECK-NEXT: w2 = 10
+; CHECK-NEXT: w3 = 10
+; CHECK-NEXT: lock *(u32 *)(r1 + 0) += w3
+; CHECK-NEXT: w0 = 10
+; CHECK-NEXT: w0 = atomic_fetch_add((u32 *)(r1 + 0), w0)
+; CHECK-NEXT: w0 += w3
+; CHECK-NEXT: w3 = 10
+; CHECK-NEXT: w3 = atomic_fetch_add((u32 *)(r1 + 0), w3)
+; CHECK-NEXT: w0 += w3
+; CHECK-NEXT: w3 = 10
+; CHECK-NEXT: w3 = atomic_fetch_add((u32 *)(r1 + 0), w3)
+; CHECK-NEXT: w0 += w3
+; CHECK-NEXT: w2 = atomic_fetch_add((u32 *)(r1 + 0), w2)
+; CHECK-NEXT: w0 += w2
+; CHECK-NEXT: exit
+entry:
+ %0 = atomicrmw add ptr %i, i32 10 monotonic, align 4
+ %1 = atomicrmw add ptr %i, i32 10 acquire, align 4
+ %add = add nsw i32 %1, %0
+ %2 = atomicrmw add ptr %i, i32 10 release, align 4
+ %add5 = add nsw i32 %add, %2
+ %3 = atomicrmw add ptr %i, i32 10 acq_rel, align 4
+ %add8 = add nsw i32 %add5, %3
+ %4 = atomicrmw add ptr %i, i32 10 seq_cst, align 4
+ %add11 = add nsw i32 %add8, %4
+ ret i32 %add11
+}
+
+; Function Attrs: mustprogress nofree norecurse nounwind willreturn memory(argmem: readwrite)
+define dso_local void @test_fetch_add_64_noret(ptr nocapture noundef %i) local_unnamed_addr #0 {
+; CHECK-LABEL: test_fetch_add_64_noret:
+; CHECK: .Ltest_fetch_add_64_noret$local:
+; CHECK-NEXT: .type .Ltest_fetch_add_64_noret$local,@function
+; CHECK-NEXT: # %bb.0: # %entry
+; CHECK-NEXT: r2 = 10
+; CHECK-NEXT: r3 = 10
+; CHECK-NEXT: lock *(u64 *)(r1 + 0) += r3
+; CHECK-NEXT: r3 = 10
+; CHECK-NEXT: r3 = atomic_fetch_add((u64 *)(r1 + 0), r3)
+; CHECK-NEXT: r3 = 10
+; CHECK-NEXT: r3 = atomic_fetch_add((u64 *)(r1 + 0), r3)
+; CHECK-NEXT: r3 = 10
+; CHECK-NEXT: r3 = atomic_fetch_add((u64 *)(r1 + 0), r3)
+; CHECK-NEXT: r2 = atomic_fetch_add((u64 *)(r1 + 0), r2)
+; CHECK-NEXT: exit
+entry:
+ %0 = atomicrmw add ptr %i, i64 10 monotonic, align 8
+ %1 = atomicrmw add ptr %i, i64 10 acquire, align 8
+ %2 = atomicrmw add ptr %i, i64 10 release, align 8
+ %3 = atomicrmw add ptr %i, i64 10 acq_rel, align 8
+ %4 = atomicrmw add ptr %i, i64 10 seq_cst, align 8
+ ret void
+}
+
+; Function Attrs: mustprogress nofree norecurse nounwind willreturn memory(argmem: readwrite)
+define dso_local i64 @test_fetch_add_64_ret(ptr nocapture noundef %i) local_unnamed_addr #0 {
+; CHECK-LABEL: test_fetch_add_64_ret:
+; CHECK: .Ltest_fetch_add_64_ret$local:
+; CHECK-NEXT: .type .Ltest_fetch_add_64_ret$local,@function
+; CHECK-NEXT: # %bb.0: # %entry
+; CHECK-NEXT: r2 = 10
+; CHECK-NEXT: r3 = 10
+; CHECK-NEXT: lock *(u64 *)(r1 + 0) += r3
+; CHECK-NEXT: r0 = 10
+; CHECK-NEXT: r0 = atomic_fetch_add((u64 *)(r1 + 0), r0)
+; CHECK-NEXT: r0 += r3
+; CHECK-NEXT: r3 = 10
+; CHECK-NEXT: r3 = atomic_fetch_add((u64 *)(r1 + 0), r3)
+; CHECK-NEXT: r0 += r3
+; CHECK-NEXT: r3 = 10
+; CHECK-NEXT: r3 = atomic_fetch_add((u64 *)(r1 + 0), r3)
+; CHECK-NEXT: r0 += r3
+; CHECK-NEXT: r2 = atomic_fetch_add((u64 *)(r1 + 0), r2)
+; CHECK-NEXT: r0 += r2
+; CHECK-NEXT: exit
+entry:
+ %0 = atomicrmw add ptr %i, i64 10 monotonic, align 8
+ %1 = atomicrmw add ptr %i, i64 10 acquire, align 8
+ %add = add nsw i64 %1, %0
+ %2 = atomicrmw add ptr %i, i64 10 release, align 8
+ %add5 = add nsw i64 %add, %2
+ %3 = atomicrmw add ptr %i, i64 10 acq_rel, align 8
+ %add8 = add nsw i64 %add5, %3
+ %4 = atomicrmw add ptr %i, i64 10 seq_cst, align 8
+ %add11 = add nsw i64 %add8, %4
+ ret i64 %add11
+}
+
+; Function Attrs: mustprogress nofree norecurse nounwind willreturn memory(argmem: readwrite)
+define dso_local void @test_fetch_sub_32_noret(ptr nocapture noundef %i) local_unnamed_addr #0 {
+; CHECK-LABEL: test_fetch_sub_32_noret:
+; CHECK: .Ltest_fetch_sub_32_noret$local:
+; CHECK-NEXT: .type .Ltest_fetch_sub_32_noret$local,@function
+; CHECK-NEXT: # %bb.0: # %entry
+; CHECK-NEXT: w2 = 10
+; CHECK-NEXT: w2 = -w2
+; CHECK-NEXT: w3 = w2
+; CHECK-NEXT: lock *(u32 *)(r1 + 0) += w3
+; CHECK-NEXT: w3 = w2
+; CHECK-NEXT: w3 = atomic_fetch_add((u32 *)(r1 + 0), w3)
+; CHECK-NEXT: w3 = w2
+; CHECK-NEXT: w3 = atomic_fetch_add((u32 *)(r1 + 0), w3)
+; CHECK-NEXT: w3 = w2
+; CHECK-NEXT: w3 = atomic_fetch_add((u32 *)(r1 + 0), w3)
+; CHECK-NEXT: w2 = atomic_fetch_add((u32 *)(r1 + 0), w2)
+; CHECK-NEXT: exit
+entry:
+ %0 = atomicrmw sub ptr %i, i32 10 monotonic, align 4
+ %1 = atomicrmw sub ptr %i, i32 10 acquire, align 4
+ %2 = atomicrmw sub ptr %i, i32 10 release, align 4
+ %3 = atomicrmw sub ptr %i, i32 10 acq_rel, align 4
+ %4 = atomicrmw sub ptr %i, i32 10 seq_cst, align 4
+ ret void
+}
+
+; Function Attrs: mustprogress nofree norecurse nounwind willreturn memory(argmem: readwrite)
+define dso_local i32 @test_fetch_sub_32_ret(ptr nocapture noundef %i) local_unnamed_addr #0 {
+; CHECK-LABEL: test_fetch_sub_32_ret:
+; CHECK: .Ltest_fetch_sub_32_ret$local:
+; CHECK-NEXT: .type .Ltest_fetch_sub_32_ret$local,@function
+; CHECK-NEXT: # %bb.0: # %entry
+; CHECK-NEXT: w2 = 10
+; CHECK-NEXT: w2 = -w2
+; CHECK-NEXT: w3 = w2
+; CHECK-NEXT: lock *(u32 *)(r1 + 0) += w3
+; CHECK-NEXT: w0 = w2
+; CHECK-NEXT: w0 = atomic_fetch_add((u32 *)(r1 + 0), w0)
+; CHECK-NEXT: w0 += w3
+; CHECK-NEXT: w3 = w2
+; CHECK-NEXT: w3 = atomic_fetch_add((u32 *)(r1 + 0), w3)
+; CHECK-NEXT: w0 += w3
+; CHECK-NEXT: w3 = w2
+; CHECK-NEXT: w3 = atomic_fetch_add((u32 *)(r1 + 0), w3)
+; CHECK-NEXT: w0 += w3
+; CHECK-NEXT: w2 = atomic_fetch_add((u32 *)(r1 + 0), w2)
+; CHECK-NEXT: w0 += w2
+; CHECK-NEXT: exit
+entry:
+ %0 = atomicrmw sub ptr %i, i32 10 monotonic, align 4
+ %1 = atomicrmw sub ptr %i, i32 10 acquire, align 4
+ %add = add nsw i32 %1, %0
+ %2 = atomicrmw sub ptr %i, i32 10 release, align 4
+ %add5 = add nsw i32 %add, %2
+ %3 = atomicrmw sub ptr %i, i32 10 acq_rel, align 4
+ %add8 = add nsw i32 %add5, %3
+ %4 = atomicrmw sub ptr %i, i32 10 seq_cst, align 4
+ %add11 = add nsw i32 %add8, %4
+ ret i32 %add11
+}
+
+; Function Attrs: mustprogress nofree norecurse nounwind willreturn memory(argmem: readwrite)
+define dso_local void @test_fetch_sub_64_noret(ptr nocapture noundef %i) local_unnamed_addr #0 {
+; CHECK-LABEL: test_fetch_sub_64_noret:
+; CHECK: .Ltest_fetch_sub_64_noret$local:
+; CHECK-NEXT: .type .Ltest_fetch_sub_64_noret$local,@function
+; CHECK-NEXT: # %bb.0: # %entry
+; CHECK-NEXT: r2 = 10
+; CHECK-NEXT: r2 = -r2
+; CHECK-NEXT: r3 = r2
+; CHECK-NEXT: lock *(u64 *)(r1 + 0) += r3
+; CHECK-NEXT: r3 = r2
+; CHECK-NEXT: r3 = atomic_fetch_add((u64 *)(r1 + 0), r3)
+; CHECK-NEXT: r3 = r2
+; CHECK-NEXT: r3 = atomic_fetch_add((u64 *)(r1 + 0), r3)
+; CHECK-NEXT: r3 = r2
+; CHECK-NEXT: r3 = atomic_fetch_add((u64 *)(r1 + 0), r3)
+; CHECK-NEXT: r2 = atomic_fetch_add((u64 *)(r1 + 0), r2)
+; CHECK-NEXT: exit
+entry:
+ %0 = atomicrmw sub ptr %i, i64 10 monotonic, align 8
+ %1 = atomicrmw sub ptr %i, i64 10 acquire, align 8
+ %2 = atomicrmw sub ptr %i, i64 10 release, align 8
+ %3 = atomicrmw sub ptr %i, i64 10 acq_rel, align 8
+ %4 = atomicrmw sub ptr %i, i64 10 seq_cst, align 8
+ ret void
+}
+
+; Function Attrs: mustprogress nofree norecurse nounwind willreturn memory(argmem: readwrite)
+define dso_local i64 @test_fetch_sub_64_ret(ptr nocapture noundef %i) local_unnamed_addr #0 {
+; CHECK-LABEL: test_fetch_sub_64_ret:
+; CHECK: .Ltest_fetch_sub_64_ret$local:
+; CHECK-NEXT: .type .Ltest_fetch_sub_64_ret$local,@function
+; CHECK-NEXT: # %bb.0: # %entry
+; CHECK-NEXT: r2 = 10
+; CHECK-NEXT: r2 = -r2
+; CHECK-NEXT: r3 = r2
+; CHECK-NEXT: lock *(u64 *)(r1 + 0) += r3
+; CHECK-NEXT: r0 = r2
+; CHECK-NEXT: r0 = atomic_fetch_add((u64 *)(r1 + 0), r0)
+; CHECK-NEXT: r0 += r3
+; CHECK-NEXT: r3 = r2
+; CHECK-NEXT: r3 = atomic_fetch_add((u64 *)(r1 + 0), r3)
+; CHECK-NEXT: r0 += r3
+; CHECK-NEXT: r3 = r2
+; CHECK-NEXT: r3 = atomic_fetch_add((u64 *)(r1 + 0), r3)
+; CHECK-NEXT: r0 += r3
+; CHECK-NEXT: r2 = atomic_fetch_add((u64 *)(r1 + 0), r2)
+; CHECK-NEXT: r0 += r2
+; CHECK-NEXT: exit
+entry:
+ %0 = atomicrmw sub ptr %i, i64 10 monotonic, align 8
+ %1 = atomicrmw sub ptr %i, i64 10 acquire, align 8
+ %add = add nsw i64 %1, %0
+ %2 = atomicrmw sub ptr %i, i64 10 release, align 8
+ %add5 = add nsw i64 %add, %2
+ %3 = atomicrmw sub ptr %i, i64 10 acq_rel, align 8
+ %add8 = add nsw i64 %add5, %3
+ %4 = atomicrmw sub ptr %i, i64 10 seq_cst, align 8
+ %add11 = add nsw i64 %add8, %4
+ ret i64 %add11
+}
+
+; Function Attrs: mustprogress nofree norecurse nounwind willreturn memory(argmem: readwrite)
+define dso_local void @test_fetch_and_32_noret(ptr nocapture noundef %i) local_unnamed_addr #0 {
+; CHECK-LABEL: test_fetch_and_32_noret:
+; CHECK: .Ltest_fetch_and_32_noret$local:
+; CHECK-NEXT: .type .Ltest_fetch_and_32_noret$local,@function
+; CHECK-NEXT: # %bb.0: # %entry
+; CHECK-NEXT: w2 = 10
+; CHECK-NEXT: w3 = 10
+; CHECK-NEXT: lock *(u32 *)(r1 + 0) &= w3
+; CHECK-NEXT: w3 = 10
+; CHECK-NEXT: w3 = atomic_fetch_and((u32 *)(r1 + 0), w3)
+; CHECK-NEXT: w3 = 10
+; CHECK-NEXT: w3 = atomic_fetch_and((u32 *)(r1 + 0), w3)
+; CHECK-NEXT: w3 = 10
+; CHECK-NEXT: w3 = atomic_fetch_and((u32 *)(r1 + 0), w3)
+; CHECK-NEXT: w2 = atomic_fetch_and((u32 *)(r1 + 0), w2)
+; CHECK-NEXT: exit
+entry:
+ %0 = atomicrmw and ptr %i, i32 10 monotonic, align 4
+ %1 = atomicrmw and ptr %i, i32 10 acquire, align 4
+ %2 = atomicrmw and ptr %i, i32 10 release, align 4
+ %3 = atomicrmw and ptr %i, i32 10 acq_rel, align 4
+ %4 = atomicrmw and ptr %i, i32 10 seq_cst, align 4
+ ret void
+}
+
+; Function Attrs: mustprogress nofree norecurse nounwind willreturn memory(argmem: readwrite)
+define dso_local i32 @test_fetch_and_32_ret(ptr nocapture noundef %i) local_unnamed_addr #0 {
+; CHECK-LABEL: test_fetch_and_32_ret:
+; CHECK: .Ltest_fetch_and_32_ret$local:
+; CHECK-NEXT: .type .Ltest_fetch_and_32_ret$local,@function
+; CHECK-NEXT: # %bb.0: # %entry
+; CHECK-NEXT: w2 = 10
+; CHECK-NEXT: w3 = 10
+; CHECK-NEXT: lock *(u32 *)(r1 + 0) &= w3
+; CHECK-NEXT: w0 = 10
+; CHECK-NEXT: w0 = atomic_fetch_and((u32 *)(r1 + 0), w0)
+; CHECK-NEXT: w0 += w3
+; CHECK-NEXT: w3 = 10
+; CHECK-NEXT: w3 = atomic_fetch_and((u32 *)(r1 + 0), w3)
+; CHECK-NEXT: w0 += w3
+; CHECK-NEXT: w3 = 10
+; CHECK-NEXT: w3 = atomic_fetch_and((u32 *)(r1 + 0), w3)
+; CHECK-NEXT: w0 += w3
+; CHECK-NEXT: w2 = atomic_fetch_and((u32 *)(r1 + 0), w2)
+; CHECK-NEXT: w0 += w2
+; CHECK-NEXT: exit
+entry:
+ %0 = atomicrmw and ptr %i, i32 10 monotonic, align 4
+ %1 = atomicrmw and ptr %i, i32 10 acquire, align 4
+ %add = add nsw i32 %1, %0
+ %2 = atomicrmw and ptr %i, i32 10 release, align 4
+ %add5 = add nsw i32 %add, %2
+ %3 = atomicrmw and ptr %i, i32 10 acq_rel, align 4
+ %add8 = add nsw i32 %add5, %3
+ %4 = atomicrmw and ptr %i, i32 10 seq_cst, align 4
+ %add11 = add nsw i32 %add8, %4
+ ret i32 %add11
+}
+
+; Function Attrs: mustprogress nofree norecurse nounwind willreturn memory(argmem: readwrite)
+define dso_local void @test_fetch_and_64_noret(ptr nocapture noundef %i) local_unnamed_addr #0 {
+; CHECK-LABEL: test_fetch_and_64_noret:
+; CHECK: .Ltest_fetch_and_64_noret$local:
+; CHECK-NEXT: .type .Ltest_fetch_and_64_noret$local,@function
+; CHECK-NEXT: # %bb.0: # %entry
+; CHECK-NEXT: r2 = 10
+; CHECK-NEXT: r3 = 10
+; CHECK-NEXT: lock *(u64 *)(r1 + 0) &= r3
+; CHECK-NEXT: r3 = 10
+; CHECK-NEXT: r3 = atomic_fetch_and((u64 *)(r1 + 0), r3)
+; CHECK-NEXT: r3 = 10
+; CHECK-NEXT: r3 = atomic_fetch_and((u64 *)(r1 + 0), r3)
+; CHECK-NEXT: r3 = 10
+; CHECK-NEXT: r3 = atomic_fetch_and((u64 *)(r1 + 0), r3)
+; CHECK-NEXT: r2 = atomic_fetch_and((u64 *)(r1 + 0), r2)
+; CHECK-NEXT: exit
+entry:
+ %0 = atomicrmw and ptr %i, i64 10 monotonic, align 8
+ %1 = atomicrmw and ptr %i, i64 10 acquire, align 8
+ %2 = atomicrmw and ptr %i, i64 10 release, align 8
+ %3 = atomicrmw and ptr %i, i64 10 acq_rel, align 8
+ %4 = atomicrmw and ptr %i, i64 10 seq_cst, align 8
+ ret void
+}
+
+; Function Attrs: mustprogress nofree norecurse nounwind willreturn memory(argmem: readwrite)
+define dso_local i64 @test_fetch_and_64_ret(ptr nocapture noundef %i) local_unnamed_addr #0 {
+; CHECK-LABEL: test_fetch_and_64_ret:
+; CHECK: .Ltest_fetch_and_64_ret$local:
+; CHECK-NEXT: .type .Ltest_fetch_and_64_ret$local,@function
+; CHECK-NEXT: # %bb.0: # %entry
+; CHECK-NEXT: r2 = 10
+; CHECK-NEXT: r3 = 10
+; CHECK-NEXT: r3 = atomic_fetch_and((u64 *)(r1 + 0), r3)
+; CHECK-NEXT: r0 = 10
+; CHECK-NEXT: r0 = atomic_fetch_and((u64 *)(r1 + 0), r0)
+; CHECK-NEXT: r0 += r3
+; CHECK-NEXT: r3 = 10
+; CHECK-NEXT: r3 = atomic_fetch_and((u64 *)(r1 + 0), r3)
+; CHECK-NEXT: r0 += r3
+; CHECK-NEXT: r3 = 10
+; CHECK-NEXT: r3 = atomic_fetch_and((u64 *)(r1 + 0), r3)
+; CHECK-NEXT: r0 += r3
+; CHECK-NEXT: r2 = atomic_fetch_and((u64 *)(r1 + 0), r2)
+; CHECK-NEXT: r0 += r2
+; CHECK-NEXT: exit
+entry:
+ %0 = atomicrmw and ptr %i, i64 10 monotonic, align 8
+ %1 = atomicrmw and ptr %i, i64 10 acquire, align 8
+ %add = add nsw i64 %1, %0
+ %2 = atomicrmw and ptr %i, i64 10 release, align 8
+ %add5 = add nsw i64 %add, %2
+ %3 = atomicrmw and ptr %i, i64 10 acq_rel, align 8
+ %add8 = add nsw i64 %add5, %3
+ %4 = atomicrmw and ptr %i, i64 10 seq_cst, align 8
+ %add11 = add nsw i64 %add8, %4
+ ret i64 %add11
+}
+
+; Function Attrs: mustprogress nofree norecurse nounwind willreturn memory(argmem: readwrite)
+define dso_local void @test_fetch_or_32_noret(ptr nocapture noundef %i) local_unnamed_addr #0 {
+; CHECK-LABEL: test_fetch_or_32_noret:
+; CHECK: .Ltest_fetch_or_32_noret$local:
+; CHECK-NEXT: .type .Ltest_fetch_or_32_noret$local,@function
+; CHECK-NEXT: # %bb.0: # %entry
+; CHECK-NEXT: w2 = 10
+; CHECK-NEXT: w3 = 10
+; CHECK-NEXT: lock *(u32 *)(r1 + 0) |= w3
+; CHECK-NEXT: w3 = 10
+; CHECK-NEXT: w3 = atomic_fetch_or((u32 *)(r1 + 0), w3)
+; CHECK-NEXT: w3 = 10
+; CHECK-NEXT: w3 = atomic_fetch_or((u32 *)(r1 + 0), w3)
+; CHECK-NEXT: w3 = 10
+; CHECK-NEXT: w3 = atomic_fetch_or((u32 *)(r1 + 0), w3)
+; CHECK-NEXT: w2 = atomic_fetch_or((u32 *)(r1 + 0), w2)
+; CHECK-NEXT: exit
+entry:
+ %0 = atomicrmw or ptr %i, i32 10 monotonic, align 4
+ %1 = atomicrmw or ptr %i, i32 10 acquire, align 4
+ %2 = atomicrmw or ptr %i, i32 10 release, align 4
+ %3 = atomicrmw or ptr %i, i32 10 acq_rel, align 4
+ %4 = atomicrmw or ptr %i, i32 10 seq_cst, align 4
+ ret void
+}
+
+; Function Attrs: mustprogress nofree norecurse nounwind willreturn memory(argmem: readwrite)
+define dso_local i32 @test_fetch_or_32_ret(ptr nocapture noundef %i) local_unnamed_addr #0 {
+; CHECK-LABEL: test_fetch_or_32_ret:
+; CHECK: .Ltest_fetch_or_32_ret$local:
+; CHECK-NEXT: .type .Ltest_fetch_or_32_ret$local,@function
+; CHECK-NEXT: # %bb.0: # %entry
+; CHECK-NEXT: w2 = 10
+; CHECK-NEXT: w3 = 10
+; CHECK-NEXT: lock *(u32 *)(r1 + 0) |= w3
+; CHECK-NEXT: w0 = 10
+; CHECK-NEXT: w0 = atomic_fetch_or((u32 *)(r1 + 0), w0)
+; CHECK-NEXT: w0 += w3
+; CHECK-NEXT: w3 = 10
+; CHECK-NEXT: w3 = atomic_fetch_or((u32 *)(r1 + 0), w3)
+; CHECK-NEXT: w0 += w3
+; CHECK-NEXT: w3 = 10
+; CHECK-NEXT: w3 = atomic_fetch_or((u32 *)(r1 + 0), w3)
+; CHECK-NEXT: w0 += w3
+; CHECK-NEXT: w2 = atomic_fetch_or((u32 *)(r1 + 0), w2)
+; CHECK-NEXT: w0 += w2
+; CHECK-NEXT: exit
+entry:
+ %0 = atomicrmw or ptr %i, i32 10 monotonic, align 4
+ %1 = atomicrmw or ptr %i, i32 10 acquire, align 4
+ %add = add nsw i32 %1, %0
+ %2 = atomicrmw or ptr %i, i32 10 release, align 4
+ %add5 = add nsw i32 %add, %2
+ %3 = atomicrmw or ptr %i, i32 10 acq_rel, align 4
+ %add8 = add nsw i32 %add5, %3
+ %4 = atomicrmw or ptr %i, i32 10 seq_cst, align 4
+ %add11 = add nsw i32 %add8, %4
+ ret i32 %add11
+}
+
+; Function Attrs: mustprogress nofree norecurse nounwind willreturn memory(argmem: readwrite)
+define dso_local void @test_fetch_or_64_noret(ptr nocapture noundef %i) local_unnamed_addr #0 {
+; CHECK-LABEL: test_fetch_or_64_noret:
+; CHECK: .Ltest_fetch_or_64_noret$local:
+; CHECK-NEXT: .type .Ltest_fetch_or_64_noret$local,@function
+; CHECK-NEXT: # %bb.0: # %entry
+; CHECK-NEXT: r2 = 10
+; CHECK-NEXT: r3 = 10
+; CHECK-NEXT: lock *(u64 *)(r1 + 0) |= r3
+; CHECK-NEXT: r3 = 10
+; CHECK-NEXT: r3 = atomic_fetch_or((u64 *)(r1 + 0), r3)
+; CHECK-NEXT: r3 = 10
+; CHECK-NEXT: r3 = atomic_fetch_or((u64 *)(r1 + 0), r3)
+; CHECK-NEXT: r3 = 10
+; CHECK-NEXT: r3 = atomic_fetch_or((u64 *)(r1 + 0), r3)
+; CHECK-NEXT: r2 = atomic_fetch_or((u64 *)(r1 + 0), r2)
+; CHECK-NEXT: exit
+entry:
+ %0 = atomicrmw or ptr %i, i64 10 monotonic, align 8
+ %1 = atomicrmw or ptr %i, i64 10 acquire, align 8
+ %2 = atomicrmw or ptr %i, i64 10 release, align 8
+ %3 = atomicrmw or ptr %i, i64 10 acq_rel, align 8
+ %4 = atomicrmw or ptr %i, i64 10 seq_cst, align 8
+ ret void
+}
+
+; Function Attrs: mustprogress nofree norecurse nounwind willreturn memory(argmem: readwrite)
+define dso_local i64 @test_fetch_or_64_ret(ptr nocapture noundef %i) local_unnamed_addr #0 {
+; CHECK-LABEL: test_fetch_or_64_ret:
+; CHECK: .Ltest_fetch_or_64_ret$local:
+; CHECK-NEXT: .type .Ltest_fetch_or_64_ret$local,@function
+; CHECK-NEXT: # %bb.0: # %entry
+; CHECK-NEXT: r2 = 10
+; CHECK-NEXT: r3 = 10
+; CHECK-NEXT: r3 = atomic_fetch_or((u64 *)(r1 + 0), r3)
+; CHECK-NEXT: r0 = 10
+; CHECK-NEXT: r0 = atomic_fetch_or((u64 *)(r1 + 0), r0)
+; CHECK-NEXT: r0 += r3
+; CHECK-NEXT: r3 = 10
+; CHECK-NEXT: r3 = atomic_fetch_or((u64 *)(r1 + 0), r3)
+; CHECK-NEXT: r0 += r3
+; CHECK-NEXT: r3 = 10
+; CHECK-NEXT: r3 = atomic_fetch_or((u64 *)(r1 + 0), r3)
+; CHECK-NEXT: r0 += r3
+; CHECK-NEXT: r2 = atomic_fetch_or((u64 *)(r1 + 0), r2)
+; CHECK-NEXT: r0 += r2
+; CHECK-NEXT: exit
+entry:
+ %0 = atomicrmw or ptr %i, i64 10 monotonic, align 8
+ %1 = atomicrmw or ptr %i, i64 10 acquire, align 8
+ %add = add nsw i64 %1, %0
+ %2 = atomicrmw or ptr %i, i64 10 release, align 8
+ %add5 = add nsw i64 %add, %2
+ %3 = atomicrmw or ptr %i, i64 10 acq_rel, align 8
+ %add8 = add nsw i64 %add5, %3
+ %4 = atomicrmw or ptr %i, i64 10 seq_cst, align 8
+ %add11 = add nsw i64 %add8, %4
+ ret i64 %add11
+}
+
+; Function Attrs: mustprogress nofree norecurse nounwind willreturn memory(argmem: readwrite)
+define dso_local void @test_fetch_xor_32_noret(ptr nocapture noundef %i) local_unnamed_addr #0 {
+; CHECK-LABEL: test_fetch_xor_32_noret:
+; CHECK: .Ltest_fetch_xor_32_noret$local:
+; CHECK-NEXT: .type .Ltest_fetch_xor_32_noret$local,@function
+; CHECK-NEXT: # %bb.0: # %entry
+; CHECK-NEXT: w2 = 10
+; CHECK-NEXT: w3 = 10
+; CHECK-NEXT: lock *(u32 *)(r1 + 0) ^= w3
+; CHECK-NEXT: w3 = 10
+; CHECK-NEXT: w3 = atomic_fetch_xor((u32 *)(r1 + 0), w3)
+; CHECK-NEXT: w3 = 10
+; CHECK-NEXT: w3 = atomic_fetch_xor((u32 *)(r1 + 0), w3)
+; CHECK-NEXT: w3 = 10
+; CHECK-NEXT: w3 = atomic_fetch_xor((u32 *)(r1 + 0), w3)
+; CHECK-NEXT: w2 = atomic_fetch_xor((u32 *)(r1 + 0), w2)
+; CHECK-NEXT: exit
+entry:
+ %0 = atomicrmw xor ptr %i, i32 10 monotonic, align 4
+ %1 = atomicrmw xor ptr %i, i32 10 acquire, align 4
+ %2 = atomicrmw xor ptr %i, i32 10 release, align 4
+ %3 = atomicrmw xor ptr %i, i32 10 acq_rel, align 4
+ %4 = atomicrmw xor ptr %i, i32 10 seq_cst, align 4
+ ret void
+}
+
+; Function Attrs: mustprogress nofree norecurse nounwind willreturn memory(argmem: readwrite)
+define dso_local i32 @test_fetch_xor_32_ret(ptr nocapture noundef %i) local_unnamed_addr #0 {
+; CHECK-LABEL: test_fetch_xor_32_ret:
+; CHECK: .Ltest_fetch_xor_32_ret$local:
+; CHECK-NEXT: .type .Ltest_fetch_xor_32_ret$local,@function
+; CHECK-NEXT: # %bb.0: # %entry
+; CHECK-NEXT: w2 = 10
+; CHECK-NEXT: w3 = 10
+; CHECK-NEXT: lock *(u32 *)(r1 + 0) ^= w3
+; CHECK-NEXT: w0 = 10
+; CHECK-NEXT: w0 = atomic_fetch_xor((u32 *)(r1 + 0), w0)
+; CHECK-NEXT: w0 += w3
+; CHECK-NEXT: w3 = 10
+; CHECK-NEXT: w3 = atomic_fetch_xor((u32 *)(r1 + 0), w3)
+; CHECK-NEXT: w0 += w3
+; CHECK-NEXT: w3 = 10
+; CHECK-NEXT: w3 = atomic_fetch_xor((u32 *)(r1 + 0), w3)
+; CHECK-NEXT: w0 += w3
+; CHECK-NEXT: w2 = atomic_fetch_xor((u32 *)(r1 + 0), w2)
+; CHECK-NEXT: w0 += w2
+; CHECK-NEXT: exit
+entry:
+ %0 = atomicrmw xor ptr %i, i32 10 monotonic, align 4
+ %1 = atomicrmw xor ptr %i, i32 10 acquire, align 4
+ %add = add nsw i32 %1, %0
+ %2 = atomicrmw xor ptr %i, i32 10 release, align 4
+ %add5 = add nsw i32 %add, %2
+ %3 = atomicrmw xor ptr %i, i32 10 acq_rel, align 4
+ %add8 = add nsw i32 %add5, %3
+ %4 = atomicrmw xor ptr %i, i32 10 seq_cst, align 4
+ %add11 = add nsw i32 %add8, %4
+ ret i32 %add11
+}
+
+; Function Attrs: mustprogress nofree norecurse nounwind willreturn memory(argmem: readwrite)
+define dso_local void @test_fetch_xor_64_noret(ptr nocapture noundef %i) local_unnamed_addr #0 {
+; CHECK-LABEL: test_fetch_xor_64_noret:
+; CHECK: .Ltest_fetch_xor_64_noret$local:
+; CHECK-NEXT: .type .Ltest_fetch_xor_64_noret$local,@function
+; CHECK-NEXT: # %bb.0: # %entry
+; CHECK-NEXT: r2 = 10
+; CHECK-NEXT: r3 = 10
+; CHECK-NEXT: lock *(u64 *)(r1 + 0) ^= r3
+; CHECK-NEXT: r3 = 10
+; CHECK-NEXT: r3 = atomic_fetch_xor((u64 *)(r1 + 0), r3)
+; CHECK-NEXT: r3 = 10
+; CHECK-NEXT: r3 = atomic_fetch_xor((u64 *)(r1 + 0), r3)
+; CHECK-NEXT: r3 = 10
+; CHECK-NEXT: r3 = atomic_fetch_xor((u64 *)(r1 + 0), r3)
+; CHECK-NEXT: r2 = atomic_fetch_xor((u64 *)(r1 + 0), r2)
+; CHECK-NEXT: exit
+entry:
+ %0 = atomicrmw xor ptr %i, i64 10 monotonic, align 8
+ %1 = atomicrmw xor ptr %i, i64 10 acquire, align 8
+ %2 = atomicrmw xor ptr %i, i64 10 release, align 8
+ %3 = atomicrmw xor ptr %i, i64 10 acq_rel, align 8
+ %4 = atomicrmw xor ptr %i, i64 10 seq_cst, align 8
+ ret void
+}
+
+; Function Attrs: mustprogress nofree norecurse nounwind willreturn memory(argmem: readwrite)
+define dso_local i64 @test_fetch_xor_64_ret(ptr nocapture noundef %i) local_unnamed_addr #0 {
+; CHECK-LABEL: test_fetch_xor_64_ret:
+; CHECK: .Ltest_fetch_xor_64_ret$local:
+; CHECK-NEXT: .type .Ltest_fetch_xor_64_ret$local,@function
+; CHECK-NEXT: # %bb.0: # %entry
+; CHECK-NEXT: r2 = 10
+; CHECK-NEXT: r3 = 10
+; CHECK-NEXT: r3 = atomic_fetch_xor((u64 *)(r1 + 0), r3)
+; CHECK-NEXT: r0 = 10
+; CHECK-NEXT: r0 = atomic_fetch_xor((u64 *)(r1 + 0), r0)
+; CHECK-NEXT: r0 += r3
+; CHECK-NEXT: r3 = 10
+; CHECK-NEXT: r3 = atomic_fetch_xor((u64 *)(r1 + 0), r3)
+; CHECK-NEXT: r0 += r3
+; CHECK-NEXT: r3 = 10
+; CHECK-NEXT: r3 = atomic_fetch_xor((u64 *)(r1 + 0), r3)
+; CHECK-NEXT: r0 += r3
+; CHECK-NEXT: r2 = atomic_fetch_xor((u64 *)(r1 + 0), r2)
+; CHECK-NEXT: r0 += r2
+; CHECK-NEXT: exit
+entry:
+ %0 = atomicrmw xor ptr %i, i64 10 monotonic, align 8
+ %1 = atomicrmw xor ptr %i, i64 10 acquire, align 8
+ %add = add nsw i64 %1, %0
+ %2 = atomicrmw xor ptr %i, i64 10 release, align 8
+ %add5 = add nsw i64 %add, %2
+ %3 = atomicrmw xor ptr %i, i64 10 acq_rel, align 8
+ %add8 = add nsw i64 %add5, %3
+ %4 = atomicrmw xor ptr %i, i64 10 seq_cst, align 8
+ %add11 = add nsw i64 %add8, %4
+ ret i64 %add11
+}
+
+attributes #0 = { mustprogress nofree norecurse nounwind willreturn memory(argmem: readwrite) "frame-pointer"="all" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="v3" }
+
+!llvm.module.flags = !{!0, !1}
+!llvm.ident = !{!2}
+
+!0 = !{i32 1, !"wchar_size", i32 4}
+!1 = !{i32 7, !"frame-pointer", i32 2}
+!2 = !{!"clang version 20.0.0git (git@github.com:yonghong-song/llvm-project.git 6f71e34e194dab5a52cb2211af575c6067e9e504)"}
diff --git a/llvm/test/CodeGen/BPF/atomics_sub64_relaxed_v1.ll b/llvm/test/CodeGen/BPF/atomics_sub64_relaxed_v1.ll
new file mode 100644
index 0000000..4d630d4
--- /dev/null
+++ b/llvm/test/CodeGen/BPF/atomics_sub64_relaxed_v1.ll
@@ -0,0 +1,27 @@
+; RUN: not llc -march=bpfel -mcpu=v1 -filetype=asm < %s
+;
+; Source:
+; $ cat atomics_sub64_relaxed_v1.c
+; #include <stdatomic.h>
+;
+; long test_fetch_sub_64_ret(long _Atomic *i) {
+; return __c11_atomic_fetch_sub(i, 10, memory_order_relaxed);
+; }
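+;
+; Note: cpu v1 only provides the non-fetching lock-style add, so an atomicrmw
+; sub whose result is used cannot be selected; the `not` in the RUN line above
+; expects llc to fail for this input.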
+
+target triple = "bpf"
+
+; Function Attrs: mustprogress nofree norecurse nounwind willreturn memory(argmem: readwrite)
+define dso_local i64 @test_fetch_sub_64_ret(ptr nocapture noundef %i) local_unnamed_addr #0 {
+entry:
+ %0 = atomicrmw sub ptr %i, i64 10 monotonic, align 8
+ ret i64 %0
+}
+
+attributes #0 = { mustprogress nofree norecurse nounwind willreturn memory(argmem: readwrite) "frame-pointer"="all" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="v1" }
+
+!llvm.module.flags = !{!0, !1}
+!llvm.ident = !{!2}
+
+!0 = !{i32 1, !"wchar_size", i32 4}
+!1 = !{i32 7, !"frame-pointer", i32 2}
+!2 = !{!"clang version 20.0.0git (git@github.com:yonghong-song/llvm-project.git 6f71e34e194dab5a52cb2211af575c6067e9e504)"}
diff --git a/llvm/test/CodeGen/BPF/xaddd_v1.ll b/llvm/test/CodeGen/BPF/xaddd_v1.ll
new file mode 100644
index 0000000..d3bfd8d
--- /dev/null
+++ b/llvm/test/CodeGen/BPF/xaddd_v1.ll
@@ -0,0 +1,25 @@
+; RUN: not llc -march=bpfel -mcpu=v1 -filetype=asm < %s
+;
+; Source:
+; $ cat xaddd_v1.c
+; long test_fetch_add_64_ret(long *i) {
+; return __sync_fetch_and_add(i, 10);
+; }
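+;
+; __sync_fetch_and_add is seq_cst and its result is used, which needs the
+; fetching atomic_fetch_add form that cpu v1 does not provide (the companion
+; tests above use -mcpu=v3 for it), so llc is expected to fail here.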
+
+target triple = "bpf"
+
+; Function Attrs: mustprogress nofree norecurse nounwind willreturn memory(argmem: readwrite)
+define dso_local i64 @test_fetch_add_64_ret(ptr nocapture noundef %i) local_unnamed_addr #0 {
+entry:
+ %0 = atomicrmw add ptr %i, i64 10 seq_cst, align 8
+ ret i64 %0
+}
+
+attributes #0 = { mustprogress nofree norecurse nounwind willreturn memory(argmem: readwrite) "frame-pointer"="all" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="v1" }
+
+!llvm.module.flags = !{!0, !1}
+!llvm.ident = !{!2}
+
+!0 = !{i32 1, !"wchar_size", i32 4}
+!1 = !{i32 7, !"frame-pointer", i32 2}
+!2 = !{!"clang version 20.0.0git (git@github.com:yonghong-song/llvm-project.git 6f71e34e194dab5a52cb2211af575c6067e9e504)"}
diff --git a/llvm/test/CodeGen/DirectX/Metadata/lib-entries.ll b/llvm/test/CodeGen/DirectX/Metadata/lib-entries.ll
new file mode 100644
index 0000000..e2f2a48
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/Metadata/lib-entries.ll
@@ -0,0 +1,37 @@
+; RUN: opt -S -dxil-translate-metadata %s 2>&1 | FileCheck %s
+target triple = "dxil-pc-shadermodel6.8-library"
+
+
+; CHECK: !dx.shaderModel = !{![[SM:[0-9]+]]}
+; CHECK: !dx.version = !{![[DXVER:[0-9]+]]}
+; CHECK: !dx.entryPoints = !{![[LIB:[0-9]+]], ![[AS:[0-9]+]], ![[MS:[0-9]+]], ![[CS:[0-9]+]]}
+
+; CHECK: ![[SM]] = !{!"lib", i32 6, i32 8}
+; CHECK: ![[DXVER]] = !{i32 1, i32 8}
+; CHECK: ![[LIB]] = !{null, !"", null, null, null}
+; CHECK: ![[AS]] = !{ptr @entry_as, !"entry_as", null, null, ![[AS_SF:[0-9]*]]}
+; CHECK: ![[AS_SF]] = !{i32 8, i32 14}
+; CHECK: ![[MS]] = !{ptr @entry_ms, !"entry_ms", null, null, ![[MS_SF:[0-9]*]]}
+; CHECK: ![[MS_SF]] = !{i32 8, i32 13}
+; CHECK: ![[CS]] = !{ptr @entry_cs, !"entry_cs", null, null, ![[CS_SF:[0-9]*]]}
+; CHECK: ![[CS_SF]] = !{i32 8, i32 5, i32 4, ![[CS_NT:[0-9]*]]}
+; CHECK: !{i32 1, i32 2, i32 1}
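+;
+; The per-entry property lists above are (tag, value) pairs: tag 8 appears to
+; carry the shader kind (14 = amplification, 13 = mesh, 5 = compute) and tag 4
+; the numthreads tuple taken from the attributes below.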
+
+define void @entry_as() #0 {
+entry:
+ ret void
+}
+
+define i32 @entry_ms(i32 %a) #1 {
+entry:
+ ret i32 %a
+}
+
+define float @entry_cs(float %f) #3 {
+entry:
+ ret float %f
+}
+
+attributes #0 = { noinline nounwind "hlsl.shader"="amplification" }
+attributes #1 = { noinline nounwind "hlsl.shader"="mesh" }
+attributes #3 = { noinline nounwind "hlsl.numthreads"="1,2,1" "hlsl.shader"="compute" }
diff --git a/llvm/test/CodeGen/DirectX/Metadata/multiple-entries-cs-error.ll b/llvm/test/CodeGen/DirectX/Metadata/multiple-entries-cs-error.ll
new file mode 100644
index 0000000..9697d438
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/Metadata/multiple-entries-cs-error.ll
@@ -0,0 +1,23 @@
+; RUN: not opt -S -dxil-translate-metadata %s 2>&1 | FileCheck %s
+target triple = "dxil-pc-shadermodel6.8-compute"
+
+; CHECK: Non-library shader: One and only one entry expected
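+;
+; A compute (non-library) target admits exactly one entry point; defining
+; three shader entries below is what triggers the diagnostic checked above.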
+
+define void @entry_as() #0 {
+entry:
+ ret void
+}
+
+define i32 @entry_ms(i32 %a) #1 {
+entry:
+ ret i32 %a
+}
+
+define float @entry_cs(float %f) #3 {
+entry:
+ ret float %f
+}
+
+attributes #0 = { noinline nounwind "hlsl.shader"="amplification" }
+attributes #1 = { noinline nounwind "hlsl.shader"="mesh" }
+attributes #3 = { noinline nounwind "hlsl.numthreads"="1,2,1" "hlsl.shader"="compute" }
diff --git a/llvm/test/CodeGen/DirectX/Metadata/target-profile-error.ll b/llvm/test/CodeGen/DirectX/Metadata/target-profile-error.ll
new file mode 100644
index 0000000..671406c
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/Metadata/target-profile-error.ll
@@ -0,0 +1,12 @@
+; RUN: not opt -S -dxil-translate-metadata %s 2>&1 | FileCheck %s
+
+target triple = "dxil-pc-shadermodel6.6-pixel"
+
+; CHECK: Shader stage 'cs' for entry 'entry' different from specified target profile 'pixel'
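+;
+; The entry's stage attribute names compute ("cs") while the triple requests
+; the pixel profile; the diagnostic above reports that mismatch.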
+
+define void @entry() #0 {
+entry:
+ ret void
+}
+
+attributes #0 = { noinline nounwind "exp-shader"="cs" "hlsl.numthreads"="1,2,1" "hlsl.shader"="compute" }
diff --git a/llvm/test/CodeGen/DirectX/legalize-module-flags.ll b/llvm/test/CodeGen/DirectX/legalize-module-flags.ll
index 1483a87e..6c29dea 100644
--- a/llvm/test/CodeGen/DirectX/legalize-module-flags.ll
+++ b/llvm/test/CodeGen/DirectX/legalize-module-flags.ll
@@ -1,4 +1,4 @@
-; RUN: opt -S -dxil-prepare < %s | FileCheck %s
+; RUN: opt -S -dxil-prepare -mtriple=dxil-unknown-shadermodel6.0-compute %s | FileCheck %s
; Make sure behavior flag > 6 is fixed.
; CHECK: !{i32 2, !"frame-pointer", i32 2}
diff --git a/llvm/test/CodeGen/DirectX/legalize-module-flags2.ll b/llvm/test/CodeGen/DirectX/legalize-module-flags2.ll
index e1803b4..244ec8d 100644
--- a/llvm/test/CodeGen/DirectX/legalize-module-flags2.ll
+++ b/llvm/test/CodeGen/DirectX/legalize-module-flags2.ll
@@ -1,4 +1,4 @@
-; RUN: opt -S -dxil-prepare < %s | FileCheck %s
+; RUN: opt -S -dxil-prepare -mtriple=dxil-unknown-shadermodel6.0-library %s | FileCheck %s
; CHECK: define void @main()
; Make sure behavior flag > 6 is fixed.
diff --git a/llvm/test/CodeGen/DirectX/strip-call-attrs.ll b/llvm/test/CodeGen/DirectX/strip-call-attrs.ll
index f530e12..e232ab2 100644
--- a/llvm/test/CodeGen/DirectX/strip-call-attrs.ll
+++ b/llvm/test/CodeGen/DirectX/strip-call-attrs.ll
@@ -1,6 +1,6 @@
; RUN: opt -S -dxil-prepare < %s | FileCheck %s
-target triple = "dxil-unknown-unknown"
+target triple = "dxil-unknown-shadermodel6.0-library"
@f = internal unnamed_addr global float 0.000000e+00, align 4
@llvm.global_ctors = appending global [1 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 65535, ptr @_GLOBAL__sub_I_static_global.hlsl, ptr null }]
diff --git a/llvm/test/CodeGen/DirectX/typed_ptr.ll b/llvm/test/CodeGen/DirectX/typed_ptr.ll
index 5453e876..355c4f1 100644
--- a/llvm/test/CodeGen/DirectX/typed_ptr.ll
+++ b/llvm/test/CodeGen/DirectX/typed_ptr.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 3
; RUN: opt -S -dxil-prepare < %s | FileCheck %s
-target triple = "dxil-unknown-unknown"
+target triple = "dxil-unknown-shadermodel6.0-compute"
@gs = external addrspace(3) global [20 x [6 x float]], align 4
diff --git a/llvm/test/CodeGen/Generic/allow-check.ll b/llvm/test/CodeGen/Generic/allow-check.ll
index a084889..148ee81 100644
--- a/llvm/test/CodeGen/Generic/allow-check.ll
+++ b/llvm/test/CodeGen/Generic/allow-check.ll
@@ -1,5 +1,5 @@
; Avoid `!DL->isLittleEndian() && !CLI->enableBigEndian()` mismatch on PPC64BE.
-; REQUIRES: host-byteorder-little-endian
+; REQUIRES: target-byteorder-little-endian
; -global-isel=1 is unsupported.
; XFAIL: target=loongarch{{.*}}
diff --git a/llvm/test/CodeGen/Hexagon/expand-condsets-impuse2.mir b/llvm/test/CodeGen/Hexagon/expand-condsets-impuse2.mir
index ae3f4ba..ebb361a 100644
--- a/llvm/test/CodeGen/Hexagon/expand-condsets-impuse2.mir
+++ b/llvm/test/CodeGen/Hexagon/expand-condsets-impuse2.mir
@@ -6,12 +6,12 @@
name: f0
tracksRegLiveness: true
+isSSA: false
body: |
bb.0:
successors: %bb.1
liveins: $r0, $r1
%0:intregs = COPY $r0
- %0:intregs = COPY $r0 ; defeat IsSSA detection
%1:intregs = COPY $r1
%2:intregs = COPY $r0
%3:intregs = M2_mpyi %2, %1
diff --git a/llvm/test/CodeGen/Hexagon/expand-condsets-phys-reg.mir b/llvm/test/CodeGen/Hexagon/expand-condsets-phys-reg.mir
index e62cd1c..d252ec5 100644
--- a/llvm/test/CodeGen/Hexagon/expand-condsets-phys-reg.mir
+++ b/llvm/test/CodeGen/Hexagon/expand-condsets-phys-reg.mir
@@ -9,12 +9,12 @@
name: fred
tracksRegLiveness: true
+isSSA: false
body: |
bb.0:
successors: %bb.1, %bb.2
liveins: $r0
- %0:intregs = A2_tfrsi 0 ;; Multiple defs to ensure IsSSA = false
%0:intregs = L2_loadri_io $r0, 0
%1:predregs = C2_cmpgti %0, 10
%2:intregs = C2_mux %1, $r31, %0
diff --git a/llvm/test/CodeGen/Hexagon/expand-condsets-rm-reg.mir b/llvm/test/CodeGen/Hexagon/expand-condsets-rm-reg.mir
index 6d7b6cd..463aa9a 100644
--- a/llvm/test/CodeGen/Hexagon/expand-condsets-rm-reg.mir
+++ b/llvm/test/CodeGen/Hexagon/expand-condsets-rm-reg.mir
@@ -20,6 +20,7 @@
name: fred
tracksRegLiveness: true
+isSSA: false
registers:
- { id: 0, class: intregs }
- { id: 1, class: intregs }
@@ -35,7 +36,6 @@ body: |
bb.0:
liveins: $r0, $r1, $p0
%0 = COPY $r0
- %0 = COPY $r0 ; Force isSSA = false.
%1 = COPY $r1
%2 = COPY $p0
; Check that %3 was coalesced into %4.
diff --git a/llvm/test/CodeGen/MIR/Generic/machine-function-optionally-computed-properties-conflict.mir b/llvm/test/CodeGen/MIR/Generic/machine-function-optionally-computed-properties-conflict.mir
new file mode 100644
index 0000000..d8d178d
--- /dev/null
+++ b/llvm/test/CodeGen/MIR/Generic/machine-function-optionally-computed-properties-conflict.mir
@@ -0,0 +1,35 @@
+# RUN: not llc -run-pass none -o /dev/null %s 2>&1 | FileCheck %s
+
+# Test that computed properties are not conflicting with explicitly set
+# properties
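+# Even when a property (noPhis/isSSA/noVRegs) is spelled out in the YAML, the
+# parser still validates it against the function body and raises the errors
+# checked below when the body contradicts it.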
+
+---
+# CHECK: error: {{.*}}: TestNoPhisOverrideConflict has explicit property NoPhi, but contains at least one PHI
+name: TestNoPhisOverrideConflict
+noPhis: true
+tracksRegLiveness: true
+body: |
+ bb.0:
+ %0:_(s32) = G_IMPLICIT_DEF
+
+ bb.1:
+ %1:_(s32) = PHI %0, %bb.0, %1, %bb.1
+ G_BR %bb.1
+...
+---
+# CHECK: error: {{.*}}: TestIsSSAOverrideConflict has explicit property IsSSA, but is not valid SSA
+name: TestIsSSAOverrideConflict
+isSSA: true
+body: |
+ bb.0:
+ %0:_(s32) = G_IMPLICIT_DEF
+ %0:_(s32) = G_IMPLICIT_DEF
+...
+---
+# CHECK: error: {{.*}}: TestNoVRegsOverrideConflict has explicit property NoVRegs, but contains virtual registers
+name: TestNoVRegsOverrideConflict
+noVRegs: true
+body: |
+ bb.0:
+ %0:_(s32) = G_IMPLICIT_DEF
+...
diff --git a/llvm/test/CodeGen/MIR/Generic/machine-function-optionally-computed-properties.mir b/llvm/test/CodeGen/MIR/Generic/machine-function-optionally-computed-properties.mir
new file mode 100644
index 0000000..858bbc8
--- /dev/null
+++ b/llvm/test/CodeGen/MIR/Generic/machine-function-optionally-computed-properties.mir
@@ -0,0 +1,64 @@
+# RUN: llc -run-pass none -o - %s | FileCheck %s
+
+# Test that we can disable certain properties that are normally computed
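+# When a property is left unspecified it is recomputed from the (empty) body
+# and prints as true; an explicit true or false value round-trips through MIR
+# unchanged.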
+
+---
+# CHECK-LABEL: name: TestNoPhis
+# CHECK: noPhis: true
+# CHECK: ...
+name: TestNoPhis
+...
+---
+# CHECK-LABEL: name: TestNoPhisOverride
+# CHECK: noPhis: false
+# CHECK: ...
+name: TestNoPhisOverride
+noPhis: false
+...
+---
+# CHECK-LABEL: name: TestNoPhisOverrideTrue
+# CHECK: noPhis: true
+# CHECK: ...
+name: TestNoPhisOverrideTrue
+noPhis: true
+...
+---
+# CHECK-LABEL: name: TestIsSSA
+# CHECK: isSSA: true
+# CHECK: ...
+name: TestIsSSA
+...
+---
+# CHECK-LABEL: name: TestIsSSAOverride
+# CHECK: isSSA: false
+# CHECK: ...
+name: TestIsSSAOverride
+isSSA: false
+...
+---
+# CHECK-LABEL: name: TestIsSSAOverrideTrue
+# CHECK: isSSA: true
+# CHECK: ...
+name: TestIsSSAOverrideTrue
+isSSA: true
+...
+---
+# CHECK-LABEL: name: TestNoVRegs
+# CHECK: noVRegs: true
+# CHECK: ...
+name: TestNoVRegs
+...
+---
+# CHECK-LABEL: name: TestNoVRegsOverride
+# CHECK: noVRegs: false
+# CHECK: ...
+name: TestNoVRegsOverride
+noVRegs: false
+...
+---
+# CHECK-LABEL: name: TestNoVRegsOverrideTrue
+# CHECK: noVRegs: true
+# CHECK: ...
+name: TestNoVRegsOverrideTrue
+noVRegs: true
+...
diff --git a/llvm/test/CodeGen/MIR/NVPTX/floating-point-immediate-operands.mir b/llvm/test/CodeGen/MIR/NVPTX/floating-point-immediate-operands.mir
index 58e2e64..a40b4d8 100644
--- a/llvm/test/CodeGen/MIR/NVPTX/floating-point-immediate-operands.mir
+++ b/llvm/test/CodeGen/MIR/NVPTX/floating-point-immediate-operands.mir
@@ -40,9 +40,9 @@ registers:
- { id: 7, class: float32regs }
body: |
bb.0.entry:
- %0 = LD_f32_avar 0, 4, 1, 2, 32, &test_param_0
+ %0 = LD_f32_avar 0, 0, 4, 1, 2, 32, &test_param_0
%1 = CVT_f64_f32 %0, 0
- %2 = LD_i32_avar 0, 4, 1, 0, 32, &test_param_1
+ %2 = LD_i32_avar 0, 0, 4, 1, 0, 32, &test_param_1
; CHECK: %3:float64regs = FADD_rnf64ri %1, double 3.250000e+00
%3 = FADD_rnf64ri %1, double 3.250000e+00
%4 = CVT_f32_f64 %3, 5
@@ -66,9 +66,9 @@ registers:
- { id: 7, class: float32regs }
body: |
bb.0.entry:
- %0 = LD_f32_avar 0, 4, 1, 2, 32, &test2_param_0
+ %0 = LD_f32_avar 0, 0, 4, 1, 2, 32, &test2_param_0
%1 = CVT_f64_f32 %0, 0
- %2 = LD_i32_avar 0, 4, 1, 0, 32, &test2_param_1
+ %2 = LD_i32_avar 0, 0, 4, 1, 0, 32, &test2_param_1
; CHECK: %3:float64regs = FADD_rnf64ri %1, double 0x7FF8000000000000
%3 = FADD_rnf64ri %1, double 0x7FF8000000000000
%4 = CVT_f32_f64 %3, 5
diff --git a/llvm/test/CodeGen/Mips/cconv/illegal-vectors.ll b/llvm/test/CodeGen/Mips/cconv/illegal-vectors.ll
index 5cb5972..5cf418e 100644
--- a/llvm/test/CodeGen/Mips/cconv/illegal-vectors.ll
+++ b/llvm/test/CodeGen/Mips/cconv/illegal-vectors.ll
@@ -73,6 +73,121 @@ define void @call_v1i32(ptr %p) nounwind {
ret void
}
+define void @arg_v1i80(<1 x i80> %vec, ptr %p) {
+; MIPS64-LABEL: arg_v1i80:
+; MIPS64: # %bb.0:
+; MIPS64-NEXT: sh $5, 8($6)
+; MIPS64-NEXT: dsrl $1, $5, 16
+; MIPS64-NEXT: dsll $2, $4, 48
+; MIPS64-NEXT: or $1, $2, $1
+; MIPS64-NEXT: jr $ra
+; MIPS64-NEXT: sd $1, 0($6)
+;
+; MIPS32-LABEL: arg_v1i80:
+; MIPS32: # %bb.0:
+; MIPS32-NEXT: sll $1, $5, 16
+; MIPS32-NEXT: srl $2, $6, 16
+; MIPS32-NEXT: sh $6, 8($7)
+; MIPS32-NEXT: or $1, $2, $1
+; MIPS32-NEXT: sw $1, 4($7)
+; MIPS32-NEXT: srl $1, $5, 16
+; MIPS32-NEXT: sll $2, $4, 16
+; MIPS32-NEXT: or $1, $2, $1
+; MIPS32-NEXT: jr $ra
+; MIPS32-NEXT: sw $1, 0($7)
+ store <1 x i80> %vec, ptr %p
+ ret void
+}
+
+define <1 x i80> @ret_v1i80(ptr %p) {
+; MIPS64-LABEL: ret_v1i80:
+; MIPS64: # %bb.0:
+; MIPS64-NEXT: lhu $1, 8($4)
+; MIPS64-NEXT: ld $2, 0($4)
+; MIPS64-NEXT: dsll $3, $2, 16
+; MIPS64-NEXT: or $3, $1, $3
+; MIPS64-NEXT: jr $ra
+; MIPS64-NEXT: dsrl $2, $2, 48
+;
+; MIPS32-LABEL: ret_v1i80:
+; MIPS32: # %bb.0:
+; MIPS32-NEXT: lw $1, 4($4)
+; MIPS32-NEXT: srl $2, $1, 16
+; MIPS32-NEXT: lw $5, 0($4)
+; MIPS32-NEXT: sll $3, $5, 16
+; MIPS32-NEXT: or $3, $3, $2
+; MIPS32-NEXT: lhu $2, 8($4)
+; MIPS32-NEXT: sll $1, $1, 16
+; MIPS32-NEXT: or $4, $2, $1
+; MIPS32-NEXT: jr $ra
+; MIPS32-NEXT: srl $2, $5, 16
+ %v = load <1 x i80>, ptr %p
+ ret <1 x i80> %v
+}
+
+define void @call_v1i80(ptr %p) nounwind {
+; MIPS64-LABEL: call_v1i80:
+; MIPS64: # %bb.0:
+; MIPS64-NEXT: daddiu $sp, $sp, -16
+; MIPS64-NEXT: sd $ra, 8($sp) # 8-byte Folded Spill
+; MIPS64-NEXT: sd $16, 0($sp) # 8-byte Folded Spill
+; MIPS64-NEXT: move $16, $4
+; MIPS64-NEXT: lhu $1, 8($4)
+; MIPS64-NEXT: ld $2, 0($4)
+; MIPS64-NEXT: dsll $3, $2, 16
+; MIPS64-NEXT: or $5, $1, $3
+; MIPS64-NEXT: jal arg_v1i80
+; MIPS64-NEXT: dsrl $4, $2, 48
+; MIPS64-NEXT: jal ret_v1i80
+; MIPS64-NEXT: nop
+; MIPS64-NEXT: sh $3, 8($16)
+; MIPS64-NEXT: dsrl $1, $3, 16
+; MIPS64-NEXT: dsll $2, $2, 48
+; MIPS64-NEXT: or $1, $2, $1
+; MIPS64-NEXT: sd $1, 0($16)
+; MIPS64-NEXT: ld $16, 0($sp) # 8-byte Folded Reload
+; MIPS64-NEXT: ld $ra, 8($sp) # 8-byte Folded Reload
+; MIPS64-NEXT: jr $ra
+; MIPS64-NEXT: daddiu $sp, $sp, 16
+;
+; MIPS32-LABEL: call_v1i80:
+; MIPS32: # %bb.0:
+; MIPS32-NEXT: addiu $sp, $sp, -24
+; MIPS32-NEXT: sw $ra, 20($sp) # 4-byte Folded Spill
+; MIPS32-NEXT: sw $16, 16($sp) # 4-byte Folded Spill
+; MIPS32-NEXT: move $16, $4
+; MIPS32-NEXT: lw $1, 4($4)
+; MIPS32-NEXT: srl $2, $1, 16
+; MIPS32-NEXT: lw $3, 0($4)
+; MIPS32-NEXT: sll $4, $3, 16
+; MIPS32-NEXT: or $5, $4, $2
+; MIPS32-NEXT: lhu $2, 8($16)
+; MIPS32-NEXT: sll $1, $1, 16
+; MIPS32-NEXT: or $6, $2, $1
+; MIPS32-NEXT: jal arg_v1i80
+; MIPS32-NEXT: srl $4, $3, 16
+; MIPS32-NEXT: jal ret_v1i80
+; MIPS32-NEXT: nop
+; MIPS32-NEXT: sh $4, 8($16)
+; MIPS32-NEXT: sll $1, $3, 16
+; MIPS32-NEXT: srl $4, $4, 16
+; MIPS32-NEXT: or $1, $4, $1
+; MIPS32-NEXT: sw $1, 4($16)
+; MIPS32-NEXT: srl $1, $3, 16
+; MIPS32-NEXT: sll $2, $2, 16
+; MIPS32-NEXT: or $1, $2, $1
+; MIPS32-NEXT: sw $1, 0($16)
+; MIPS32-NEXT: lw $16, 16($sp) # 4-byte Folded Reload
+; MIPS32-NEXT: lw $ra, 20($sp) # 4-byte Folded Reload
+; MIPS32-NEXT: jr $ra
+; MIPS32-NEXT: addiu $sp, $sp, 24
+ %v1 = load <1 x i80>, ptr %p
+ call void @arg_v1i80(<1 x i80> %v1)
+ %v2 = call <1 x i80> @ret_v1i80()
+ store <1 x i80> %v2, ptr %p
+ ret void
+}
+
define void @arg_v2i32(<2 x i32> %vec, ptr %p) {
; MIPS64-LABEL: arg_v2i32:
; MIPS64: # %bb.0:
diff --git a/llvm/test/CodeGen/Mips/llvm-ir/ashr.ll b/llvm/test/CodeGen/Mips/llvm-ir/ashr.ll
index 450fe96..2b8129a 100644
--- a/llvm/test/CodeGen/Mips/llvm-ir/ashr.ll
+++ b/llvm/test/CodeGen/Mips/llvm-ir/ashr.ll
@@ -382,53 +382,40 @@ define signext i128 @ashr_i128(i128 signext %a, i128 signext %b) {
; MIPS: # %bb.0: # %entry
; MIPS-NEXT: addiu $sp, $sp, -32
; MIPS-NEXT: .cfi_def_cfa_offset 32
-; MIPS-NEXT: swl $7, 28($sp)
-; MIPS-NEXT: swl $6, 24($sp)
; MIPS-NEXT: sra $1, $4, 31
-; MIPS-NEXT: swl $5, 20($sp)
-; MIPS-NEXT: swl $4, 16($sp)
-; MIPS-NEXT: swl $1, 12($sp)
-; MIPS-NEXT: swl $1, 8($sp)
-; MIPS-NEXT: swl $1, 4($sp)
-; MIPS-NEXT: swl $1, 0($sp)
-; MIPS-NEXT: addiu $2, $sp, 0
-; MIPS-NEXT: swr $7, 31($sp)
-; MIPS-NEXT: swr $6, 27($sp)
-; MIPS-NEXT: swr $5, 23($sp)
-; MIPS-NEXT: swr $4, 19($sp)
-; MIPS-NEXT: swr $1, 15($sp)
-; MIPS-NEXT: swr $1, 11($sp)
-; MIPS-NEXT: swr $1, 7($sp)
-; MIPS-NEXT: swr $1, 3($sp)
-; MIPS-NEXT: addiu $1, $2, 16
+; MIPS-NEXT: sw $7, 28($sp)
+; MIPS-NEXT: sw $6, 24($sp)
+; MIPS-NEXT: sw $5, 20($sp)
+; MIPS-NEXT: sw $4, 16($sp)
+; MIPS-NEXT: sw $1, 12($sp)
+; MIPS-NEXT: sw $1, 8($sp)
+; MIPS-NEXT: sw $1, 4($sp)
+; MIPS-NEXT: sw $1, 0($sp)
+; MIPS-NEXT: addiu $1, $sp, 0
+; MIPS-NEXT: addiu $1, $1, 16
; MIPS-NEXT: lw $2, 60($sp)
; MIPS-NEXT: srl $3, $2, 3
-; MIPS-NEXT: andi $3, $3, 15
+; MIPS-NEXT: andi $3, $3, 12
; MIPS-NEXT: subu $1, $1, $3
-; MIPS-NEXT: lwl $3, 4($1)
-; MIPS-NEXT: lwr $3, 7($1)
-; MIPS-NEXT: sll $4, $3, 1
-; MIPS-NEXT: lwl $5, 8($1)
-; MIPS-NEXT: lwr $5, 11($1)
-; MIPS-NEXT: andi $2, $2, 7
-; MIPS-NEXT: not $6, $2
-; MIPS-NEXT: srlv $7, $5, $2
-; MIPS-NEXT: sllv $4, $4, $6
+; MIPS-NEXT: lw $3, 4($1)
+; MIPS-NEXT: lw $5, 8($1)
+; MIPS-NEXT: srlv $4, $5, $2
+; MIPS-NEXT: sll $6, $3, 1
+; MIPS-NEXT: andi $7, $2, 31
+; MIPS-NEXT: xori $7, $7, 31
+; MIPS-NEXT: sllv $6, $6, $7
; MIPS-NEXT: srlv $3, $3, $2
-; MIPS-NEXT: lwl $6, 0($1)
-; MIPS-NEXT: lwr $6, 3($1)
-; MIPS-NEXT: sll $8, $6, 1
-; MIPS-NEXT: xori $9, $2, 31
-; MIPS-NEXT: sllv $8, $8, $9
-; MIPS-NEXT: or $3, $3, $8
-; MIPS-NEXT: or $4, $7, $4
-; MIPS-NEXT: lwl $7, 12($1)
-; MIPS-NEXT: lwr $7, 15($1)
-; MIPS-NEXT: srlv $1, $7, $2
+; MIPS-NEXT: lw $8, 0($1)
+; MIPS-NEXT: sll $9, $8, 1
+; MIPS-NEXT: sllv $9, $9, $7
+; MIPS-NEXT: or $3, $3, $9
+; MIPS-NEXT: or $4, $4, $6
+; MIPS-NEXT: lw $1, 12($1)
+; MIPS-NEXT: srlv $1, $1, $2
; MIPS-NEXT: sll $5, $5, 1
-; MIPS-NEXT: sllv $5, $5, $9
+; MIPS-NEXT: sllv $5, $5, $7
; MIPS-NEXT: or $5, $1, $5
-; MIPS-NEXT: srav $2, $6, $2
+; MIPS-NEXT: srav $2, $8, $2
; MIPS-NEXT: jr $ra
; MIPS-NEXT: addiu $sp, $sp, 32
;
@@ -436,53 +423,40 @@ define signext i128 @ashr_i128(i128 signext %a, i128 signext %b) {
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: addiu $sp, $sp, -32
; MIPS32-NEXT: .cfi_def_cfa_offset 32
-; MIPS32-NEXT: swl $7, 28($sp)
-; MIPS32-NEXT: swl $6, 24($sp)
; MIPS32-NEXT: sra $1, $4, 31
-; MIPS32-NEXT: swl $5, 20($sp)
-; MIPS32-NEXT: swl $4, 16($sp)
-; MIPS32-NEXT: swl $1, 12($sp)
-; MIPS32-NEXT: swl $1, 8($sp)
-; MIPS32-NEXT: swl $1, 4($sp)
-; MIPS32-NEXT: swl $1, 0($sp)
-; MIPS32-NEXT: addiu $2, $sp, 0
-; MIPS32-NEXT: swr $7, 31($sp)
-; MIPS32-NEXT: swr $6, 27($sp)
-; MIPS32-NEXT: swr $5, 23($sp)
-; MIPS32-NEXT: swr $4, 19($sp)
-; MIPS32-NEXT: swr $1, 15($sp)
-; MIPS32-NEXT: swr $1, 11($sp)
-; MIPS32-NEXT: swr $1, 7($sp)
-; MIPS32-NEXT: swr $1, 3($sp)
-; MIPS32-NEXT: addiu $1, $2, 16
+; MIPS32-NEXT: sw $7, 28($sp)
+; MIPS32-NEXT: sw $6, 24($sp)
+; MIPS32-NEXT: sw $5, 20($sp)
+; MIPS32-NEXT: sw $4, 16($sp)
+; MIPS32-NEXT: sw $1, 12($sp)
+; MIPS32-NEXT: sw $1, 8($sp)
+; MIPS32-NEXT: sw $1, 4($sp)
+; MIPS32-NEXT: sw $1, 0($sp)
+; MIPS32-NEXT: addiu $1, $sp, 0
+; MIPS32-NEXT: addiu $1, $1, 16
; MIPS32-NEXT: lw $2, 60($sp)
; MIPS32-NEXT: srl $3, $2, 3
-; MIPS32-NEXT: andi $3, $3, 15
+; MIPS32-NEXT: andi $3, $3, 12
; MIPS32-NEXT: subu $1, $1, $3
-; MIPS32-NEXT: lwl $3, 4($1)
-; MIPS32-NEXT: lwr $3, 7($1)
-; MIPS32-NEXT: sll $4, $3, 1
-; MIPS32-NEXT: lwl $5, 8($1)
-; MIPS32-NEXT: lwr $5, 11($1)
-; MIPS32-NEXT: andi $2, $2, 7
-; MIPS32-NEXT: not $6, $2
-; MIPS32-NEXT: srlv $7, $5, $2
-; MIPS32-NEXT: sllv $4, $4, $6
+; MIPS32-NEXT: lw $3, 4($1)
+; MIPS32-NEXT: lw $5, 8($1)
+; MIPS32-NEXT: srlv $4, $5, $2
+; MIPS32-NEXT: sll $6, $3, 1
+; MIPS32-NEXT: andi $7, $2, 31
+; MIPS32-NEXT: xori $7, $7, 31
+; MIPS32-NEXT: sllv $6, $6, $7
; MIPS32-NEXT: srlv $3, $3, $2
-; MIPS32-NEXT: lwl $6, 0($1)
-; MIPS32-NEXT: lwr $6, 3($1)
-; MIPS32-NEXT: sll $8, $6, 1
-; MIPS32-NEXT: xori $9, $2, 31
-; MIPS32-NEXT: sllv $8, $8, $9
-; MIPS32-NEXT: or $3, $3, $8
-; MIPS32-NEXT: or $4, $7, $4
-; MIPS32-NEXT: lwl $7, 12($1)
-; MIPS32-NEXT: lwr $7, 15($1)
-; MIPS32-NEXT: srlv $1, $7, $2
+; MIPS32-NEXT: lw $8, 0($1)
+; MIPS32-NEXT: sll $9, $8, 1
+; MIPS32-NEXT: sllv $9, $9, $7
+; MIPS32-NEXT: or $3, $3, $9
+; MIPS32-NEXT: or $4, $4, $6
+; MIPS32-NEXT: lw $1, 12($1)
+; MIPS32-NEXT: srlv $1, $1, $2
; MIPS32-NEXT: sll $5, $5, 1
-; MIPS32-NEXT: sllv $5, $5, $9
+; MIPS32-NEXT: sllv $5, $5, $7
; MIPS32-NEXT: or $5, $1, $5
-; MIPS32-NEXT: srav $2, $6, $2
+; MIPS32-NEXT: srav $2, $8, $2
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: addiu $sp, $sp, 32
;
@@ -490,52 +464,40 @@ define signext i128 @ashr_i128(i128 signext %a, i128 signext %b) {
; 32R2: # %bb.0: # %entry
; 32R2-NEXT: addiu $sp, $sp, -32
; 32R2-NEXT: .cfi_def_cfa_offset 32
-; 32R2-NEXT: swl $7, 28($sp)
-; 32R2-NEXT: swl $6, 24($sp)
-; 32R2-NEXT: swl $5, 20($sp)
; 32R2-NEXT: sra $1, $4, 31
-; 32R2-NEXT: swl $4, 16($sp)
-; 32R2-NEXT: swl $1, 12($sp)
-; 32R2-NEXT: swl $1, 8($sp)
-; 32R2-NEXT: swl $1, 4($sp)
-; 32R2-NEXT: swl $1, 0($sp)
-; 32R2-NEXT: swr $7, 31($sp)
-; 32R2-NEXT: swr $6, 27($sp)
-; 32R2-NEXT: swr $5, 23($sp)
-; 32R2-NEXT: swr $4, 19($sp)
-; 32R2-NEXT: swr $1, 15($sp)
-; 32R2-NEXT: swr $1, 11($sp)
-; 32R2-NEXT: swr $1, 7($sp)
-; 32R2-NEXT: swr $1, 3($sp)
+; 32R2-NEXT: sw $7, 28($sp)
+; 32R2-NEXT: sw $6, 24($sp)
+; 32R2-NEXT: sw $5, 20($sp)
+; 32R2-NEXT: sw $4, 16($sp)
+; 32R2-NEXT: sw $1, 12($sp)
+; 32R2-NEXT: sw $1, 8($sp)
+; 32R2-NEXT: sw $1, 4($sp)
+; 32R2-NEXT: sw $1, 0($sp)
; 32R2-NEXT: addiu $1, $sp, 0
; 32R2-NEXT: addiu $1, $1, 16
; 32R2-NEXT: lw $2, 60($sp)
-; 32R2-NEXT: ext $3, $2, 3, 4
+; 32R2-NEXT: srl $3, $2, 3
+; 32R2-NEXT: andi $3, $3, 12
; 32R2-NEXT: subu $1, $1, $3
-; 32R2-NEXT: lwl $3, 4($1)
-; 32R2-NEXT: lwr $3, 7($1)
-; 32R2-NEXT: sll $4, $3, 1
-; 32R2-NEXT: lwl $5, 8($1)
-; 32R2-NEXT: lwr $5, 11($1)
-; 32R2-NEXT: andi $2, $2, 7
-; 32R2-NEXT: not $6, $2
-; 32R2-NEXT: srlv $7, $5, $2
-; 32R2-NEXT: sllv $4, $4, $6
+; 32R2-NEXT: lw $3, 4($1)
+; 32R2-NEXT: lw $5, 8($1)
+; 32R2-NEXT: srlv $4, $5, $2
+; 32R2-NEXT: sll $6, $3, 1
+; 32R2-NEXT: andi $7, $2, 31
+; 32R2-NEXT: xori $7, $7, 31
+; 32R2-NEXT: sllv $6, $6, $7
; 32R2-NEXT: srlv $3, $3, $2
-; 32R2-NEXT: lwl $6, 0($1)
-; 32R2-NEXT: lwr $6, 3($1)
-; 32R2-NEXT: sll $8, $6, 1
-; 32R2-NEXT: xori $9, $2, 31
-; 32R2-NEXT: sllv $8, $8, $9
-; 32R2-NEXT: or $3, $3, $8
-; 32R2-NEXT: or $4, $7, $4
-; 32R2-NEXT: lwl $7, 12($1)
-; 32R2-NEXT: lwr $7, 15($1)
-; 32R2-NEXT: srlv $1, $7, $2
+; 32R2-NEXT: lw $8, 0($1)
+; 32R2-NEXT: sll $9, $8, 1
+; 32R2-NEXT: sllv $9, $9, $7
+; 32R2-NEXT: or $3, $3, $9
+; 32R2-NEXT: or $4, $4, $6
+; 32R2-NEXT: lw $1, 12($1)
+; 32R2-NEXT: srlv $1, $1, $2
; 32R2-NEXT: sll $5, $5, 1
-; 32R2-NEXT: sllv $5, $5, $9
+; 32R2-NEXT: sllv $5, $5, $7
; 32R2-NEXT: or $5, $1, $5
-; 32R2-NEXT: srav $2, $6, $2
+; 32R2-NEXT: srav $2, $8, $2
; 32R2-NEXT: jr $ra
; 32R2-NEXT: addiu $sp, $sp, 32
;
@@ -555,28 +517,28 @@ define signext i128 @ashr_i128(i128 signext %a, i128 signext %b) {
; 32R6-NEXT: addiu $1, $sp, 0
; 32R6-NEXT: addiu $1, $1, 16
; 32R6-NEXT: lw $2, 60($sp)
-; 32R6-NEXT: ext $3, $2, 3, 4
+; 32R6-NEXT: srl $3, $2, 3
+; 32R6-NEXT: andi $3, $3, 12
; 32R6-NEXT: subu $1, $1, $3
; 32R6-NEXT: lw $3, 4($1)
-; 32R6-NEXT: sll $4, $3, 1
; 32R6-NEXT: lw $5, 8($1)
-; 32R6-NEXT: andi $2, $2, 7
-; 32R6-NEXT: not $6, $2
-; 32R6-NEXT: srlv $7, $5, $2
-; 32R6-NEXT: sllv $4, $4, $6
+; 32R6-NEXT: srlv $4, $5, $2
+; 32R6-NEXT: sll $6, $3, 1
+; 32R6-NEXT: andi $7, $2, 31
+; 32R6-NEXT: xori $7, $7, 31
+; 32R6-NEXT: sllv $6, $6, $7
; 32R6-NEXT: srlv $3, $3, $2
-; 32R6-NEXT: lw $6, 0($1)
-; 32R6-NEXT: sll $8, $6, 1
-; 32R6-NEXT: xori $9, $2, 31
-; 32R6-NEXT: sllv $8, $8, $9
-; 32R6-NEXT: or $3, $3, $8
-; 32R6-NEXT: or $4, $7, $4
+; 32R6-NEXT: lw $8, 0($1)
+; 32R6-NEXT: sll $9, $8, 1
+; 32R6-NEXT: sllv $9, $9, $7
+; 32R6-NEXT: or $3, $3, $9
+; 32R6-NEXT: or $4, $4, $6
; 32R6-NEXT: lw $1, 12($1)
; 32R6-NEXT: srlv $1, $1, $2
; 32R6-NEXT: sll $5, $5, 1
-; 32R6-NEXT: sllv $5, $5, $9
+; 32R6-NEXT: sllv $5, $5, $7
; 32R6-NEXT: or $5, $1, $5
-; 32R6-NEXT: srav $2, $6, $2
+; 32R6-NEXT: srav $2, $8, $2
; 32R6-NEXT: jr $ra
; 32R6-NEXT: addiu $sp, $sp, 32
;
@@ -656,53 +618,37 @@ define signext i128 @ashr_i128(i128 signext %a, i128 signext %b) {
; MMR3-NEXT: swp $16, 32($sp)
; MMR3-NEXT: .cfi_offset 17, -4
; MMR3-NEXT: .cfi_offset 16, -8
-; MMR3-NEXT: swl $7, 28($sp)
-; MMR3-NEXT: swl $6, 24($sp)
-; MMR3-NEXT: swl $5, 20($sp)
; MMR3-NEXT: sra $1, $4, 31
-; MMR3-NEXT: swl $4, 16($sp)
-; MMR3-NEXT: swl $1, 12($sp)
-; MMR3-NEXT: swl $1, 8($sp)
-; MMR3-NEXT: swl $1, 4($sp)
-; MMR3-NEXT: swl $1, 0($sp)
-; MMR3-NEXT: swr $7, 31($sp)
-; MMR3-NEXT: swr $6, 27($sp)
-; MMR3-NEXT: swr $5, 23($sp)
-; MMR3-NEXT: swr $4, 19($sp)
-; MMR3-NEXT: swr $1, 15($sp)
-; MMR3-NEXT: swr $1, 11($sp)
-; MMR3-NEXT: swr $1, 7($sp)
-; MMR3-NEXT: swr $1, 3($sp)
+; MMR3-NEXT: swp $6, 24($sp)
+; MMR3-NEXT: swp $4, 16($sp)
+; MMR3-NEXT: sw $1, 12($sp)
+; MMR3-NEXT: sw $1, 8($sp)
+; MMR3-NEXT: sw $1, 4($sp)
+; MMR3-NEXT: sw $1, 0($sp)
; MMR3-NEXT: addiur1sp $2, 0
; MMR3-NEXT: addiur2 $2, $2, 16
; MMR3-NEXT: lw $3, 68($sp)
-; MMR3-NEXT: ext $4, $3, 3, 4
-; MMR3-NEXT: subu16 $2, $2, $4
-; MMR3-NEXT: lwl $7, 4($2)
-; MMR3-NEXT: lwr $7, 7($2)
-; MMR3-NEXT: sll16 $4, $7, 1
-; MMR3-NEXT: lwl $5, 8($2)
-; MMR3-NEXT: lwr $5, 11($2)
-; MMR3-NEXT: andi16 $6, $3, 7
-; MMR3-NEXT: not16 $3, $6
-; MMR3-NEXT: andi16 $3, $3, 31
-; MMR3-NEXT: srlv $16, $5, $6
-; MMR3-NEXT: sllv $4, $4, $3
-; MMR3-NEXT: srlv $17, $7, $6
-; MMR3-NEXT: lwl $7, 0($2)
-; MMR3-NEXT: lwr $7, 3($2)
-; MMR3-NEXT: sll16 $3, $7, 1
-; MMR3-NEXT: xori $1, $6, 31
+; MMR3-NEXT: srl16 $4, $3, 3
+; MMR3-NEXT: andi $4, $4, 12
+; MMR3-NEXT: subu16 $5, $2, $4
+; MMR3-NEXT: lwp $6, 4($5)
+; MMR3-NEXT: andi16 $2, $3, 31
+; MMR3-NEXT: srlv $16, $7, $2
+; MMR3-NEXT: sll16 $3, $6, 1
+; MMR3-NEXT: xori $1, $2, 31
+; MMR3-NEXT: sllv $4, $3, $1
+; MMR3-NEXT: srlv $6, $6, $2
+; MMR3-NEXT: lw16 $17, 0($5)
+; MMR3-NEXT: sll16 $3, $17, 1
; MMR3-NEXT: sllv $3, $3, $1
-; MMR3-NEXT: or16 $3, $17
+; MMR3-NEXT: or16 $3, $6
; MMR3-NEXT: or16 $4, $16
-; MMR3-NEXT: lwl $8, 12($2)
-; MMR3-NEXT: lwr $8, 15($2)
-; MMR3-NEXT: srlv $2, $8, $6
-; MMR3-NEXT: sll16 $5, $5, 1
+; MMR3-NEXT: lw16 $5, 12($5)
+; MMR3-NEXT: srlv $6, $5, $2
+; MMR3-NEXT: sll16 $5, $7, 1
; MMR3-NEXT: sllv $5, $5, $1
-; MMR3-NEXT: or16 $5, $2
-; MMR3-NEXT: srav $2, $7, $6
+; MMR3-NEXT: or16 $5, $6
+; MMR3-NEXT: srav $2, $17, $2
; MMR3-NEXT: lwp $16, 32($sp)
; MMR3-NEXT: addiusp 40
; MMR3-NEXT: jrc $ra
@@ -714,40 +660,39 @@ define signext i128 @ashr_i128(i128 signext %a, i128 signext %b) {
; MMR6-NEXT: sw $16, 36($sp) # 4-byte Folded Spill
; MMR6-NEXT: .cfi_offset 16, -4
; MMR6-NEXT: sra $1, $4, 31
-; MMR6-NEXT: sw $7, 32($sp)
-; MMR6-NEXT: sw $6, 28($sp)
-; MMR6-NEXT: sw $5, 24($sp)
-; MMR6-NEXT: sw $4, 20($sp)
-; MMR6-NEXT: sw $1, 16($sp)
+; MMR6-NEXT: sw $7, 28($sp)
+; MMR6-NEXT: sw $6, 24($sp)
+; MMR6-NEXT: sw $5, 20($sp)
+; MMR6-NEXT: sw $4, 16($sp)
; MMR6-NEXT: sw $1, 12($sp)
; MMR6-NEXT: sw $1, 8($sp)
; MMR6-NEXT: sw $1, 4($sp)
-; MMR6-NEXT: addiu $2, $sp, 4
+; MMR6-NEXT: sw $1, 0($sp)
+; MMR6-NEXT: addiu $2, $sp, 0
; MMR6-NEXT: addiur2 $2, $2, 16
; MMR6-NEXT: lw $3, 68($sp)
-; MMR6-NEXT: ext $4, $3, 3, 4
-; MMR6-NEXT: subu16 $5, $2, $4
-; MMR6-NEXT: lw16 $4, 4($5)
-; MMR6-NEXT: sll16 $6, $4, 1
-; MMR6-NEXT: lw16 $7, 8($5)
-; MMR6-NEXT: andi16 $2, $3, 7
-; MMR6-NEXT: not16 $3, $2
-; MMR6-NEXT: andi16 $3, $3, 31
-; MMR6-NEXT: srlv $1, $7, $2
-; MMR6-NEXT: sllv $6, $6, $3
-; MMR6-NEXT: srlv $3, $4, $2
-; MMR6-NEXT: lw16 $16, 0($5)
+; MMR6-NEXT: srl16 $4, $3, 3
+; MMR6-NEXT: andi $4, $4, 12
+; MMR6-NEXT: subu16 $2, $2, $4
+; MMR6-NEXT: lw16 $4, 4($2)
+; MMR6-NEXT: lw16 $5, 8($2)
+; MMR6-NEXT: andi16 $6, $3, 31
+; MMR6-NEXT: srlv $1, $5, $6
+; MMR6-NEXT: sll16 $3, $4, 1
+; MMR6-NEXT: xori $7, $6, 31
+; MMR6-NEXT: sllv $8, $3, $7
+; MMR6-NEXT: srlv $3, $4, $6
+; MMR6-NEXT: lw16 $16, 0($2)
; MMR6-NEXT: sll16 $4, $16, 1
-; MMR6-NEXT: xori $8, $2, 31
-; MMR6-NEXT: sllv $4, $4, $8
+; MMR6-NEXT: sllv $4, $4, $7
; MMR6-NEXT: or $3, $3, $4
-; MMR6-NEXT: or $4, $1, $6
-; MMR6-NEXT: lw16 $5, 12($5)
-; MMR6-NEXT: srlv $1, $5, $2
-; MMR6-NEXT: sll16 $5, $7, 1
-; MMR6-NEXT: sllv $5, $5, $8
-; MMR6-NEXT: or $5, $1, $5
-; MMR6-NEXT: srav $2, $16, $2
+; MMR6-NEXT: or $4, $1, $8
+; MMR6-NEXT: lw16 $2, 12($2)
+; MMR6-NEXT: srlv $1, $2, $6
+; MMR6-NEXT: sll16 $2, $5, 1
+; MMR6-NEXT: sllv $2, $2, $7
+; MMR6-NEXT: or $5, $1, $2
+; MMR6-NEXT: srav $2, $16, $6
; MMR6-NEXT: lw $16, 36($sp) # 4-byte Folded Reload
; MMR6-NEXT: addiu $sp, $sp, 40
; MMR6-NEXT: jrc $ra
diff --git a/llvm/test/CodeGen/Mips/llvm-ir/lshr.ll b/llvm/test/CodeGen/Mips/llvm-ir/lshr.ll
index 03cf104..69b842c 100644
--- a/llvm/test/CodeGen/Mips/llvm-ir/lshr.ll
+++ b/llvm/test/CodeGen/Mips/llvm-ir/lshr.ll
@@ -398,52 +398,39 @@ define signext i128 @lshr_i128(i128 signext %a, i128 signext %b) {
; MIPS2: # %bb.0: # %entry
; MIPS2-NEXT: addiu $sp, $sp, -32
; MIPS2-NEXT: .cfi_def_cfa_offset 32
-; MIPS2-NEXT: swl $7, 28($sp)
-; MIPS2-NEXT: swl $6, 24($sp)
-; MIPS2-NEXT: swl $5, 20($sp)
-; MIPS2-NEXT: swl $4, 16($sp)
-; MIPS2-NEXT: swl $zero, 12($sp)
-; MIPS2-NEXT: swl $zero, 8($sp)
-; MIPS2-NEXT: swl $zero, 4($sp)
-; MIPS2-NEXT: swl $zero, 0($sp)
; MIPS2-NEXT: addiu $1, $sp, 0
-; MIPS2-NEXT: swr $7, 31($sp)
-; MIPS2-NEXT: swr $6, 27($sp)
-; MIPS2-NEXT: swr $5, 23($sp)
-; MIPS2-NEXT: swr $4, 19($sp)
-; MIPS2-NEXT: swr $zero, 15($sp)
-; MIPS2-NEXT: swr $zero, 11($sp)
-; MIPS2-NEXT: swr $zero, 7($sp)
-; MIPS2-NEXT: swr $zero, 3($sp)
+; MIPS2-NEXT: sw $7, 28($sp)
+; MIPS2-NEXT: sw $6, 24($sp)
+; MIPS2-NEXT: sw $5, 20($sp)
+; MIPS2-NEXT: sw $4, 16($sp)
; MIPS2-NEXT: addiu $1, $1, 16
; MIPS2-NEXT: lw $2, 60($sp)
; MIPS2-NEXT: srl $3, $2, 3
-; MIPS2-NEXT: andi $3, $3, 15
+; MIPS2-NEXT: andi $3, $3, 12
; MIPS2-NEXT: subu $1, $1, $3
-; MIPS2-NEXT: lwl $3, 4($1)
-; MIPS2-NEXT: lwr $3, 7($1)
-; MIPS2-NEXT: sll $4, $3, 1
-; MIPS2-NEXT: lwl $5, 8($1)
-; MIPS2-NEXT: lwr $5, 11($1)
-; MIPS2-NEXT: andi $2, $2, 7
-; MIPS2-NEXT: not $6, $2
-; MIPS2-NEXT: srlv $7, $5, $2
-; MIPS2-NEXT: sllv $4, $4, $6
+; MIPS2-NEXT: sw $zero, 12($sp)
+; MIPS2-NEXT: sw $zero, 8($sp)
+; MIPS2-NEXT: sw $zero, 4($sp)
+; MIPS2-NEXT: sw $zero, 0($sp)
+; MIPS2-NEXT: lw $3, 4($1)
+; MIPS2-NEXT: lw $5, 8($1)
+; MIPS2-NEXT: srlv $4, $5, $2
+; MIPS2-NEXT: sll $6, $3, 1
+; MIPS2-NEXT: andi $7, $2, 31
+; MIPS2-NEXT: xori $7, $7, 31
+; MIPS2-NEXT: sllv $6, $6, $7
; MIPS2-NEXT: srlv $3, $3, $2
-; MIPS2-NEXT: lwl $6, 0($1)
-; MIPS2-NEXT: lwr $6, 3($1)
-; MIPS2-NEXT: sll $8, $6, 1
-; MIPS2-NEXT: xori $9, $2, 31
-; MIPS2-NEXT: sllv $8, $8, $9
-; MIPS2-NEXT: or $3, $3, $8
-; MIPS2-NEXT: or $4, $7, $4
-; MIPS2-NEXT: lwl $7, 12($1)
-; MIPS2-NEXT: lwr $7, 15($1)
-; MIPS2-NEXT: srlv $1, $7, $2
+; MIPS2-NEXT: lw $8, 0($1)
+; MIPS2-NEXT: sll $9, $8, 1
+; MIPS2-NEXT: sllv $9, $9, $7
+; MIPS2-NEXT: or $3, $3, $9
+; MIPS2-NEXT: or $4, $4, $6
+; MIPS2-NEXT: lw $1, 12($1)
+; MIPS2-NEXT: srlv $1, $1, $2
; MIPS2-NEXT: sll $5, $5, 1
-; MIPS2-NEXT: sllv $5, $5, $9
+; MIPS2-NEXT: sllv $5, $5, $7
; MIPS2-NEXT: or $5, $1, $5
-; MIPS2-NEXT: srlv $2, $6, $2
+; MIPS2-NEXT: srlv $2, $8, $2
; MIPS2-NEXT: jr $ra
; MIPS2-NEXT: addiu $sp, $sp, 32
;
@@ -451,52 +438,39 @@ define signext i128 @lshr_i128(i128 signext %a, i128 signext %b) {
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: addiu $sp, $sp, -32
; MIPS32-NEXT: .cfi_def_cfa_offset 32
-; MIPS32-NEXT: swl $7, 28($sp)
-; MIPS32-NEXT: swl $6, 24($sp)
-; MIPS32-NEXT: swl $5, 20($sp)
-; MIPS32-NEXT: swl $4, 16($sp)
-; MIPS32-NEXT: swl $zero, 12($sp)
-; MIPS32-NEXT: swl $zero, 8($sp)
-; MIPS32-NEXT: swl $zero, 4($sp)
-; MIPS32-NEXT: swl $zero, 0($sp)
; MIPS32-NEXT: addiu $1, $sp, 0
-; MIPS32-NEXT: swr $7, 31($sp)
-; MIPS32-NEXT: swr $6, 27($sp)
-; MIPS32-NEXT: swr $5, 23($sp)
-; MIPS32-NEXT: swr $4, 19($sp)
-; MIPS32-NEXT: swr $zero, 15($sp)
-; MIPS32-NEXT: swr $zero, 11($sp)
-; MIPS32-NEXT: swr $zero, 7($sp)
-; MIPS32-NEXT: swr $zero, 3($sp)
+; MIPS32-NEXT: sw $7, 28($sp)
+; MIPS32-NEXT: sw $6, 24($sp)
+; MIPS32-NEXT: sw $5, 20($sp)
+; MIPS32-NEXT: sw $4, 16($sp)
; MIPS32-NEXT: addiu $1, $1, 16
; MIPS32-NEXT: lw $2, 60($sp)
; MIPS32-NEXT: srl $3, $2, 3
-; MIPS32-NEXT: andi $3, $3, 15
+; MIPS32-NEXT: andi $3, $3, 12
; MIPS32-NEXT: subu $1, $1, $3
-; MIPS32-NEXT: lwl $3, 4($1)
-; MIPS32-NEXT: lwr $3, 7($1)
-; MIPS32-NEXT: sll $4, $3, 1
-; MIPS32-NEXT: lwl $5, 8($1)
-; MIPS32-NEXT: lwr $5, 11($1)
-; MIPS32-NEXT: andi $2, $2, 7
-; MIPS32-NEXT: not $6, $2
-; MIPS32-NEXT: srlv $7, $5, $2
-; MIPS32-NEXT: sllv $4, $4, $6
+; MIPS32-NEXT: sw $zero, 12($sp)
+; MIPS32-NEXT: sw $zero, 8($sp)
+; MIPS32-NEXT: sw $zero, 4($sp)
+; MIPS32-NEXT: sw $zero, 0($sp)
+; MIPS32-NEXT: lw $3, 4($1)
+; MIPS32-NEXT: lw $5, 8($1)
+; MIPS32-NEXT: srlv $4, $5, $2
+; MIPS32-NEXT: sll $6, $3, 1
+; MIPS32-NEXT: andi $7, $2, 31
+; MIPS32-NEXT: xori $7, $7, 31
+; MIPS32-NEXT: sllv $6, $6, $7
; MIPS32-NEXT: srlv $3, $3, $2
-; MIPS32-NEXT: lwl $6, 0($1)
-; MIPS32-NEXT: lwr $6, 3($1)
-; MIPS32-NEXT: sll $8, $6, 1
-; MIPS32-NEXT: xori $9, $2, 31
-; MIPS32-NEXT: sllv $8, $8, $9
-; MIPS32-NEXT: or $3, $3, $8
-; MIPS32-NEXT: or $4, $7, $4
-; MIPS32-NEXT: lwl $7, 12($1)
-; MIPS32-NEXT: lwr $7, 15($1)
-; MIPS32-NEXT: srlv $1, $7, $2
+; MIPS32-NEXT: lw $8, 0($1)
+; MIPS32-NEXT: sll $9, $8, 1
+; MIPS32-NEXT: sllv $9, $9, $7
+; MIPS32-NEXT: or $3, $3, $9
+; MIPS32-NEXT: or $4, $4, $6
+; MIPS32-NEXT: lw $1, 12($1)
+; MIPS32-NEXT: srlv $1, $1, $2
; MIPS32-NEXT: sll $5, $5, 1
-; MIPS32-NEXT: sllv $5, $5, $9
+; MIPS32-NEXT: sllv $5, $5, $7
; MIPS32-NEXT: or $5, $1, $5
-; MIPS32-NEXT: srlv $2, $6, $2
+; MIPS32-NEXT: srlv $2, $8, $2
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: addiu $sp, $sp, 32
;
@@ -504,51 +478,39 @@ define signext i128 @lshr_i128(i128 signext %a, i128 signext %b) {
; MIPS32R2: # %bb.0: # %entry
; MIPS32R2-NEXT: addiu $sp, $sp, -32
; MIPS32R2-NEXT: .cfi_def_cfa_offset 32
-; MIPS32R2-NEXT: swl $7, 28($sp)
-; MIPS32R2-NEXT: swl $6, 24($sp)
-; MIPS32R2-NEXT: swl $5, 20($sp)
-; MIPS32R2-NEXT: swl $4, 16($sp)
-; MIPS32R2-NEXT: swl $zero, 12($sp)
-; MIPS32R2-NEXT: swl $zero, 8($sp)
-; MIPS32R2-NEXT: swl $zero, 4($sp)
-; MIPS32R2-NEXT: swl $zero, 0($sp)
-; MIPS32R2-NEXT: swr $7, 31($sp)
-; MIPS32R2-NEXT: swr $6, 27($sp)
-; MIPS32R2-NEXT: swr $5, 23($sp)
-; MIPS32R2-NEXT: swr $4, 19($sp)
-; MIPS32R2-NEXT: swr $zero, 15($sp)
-; MIPS32R2-NEXT: swr $zero, 11($sp)
-; MIPS32R2-NEXT: swr $zero, 7($sp)
-; MIPS32R2-NEXT: swr $zero, 3($sp)
; MIPS32R2-NEXT: addiu $1, $sp, 0
+; MIPS32R2-NEXT: sw $7, 28($sp)
+; MIPS32R2-NEXT: sw $6, 24($sp)
+; MIPS32R2-NEXT: sw $5, 20($sp)
+; MIPS32R2-NEXT: sw $4, 16($sp)
; MIPS32R2-NEXT: addiu $1, $1, 16
; MIPS32R2-NEXT: lw $2, 60($sp)
-; MIPS32R2-NEXT: ext $3, $2, 3, 4
+; MIPS32R2-NEXT: srl $3, $2, 3
+; MIPS32R2-NEXT: andi $3, $3, 12
; MIPS32R2-NEXT: subu $1, $1, $3
-; MIPS32R2-NEXT: lwl $3, 4($1)
-; MIPS32R2-NEXT: lwr $3, 7($1)
-; MIPS32R2-NEXT: sll $4, $3, 1
-; MIPS32R2-NEXT: lwl $5, 8($1)
-; MIPS32R2-NEXT: lwr $5, 11($1)
-; MIPS32R2-NEXT: andi $2, $2, 7
-; MIPS32R2-NEXT: not $6, $2
-; MIPS32R2-NEXT: srlv $7, $5, $2
-; MIPS32R2-NEXT: sllv $4, $4, $6
+; MIPS32R2-NEXT: sw $zero, 12($sp)
+; MIPS32R2-NEXT: sw $zero, 8($sp)
+; MIPS32R2-NEXT: sw $zero, 4($sp)
+; MIPS32R2-NEXT: sw $zero, 0($sp)
+; MIPS32R2-NEXT: lw $3, 4($1)
+; MIPS32R2-NEXT: lw $5, 8($1)
+; MIPS32R2-NEXT: srlv $4, $5, $2
+; MIPS32R2-NEXT: sll $6, $3, 1
+; MIPS32R2-NEXT: andi $7, $2, 31
+; MIPS32R2-NEXT: xori $7, $7, 31
+; MIPS32R2-NEXT: sllv $6, $6, $7
; MIPS32R2-NEXT: srlv $3, $3, $2
-; MIPS32R2-NEXT: lwl $6, 0($1)
-; MIPS32R2-NEXT: lwr $6, 3($1)
-; MIPS32R2-NEXT: sll $8, $6, 1
-; MIPS32R2-NEXT: xori $9, $2, 31
-; MIPS32R2-NEXT: sllv $8, $8, $9
-; MIPS32R2-NEXT: or $3, $3, $8
-; MIPS32R2-NEXT: or $4, $7, $4
-; MIPS32R2-NEXT: lwl $7, 12($1)
-; MIPS32R2-NEXT: lwr $7, 15($1)
-; MIPS32R2-NEXT: srlv $1, $7, $2
+; MIPS32R2-NEXT: lw $8, 0($1)
+; MIPS32R2-NEXT: sll $9, $8, 1
+; MIPS32R2-NEXT: sllv $9, $9, $7
+; MIPS32R2-NEXT: or $3, $3, $9
+; MIPS32R2-NEXT: or $4, $4, $6
+; MIPS32R2-NEXT: lw $1, 12($1)
+; MIPS32R2-NEXT: srlv $1, $1, $2
; MIPS32R2-NEXT: sll $5, $5, 1
-; MIPS32R2-NEXT: sllv $5, $5, $9
+; MIPS32R2-NEXT: sllv $5, $5, $7
; MIPS32R2-NEXT: or $5, $1, $5
-; MIPS32R2-NEXT: srlv $2, $6, $2
+; MIPS32R2-NEXT: srlv $2, $8, $2
; MIPS32R2-NEXT: jr $ra
; MIPS32R2-NEXT: addiu $sp, $sp, 32
;
@@ -563,32 +525,32 @@ define signext i128 @lshr_i128(i128 signext %a, i128 signext %b) {
; MIPS32R6-NEXT: sw $4, 16($sp)
; MIPS32R6-NEXT: addiu $1, $1, 16
; MIPS32R6-NEXT: lw $2, 60($sp)
-; MIPS32R6-NEXT: ext $3, $2, 3, 4
+; MIPS32R6-NEXT: srl $3, $2, 3
+; MIPS32R6-NEXT: andi $3, $3, 12
; MIPS32R6-NEXT: subu $1, $1, $3
; MIPS32R6-NEXT: sw $zero, 12($sp)
; MIPS32R6-NEXT: sw $zero, 8($sp)
; MIPS32R6-NEXT: sw $zero, 4($sp)
; MIPS32R6-NEXT: sw $zero, 0($sp)
; MIPS32R6-NEXT: lw $3, 4($1)
-; MIPS32R6-NEXT: sll $4, $3, 1
; MIPS32R6-NEXT: lw $5, 8($1)
-; MIPS32R6-NEXT: andi $2, $2, 7
-; MIPS32R6-NEXT: not $6, $2
-; MIPS32R6-NEXT: srlv $7, $5, $2
-; MIPS32R6-NEXT: sllv $4, $4, $6
+; MIPS32R6-NEXT: srlv $4, $5, $2
+; MIPS32R6-NEXT: sll $6, $3, 1
+; MIPS32R6-NEXT: andi $7, $2, 31
+; MIPS32R6-NEXT: xori $7, $7, 31
+; MIPS32R6-NEXT: sllv $6, $6, $7
; MIPS32R6-NEXT: srlv $3, $3, $2
-; MIPS32R6-NEXT: lw $6, 0($1)
-; MIPS32R6-NEXT: sll $8, $6, 1
-; MIPS32R6-NEXT: xori $9, $2, 31
-; MIPS32R6-NEXT: sllv $8, $8, $9
-; MIPS32R6-NEXT: or $3, $3, $8
-; MIPS32R6-NEXT: or $4, $7, $4
+; MIPS32R6-NEXT: lw $8, 0($1)
+; MIPS32R6-NEXT: sll $9, $8, 1
+; MIPS32R6-NEXT: sllv $9, $9, $7
+; MIPS32R6-NEXT: or $3, $3, $9
+; MIPS32R6-NEXT: or $4, $4, $6
; MIPS32R6-NEXT: lw $1, 12($1)
; MIPS32R6-NEXT: srlv $1, $1, $2
; MIPS32R6-NEXT: sll $5, $5, 1
-; MIPS32R6-NEXT: sllv $5, $5, $9
+; MIPS32R6-NEXT: sllv $5, $5, $7
; MIPS32R6-NEXT: or $5, $1, $5
-; MIPS32R6-NEXT: srlv $2, $6, $2
+; MIPS32R6-NEXT: srlv $2, $8, $2
; MIPS32R6-NEXT: jr $ra
; MIPS32R6-NEXT: addiu $sp, $sp, 32
;
@@ -677,53 +639,37 @@ define signext i128 @lshr_i128(i128 signext %a, i128 signext %b) {
; MMR3-NEXT: swp $16, 32($sp)
; MMR3-NEXT: .cfi_offset 17, -4
; MMR3-NEXT: .cfi_offset 16, -8
-; MMR3-NEXT: swl $7, 28($sp)
-; MMR3-NEXT: swl $6, 24($sp)
-; MMR3-NEXT: swl $5, 20($sp)
; MMR3-NEXT: li16 $2, 0
-; MMR3-NEXT: swl $4, 16($sp)
-; MMR3-NEXT: swl $2, 12($sp)
-; MMR3-NEXT: swl $2, 8($sp)
-; MMR3-NEXT: swl $2, 4($sp)
-; MMR3-NEXT: swl $2, 0($sp)
-; MMR3-NEXT: swr $7, 31($sp)
-; MMR3-NEXT: swr $6, 27($sp)
-; MMR3-NEXT: swr $5, 23($sp)
-; MMR3-NEXT: swr $4, 19($sp)
-; MMR3-NEXT: swr $2, 15($sp)
-; MMR3-NEXT: swr $2, 11($sp)
-; MMR3-NEXT: swr $2, 7($sp)
-; MMR3-NEXT: swr $2, 3($sp)
+; MMR3-NEXT: swp $6, 24($sp)
+; MMR3-NEXT: swp $4, 16($sp)
+; MMR3-NEXT: sw $2, 12($sp)
+; MMR3-NEXT: sw $2, 8($sp)
+; MMR3-NEXT: sw $2, 4($sp)
+; MMR3-NEXT: sw $2, 0($sp)
; MMR3-NEXT: addiur1sp $2, 0
; MMR3-NEXT: addiur2 $2, $2, 16
; MMR3-NEXT: lw $3, 68($sp)
-; MMR3-NEXT: ext $4, $3, 3, 4
-; MMR3-NEXT: subu16 $2, $2, $4
-; MMR3-NEXT: lwl $7, 4($2)
-; MMR3-NEXT: lwr $7, 7($2)
-; MMR3-NEXT: sll16 $4, $7, 1
-; MMR3-NEXT: lwl $5, 8($2)
-; MMR3-NEXT: lwr $5, 11($2)
-; MMR3-NEXT: andi16 $6, $3, 7
-; MMR3-NEXT: not16 $3, $6
-; MMR3-NEXT: andi16 $3, $3, 31
-; MMR3-NEXT: srlv $16, $5, $6
-; MMR3-NEXT: sllv $4, $4, $3
-; MMR3-NEXT: srlv $17, $7, $6
-; MMR3-NEXT: lwl $7, 0($2)
-; MMR3-NEXT: lwr $7, 3($2)
-; MMR3-NEXT: sll16 $3, $7, 1
-; MMR3-NEXT: xori $1, $6, 31
+; MMR3-NEXT: srl16 $4, $3, 3
+; MMR3-NEXT: andi $4, $4, 12
+; MMR3-NEXT: subu16 $5, $2, $4
+; MMR3-NEXT: lwp $6, 4($5)
+; MMR3-NEXT: andi16 $2, $3, 31
+; MMR3-NEXT: srlv $16, $7, $2
+; MMR3-NEXT: sll16 $3, $6, 1
+; MMR3-NEXT: xori $1, $2, 31
+; MMR3-NEXT: sllv $4, $3, $1
+; MMR3-NEXT: srlv $6, $6, $2
+; MMR3-NEXT: lw16 $17, 0($5)
+; MMR3-NEXT: sll16 $3, $17, 1
; MMR3-NEXT: sllv $3, $3, $1
-; MMR3-NEXT: or16 $3, $17
+; MMR3-NEXT: or16 $3, $6
; MMR3-NEXT: or16 $4, $16
-; MMR3-NEXT: lwl $8, 12($2)
-; MMR3-NEXT: lwr $8, 15($2)
-; MMR3-NEXT: srlv $2, $8, $6
-; MMR3-NEXT: sll16 $5, $5, 1
+; MMR3-NEXT: lw16 $5, 12($5)
+; MMR3-NEXT: srlv $6, $5, $2
+; MMR3-NEXT: sll16 $5, $7, 1
; MMR3-NEXT: sllv $5, $5, $1
-; MMR3-NEXT: or16 $5, $2
-; MMR3-NEXT: srlv $2, $7, $6
+; MMR3-NEXT: or16 $5, $6
+; MMR3-NEXT: srlv $2, $17, $2
; MMR3-NEXT: lwp $16, 32($sp)
; MMR3-NEXT: addiusp 40
; MMR3-NEXT: jrc $ra
@@ -735,40 +681,39 @@ define signext i128 @lshr_i128(i128 signext %a, i128 signext %b) {
; MMR6-NEXT: sw $16, 36($sp) # 4-byte Folded Spill
; MMR6-NEXT: .cfi_offset 16, -4
; MMR6-NEXT: li16 $2, 0
-; MMR6-NEXT: sw $7, 32($sp)
-; MMR6-NEXT: sw $6, 28($sp)
-; MMR6-NEXT: sw $5, 24($sp)
-; MMR6-NEXT: sw $4, 20($sp)
-; MMR6-NEXT: sw $2, 16($sp)
+; MMR6-NEXT: sw $7, 28($sp)
+; MMR6-NEXT: sw $6, 24($sp)
+; MMR6-NEXT: sw $5, 20($sp)
+; MMR6-NEXT: sw $4, 16($sp)
; MMR6-NEXT: sw $2, 12($sp)
; MMR6-NEXT: sw $2, 8($sp)
; MMR6-NEXT: sw $2, 4($sp)
-; MMR6-NEXT: addiu $2, $sp, 4
+; MMR6-NEXT: sw $2, 0($sp)
+; MMR6-NEXT: addiu $2, $sp, 0
; MMR6-NEXT: addiur2 $2, $2, 16
; MMR6-NEXT: lw $3, 68($sp)
-; MMR6-NEXT: ext $4, $3, 3, 4
-; MMR6-NEXT: subu16 $5, $2, $4
-; MMR6-NEXT: lw16 $4, 4($5)
-; MMR6-NEXT: sll16 $6, $4, 1
-; MMR6-NEXT: lw16 $7, 8($5)
-; MMR6-NEXT: andi16 $2, $3, 7
-; MMR6-NEXT: not16 $3, $2
-; MMR6-NEXT: andi16 $3, $3, 31
-; MMR6-NEXT: srlv $1, $7, $2
-; MMR6-NEXT: sllv $6, $6, $3
-; MMR6-NEXT: srlv $3, $4, $2
-; MMR6-NEXT: lw16 $16, 0($5)
+; MMR6-NEXT: srl16 $4, $3, 3
+; MMR6-NEXT: andi $4, $4, 12
+; MMR6-NEXT: subu16 $2, $2, $4
+; MMR6-NEXT: lw16 $4, 4($2)
+; MMR6-NEXT: lw16 $5, 8($2)
+; MMR6-NEXT: andi16 $6, $3, 31
+; MMR6-NEXT: srlv $1, $5, $6
+; MMR6-NEXT: sll16 $3, $4, 1
+; MMR6-NEXT: xori $7, $6, 31
+; MMR6-NEXT: sllv $8, $3, $7
+; MMR6-NEXT: srlv $3, $4, $6
+; MMR6-NEXT: lw16 $16, 0($2)
; MMR6-NEXT: sll16 $4, $16, 1
-; MMR6-NEXT: xori $8, $2, 31
-; MMR6-NEXT: sllv $4, $4, $8
+; MMR6-NEXT: sllv $4, $4, $7
; MMR6-NEXT: or $3, $3, $4
-; MMR6-NEXT: or $4, $1, $6
-; MMR6-NEXT: lw16 $5, 12($5)
-; MMR6-NEXT: srlv $1, $5, $2
-; MMR6-NEXT: sll16 $5, $7, 1
-; MMR6-NEXT: sllv $5, $5, $8
-; MMR6-NEXT: or $5, $1, $5
-; MMR6-NEXT: srlv $2, $16, $2
+; MMR6-NEXT: or $4, $1, $8
+; MMR6-NEXT: lw16 $2, 12($2)
+; MMR6-NEXT: srlv $1, $2, $6
+; MMR6-NEXT: sll16 $2, $5, 1
+; MMR6-NEXT: sllv $2, $2, $7
+; MMR6-NEXT: or $5, $1, $2
+; MMR6-NEXT: srlv $2, $16, $6
; MMR6-NEXT: lw $16, 36($sp) # 4-byte Folded Reload
; MMR6-NEXT: addiu $sp, $sp, 40
; MMR6-NEXT: jrc $ra
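
Note on the lshr_i128 hunks above: across every prefix, the unaligned lwl/lwr and swl/swr pairs are replaced by plain lw/sw (swp/lwp pairs on microMIPS), and the byte offset into the 32-byte stack buffer (value plus zero padding) is rounded to a word boundary. The old index was byte-granular (ext $, $, 3, 4, or srl 3 plus andi 15), which forced the unaligned accesses; the new one masks with 12 instead. A minimal annotated sketch of the new offset computation, with illustrative register roles inferred from the checks rather than stated by the patch:

    # n (the shift amount, 0..127) is in $1; $3 points at one end of the
    # 32-byte buffer holding the zero-extended i128
    srl  $2, $1, 3       # n / 8: byte offset of the first word, 0..15
    andi $2, $2, 12      # round down to a word boundary: 0, 4, 8 or 12
    subu $4, $3, $2      # word-aligned address, so plain lw/sw are legal
                         # (lshr indexes down; the shl form uses addu)
    # the residual amount n & 31 is applied with srlv/sllv afterwards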
diff --git a/llvm/test/CodeGen/Mips/llvm-ir/sdiv.ll b/llvm/test/CodeGen/Mips/llvm-ir/sdiv.ll
index af3d4f5..8d54886 100644
--- a/llvm/test/CodeGen/Mips/llvm-ir/sdiv.ll
+++ b/llvm/test/CodeGen/Mips/llvm-ir/sdiv.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=mips -mcpu=mips2 -relocation-model=pic \
-; RUN: -mips-jalr-reloc=false | FileCheck %s -check-prefixes=GP32,GP32R0R2
+; RUN: -mips-jalr-reloc=false | FileCheck %s -check-prefixes=MIPS2
; RUN: llc < %s -mtriple=mips -mcpu=mips32 -relocation-model=pic \
; RUN: -mips-jalr-reloc=false | FileCheck %s -check-prefixes=GP32,GP32R0R2
; RUN: llc < %s -mtriple=mips -mcpu=mips32r2 -relocation-model=pic \
@@ -13,9 +13,9 @@
; RUN: -mips-jalr-reloc=false | FileCheck %s -check-prefix=GP32R6
; RUN: llc < %s -mtriple=mips64 -mcpu=mips3 -relocation-model=pic \
-; RUN: -mips-jalr-reloc=false | FileCheck %s -check-prefixes=GP64,GP64R0R1
+; RUN: -mips-jalr-reloc=false | FileCheck %s -check-prefixes=MIPS3
; RUN: llc < %s -mtriple=mips64 -mcpu=mips4 -relocation-model=pic \
-; RUN: -mips-jalr-reloc=false | FileCheck %s -check-prefixes=GP64,GP64R0R1
+; RUN: -mips-jalr-reloc=false | FileCheck %s -check-prefixes=MIPS3
; RUN: llc < %s -mtriple=mips64 -mcpu=mips64 -relocation-model=pic \
; RUN: -mips-jalr-reloc=false | FileCheck %s -check-prefixes=GP64,GP64R0R1
; RUN: llc < %s -mtriple=mips64 -mcpu=mips64r2 -relocation-model=pic \
@@ -35,6 +35,11 @@
; RUN: FileCheck %s -check-prefix=MMR6
define signext i1 @sdiv_i1(i1 signext %a, i1 signext %b) {
+; MIPS2-LABEL: sdiv_i1:
+; MIPS2: # %bb.0: # %entry
+; MIPS2-NEXT: jr $ra
+; MIPS2-NEXT: move $2, $4
+;
; GP32-LABEL: sdiv_i1:
; GP32: # %bb.0: # %entry
; GP32-NEXT: jr $ra
@@ -45,6 +50,11 @@ define signext i1 @sdiv_i1(i1 signext %a, i1 signext %b) {
; GP32R6-NEXT: jr $ra
; GP32R6-NEXT: move $2, $4
;
+; MIPS3-LABEL: sdiv_i1:
+; MIPS3: # %bb.0: # %entry
+; MIPS3-NEXT: jr $ra
+; MIPS3-NEXT: move $2, $4
+;
; GP64-LABEL: sdiv_i1:
; GP64: # %bb.0: # %entry
; GP64-NEXT: jr $ra
@@ -70,6 +80,15 @@ entry:
}
define signext i8 @sdiv_i8(i8 signext %a, i8 signext %b) {
+; MIPS2-LABEL: sdiv_i8:
+; MIPS2: # %bb.0: # %entry
+; MIPS2-NEXT: div $zero, $4, $5
+; MIPS2-NEXT: teq $5, $zero, 7
+; MIPS2-NEXT: mflo $1
+; MIPS2-NEXT: sll $1, $1, 24
+; MIPS2-NEXT: jr $ra
+; MIPS2-NEXT: sra $2, $1, 24
+;
; GP32R0R2-LABEL: sdiv_i8:
; GP32R0R2: # %bb.0: # %entry
; GP32R0R2-NEXT: div $zero, $4, $5
@@ -94,6 +113,15 @@ define signext i8 @sdiv_i8(i8 signext %a, i8 signext %b) {
; GP32R6-NEXT: jr $ra
; GP32R6-NEXT: seb $2, $1
;
+; MIPS3-LABEL: sdiv_i8:
+; MIPS3: # %bb.0: # %entry
+; MIPS3-NEXT: div $zero, $4, $5
+; MIPS3-NEXT: teq $5, $zero, 7
+; MIPS3-NEXT: mflo $1
+; MIPS3-NEXT: sll $1, $1, 24
+; MIPS3-NEXT: jr $ra
+; MIPS3-NEXT: sra $2, $1, 24
+;
; GP64R0R1-LABEL: sdiv_i8:
; GP64R0R1: # %bb.0: # %entry
; GP64R0R1-NEXT: div $zero, $4, $5
@@ -138,6 +166,15 @@ entry:
}
define signext i16 @sdiv_i16(i16 signext %a, i16 signext %b) {
+; MIPS2-LABEL: sdiv_i16:
+; MIPS2: # %bb.0: # %entry
+; MIPS2-NEXT: div $zero, $4, $5
+; MIPS2-NEXT: teq $5, $zero, 7
+; MIPS2-NEXT: mflo $1
+; MIPS2-NEXT: sll $1, $1, 16
+; MIPS2-NEXT: jr $ra
+; MIPS2-NEXT: sra $2, $1, 16
+;
; GP32R0R2-LABEL: sdiv_i16:
; GP32R0R2: # %bb.0: # %entry
; GP32R0R2-NEXT: div $zero, $4, $5
@@ -162,6 +199,15 @@ define signext i16 @sdiv_i16(i16 signext %a, i16 signext %b) {
; GP32R6-NEXT: jr $ra
; GP32R6-NEXT: seh $2, $1
;
+; MIPS3-LABEL: sdiv_i16:
+; MIPS3: # %bb.0: # %entry
+; MIPS3-NEXT: div $zero, $4, $5
+; MIPS3-NEXT: teq $5, $zero, 7
+; MIPS3-NEXT: mflo $1
+; MIPS3-NEXT: sll $1, $1, 16
+; MIPS3-NEXT: jr $ra
+; MIPS3-NEXT: sra $2, $1, 16
+;
; GP64R0R1-LABEL: sdiv_i16:
; GP64R0R1: # %bb.0: # %entry
; GP64R0R1-NEXT: div $zero, $4, $5
@@ -206,6 +252,14 @@ entry:
}
define signext i32 @sdiv_i32(i32 signext %a, i32 signext %b) {
+; MIPS2-LABEL: sdiv_i32:
+; MIPS2: # %bb.0: # %entry
+; MIPS2-NEXT: div $zero, $4, $5
+; MIPS2-NEXT: teq $5, $zero, 7
+; MIPS2-NEXT: mflo $2
+; MIPS2-NEXT: jr $ra
+; MIPS2-NEXT: nop
+;
; GP32-LABEL: sdiv_i32:
; GP32: # %bb.0: # %entry
; GP32-NEXT: div $zero, $4, $5
@@ -219,6 +273,14 @@ define signext i32 @sdiv_i32(i32 signext %a, i32 signext %b) {
; GP32R6-NEXT: teq $5, $zero, 7
; GP32R6-NEXT: jrc $ra
;
+; MIPS3-LABEL: sdiv_i32:
+; MIPS3: # %bb.0: # %entry
+; MIPS3-NEXT: div $zero, $4, $5
+; MIPS3-NEXT: teq $5, $zero, 7
+; MIPS3-NEXT: mflo $2
+; MIPS3-NEXT: jr $ra
+; MIPS3-NEXT: nop
+;
; GP64-LABEL: sdiv_i32:
; GP64: # %bb.0: # %entry
; GP64-NEXT: div $zero, $4, $5
@@ -250,6 +312,22 @@ entry:
}
define signext i64 @sdiv_i64(i64 signext %a, i64 signext %b) {
+; MIPS2-LABEL: sdiv_i64:
+; MIPS2: # %bb.0: # %entry
+; MIPS2-NEXT: lui $2, %hi(_gp_disp)
+; MIPS2-NEXT: addiu $2, $2, %lo(_gp_disp)
+; MIPS2-NEXT: addiu $sp, $sp, -24
+; MIPS2-NEXT: .cfi_def_cfa_offset 24
+; MIPS2-NEXT: sw $ra, 20($sp) # 4-byte Folded Spill
+; MIPS2-NEXT: .cfi_offset 31, -4
+; MIPS2-NEXT: addu $gp, $2, $25
+; MIPS2-NEXT: lw $25, %call16(__divdi3)($gp)
+; MIPS2-NEXT: jalr $25
+; MIPS2-NEXT: nop
+; MIPS2-NEXT: lw $ra, 20($sp) # 4-byte Folded Reload
+; MIPS2-NEXT: jr $ra
+; MIPS2-NEXT: addiu $sp, $sp, 24
+;
; GP32-LABEL: sdiv_i64:
; GP32: # %bb.0: # %entry
; GP32-NEXT: lui $2, %hi(_gp_disp)
@@ -281,6 +359,14 @@ define signext i64 @sdiv_i64(i64 signext %a, i64 signext %b) {
; GP32R6-NEXT: jr $ra
; GP32R6-NEXT: addiu $sp, $sp, 24
;
+; MIPS3-LABEL: sdiv_i64:
+; MIPS3: # %bb.0: # %entry
+; MIPS3-NEXT: ddiv $zero, $4, $5
+; MIPS3-NEXT: teq $5, $zero, 7
+; MIPS3-NEXT: mflo $2
+; MIPS3-NEXT: jr $ra
+; MIPS3-NEXT: nop
+;
; GP64-LABEL: sdiv_i64:
; GP64: # %bb.0: # %entry
; GP64-NEXT: ddiv $zero, $4, $5
@@ -332,6 +418,30 @@ entry:
}
define signext i128 @sdiv_i128(i128 signext %a, i128 signext %b) {
+; MIPS2-LABEL: sdiv_i128:
+; MIPS2: # %bb.0: # %entry
+; MIPS2-NEXT: lui $2, %hi(_gp_disp)
+; MIPS2-NEXT: addiu $2, $2, %lo(_gp_disp)
+; MIPS2-NEXT: addiu $sp, $sp, -40
+; MIPS2-NEXT: .cfi_def_cfa_offset 40
+; MIPS2-NEXT: sw $ra, 36($sp) # 4-byte Folded Spill
+; MIPS2-NEXT: .cfi_offset 31, -4
+; MIPS2-NEXT: addu $gp, $2, $25
+; MIPS2-NEXT: lw $1, 60($sp)
+; MIPS2-NEXT: lw $2, 64($sp)
+; MIPS2-NEXT: lw $3, 68($sp)
+; MIPS2-NEXT: sw $3, 28($sp)
+; MIPS2-NEXT: sw $2, 24($sp)
+; MIPS2-NEXT: sw $1, 20($sp)
+; MIPS2-NEXT: lw $1, 56($sp)
+; MIPS2-NEXT: sw $1, 16($sp)
+; MIPS2-NEXT: lw $25, %call16(__divti3)($gp)
+; MIPS2-NEXT: jalr $25
+; MIPS2-NEXT: nop
+; MIPS2-NEXT: lw $ra, 36($sp) # 4-byte Folded Reload
+; MIPS2-NEXT: jr $ra
+; MIPS2-NEXT: addiu $sp, $sp, 40
+;
; GP32-LABEL: sdiv_i128:
; GP32: # %bb.0: # %entry
; GP32-NEXT: lui $2, %hi(_gp_disp)
@@ -379,6 +489,25 @@ define signext i128 @sdiv_i128(i128 signext %a, i128 signext %b) {
; GP32R6-NEXT: jr $ra
; GP32R6-NEXT: addiu $sp, $sp, 40
;
+; MIPS3-LABEL: sdiv_i128:
+; MIPS3: # %bb.0: # %entry
+; MIPS3-NEXT: daddiu $sp, $sp, -16
+; MIPS3-NEXT: .cfi_def_cfa_offset 16
+; MIPS3-NEXT: sd $ra, 8($sp) # 8-byte Folded Spill
+; MIPS3-NEXT: sd $gp, 0($sp) # 8-byte Folded Spill
+; MIPS3-NEXT: .cfi_offset 31, -8
+; MIPS3-NEXT: .cfi_offset 28, -16
+; MIPS3-NEXT: lui $1, %hi(%neg(%gp_rel(sdiv_i128)))
+; MIPS3-NEXT: daddu $1, $1, $25
+; MIPS3-NEXT: daddiu $gp, $1, %lo(%neg(%gp_rel(sdiv_i128)))
+; MIPS3-NEXT: ld $25, %call16(__divti3)($gp)
+; MIPS3-NEXT: jalr $25
+; MIPS3-NEXT: nop
+; MIPS3-NEXT: ld $gp, 0($sp) # 8-byte Folded Reload
+; MIPS3-NEXT: ld $ra, 8($sp) # 8-byte Folded Reload
+; MIPS3-NEXT: jr $ra
+; MIPS3-NEXT: daddiu $sp, $sp, 16
+;
; GP64-LABEL: sdiv_i128:
; GP64: # %bb.0: # %entry
; GP64-NEXT: daddiu $sp, $sp, -16
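
sdiv.ll gains dedicated MIPS2 and MIPS3 prefixes because mips2/mips3 (and mips4, folded into MIPS3) no longer match the output shared by the later ISAs under the old GP32/GP64 prefixes. The per-width pattern all the new blocks share is the trapping-division idiom; a sketch annotated directly from the checks above:

    div  $zero, $4, $5   # quotient -> LO, remainder -> HI
    teq  $5, $zero, 7    # trap if the divisor is zero; 7 is the
                         # conventional divide-by-zero trap code
    mflo $2              # quotient is the return value
    # i64 on 32-bit targets and i128 everywhere go through libcalls
    # (__divdi3, __divti3) instead of a hardware divide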
diff --git a/llvm/test/CodeGen/Mips/llvm-ir/shl.ll b/llvm/test/CodeGen/Mips/llvm-ir/shl.ll
index 81f089a..394890a 100644
--- a/llvm/test/CodeGen/Mips/llvm-ir/shl.ll
+++ b/llvm/test/CodeGen/Mips/llvm-ir/shl.ll
@@ -440,49 +440,36 @@ define signext i128 @shl_i128(i128 signext %a, i128 signext %b) {
; MIPS2: # %bb.0: # %entry
; MIPS2-NEXT: addiu $sp, $sp, -32
; MIPS2-NEXT: .cfi_def_cfa_offset 32
-; MIPS2-NEXT: swl $zero, 28($sp)
-; MIPS2-NEXT: swl $zero, 24($sp)
-; MIPS2-NEXT: swl $zero, 20($sp)
-; MIPS2-NEXT: swl $zero, 16($sp)
-; MIPS2-NEXT: swl $7, 12($sp)
-; MIPS2-NEXT: swl $6, 8($sp)
-; MIPS2-NEXT: swl $5, 4($sp)
-; MIPS2-NEXT: swl $4, 0($sp)
-; MIPS2-NEXT: swr $zero, 31($sp)
-; MIPS2-NEXT: swr $zero, 27($sp)
-; MIPS2-NEXT: swr $zero, 23($sp)
-; MIPS2-NEXT: swr $zero, 19($sp)
-; MIPS2-NEXT: swr $7, 15($sp)
-; MIPS2-NEXT: swr $6, 11($sp)
-; MIPS2-NEXT: swr $5, 7($sp)
-; MIPS2-NEXT: swr $4, 3($sp)
; MIPS2-NEXT: lw $1, 60($sp)
; MIPS2-NEXT: srl $2, $1, 3
-; MIPS2-NEXT: andi $2, $2, 15
+; MIPS2-NEXT: sw $7, 12($sp)
+; MIPS2-NEXT: sw $6, 8($sp)
+; MIPS2-NEXT: sw $5, 4($sp)
+; MIPS2-NEXT: sw $4, 0($sp)
+; MIPS2-NEXT: andi $2, $2, 12
; MIPS2-NEXT: addiu $3, $sp, 0
; MIPS2-NEXT: addu $4, $3, $2
-; MIPS2-NEXT: lwl $5, 8($4)
-; MIPS2-NEXT: lwr $5, 11($4)
-; MIPS2-NEXT: srl $2, $5, 1
-; MIPS2-NEXT: lwl $3, 4($4)
-; MIPS2-NEXT: lwr $3, 7($4)
-; MIPS2-NEXT: andi $1, $1, 7
-; MIPS2-NEXT: not $6, $1
-; MIPS2-NEXT: sllv $7, $3, $1
-; MIPS2-NEXT: srlv $6, $2, $6
-; MIPS2-NEXT: lwl $2, 0($4)
-; MIPS2-NEXT: lwr $2, 3($4)
-; MIPS2-NEXT: sllv $2, $2, $1
-; MIPS2-NEXT: srl $3, $3, 1
-; MIPS2-NEXT: xori $8, $1, 31
-; MIPS2-NEXT: srlv $3, $3, $8
-; MIPS2-NEXT: or $2, $2, $3
-; MIPS2-NEXT: or $3, $7, $6
+; MIPS2-NEXT: sw $zero, 28($sp)
+; MIPS2-NEXT: sw $zero, 24($sp)
+; MIPS2-NEXT: sw $zero, 20($sp)
+; MIPS2-NEXT: sw $zero, 16($sp)
+; MIPS2-NEXT: lw $5, 8($4)
+; MIPS2-NEXT: lw $2, 4($4)
+; MIPS2-NEXT: sllv $3, $2, $1
+; MIPS2-NEXT: srl $6, $5, 1
+; MIPS2-NEXT: andi $7, $1, 31
+; MIPS2-NEXT: xori $7, $7, 31
+; MIPS2-NEXT: srlv $6, $6, $7
+; MIPS2-NEXT: lw $8, 0($4)
+; MIPS2-NEXT: sllv $8, $8, $1
+; MIPS2-NEXT: srl $2, $2, 1
+; MIPS2-NEXT: srlv $2, $2, $7
+; MIPS2-NEXT: or $2, $8, $2
+; MIPS2-NEXT: or $3, $3, $6
; MIPS2-NEXT: sllv $5, $5, $1
-; MIPS2-NEXT: lwl $6, 12($4)
-; MIPS2-NEXT: lwr $6, 15($4)
+; MIPS2-NEXT: lw $6, 12($4)
; MIPS2-NEXT: srl $4, $6, 1
-; MIPS2-NEXT: srlv $4, $4, $8
+; MIPS2-NEXT: srlv $4, $4, $7
; MIPS2-NEXT: or $4, $5, $4
; MIPS2-NEXT: sllv $5, $6, $1
; MIPS2-NEXT: jr $ra
@@ -492,49 +479,36 @@ define signext i128 @shl_i128(i128 signext %a, i128 signext %b) {
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: addiu $sp, $sp, -32
; MIPS32-NEXT: .cfi_def_cfa_offset 32
-; MIPS32-NEXT: swl $zero, 28($sp)
-; MIPS32-NEXT: swl $zero, 24($sp)
-; MIPS32-NEXT: swl $zero, 20($sp)
-; MIPS32-NEXT: swl $zero, 16($sp)
-; MIPS32-NEXT: swl $7, 12($sp)
-; MIPS32-NEXT: swl $6, 8($sp)
-; MIPS32-NEXT: swl $5, 4($sp)
-; MIPS32-NEXT: swl $4, 0($sp)
-; MIPS32-NEXT: swr $zero, 31($sp)
-; MIPS32-NEXT: swr $zero, 27($sp)
-; MIPS32-NEXT: swr $zero, 23($sp)
-; MIPS32-NEXT: swr $zero, 19($sp)
-; MIPS32-NEXT: swr $7, 15($sp)
-; MIPS32-NEXT: swr $6, 11($sp)
-; MIPS32-NEXT: swr $5, 7($sp)
-; MIPS32-NEXT: swr $4, 3($sp)
; MIPS32-NEXT: lw $1, 60($sp)
; MIPS32-NEXT: srl $2, $1, 3
-; MIPS32-NEXT: andi $2, $2, 15
+; MIPS32-NEXT: sw $7, 12($sp)
+; MIPS32-NEXT: sw $6, 8($sp)
+; MIPS32-NEXT: sw $5, 4($sp)
+; MIPS32-NEXT: sw $4, 0($sp)
+; MIPS32-NEXT: andi $2, $2, 12
; MIPS32-NEXT: addiu $3, $sp, 0
; MIPS32-NEXT: addu $4, $3, $2
-; MIPS32-NEXT: lwl $5, 8($4)
-; MIPS32-NEXT: lwr $5, 11($4)
-; MIPS32-NEXT: srl $2, $5, 1
-; MIPS32-NEXT: lwl $3, 4($4)
-; MIPS32-NEXT: lwr $3, 7($4)
-; MIPS32-NEXT: andi $1, $1, 7
-; MIPS32-NEXT: not $6, $1
-; MIPS32-NEXT: sllv $7, $3, $1
-; MIPS32-NEXT: srlv $6, $2, $6
-; MIPS32-NEXT: lwl $2, 0($4)
-; MIPS32-NEXT: lwr $2, 3($4)
-; MIPS32-NEXT: sllv $2, $2, $1
-; MIPS32-NEXT: srl $3, $3, 1
-; MIPS32-NEXT: xori $8, $1, 31
-; MIPS32-NEXT: srlv $3, $3, $8
-; MIPS32-NEXT: or $2, $2, $3
-; MIPS32-NEXT: or $3, $7, $6
+; MIPS32-NEXT: sw $zero, 28($sp)
+; MIPS32-NEXT: sw $zero, 24($sp)
+; MIPS32-NEXT: sw $zero, 20($sp)
+; MIPS32-NEXT: sw $zero, 16($sp)
+; MIPS32-NEXT: lw $5, 8($4)
+; MIPS32-NEXT: lw $2, 4($4)
+; MIPS32-NEXT: sllv $3, $2, $1
+; MIPS32-NEXT: srl $6, $5, 1
+; MIPS32-NEXT: andi $7, $1, 31
+; MIPS32-NEXT: xori $7, $7, 31
+; MIPS32-NEXT: srlv $6, $6, $7
+; MIPS32-NEXT: lw $8, 0($4)
+; MIPS32-NEXT: sllv $8, $8, $1
+; MIPS32-NEXT: srl $2, $2, 1
+; MIPS32-NEXT: srlv $2, $2, $7
+; MIPS32-NEXT: or $2, $8, $2
+; MIPS32-NEXT: or $3, $3, $6
; MIPS32-NEXT: sllv $5, $5, $1
-; MIPS32-NEXT: lwl $6, 12($4)
-; MIPS32-NEXT: lwr $6, 15($4)
+; MIPS32-NEXT: lw $6, 12($4)
; MIPS32-NEXT: srl $4, $6, 1
-; MIPS32-NEXT: srlv $4, $4, $8
+; MIPS32-NEXT: srlv $4, $4, $7
; MIPS32-NEXT: or $4, $5, $4
; MIPS32-NEXT: sllv $5, $6, $1
; MIPS32-NEXT: jr $ra
@@ -544,48 +518,36 @@ define signext i128 @shl_i128(i128 signext %a, i128 signext %b) {
; MIPS32R2: # %bb.0: # %entry
; MIPS32R2-NEXT: addiu $sp, $sp, -32
; MIPS32R2-NEXT: .cfi_def_cfa_offset 32
-; MIPS32R2-NEXT: swl $zero, 28($sp)
-; MIPS32R2-NEXT: swl $zero, 24($sp)
-; MIPS32R2-NEXT: swl $zero, 20($sp)
-; MIPS32R2-NEXT: swl $zero, 16($sp)
-; MIPS32R2-NEXT: swl $7, 12($sp)
-; MIPS32R2-NEXT: swl $6, 8($sp)
-; MIPS32R2-NEXT: swl $5, 4($sp)
-; MIPS32R2-NEXT: swl $4, 0($sp)
-; MIPS32R2-NEXT: swr $zero, 31($sp)
-; MIPS32R2-NEXT: swr $zero, 27($sp)
-; MIPS32R2-NEXT: swr $zero, 23($sp)
-; MIPS32R2-NEXT: swr $zero, 19($sp)
-; MIPS32R2-NEXT: swr $7, 15($sp)
-; MIPS32R2-NEXT: swr $6, 11($sp)
-; MIPS32R2-NEXT: swr $5, 7($sp)
-; MIPS32R2-NEXT: swr $4, 3($sp)
; MIPS32R2-NEXT: lw $1, 60($sp)
-; MIPS32R2-NEXT: ext $2, $1, 3, 4
+; MIPS32R2-NEXT: srl $2, $1, 3
+; MIPS32R2-NEXT: sw $7, 12($sp)
+; MIPS32R2-NEXT: sw $6, 8($sp)
+; MIPS32R2-NEXT: sw $5, 4($sp)
+; MIPS32R2-NEXT: sw $4, 0($sp)
+; MIPS32R2-NEXT: andi $2, $2, 12
; MIPS32R2-NEXT: addiu $3, $sp, 0
; MIPS32R2-NEXT: addu $4, $3, $2
-; MIPS32R2-NEXT: lwl $5, 8($4)
-; MIPS32R2-NEXT: lwr $5, 11($4)
-; MIPS32R2-NEXT: srl $2, $5, 1
-; MIPS32R2-NEXT: lwl $3, 4($4)
-; MIPS32R2-NEXT: lwr $3, 7($4)
-; MIPS32R2-NEXT: andi $1, $1, 7
-; MIPS32R2-NEXT: not $6, $1
-; MIPS32R2-NEXT: sllv $7, $3, $1
-; MIPS32R2-NEXT: srlv $6, $2, $6
-; MIPS32R2-NEXT: lwl $2, 0($4)
-; MIPS32R2-NEXT: lwr $2, 3($4)
-; MIPS32R2-NEXT: sllv $2, $2, $1
-; MIPS32R2-NEXT: srl $3, $3, 1
-; MIPS32R2-NEXT: xori $8, $1, 31
-; MIPS32R2-NEXT: srlv $3, $3, $8
-; MIPS32R2-NEXT: or $2, $2, $3
-; MIPS32R2-NEXT: or $3, $7, $6
+; MIPS32R2-NEXT: sw $zero, 28($sp)
+; MIPS32R2-NEXT: sw $zero, 24($sp)
+; MIPS32R2-NEXT: sw $zero, 20($sp)
+; MIPS32R2-NEXT: sw $zero, 16($sp)
+; MIPS32R2-NEXT: lw $5, 8($4)
+; MIPS32R2-NEXT: lw $2, 4($4)
+; MIPS32R2-NEXT: sllv $3, $2, $1
+; MIPS32R2-NEXT: srl $6, $5, 1
+; MIPS32R2-NEXT: andi $7, $1, 31
+; MIPS32R2-NEXT: xori $7, $7, 31
+; MIPS32R2-NEXT: srlv $6, $6, $7
+; MIPS32R2-NEXT: lw $8, 0($4)
+; MIPS32R2-NEXT: sllv $8, $8, $1
+; MIPS32R2-NEXT: srl $2, $2, 1
+; MIPS32R2-NEXT: srlv $2, $2, $7
+; MIPS32R2-NEXT: or $2, $8, $2
+; MIPS32R2-NEXT: or $3, $3, $6
; MIPS32R2-NEXT: sllv $5, $5, $1
-; MIPS32R2-NEXT: lwl $6, 12($4)
-; MIPS32R2-NEXT: lwr $6, 15($4)
+; MIPS32R2-NEXT: lw $6, 12($4)
; MIPS32R2-NEXT: srl $4, $6, 1
-; MIPS32R2-NEXT: srlv $4, $4, $8
+; MIPS32R2-NEXT: srlv $4, $4, $7
; MIPS32R2-NEXT: or $4, $5, $4
; MIPS32R2-NEXT: sllv $5, $6, $1
; MIPS32R2-NEXT: jr $ra
@@ -596,11 +558,12 @@ define signext i128 @shl_i128(i128 signext %a, i128 signext %b) {
; MIPS32R6-NEXT: addiu $sp, $sp, -32
; MIPS32R6-NEXT: .cfi_def_cfa_offset 32
; MIPS32R6-NEXT: lw $1, 60($sp)
+; MIPS32R6-NEXT: srl $2, $1, 3
; MIPS32R6-NEXT: sw $7, 12($sp)
; MIPS32R6-NEXT: sw $6, 8($sp)
; MIPS32R6-NEXT: sw $5, 4($sp)
; MIPS32R6-NEXT: sw $4, 0($sp)
-; MIPS32R6-NEXT: ext $2, $1, 3, 4
+; MIPS32R6-NEXT: andi $2, $2, 12
; MIPS32R6-NEXT: addiu $3, $sp, 0
; MIPS32R6-NEXT: addu $4, $3, $2
; MIPS32R6-NEXT: sw $zero, 28($sp)
@@ -608,23 +571,22 @@ define signext i128 @shl_i128(i128 signext %a, i128 signext %b) {
; MIPS32R6-NEXT: sw $zero, 20($sp)
; MIPS32R6-NEXT: sw $zero, 16($sp)
; MIPS32R6-NEXT: lw $5, 8($4)
-; MIPS32R6-NEXT: srl $2, $5, 1
-; MIPS32R6-NEXT: lw $3, 4($4)
-; MIPS32R6-NEXT: andi $1, $1, 7
-; MIPS32R6-NEXT: not $6, $1
-; MIPS32R6-NEXT: sllv $7, $3, $1
-; MIPS32R6-NEXT: srlv $6, $2, $6
-; MIPS32R6-NEXT: lw $2, 0($4)
-; MIPS32R6-NEXT: sllv $2, $2, $1
-; MIPS32R6-NEXT: srl $3, $3, 1
-; MIPS32R6-NEXT: xori $8, $1, 31
-; MIPS32R6-NEXT: srlv $3, $3, $8
-; MIPS32R6-NEXT: or $2, $2, $3
-; MIPS32R6-NEXT: or $3, $7, $6
+; MIPS32R6-NEXT: lw $2, 4($4)
+; MIPS32R6-NEXT: sllv $3, $2, $1
+; MIPS32R6-NEXT: srl $6, $5, 1
+; MIPS32R6-NEXT: andi $7, $1, 31
+; MIPS32R6-NEXT: xori $7, $7, 31
+; MIPS32R6-NEXT: srlv $6, $6, $7
+; MIPS32R6-NEXT: lw $8, 0($4)
+; MIPS32R6-NEXT: sllv $8, $8, $1
+; MIPS32R6-NEXT: srl $2, $2, 1
+; MIPS32R6-NEXT: srlv $2, $2, $7
+; MIPS32R6-NEXT: or $2, $8, $2
+; MIPS32R6-NEXT: or $3, $3, $6
; MIPS32R6-NEXT: sllv $5, $5, $1
; MIPS32R6-NEXT: lw $6, 12($4)
; MIPS32R6-NEXT: srl $4, $6, 1
-; MIPS32R6-NEXT: srlv $4, $4, $8
+; MIPS32R6-NEXT: srlv $4, $4, $7
; MIPS32R6-NEXT: or $4, $5, $4
; MIPS32R6-NEXT: sllv $5, $6, $1
; MIPS32R6-NEXT: jr $ra
@@ -722,47 +684,32 @@ define signext i128 @shl_i128(i128 signext %a, i128 signext %b) {
; MMR3-NEXT: .cfi_offset 17, -4
; MMR3-NEXT: .cfi_offset 16, -8
; MMR3-NEXT: li16 $2, 0
-; MMR3-NEXT: swl $2, 28($sp)
-; MMR3-NEXT: swl $2, 24($sp)
-; MMR3-NEXT: swl $2, 20($sp)
-; MMR3-NEXT: swl $2, 16($sp)
-; MMR3-NEXT: swl $7, 12($sp)
-; MMR3-NEXT: swl $6, 8($sp)
-; MMR3-NEXT: swl $5, 4($sp)
-; MMR3-NEXT: swl $4, 0($sp)
-; MMR3-NEXT: swr $2, 31($sp)
-; MMR3-NEXT: swr $2, 27($sp)
-; MMR3-NEXT: swr $2, 23($sp)
-; MMR3-NEXT: swr $2, 19($sp)
-; MMR3-NEXT: swr $7, 15($sp)
-; MMR3-NEXT: swr $6, 11($sp)
-; MMR3-NEXT: swr $5, 7($sp)
-; MMR3-NEXT: swr $4, 3($sp)
+; MMR3-NEXT: sw $2, 28($sp)
+; MMR3-NEXT: sw $2, 24($sp)
+; MMR3-NEXT: sw $2, 20($sp)
+; MMR3-NEXT: sw $2, 16($sp)
+; MMR3-NEXT: swp $6, 8($sp)
+; MMR3-NEXT: swp $4, 0($sp)
; MMR3-NEXT: lw $2, 68($sp)
-; MMR3-NEXT: ext $3, $2, 3, 4
+; MMR3-NEXT: srl16 $3, $2, 3
+; MMR3-NEXT: andi $3, $3, 12
; MMR3-NEXT: addiur1sp $4, 0
; MMR3-NEXT: addu16 $4, $4, $3
-; MMR3-NEXT: lwl $6, 8($4)
-; MMR3-NEXT: lwr $6, 11($4)
-; MMR3-NEXT: srl16 $3, $6, 1
-; MMR3-NEXT: lwl $7, 4($4)
-; MMR3-NEXT: lwr $7, 7($4)
-; MMR3-NEXT: andi16 $5, $2, 7
-; MMR3-NEXT: not16 $2, $5
-; MMR3-NEXT: andi16 $2, $2, 31
+; MMR3-NEXT: lw16 $6, 8($4)
+; MMR3-NEXT: lw16 $7, 4($4)
+; MMR3-NEXT: andi16 $5, $2, 31
; MMR3-NEXT: sllv $16, $7, $5
-; MMR3-NEXT: srlv $3, $3, $2
-; MMR3-NEXT: lwl $1, 0($4)
-; MMR3-NEXT: lwr $1, 3($4)
-; MMR3-NEXT: sllv $17, $1, $5
-; MMR3-NEXT: srl16 $2, $7, 1
+; MMR3-NEXT: srl16 $2, $6, 1
; MMR3-NEXT: xori $1, $5, 31
+; MMR3-NEXT: srlv $3, $2, $1
+; MMR3-NEXT: lw16 $2, 0($4)
+; MMR3-NEXT: sllv $17, $2, $5
+; MMR3-NEXT: srl16 $2, $7, 1
; MMR3-NEXT: srlv $2, $2, $1
; MMR3-NEXT: or16 $2, $17
; MMR3-NEXT: or16 $3, $16
; MMR3-NEXT: sllv $6, $6, $5
-; MMR3-NEXT: lwl $7, 12($4)
-; MMR3-NEXT: lwr $7, 15($4)
+; MMR3-NEXT: lw16 $7, 12($4)
; MMR3-NEXT: srl16 $4, $7, 1
; MMR3-NEXT: srlv $4, $4, $1
; MMR3-NEXT: or16 $4, $6
@@ -785,30 +732,29 @@ define signext i128 @shl_i128(i128 signext %a, i128 signext %b) {
; MMR6-NEXT: sw $5, 4($sp)
; MMR6-NEXT: sw $4, 0($sp)
; MMR6-NEXT: lw $2, 60($sp)
-; MMR6-NEXT: ext $3, $2, 3, 4
+; MMR6-NEXT: srl16 $3, $2, 3
+; MMR6-NEXT: andi $3, $3, 12
; MMR6-NEXT: addiu $4, $sp, 0
; MMR6-NEXT: addu16 $4, $4, $3
-; MMR6-NEXT: lw16 $6, 8($4)
-; MMR6-NEXT: srl16 $3, $6, 1
-; MMR6-NEXT: lw16 $7, 4($4)
-; MMR6-NEXT: andi16 $5, $2, 7
-; MMR6-NEXT: not16 $2, $5
-; MMR6-NEXT: andi16 $2, $2, 31
-; MMR6-NEXT: sllv $1, $7, $5
-; MMR6-NEXT: srlv $3, $3, $2
+; MMR6-NEXT: lw16 $5, 8($4)
+; MMR6-NEXT: lw16 $3, 4($4)
+; MMR6-NEXT: andi16 $6, $2, 31
+; MMR6-NEXT: sllv $1, $3, $6
+; MMR6-NEXT: srl16 $2, $5, 1
+; MMR6-NEXT: xori $7, $6, 31
+; MMR6-NEXT: srlv $8, $2, $7
; MMR6-NEXT: lw16 $2, 0($4)
-; MMR6-NEXT: sllv $2, $2, $5
-; MMR6-NEXT: srl16 $7, $7, 1
-; MMR6-NEXT: xori $8, $5, 31
-; MMR6-NEXT: srlv $7, $7, $8
-; MMR6-NEXT: or $2, $2, $7
-; MMR6-NEXT: or $3, $1, $3
-; MMR6-NEXT: sllv $1, $6, $5
-; MMR6-NEXT: lw16 $6, 12($4)
-; MMR6-NEXT: srl16 $4, $6, 1
-; MMR6-NEXT: srlv $4, $4, $8
+; MMR6-NEXT: sllv $2, $2, $6
+; MMR6-NEXT: srl16 $3, $3, 1
+; MMR6-NEXT: srlv $3, $3, $7
+; MMR6-NEXT: or $2, $2, $3
+; MMR6-NEXT: or $3, $1, $8
+; MMR6-NEXT: sllv $1, $5, $6
+; MMR6-NEXT: lw16 $5, 12($4)
+; MMR6-NEXT: srl16 $4, $5, 1
+; MMR6-NEXT: srlv $4, $4, $7
; MMR6-NEXT: or $4, $1, $4
-; MMR6-NEXT: sllv $5, $6, $5
+; MMR6-NEXT: sllv $5, $5, $6
; MMR6-NEXT: addiu $sp, $sp, 32
; MMR6-NEXT: jrc $ra
entry:
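
The shl_i128 hunks show the same aligned-store rewrite, plus a change in how the complementary shift amount is formed: the old not of the amount (which relied on the shifter reading only the low five bits) becomes an explicit andi 31 / xori 31. The neighbouring shift-by-one is what keeps the two-word combine well defined when the residual amount is zero. A sketch with illustrative operands, following the MIPS32 shl form:

    # n is in $1; $2 holds one word of the value, $5 the neighbour that
    # feeds bits into it
    sllv $3, $2, $1      # w << s, where the shifter sees s = n & 31
    srl  $6, $5, 1       # pre-shift the neighbour by one bit...
    andi $7, $1, 31
    xori $7, $7, 31      # ...then shift by 31 - (n & 31), so the pair
    srlv $6, $6, $7      # computes w_next >> (32 - s) without an
                         # out-of-range shift by 32 when s == 0
    or   $3, $3, $6      # stitched result word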
diff --git a/llvm/test/CodeGen/Mips/llvm-ir/srem.ll b/llvm/test/CodeGen/Mips/llvm-ir/srem.ll
index 6349d5c..29cb34b8 100644
--- a/llvm/test/CodeGen/Mips/llvm-ir/srem.ll
+++ b/llvm/test/CodeGen/Mips/llvm-ir/srem.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=mips -mcpu=mips2 -relocation-model=pic \
-; RUN: -mips-jalr-reloc=false | FileCheck %s -check-prefixes=GP32
+; RUN: -mips-jalr-reloc=false | FileCheck %s -check-prefixes=MIPS2
; RUN: llc < %s -mtriple=mips -mcpu=mips32 -relocation-model=pic \
; RUN: -mips-jalr-reloc=false | FileCheck %s -check-prefixes=GP32
; RUN: llc < %s -mtriple=mips -mcpu=mips32r2 -relocation-model=pic \
@@ -13,9 +13,9 @@
; RUN: -mips-jalr-reloc=false | FileCheck %s -check-prefix=GP32R6
; RUN: llc < %s -mtriple=mips64 -mcpu=mips3 -relocation-model=pic \
-; RUN: -mips-jalr-reloc=false | FileCheck %s -check-prefixes=GP64
+; RUN: -mips-jalr-reloc=false | FileCheck %s -check-prefixes=MIPS3
; RUN: llc < %s -mtriple=mips64 -mcpu=mips4 -relocation-model=pic \
-; RUN: -mips-jalr-reloc=false | FileCheck %s -check-prefixes=GP64
+; RUN: -mips-jalr-reloc=false | FileCheck %s -check-prefixes=MIPS3
; RUN: llc < %s -mtriple=mips64 -mcpu=mips64 -relocation-model=pic \
; RUN: -mips-jalr-reloc=false | FileCheck %s -check-prefixes=GP64
; RUN: llc < %s -mtriple=mips64 -mcpu=mips64r2 -relocation-model=pic \
@@ -35,6 +35,11 @@
; RUN: FileCheck %s -check-prefix=MMR6
define signext i1 @srem_i1(i1 signext %a, i1 signext %b) {
+; MIPS2-LABEL: srem_i1:
+; MIPS2: # %bb.0: # %entry
+; MIPS2-NEXT: jr $ra
+; MIPS2-NEXT: addiu $2, $zero, 0
+;
; GP32-LABEL: srem_i1:
; GP32: # %bb.0: # %entry
; GP32-NEXT: jr $ra
@@ -45,6 +50,11 @@ define signext i1 @srem_i1(i1 signext %a, i1 signext %b) {
; GP32R6-NEXT: jr $ra
; GP32R6-NEXT: addiu $2, $zero, 0
;
+; MIPS3-LABEL: srem_i1:
+; MIPS3: # %bb.0: # %entry
+; MIPS3-NEXT: jr $ra
+; MIPS3-NEXT: addiu $2, $zero, 0
+;
; GP64-LABEL: srem_i1:
; GP64: # %bb.0: # %entry
; GP64-NEXT: jr $ra
@@ -70,6 +80,14 @@ entry:
}
define signext i8 @srem_i8(i8 signext %a, i8 signext %b) {
+; MIPS2-LABEL: srem_i8:
+; MIPS2: # %bb.0: # %entry
+; MIPS2-NEXT: div $zero, $4, $5
+; MIPS2-NEXT: teq $5, $zero, 7
+; MIPS2-NEXT: mfhi $2
+; MIPS2-NEXT: jr $ra
+; MIPS2-NEXT: nop
+;
; GP32-LABEL: srem_i8:
; GP32: # %bb.0: # %entry
; GP32-NEXT: div $zero, $4, $5
@@ -83,6 +101,14 @@ define signext i8 @srem_i8(i8 signext %a, i8 signext %b) {
; GP32R6-NEXT: teq $5, $zero, 7
; GP32R6-NEXT: jrc $ra
;
+; MIPS3-LABEL: srem_i8:
+; MIPS3: # %bb.0: # %entry
+; MIPS3-NEXT: div $zero, $4, $5
+; MIPS3-NEXT: teq $5, $zero, 7
+; MIPS3-NEXT: mfhi $2
+; MIPS3-NEXT: jr $ra
+; MIPS3-NEXT: nop
+;
; GP64-LABEL: srem_i8:
; GP64: # %bb.0: # %entry
; GP64-NEXT: div $zero, $4, $5
@@ -114,6 +140,14 @@ entry:
}
define signext i16 @srem_i16(i16 signext %a, i16 signext %b) {
+; MIPS2-LABEL: srem_i16:
+; MIPS2: # %bb.0: # %entry
+; MIPS2-NEXT: div $zero, $4, $5
+; MIPS2-NEXT: teq $5, $zero, 7
+; MIPS2-NEXT: mfhi $2
+; MIPS2-NEXT: jr $ra
+; MIPS2-NEXT: nop
+;
; GP32-LABEL: srem_i16:
; GP32: # %bb.0: # %entry
; GP32-NEXT: div $zero, $4, $5
@@ -127,6 +161,14 @@ define signext i16 @srem_i16(i16 signext %a, i16 signext %b) {
; GP32R6-NEXT: teq $5, $zero, 7
; GP32R6-NEXT: jrc $ra
;
+; MIPS3-LABEL: srem_i16:
+; MIPS3: # %bb.0: # %entry
+; MIPS3-NEXT: div $zero, $4, $5
+; MIPS3-NEXT: teq $5, $zero, 7
+; MIPS3-NEXT: mfhi $2
+; MIPS3-NEXT: jr $ra
+; MIPS3-NEXT: nop
+;
; GP64-LABEL: srem_i16:
; GP64: # %bb.0: # %entry
; GP64-NEXT: div $zero, $4, $5
@@ -158,6 +200,14 @@ entry:
}
define signext i32 @srem_i32(i32 signext %a, i32 signext %b) {
+; MIPS2-LABEL: srem_i32:
+; MIPS2: # %bb.0: # %entry
+; MIPS2-NEXT: div $zero, $4, $5
+; MIPS2-NEXT: teq $5, $zero, 7
+; MIPS2-NEXT: mfhi $2
+; MIPS2-NEXT: jr $ra
+; MIPS2-NEXT: nop
+;
; GP32-LABEL: srem_i32:
; GP32: # %bb.0: # %entry
; GP32-NEXT: div $zero, $4, $5
@@ -171,6 +221,14 @@ define signext i32 @srem_i32(i32 signext %a, i32 signext %b) {
; GP32R6-NEXT: teq $5, $zero, 7
; GP32R6-NEXT: jrc $ra
;
+; MIPS3-LABEL: srem_i32:
+; MIPS3: # %bb.0: # %entry
+; MIPS3-NEXT: div $zero, $4, $5
+; MIPS3-NEXT: teq $5, $zero, 7
+; MIPS3-NEXT: mfhi $2
+; MIPS3-NEXT: jr $ra
+; MIPS3-NEXT: nop
+;
; GP64-LABEL: srem_i32:
; GP64: # %bb.0: # %entry
; GP64-NEXT: div $zero, $4, $5
@@ -202,6 +260,22 @@ entry:
}
define signext i64 @srem_i64(i64 signext %a, i64 signext %b) {
+; MIPS2-LABEL: srem_i64:
+; MIPS2: # %bb.0: # %entry
+; MIPS2-NEXT: lui $2, %hi(_gp_disp)
+; MIPS2-NEXT: addiu $2, $2, %lo(_gp_disp)
+; MIPS2-NEXT: addiu $sp, $sp, -24
+; MIPS2-NEXT: .cfi_def_cfa_offset 24
+; MIPS2-NEXT: sw $ra, 20($sp) # 4-byte Folded Spill
+; MIPS2-NEXT: .cfi_offset 31, -4
+; MIPS2-NEXT: addu $gp, $2, $25
+; MIPS2-NEXT: lw $25, %call16(__moddi3)($gp)
+; MIPS2-NEXT: jalr $25
+; MIPS2-NEXT: nop
+; MIPS2-NEXT: lw $ra, 20($sp) # 4-byte Folded Reload
+; MIPS2-NEXT: jr $ra
+; MIPS2-NEXT: addiu $sp, $sp, 24
+;
; GP32-LABEL: srem_i64:
; GP32: # %bb.0: # %entry
; GP32-NEXT: lui $2, %hi(_gp_disp)
@@ -233,6 +307,14 @@ define signext i64 @srem_i64(i64 signext %a, i64 signext %b) {
; GP32R6-NEXT: jr $ra
; GP32R6-NEXT: addiu $sp, $sp, 24
;
+; MIPS3-LABEL: srem_i64:
+; MIPS3: # %bb.0: # %entry
+; MIPS3-NEXT: ddiv $zero, $4, $5
+; MIPS3-NEXT: teq $5, $zero, 7
+; MIPS3-NEXT: mfhi $2
+; MIPS3-NEXT: jr $ra
+; MIPS3-NEXT: nop
+;
; GP64-LABEL: srem_i64:
; GP64: # %bb.0: # %entry
; GP64-NEXT: ddiv $zero, $4, $5
@@ -284,6 +366,30 @@ entry:
}
define signext i128 @srem_i128(i128 signext %a, i128 signext %b) {
+; MIPS2-LABEL: srem_i128:
+; MIPS2: # %bb.0: # %entry
+; MIPS2-NEXT: lui $2, %hi(_gp_disp)
+; MIPS2-NEXT: addiu $2, $2, %lo(_gp_disp)
+; MIPS2-NEXT: addiu $sp, $sp, -40
+; MIPS2-NEXT: .cfi_def_cfa_offset 40
+; MIPS2-NEXT: sw $ra, 36($sp) # 4-byte Folded Spill
+; MIPS2-NEXT: .cfi_offset 31, -4
+; MIPS2-NEXT: addu $gp, $2, $25
+; MIPS2-NEXT: lw $1, 60($sp)
+; MIPS2-NEXT: lw $2, 64($sp)
+; MIPS2-NEXT: lw $3, 68($sp)
+; MIPS2-NEXT: sw $3, 28($sp)
+; MIPS2-NEXT: sw $2, 24($sp)
+; MIPS2-NEXT: sw $1, 20($sp)
+; MIPS2-NEXT: lw $1, 56($sp)
+; MIPS2-NEXT: sw $1, 16($sp)
+; MIPS2-NEXT: lw $25, %call16(__modti3)($gp)
+; MIPS2-NEXT: jalr $25
+; MIPS2-NEXT: nop
+; MIPS2-NEXT: lw $ra, 36($sp) # 4-byte Folded Reload
+; MIPS2-NEXT: jr $ra
+; MIPS2-NEXT: addiu $sp, $sp, 40
+;
; GP32-LABEL: srem_i128:
; GP32: # %bb.0: # %entry
; GP32-NEXT: lui $2, %hi(_gp_disp)
@@ -331,6 +437,25 @@ define signext i128 @srem_i128(i128 signext %a, i128 signext %b) {
; GP32R6-NEXT: jr $ra
; GP32R6-NEXT: addiu $sp, $sp, 40
;
+; MIPS3-LABEL: srem_i128:
+; MIPS3: # %bb.0: # %entry
+; MIPS3-NEXT: daddiu $sp, $sp, -16
+; MIPS3-NEXT: .cfi_def_cfa_offset 16
+; MIPS3-NEXT: sd $ra, 8($sp) # 8-byte Folded Spill
+; MIPS3-NEXT: sd $gp, 0($sp) # 8-byte Folded Spill
+; MIPS3-NEXT: .cfi_offset 31, -8
+; MIPS3-NEXT: .cfi_offset 28, -16
+; MIPS3-NEXT: lui $1, %hi(%neg(%gp_rel(srem_i128)))
+; MIPS3-NEXT: daddu $1, $1, $25
+; MIPS3-NEXT: daddiu $gp, $1, %lo(%neg(%gp_rel(srem_i128)))
+; MIPS3-NEXT: ld $25, %call16(__modti3)($gp)
+; MIPS3-NEXT: jalr $25
+; MIPS3-NEXT: nop
+; MIPS3-NEXT: ld $gp, 0($sp) # 8-byte Folded Reload
+; MIPS3-NEXT: ld $ra, 8($sp) # 8-byte Folded Reload
+; MIPS3-NEXT: jr $ra
+; MIPS3-NEXT: daddiu $sp, $sp, 16
+;
; GP64-LABEL: srem_i128:
; GP64: # %bb.0: # %entry
; GP64-NEXT: daddiu $sp, $sp, -16
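
srem mirrors sdiv except that the result is read from HI via mfhi, and the 128-bit case on mips3 exposes the N64 PIC prologue that materialises $gp before the __modti3 libcall. Annotated from the checks (the relocation operators are as emitted, not invented):

    lui    $1, %hi(%neg(%gp_rel(srem_i128)))   # high half of the negated
                                               # gp-relative offset
    daddu  $1, $1, $25                         # $25 ($t9) holds this
                                               # function's address under PIC
    daddiu $gp, $1, %lo(%neg(%gp_rel(srem_i128)))  # $gp now points at _gp
    ld     $25, %call16(__modti3)($gp)         # callee address from the GOT
    jalr   $25                                 # 128-bit srem in the runtime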
diff --git a/llvm/test/CodeGen/Mips/llvm-ir/two-consecutive-mult.ll b/llvm/test/CodeGen/Mips/llvm-ir/two-consecutive-mult.ll
new file mode 100644
index 0000000..db2c660
--- /dev/null
+++ b/llvm/test/CodeGen/Mips/llvm-ir/two-consecutive-mult.ll
@@ -0,0 +1,60 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=mips -mcpu=mips2 -O3 -relocation-model=pic \
+; RUN: -mips-jalr-reloc=false | FileCheck %s -check-prefixes=MIPS2
+; RUN: llc < %s -mtriple=mips -mcpu=mips32 -O3 -relocation-model=pic \
+; RUN: -mips-jalr-reloc=false | FileCheck %s -check-prefixes=MIPS32
+
+; RUN: llc < %s -mtriple=mips64 -mcpu=mips3 -O3 -relocation-model=pic \
+; RUN: -mips-jalr-reloc=false | FileCheck %s -check-prefixes=MIPS3
+; RUN: llc < %s -mtriple=mips64 -mcpu=mips64 -O3 -relocation-model=pic \
+; RUN: -mips-jalr-reloc=false | FileCheck %s -check-prefixes=MIPS64
+
+define signext i32 @mult_i32(i32 signext %a, i32 signext %b, i32 signext %c) {
+; MIPS2-LABEL: mult_i32:
+; MIPS2: # %bb.0: # %entry
+; MIPS2-NEXT: mult $4, $5
+; MIPS2-NEXT: mflo $1
+; MIPS2-NEXT: nop
+; MIPS2-NEXT: nop
+; MIPS2-NEXT: mult $1, $6
+; MIPS2-NEXT: mflo $2
+; MIPS2-NEXT: jr $ra
+; MIPS2-NEXT: nop
+;
+; MIPS32-LABEL: mult_i32:
+; MIPS32: # %bb.0: # %entry
+; MIPS32-NEXT: mul $1, $4, $5
+; MIPS32-NEXT: jr $ra
+; MIPS32-NEXT: mul $2, $1, $6
+;
+entry:
+ %mul = mul nsw i32 %a, %b
+ %mul1 = mul nsw i32 %mul, %c
+ ret i32 %mul1
+}
+
+define signext i64 @mul_i64(i64 signext %a, i64 signext %b, i64 signext %c) {
+; MIPS3-LABEL: mul_i64:
+; MIPS3: # %bb.0: # %entry
+; MIPS3-NEXT: dmult $4, $5
+; MIPS3-NEXT: mflo $1
+; MIPS3-NEXT: nop
+; MIPS3-NEXT: nop
+; MIPS3-NEXT: dmult $1, $6
+; MIPS3-NEXT: mflo $2
+; MIPS3-NEXT: jr $ra
+; MIPS3-NEXT: nop
+;
+; MIPS64-LABEL: mul_i64:
+; MIPS64: # %bb.0: # %entry
+; MIPS64-NEXT: dmult $4, $5
+; MIPS64-NEXT: mflo $1
+; MIPS64-NEXT: dmult $1, $6
+; MIPS64-NEXT: jr $ra
+; MIPS64-NEXT: mflo $2
+;
+entry:
+ %mul = mul i64 %a, %b
+ %mul1 = mul i64 %mul, %c
+ ret i64 %mul1
+}
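
This new test pins down why mips2/mips3 needed their own prefixes: early HI/LO-based cores have a hazard where a mult or div issued too soon after an mflo/mfhi can leave HI/LO unpredictable, so the backend conservatively separates them with two nops, while MIPS32's three-operand mul bypasses HI/LO entirely. A sketch of the hazard window, assuming the classic MIPS I-III two-instruction separation rule (the patch shows only the nops, not the rule itself):

    mult $4, $5       # a * b -> HI/LO
    mflo $1           # read the low word of the first product
    nop               # separation slot 1: the next mult must not issue yet
    nop               # separation slot 2
    mult $1, $6       # (a * b) * c can now start safely
    mflo $2           # final result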
diff --git a/llvm/test/CodeGen/Mips/llvm-ir/two-consecutive-sdiv.ll b/llvm/test/CodeGen/Mips/llvm-ir/two-consecutive-sdiv.ll
new file mode 100644
index 0000000..4ec5ecc
--- /dev/null
+++ b/llvm/test/CodeGen/Mips/llvm-ir/two-consecutive-sdiv.ll
@@ -0,0 +1,133 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=mips -mcpu=mips2 -relocation-model=pic \
+; RUN: -mips-jalr-reloc=false | FileCheck %s -check-prefixes=MIPS2
+; RUN: llc < %s -mtriple=mips -mcpu=mips32 -relocation-model=pic \
+; RUN: -mips-jalr-reloc=false | FileCheck %s -check-prefixes=MIPS32
+
+; RUN: llc < %s -mtriple=mips64 -mcpu=mips3 -relocation-model=pic \
+; RUN: -mips-jalr-reloc=false | FileCheck %s -check-prefixes=MIPS3
+; RUN: llc < %s -mtriple=mips64 -mcpu=mips64 -relocation-model=pic \
+; RUN: -mips-jalr-reloc=false | FileCheck %s -check-prefixes=MIPS64
+
+; RUN: llc < %s -mtriple=mips -mcpu=mips2 -O0 -relocation-model=pic \
+; RUN: -mips-jalr-reloc=false | FileCheck %s -check-prefixes=MIPS2-O0
+; RUN: llc < %s -mtriple=mips -mcpu=mips32 -O0 -relocation-model=pic \
+; RUN: -mips-jalr-reloc=false | FileCheck %s -check-prefixes=MIPS32-O0
+
+define signext i32 @sdiv_i32(i32 signext %a, i32 signext %b, i32 signext %c) {
+; MIPS2-LABEL: sdiv_i32:
+; MIPS2: # %bb.0: # %entry
+; MIPS2-NEXT: div $zero, $4, $5
+; MIPS2-NEXT: teq $5, $zero, 7
+; MIPS2-NEXT: mflo $1
+; MIPS2-NEXT: nop
+; MIPS2-NEXT: nop
+; MIPS2-NEXT: div $zero, $1, $6
+; MIPS2-NEXT: teq $6, $zero, 7
+; MIPS2-NEXT: mflo $2
+; MIPS2-NEXT: jr $ra
+; MIPS2-NEXT: nop
+;
+; MIPS32-LABEL: sdiv_i32:
+; MIPS32: # %bb.0: # %entry
+; MIPS32-NEXT: div $zero, $4, $5
+; MIPS32-NEXT: teq $5, $zero, 7
+; MIPS32-NEXT: mflo $1
+; MIPS32-NEXT: div $zero, $1, $6
+; MIPS32-NEXT: teq $6, $zero, 7
+; MIPS32-NEXT: jr $ra
+; MIPS32-NEXT: mflo $2
+;
+entry:
+ %sdiv = sdiv i32 %a, %b
+ %sdiv1 = sdiv i32 %sdiv, %c
+ ret i32 %sdiv1
+}
+
+define signext i64 @sdiv_i64(i64 signext %a, i64 signext %b, i64 signext %c) {
+; MIPS3-LABEL: sdiv_i64:
+; MIPS3: # %bb.0: # %entry
+; MIPS3-NEXT: ddiv $zero, $4, $5
+; MIPS3-NEXT: teq $5, $zero, 7
+; MIPS3-NEXT: mflo $1
+; MIPS3-NEXT: nop
+; MIPS3-NEXT: nop
+; MIPS3-NEXT: ddiv $zero, $1, $6
+; MIPS3-NEXT: teq $6, $zero, 7
+; MIPS3-NEXT: mflo $2
+; MIPS3-NEXT: jr $ra
+; MIPS3-NEXT: nop
+;
+; MIPS64-LABEL: sdiv_i64:
+; MIPS64: # %bb.0: # %entry
+; MIPS64-NEXT: ddiv $zero, $4, $5
+; MIPS64-NEXT: teq $5, $zero, 7
+; MIPS64-NEXT: mflo $1
+; MIPS64-NEXT: ddiv $zero, $1, $6
+; MIPS64-NEXT: teq $6, $zero, 7
+; MIPS64-NEXT: jr $ra
+; MIPS64-NEXT: mflo $2
+;
+entry:
+ %sdiv = sdiv i64 %a, %b
+ %sdiv1 = sdiv i64 %sdiv, %c
+ ret i64 %sdiv1
+}
+
+define signext i32 @sdiv_lw_sdiv_i32(i32 signext %a, i32 signext %b, i32 signext %c) {
+; MIPS2-O0-LABEL: sdiv_lw_sdiv_i32:
+; MIPS2-O0: # %bb.0: # %entry
+; MIPS2-O0-NEXT: addiu $sp, $sp, -16
+; MIPS2-O0-NEXT: .cfi_def_cfa_offset 16
+; MIPS2-O0-NEXT: sw $4, 12($sp)
+; MIPS2-O0-NEXT: sw $5, 8($sp)
+; MIPS2-O0-NEXT: sw $6, 4($sp)
+; MIPS2-O0-NEXT: lw $2, 12($sp)
+; MIPS2-O0-NEXT: lw $1, 8($sp)
+; MIPS2-O0-NEXT: div $zero, $2, $1
+; MIPS2-O0-NEXT: teq $1, $zero, 7
+; MIPS2-O0-NEXT: mflo $2
+; MIPS2-O0-NEXT: lw $1, 4($sp)
+; MIPS2-O0-NEXT: nop
+; MIPS2-O0-NEXT: div $zero, $2, $1
+; MIPS2-O0-NEXT: teq $1, $zero, 7
+; MIPS2-O0-NEXT: mflo $2
+; MIPS2-O0-NEXT: addiu $sp, $sp, 16
+; MIPS2-O0-NEXT: jr $ra
+; MIPS2-O0-NEXT: nop
+;
+; MIPS32-O0-LABEL: sdiv_lw_sdiv_i32:
+; MIPS32-O0: # %bb.0: # %entry
+; MIPS32-O0-NEXT: addiu $sp, $sp, -16
+; MIPS32-O0-NEXT: .cfi_def_cfa_offset 16
+; MIPS32-O0-NEXT: sw $4, 12($sp)
+; MIPS32-O0-NEXT: sw $5, 8($sp)
+; MIPS32-O0-NEXT: sw $6, 4($sp)
+; MIPS32-O0-NEXT: lw $2, 12($sp)
+; MIPS32-O0-NEXT: lw $1, 8($sp)
+; MIPS32-O0-NEXT: div $zero, $2, $1
+; MIPS32-O0-NEXT: teq $1, $zero, 7
+; MIPS32-O0-NEXT: mflo $2
+; MIPS32-O0-NEXT: lw $1, 4($sp)
+; MIPS32-O0-NEXT: div $zero, $2, $1
+; MIPS32-O0-NEXT: teq $1, $zero, 7
+; MIPS32-O0-NEXT: mflo $2
+; MIPS32-O0-NEXT: addiu $sp, $sp, 16
+; MIPS32-O0-NEXT: jr $ra
+; MIPS32-O0-NEXT: nop
+;
+entry:
+ %a.addr = alloca i32, align 4
+ %b.addr = alloca i32, align 4
+ %c.addr = alloca i32, align 4
+ store i32 %a, ptr %a.addr, align 4
+ store i32 %b, ptr %b.addr, align 4
+ store i32 %c, ptr %c.addr, align 4
+ %0 = load i32, ptr %a.addr, align 4
+ %1 = load i32, ptr %b.addr, align 4
+ %sdiv = sdiv i32 %0, %1
+ %2 = load i32, ptr %c.addr, align 4
+ %sdiv1 = sdiv i32 %sdiv, %2
+ ret i32 %sdiv1
+}
+
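The -O0 variants make the hazard handling visible at instruction granularity: the reload of %c already fills one of the two separation slots, so MIPS2-O0 needs only a single nop before the second div, and MIPS32-O0 needs none. A sketch taken from the MIPS2-O0 checks above:

    mflo $2             # quotient of the first division
    lw   $1, 4($sp)     # reload %c; counts as one separating instruction
    nop                 # the second separating instruction
    div  $zero, $2, $1  # second division issues outside the hazard window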
diff --git a/llvm/test/CodeGen/Mips/llvm-ir/two-consecutive-srem.ll b/llvm/test/CodeGen/Mips/llvm-ir/two-consecutive-srem.ll
new file mode 100644
index 0000000..4f729b0
--- /dev/null
+++ b/llvm/test/CodeGen/Mips/llvm-ir/two-consecutive-srem.ll
@@ -0,0 +1,133 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=mips -mcpu=mips2 -relocation-model=pic \
+; RUN: -mips-jalr-reloc=false | FileCheck %s -check-prefixes=MIPS2
+; RUN: llc < %s -mtriple=mips -mcpu=mips32 -relocation-model=pic \
+; RUN: -mips-jalr-reloc=false | FileCheck %s -check-prefixes=MIPS32
+
+; RUN: llc < %s -mtriple=mips64 -mcpu=mips3 -relocation-model=pic \
+; RUN: -mips-jalr-reloc=false | FileCheck %s -check-prefixes=MIPS3
+; RUN: llc < %s -mtriple=mips64 -mcpu=mips64 -relocation-model=pic \
+; RUN: -mips-jalr-reloc=false | FileCheck %s -check-prefixes=MIPS64
+
+; RUN: llc < %s -mtriple=mips -mcpu=mips2 -O0 -relocation-model=pic \
+; RUN: -mips-jalr-reloc=false | FileCheck %s -check-prefixes=MIPS2-O0
+; RUN: llc < %s -mtriple=mips -mcpu=mips32 -O0 -relocation-model=pic \
+; RUN: -mips-jalr-reloc=false | FileCheck %s -check-prefixes=MIPS32-O0
+
+define signext i32 @srem_i32(i32 signext %a, i32 signext %b, i32 signext %c) {
+; MIPS2-LABEL: srem_i32:
+; MIPS2: # %bb.0: # %entry
+; MIPS2-NEXT: div $zero, $4, $5
+; MIPS2-NEXT: teq $5, $zero, 7
+; MIPS2-NEXT: mfhi $1
+; MIPS2-NEXT: nop
+; MIPS2-NEXT: nop
+; MIPS2-NEXT: div $zero, $1, $6
+; MIPS2-NEXT: teq $6, $zero, 7
+; MIPS2-NEXT: mfhi $2
+; MIPS2-NEXT: jr $ra
+; MIPS2-NEXT: nop
+;
+; MIPS32-LABEL: srem_i32:
+; MIPS32: # %bb.0: # %entry
+; MIPS32-NEXT: div $zero, $4, $5
+; MIPS32-NEXT: teq $5, $zero, 7
+; MIPS32-NEXT: mfhi $1
+; MIPS32-NEXT: div $zero, $1, $6
+; MIPS32-NEXT: teq $6, $zero, 7
+; MIPS32-NEXT: jr $ra
+; MIPS32-NEXT: mfhi $2
+;
+entry:
+ %rem = srem i32 %a, %b
+ %rem1 = srem i32 %rem, %c
+ ret i32 %rem1
+}
+
+define signext i64 @srem_i64(i64 signext %a, i64 signext %b, i64 signext %c) {
+; MIPS3-LABEL: srem_i64:
+; MIPS3: # %bb.0: # %entry
+; MIPS3-NEXT: ddiv $zero, $4, $5
+; MIPS3-NEXT: teq $5, $zero, 7
+; MIPS3-NEXT: mfhi $1
+; MIPS3-NEXT: nop
+; MIPS3-NEXT: nop
+; MIPS3-NEXT: ddiv $zero, $1, $6
+; MIPS3-NEXT: teq $6, $zero, 7
+; MIPS3-NEXT: mfhi $2
+; MIPS3-NEXT: jr $ra
+; MIPS3-NEXT: nop
+;
+; MIPS64-LABEL: srem_i64:
+; MIPS64: # %bb.0: # %entry
+; MIPS64-NEXT: ddiv $zero, $4, $5
+; MIPS64-NEXT: teq $5, $zero, 7
+; MIPS64-NEXT: mfhi $1
+; MIPS64-NEXT: ddiv $zero, $1, $6
+; MIPS64-NEXT: teq $6, $zero, 7
+; MIPS64-NEXT: jr $ra
+; MIPS64-NEXT: mfhi $2
+;
+entry:
+ %rem = srem i64 %a, %b
+ %rem1 = srem i64 %rem, %c
+ ret i64 %rem1
+}
+
+define signext i32 @srem_lw_srem_i32(i32 signext %a, i32 signext %b, i32 signext %c) {
+; MIPS2-O0-LABEL: srem_lw_srem_i32:
+; MIPS2-O0: # %bb.0: # %entry
+; MIPS2-O0-NEXT: addiu $sp, $sp, -16
+; MIPS2-O0-NEXT: .cfi_def_cfa_offset 16
+; MIPS2-O0-NEXT: sw $4, 12($sp)
+; MIPS2-O0-NEXT: sw $5, 8($sp)
+; MIPS2-O0-NEXT: sw $6, 4($sp)
+; MIPS2-O0-NEXT: lw $2, 12($sp)
+; MIPS2-O0-NEXT: lw $1, 8($sp)
+; MIPS2-O0-NEXT: div $zero, $2, $1
+; MIPS2-O0-NEXT: teq $1, $zero, 7
+; MIPS2-O0-NEXT: mfhi $2
+; MIPS2-O0-NEXT: lw $1, 4($sp)
+; MIPS2-O0-NEXT: nop
+; MIPS2-O0-NEXT: div $zero, $2, $1
+; MIPS2-O0-NEXT: teq $1, $zero, 7
+; MIPS2-O0-NEXT: mfhi $2
+; MIPS2-O0-NEXT: addiu $sp, $sp, 16
+; MIPS2-O0-NEXT: jr $ra
+; MIPS2-O0-NEXT: nop
+;
+; MIPS32-O0-LABEL: srem_lw_srem_i32:
+; MIPS32-O0: # %bb.0: # %entry
+; MIPS32-O0-NEXT: addiu $sp, $sp, -16
+; MIPS32-O0-NEXT: .cfi_def_cfa_offset 16
+; MIPS32-O0-NEXT: sw $4, 12($sp)
+; MIPS32-O0-NEXT: sw $5, 8($sp)
+; MIPS32-O0-NEXT: sw $6, 4($sp)
+; MIPS32-O0-NEXT: lw $2, 12($sp)
+; MIPS32-O0-NEXT: lw $1, 8($sp)
+; MIPS32-O0-NEXT: div $zero, $2, $1
+; MIPS32-O0-NEXT: teq $1, $zero, 7
+; MIPS32-O0-NEXT: mfhi $2
+; MIPS32-O0-NEXT: lw $1, 4($sp)
+; MIPS32-O0-NEXT: div $zero, $2, $1
+; MIPS32-O0-NEXT: teq $1, $zero, 7
+; MIPS32-O0-NEXT: mfhi $2
+; MIPS32-O0-NEXT: addiu $sp, $sp, 16
+; MIPS32-O0-NEXT: jr $ra
+; MIPS32-O0-NEXT: nop
+;
+entry:
+ %a.addr = alloca i32, align 4
+ %b.addr = alloca i32, align 4
+ %c.addr = alloca i32, align 4
+ store i32 %a, ptr %a.addr, align 4
+ store i32 %b, ptr %b.addr, align 4
+ store i32 %c, ptr %c.addr, align 4
+ %0 = load i32, ptr %a.addr, align 4
+ %1 = load i32, ptr %b.addr, align 4
+ %rem = srem i32 %0, %1
+ %2 = load i32, ptr %c.addr, align 4
+ %rem1 = srem i32 %rem, %2
+ ret i32 %rem1
+}
+
diff --git a/llvm/test/CodeGen/Mips/llvm-ir/two-consecutive-udiv.ll b/llvm/test/CodeGen/Mips/llvm-ir/two-consecutive-udiv.ll
new file mode 100644
index 0000000..97ac0d8
--- /dev/null
+++ b/llvm/test/CodeGen/Mips/llvm-ir/two-consecutive-udiv.ll
@@ -0,0 +1,133 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=mips -mcpu=mips2 -relocation-model=pic \
+; RUN: -mips-jalr-reloc=false | FileCheck %s -check-prefixes=MIPS2
+; RUN: llc < %s -mtriple=mips -mcpu=mips32 -relocation-model=pic \
+; RUN: -mips-jalr-reloc=false | FileCheck %s -check-prefixes=MIPS32
+
+; RUN: llc < %s -mtriple=mips64 -mcpu=mips3 -relocation-model=pic \
+; RUN: -mips-jalr-reloc=false | FileCheck %s -check-prefixes=MIPS3
+; RUN: llc < %s -mtriple=mips64 -mcpu=mips64 -relocation-model=pic \
+; RUN: -mips-jalr-reloc=false | FileCheck %s -check-prefixes=MIPS64
+
+; RUN: llc < %s -mtriple=mips -mcpu=mips2 -O0 -relocation-model=pic \
+; RUN: -mips-jalr-reloc=false | FileCheck %s -check-prefixes=MIPS2-O0
+; RUN: llc < %s -mtriple=mips -mcpu=mips32 -O0 -relocation-model=pic \
+; RUN: -mips-jalr-reloc=false | FileCheck %s -check-prefixes=MIPS32-O0
+
+define signext i32 @udiv_i32(i32 signext %a, i32 signext %b, i32 signext %c) {
+; MIPS2-LABEL: udiv_i32:
+; MIPS2: # %bb.0: # %entry
+; MIPS2-NEXT: divu $zero, $4, $5
+; MIPS2-NEXT: teq $5, $zero, 7
+; MIPS2-NEXT: mflo $1
+; MIPS2-NEXT: nop
+; MIPS2-NEXT: nop
+; MIPS2-NEXT: divu $zero, $1, $6
+; MIPS2-NEXT: teq $6, $zero, 7
+; MIPS2-NEXT: mflo $2
+; MIPS2-NEXT: jr $ra
+; MIPS2-NEXT: nop
+;
+; MIPS32-LABEL: udiv_i32:
+; MIPS32: # %bb.0: # %entry
+; MIPS32-NEXT: divu $zero, $4, $5
+; MIPS32-NEXT: teq $5, $zero, 7
+; MIPS32-NEXT: mflo $1
+; MIPS32-NEXT: divu $zero, $1, $6
+; MIPS32-NEXT: teq $6, $zero, 7
+; MIPS32-NEXT: jr $ra
+; MIPS32-NEXT: mflo $2
+;
+entry:
+ %udiv = udiv i32 %a, %b
+ %udiv1 = udiv i32 %udiv, %c
+ ret i32 %udiv1
+}
+
+define signext i64 @udiv_i64(i64 signext %a, i64 signext %b, i64 signext %c) {
+; MIPS3-LABEL: udiv_i64:
+; MIPS3: # %bb.0: # %entry
+; MIPS3-NEXT: ddivu $zero, $4, $5
+; MIPS3-NEXT: teq $5, $zero, 7
+; MIPS3-NEXT: mflo $1
+; MIPS3-NEXT: nop
+; MIPS3-NEXT: nop
+; MIPS3-NEXT: ddivu $zero, $1, $6
+; MIPS3-NEXT: teq $6, $zero, 7
+; MIPS3-NEXT: mflo $2
+; MIPS3-NEXT: jr $ra
+; MIPS3-NEXT: nop
+;
+; MIPS64-LABEL: udiv_i64:
+; MIPS64: # %bb.0: # %entry
+; MIPS64-NEXT: ddivu $zero, $4, $5
+; MIPS64-NEXT: teq $5, $zero, 7
+; MIPS64-NEXT: mflo $1
+; MIPS64-NEXT: ddivu $zero, $1, $6
+; MIPS64-NEXT: teq $6, $zero, 7
+; MIPS64-NEXT: jr $ra
+; MIPS64-NEXT: mflo $2
+;
+entry:
+ %udiv = udiv i64 %a, %b
+ %udiv1 = udiv i64 %udiv, %c
+ ret i64 %udiv1
+}
+
+define signext i32 @udiv_lw_udiv_i32(i32 signext %a, i32 signext %b, i32 signext %c) {
+; MIPS2-O0-LABEL: udiv_lw_udiv_i32:
+; MIPS2-O0: # %bb.0: # %entry
+; MIPS2-O0-NEXT: addiu $sp, $sp, -16
+; MIPS2-O0-NEXT: .cfi_def_cfa_offset 16
+; MIPS2-O0-NEXT: sw $4, 12($sp)
+; MIPS2-O0-NEXT: sw $5, 8($sp)
+; MIPS2-O0-NEXT: sw $6, 4($sp)
+; MIPS2-O0-NEXT: lw $2, 12($sp)
+; MIPS2-O0-NEXT: lw $1, 8($sp)
+; MIPS2-O0-NEXT: divu $zero, $2, $1
+; MIPS2-O0-NEXT: teq $1, $zero, 7
+; MIPS2-O0-NEXT: mflo $2
+; MIPS2-O0-NEXT: lw $1, 4($sp)
+; MIPS2-O0-NEXT: nop
+; MIPS2-O0-NEXT: divu $zero, $2, $1
+; MIPS2-O0-NEXT: teq $1, $zero, 7
+; MIPS2-O0-NEXT: mflo $2
+; MIPS2-O0-NEXT: addiu $sp, $sp, 16
+; MIPS2-O0-NEXT: jr $ra
+; MIPS2-O0-NEXT: nop
+;
+; MIPS32-O0-LABEL: udiv_lw_udiv_i32:
+; MIPS32-O0: # %bb.0: # %entry
+; MIPS32-O0-NEXT: addiu $sp, $sp, -16
+; MIPS32-O0-NEXT: .cfi_def_cfa_offset 16
+; MIPS32-O0-NEXT: sw $4, 12($sp)
+; MIPS32-O0-NEXT: sw $5, 8($sp)
+; MIPS32-O0-NEXT: sw $6, 4($sp)
+; MIPS32-O0-NEXT: lw $2, 12($sp)
+; MIPS32-O0-NEXT: lw $1, 8($sp)
+; MIPS32-O0-NEXT: divu $zero, $2, $1
+; MIPS32-O0-NEXT: teq $1, $zero, 7
+; MIPS32-O0-NEXT: mflo $2
+; MIPS32-O0-NEXT: lw $1, 4($sp)
+; MIPS32-O0-NEXT: divu $zero, $2, $1
+; MIPS32-O0-NEXT: teq $1, $zero, 7
+; MIPS32-O0-NEXT: mflo $2
+; MIPS32-O0-NEXT: addiu $sp, $sp, 16
+; MIPS32-O0-NEXT: jr $ra
+; MIPS32-O0-NEXT: nop
+;
+entry:
+ %a.addr = alloca i32, align 4
+ %b.addr = alloca i32, align 4
+ %c.addr = alloca i32, align 4
+ store i32 %a, ptr %a.addr, align 4
+ store i32 %b, ptr %b.addr, align 4
+ store i32 %c, ptr %c.addr, align 4
+ %0 = load i32, ptr %a.addr, align 4
+ %1 = load i32, ptr %b.addr, align 4
+ %udiv = udiv i32 %0, %1
+ %2 = load i32, ptr %c.addr, align 4
+ %udiv1 = udiv i32 %udiv, %2
+ ret i32 %udiv1
+}
+
diff --git a/llvm/test/CodeGen/Mips/llvm-ir/two-consecutive-urem.ll b/llvm/test/CodeGen/Mips/llvm-ir/two-consecutive-urem.ll
new file mode 100644
index 0000000..e1819f1
--- /dev/null
+++ b/llvm/test/CodeGen/Mips/llvm-ir/two-consecutive-urem.ll
@@ -0,0 +1,133 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=mips -mcpu=mips2 -relocation-model=pic \
+; RUN: -mips-jalr-reloc=false | FileCheck %s -check-prefixes=MIPS2
+; RUN: llc < %s -mtriple=mips -mcpu=mips32 -relocation-model=pic \
+; RUN: -mips-jalr-reloc=false | FileCheck %s -check-prefixes=MIPS32
+
+; RUN: llc < %s -mtriple=mips64 -mcpu=mips3 -relocation-model=pic \
+; RUN: -mips-jalr-reloc=false | FileCheck %s -check-prefixes=MIPS3
+; RUN: llc < %s -mtriple=mips64 -mcpu=mips64 -relocation-model=pic \
+; RUN: -mips-jalr-reloc=false | FileCheck %s -check-prefixes=MIPS64
+
+; RUN: llc < %s -mtriple=mips -mcpu=mips2 -O0 -relocation-model=pic \
+; RUN: -mips-jalr-reloc=false | FileCheck %s -check-prefixes=MIPS2-O0
+; RUN: llc < %s -mtriple=mips -mcpu=mips32 -O0 -relocation-model=pic \
+; RUN: -mips-jalr-reloc=false | FileCheck %s -check-prefixes=MIPS32-O0
+
+define signext i32 @urem_i32(i32 signext %a, i32 signext %b, i32 signext %c) {
+; MIPS2-LABEL: urem_i32:
+; MIPS2: # %bb.0: # %entry
+; MIPS2-NEXT: divu $zero, $4, $5
+; MIPS2-NEXT: teq $5, $zero, 7
+; MIPS2-NEXT: mfhi $1
+; MIPS2-NEXT: nop
+; MIPS2-NEXT: nop
+; MIPS2-NEXT: divu $zero, $1, $6
+; MIPS2-NEXT: teq $6, $zero, 7
+; MIPS2-NEXT: mfhi $2
+; MIPS2-NEXT: jr $ra
+; MIPS2-NEXT: nop
+;
+; MIPS32-LABEL: urem_i32:
+; MIPS32: # %bb.0: # %entry
+; MIPS32-NEXT: divu $zero, $4, $5
+; MIPS32-NEXT: teq $5, $zero, 7
+; MIPS32-NEXT: mfhi $1
+; MIPS32-NEXT: divu $zero, $1, $6
+; MIPS32-NEXT: teq $6, $zero, 7
+; MIPS32-NEXT: jr $ra
+; MIPS32-NEXT: mfhi $2
+;
+entry:
+ %urem = urem i32 %a, %b
+ %urem1 = urem i32 %urem, %c
+ ret i32 %urem1
+}
+
+define signext i64 @urem_i64(i64 signext %a, i64 signext %b, i64 signext %c) {
+; MIPS3-LABEL: urem_i64:
+; MIPS3: # %bb.0: # %entry
+; MIPS3-NEXT: ddivu $zero, $4, $5
+; MIPS3-NEXT: teq $5, $zero, 7
+; MIPS3-NEXT: mfhi $1
+; MIPS3-NEXT: nop
+; MIPS3-NEXT: nop
+; MIPS3-NEXT: ddivu $zero, $1, $6
+; MIPS3-NEXT: teq $6, $zero, 7
+; MIPS3-NEXT: mfhi $2
+; MIPS3-NEXT: jr $ra
+; MIPS3-NEXT: nop
+;
+; MIPS64-LABEL: urem_i64:
+; MIPS64: # %bb.0: # %entry
+; MIPS64-NEXT: ddivu $zero, $4, $5
+; MIPS64-NEXT: teq $5, $zero, 7
+; MIPS64-NEXT: mfhi $1
+; MIPS64-NEXT: ddivu $zero, $1, $6
+; MIPS64-NEXT: teq $6, $zero, 7
+; MIPS64-NEXT: jr $ra
+; MIPS64-NEXT: mfhi $2
+;
+entry:
+ %urem = urem i64 %a, %b
+ %urem1 = urem i64 %urem, %c
+ ret i64 %urem1
+}
+
+define signext i32 @urem_lw_urem_i32(i32 signext %a, i32 signext %b, i32 signext %c) {
+; MIPS2-O0-LABEL: urem_lw_urem_i32:
+; MIPS2-O0: # %bb.0: # %entry
+; MIPS2-O0-NEXT: addiu $sp, $sp, -16
+; MIPS2-O0-NEXT: .cfi_def_cfa_offset 16
+; MIPS2-O0-NEXT: sw $4, 12($sp)
+; MIPS2-O0-NEXT: sw $5, 8($sp)
+; MIPS2-O0-NEXT: sw $6, 4($sp)
+; MIPS2-O0-NEXT: lw $2, 12($sp)
+; MIPS2-O0-NEXT: lw $1, 8($sp)
+; MIPS2-O0-NEXT: divu $zero, $2, $1
+; MIPS2-O0-NEXT: teq $1, $zero, 7
+; MIPS2-O0-NEXT: mfhi $2
+; MIPS2-O0-NEXT: lw $1, 4($sp)
+; MIPS2-O0-NEXT: nop
+; MIPS2-O0-NEXT: divu $zero, $2, $1
+; MIPS2-O0-NEXT: teq $1, $zero, 7
+; MIPS2-O0-NEXT: mfhi $2
+; MIPS2-O0-NEXT: addiu $sp, $sp, 16
+; MIPS2-O0-NEXT: jr $ra
+; MIPS2-O0-NEXT: nop
+;
+; MIPS32-O0-LABEL: urem_lw_urem_i32:
+; MIPS32-O0: # %bb.0: # %entry
+; MIPS32-O0-NEXT: addiu $sp, $sp, -16
+; MIPS32-O0-NEXT: .cfi_def_cfa_offset 16
+; MIPS32-O0-NEXT: sw $4, 12($sp)
+; MIPS32-O0-NEXT: sw $5, 8($sp)
+; MIPS32-O0-NEXT: sw $6, 4($sp)
+; MIPS32-O0-NEXT: lw $2, 12($sp)
+; MIPS32-O0-NEXT: lw $1, 8($sp)
+; MIPS32-O0-NEXT: divu $zero, $2, $1
+; MIPS32-O0-NEXT: teq $1, $zero, 7
+; MIPS32-O0-NEXT: mfhi $2
+; MIPS32-O0-NEXT: lw $1, 4($sp)
+; MIPS32-O0-NEXT: divu $zero, $2, $1
+; MIPS32-O0-NEXT: teq $1, $zero, 7
+; MIPS32-O0-NEXT: mfhi $2
+; MIPS32-O0-NEXT: addiu $sp, $sp, 16
+; MIPS32-O0-NEXT: jr $ra
+; MIPS32-O0-NEXT: nop
+;
+entry:
+ %a.addr = alloca i32, align 4
+ %b.addr = alloca i32, align 4
+ %c.addr = alloca i32, align 4
+ store i32 %a, ptr %a.addr, align 4
+ store i32 %b, ptr %b.addr, align 4
+ store i32 %c, ptr %c.addr, align 4
+ %0 = load i32, ptr %a.addr, align 4
+ %1 = load i32, ptr %b.addr, align 4
+ %rem = urem i32 %0, %1
+ %2 = load i32, ptr %c.addr, align 4
+ %urem1 = urem i32 %rem, %2
+ ret i32 %urem1
+}
+
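The unsigned twins (two-consecutive-udiv.ll, two-consecutive-urem.ll) are structurally identical to the signed files, with divu/ddivu in place of div/ddiv. The only moving part is which half of the result the move-from instruction selects, since one divide produces both; a sketch grounded in the checks:

    divu $zero, $4, $5   # unsigned divide: quotient -> LO, remainder -> HI
    teq  $5, $zero, 7    # same divide-by-zero trap as the signed forms
    mflo $2              # pick the quotient for udiv...
    # mfhi $2            # ...or the remainder for urem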
diff --git a/llvm/test/CodeGen/Mips/llvm-ir/udiv.ll b/llvm/test/CodeGen/Mips/llvm-ir/udiv.ll
index e3dd347..cc2c661 100644
--- a/llvm/test/CodeGen/Mips/llvm-ir/udiv.ll
+++ b/llvm/test/CodeGen/Mips/llvm-ir/udiv.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=mips -mcpu=mips2 -relocation-model=pic \
-; RUN: -mips-jalr-reloc=false | FileCheck %s -check-prefixes=GP32
+; RUN: -mips-jalr-reloc=false | FileCheck %s -check-prefixes=MIPS2
; RUN: llc < %s -mtriple=mips -mcpu=mips32 -relocation-model=pic \
; RUN: -mips-jalr-reloc=false | FileCheck %s -check-prefixes=GP32
; RUN: llc < %s -mtriple=mips -mcpu=mips32r2 -relocation-model=pic \
@@ -13,9 +13,9 @@
; RUN: -mips-jalr-reloc=false | FileCheck %s -check-prefix=GP32R6
; RUN: llc < %s -mtriple=mips64 -mcpu=mips3 -relocation-model=pic \
-; RUN: -mips-jalr-reloc=false | FileCheck %s -check-prefixes=GP64
+; RUN: -mips-jalr-reloc=false | FileCheck %s -check-prefixes=MIPS3
; RUN: llc < %s -mtriple=mips64 -mcpu=mips4 -relocation-model=pic \
-; RUN: -mips-jalr-reloc=false | FileCheck %s -check-prefixes=GP64
+; RUN: -mips-jalr-reloc=false | FileCheck %s -check-prefixes=MIPS3
; RUN: llc < %s -mtriple=mips64 -mcpu=mips64 -relocation-model=pic \
; RUN: -mips-jalr-reloc=false | FileCheck %s -check-prefixes=GP64
; RUN: llc < %s -mtriple=mips64 -mcpu=mips64r2 -relocation-model=pic \
@@ -35,6 +35,11 @@
; RUN: FileCheck %s -check-prefix=MMR6
define zeroext i1 @udiv_i1(i1 zeroext %a, i1 zeroext %b) {
+; MIPS2-LABEL: udiv_i1:
+; MIPS2: # %bb.0: # %entry
+; MIPS2-NEXT: jr $ra
+; MIPS2-NEXT: move $2, $4
+;
; GP32-LABEL: udiv_i1:
; GP32: # %bb.0: # %entry
; GP32-NEXT: jr $ra
@@ -45,6 +50,11 @@ define zeroext i1 @udiv_i1(i1 zeroext %a, i1 zeroext %b) {
; GP32R6-NEXT: jr $ra
; GP32R6-NEXT: move $2, $4
;
+; MIPS3-LABEL: udiv_i1:
+; MIPS3: # %bb.0: # %entry
+; MIPS3-NEXT: jr $ra
+; MIPS3-NEXT: move $2, $4
+;
; GP64-LABEL: udiv_i1:
; GP64: # %bb.0: # %entry
; GP64-NEXT: jr $ra
@@ -70,6 +80,14 @@ entry:
}
define zeroext i8 @udiv_i8(i8 zeroext %a, i8 zeroext %b) {
+; MIPS2-LABEL: udiv_i8:
+; MIPS2: # %bb.0: # %entry
+; MIPS2-NEXT: divu $zero, $4, $5
+; MIPS2-NEXT: teq $5, $zero, 7
+; MIPS2-NEXT: mflo $2
+; MIPS2-NEXT: jr $ra
+; MIPS2-NEXT: nop
+;
; GP32-LABEL: udiv_i8:
; GP32: # %bb.0: # %entry
; GP32-NEXT: divu $zero, $4, $5
@@ -83,6 +101,14 @@ define zeroext i8 @udiv_i8(i8 zeroext %a, i8 zeroext %b) {
; GP32R6-NEXT: teq $5, $zero, 7
; GP32R6-NEXT: jrc $ra
;
+; MIPS3-LABEL: udiv_i8:
+; MIPS3: # %bb.0: # %entry
+; MIPS3-NEXT: divu $zero, $4, $5
+; MIPS3-NEXT: teq $5, $zero, 7
+; MIPS3-NEXT: mflo $2
+; MIPS3-NEXT: jr $ra
+; MIPS3-NEXT: nop
+;
; GP64-LABEL: udiv_i8:
; GP64: # %bb.0: # %entry
; GP64-NEXT: divu $zero, $4, $5
@@ -114,6 +140,14 @@ entry:
}
define zeroext i16 @udiv_i16(i16 zeroext %a, i16 zeroext %b) {
+; MIPS2-LABEL: udiv_i16:
+; MIPS2: # %bb.0: # %entry
+; MIPS2-NEXT: divu $zero, $4, $5
+; MIPS2-NEXT: teq $5, $zero, 7
+; MIPS2-NEXT: mflo $2
+; MIPS2-NEXT: jr $ra
+; MIPS2-NEXT: nop
+;
; GP32-LABEL: udiv_i16:
; GP32: # %bb.0: # %entry
; GP32-NEXT: divu $zero, $4, $5
@@ -127,6 +161,14 @@ define zeroext i16 @udiv_i16(i16 zeroext %a, i16 zeroext %b) {
; GP32R6-NEXT: teq $5, $zero, 7
; GP32R6-NEXT: jrc $ra
;
+; MIPS3-LABEL: udiv_i16:
+; MIPS3: # %bb.0: # %entry
+; MIPS3-NEXT: divu $zero, $4, $5
+; MIPS3-NEXT: teq $5, $zero, 7
+; MIPS3-NEXT: mflo $2
+; MIPS3-NEXT: jr $ra
+; MIPS3-NEXT: nop
+;
; GP64-LABEL: udiv_i16:
; GP64: # %bb.0: # %entry
; GP64-NEXT: divu $zero, $4, $5
@@ -158,6 +200,14 @@ entry:
}
define signext i32 @udiv_i32(i32 signext %a, i32 signext %b) {
+; MIPS2-LABEL: udiv_i32:
+; MIPS2: # %bb.0: # %entry
+; MIPS2-NEXT: divu $zero, $4, $5
+; MIPS2-NEXT: teq $5, $zero, 7
+; MIPS2-NEXT: mflo $2
+; MIPS2-NEXT: jr $ra
+; MIPS2-NEXT: nop
+;
; GP32-LABEL: udiv_i32:
; GP32: # %bb.0: # %entry
; GP32-NEXT: divu $zero, $4, $5
@@ -171,6 +221,14 @@ define signext i32 @udiv_i32(i32 signext %a, i32 signext %b) {
; GP32R6-NEXT: teq $5, $zero, 7
; GP32R6-NEXT: jrc $ra
;
+; MIPS3-LABEL: udiv_i32:
+; MIPS3: # %bb.0: # %entry
+; MIPS3-NEXT: divu $zero, $4, $5
+; MIPS3-NEXT: teq $5, $zero, 7
+; MIPS3-NEXT: mflo $2
+; MIPS3-NEXT: jr $ra
+; MIPS3-NEXT: nop
+;
; GP64-LABEL: udiv_i32:
; GP64: # %bb.0: # %entry
; GP64-NEXT: divu $zero, $4, $5
@@ -202,6 +260,22 @@ entry:
}
define signext i64 @udiv_i64(i64 signext %a, i64 signext %b) {
+; MIPS2-LABEL: udiv_i64:
+; MIPS2: # %bb.0: # %entry
+; MIPS2-NEXT: lui $2, %hi(_gp_disp)
+; MIPS2-NEXT: addiu $2, $2, %lo(_gp_disp)
+; MIPS2-NEXT: addiu $sp, $sp, -24
+; MIPS2-NEXT: .cfi_def_cfa_offset 24
+; MIPS2-NEXT: sw $ra, 20($sp) # 4-byte Folded Spill
+; MIPS2-NEXT: .cfi_offset 31, -4
+; MIPS2-NEXT: addu $gp, $2, $25
+; MIPS2-NEXT: lw $25, %call16(__udivdi3)($gp)
+; MIPS2-NEXT: jalr $25
+; MIPS2-NEXT: nop
+; MIPS2-NEXT: lw $ra, 20($sp) # 4-byte Folded Reload
+; MIPS2-NEXT: jr $ra
+; MIPS2-NEXT: addiu $sp, $sp, 24
+;
; GP32-LABEL: udiv_i64:
; GP32: # %bb.0: # %entry
; GP32-NEXT: lui $2, %hi(_gp_disp)
@@ -233,6 +307,14 @@ define signext i64 @udiv_i64(i64 signext %a, i64 signext %b) {
; GP32R6-NEXT: jr $ra
; GP32R6-NEXT: addiu $sp, $sp, 24
;
+; MIPS3-LABEL: udiv_i64:
+; MIPS3: # %bb.0: # %entry
+; MIPS3-NEXT: ddivu $zero, $4, $5
+; MIPS3-NEXT: teq $5, $zero, 7
+; MIPS3-NEXT: mflo $2
+; MIPS3-NEXT: jr $ra
+; MIPS3-NEXT: nop
+;
; GP64-LABEL: udiv_i64:
; GP64: # %bb.0: # %entry
; GP64-NEXT: ddivu $zero, $4, $5
@@ -284,6 +366,30 @@ entry:
}
define signext i128 @udiv_i128(i128 signext %a, i128 signext %b) {
+; MIPS2-LABEL: udiv_i128:
+; MIPS2: # %bb.0: # %entry
+; MIPS2-NEXT: lui $2, %hi(_gp_disp)
+; MIPS2-NEXT: addiu $2, $2, %lo(_gp_disp)
+; MIPS2-NEXT: addiu $sp, $sp, -40
+; MIPS2-NEXT: .cfi_def_cfa_offset 40
+; MIPS2-NEXT: sw $ra, 36($sp) # 4-byte Folded Spill
+; MIPS2-NEXT: .cfi_offset 31, -4
+; MIPS2-NEXT: addu $gp, $2, $25
+; MIPS2-NEXT: lw $1, 60($sp)
+; MIPS2-NEXT: lw $2, 64($sp)
+; MIPS2-NEXT: lw $3, 68($sp)
+; MIPS2-NEXT: sw $3, 28($sp)
+; MIPS2-NEXT: sw $2, 24($sp)
+; MIPS2-NEXT: sw $1, 20($sp)
+; MIPS2-NEXT: lw $1, 56($sp)
+; MIPS2-NEXT: sw $1, 16($sp)
+; MIPS2-NEXT: lw $25, %call16(__udivti3)($gp)
+; MIPS2-NEXT: jalr $25
+; MIPS2-NEXT: nop
+; MIPS2-NEXT: lw $ra, 36($sp) # 4-byte Folded Reload
+; MIPS2-NEXT: jr $ra
+; MIPS2-NEXT: addiu $sp, $sp, 40
+;
; GP32-LABEL: udiv_i128:
; GP32: # %bb.0: # %entry
; GP32-NEXT: lui $2, %hi(_gp_disp)
@@ -331,6 +437,25 @@ define signext i128 @udiv_i128(i128 signext %a, i128 signext %b) {
; GP32R6-NEXT: jr $ra
; GP32R6-NEXT: addiu $sp, $sp, 40
;
+; MIPS3-LABEL: udiv_i128:
+; MIPS3: # %bb.0: # %entry
+; MIPS3-NEXT: daddiu $sp, $sp, -16
+; MIPS3-NEXT: .cfi_def_cfa_offset 16
+; MIPS3-NEXT: sd $ra, 8($sp) # 8-byte Folded Spill
+; MIPS3-NEXT: sd $gp, 0($sp) # 8-byte Folded Spill
+; MIPS3-NEXT: .cfi_offset 31, -8
+; MIPS3-NEXT: .cfi_offset 28, -16
+; MIPS3-NEXT: lui $1, %hi(%neg(%gp_rel(udiv_i128)))
+; MIPS3-NEXT: daddu $1, $1, $25
+; MIPS3-NEXT: daddiu $gp, $1, %lo(%neg(%gp_rel(udiv_i128)))
+; MIPS3-NEXT: ld $25, %call16(__udivti3)($gp)
+; MIPS3-NEXT: jalr $25
+; MIPS3-NEXT: nop
+; MIPS3-NEXT: ld $gp, 0($sp) # 8-byte Folded Reload
+; MIPS3-NEXT: ld $ra, 8($sp) # 8-byte Folded Reload
+; MIPS3-NEXT: jr $ra
+; MIPS3-NEXT: daddiu $sp, $sp, 16
+;
; GP64-LABEL: udiv_i128:
; GP64: # %bb.0: # %entry
; GP64-NEXT: daddiu $sp, $sp, -16
diff --git a/llvm/test/CodeGen/Mips/llvm-ir/urem.ll b/llvm/test/CodeGen/Mips/llvm-ir/urem.ll
index 4105d67..5da1f61 100644
--- a/llvm/test/CodeGen/Mips/llvm-ir/urem.ll
+++ b/llvm/test/CodeGen/Mips/llvm-ir/urem.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=mips -mcpu=mips2 -relocation-model=pic \
-; RUN: -mips-jalr-reloc=false | FileCheck %s -check-prefixes=GP32,GP32R0R2
+; RUN: -mips-jalr-reloc=false | FileCheck %s -check-prefixes=MIPS2
; RUN: llc < %s -mtriple=mips -mcpu=mips32 -relocation-model=pic \
; RUN: -mips-jalr-reloc=false | FileCheck %s -check-prefixes=GP32,GP32R0R2
; RUN: llc < %s -mtriple=mips -mcpu=mips32r2 -relocation-model=pic \
@@ -13,9 +13,9 @@
; RUN: -mips-jalr-reloc=false | FileCheck %s -check-prefix=GP32R6
; RUN: llc < %s -mtriple=mips64 -mcpu=mips3 -relocation-model=pic \
-; RUN: -mips-jalr-reloc=false | FileCheck %s -check-prefixes=GP64,GP64R0R1
+; RUN: -mips-jalr-reloc=false | FileCheck %s -check-prefixes=MIPS3
; RUN: llc < %s -mtriple=mips64 -mcpu=mips4 -relocation-model=pic \
-; RUN: -mips-jalr-reloc=false | FileCheck %s -check-prefixes=GP64,GP64R0R1
+; RUN: -mips-jalr-reloc=false | FileCheck %s -check-prefixes=MIPS3
; RUN: llc < %s -mtriple=mips64 -mcpu=mips64 -relocation-model=pic \
; RUN: -mips-jalr-reloc=false | FileCheck %s -check-prefixes=GP64,GP64R0R1
; RUN: llc < %s -mtriple=mips64 -mcpu=mips64r2 -relocation-model=pic \
@@ -35,6 +35,11 @@
; RUN: FileCheck %s -check-prefix=MMR6
define signext i1 @urem_i1(i1 signext %a, i1 signext %b) {
+; MIPS2-LABEL: urem_i1:
+; MIPS2: # %bb.0: # %entry
+; MIPS2-NEXT: jr $ra
+; MIPS2-NEXT: addiu $2, $zero, 0
+;
; GP32-LABEL: urem_i1:
; GP32: # %bb.0: # %entry
; GP32-NEXT: jr $ra
@@ -45,6 +50,11 @@ define signext i1 @urem_i1(i1 signext %a, i1 signext %b) {
; GP32R6-NEXT: jr $ra
; GP32R6-NEXT: addiu $2, $zero, 0
;
+; MIPS3-LABEL: urem_i1:
+; MIPS3: # %bb.0: # %entry
+; MIPS3-NEXT: jr $ra
+; MIPS3-NEXT: addiu $2, $zero, 0
+;
; GP64-LABEL: urem_i1:
; GP64: # %bb.0: # %entry
; GP64-NEXT: jr $ra
@@ -70,6 +80,17 @@ entry:
}
define signext i8 @urem_i8(i8 signext %a, i8 signext %b) {
+; MIPS2-LABEL: urem_i8:
+; MIPS2: # %bb.0: # %entry
+; MIPS2-NEXT: andi $1, $5, 255
+; MIPS2-NEXT: andi $2, $4, 255
+; MIPS2-NEXT: divu $zero, $2, $1
+; MIPS2-NEXT: teq $1, $zero, 7
+; MIPS2-NEXT: mfhi $1
+; MIPS2-NEXT: sll $1, $1, 24
+; MIPS2-NEXT: jr $ra
+; MIPS2-NEXT: sra $2, $1, 24
+;
; GP32R0R2-LABEL: urem_i8:
; GP32R0R2: # %bb.0: # %entry
; GP32R0R2-NEXT: andi $1, $5, 255
@@ -100,6 +121,17 @@ define signext i8 @urem_i8(i8 signext %a, i8 signext %b) {
; GP32R6-NEXT: jr $ra
; GP32R6-NEXT: seb $2, $2
;
+; MIPS3-LABEL: urem_i8:
+; MIPS3: # %bb.0: # %entry
+; MIPS3-NEXT: andi $1, $5, 255
+; MIPS3-NEXT: andi $2, $4, 255
+; MIPS3-NEXT: divu $zero, $2, $1
+; MIPS3-NEXT: teq $1, $zero, 7
+; MIPS3-NEXT: mfhi $1
+; MIPS3-NEXT: sll $1, $1, 24
+; MIPS3-NEXT: jr $ra
+; MIPS3-NEXT: sra $2, $1, 24
+;
; GP64R0R1-LABEL: urem_i8:
; GP64R0R1: # %bb.0: # %entry
; GP64R0R1-NEXT: andi $1, $5, 255
@@ -154,6 +186,17 @@ entry:
}
define signext i16 @urem_i16(i16 signext %a, i16 signext %b) {
+; MIPS2-LABEL: urem_i16:
+; MIPS2: # %bb.0: # %entry
+; MIPS2-NEXT: andi $1, $5, 65535
+; MIPS2-NEXT: andi $2, $4, 65535
+; MIPS2-NEXT: divu $zero, $2, $1
+; MIPS2-NEXT: teq $1, $zero, 7
+; MIPS2-NEXT: mfhi $1
+; MIPS2-NEXT: sll $1, $1, 16
+; MIPS2-NEXT: jr $ra
+; MIPS2-NEXT: sra $2, $1, 16
+;
; GP32R0R2-LABEL: urem_i16:
; GP32R0R2: # %bb.0: # %entry
; GP32R0R2-NEXT: andi $1, $5, 65535
@@ -184,6 +227,17 @@ define signext i16 @urem_i16(i16 signext %a, i16 signext %b) {
; GP32R6-NEXT: jr $ra
; GP32R6-NEXT: seh $2, $2
;
+; MIPS3-LABEL: urem_i16:
+; MIPS3: # %bb.0: # %entry
+; MIPS3-NEXT: andi $1, $5, 65535
+; MIPS3-NEXT: andi $2, $4, 65535
+; MIPS3-NEXT: divu $zero, $2, $1
+; MIPS3-NEXT: teq $1, $zero, 7
+; MIPS3-NEXT: mfhi $1
+; MIPS3-NEXT: sll $1, $1, 16
+; MIPS3-NEXT: jr $ra
+; MIPS3-NEXT: sra $2, $1, 16
+;
; GP64R0R1-LABEL: urem_i16:
; GP64R0R1: # %bb.0: # %entry
; GP64R0R1-NEXT: andi $1, $5, 65535
@@ -238,6 +292,14 @@ entry:
}
define signext i32 @urem_i32(i32 signext %a, i32 signext %b) {
+; MIPS2-LABEL: urem_i32:
+; MIPS2: # %bb.0: # %entry
+; MIPS2-NEXT: divu $zero, $4, $5
+; MIPS2-NEXT: teq $5, $zero, 7
+; MIPS2-NEXT: mfhi $2
+; MIPS2-NEXT: jr $ra
+; MIPS2-NEXT: nop
+;
; GP32-LABEL: urem_i32:
; GP32: # %bb.0: # %entry
; GP32-NEXT: divu $zero, $4, $5
@@ -251,6 +313,14 @@ define signext i32 @urem_i32(i32 signext %a, i32 signext %b) {
; GP32R6-NEXT: teq $5, $zero, 7
; GP32R6-NEXT: jrc $ra
;
+; MIPS3-LABEL: urem_i32:
+; MIPS3: # %bb.0: # %entry
+; MIPS3-NEXT: divu $zero, $4, $5
+; MIPS3-NEXT: teq $5, $zero, 7
+; MIPS3-NEXT: mfhi $2
+; MIPS3-NEXT: jr $ra
+; MIPS3-NEXT: nop
+;
; GP64-LABEL: urem_i32:
; GP64: # %bb.0: # %entry
; GP64-NEXT: divu $zero, $4, $5
@@ -282,6 +352,22 @@ entry:
}
define signext i64 @urem_i64(i64 signext %a, i64 signext %b) {
+; MIPS2-LABEL: urem_i64:
+; MIPS2: # %bb.0: # %entry
+; MIPS2-NEXT: lui $2, %hi(_gp_disp)
+; MIPS2-NEXT: addiu $2, $2, %lo(_gp_disp)
+; MIPS2-NEXT: addiu $sp, $sp, -24
+; MIPS2-NEXT: .cfi_def_cfa_offset 24
+; MIPS2-NEXT: sw $ra, 20($sp) # 4-byte Folded Spill
+; MIPS2-NEXT: .cfi_offset 31, -4
+; MIPS2-NEXT: addu $gp, $2, $25
+; MIPS2-NEXT: lw $25, %call16(__umoddi3)($gp)
+; MIPS2-NEXT: jalr $25
+; MIPS2-NEXT: nop
+; MIPS2-NEXT: lw $ra, 20($sp) # 4-byte Folded Reload
+; MIPS2-NEXT: jr $ra
+; MIPS2-NEXT: addiu $sp, $sp, 24
+;
; GP32-LABEL: urem_i64:
; GP32: # %bb.0: # %entry
; GP32-NEXT: lui $2, %hi(_gp_disp)
@@ -313,6 +399,14 @@ define signext i64 @urem_i64(i64 signext %a, i64 signext %b) {
; GP32R6-NEXT: jr $ra
; GP32R6-NEXT: addiu $sp, $sp, 24
;
+; MIPS3-LABEL: urem_i64:
+; MIPS3: # %bb.0: # %entry
+; MIPS3-NEXT: ddivu $zero, $4, $5
+; MIPS3-NEXT: teq $5, $zero, 7
+; MIPS3-NEXT: mfhi $2
+; MIPS3-NEXT: jr $ra
+; MIPS3-NEXT: nop
+;
; GP64-LABEL: urem_i64:
; GP64: # %bb.0: # %entry
; GP64-NEXT: ddivu $zero, $4, $5
@@ -364,6 +458,30 @@ entry:
}
define signext i128 @urem_i128(i128 signext %a, i128 signext %b) {
+; MIPS2-LABEL: urem_i128:
+; MIPS2: # %bb.0: # %entry
+; MIPS2-NEXT: lui $2, %hi(_gp_disp)
+; MIPS2-NEXT: addiu $2, $2, %lo(_gp_disp)
+; MIPS2-NEXT: addiu $sp, $sp, -40
+; MIPS2-NEXT: .cfi_def_cfa_offset 40
+; MIPS2-NEXT: sw $ra, 36($sp) # 4-byte Folded Spill
+; MIPS2-NEXT: .cfi_offset 31, -4
+; MIPS2-NEXT: addu $gp, $2, $25
+; MIPS2-NEXT: lw $1, 60($sp)
+; MIPS2-NEXT: lw $2, 64($sp)
+; MIPS2-NEXT: lw $3, 68($sp)
+; MIPS2-NEXT: sw $3, 28($sp)
+; MIPS2-NEXT: sw $2, 24($sp)
+; MIPS2-NEXT: sw $1, 20($sp)
+; MIPS2-NEXT: lw $1, 56($sp)
+; MIPS2-NEXT: sw $1, 16($sp)
+; MIPS2-NEXT: lw $25, %call16(__umodti3)($gp)
+; MIPS2-NEXT: jalr $25
+; MIPS2-NEXT: nop
+; MIPS2-NEXT: lw $ra, 36($sp) # 4-byte Folded Reload
+; MIPS2-NEXT: jr $ra
+; MIPS2-NEXT: addiu $sp, $sp, 40
+;
; GP32-LABEL: urem_i128:
; GP32: # %bb.0: # %entry
; GP32-NEXT: lui $2, %hi(_gp_disp)
@@ -411,6 +529,25 @@ define signext i128 @urem_i128(i128 signext %a, i128 signext %b) {
; GP32R6-NEXT: jr $ra
; GP32R6-NEXT: addiu $sp, $sp, 40
;
+; MIPS3-LABEL: urem_i128:
+; MIPS3: # %bb.0: # %entry
+; MIPS3-NEXT: daddiu $sp, $sp, -16
+; MIPS3-NEXT: .cfi_def_cfa_offset 16
+; MIPS3-NEXT: sd $ra, 8($sp) # 8-byte Folded Spill
+; MIPS3-NEXT: sd $gp, 0($sp) # 8-byte Folded Spill
+; MIPS3-NEXT: .cfi_offset 31, -8
+; MIPS3-NEXT: .cfi_offset 28, -16
+; MIPS3-NEXT: lui $1, %hi(%neg(%gp_rel(urem_i128)))
+; MIPS3-NEXT: daddu $1, $1, $25
+; MIPS3-NEXT: daddiu $gp, $1, %lo(%neg(%gp_rel(urem_i128)))
+; MIPS3-NEXT: ld $25, %call16(__umodti3)($gp)
+; MIPS3-NEXT: jalr $25
+; MIPS3-NEXT: nop
+; MIPS3-NEXT: ld $gp, 0($sp) # 8-byte Folded Reload
+; MIPS3-NEXT: ld $ra, 8($sp) # 8-byte Folded Reload
+; MIPS3-NEXT: jr $ra
+; MIPS3-NEXT: daddiu $sp, $sp, 16
+;
; GP64-LABEL: urem_i128:
; GP64: # %bb.0: # %entry
; GP64-NEXT: daddiu $sp, $sp, -16
diff --git a/llvm/test/CodeGen/NVPTX/fence-sm-90.ll b/llvm/test/CodeGen/NVPTX/fence-sm-90.ll
new file mode 100644
index 0000000..82eb5fb
--- /dev/null
+++ b/llvm/test/CodeGen/NVPTX/fence-sm-90.ll
@@ -0,0 +1,30 @@
+; RUN: llc < %s -march=nvptx64 -mcpu=sm_90 -mattr=+ptx78 | FileCheck %s
+; RUN: %if ptxas-12.2 %{ llc < %s -march=nvptx64 -mcpu=sm_90 -mattr=+ptx78 | %ptxas-verify -arch=sm_90 %}
+
+; CHECK-LABEL: fence_sc_cluster
+define void @fence_sc_cluster() local_unnamed_addr {
+ ; CHECK: fence.sc.cluster
+ fence syncscope("cluster") seq_cst
+ ret void
+}
+
+; CHECK-LABEL: fence_acq_rel_cluster
+define void @fence_acq_rel_cluster() local_unnamed_addr {
+ ; CHECK: fence.acq_rel.cluster
+ fence syncscope("cluster") acq_rel
+ ret void
+}
+
+; CHECK-LABEL: fence_release_cluster
+define void @fence_release_cluster() local_unnamed_addr {
+ ; CHECK: fence.acq_rel.cluster
+ fence syncscope("cluster") release
+ ret void
+}
+
+; CHECK-LABEL: fence_acquire_cluster
+define void @fence_acquire_cluster() local_unnamed_addr {
+ ; CHECK: fence.acq_rel.cluster
+ fence syncscope("cluster") acquire
+ ret void
+}
diff --git a/llvm/test/CodeGen/NVPTX/fence.ll b/llvm/test/CodeGen/NVPTX/fence.ll
index d3aace9..626685f 100644
--- a/llvm/test/CodeGen/NVPTX/fence.ll
+++ b/llvm/test/CodeGen/NVPTX/fence.ll
@@ -3,6 +3,8 @@
; RUN: llc < %s -march=nvptx64 -mcpu=sm_70 -mattr=+ptx60 | FileCheck %s --check-prefix=SM70
; RUN: %if ptxas-12.2 %{ llc < %s -march=nvptx64 -mcpu=sm_70 -mattr=+ptx60 | %ptxas-verify -arch=sm_70 %}
+; TODO: implement and test thread scope.
+
; CHECK-LABEL: fence_sc_sys
define void @fence_sc_sys() local_unnamed_addr {
; SM60: membar.sys
@@ -16,21 +18,85 @@ define void @fence_acq_rel_sys() local_unnamed_addr {
; SM60: membar.sys
; SM70: fence.acq_rel.sys
fence acq_rel
- ret void
+ ret void
}
; CHECK-LABEL: fence_release_sys
define void @fence_release_sys() local_unnamed_addr {
; SM60: membar.sys
- ; SM70: fence.acq_rel.sys
+ ; SM70: fence.acq_rel.sys
fence release
- ret void
+ ret void
}
; CHECK-LABEL: fence_acquire_sys
define void @fence_acquire_sys() local_unnamed_addr {
; SM60: membar.sys
- ; SM70: fence.acq_rel.sys
+ ; SM70: fence.acq_rel.sys
fence acquire
- ret void
+ ret void
+}
+
+; CHECK-LABEL: fence_sc_gpu
+define void @fence_sc_gpu() local_unnamed_addr {
+ ; SM60: membar.gl
+ ; SM70: fence.sc.gpu
+ fence syncscope("device") seq_cst
+ ret void
+}
+
+; CHECK-LABEL: fence_acq_rel_gpu
+define void @fence_acq_rel_gpu() local_unnamed_addr {
+ ; SM60: membar.gl
+ ; SM70: fence.acq_rel.gpu
+ fence syncscope("device") acq_rel
+ ret void
+}
+
+; CHECK-LABEL: fence_release_gpu
+define void @fence_release_gpu() local_unnamed_addr {
+ ; SM60: membar.gl
+ ; SM70: fence.acq_rel.gpu
+ fence syncscope("device") release
+ ret void
+}
+
+; CHECK-LABEL: fence_acquire_gpu
+define void @fence_acquire_gpu() local_unnamed_addr {
+ ; SM60: membar.gl
+ ; SM70: fence.acq_rel.gpu
+ fence syncscope("device") acquire
+ ret void
+}
+
+; CHECK-LABEL: fence_sc_cta
+define void @fence_sc_cta() local_unnamed_addr {
+ ; SM60: membar.cta
+ ; SM70: fence.sc.cta
+ fence syncscope("block") seq_cst
+ ret void
+}
+
+; CHECK-LABEL: fence_acq_rel_cta
+define void @fence_acq_rel_cta() local_unnamed_addr {
+ ; SM60: membar.cta
+ ; SM70: fence.acq_rel.cta
+ fence syncscope("block") acq_rel
+ ret void
+}
+
+; CHECK-LABEL: fence_release_cta
+define void @fence_release_cta() local_unnamed_addr {
+ ; SM60: membar.cta
+ ; SM70: fence.acq_rel.cta
+ fence syncscope("block") release
+ ret void
+}
+
+; CHECK-LABEL: fence_acquire_cta
+define void @fence_acquire_cta() local_unnamed_addr {
+ ; SM60: membar.cta
+ ; SM70: fence.acq_rel.cta
+ fence syncscope("block") acquire
+ ret void
}
\ No newline at end of file
diff --git a/llvm/test/CodeGen/NVPTX/intrin-nocapture.ll b/llvm/test/CodeGen/NVPTX/intrin-nocapture.ll
deleted file mode 100644
index 040bbde..0000000
--- a/llvm/test/CodeGen/NVPTX/intrin-nocapture.ll
+++ /dev/null
@@ -1,21 +0,0 @@
-; RUN: opt < %s -O3 -S | FileCheck %s
-
-; Address space intrinsics were erroneously marked NoCapture, leading to bad
-; optimizations (such as the store below being eliminated as dead code). This
-; test makes sure we don't regress.
-
-declare void @foo(ptr addrspace(1))
-
-declare ptr addrspace(1) @llvm.nvvm.ptr.gen.to.global.p1.p0(ptr)
-
-; CHECK: @bar
-define void @bar() {
- %t1 = alloca i32
-; CHECK: call ptr addrspace(1) @llvm.nvvm.ptr.gen.to.global.p1.p0(ptr nonnull %t1)
-; CHECK-NEXT: store i32 10, ptr %t1
- %t2 = call ptr addrspace(1) @llvm.nvvm.ptr.gen.to.global.p1.p0(ptr %t1)
- store i32 10, ptr %t1
- call void @foo(ptr addrspace(1) %t2)
- ret void
-}
-
diff --git a/llvm/test/CodeGen/NVPTX/load-store-sm-70.ll b/llvm/test/CodeGen/NVPTX/load-store-sm-70.ll
index 9cea33d..4b200ea 100644
--- a/llvm/test/CodeGen/NVPTX/load-store-sm-70.ll
+++ b/llvm/test/CodeGen/NVPTX/load-store-sm-70.ll
@@ -1,10 +1,367 @@
; RUN: llc < %s -march=nvptx64 -mcpu=sm_70 -mattr=+ptx82 | FileCheck %s
; RUN: %if ptxas-12.2 %{ llc < %s -march=nvptx64 -mcpu=sm_70 -mattr=+ptx82 | %ptxas-verify -arch=sm_70 %}
+; TODO: fix "atomic load volatile acquire": generates "ld.acquire.sys;"
+; but should generate "ld.mmio.relaxed.sys; fence.acq_rel.sys;"
+; TODO: fix "atomic store volatile release": generates "st.release.sys;"
+; but should generate "fence.acq_rel.sys; st.mmio.relaxed.sys;"
+
+; TODO: fix "atomic load volatile seq_cst": generates "fence.sc.sys; ld.acquire.sys;"
+; but should generate "fence.sc.sys; ld.relaxed.mmio.sys; fence.acq_rel.sys;"
+; TODO: fix "atomic store volatile seq_cst": generates "fence.sc.sys; st.release.sys;"
+; but should generate "fence.sc.sys; st.relaxed.mmio.sys;"
+
+; TODO: add i1, <8 x i8>, and <6 x i8> vector tests.
+
+; TODO: add tests for vectors that exceed 128 bits in length.
+; Per https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#vectors
+; vectors cannot exceed 128 bits in length, i.e., .v4.u64 is not allowed.
+
+; TODO: generate PTX that preserves Concurrent Forward Progress
+; for atomic operations to the local statespace
+; by generating atomic or volatile operations.
+
+; TODO: design exposure for atomic operations on vector types.
+
+; TODO: implement and test thread scope.
+
+; TODO: add weak, atomic, volatile, and atomic volatile tests
+; for the .const and .param statespaces.
+
+; TODO: optimize .sys.shared into .cta.shared or .cluster.shared.
+
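+; A minimal sketch (comments only, not a live test) of the volatile seq_cst
+; lowering the TODOs above ask for, assuming the mmio mnemonic matches the
+; ld/st.mmio.relaxed.sys CHECK lines used later in this file:
+;   %v = load atomic volatile i32, ptr %p seq_cst, align 4
+;   ; desired PTX: fence.sc.sys; ld.mmio.relaxed.sys.u32 ...; fence.acq_rel.sys;
+;   store atomic volatile i32 %v, ptr %p seq_cst, align 4
+;   ; desired PTX: fence.sc.sys; st.mmio.relaxed.sys.u32 ...;
+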
;; generic statespace
-; CHECK-LABEL: generic_acq_rel
-define void @generic_acq_rel(ptr %a, ptr %b, ptr %c, ptr %d, ptr %e) local_unnamed_addr {
+; CHECK-LABEL: generic_unordered_gpu
+define void @generic_unordered_gpu(ptr %a, ptr %b, ptr %c, ptr %d, ptr %e) local_unnamed_addr {
+ ; CHECK: ld.relaxed.gpu.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load atomic i8, ptr %a syncscope("device") unordered, align 1
+ %a.add = add i8 %a.load, 1
+ ; CHECK: st.relaxed.gpu.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic i8 %a.add, ptr %a syncscope("device") unordered, align 1
+
+ ; CHECK: ld.relaxed.gpu.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load atomic i16, ptr %b syncscope("device") unordered, align 2
+ %b.add = add i16 %b.load, 1
+ ; CHECK: st.relaxed.gpu.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic i16 %b.add, ptr %b syncscope("device") unordered, align 2
+
+ ; CHECK: ld.relaxed.gpu.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load atomic i32, ptr %c syncscope("device") unordered, align 4
+ %c.add = add i32 %c.load, 1
+ ; CHECK: st.relaxed.gpu.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store atomic i32 %c.add, ptr %c syncscope("device") unordered, align 4
+
+ ; CHECK: ld.relaxed.gpu.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load atomic i64, ptr %d syncscope("device") unordered, align 8
+ %d.add = add i64 %d.load, 1
+ ; CHECK: st.relaxed.gpu.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store atomic i64 %d.add, ptr %d syncscope("device") unordered, align 8
+
+ ; CHECK: ld.relaxed.gpu.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %e.load = load atomic float, ptr %e syncscope("device") unordered, align 4
+ %e.add = fadd float %e.load, 1.
+ ; CHECK: st.relaxed.gpu.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ store atomic float %e.add, ptr %e syncscope("device") unordered, align 4
+
+ ; CHECK: ld.relaxed.gpu.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %f.load = load atomic double, ptr %e syncscope("device") unordered, align 8
+ %f.add = fadd double %f.load, 1.
+ ; CHECK: st.relaxed.gpu.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ store atomic double %f.add, ptr %e syncscope("device") unordered, align 8
+
+ ret void
+}
+
+; CHECK-LABEL: generic_unordered_volatile_gpu
+define void @generic_unordered_volatile_gpu(ptr %a, ptr %b, ptr %c, ptr %d, ptr %e) local_unnamed_addr {
+ ; CHECK: ld.volatile.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load atomic volatile i8, ptr %a syncscope("device") unordered, align 1
+ %a.add = add i8 %a.load, 1
+ ; CHECK: st.volatile.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic volatile i8 %a.add, ptr %a syncscope("device") unordered, align 1
+
+ ; CHECK: ld.volatile.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load atomic volatile i16, ptr %b syncscope("device") unordered, align 2
+ %b.add = add i16 %b.load, 1
+ ; CHECK: st.volatile.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic volatile i16 %b.add, ptr %b syncscope("device") unordered, align 2
+
+ ; CHECK: ld.volatile.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load atomic volatile i32, ptr %c syncscope("device") unordered, align 4
+ %c.add = add i32 %c.load, 1
+ ; CHECK: st.volatile.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store atomic volatile i32 %c.add, ptr %c syncscope("device") unordered, align 4
+
+ ; CHECK: ld.volatile.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load atomic volatile i64, ptr %d syncscope("device") unordered, align 8
+ %d.add = add i64 %d.load, 1
+ ; CHECK: st.volatile.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store atomic volatile i64 %d.add, ptr %d syncscope("device") unordered, align 8
+
+ ; CHECK: ld.volatile.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %e.load = load atomic volatile float, ptr %e syncscope("device") unordered, align 4
+ %e.add = fadd float %e.load, 1.
+ ; CHECK: st.volatile.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ store atomic volatile float %e.add, ptr %e syncscope("device") unordered, align 4
+
+ ; CHECK: ld.volatile.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %f.load = load atomic volatile double, ptr %e syncscope("device") unordered, align 8
+ %f.add = fadd double %f.load, 1.
+ ; CHECK: st.volatile.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ store atomic volatile double %f.add, ptr %e syncscope("device") unordered, align 8
+
+ ret void
+}
+
+; CHECK-LABEL: generic_unordered_cta
+define void @generic_unordered_cta(ptr %a, ptr %b, ptr %c, ptr %d, ptr %e) local_unnamed_addr {
+ ; CHECK: ld.relaxed.cta.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load atomic i8, ptr %a syncscope("block") unordered, align 1
+ %a.add = add i8 %a.load, 1
+ ; CHECK: st.relaxed.cta.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic i8 %a.add, ptr %a syncscope("block") unordered, align 1
+
+ ; CHECK: ld.relaxed.cta.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load atomic i16, ptr %b syncscope("block") unordered, align 2
+ %b.add = add i16 %b.load, 1
+ ; CHECK: st.relaxed.cta.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic i16 %b.add, ptr %b syncscope("block") unordered, align 2
+
+ ; CHECK: ld.relaxed.cta.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load atomic i32, ptr %c syncscope("block") unordered, align 4
+ %c.add = add i32 %c.load, 1
+ ; CHECK: st.relaxed.cta.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store atomic i32 %c.add, ptr %c syncscope("block") unordered, align 4
+
+ ; CHECK: ld.relaxed.cta.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load atomic i64, ptr %d syncscope("block") unordered, align 8
+ %d.add = add i64 %d.load, 1
+ ; CHECK: st.relaxed.cta.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store atomic i64 %d.add, ptr %d syncscope("block") unordered, align 8
+
+ ; CHECK: ld.relaxed.cta.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %e.load = load atomic float, ptr %e syncscope("block") unordered, align 4
+ %e.add = fadd float %e.load, 1.
+ ; CHECK: st.relaxed.cta.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ store atomic float %e.add, ptr %e syncscope("block") unordered, align 4
+
+ ; CHECK: ld.relaxed.cta.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %f.load = load atomic double, ptr %e syncscope("block") unordered, align 8
+ %f.add = fadd double %f.load, 1.
+ ; CHECK: st.relaxed.cta.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ store atomic double %f.add, ptr %e syncscope("block") unordered, align 8
+
+ ret void
+}
+
+; CHECK-LABEL: generic_unordered_volatile_cta
+define void @generic_unordered_volatile_cta(ptr %a, ptr %b, ptr %c, ptr %d, ptr %e) local_unnamed_addr {
+ ; CHECK: ld.volatile.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load atomic volatile i8, ptr %a syncscope("block") unordered, align 1
+ %a.add = add i8 %a.load, 1
+ ; CHECK: st.volatile.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic volatile i8 %a.add, ptr %a syncscope("block") unordered, align 1
+
+ ; CHECK: ld.volatile.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load atomic volatile i16, ptr %b syncscope("block") unordered, align 2
+ %b.add = add i16 %b.load, 1
+ ; CHECK: st.volatile.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic volatile i16 %b.add, ptr %b syncscope("block") unordered, align 2
+
+ ; CHECK: ld.volatile.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load atomic volatile i32, ptr %c syncscope("block") unordered, align 4
+ %c.add = add i32 %c.load, 1
+ ; CHECK: st.volatile.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store atomic volatile i32 %c.add, ptr %c syncscope("block") unordered, align 4
+
+ ; CHECK: ld.volatile.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load atomic volatile i64, ptr %d syncscope("block") unordered, align 8
+ %d.add = add i64 %d.load, 1
+ ; CHECK: st.volatile.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store atomic volatile i64 %d.add, ptr %d syncscope("block") unordered, align 8
+
+ ; CHECK: ld.volatile.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %e.load = load atomic volatile float, ptr %e syncscope("block") unordered, align 4
+ %e.add = fadd float %e.load, 1.
+ ; CHECK: st.volatile.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ store atomic volatile float %e.add, ptr %e syncscope("block") unordered, align 4
+
+ ; CHECK: ld.volatile.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %f.load = load atomic volatile double, ptr %e syncscope("block") unordered, align 8
+ %f.add = fadd double %f.load, 1.
+ ; CHECK: st.volatile.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ store atomic volatile double %f.add, ptr %e syncscope("block") unordered, align 8
+
+ ret void
+}
+
+; CHECK-LABEL: generic_monotonic_gpu
+define void @generic_monotonic_gpu(ptr %a, ptr %b, ptr %c, ptr %d, ptr %e) local_unnamed_addr {
+ ; CHECK: ld.relaxed.gpu.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load atomic i8, ptr %a syncscope("device") monotonic, align 1
+ %a.add = add i8 %a.load, 1
+ ; CHECK: st.relaxed.gpu.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic i8 %a.add, ptr %a syncscope("device") monotonic, align 1
+
+ ; CHECK: ld.relaxed.gpu.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load atomic i16, ptr %b syncscope("device") monotonic, align 2
+ %b.add = add i16 %b.load, 1
+ ; CHECK: st.relaxed.gpu.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic i16 %b.add, ptr %b syncscope("device") monotonic, align 2
+
+ ; CHECK: ld.relaxed.gpu.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load atomic i32, ptr %c syncscope("device") monotonic, align 4
+ %c.add = add i32 %c.load, 1
+ ; CHECK: st.relaxed.gpu.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store atomic i32 %c.add, ptr %c syncscope("device") monotonic, align 4
+
+ ; CHECK: ld.relaxed.gpu.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load atomic i64, ptr %d syncscope("device") monotonic, align 8
+ %d.add = add i64 %d.load, 1
+ ; CHECK: st.relaxed.gpu.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store atomic i64 %d.add, ptr %d syncscope("device") monotonic, align 8
+
+ ; CHECK: ld.relaxed.gpu.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %e.load = load atomic float, ptr %e syncscope("device") monotonic, align 4
+ %e.add = fadd float %e.load, 1.
+ ; CHECK: st.relaxed.gpu.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ store atomic float %e.add, ptr %e syncscope("device") monotonic, align 4
+
+ ; CHECK: ld.relaxed.gpu.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %f.load = load atomic double, ptr %e syncscope("device") monotonic, align 8
+ %f.add = fadd double %f.load, 1.
+ ; CHECK: st.relaxed.gpu.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ store atomic double %f.add, ptr %e syncscope("device") monotonic, align 8
+
+ ret void
+}
+
+; CHECK-LABEL: generic_monotonic_volatile_gpu
+define void @generic_monotonic_volatile_gpu(ptr %a, ptr %b, ptr %c, ptr %d, ptr %e) local_unnamed_addr {
+ ; CHECK: ld.volatile.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load atomic volatile i8, ptr %a syncscope("device") monotonic, align 1
+ %a.add = add i8 %a.load, 1
+ ; CHECK: st.volatile.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic volatile i8 %a.add, ptr %a syncscope("device") monotonic, align 1
+
+ ; CHECK: ld.volatile.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load atomic volatile i16, ptr %b syncscope("device") monotonic, align 2
+ %b.add = add i16 %b.load, 1
+ ; CHECK: st.volatile.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic volatile i16 %b.add, ptr %b syncscope("device") monotonic, align 2
+
+ ; CHECK: ld.volatile.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load atomic volatile i32, ptr %c syncscope("device") monotonic, align 4
+ %c.add = add i32 %c.load, 1
+ ; CHECK: st.volatile.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store atomic volatile i32 %c.add, ptr %c syncscope("device") monotonic, align 4
+
+ ; CHECK: ld.volatile.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load atomic volatile i64, ptr %d syncscope("device") monotonic, align 8
+ %d.add = add i64 %d.load, 1
+ ; CHECK: st.volatile.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store atomic volatile i64 %d.add, ptr %d syncscope("device") monotonic, align 8
+
+ ; CHECK: ld.volatile.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %e.load = load atomic volatile float, ptr %e syncscope("device") monotonic, align 4
+ %e.add = fadd float %e.load, 1.
+ ; CHECK: st.volatile.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ store atomic volatile float %e.add, ptr %e syncscope("device") monotonic, align 4
+
+ ; CHECK: ld.volatile.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %f.load = load atomic volatile double, ptr %e syncscope("device") monotonic, align 8
+ %f.add = fadd double %f.load, 1.
+ ; CHECK: st.volatile.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ store atomic volatile double %f.add, ptr %e syncscope("device") monotonic, align 8
+
+ ret void
+}
+
+; CHECK-LABEL: generic_monotonic_cta
+define void @generic_monotonic_cta(ptr %a, ptr %b, ptr %c, ptr %d, ptr %e) local_unnamed_addr {
+ ; CHECK: ld.relaxed.cta.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load atomic i8, ptr %a syncscope("block") monotonic, align 1
+ %a.add = add i8 %a.load, 1
+ ; CHECK: st.relaxed.cta.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic i8 %a.add, ptr %a syncscope("block") monotonic, align 1
+
+ ; CHECK: ld.relaxed.cta.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load atomic i16, ptr %b syncscope("block") monotonic, align 2
+ %b.add = add i16 %b.load, 1
+ ; CHECK: st.relaxed.cta.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic i16 %b.add, ptr %b syncscope("block") monotonic, align 2
+
+ ; CHECK: ld.relaxed.cta.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load atomic i32, ptr %c syncscope("block") monotonic, align 4
+ %c.add = add i32 %c.load, 1
+ ; CHECK: st.relaxed.cta.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store atomic i32 %c.add, ptr %c syncscope("block") monotonic, align 4
+
+ ; CHECK: ld.relaxed.cta.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load atomic i64, ptr %d syncscope("block") monotonic, align 8
+ %d.add = add i64 %d.load, 1
+ ; CHECK: st.relaxed.cta.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store atomic i64 %d.add, ptr %d syncscope("block") monotonic, align 8
+
+ ; CHECK: ld.relaxed.cta.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %e.load = load atomic float, ptr %e syncscope("block") monotonic, align 4
+ %e.add = fadd float %e.load, 1.
+ ; CHECK: st.relaxed.cta.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ store atomic float %e.add, ptr %e syncscope("block") monotonic, align 4
+
+ ; CHECK: ld.relaxed.cta.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %f.load = load atomic double, ptr %e syncscope("block") monotonic, align 8
+ %f.add = fadd double %f.load, 1.
+ ; CHECK: st.relaxed.cta.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ store atomic double %f.add, ptr %e syncscope("block") monotonic, align 8
+
+ ret void
+}
+
+; CHECK-LABEL: generic_monotonic_volatile_cta
+define void @generic_monotonic_volatile_cta(ptr %a, ptr %b, ptr %c, ptr %d, ptr %e) local_unnamed_addr {
+ ; CHECK: ld.volatile.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load atomic volatile i8, ptr %a syncscope("block") monotonic, align 1
+ %a.add = add i8 %a.load, 1
+ ; CHECK: st.volatile.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic volatile i8 %a.add, ptr %a syncscope("block") monotonic, align 1
+
+ ; CHECK: ld.volatile.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load atomic volatile i16, ptr %b syncscope("block") monotonic, align 2
+ %b.add = add i16 %b.load, 1
+ ; CHECK: st.volatile.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic volatile i16 %b.add, ptr %b syncscope("block") monotonic, align 2
+
+ ; CHECK: ld.volatile.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load atomic volatile i32, ptr %c syncscope("block") monotonic, align 4
+ %c.add = add i32 %c.load, 1
+ ; CHECK: st.volatile.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store atomic volatile i32 %c.add, ptr %c syncscope("block") monotonic, align 4
+
+ ; CHECK: ld.volatile.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load atomic volatile i64, ptr %d syncscope("block") monotonic, align 8
+ %d.add = add i64 %d.load, 1
+ ; CHECK: st.volatile.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store atomic volatile i64 %d.add, ptr %d syncscope("block") monotonic, align 8
+
+ ; CHECK: ld.volatile.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %e.load = load atomic volatile float, ptr %e syncscope("block") monotonic, align 4
+ %e.add = fadd float %e.load, 1.
+ ; CHECK: st.volatile.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ store atomic volatile float %e.add, ptr %e syncscope("block") monotonic, align 4
+
+ ; CHECK: ld.volatile.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %f.load = load atomic volatile double, ptr %e syncscope("block") monotonic, align 8
+ %f.add = fadd double %f.load, 1.
+ ; CHECK: st.volatile.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ store atomic volatile double %f.add, ptr %e syncscope("block") monotonic, align 8
+
+ ret void
+}
+
+; CHECK-LABEL: generic_acq_rel_sys
+define void @generic_acq_rel_sys(ptr %a, ptr %b, ptr %c, ptr %d, ptr %e) local_unnamed_addr {
; CHECK: ld.acquire.sys.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
%a.load = load atomic i8, ptr %a acquire, align 1
%a.add = add i8 %a.load, 1
@@ -31,7 +388,7 @@ define void @generic_acq_rel(ptr %a, ptr %b, ptr %c, ptr %d, ptr %e) local_unnam
; CHECK: ld.acquire.sys.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
%e.load = load atomic float, ptr %e acquire, align 4
- %e.add = fadd float %e.load, 1.0
+ %e.add = fadd float %e.load, 1.
; CHECK: st.release.sys.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
store atomic float %e.add, ptr %e release, align 4
@@ -44,8 +401,8 @@ define void @generic_acq_rel(ptr %a, ptr %b, ptr %c, ptr %d, ptr %e) local_unnam
ret void
}
-; CHECK-LABEL: generic_acq_rel_volatile
-define void @generic_acq_rel_volatile(ptr %a, ptr %b, ptr %c, ptr %d, ptr %e) local_unnamed_addr {
+; CHECK-LABEL: generic_acq_rel_volatile_sys
+define void @generic_acq_rel_volatile_sys(ptr %a, ptr %b, ptr %c, ptr %d, ptr %e) local_unnamed_addr {
; CHECK: ld.acquire.sys.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
%a.load = load atomic volatile i8, ptr %a acquire, align 1
%a.add = add i8 %a.load, 1
@@ -72,7 +429,7 @@ define void @generic_acq_rel_volatile(ptr %a, ptr %b, ptr %c, ptr %d, ptr %e) lo
; CHECK: ld.acquire.sys.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
%e.load = load atomic volatile float, ptr %e acquire, align 4
- %e.add = fadd float %e.load, 1.0
+ %e.add = fadd float %e.load, 1.
; CHECK: st.release.sys.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
store atomic volatile float %e.add, ptr %e release, align 4
@@ -85,8 +442,172 @@ define void @generic_acq_rel_volatile(ptr %a, ptr %b, ptr %c, ptr %d, ptr %e) lo
ret void
}
-; CHECK-LABEL: generic_sc
-define void @generic_sc(ptr %a, ptr %b, ptr %c, ptr %d, ptr %e) local_unnamed_addr {
+; CHECK-LABEL: generic_acq_rel_gpu
+define void @generic_acq_rel_gpu(ptr %a, ptr %b, ptr %c, ptr %d, ptr %e) local_unnamed_addr {
+ ; CHECK: ld.acquire.gpu.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load atomic i8, ptr %a syncscope("device") acquire, align 1
+ %a.add = add i8 %a.load, 1
+ ; CHECK: st.release.gpu.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic i8 %a.add, ptr %a syncscope("device") release, align 1
+
+ ; CHECK: ld.acquire.gpu.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load atomic i16, ptr %b syncscope("device") acquire, align 2
+ %b.add = add i16 %b.load, 1
+ ; CHECK: st.release.gpu.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic i16 %b.add, ptr %b syncscope("device") release, align 2
+
+ ; CHECK: ld.acquire.gpu.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load atomic i32, ptr %c syncscope("device") acquire, align 4
+ %c.add = add i32 %c.load, 1
+ ; CHECK: st.release.gpu.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store atomic i32 %c.add, ptr %c syncscope("device") release, align 4
+
+ ; CHECK: ld.acquire.gpu.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load atomic i64, ptr %d syncscope("device") acquire, align 8
+ %d.add = add i64 %d.load, 1
+ ; CHECK: st.release.gpu.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store atomic i64 %d.add, ptr %d syncscope("device") release, align 8
+
+ ; CHECK: ld.acquire.gpu.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %e.load = load atomic float, ptr %e syncscope("device") acquire, align 4
+ %e.add = fadd float %e.load, 1.
+ ; CHECK: st.release.gpu.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ store atomic float %e.add, ptr %e syncscope("device") release, align 4
+
+ ; CHECK: ld.acquire.gpu.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %f.load = load atomic double, ptr %e syncscope("device") acquire, align 8
+ %f.add = fadd double %f.load, 1.
+ ; CHECK: st.release.gpu.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ store atomic double %f.add, ptr %e syncscope("device") release, align 8
+
+ ret void
+}
+
+; CHECK-LABEL: generic_acq_rel_volatile_gpu
+define void @generic_acq_rel_volatile_gpu(ptr %a, ptr %b, ptr %c, ptr %d, ptr %e) local_unnamed_addr {
+ ; CHECK: ld.acquire.sys.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load atomic volatile i8, ptr %a syncscope("device") acquire, align 1
+ %a.add = add i8 %a.load, 1
+ ; CHECK: st.release.sys.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic volatile i8 %a.add, ptr %a syncscope("device") release, align 1
+
+ ; CHECK: ld.acquire.sys.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load atomic volatile i16, ptr %b syncscope("device") acquire, align 2
+ %b.add = add i16 %b.load, 1
+ ; CHECK: st.release.sys.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic volatile i16 %b.add, ptr %b syncscope("device") release, align 2
+
+ ; CHECK: ld.acquire.sys.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load atomic volatile i32, ptr %c syncscope("device") acquire, align 4
+ %c.add = add i32 %c.load, 1
+ ; CHECK: st.release.sys.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store atomic volatile i32 %c.add, ptr %c syncscope("device") release, align 4
+
+ ; CHECK: ld.acquire.sys.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load atomic volatile i64, ptr %d syncscope("device") acquire, align 8
+ %d.add = add i64 %d.load, 1
+ ; CHECK: st.release.sys.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store atomic volatile i64 %d.add, ptr %d syncscope("device") release, align 8
+
+ ; CHECK: ld.acquire.sys.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %e.load = load atomic volatile float, ptr %e syncscope("device") acquire, align 4
+ %e.add = fadd float %e.load, 1.
+ ; CHECK: st.release.sys.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ store atomic volatile float %e.add, ptr %e syncscope("device") release, align 4
+
+ ; CHECK: ld.acquire.sys.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %f.load = load atomic volatile double, ptr %e syncscope("device") acquire, align 8
+ %f.add = fadd double %f.load, 1.
+ ; CHECK: st.release.sys.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ store atomic volatile double %f.add, ptr %e syncscope("device") release, align 8
+
+ ret void
+}
+
+; CHECK-LABEL: generic_acq_rel_cta
+define void @generic_acq_rel_cta(ptr %a, ptr %b, ptr %c, ptr %d, ptr %e) local_unnamed_addr {
+ ; CHECK: ld.acquire.cta.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load atomic i8, ptr %a syncscope("block") acquire, align 1
+ %a.add = add i8 %a.load, 1
+ ; CHECK: st.release.cta.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic i8 %a.add, ptr %a syncscope("block") release, align 1
+
+ ; CHECK: ld.acquire.cta.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load atomic i16, ptr %b syncscope("block") acquire, align 2
+ %b.add = add i16 %b.load, 1
+ ; CHECK: st.release.cta.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic i16 %b.add, ptr %b syncscope("block") release, align 2
+
+ ; CHECK: ld.acquire.cta.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load atomic i32, ptr %c syncscope("block") acquire, align 4
+ %c.add = add i32 %c.load, 1
+ ; CHECK: st.release.cta.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store atomic i32 %c.add, ptr %c syncscope("block") release, align 4
+
+ ; CHECK: ld.acquire.cta.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load atomic i64, ptr %d syncscope("block") acquire, align 8
+ %d.add = add i64 %d.load, 1
+ ; CHECK: st.release.cta.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store atomic i64 %d.add, ptr %d syncscope("block") release, align 8
+
+ ; CHECK: ld.acquire.cta.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %e.load = load atomic float, ptr %e syncscope("block") acquire, align 4
+ %e.add = fadd float %e.load, 1.
+ ; CHECK: st.release.cta.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ store atomic float %e.add, ptr %e syncscope("block") release, align 4
+
+ ; CHECK: ld.acquire.cta.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %f.load = load atomic double, ptr %e syncscope("block") acquire, align 8
+ %f.add = fadd double %f.load, 1.
+ ; CHECK: st.release.cta.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ store atomic double %f.add, ptr %e syncscope("block") release, align 8
+
+ ret void
+}
+
+; CHECK-LABEL: generic_acq_rel_volatile_cta
+define void @generic_acq_rel_volatile_cta(ptr %a, ptr %b, ptr %c, ptr %d, ptr %e) local_unnamed_addr {
+ ; CHECK: ld.acquire.sys.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load atomic volatile i8, ptr %a syncscope("block") acquire, align 1
+ %a.add = add i8 %a.load, 1
+ ; CHECK: st.release.sys.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic volatile i8 %a.add, ptr %a syncscope("block") release, align 1
+
+ ; CHECK: ld.acquire.sys.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load atomic volatile i16, ptr %b syncscope("block") acquire, align 2
+ %b.add = add i16 %b.load, 1
+ ; CHECK: st.release.sys.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic volatile i16 %b.add, ptr %b syncscope("block") release, align 2
+
+ ; CHECK: ld.acquire.sys.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load atomic volatile i32, ptr %c syncscope("block") acquire, align 4
+ %c.add = add i32 %c.load, 1
+ ; CHECK: st.release.sys.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store atomic volatile i32 %c.add, ptr %c syncscope("block") release, align 4
+
+ ; CHECK: ld.acquire.sys.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load atomic volatile i64, ptr %d syncscope("block") acquire, align 8
+ %d.add = add i64 %d.load, 1
+ ; CHECK: st.release.sys.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store atomic volatile i64 %d.add, ptr %d syncscope("block") release, align 8
+
+ ; CHECK: ld.acquire.sys.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %e.load = load atomic volatile float, ptr %e syncscope("block") acquire, align 4
+ %e.add = fadd float %e.load, 1.
+ ; CHECK: st.release.sys.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ store atomic volatile float %e.add, ptr %e syncscope("block") release, align 4
+
+ ; CHECK: ld.acquire.sys.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %f.load = load atomic volatile double, ptr %e syncscope("block") acquire, align 8
+ %f.add = fadd double %f.load, 1.
+ ; CHECK: st.release.sys.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ store atomic volatile double %f.add, ptr %e syncscope("block") release, align 8
+
+ ret void
+}
+
+; CHECK-LABEL: generic_sc_sys
+define void @generic_sc_sys(ptr %a, ptr %b, ptr %c, ptr %d, ptr %e) local_unnamed_addr {
; CHECK: fence.sc.sys
; CHECK: ld.acquire.sys.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
%a.load = load atomic i8, ptr %a seq_cst, align 1
@@ -122,7 +643,7 @@ define void @generic_sc(ptr %a, ptr %b, ptr %c, ptr %d, ptr %e) local_unnamed_ad
; CHECK: fence.sc.sys
; CHECK: ld.acquire.sys.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
%e.load = load atomic float, ptr %e seq_cst, align 4
- %e.add = fadd float %e.load, 1.0
+ %e.add = fadd float %e.load, 1.
; CHECK: fence.sc.sys
; CHECK: st.release.sys.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
store atomic float %e.add, ptr %e seq_cst, align 4
@@ -138,8 +659,8 @@ define void @generic_sc(ptr %a, ptr %b, ptr %c, ptr %d, ptr %e) local_unnamed_ad
ret void
}
-; CHECK-LABEL: generic_sc_volatile
-define void @generic_sc_volatile(ptr %a, ptr %b, ptr %c, ptr %d, ptr %e) local_unnamed_addr {
+; CHECK-LABEL: generic_sc_volatile_sys
+define void @generic_sc_volatile_sys(ptr %a, ptr %b, ptr %c, ptr %d, ptr %e) local_unnamed_addr {
; CHECK: fence.sc.sys
; CHECK: ld.acquire.sys.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
%a.load = load atomic volatile i8, ptr %a seq_cst, align 1
@@ -175,7 +696,7 @@ define void @generic_sc_volatile(ptr %a, ptr %b, ptr %c, ptr %d, ptr %e) local_u
; CHECK: fence.sc.sys
; CHECK: ld.acquire.sys.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
%e.load = load atomic volatile float, ptr %e seq_cst, align 4
- %e.add = fadd float %e.load, 1.0
+ %e.add = fadd float %e.load, 1.
; CHECK: fence.sc.sys
; CHECK: st.release.sys.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
store atomic volatile float %e.add, ptr %e seq_cst, align 4
@@ -191,10 +712,550 @@ define void @generic_sc_volatile(ptr %a, ptr %b, ptr %c, ptr %d, ptr %e) local_u
ret void
}
+; CHECK-LABEL: generic_sc_gpu
+define void @generic_sc_gpu(ptr %a, ptr %b, ptr %c, ptr %d, ptr %e) local_unnamed_addr {
+ ; CHECK: fence.sc.gpu
+ ; CHECK: ld.acquire.gpu.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load atomic i8, ptr %a syncscope("device") seq_cst, align 1
+ %a.add = add i8 %a.load, 1
+ ; CHECK: fence.sc.gpu
+ ; CHECK: st.release.gpu.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic i8 %a.add, ptr %a syncscope("device") seq_cst, align 1
+
+ ; CHECK: fence.sc.gpu
+ ; CHECK: ld.acquire.gpu.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load atomic i16, ptr %b syncscope("device") seq_cst, align 2
+ %b.add = add i16 %b.load, 1
+ ; CHECK: fence.sc.gpu
+ ; CHECK: st.release.gpu.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic i16 %b.add, ptr %b syncscope("device") seq_cst, align 2
+
+ ; CHECK: fence.sc.gpu
+ ; CHECK: ld.acquire.gpu.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load atomic i32, ptr %c syncscope("device") seq_cst, align 4
+ %c.add = add i32 %c.load, 1
+ ; CHECK: fence.sc.gpu
+ ; CHECK: st.release.gpu.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store atomic i32 %c.add, ptr %c syncscope("device") seq_cst, align 4
+
+ ; CHECK: fence.sc.gpu
+ ; CHECK: ld.acquire.gpu.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load atomic i64, ptr %d syncscope("device") seq_cst, align 8
+ %d.add = add i64 %d.load, 1
+ ; CHECK: fence.sc.gpu
+ ; CHECK: st.release.gpu.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store atomic i64 %d.add, ptr %d syncscope("device") seq_cst, align 8
+
+ ; CHECK: fence.sc.gpu
+ ; CHECK: ld.acquire.gpu.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %e.load = load atomic float, ptr %e syncscope("device") seq_cst, align 4
+ %e.add = fadd float %e.load, 1.
+ ; CHECK: fence.sc.gpu
+ ; CHECK: st.release.gpu.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ store atomic float %e.add, ptr %e syncscope("device") seq_cst, align 4
+
+ ; CHECK: fence.sc.gpu
+ ; CHECK: ld.acquire.gpu.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %f.load = load atomic double, ptr %e syncscope("device") seq_cst, align 8
+ %f.add = fadd double %f.load, 1.
+ ; CHECK: fence.sc.gpu
+ ; CHECK: st.release.gpu.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ store atomic double %f.add, ptr %e syncscope("device") seq_cst, align 8
+
+ ret void
+}
+
+; CHECK-LABEL: generic_sc_volatile_gpu
+define void @generic_sc_volatile_gpu(ptr %a, ptr %b, ptr %c, ptr %d, ptr %e) local_unnamed_addr {
+ ; CHECK: fence.sc.sys
+ ; CHECK: ld.acquire.sys.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load atomic volatile i8, ptr %a syncscope("device") seq_cst, align 1
+ %a.add = add i8 %a.load, 1
+ ; CHECK: fence.sc.sys
+ ; CHECK: st.release.sys.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic volatile i8 %a.add, ptr %a syncscope("device") seq_cst, align 1
+
+ ; CHECK: fence.sc.sys
+ ; CHECK: ld.acquire.sys.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load atomic volatile i16, ptr %b syncscope("device") seq_cst, align 2
+ %b.add = add i16 %b.load, 1
+ ; CHECK: fence.sc.sys
+ ; CHECK: st.release.sys.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic volatile i16 %b.add, ptr %b syncscope("device") seq_cst, align 2
+
+ ; CHECK: fence.sc.sys
+ ; CHECK: ld.acquire.sys.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load atomic volatile i32, ptr %c syncscope("device") seq_cst, align 4
+ %c.add = add i32 %c.load, 1
+ ; CHECK: fence.sc.sys
+ ; CHECK: st.release.sys.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store atomic volatile i32 %c.add, ptr %c syncscope("device") seq_cst, align 4
+
+ ; CHECK: fence.sc.sys
+ ; CHECK: ld.acquire.sys.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load atomic volatile i64, ptr %d syncscope("device") seq_cst, align 8
+ %d.add = add i64 %d.load, 1
+ ; CHECK: fence.sc.sys
+ ; CHECK: st.release.sys.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store atomic volatile i64 %d.add, ptr %d syncscope("device") seq_cst, align 8
+
+ ; CHECK: fence.sc.sys
+ ; CHECK: ld.acquire.sys.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %e.load = load atomic volatile float, ptr %e syncscope("device") seq_cst, align 4
+ %e.add = fadd float %e.load, 1.
+ ; CHECK: fence.sc.sys
+ ; CHECK: st.release.sys.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ store atomic volatile float %e.add, ptr %e syncscope("device") seq_cst, align 4
+
+ ; CHECK: fence.sc.sys
+ ; CHECK: ld.acquire.sys.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %f.load = load atomic volatile double, ptr %e syncscope("device") seq_cst, align 8
+ %f.add = fadd double %f.load, 1.
+ ; CHECK: fence.sc.sys
+ ; CHECK: st.release.sys.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ store atomic volatile double %f.add, ptr %e syncscope("device") seq_cst, align 8
+
+ ret void
+}
+
+; CHECK-LABEL: generic_sc_cta
+define void @generic_sc_cta(ptr %a, ptr %b, ptr %c, ptr %d, ptr %e) local_unnamed_addr {
+ ; CHECK: fence.sc.cta
+ ; CHECK: ld.acquire.cta.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load atomic i8, ptr %a syncscope("block") seq_cst, align 1
+ %a.add = add i8 %a.load, 1
+ ; CHECK: fence.sc.cta
+ ; CHECK: st.release.cta.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic i8 %a.add, ptr %a syncscope("block") seq_cst, align 1
+
+ ; CHECK: fence.sc.cta
+ ; CHECK: ld.acquire.cta.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load atomic i16, ptr %b syncscope("block") seq_cst, align 2
+ %b.add = add i16 %b.load, 1
+ ; CHECK: fence.sc.cta
+ ; CHECK: st.release.cta.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic i16 %b.add, ptr %b syncscope("block") seq_cst, align 2
+
+ ; CHECK: fence.sc.cta
+ ; CHECK: ld.acquire.cta.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load atomic i32, ptr %c syncscope("block") seq_cst, align 4
+ %c.add = add i32 %c.load, 1
+ ; CHECK: fence.sc.cta
+ ; CHECK: st.release.cta.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store atomic i32 %c.add, ptr %c syncscope("block") seq_cst, align 4
+
+ ; CHECK: fence.sc.cta
+ ; CHECK: ld.acquire.cta.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load atomic i64, ptr %d syncscope("block") seq_cst, align 8
+ %d.add = add i64 %d.load, 1
+ ; CHECK: fence.sc.cta
+ ; CHECK: st.release.cta.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store atomic i64 %d.add, ptr %d syncscope("block") seq_cst, align 8
+
+ ; CHECK: fence.sc.cta
+ ; CHECK: ld.acquire.cta.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %e.load = load atomic float, ptr %e syncscope("block") seq_cst, align 4
+ %e.add = fadd float %e.load, 1.
+ ; CHECK: fence.sc.cta
+ ; CHECK: st.release.cta.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ store atomic float %e.add, ptr %e syncscope("block") seq_cst, align 4
+
+ ; CHECK: fence.sc.cta
+ ; CHECK: ld.acquire.cta.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %f.load = load atomic double, ptr %e syncscope("block") seq_cst, align 8
+ %f.add = fadd double %f.load, 1.
+ ; CHECK: fence.sc.cta
+ ; CHECK: st.release.cta.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ store atomic double %f.add, ptr %e syncscope("block") seq_cst, align 8
+
+ ret void
+}
+
+; CHECK-LABEL: generic_sc_volatile_cta
+define void @generic_sc_volatile_cta(ptr %a, ptr %b, ptr %c, ptr %d, ptr %e) local_unnamed_addr {
+ ; CHECK: fence.sc.sys
+ ; CHECK: ld.acquire.sys.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load atomic volatile i8, ptr %a syncscope("block") seq_cst, align 1
+ %a.add = add i8 %a.load, 1
+ ; CHECK: fence.sc.sys
+ ; CHECK: st.release.sys.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic volatile i8 %a.add, ptr %a syncscope("block") seq_cst, align 1
+
+ ; CHECK: fence.sc.sys
+ ; CHECK: ld.acquire.sys.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load atomic volatile i16, ptr %b syncscope("block") seq_cst, align 2
+ %b.add = add i16 %b.load, 1
+ ; CHECK: fence.sc.sys
+ ; CHECK: st.release.sys.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic volatile i16 %b.add, ptr %b syncscope("block") seq_cst, align 2
+
+ ; CHECK: fence.sc.sys
+ ; CHECK: ld.acquire.sys.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load atomic volatile i32, ptr %c syncscope("block") seq_cst, align 4
+ %c.add = add i32 %c.load, 1
+ ; CHECK: fence.sc.sys
+ ; CHECK: st.release.sys.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store atomic volatile i32 %c.add, ptr %c syncscope("block") seq_cst, align 4
+
+ ; CHECK: fence.sc.sys
+ ; CHECK: ld.acquire.sys.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load atomic volatile i64, ptr %d syncscope("block") seq_cst, align 8
+ %d.add = add i64 %d.load, 1
+ ; CHECK: fence.sc.sys
+ ; CHECK: st.release.sys.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store atomic volatile i64 %d.add, ptr %d syncscope("block") seq_cst, align 8
+
+ ; CHECK: fence.sc.sys
+ ; CHECK: ld.acquire.sys.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %e.load = load atomic volatile float, ptr %e syncscope("block") seq_cst, align 4
+ %e.add = fadd float %e.load, 1.
+ ; CHECK: fence.sc.sys
+ ; CHECK: st.release.sys.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ store atomic volatile float %e.add, ptr %e syncscope("block") seq_cst, align 4
+
+ ; CHECK: fence.sc.sys
+ ; CHECK: ld.acquire.sys.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %f.load = load atomic volatile double, ptr %e syncscope("block") seq_cst, align 8
+ %f.add = fadd double %f.load, 1.
+ ; CHECK: fence.sc.sys
+ ; CHECK: st.release.sys.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ store atomic volatile double %f.add, ptr %e syncscope("block") seq_cst, align 8
+
+ ret void
+}
+
;; global statespace
-; CHECK-LABEL: global_acq_rel
-define void @global_acq_rel(ptr addrspace(1) %a, ptr addrspace(1) %b, ptr addrspace(1) %c, ptr addrspace(1) %d, ptr addrspace(1) %e) local_unnamed_addr {
+; CHECK-LABEL: global_unordered_gpu
+define void @global_unordered_gpu(ptr addrspace(1) %a, ptr addrspace(1) %b, ptr addrspace(1) %c, ptr addrspace(1) %d, ptr addrspace(1) %e) local_unnamed_addr {
+ ; CHECK: ld.relaxed.gpu.global.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load atomic i8, ptr addrspace(1) %a syncscope("device") unordered, align 1
+ %a.add = add i8 %a.load, 1
+ ; CHECK: st.relaxed.gpu.global.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic i8 %a.add, ptr addrspace(1) %a syncscope("device") unordered, align 1
+
+ ; CHECK: ld.relaxed.gpu.global.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load atomic i16, ptr addrspace(1) %b syncscope("device") unordered, align 2
+ %b.add = add i16 %b.load, 1
+ ; CHECK: st.relaxed.gpu.global.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic i16 %b.add, ptr addrspace(1) %b syncscope("device") unordered, align 2
+
+ ; CHECK: ld.relaxed.gpu.global.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load atomic i32, ptr addrspace(1) %c syncscope("device") unordered, align 4
+ %c.add = add i32 %c.load, 1
+ ; CHECK: st.relaxed.gpu.global.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store atomic i32 %c.add, ptr addrspace(1) %c syncscope("device") unordered, align 4
+
+ ; CHECK: ld.relaxed.gpu.global.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load atomic i64, ptr addrspace(1) %d syncscope("device") unordered, align 8
+ %d.add = add i64 %d.load, 1
+ ; CHECK: st.relaxed.gpu.global.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store atomic i64 %d.add, ptr addrspace(1) %d syncscope("device") unordered, align 8
+
+ ; CHECK: ld.relaxed.gpu.global.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %e.load = load atomic float, ptr addrspace(1) %e syncscope("device") unordered, align 4
+ %e.add = fadd float %e.load, 1.
+ ; CHECK: st.relaxed.gpu.global.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ store atomic float %e.add, ptr addrspace(1) %e syncscope("device") unordered, align 4
+
+ ; CHECK: ld.relaxed.gpu.global.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %f.load = load atomic double, ptr addrspace(1) %e syncscope("device") unordered, align 8
+ %f.add = fadd double %f.load, 1.
+ ; CHECK: st.relaxed.gpu.global.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ store atomic double %f.add, ptr addrspace(1) %e syncscope("device") unordered, align 8
+
+ ret void
+}
+
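+; Volatile unordered atomics on global memory are expected to lower to
+; ld.mmio.relaxed.sys / st.mmio.relaxed.sys regardless of the syncscope.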
+; CHECK-LABEL: global_unordered_volatile_gpu
+define void @global_unordered_volatile_gpu(ptr addrspace(1) %a, ptr addrspace(1) %b, ptr addrspace(1) %c, ptr addrspace(1) %d, ptr addrspace(1) %e) local_unnamed_addr {
+ ; CHECK: ld.mmio.relaxed.sys.global.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load atomic volatile i8, ptr addrspace(1) %a syncscope("device") unordered, align 1
+ %a.add = add i8 %a.load, 1
+ ; CHECK: st.mmio.relaxed.sys.global.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic volatile i8 %a.add, ptr addrspace(1) %a syncscope("device") unordered, align 1
+
+ ; CHECK: ld.mmio.relaxed.sys.global.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load atomic volatile i16, ptr addrspace(1) %b syncscope("device") unordered, align 2
+ %b.add = add i16 %b.load, 1
+ ; CHECK: st.mmio.relaxed.sys.global.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic volatile i16 %b.add, ptr addrspace(1) %b syncscope("device") unordered, align 2
+
+ ; CHECK: ld.mmio.relaxed.sys.global.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load atomic volatile i32, ptr addrspace(1) %c syncscope("device") unordered, align 4
+ %c.add = add i32 %c.load, 1
+ ; CHECK: st.mmio.relaxed.sys.global.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store atomic volatile i32 %c.add, ptr addrspace(1) %c syncscope("device") unordered, align 4
+
+ ; CHECK: ld.mmio.relaxed.sys.global.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load atomic volatile i64, ptr addrspace(1) %d syncscope("device") unordered, align 8
+ %d.add = add i64 %d.load, 1
+ ; CHECK: st.mmio.relaxed.sys.global.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store atomic volatile i64 %d.add, ptr addrspace(1) %d syncscope("device") unordered, align 8
+
+ ; CHECK: ld.mmio.relaxed.sys.global.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %e.load = load atomic volatile float, ptr addrspace(1) %e syncscope("device") unordered, align 4
+ %e.add = fadd float %e.load, 1.
+ ; CHECK: st.mmio.relaxed.sys.global.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ store atomic volatile float %e.add, ptr addrspace(1) %e syncscope("device") unordered, align 4
+
+ ; CHECK: ld.mmio.relaxed.sys.global.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %f.load = load atomic volatile double, ptr addrspace(1) %e syncscope("device") unordered, align 8
+ %f.add = fadd double %f.load, 1.
+ ; CHECK: st.mmio.relaxed.sys.global.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ store atomic volatile double %f.add, ptr addrspace(1) %e syncscope("device") unordered, align 8
+
+ ret void
+}
+
+; CHECK-LABEL: global_unordered_cta
+define void @global_unordered_cta(ptr addrspace(1) %a, ptr addrspace(1) %b, ptr addrspace(1) %c, ptr addrspace(1) %d, ptr addrspace(1) %e) local_unnamed_addr {
+ ; CHECK: ld.relaxed.cta.global.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load atomic i8, ptr addrspace(1) %a syncscope("block") unordered, align 1
+ %a.add = add i8 %a.load, 1
+ ; CHECK: st.relaxed.cta.global.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic i8 %a.add, ptr addrspace(1) %a syncscope("block") unordered, align 1
+
+ ; CHECK: ld.relaxed.cta.global.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load atomic i16, ptr addrspace(1) %b syncscope("block") unordered, align 2
+ %b.add = add i16 %b.load, 1
+ ; CHECK: st.relaxed.cta.global.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic i16 %b.add, ptr addrspace(1) %b syncscope("block") unordered, align 2
+
+ ; CHECK: ld.relaxed.cta.global.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load atomic i32, ptr addrspace(1) %c syncscope("block") unordered, align 4
+ %c.add = add i32 %c.load, 1
+ ; CHECK: st.relaxed.cta.global.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store atomic i32 %c.add, ptr addrspace(1) %c syncscope("block") unordered, align 4
+
+ ; CHECK: ld.relaxed.cta.global.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load atomic i64, ptr addrspace(1) %d syncscope("block") unordered, align 8
+ %d.add = add i64 %d.load, 1
+ ; CHECK: st.relaxed.cta.global.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store atomic i64 %d.add, ptr addrspace(1) %d syncscope("block") unordered, align 8
+
+ ; CHECK: ld.relaxed.cta.global.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %e.load = load atomic float, ptr addrspace(1) %e syncscope("block") unordered, align 4
+ %e.add = fadd float %e.load, 1.
+ ; CHECK: st.relaxed.cta.global.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ store atomic float %e.add, ptr addrspace(1) %e syncscope("block") unordered, align 4
+
+ ; CHECK: ld.relaxed.cta.global.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %f.load = load atomic double, ptr addrspace(1) %e syncscope("block") unordered, align 8
+ %f.add = fadd double %f.load, 1.
+ ; CHECK: st.relaxed.cta.global.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ store atomic double %f.add, ptr addrspace(1) %e syncscope("block") unordered, align 8
+
+ ret void
+}
+
+; CHECK-LABEL: global_unordered_volatile_cta
+define void @global_unordered_volatile_cta(ptr addrspace(1) %a, ptr addrspace(1) %b, ptr addrspace(1) %c, ptr addrspace(1) %d, ptr addrspace(1) %e) local_unnamed_addr {
+ ; CHECK: ld.mmio.relaxed.sys.global.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load atomic volatile i8, ptr addrspace(1) %a syncscope("block") unordered, align 1
+ %a.add = add i8 %a.load, 1
+ ; CHECK: st.mmio.relaxed.sys.global.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic volatile i8 %a.add, ptr addrspace(1) %a syncscope("block") unordered, align 1
+
+ ; CHECK: ld.mmio.relaxed.sys.global.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load atomic volatile i16, ptr addrspace(1) %b syncscope("block") unordered, align 2
+ %b.add = add i16 %b.load, 1
+ ; CHECK: st.mmio.relaxed.sys.global.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic volatile i16 %b.add, ptr addrspace(1) %b syncscope("block") unordered, align 2
+
+ ; CHECK: ld.mmio.relaxed.sys.global.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load atomic volatile i32, ptr addrspace(1) %c syncscope("block") unordered, align 4
+ %c.add = add i32 %c.load, 1
+ ; CHECK: st.mmio.relaxed.sys.global.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store atomic volatile i32 %c.add, ptr addrspace(1) %c syncscope("block") unordered, align 4
+
+ ; CHECK: ld.mmio.relaxed.sys.global.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load atomic volatile i64, ptr addrspace(1) %d syncscope("block") unordered, align 8
+ %d.add = add i64 %d.load, 1
+ ; CHECK: st.mmio.relaxed.sys.global.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store atomic volatile i64 %d.add, ptr addrspace(1) %d syncscope("block") unordered, align 8
+
+ ; CHECK: ld.mmio.relaxed.sys.global.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %e.load = load atomic volatile float, ptr addrspace(1) %e syncscope("block") unordered, align 4
+ %e.add = fadd float %e.load, 1.
+ ; CHECK: st.mmio.relaxed.sys.global.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ store atomic volatile float %e.add, ptr addrspace(1) %e syncscope("block") unordered, align 4
+
+ ; CHECK: ld.mmio.relaxed.sys.global.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %f.load = load atomic volatile double, ptr addrspace(1) %e syncscope("block") unordered, align 8
+ %f.add = fadd double %f.load, 1.
+ ; CHECK: st.mmio.relaxed.sys.global.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ store atomic volatile double %f.add, ptr addrspace(1) %e syncscope("block") unordered, align 8
+
+ ret void
+}
+
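+; Monotonic atomics are expected to lower exactly like unordered ones:
+; plain ld.relaxed.<scope> / st.relaxed.<scope> with no fences.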
+; CHECK-LABEL: global_monotonic_gpu
+define void @global_monotonic_gpu(ptr addrspace(1) %a, ptr addrspace(1) %b, ptr addrspace(1) %c, ptr addrspace(1) %d, ptr addrspace(1) %e) local_unnamed_addr {
+ ; CHECK: ld.relaxed.gpu.global.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load atomic i8, ptr addrspace(1) %a syncscope("device") monotonic, align 1
+ %a.add = add i8 %a.load, 1
+ ; CHECK: st.relaxed.gpu.global.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic i8 %a.add, ptr addrspace(1) %a syncscope("device") monotonic, align 1
+
+ ; CHECK: ld.relaxed.gpu.global.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load atomic i16, ptr addrspace(1) %b syncscope("device") monotonic, align 2
+ %b.add = add i16 %b.load, 1
+ ; CHECK: st.relaxed.gpu.global.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic i16 %b.add, ptr addrspace(1) %b syncscope("device") monotonic, align 2
+
+ ; CHECK: ld.relaxed.gpu.global.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load atomic i32, ptr addrspace(1) %c syncscope("device") monotonic, align 4
+ %c.add = add i32 %c.load, 1
+ ; CHECK: st.relaxed.gpu.global.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store atomic i32 %c.add, ptr addrspace(1) %c syncscope("device") monotonic, align 4
+
+ ; CHECK: ld.relaxed.gpu.global.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load atomic i64, ptr addrspace(1) %d syncscope("device") monotonic, align 8
+ %d.add = add i64 %d.load, 1
+ ; CHECK: st.relaxed.gpu.global.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store atomic i64 %d.add, ptr addrspace(1) %d syncscope("device") monotonic, align 8
+
+ ; CHECK: ld.relaxed.gpu.global.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %e.load = load atomic float, ptr addrspace(1) %e syncscope("device") monotonic, align 4
+ %e.add = fadd float %e.load, 1.
+ ; CHECK: st.relaxed.gpu.global.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ store atomic float %e.add, ptr addrspace(1) %e syncscope("device") monotonic, align 4
+
+ ; CHECK: ld.relaxed.gpu.global.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %f.load = load atomic double, ptr addrspace(1) %e syncscope("device") monotonic, align 8
+ %f.add = fadd double %f.load, 1.
+ ; CHECK: st.relaxed.gpu.global.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ store atomic double %f.add, ptr addrspace(1) %e syncscope("device") monotonic, align 8
+
+ ret void
+}
+
+; CHECK-LABEL: global_monotonic_volatile_gpu
+define void @global_monotonic_volatile_gpu(ptr addrspace(1) %a, ptr addrspace(1) %b, ptr addrspace(1) %c, ptr addrspace(1) %d, ptr addrspace(1) %e) local_unnamed_addr {
+ ; CHECK: ld.mmio.relaxed.sys.global.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load atomic volatile i8, ptr addrspace(1) %a syncscope("device") monotonic, align 1
+ %a.add = add i8 %a.load, 1
+ ; CHECK: st.mmio.relaxed.sys.global.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic volatile i8 %a.add, ptr addrspace(1) %a syncscope("device") monotonic, align 1
+
+ ; CHECK: ld.mmio.relaxed.sys.global.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load atomic volatile i16, ptr addrspace(1) %b syncscope("device") monotonic, align 2
+ %b.add = add i16 %b.load, 1
+ ; CHECK: st.mmio.relaxed.sys.global.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic volatile i16 %b.add, ptr addrspace(1) %b syncscope("device") monotonic, align 2
+
+ ; CHECK: ld.mmio.relaxed.sys.global.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load atomic volatile i32, ptr addrspace(1) %c syncscope("device") monotonic, align 4
+ %c.add = add i32 %c.load, 1
+ ; CHECK: st.mmio.relaxed.sys.global.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store atomic volatile i32 %c.add, ptr addrspace(1) %c syncscope("device") monotonic, align 4
+
+ ; CHECK: ld.mmio.relaxed.sys.global.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load atomic volatile i64, ptr addrspace(1) %d syncscope("device") monotonic, align 8
+ %d.add = add i64 %d.load, 1
+ ; CHECK: st.mmio.relaxed.sys.global.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store atomic volatile i64 %d.add, ptr addrspace(1) %d syncscope("device") monotonic, align 8
+
+ ; CHECK: ld.mmio.relaxed.sys.global.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %e.load = load atomic volatile float, ptr addrspace(1) %e syncscope("device") monotonic, align 4
+ %e.add = fadd float %e.load, 1.
+ ; CHECK: st.mmio.relaxed.sys.global.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ store atomic volatile float %e.add, ptr addrspace(1) %e syncscope("device") monotonic, align 4
+
+ ; CHECK: ld.mmio.relaxed.sys.global.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %f.load = load atomic volatile double, ptr addrspace(1) %e syncscope("device") monotonic, align 8
+ %f.add = fadd double %f.load, 1.
+ ; CHECK: st.mmio.relaxed.sys.global.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ store atomic volatile double %f.add, ptr addrspace(1) %e syncscope("device") monotonic, align 8
+
+ ret void
+}
+
+; CHECK-LABEL: global_monotonic_cta
+define void @global_monotonic_cta(ptr addrspace(1) %a, ptr addrspace(1) %b, ptr addrspace(1) %c, ptr addrspace(1) %d, ptr addrspace(1) %e) local_unnamed_addr {
+ ; CHECK: ld.relaxed.cta.global.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load atomic i8, ptr addrspace(1) %a syncscope("block") monotonic, align 1
+ %a.add = add i8 %a.load, 1
+ ; CHECK: st.relaxed.cta.global.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic i8 %a.add, ptr addrspace(1) %a syncscope("block") monotonic, align 1
+
+ ; CHECK: ld.relaxed.cta.global.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load atomic i16, ptr addrspace(1) %b syncscope("block") monotonic, align 2
+ %b.add = add i16 %b.load, 1
+ ; CHECK: st.relaxed.cta.global.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic i16 %b.add, ptr addrspace(1) %b syncscope("block") monotonic, align 2
+
+ ; CHECK: ld.relaxed.cta.global.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load atomic i32, ptr addrspace(1) %c syncscope("block") monotonic, align 4
+ %c.add = add i32 %c.load, 1
+ ; CHECK: st.relaxed.cta.global.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store atomic i32 %c.add, ptr addrspace(1) %c syncscope("block") monotonic, align 4
+
+ ; CHECK: ld.relaxed.cta.global.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load atomic i64, ptr addrspace(1) %d syncscope("block") monotonic, align 8
+ %d.add = add i64 %d.load, 1
+ ; CHECK: st.relaxed.cta.global.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store atomic i64 %d.add, ptr addrspace(1) %d syncscope("block") monotonic, align 8
+
+ ; CHECK: ld.relaxed.cta.global.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %e.load = load atomic float, ptr addrspace(1) %e syncscope("block") monotonic, align 4
+ %e.add = fadd float %e.load, 1.
+ ; CHECK: st.relaxed.cta.global.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ store atomic float %e.add, ptr addrspace(1) %e syncscope("block") monotonic, align 4
+
+ ; CHECK: ld.relaxed.cta.global.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %f.load = load atomic double, ptr addrspace(1) %e syncscope("block") monotonic, align 8
+ %f.add = fadd double %f.load, 1.
+ ; CHECK: st.relaxed.cta.global.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ store atomic double %f.add, ptr addrspace(1) %e syncscope("block") monotonic, align 8
+
+ ret void
+}
+
+; CHECK-LABEL: global_monotonic_volatile_cta
+define void @global_monotonic_volatile_cta(ptr addrspace(1) %a, ptr addrspace(1) %b, ptr addrspace(1) %c, ptr addrspace(1) %d, ptr addrspace(1) %e) local_unnamed_addr {
+ ; CHECK: ld.mmio.relaxed.sys.global.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load atomic volatile i8, ptr addrspace(1) %a syncscope("block") monotonic, align 1
+ %a.add = add i8 %a.load, 1
+ ; CHECK: st.mmio.relaxed.sys.global.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic volatile i8 %a.add, ptr addrspace(1) %a syncscope("block") monotonic, align 1
+
+ ; CHECK: ld.mmio.relaxed.sys.global.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load atomic volatile i16, ptr addrspace(1) %b syncscope("block") monotonic, align 2
+ %b.add = add i16 %b.load, 1
+ ; CHECK: st.mmio.relaxed.sys.global.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic volatile i16 %b.add, ptr addrspace(1) %b syncscope("block") monotonic, align 2
+
+ ; CHECK: ld.mmio.relaxed.sys.global.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load atomic volatile i32, ptr addrspace(1) %c syncscope("block") monotonic, align 4
+ %c.add = add i32 %c.load, 1
+ ; CHECK: st.mmio.relaxed.sys.global.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store atomic volatile i32 %c.add, ptr addrspace(1) %c syncscope("block") monotonic, align 4
+
+ ; CHECK: ld.mmio.relaxed.sys.global.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load atomic volatile i64, ptr addrspace(1) %d syncscope("block") monotonic, align 8
+ %d.add = add i64 %d.load, 1
+ ; CHECK: st.mmio.relaxed.sys.global.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store atomic volatile i64 %d.add, ptr addrspace(1) %d syncscope("block") monotonic, align 8
+
+ ; CHECK: ld.mmio.relaxed.sys.global.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %e.load = load atomic volatile float, ptr addrspace(1) %e syncscope("block") monotonic, align 4
+ %e.add = fadd float %e.load, 1.
+ ; CHECK: st.mmio.relaxed.sys.global.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ store atomic volatile float %e.add, ptr addrspace(1) %e syncscope("block") monotonic, align 4
+
+ ; CHECK: ld.mmio.relaxed.sys.global.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %f.load = load atomic volatile double, ptr addrspace(1) %e syncscope("block") monotonic, align 8
+ %f.add = fadd double %f.load, 1.
+ ; CHECK: st.mmio.relaxed.sys.global.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ store atomic volatile double %f.add, ptr addrspace(1) %e syncscope("block") monotonic, align 8
+
+ ret void
+}
+
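+; Acquire loads and release stores lower directly to ld.acquire.<scope>
+; and st.release.<scope>; unlike the seq_cst tests below, no fence is
+; expected.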
+; CHECK-LABEL: global_acq_rel_sys
+define void @global_acq_rel_sys(ptr addrspace(1) %a, ptr addrspace(1) %b, ptr addrspace(1) %c, ptr addrspace(1) %d, ptr addrspace(1) %e) local_unnamed_addr {
; CHECK: ld.acquire.sys.global.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
%a.load = load atomic i8, ptr addrspace(1) %a acquire, align 1
%a.add = add i8 %a.load, 1
@@ -221,7 +1282,7 @@ define void @global_acq_rel(ptr addrspace(1) %a, ptr addrspace(1) %b, ptr addrsp
; CHECK: ld.acquire.sys.global.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
%e.load = load atomic float, ptr addrspace(1) %e acquire, align 4
- %e.add = fadd float %e.load, 1.0
+ %e.add = fadd float %e.load, 1.
; CHECK: st.release.sys.global.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
store atomic float %e.add, ptr addrspace(1) %e release, align 4
@@ -234,8 +1295,8 @@ define void @global_acq_rel(ptr addrspace(1) %a, ptr addrspace(1) %b, ptr addrsp
ret void
}
-; CHECK-LABEL: global_acq_rel_volatile
-define void @global_acq_rel_volatile(ptr addrspace(1) %a, ptr addrspace(1) %b, ptr addrspace(1) %c, ptr addrspace(1) %d, ptr addrspace(1) %e) local_unnamed_addr {
+; CHECK-LABEL: global_acq_rel_volatile_sys
+define void @global_acq_rel_volatile_sys(ptr addrspace(1) %a, ptr addrspace(1) %b, ptr addrspace(1) %c, ptr addrspace(1) %d, ptr addrspace(1) %e) local_unnamed_addr {
; CHECK: ld.acquire.sys.global.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
%a.load = load atomic volatile i8, ptr addrspace(1) %a acquire, align 1
%a.add = add i8 %a.load, 1
@@ -262,7 +1323,7 @@ define void @global_acq_rel_volatile(ptr addrspace(1) %a, ptr addrspace(1) %b, p
; CHECK: ld.acquire.sys.global.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
%e.load = load atomic volatile float, ptr addrspace(1) %e acquire, align 4
- %e.add = fadd float %e.load, 1.0
+ %e.add = fadd float %e.load, 1.
; CHECK: st.release.sys.global.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
store atomic volatile float %e.add, ptr addrspace(1) %e release, align 4
@@ -275,8 +1336,172 @@ define void @global_acq_rel_volatile(ptr addrspace(1) %a, ptr addrspace(1) %b, p
ret void
}
-; CHECK-LABEL: global_seq_cst
-define void @global_seq_cst(ptr addrspace(1) %a, ptr addrspace(1) %b, ptr addrspace(1) %c, ptr addrspace(1) %d, ptr addrspace(1) %e) local_unnamed_addr {
+; CHECK-LABEL: global_acq_rel_gpu
+define void @global_acq_rel_gpu(ptr addrspace(1) %a, ptr addrspace(1) %b, ptr addrspace(1) %c, ptr addrspace(1) %d, ptr addrspace(1) %e) local_unnamed_addr {
+ ; CHECK: ld.acquire.gpu.global.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load atomic i8, ptr addrspace(1) %a syncscope("device") acquire, align 1
+ %a.add = add i8 %a.load, 1
+ ; CHECK: st.release.gpu.global.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic i8 %a.add, ptr addrspace(1) %a syncscope("device") release, align 1
+
+ ; CHECK: ld.acquire.gpu.global.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load atomic i16, ptr addrspace(1) %b syncscope("device") acquire, align 2
+ %b.add = add i16 %b.load, 1
+ ; CHECK: st.release.gpu.global.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic i16 %b.add, ptr addrspace(1) %b syncscope("device") release, align 2
+
+ ; CHECK: ld.acquire.gpu.global.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load atomic i32, ptr addrspace(1) %c syncscope("device") acquire, align 4
+ %c.add = add i32 %c.load, 1
+ ; CHECK: st.release.gpu.global.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store atomic i32 %c.add, ptr addrspace(1) %c syncscope("device") release, align 4
+
+ ; CHECK: ld.acquire.gpu.global.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load atomic i64, ptr addrspace(1) %d syncscope("device") acquire, align 8
+ %d.add = add i64 %d.load, 1
+ ; CHECK: st.release.gpu.global.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store atomic i64 %d.add, ptr addrspace(1) %d syncscope("device") release, align 8
+
+ ; CHECK: ld.acquire.gpu.global.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %e.load = load atomic float, ptr addrspace(1) %e syncscope("device") acquire, align 4
+ %e.add = fadd float %e.load, 1.
+ ; CHECK: st.release.gpu.global.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ store atomic float %e.add, ptr addrspace(1) %e syncscope("device") release, align 4
+
+ ; CHECK: ld.acquire.gpu.global.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %f.load = load atomic double, ptr addrspace(1) %e syncscope("device") acquire, align 8
+ %f.add = fadd double %f.load, 1.
+ ; CHECK: st.release.gpu.global.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ store atomic double %f.add, ptr addrspace(1) %e syncscope("device") release, align 8
+
+ ret void
+}
+
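+; For volatile acquire/release atomics the tests expect the scope to be
+; widened to .sys even under syncscope("device") or syncscope("block").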
+; CHECK-LABEL: global_acq_rel_volatile_gpu
+define void @global_acq_rel_volatile_gpu(ptr addrspace(1) %a, ptr addrspace(1) %b, ptr addrspace(1) %c, ptr addrspace(1) %d, ptr addrspace(1) %e) local_unnamed_addr {
+ ; CHECK: ld.acquire.sys.global.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load atomic volatile i8, ptr addrspace(1) %a syncscope("device") acquire, align 1
+ %a.add = add i8 %a.load, 1
+ ; CHECK: st.release.sys.global.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic volatile i8 %a.add, ptr addrspace(1) %a syncscope("device") release, align 1
+
+ ; CHECK: ld.acquire.sys.global.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load atomic volatile i16, ptr addrspace(1) %b syncscope("device") acquire, align 2
+ %b.add = add i16 %b.load, 1
+ ; CHECK: st.release.sys.global.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic volatile i16 %b.add, ptr addrspace(1) %b syncscope("device") release, align 2
+
+ ; CHECK: ld.acquire.sys.global.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load atomic volatile i32, ptr addrspace(1) %c syncscope("device") acquire, align 4
+ %c.add = add i32 %c.load, 1
+ ; CHECK: st.release.sys.global.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store atomic volatile i32 %c.add, ptr addrspace(1) %c syncscope("device") release, align 4
+
+ ; CHECK: ld.acquire.sys.global.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load atomic volatile i64, ptr addrspace(1) %d syncscope("device") acquire, align 8
+ %d.add = add i64 %d.load, 1
+ ; CHECK: st.release.sys.global.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store atomic volatile i64 %d.add, ptr addrspace(1) %d syncscope("device") release, align 8
+
+ ; CHECK: ld.acquire.sys.global.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %e.load = load atomic volatile float, ptr addrspace(1) %e syncscope("device") acquire, align 4
+ %e.add = fadd float %e.load, 1.
+ ; CHECK: st.release.sys.global.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ store atomic volatile float %e.add, ptr addrspace(1) %e syncscope("device") release, align 4
+
+ ; CHECK: ld.acquire.sys.global.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %f.load = load atomic volatile double, ptr addrspace(1) %e syncscope("device") acquire, align 8
+ %f.add = fadd double %f.load, 1.
+ ; CHECK: st.release.sys.global.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ store atomic volatile double %f.add, ptr addrspace(1) %e syncscope("device") release, align 8
+
+ ret void
+}
+
+; CHECK-LABEL: global_acq_rel_cta
+define void @global_acq_rel_cta(ptr addrspace(1) %a, ptr addrspace(1) %b, ptr addrspace(1) %c, ptr addrspace(1) %d, ptr addrspace(1) %e) local_unnamed_addr {
+ ; CHECK: ld.acquire.cta.global.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load atomic i8, ptr addrspace(1) %a syncscope("block") acquire, align 1
+ %a.add = add i8 %a.load, 1
+ ; CHECK: st.release.cta.global.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic i8 %a.add, ptr addrspace(1) %a syncscope("block") release, align 1
+
+ ; CHECK: ld.acquire.cta.global.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load atomic i16, ptr addrspace(1) %b syncscope("block") acquire, align 2
+ %b.add = add i16 %b.load, 1
+ ; CHECK: st.release.cta.global.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic i16 %b.add, ptr addrspace(1) %b syncscope("block") release, align 2
+
+ ; CHECK: ld.acquire.cta.global.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load atomic i32, ptr addrspace(1) %c syncscope("block") acquire, align 4
+ %c.add = add i32 %c.load, 1
+ ; CHECK: st.release.cta.global.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store atomic i32 %c.add, ptr addrspace(1) %c syncscope("block") release, align 4
+
+ ; CHECK: ld.acquire.cta.global.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load atomic i64, ptr addrspace(1) %d syncscope("block") acquire, align 8
+ %d.add = add i64 %d.load, 1
+ ; CHECK: st.release.cta.global.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store atomic i64 %d.add, ptr addrspace(1) %d syncscope("block") release, align 8
+
+ ; CHECK: ld.acquire.cta.global.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %e.load = load atomic float, ptr addrspace(1) %e syncscope("block") acquire, align 4
+ %e.add = fadd float %e.load, 1.
+ ; CHECK: st.release.cta.global.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ store atomic float %e.add, ptr addrspace(1) %e syncscope("block") release, align 4
+
+ ; CHECK: ld.acquire.cta.global.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %f.load = load atomic double, ptr addrspace(1) %e syncscope("block") acquire, align 8
+ %f.add = fadd double %f.load, 1.
+ ; CHECK: st.release.cta.global.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ store atomic double %f.add, ptr addrspace(1) %e syncscope("block") release, align 8
+
+ ret void
+}
+
+; CHECK-LABEL: global_acq_rel_volatile_cta
+define void @global_acq_rel_volatile_cta(ptr addrspace(1) %a, ptr addrspace(1) %b, ptr addrspace(1) %c, ptr addrspace(1) %d, ptr addrspace(1) %e) local_unnamed_addr {
+ ; CHECK: ld.acquire.sys.global.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load atomic volatile i8, ptr addrspace(1) %a syncscope("block") acquire, align 1
+ %a.add = add i8 %a.load, 1
+ ; CHECK: st.release.sys.global.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic volatile i8 %a.add, ptr addrspace(1) %a syncscope("block") release, align 1
+
+ ; CHECK: ld.acquire.sys.global.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load atomic volatile i16, ptr addrspace(1) %b syncscope("block") acquire, align 2
+ %b.add = add i16 %b.load, 1
+ ; CHECK: st.release.sys.global.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic volatile i16 %b.add, ptr addrspace(1) %b syncscope("block") release, align 2
+
+ ; CHECK: ld.acquire.sys.global.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load atomic volatile i32, ptr addrspace(1) %c syncscope("block") acquire, align 4
+ %c.add = add i32 %c.load, 1
+ ; CHECK: st.release.sys.global.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store atomic volatile i32 %c.add, ptr addrspace(1) %c syncscope("block") release, align 4
+
+ ; CHECK: ld.acquire.sys.global.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load atomic volatile i64, ptr addrspace(1) %d syncscope("block") acquire, align 8
+ %d.add = add i64 %d.load, 1
+ ; CHECK: st.release.sys.global.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store atomic volatile i64 %d.add, ptr addrspace(1) %d syncscope("block") release, align 8
+
+ ; CHECK: ld.acquire.sys.global.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %e.load = load atomic volatile float, ptr addrspace(1) %e syncscope("block") acquire, align 4
+ %e.add = fadd float %e.load, 1.
+ ; CHECK: st.release.sys.global.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ store atomic volatile float %e.add, ptr addrspace(1) %e syncscope("block") release, align 4
+
+ ; CHECK: ld.acquire.sys.global.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %f.load = load atomic volatile double, ptr addrspace(1) %e syncscope("block") acquire, align 8
+ %f.add = fadd double %f.load, 1.
+ ; CHECK: st.release.sys.global.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ store atomic volatile double %f.add, ptr addrspace(1) %e syncscope("block") release, align 8
+
+ ret void
+}
+
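+; seq_cst atomics are expected to lower to a fence.sc.<scope> followed
+; by an acquire load or a release store at the same scope.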
+; CHECK-LABEL: global_seq_cst_sys
+define void @global_seq_cst_sys(ptr addrspace(1) %a, ptr addrspace(1) %b, ptr addrspace(1) %c, ptr addrspace(1) %d, ptr addrspace(1) %e) local_unnamed_addr {
; CHECK: fence.sc.sys
; CHECK: ld.acquire.sys.global.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
%a.load = load atomic i8, ptr addrspace(1) %a seq_cst, align 1
@@ -312,7 +1537,7 @@ define void @global_seq_cst(ptr addrspace(1) %a, ptr addrspace(1) %b, ptr addrsp
; CHECK: fence.sc.sys
; CHECK: ld.acquire.sys.global.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
%e.load = load atomic float, ptr addrspace(1) %e seq_cst, align 4
- %e.add = fadd float %e.load, 1.0
+ %e.add = fadd float %e.load, 1.
; CHECK: fence.sc.sys
; CHECK: st.release.sys.global.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
store atomic float %e.add, ptr addrspace(1) %e seq_cst, align 4
@@ -328,8 +1553,8 @@ define void @global_seq_cst(ptr addrspace(1) %a, ptr addrspace(1) %b, ptr addrsp
ret void
}
-; CHECK-LABEL: global_seq_cst_volatile
-define void @global_seq_cst_volatile(ptr addrspace(1) %a, ptr addrspace(1) %b, ptr addrspace(1) %c, ptr addrspace(1) %d, ptr addrspace(1) %e) local_unnamed_addr {
+; CHECK-LABEL: global_seq_cst_volatile_sys
+define void @global_seq_cst_volatile_sys(ptr addrspace(1) %a, ptr addrspace(1) %b, ptr addrspace(1) %c, ptr addrspace(1) %d, ptr addrspace(1) %e) local_unnamed_addr {
; CHECK: fence.sc.sys
; CHECK: ld.acquire.sys.global.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
%a.load = load atomic volatile i8, ptr addrspace(1) %a seq_cst, align 1
@@ -365,7 +1590,7 @@ define void @global_seq_cst_volatile(ptr addrspace(1) %a, ptr addrspace(1) %b, p
; CHECK: fence.sc.sys
; CHECK: ld.acquire.sys.global.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
%e.load = load atomic volatile float, ptr addrspace(1) %e seq_cst, align 4
- %e.add = fadd float %e.load, 1.0
+ %e.add = fadd float %e.load, 1.
; CHECK: fence.sc.sys
; CHECK: st.release.sys.global.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
store atomic volatile float %e.add, ptr addrspace(1) %e seq_cst, align 4
@@ -381,10 +1606,550 @@ define void @global_seq_cst_volatile(ptr addrspace(1) %a, ptr addrspace(1) %b, p
ret void
}
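+; Scoped seq_cst uses the matching fence: fence.sc.gpu for
+; syncscope("device") and fence.sc.cta for syncscope("block"). The
+; volatile variants below fall back to fence.sc.sys and .sys operations.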
+; CHECK-LABEL: global_seq_cst_gpu
+define void @global_seq_cst_gpu(ptr addrspace(1) %a, ptr addrspace(1) %b, ptr addrspace(1) %c, ptr addrspace(1) %d, ptr addrspace(1) %e) local_unnamed_addr {
+ ; CHECK: fence.sc.gpu
+ ; CHECK: ld.acquire.gpu.global.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load atomic i8, ptr addrspace(1) %a syncscope("device") seq_cst, align 1
+ %a.add = add i8 %a.load, 1
+ ; CHECK: fence.sc.gpu
+ ; CHECK: st.release.gpu.global.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic i8 %a.add, ptr addrspace(1) %a syncscope("device") seq_cst, align 1
+
+ ; CHECK: fence.sc.gpu
+ ; CHECK: ld.acquire.gpu.global.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load atomic i16, ptr addrspace(1) %b syncscope("device") seq_cst, align 2
+ %b.add = add i16 %b.load, 1
+ ; CHECK: fence.sc.gpu
+ ; CHECK: st.release.gpu.global.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic i16 %b.add, ptr addrspace(1) %b syncscope("device") seq_cst, align 2
+
+ ; CHECK: fence.sc.gpu
+ ; CHECK: ld.acquire.gpu.global.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load atomic i32, ptr addrspace(1) %c syncscope("device") seq_cst, align 4
+ %c.add = add i32 %c.load, 1
+ ; CHECK: fence.sc.gpu
+ ; CHECK: st.release.gpu.global.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store atomic i32 %c.add, ptr addrspace(1) %c syncscope("device") seq_cst, align 4
+
+ ; CHECK: fence.sc.gpu
+ ; CHECK: ld.acquire.gpu.global.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load atomic i64, ptr addrspace(1) %d syncscope("device") seq_cst, align 8
+ %d.add = add i64 %d.load, 1
+ ; CHECK: fence.sc.gpu
+ ; CHECK: st.release.gpu.global.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store atomic i64 %d.add, ptr addrspace(1) %d syncscope("device") seq_cst, align 8
+
+ ; CHECK: fence.sc.gpu
+ ; CHECK: ld.acquire.gpu.global.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %e.load = load atomic float, ptr addrspace(1) %e syncscope("device") seq_cst, align 4
+ %e.add = fadd float %e.load, 1.
+ ; CHECK: fence.sc.gpu
+ ; CHECK: st.release.gpu.global.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ store atomic float %e.add, ptr addrspace(1) %e syncscope("device") seq_cst, align 4
+
+ ; CHECK: fence.sc.gpu
+ ; CHECK: ld.acquire.gpu.global.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %f.load = load atomic double, ptr addrspace(1) %e syncscope("device") seq_cst, align 8
+ %f.add = fadd double %f.load, 1.
+ ; CHECK: fence.sc.gpu
+ ; CHECK: st.release.gpu.global.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ store atomic double %f.add, ptr addrspace(1) %e syncscope("device") seq_cst, align 8
+
+ ret void
+}
+
+; CHECK-LABEL: global_seq_cst_volatile_gpu
+define void @global_seq_cst_volatile_gpu(ptr addrspace(1) %a, ptr addrspace(1) %b, ptr addrspace(1) %c, ptr addrspace(1) %d, ptr addrspace(1) %e) local_unnamed_addr {
+ ; CHECK: fence.sc.sys
+ ; CHECK: ld.acquire.sys.global.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load atomic volatile i8, ptr addrspace(1) %a syncscope("device") seq_cst, align 1
+ %a.add = add i8 %a.load, 1
+ ; CHECK: fence.sc.sys
+ ; CHECK: st.release.sys.global.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic volatile i8 %a.add, ptr addrspace(1) %a syncscope("device") seq_cst, align 1
+
+ ; CHECK: fence.sc.sys
+ ; CHECK: ld.acquire.sys.global.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load atomic volatile i16, ptr addrspace(1) %b syncscope("device") seq_cst, align 2
+ %b.add = add i16 %b.load, 1
+ ; CHECK: fence.sc.sys
+ ; CHECK: st.release.sys.global.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic volatile i16 %b.add, ptr addrspace(1) %b syncscope("device") seq_cst, align 2
+
+ ; CHECK: fence.sc.sys
+ ; CHECK: ld.acquire.sys.global.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load atomic volatile i32, ptr addrspace(1) %c syncscope("device") seq_cst, align 4
+ %c.add = add i32 %c.load, 1
+ ; CHECK: fence.sc.sys
+ ; CHECK: st.release.sys.global.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store atomic volatile i32 %c.add, ptr addrspace(1) %c syncscope("device") seq_cst, align 4
+
+ ; CHECK: fence.sc.sys
+ ; CHECK: ld.acquire.sys.global.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load atomic volatile i64, ptr addrspace(1) %d syncscope("device") seq_cst, align 8
+ %d.add = add i64 %d.load, 1
+ ; CHECK: fence.sc.sys
+ ; CHECK: st.release.sys.global.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store atomic volatile i64 %d.add, ptr addrspace(1) %d syncscope("device") seq_cst, align 8
+
+ ; CHECK: fence.sc.sys
+ ; CHECK: ld.acquire.sys.global.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %e.load = load atomic volatile float, ptr addrspace(1) %e syncscope("device") seq_cst, align 4
+ %e.add = fadd float %e.load, 1.
+ ; CHECK: fence.sc.sys
+ ; CHECK: st.release.sys.global.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ store atomic volatile float %e.add, ptr addrspace(1) %e syncscope("device") seq_cst, align 4
+
+ ; CHECK: fence.sc.sys
+ ; CHECK: ld.acquire.sys.global.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %f.load = load atomic volatile double, ptr addrspace(1) %e syncscope("device") seq_cst, align 8
+ %f.add = fadd double %f.load, 1.
+ ; CHECK: fence.sc.sys
+ ; CHECK: st.release.sys.global.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ store atomic volatile double %f.add, ptr addrspace(1) %e syncscope("device") seq_cst, align 8
+
+ ret void
+}
+
+; CHECK-LABEL: global_seq_cst_cta
+define void @global_seq_cst_cta(ptr addrspace(1) %a, ptr addrspace(1) %b, ptr addrspace(1) %c, ptr addrspace(1) %d, ptr addrspace(1) %e) local_unnamed_addr {
+ ; CHECK: fence.sc.cta
+ ; CHECK: ld.acquire.cta.global.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load atomic i8, ptr addrspace(1) %a syncscope("block") seq_cst, align 1
+ %a.add = add i8 %a.load, 1
+ ; CHECK: fence.sc.cta
+ ; CHECK: st.release.cta.global.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic i8 %a.add, ptr addrspace(1) %a syncscope("block") seq_cst, align 1
+
+ ; CHECK: fence.sc.cta
+ ; CHECK: ld.acquire.cta.global.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load atomic i16, ptr addrspace(1) %b syncscope("block") seq_cst, align 2
+ %b.add = add i16 %b.load, 1
+ ; CHECK: fence.sc.cta
+ ; CHECK: st.release.cta.global.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic i16 %b.add, ptr addrspace(1) %b syncscope("block") seq_cst, align 2
+
+ ; CHECK: fence.sc.cta
+ ; CHECK: ld.acquire.cta.global.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load atomic i32, ptr addrspace(1) %c syncscope("block") seq_cst, align 4
+ %c.add = add i32 %c.load, 1
+ ; CHECK: fence.sc.cta
+ ; CHECK: st.release.cta.global.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store atomic i32 %c.add, ptr addrspace(1) %c syncscope("block") seq_cst, align 4
+
+ ; CHECK: fence.sc.cta
+ ; CHECK: ld.acquire.cta.global.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load atomic i64, ptr addrspace(1) %d syncscope("block") seq_cst, align 8
+ %d.add = add i64 %d.load, 1
+ ; CHECK: fence.sc.cta
+ ; CHECK: st.release.cta.global.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store atomic i64 %d.add, ptr addrspace(1) %d syncscope("block") seq_cst, align 8
+
+ ; CHECK: fence.sc.cta
+ ; CHECK: ld.acquire.cta.global.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %e.load = load atomic float, ptr addrspace(1) %e syncscope("block") seq_cst, align 4
+ %e.add = fadd float %e.load, 1.
+ ; CHECK: fence.sc.cta
+ ; CHECK: st.release.cta.global.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ store atomic float %e.add, ptr addrspace(1) %e syncscope("block") seq_cst, align 4
+
+ ; CHECK: fence.sc.cta
+ ; CHECK: ld.acquire.cta.global.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %f.load = load atomic double, ptr addrspace(1) %e syncscope("block") seq_cst, align 8
+ %f.add = fadd double %f.load, 1.
+ ; CHECK: fence.sc.cta
+ ; CHECK: st.release.cta.global.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ store atomic double %f.add, ptr addrspace(1) %e syncscope("block") seq_cst, align 8
+
+ ret void
+}
+
+; CHECK-LABEL: global_seq_cst_volatile_cta
+define void @global_seq_cst_volatile_cta(ptr addrspace(1) %a, ptr addrspace(1) %b, ptr addrspace(1) %c, ptr addrspace(1) %d, ptr addrspace(1) %e) local_unnamed_addr {
+ ; CHECK: fence.sc.sys
+ ; CHECK: ld.acquire.sys.global.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load atomic volatile i8, ptr addrspace(1) %a syncscope("block") seq_cst, align 1
+ %a.add = add i8 %a.load, 1
+ ; CHECK: fence.sc.sys
+ ; CHECK: st.release.sys.global.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic volatile i8 %a.add, ptr addrspace(1) %a syncscope("block") seq_cst, align 1
+
+ ; CHECK: fence.sc.sys
+ ; CHECK: ld.acquire.sys.global.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load atomic volatile i16, ptr addrspace(1) %b syncscope("block") seq_cst, align 2
+ %b.add = add i16 %b.load, 1
+ ; CHECK: fence.sc.sys
+ ; CHECK: st.release.sys.global.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic volatile i16 %b.add, ptr addrspace(1) %b syncscope("block") seq_cst, align 2
+
+ ; CHECK: fence.sc.sys
+ ; CHECK: ld.acquire.sys.global.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load atomic volatile i32, ptr addrspace(1) %c syncscope("block") seq_cst, align 4
+ %c.add = add i32 %c.load, 1
+ ; CHECK: fence.sc.sys
+ ; CHECK: st.release.sys.global.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store atomic volatile i32 %c.add, ptr addrspace(1) %c syncscope("block") seq_cst, align 4
+
+ ; CHECK: fence.sc.sys
+ ; CHECK: ld.acquire.sys.global.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load atomic volatile i64, ptr addrspace(1) %d syncscope("block") seq_cst, align 8
+ %d.add = add i64 %d.load, 1
+ ; CHECK: fence.sc.sys
+ ; CHECK: st.release.sys.global.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store atomic volatile i64 %d.add, ptr addrspace(1) %d syncscope("block") seq_cst, align 8
+
+ ; CHECK: fence.sc.sys
+ ; CHECK: ld.acquire.sys.global.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %e.load = load atomic volatile float, ptr addrspace(1) %e syncscope("block") seq_cst, align 4
+ %e.add = fadd float %e.load, 1.
+ ; CHECK: fence.sc.sys
+ ; CHECK: st.release.sys.global.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ store atomic volatile float %e.add, ptr addrspace(1) %e syncscope("block") seq_cst, align 4
+
+ ; CHECK: fence.sc.sys
+ ; CHECK: ld.acquire.sys.global.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %f.load = load atomic volatile double, ptr addrspace(1) %e syncscope("block") seq_cst, align 8
+ %f.add = fadd double %f.load, 1.
+ ; CHECK: fence.sc.sys
+ ; CHECK: st.release.sys.global.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ store atomic volatile double %f.add, ptr addrspace(1) %e syncscope("block") seq_cst, align 8
+
+ ret void
+}
+
;; shared statespace
-; CHECK-LABEL: shared_acq_rel
-define void @shared_acq_rel(ptr addrspace(3) %a, ptr addrspace(3) %b, ptr addrspace(3) %c, ptr addrspace(3) %d, ptr addrspace(3) %e) local_unnamed_addr {
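+; The shared-statespace tests mirror the global ones, with a .shared
+; qualifier. Volatile shared accesses are expected to lower to plain
+; ld.volatile.shared / st.volatile.shared (no .mmio, no scope qualifier).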
+; CHECK-LABEL: shared_unordered_gpu
+define void @shared_unordered_gpu(ptr addrspace(3) %a, ptr addrspace(3) %b, ptr addrspace(3) %c, ptr addrspace(3) %d, ptr addrspace(3) %e) local_unnamed_addr {
+ ; CHECK: ld.relaxed.gpu.shared.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load atomic i8, ptr addrspace(3) %a syncscope("device") unordered, align 1
+ %a.add = add i8 %a.load, 1
+ ; CHECK: st.relaxed.gpu.shared.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic i8 %a.add, ptr addrspace(3) %a syncscope("device") unordered, align 1
+
+ ; CHECK: ld.relaxed.gpu.shared.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load atomic i16, ptr addrspace(3) %b syncscope("device") unordered, align 2
+ %b.add = add i16 %b.load, 1
+ ; CHECK: st.relaxed.gpu.shared.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic i16 %b.add, ptr addrspace(3) %b syncscope("device") unordered, align 2
+
+ ; CHECK: ld.relaxed.gpu.shared.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load atomic i32, ptr addrspace(3) %c syncscope("device") unordered, align 4
+ %c.add = add i32 %c.load, 1
+ ; CHECK: st.relaxed.gpu.shared.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store atomic i32 %c.add, ptr addrspace(3) %c syncscope("device") unordered, align 4
+
+ ; CHECK: ld.relaxed.gpu.shared.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load atomic i64, ptr addrspace(3) %d syncscope("device") unordered, align 8
+ %d.add = add i64 %d.load, 1
+ ; CHECK: st.relaxed.gpu.shared.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store atomic i64 %d.add, ptr addrspace(3) %d syncscope("device") unordered, align 8
+
+ ; CHECK: ld.relaxed.gpu.shared.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %e.load = load atomic float, ptr addrspace(3) %e syncscope("device") unordered, align 4
+ %e.add = fadd float %e.load, 1.
+ ; CHECK: st.relaxed.gpu.shared.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ store atomic float %e.add, ptr addrspace(3) %e syncscope("device") unordered, align 4
+
+ ; CHECK: ld.relaxed.gpu.shared.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %f.load = load atomic double, ptr addrspace(3) %e syncscope("device") unordered, align 8
+ %f.add = fadd double %f.load, 1.
+ ; CHECK: st.relaxed.gpu.shared.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ store atomic double %f.add, ptr addrspace(3) %e syncscope("device") unordered, align 8
+
+ ret void
+}
+
+; CHECK-LABEL: shared_unordered_volatile_gpu
+define void @shared_unordered_volatile_gpu(ptr addrspace(3) %a, ptr addrspace(3) %b, ptr addrspace(3) %c, ptr addrspace(3) %d, ptr addrspace(3) %e) local_unnamed_addr {
+ ; CHECK: ld.volatile.shared.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load atomic volatile i8, ptr addrspace(3) %a syncscope("device") unordered, align 1
+ %a.add = add i8 %a.load, 1
+ ; CHECK: st.volatile.shared.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic volatile i8 %a.add, ptr addrspace(3) %a syncscope("device") unordered, align 1
+
+ ; CHECK: ld.volatile.shared.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load atomic volatile i16, ptr addrspace(3) %b syncscope("device") unordered, align 2
+ %b.add = add i16 %b.load, 1
+ ; CHECK: st.volatile.shared.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic volatile i16 %b.add, ptr addrspace(3) %b syncscope("device") unordered, align 2
+
+ ; CHECK: ld.volatile.shared.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load atomic volatile i32, ptr addrspace(3) %c syncscope("device") unordered, align 4
+ %c.add = add i32 %c.load, 1
+ ; CHECK: st.volatile.shared.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store atomic volatile i32 %c.add, ptr addrspace(3) %c syncscope("device") unordered, align 4
+
+ ; CHECK: ld.volatile.shared.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load atomic volatile i64, ptr addrspace(3) %d syncscope("device") unordered, align 8
+ %d.add = add i64 %d.load, 1
+ ; CHECK: st.volatile.shared.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store atomic volatile i64 %d.add, ptr addrspace(3) %d syncscope("device") unordered, align 8
+
+ ; CHECK: ld.volatile.shared.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %e.load = load atomic volatile float, ptr addrspace(3) %e syncscope("device") unordered, align 4
+ %e.add = fadd float %e.load, 1.
+ ; CHECK: st.volatile.shared.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ store atomic volatile float %e.add, ptr addrspace(3) %e syncscope("device") unordered, align 4
+
+ ; CHECK: ld.volatile.shared.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %f.load = load atomic volatile double, ptr addrspace(3) %e syncscope("device") unordered, align 8
+ %f.add = fadd double %f.load, 1.
+ ; CHECK: st.volatile.shared.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ store atomic volatile double %f.add, ptr addrspace(3) %e syncscope("device") unordered, align 8
+
+ ret void
+}
+
+; CHECK-LABEL: shared_unordered_cta
+define void @shared_unordered_cta(ptr addrspace(3) %a, ptr addrspace(3) %b, ptr addrspace(3) %c, ptr addrspace(3) %d, ptr addrspace(3) %e) local_unnamed_addr {
+ ; CHECK: ld.relaxed.cta.shared.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load atomic i8, ptr addrspace(3) %a syncscope("block") unordered, align 1
+ %a.add = add i8 %a.load, 1
+ ; CHECK: st.relaxed.cta.shared.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic i8 %a.add, ptr addrspace(3) %a syncscope("block") unordered, align 1
+
+ ; CHECK: ld.relaxed.cta.shared.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load atomic i16, ptr addrspace(3) %b syncscope("block") unordered, align 2
+ %b.add = add i16 %b.load, 1
+ ; CHECK: st.relaxed.cta.shared.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic i16 %b.add, ptr addrspace(3) %b syncscope("block") unordered, align 2
+
+ ; CHECK: ld.relaxed.cta.shared.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load atomic i32, ptr addrspace(3) %c syncscope("block") unordered, align 4
+ %c.add = add i32 %c.load, 1
+ ; CHECK: st.relaxed.cta.shared.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store atomic i32 %c.add, ptr addrspace(3) %c syncscope("block") unordered, align 4
+
+ ; CHECK: ld.relaxed.cta.shared.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load atomic i64, ptr addrspace(3) %d syncscope("block") unordered, align 8
+ %d.add = add i64 %d.load, 1
+ ; CHECK: st.relaxed.cta.shared.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store atomic i64 %d.add, ptr addrspace(3) %d syncscope("block") unordered, align 8
+
+ ; CHECK: ld.relaxed.cta.shared.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %e.load = load atomic float, ptr addrspace(3) %e syncscope("block") unordered, align 4
+ %e.add = fadd float %e.load, 1.
+ ; CHECK: st.relaxed.cta.shared.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ store atomic float %e.add, ptr addrspace(3) %e syncscope("block") unordered, align 4
+
+ ; CHECK: ld.relaxed.cta.shared.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %f.load = load atomic double, ptr addrspace(3) %e syncscope("block") unordered, align 8
+ %f.add = fadd double %f.load, 1.
+ ; CHECK: st.relaxed.cta.shared.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ store atomic double %f.add, ptr addrspace(3) %e syncscope("block") unordered, align 8
+
+ ret void
+}
+
+; CHECK-LABEL: shared_unordered_volatile_cta
+define void @shared_unordered_volatile_cta(ptr addrspace(3) %a, ptr addrspace(3) %b, ptr addrspace(3) %c, ptr addrspace(3) %d, ptr addrspace(3) %e) local_unnamed_addr {
+ ; CHECK: ld.volatile.shared.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load atomic volatile i8, ptr addrspace(3) %a syncscope("block") unordered, align 1
+ %a.add = add i8 %a.load, 1
+ ; CHECK: st.volatile.shared.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic volatile i8 %a.add, ptr addrspace(3) %a syncscope("block") unordered, align 1
+
+ ; CHECK: ld.volatile.shared.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load atomic volatile i16, ptr addrspace(3) %b syncscope("block") unordered, align 2
+ %b.add = add i16 %b.load, 1
+ ; CHECK: st.volatile.shared.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic volatile i16 %b.add, ptr addrspace(3) %b syncscope("block") unordered, align 2
+
+ ; CHECK: ld.volatile.shared.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load atomic volatile i32, ptr addrspace(3) %c syncscope("block") unordered, align 4
+ %c.add = add i32 %c.load, 1
+ ; CHECK: st.volatile.shared.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store atomic volatile i32 %c.add, ptr addrspace(3) %c syncscope("block") unordered, align 4
+
+ ; CHECK: ld.volatile.shared.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load atomic volatile i64, ptr addrspace(3) %d syncscope("block") unordered, align 8
+ %d.add = add i64 %d.load, 1
+ ; CHECK: st.volatile.shared.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store atomic volatile i64 %d.add, ptr addrspace(3) %d syncscope("block") unordered, align 8
+
+ ; CHECK: ld.volatile.shared.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %e.load = load atomic volatile float, ptr addrspace(3) %e syncscope("block") unordered, align 4
+ %e.add = fadd float %e.load, 1.
+ ; CHECK: st.volatile.shared.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ store atomic volatile float %e.add, ptr addrspace(3) %e syncscope("block") unordered, align 4
+
+ ; CHECK: ld.volatile.shared.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %f.load = load atomic volatile double, ptr addrspace(3) %e syncscope("block") unordered, align 8
+ %f.add = fadd double %f.load, 1.
+ ; CHECK: st.volatile.shared.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ store atomic volatile double %f.add, ptr addrspace(3) %e syncscope("block") unordered, align 8
+
+ ret void
+}
+
+; CHECK-LABEL: shared_monotonic_gpu
+define void @shared_monotonic_gpu(ptr addrspace(3) %a, ptr addrspace(3) %b, ptr addrspace(3) %c, ptr addrspace(3) %d, ptr addrspace(3) %e) local_unnamed_addr {
+ ; CHECK: ld.relaxed.gpu.shared.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load atomic i8, ptr addrspace(3) %a syncscope("device") monotonic, align 1
+ %a.add = add i8 %a.load, 1
+ ; CHECK: st.relaxed.gpu.shared.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic i8 %a.add, ptr addrspace(3) %a syncscope("device") monotonic, align 1
+
+ ; CHECK: ld.relaxed.gpu.shared.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load atomic i16, ptr addrspace(3) %b syncscope("device") monotonic, align 2
+ %b.add = add i16 %b.load, 1
+ ; CHECK: st.relaxed.gpu.shared.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic i16 %b.add, ptr addrspace(3) %b syncscope("device") monotonic, align 2
+
+ ; CHECK: ld.relaxed.gpu.shared.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load atomic i32, ptr addrspace(3) %c syncscope("device") monotonic, align 4
+ %c.add = add i32 %c.load, 1
+ ; CHECK: st.relaxed.gpu.shared.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store atomic i32 %c.add, ptr addrspace(3) %c syncscope("device") monotonic, align 4
+
+ ; CHECK: ld.relaxed.gpu.shared.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load atomic i64, ptr addrspace(3) %d syncscope("device") monotonic, align 8
+ %d.add = add i64 %d.load, 1
+ ; CHECK: st.relaxed.gpu.shared.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store atomic i64 %d.add, ptr addrspace(3) %d syncscope("device") monotonic, align 8
+
+ ; CHECK: ld.relaxed.gpu.shared.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %e.load = load atomic float, ptr addrspace(3) %e syncscope("device") monotonic, align 4
+ %e.add = fadd float %e.load, 1.
+ ; CHECK: st.relaxed.gpu.shared.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ store atomic float %e.add, ptr addrspace(3) %e syncscope("device") monotonic, align 4
+
+ ; CHECK: ld.relaxed.gpu.shared.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %f.load = load atomic double, ptr addrspace(3) %e syncscope("device") monotonic, align 8
+ %f.add = fadd double %f.load, 1.
+ ; CHECK: st.relaxed.gpu.shared.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ store atomic double %f.add, ptr addrspace(3) %e syncscope("device") monotonic, align 8
+
+ ret void
+}
+
+; CHECK-LABEL: shared_monotonic_volatile_gpu
+define void @shared_monotonic_volatile_gpu(ptr addrspace(3) %a, ptr addrspace(3) %b, ptr addrspace(3) %c, ptr addrspace(3) %d, ptr addrspace(3) %e) local_unnamed_addr {
+ ; CHECK: ld.volatile.shared.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load atomic volatile i8, ptr addrspace(3) %a syncscope("device") monotonic, align 1
+ %a.add = add i8 %a.load, 1
+ ; CHECK: st.volatile.shared.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic volatile i8 %a.add, ptr addrspace(3) %a syncscope("device") monotonic, align 1
+
+ ; CHECK: ld.volatile.shared.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load atomic volatile i16, ptr addrspace(3) %b syncscope("device") monotonic, align 2
+ %b.add = add i16 %b.load, 1
+ ; CHECK: st.volatile.shared.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic volatile i16 %b.add, ptr addrspace(3) %b syncscope("device") monotonic, align 2
+
+ ; CHECK: ld.volatile.shared.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load atomic volatile i32, ptr addrspace(3) %c syncscope("device") monotonic, align 4
+ %c.add = add i32 %c.load, 1
+ ; CHECK: st.volatile.shared.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store atomic volatile i32 %c.add, ptr addrspace(3) %c syncscope("device") monotonic, align 4
+
+ ; CHECK: ld.volatile.shared.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load atomic volatile i64, ptr addrspace(3) %d syncscope("device") monotonic, align 8
+ %d.add = add i64 %d.load, 1
+ ; CHECK: st.volatile.shared.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store atomic volatile i64 %d.add, ptr addrspace(3) %d syncscope("device") monotonic, align 8
+
+ ; CHECK: ld.volatile.shared.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %e.load = load atomic volatile float, ptr addrspace(3) %e syncscope("device") monotonic, align 4
+ %e.add = fadd float %e.load, 1.
+ ; CHECK: st.volatile.shared.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ store atomic volatile float %e.add, ptr addrspace(3) %e syncscope("device") monotonic, align 4
+
+ ; CHECK: ld.volatile.shared.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %f.load = load atomic volatile double, ptr addrspace(3) %e syncscope("device") monotonic, align 8
+ %f.add = fadd double %f.load, 1.
+ ; CHECK: st.volatile.shared.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ store atomic volatile double %f.add, ptr addrspace(3) %e syncscope("device") monotonic, align 8
+
+ ret void
+}
+
+; CHECK-LABEL: shared_monotonic_cta
+define void @shared_monotonic_cta(ptr addrspace(3) %a, ptr addrspace(3) %b, ptr addrspace(3) %c, ptr addrspace(3) %d, ptr addrspace(3) %e) local_unnamed_addr {
+ ; CHECK: ld.relaxed.cta.shared.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load atomic i8, ptr addrspace(3) %a syncscope("block") monotonic, align 1
+ %a.add = add i8 %a.load, 1
+ ; CHECK: st.relaxed.cta.shared.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic i8 %a.add, ptr addrspace(3) %a syncscope("block") monotonic, align 1
+
+ ; CHECK: ld.relaxed.cta.shared.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load atomic i16, ptr addrspace(3) %b syncscope("block") monotonic, align 2
+ %b.add = add i16 %b.load, 1
+ ; CHECK: st.relaxed.cta.shared.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic i16 %b.add, ptr addrspace(3) %b syncscope("block") monotonic, align 2
+
+ ; CHECK: ld.relaxed.cta.shared.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load atomic i32, ptr addrspace(3) %c syncscope("block") monotonic, align 4
+ %c.add = add i32 %c.load, 1
+ ; CHECK: st.relaxed.cta.shared.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store atomic i32 %c.add, ptr addrspace(3) %c syncscope("block") monotonic, align 4
+
+ ; CHECK: ld.relaxed.cta.shared.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load atomic i64, ptr addrspace(3) %d syncscope("block") monotonic, align 8
+ %d.add = add i64 %d.load, 1
+ ; CHECK: st.relaxed.cta.shared.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store atomic i64 %d.add, ptr addrspace(3) %d syncscope("block") monotonic, align 8
+
+ ; CHECK: ld.relaxed.cta.shared.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %e.load = load atomic float, ptr addrspace(3) %e syncscope("block") monotonic, align 4
+ %e.add = fadd float %e.load, 1.
+ ; CHECK: st.relaxed.cta.shared.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ store atomic float %e.add, ptr addrspace(3) %e syncscope("block") monotonic, align 4
+
+ ; CHECK: ld.relaxed.cta.shared.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %f.load = load atomic double, ptr addrspace(3) %e syncscope("block") monotonic, align 8
+ %f.add = fadd double %f.load, 1.
+ ; CHECK: st.relaxed.cta.shared.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ store atomic double %f.add, ptr addrspace(3) %e syncscope("block") monotonic, align 8
+
+ ret void
+}
+
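+; Volatile relaxed atomics ignore the syncscope and lower to plain volatile accesses (ld.volatile/st.volatile).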
+; CHECK-LABEL: shared_monotonic_volatile_cta
+define void @shared_monotonic_volatile_cta(ptr addrspace(3) %a, ptr addrspace(3) %b, ptr addrspace(3) %c, ptr addrspace(3) %d, ptr addrspace(3) %e) local_unnamed_addr {
+ ; CHECK: ld.volatile.shared.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load atomic volatile i8, ptr addrspace(3) %a syncscope("block") monotonic, align 1
+ %a.add = add i8 %a.load, 1
+ ; CHECK: st.volatile.shared.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic volatile i8 %a.add, ptr addrspace(3) %a syncscope("block") monotonic, align 1
+
+ ; CHECK: ld.volatile.shared.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load atomic volatile i16, ptr addrspace(3) %b syncscope("block") monotonic, align 2
+ %b.add = add i16 %b.load, 1
+ ; CHECK: st.volatile.shared.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic volatile i16 %b.add, ptr addrspace(3) %b syncscope("block") monotonic, align 2
+
+ ; CHECK: ld.volatile.shared.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load atomic volatile i32, ptr addrspace(3) %c syncscope("block") monotonic, align 4
+ %c.add = add i32 %c.load, 1
+ ; CHECK: st.volatile.shared.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store atomic volatile i32 %c.add, ptr addrspace(3) %c syncscope("block") monotonic, align 4
+
+ ; CHECK: ld.volatile.shared.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load atomic volatile i64, ptr addrspace(3) %d syncscope("block") monotonic, align 8
+ %d.add = add i64 %d.load, 1
+ ; CHECK: st.volatile.shared.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store atomic volatile i64 %d.add, ptr addrspace(3) %d syncscope("block") monotonic, align 8
+
+ ; CHECK: ld.volatile.shared.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %e.load = load atomic volatile float, ptr addrspace(3) %e syncscope("block") monotonic, align 4
+ %e.add = fadd float %e.load, 1.
+ ; CHECK: st.volatile.shared.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ store atomic volatile float %e.add, ptr addrspace(3) %e syncscope("block") monotonic, align 4
+
+ ; CHECK: ld.volatile.shared.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %f.load = load atomic volatile double, ptr addrspace(3) %e syncscope("block") monotonic, align 8
+ %f.add = fadd double %f.load, 1.
+ ; CHECK: st.volatile.shared.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ store atomic volatile double %f.add, ptr addrspace(3) %e syncscope("block") monotonic, align 8
+
+ ret void
+}
+
+; CHECK-LABEL: shared_acq_rel_sys
+define void @shared_acq_rel_sys(ptr addrspace(3) %a, ptr addrspace(3) %b, ptr addrspace(3) %c, ptr addrspace(3) %d, ptr addrspace(3) %e) local_unnamed_addr {
; CHECK: ld.acquire.sys.shared.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
%a.load = load atomic i8, ptr addrspace(3) %a acquire, align 1
%a.add = add i8 %a.load, 1
@@ -411,7 +2176,7 @@ define void @shared_acq_rel(ptr addrspace(3) %a, ptr addrspace(3) %b, ptr addrsp
; CHECK: ld.acquire.sys.shared.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
%e.load = load atomic float, ptr addrspace(3) %e acquire, align 4
- %e.add = fadd float %e.load, 1.0
+ %e.add = fadd float %e.load, 1.
; CHECK: st.release.sys.shared.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
store atomic float %e.add, ptr addrspace(3) %e release, align 4
@@ -424,8 +2189,8 @@ define void @shared_acq_rel(ptr addrspace(3) %a, ptr addrspace(3) %b, ptr addrsp
ret void
}
-; CHECK-LABEL: shared_acq_rel_volatile
-define void @shared_acq_rel_volatile(ptr addrspace(3) %a, ptr addrspace(3) %b, ptr addrspace(3) %c, ptr addrspace(3) %d, ptr addrspace(3) %e) local_unnamed_addr {
+; CHECK-LABEL: shared_acq_rel_volatile_sys
+define void @shared_acq_rel_volatile_sys(ptr addrspace(3) %a, ptr addrspace(3) %b, ptr addrspace(3) %c, ptr addrspace(3) %d, ptr addrspace(3) %e) local_unnamed_addr {
; CHECK: ld.acquire.sys.shared.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
%a.load = load atomic volatile i8, ptr addrspace(3) %a acquire, align 1
%a.add = add i8 %a.load, 1
@@ -452,7 +2217,7 @@ define void @shared_acq_rel_volatile(ptr addrspace(3) %a, ptr addrspace(3) %b, p
; CHECK: ld.acquire.sys.shared.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
%e.load = load atomic volatile float, ptr addrspace(3) %e acquire, align 4
- %e.add = fadd float %e.load, 1.0
+ %e.add = fadd float %e.load, 1.
; CHECK: st.release.sys.shared.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
store atomic volatile float %e.add, ptr addrspace(3) %e release, align 4
@@ -465,8 +2230,172 @@ define void @shared_acq_rel_volatile(ptr addrspace(3) %a, ptr addrspace(3) %b, p
ret void
}
-; CHECK-LABEL: shared_seq_cst
-define void @shared_seq_cst(ptr addrspace(3) %a, ptr addrspace(3) %b, ptr addrspace(3) %c, ptr addrspace(3) %d, ptr addrspace(3) %e) local_unnamed_addr {
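+; Device-scope acquire/release atomics on shared memory lower to ld.acquire.gpu/st.release.gpu.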
+; CHECK-LABEL: shared_acq_rel_gpu
+define void @shared_acq_rel_gpu(ptr addrspace(3) %a, ptr addrspace(3) %b, ptr addrspace(3) %c, ptr addrspace(3) %d, ptr addrspace(3) %e) local_unnamed_addr {
+ ; CHECK: ld.acquire.gpu.shared.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load atomic i8, ptr addrspace(3) %a syncscope("device") acquire, align 1
+ %a.add = add i8 %a.load, 1
+ ; CHECK: st.release.gpu.shared.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic i8 %a.add, ptr addrspace(3) %a syncscope("device") release, align 1
+
+ ; CHECK: ld.acquire.gpu.shared.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load atomic i16, ptr addrspace(3) %b syncscope("device") acquire, align 2
+ %b.add = add i16 %b.load, 1
+ ; CHECK: st.release.gpu.shared.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic i16 %b.add, ptr addrspace(3) %b syncscope("device") release, align 2
+
+ ; CHECK: ld.acquire.gpu.shared.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load atomic i32, ptr addrspace(3) %c syncscope("device") acquire, align 4
+ %c.add = add i32 %c.load, 1
+ ; CHECK: st.release.gpu.shared.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store atomic i32 %c.add, ptr addrspace(3) %c syncscope("device") release, align 4
+
+ ; CHECK: ld.acquire.gpu.shared.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load atomic i64, ptr addrspace(3) %d syncscope("device") acquire, align 8
+ %d.add = add i64 %d.load, 1
+ ; CHECK: st.release.gpu.shared.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store atomic i64 %d.add, ptr addrspace(3) %d syncscope("device") release, align 8
+
+ ; CHECK: ld.acquire.gpu.shared.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %e.load = load atomic float, ptr addrspace(3) %e syncscope("device") acquire, align 4
+ %e.add = fadd float %e.load, 1.
+ ; CHECK: st.release.gpu.shared.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ store atomic float %e.add, ptr addrspace(3) %e syncscope("device") release, align 4
+
+ ; CHECK: ld.acquire.gpu.shared.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %f.load = load atomic double, ptr addrspace(3) %e syncscope("device") acquire, align 8
+ %f.add = fadd double %f.load, 1.
+ ; CHECK: st.release.gpu.shared.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ store atomic double %f.add, ptr addrspace(3) %e syncscope("device") release, align 8
+
+ ret void
+}
+
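+; Volatile acquire/release atomics are conservatively emitted at system scope (.sys), regardless of syncscope.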
+; CHECK-LABEL: shared_acq_rel_volatile_gpu
+define void @shared_acq_rel_volatile_gpu(ptr addrspace(3) %a, ptr addrspace(3) %b, ptr addrspace(3) %c, ptr addrspace(3) %d, ptr addrspace(3) %e) local_unnamed_addr {
+ ; CHECK: ld.acquire.sys.shared.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load atomic volatile i8, ptr addrspace(3) %a syncscope("device") acquire, align 1
+ %a.add = add i8 %a.load, 1
+ ; CHECK: st.release.sys.shared.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic volatile i8 %a.add, ptr addrspace(3) %a syncscope("device") release, align 1
+
+ ; CHECK: ld.acquire.sys.shared.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load atomic volatile i16, ptr addrspace(3) %b syncscope("device") acquire, align 2
+ %b.add = add i16 %b.load, 1
+ ; CHECK: st.release.sys.shared.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic volatile i16 %b.add, ptr addrspace(3) %b syncscope("device") release, align 2
+
+ ; CHECK: ld.acquire.sys.shared.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load atomic volatile i32, ptr addrspace(3) %c syncscope("device") acquire, align 4
+ %c.add = add i32 %c.load, 1
+ ; CHECK: st.release.sys.shared.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store atomic volatile i32 %c.add, ptr addrspace(3) %c syncscope("device") release, align 4
+
+ ; CHECK: ld.acquire.sys.shared.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load atomic volatile i64, ptr addrspace(3) %d syncscope("device") acquire, align 8
+ %d.add = add i64 %d.load, 1
+ ; CHECK: st.release.sys.shared.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store atomic volatile i64 %d.add, ptr addrspace(3) %d syncscope("device") release, align 8
+
+ ; CHECK: ld.acquire.sys.shared.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %e.load = load atomic volatile float, ptr addrspace(3) %e syncscope("device") acquire, align 4
+ %e.add = fadd float %e.load, 1.
+ ; CHECK: st.release.sys.shared.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ store atomic volatile float %e.add, ptr addrspace(3) %e syncscope("device") release, align 4
+
+ ; CHECK: ld.acquire.sys.shared.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %f.load = load atomic volatile double, ptr addrspace(3) %e syncscope("device") acquire, align 8
+ %f.add = fadd double %f.load, 1.
+ ; CHECK: st.release.sys.shared.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ store atomic volatile double %f.add, ptr addrspace(3) %e syncscope("device") release, align 8
+
+ ret void
+}
+
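+; Block-scope acquire/release atomics use the cta scope qualifier: ld.acquire.cta/st.release.cta.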
+; CHECK-LABEL: shared_acq_rel_cta
+define void @shared_acq_rel_cta(ptr addrspace(3) %a, ptr addrspace(3) %b, ptr addrspace(3) %c, ptr addrspace(3) %d, ptr addrspace(3) %e) local_unnamed_addr {
+ ; CHECK: ld.acquire.cta.shared.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load atomic i8, ptr addrspace(3) %a syncscope("block") acquire, align 1
+ %a.add = add i8 %a.load, 1
+ ; CHECK: st.release.cta.shared.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic i8 %a.add, ptr addrspace(3) %a syncscope("block") release, align 1
+
+ ; CHECK: ld.acquire.cta.shared.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load atomic i16, ptr addrspace(3) %b syncscope("block") acquire, align 2
+ %b.add = add i16 %b.load, 1
+ ; CHECK: st.release.cta.shared.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic i16 %b.add, ptr addrspace(3) %b syncscope("block") release, align 2
+
+ ; CHECK: ld.acquire.cta.shared.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load atomic i32, ptr addrspace(3) %c syncscope("block") acquire, align 4
+ %c.add = add i32 %c.load, 1
+ ; CHECK: st.release.cta.shared.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store atomic i32 %c.add, ptr addrspace(3) %c syncscope("block") release, align 4
+
+ ; CHECK: ld.acquire.cta.shared.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load atomic i64, ptr addrspace(3) %d syncscope("block") acquire, align 8
+ %d.add = add i64 %d.load, 1
+ ; CHECK: st.release.cta.shared.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store atomic i64 %d.add, ptr addrspace(3) %d syncscope("block") release, align 8
+
+ ; CHECK: ld.acquire.cta.shared.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %e.load = load atomic float, ptr addrspace(3) %e syncscope("block") acquire, align 4
+ %e.add = fadd float %e.load, 1.
+ ; CHECK: st.release.cta.shared.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ store atomic float %e.add, ptr addrspace(3) %e syncscope("block") release, align 4
+
+ ; CHECK: ld.acquire.cta.shared.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %f.load = load atomic double, ptr addrspace(3) %e syncscope("block") acquire, align 8
+ %f.add = fadd double %f.load, 1.
+ ; CHECK: st.release.cta.shared.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ store atomic double %f.add, ptr addrspace(3) %e syncscope("block") release, align 8
+
+ ret void
+}
+
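+; As with the device-scope variant above, volatile acquire/release at block scope also widens to .sys.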
+; CHECK-LABEL: shared_acq_rel_volatile_cta
+define void @shared_acq_rel_volatile_cta(ptr addrspace(3) %a, ptr addrspace(3) %b, ptr addrspace(3) %c, ptr addrspace(3) %d, ptr addrspace(3) %e) local_unnamed_addr {
+ ; CHECK: ld.acquire.sys.shared.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load atomic volatile i8, ptr addrspace(3) %a syncscope("block") acquire, align 1
+ %a.add = add i8 %a.load, 1
+ ; CHECK: st.release.sys.shared.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic volatile i8 %a.add, ptr addrspace(3) %a syncscope("block") release, align 1
+
+ ; CHECK: ld.acquire.sys.shared.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load atomic volatile i16, ptr addrspace(3) %b syncscope("block") acquire, align 2
+ %b.add = add i16 %b.load, 1
+ ; CHECK: st.release.sys.shared.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic volatile i16 %b.add, ptr addrspace(3) %b syncscope("block") release, align 2
+
+ ; CHECK: ld.acquire.sys.shared.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load atomic volatile i32, ptr addrspace(3) %c syncscope("block") acquire, align 4
+ %c.add = add i32 %c.load, 1
+ ; CHECK: st.release.sys.shared.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store atomic volatile i32 %c.add, ptr addrspace(3) %c syncscope("block") release, align 4
+
+ ; CHECK: ld.acquire.sys.shared.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load atomic volatile i64, ptr addrspace(3) %d syncscope("block") acquire, align 8
+ %d.add = add i64 %d.load, 1
+ ; CHECK: st.release.sys.shared.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store atomic volatile i64 %d.add, ptr addrspace(3) %d syncscope("block") release, align 8
+
+ ; CHECK: ld.acquire.sys.shared.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %e.load = load atomic volatile float, ptr addrspace(3) %e syncscope("block") acquire, align 4
+ %e.add = fadd float %e.load, 1.
+ ; CHECK: st.release.sys.shared.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ store atomic volatile float %e.add, ptr addrspace(3) %e syncscope("block") release, align 4
+
+ ; CHECK: ld.acquire.sys.shared.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %f.load = load atomic volatile double, ptr addrspace(3) %e syncscope("block") acquire, align 8
+ %f.add = fadd double %f.load, 1.
+ ; CHECK: st.release.sys.shared.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ store atomic volatile double %f.add, ptr addrspace(3) %e syncscope("block") release, align 8
+
+ ret void
+}
+
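+; seq_cst is lowered as fence.sc.sys followed by an acquire load or release store.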
+; CHECK-LABEL: shared_seq_cst_sys
+define void @shared_seq_cst_sys(ptr addrspace(3) %a, ptr addrspace(3) %b, ptr addrspace(3) %c, ptr addrspace(3) %d, ptr addrspace(3) %e) local_unnamed_addr {
; CHECK: fence.sc.sys
; CHECK: ld.acquire.sys.shared.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
%a.load = load atomic i8, ptr addrspace(3) %a seq_cst, align 1
@@ -502,7 +2431,7 @@ define void @shared_seq_cst(ptr addrspace(3) %a, ptr addrspace(3) %b, ptr addrsp
; CHECK: fence.sc.sys
; CHECK: ld.acquire.sys.shared.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
%e.load = load atomic float, ptr addrspace(3) %e seq_cst, align 4
- %e.add = fadd float %e.load, 1.0
+ %e.add = fadd float %e.load, 1.
; CHECK: fence.sc.sys
; CHECK: st.release.sys.shared.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
store atomic float %e.add, ptr addrspace(3) %e seq_cst, align 4
@@ -510,16 +2439,16 @@ define void @shared_seq_cst(ptr addrspace(3) %a, ptr addrspace(3) %b, ptr addrsp
; CHECK: fence.sc.sys
; CHECK: ld.acquire.sys.shared.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
%f.load = load atomic double, ptr addrspace(3) %e seq_cst, align 8
- %f.add = fadd double %f.load, 1.
- ; CHECK: fence.sc.sys
+ %f.add = fadd double %f.load, 1.
+ ; CHECK: fence.sc.sys
; CHECK: st.release.sys.shared.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
store atomic double %f.add, ptr addrspace(3) %e seq_cst, align 8
ret void
}
-; CHECK-LABEL: shared_seq_cst_volatile
-define void @shared_seq_cst_volatile(ptr addrspace(3) %a, ptr addrspace(3) %b, ptr addrspace(3) %c, ptr addrspace(3) %d, ptr addrspace(3) %e) local_unnamed_addr {
+; CHECK-LABEL: shared_seq_cst_volatile_sys
+define void @shared_seq_cst_volatile_sys(ptr addrspace(3) %a, ptr addrspace(3) %b, ptr addrspace(3) %c, ptr addrspace(3) %d, ptr addrspace(3) %e) local_unnamed_addr {
; CHECK: fence.sc.sys
; CHECK: ld.acquire.sys.shared.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
%a.load = load atomic volatile i8, ptr addrspace(3) %a seq_cst, align 1
@@ -555,7 +2484,7 @@ define void @shared_seq_cst_volatile(ptr addrspace(3) %a, ptr addrspace(3) %b, p
; CHECK: fence.sc.sys
; CHECK: ld.acquire.sys.shared.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
%e.load = load atomic volatile float, ptr addrspace(3) %e seq_cst, align 4
- %e.add = fadd float %e.load, 1.0
+ %e.add = fadd float %e.load, 1.
; CHECK: fence.sc.sys
; CHECK: st.release.sys.shared.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
store atomic volatile float %e.add, ptr addrspace(3) %e seq_cst, align 4
@@ -571,13 +2500,550 @@ define void @shared_seq_cst_volatile(ptr addrspace(3) %a, ptr addrspace(3) %b, p
ret void
}
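+; Device-scope seq_cst narrows the fence and memory operations to gpu scope: fence.sc.gpu + ld.acquire.gpu/st.release.gpu.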
+; CHECK-LABEL: shared_seq_cst_gpu
+define void @shared_seq_cst_gpu(ptr addrspace(3) %a, ptr addrspace(3) %b, ptr addrspace(3) %c, ptr addrspace(3) %d, ptr addrspace(3) %e) local_unnamed_addr {
+ ; CHECK: fence.sc.gpu
+ ; CHECK: ld.acquire.gpu.shared.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load atomic i8, ptr addrspace(3) %a syncscope("device") seq_cst, align 1
+ %a.add = add i8 %a.load, 1
+ ; CHECK: fence.sc.gpu
+ ; CHECK: st.release.gpu.shared.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic i8 %a.add, ptr addrspace(3) %a syncscope("device") seq_cst, align 1
+
+ ; CHECK: fence.sc.gpu
+ ; CHECK: ld.acquire.gpu.shared.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load atomic i16, ptr addrspace(3) %b syncscope("device") seq_cst, align 2
+ %b.add = add i16 %b.load, 1
+ ; CHECK: fence.sc.gpu
+ ; CHECK: st.release.gpu.shared.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic i16 %b.add, ptr addrspace(3) %b syncscope("device") seq_cst, align 2
+
+ ; CHECK: fence.sc.gpu
+ ; CHECK: ld.acquire.gpu.shared.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load atomic i32, ptr addrspace(3) %c syncscope("device") seq_cst, align 4
+ %c.add = add i32 %c.load, 1
+ ; CHECK: fence.sc.gpu
+ ; CHECK: st.release.gpu.shared.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store atomic i32 %c.add, ptr addrspace(3) %c syncscope("device") seq_cst, align 4
+
+ ; CHECK: fence.sc.gpu
+ ; CHECK: ld.acquire.gpu.shared.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load atomic i64, ptr addrspace(3) %d syncscope("device") seq_cst, align 8
+ %d.add = add i64 %d.load, 1
+ ; CHECK: fence.sc.gpu
+ ; CHECK: st.release.gpu.shared.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store atomic i64 %d.add, ptr addrspace(3) %d syncscope("device") seq_cst, align 8
+
+ ; CHECK: fence.sc.gpu
+ ; CHECK: ld.acquire.gpu.shared.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %e.load = load atomic float, ptr addrspace(3) %e syncscope("device") seq_cst, align 4
+ %e.add = fadd float %e.load, 1.
+ ; CHECK: fence.sc.gpu
+ ; CHECK: st.release.gpu.shared.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ store atomic float %e.add, ptr addrspace(3) %e syncscope("device") seq_cst, align 4
+
+ ; CHECK: fence.sc.gpu
+ ; CHECK: ld.acquire.gpu.shared.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %f.load = load atomic double, ptr addrspace(3) %e syncscope("device") seq_cst, align 8
+ %f.add = fadd double %f.load, 1.
+ ; CHECK: fence.sc.gpu
+ ; CHECK: st.release.gpu.shared.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ store atomic double %f.add, ptr addrspace(3) %e syncscope("device") seq_cst, align 8
+
+ ret void
+}
+
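+; Volatile seq_cst keeps the sequentially consistent fence but widens fences and accesses to system scope.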
+; CHECK-LABEL: shared_seq_cst_volatile_gpu
+define void @shared_seq_cst_volatile_gpu(ptr addrspace(3) %a, ptr addrspace(3) %b, ptr addrspace(3) %c, ptr addrspace(3) %d, ptr addrspace(3) %e) local_unnamed_addr {
+ ; CHECK: fence.sc.sys
+ ; CHECK: ld.acquire.sys.shared.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load atomic volatile i8, ptr addrspace(3) %a syncscope("device") seq_cst, align 1
+ %a.add = add i8 %a.load, 1
+ ; CHECK: fence.sc.sys
+ ; CHECK: st.release.sys.shared.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic volatile i8 %a.add, ptr addrspace(3) %a syncscope("device") seq_cst, align 1
+
+ ; CHECK: fence.sc.sys
+ ; CHECK: ld.acquire.sys.shared.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load atomic volatile i16, ptr addrspace(3) %b syncscope("device") seq_cst, align 2
+ %b.add = add i16 %b.load, 1
+ ; CHECK: fence.sc.sys
+ ; CHECK: st.release.sys.shared.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic volatile i16 %b.add, ptr addrspace(3) %b syncscope("device") seq_cst, align 2
+
+ ; CHECK: fence.sc.sys
+ ; CHECK: ld.acquire.sys.shared.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load atomic volatile i32, ptr addrspace(3) %c syncscope("device") seq_cst, align 4
+ %c.add = add i32 %c.load, 1
+ ; CHECK: fence.sc.sys
+ ; CHECK: st.release.sys.shared.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store atomic volatile i32 %c.add, ptr addrspace(3) %c syncscope("device") seq_cst, align 4
+
+ ; CHECK: fence.sc.sys
+ ; CHECK: ld.acquire.sys.shared.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load atomic volatile i64, ptr addrspace(3) %d syncscope("device") seq_cst, align 8
+ %d.add = add i64 %d.load, 1
+ ; CHECK: fence.sc.sys
+ ; CHECK: st.release.sys.shared.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store atomic volatile i64 %d.add, ptr addrspace(3) %d syncscope("device") seq_cst, align 8
+
+ ; CHECK: fence.sc.sys
+ ; CHECK: ld.acquire.sys.shared.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %e.load = load atomic volatile float, ptr addrspace(3) %e syncscope("device") seq_cst, align 4
+ %e.add = fadd float %e.load, 1.
+ ; CHECK: fence.sc.sys
+ ; CHECK: st.release.sys.shared.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ store atomic volatile float %e.add, ptr addrspace(3) %e syncscope("device") seq_cst, align 4
+
+ ; CHECK: fence.sc.sys
+ ; CHECK: ld.acquire.sys.shared.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %f.load = load atomic volatile double, ptr addrspace(3) %e syncscope("device") seq_cst, align 8
+ %f.add = fadd double %f.load, 1.
+ ; CHECK: fence.sc.sys
+ ; CHECK: st.release.sys.shared.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ store atomic volatile double %f.add, ptr addrspace(3) %e syncscope("device") seq_cst, align 8
+
+ ret void
+}
+
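+; Block-scope seq_cst uses fence.sc.cta paired with cta-scoped acquire/release operations.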
+; CHECK-LABEL: shared_seq_cst_cta
+define void @shared_seq_cst_cta(ptr addrspace(3) %a, ptr addrspace(3) %b, ptr addrspace(3) %c, ptr addrspace(3) %d, ptr addrspace(3) %e) local_unnamed_addr {
+ ; CHECK: fence.sc.cta
+ ; CHECK: ld.acquire.cta.shared.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load atomic i8, ptr addrspace(3) %a syncscope("block") seq_cst, align 1
+ %a.add = add i8 %a.load, 1
+ ; CHECK: fence.sc.cta
+ ; CHECK: st.release.cta.shared.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic i8 %a.add, ptr addrspace(3) %a syncscope("block") seq_cst, align 1
+
+ ; CHECK: fence.sc.cta
+ ; CHECK: ld.acquire.cta.shared.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load atomic i16, ptr addrspace(3) %b syncscope("block") seq_cst, align 2
+ %b.add = add i16 %b.load, 1
+ ; CHECK: fence.sc.cta
+ ; CHECK: st.release.cta.shared.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic i16 %b.add, ptr addrspace(3) %b syncscope("block") seq_cst, align 2
+
+ ; CHECK: fence.sc.cta
+ ; CHECK: ld.acquire.cta.shared.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load atomic i32, ptr addrspace(3) %c syncscope("block") seq_cst, align 4
+ %c.add = add i32 %c.load, 1
+ ; CHECK: fence.sc.cta
+ ; CHECK: st.release.cta.shared.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store atomic i32 %c.add, ptr addrspace(3) %c syncscope("block") seq_cst, align 4
+
+ ; CHECK: fence.sc.cta
+ ; CHECK: ld.acquire.cta.shared.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load atomic i64, ptr addrspace(3) %d syncscope("block") seq_cst, align 8
+ %d.add = add i64 %d.load, 1
+ ; CHECK: fence.sc.cta
+ ; CHECK: st.release.cta.shared.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store atomic i64 %d.add, ptr addrspace(3) %d syncscope("block") seq_cst, align 8
+
+ ; CHECK: fence.sc.cta
+ ; CHECK: ld.acquire.cta.shared.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %e.load = load atomic float, ptr addrspace(3) %e syncscope("block") seq_cst, align 4
+ %e.add = fadd float %e.load, 1.
+ ; CHECK: fence.sc.cta
+ ; CHECK: st.release.cta.shared.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ store atomic float %e.add, ptr addrspace(3) %e syncscope("block") seq_cst, align 4
+
+ ; CHECK: fence.sc.cta
+ ; CHECK: ld.acquire.cta.shared.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %f.load = load atomic double, ptr addrspace(3) %e syncscope("block") seq_cst, align 8
+ %f.add = fadd double %f.load, 1.
+ ; CHECK: fence.sc.cta
+ ; CHECK: st.release.cta.shared.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ store atomic double %f.add, ptr addrspace(3) %e syncscope("block") seq_cst, align 8
+
+ ret void
+}
+
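+; Volatile block-scope seq_cst likewise falls back to fence.sc.sys and sys-scoped operations.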
+; CHECK-LABEL: shared_seq_cst_volatile_cta
+define void @shared_seq_cst_volatile_cta(ptr addrspace(3) %a, ptr addrspace(3) %b, ptr addrspace(3) %c, ptr addrspace(3) %d, ptr addrspace(3) %e) local_unnamed_addr {
+ ; CHECK: fence.sc.sys
+ ; CHECK: ld.acquire.sys.shared.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load atomic volatile i8, ptr addrspace(3) %a syncscope("block") seq_cst, align 1
+ %a.add = add i8 %a.load, 1
+ ; CHECK: fence.sc.sys
+ ; CHECK: st.release.sys.shared.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic volatile i8 %a.add, ptr addrspace(3) %a syncscope("block") seq_cst, align 1
+
+ ; CHECK: fence.sc.sys
+ ; CHECK: ld.acquire.sys.shared.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load atomic volatile i16, ptr addrspace(3) %b syncscope("block") seq_cst, align 2
+ %b.add = add i16 %b.load, 1
+ ; CHECK: fence.sc.sys
+ ; CHECK: st.release.sys.shared.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic volatile i16 %b.add, ptr addrspace(3) %b syncscope("block") seq_cst, align 2
+
+ ; CHECK: fence.sc.sys
+ ; CHECK: ld.acquire.sys.shared.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load atomic volatile i32, ptr addrspace(3) %c syncscope("block") seq_cst, align 4
+ %c.add = add i32 %c.load, 1
+ ; CHECK: fence.sc.sys
+ ; CHECK: st.release.sys.shared.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store atomic volatile i32 %c.add, ptr addrspace(3) %c syncscope("block") seq_cst, align 4
+
+ ; CHECK: fence.sc.sys
+ ; CHECK: ld.acquire.sys.shared.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load atomic volatile i64, ptr addrspace(3) %d syncscope("block") seq_cst, align 8
+ %d.add = add i64 %d.load, 1
+ ; CHECK: fence.sc.sys
+ ; CHECK: st.release.sys.shared.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store atomic volatile i64 %d.add, ptr addrspace(3) %d syncscope("block") seq_cst, align 8
+
+ ; CHECK: fence.sc.sys
+ ; CHECK: ld.acquire.sys.shared.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %e.load = load atomic volatile float, ptr addrspace(3) %e syncscope("block") seq_cst, align 4
+ %e.add = fadd float %e.load, 1.
+ ; CHECK: fence.sc.sys
+ ; CHECK: st.release.sys.shared.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ store atomic volatile float %e.add, ptr addrspace(3) %e syncscope("block") seq_cst, align 4
+
+ ; CHECK: fence.sc.sys
+ ; CHECK: ld.acquire.sys.shared.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %f.load = load atomic volatile double, ptr addrspace(3) %e syncscope("block") seq_cst, align 8
+ %f.add = fadd double %f.load, 1.
+ ; CHECK: fence.sc.sys
+ ; CHECK: st.release.sys.shared.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ store atomic volatile double %f.add, ptr addrspace(3) %e syncscope("block") seq_cst, align 8
+
+ ret void
+}
+
;; local statespace
-; CHECK-LABEL: local_acq_rel
-define void @local_acq_rel(ptr addrspace(5) %a, ptr addrspace(5) %b, ptr addrspace(5) %c, ptr addrspace(5) %d, ptr addrspace(5) %e) local_unnamed_addr {
- ; TODO: generate PTX that preserves Concurrent Forward Progress
- ; by using PTX atomic operations.
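+; The local statespace is private to a thread, so orderings and scopes are dropped and atomics lower to plain ld.local/st.local.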
+; CHECK-LABEL: local_unordered_gpu
+define void @local_unordered_gpu(ptr addrspace(5) %a, ptr addrspace(5) %b, ptr addrspace(5) %c, ptr addrspace(5) %d, ptr addrspace(5) %e) local_unnamed_addr {
+ ; CHECK: ld.local.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load atomic i8, ptr addrspace(5) %a syncscope("device") unordered, align 1
+ %a.add = add i8 %a.load, 1
+ ; CHECK: st.local.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic i8 %a.add, ptr addrspace(5) %a syncscope("device") unordered, align 1
+
+ ; CHECK: ld.local.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load atomic i16, ptr addrspace(5) %b syncscope("device") unordered, align 2
+ %b.add = add i16 %b.load, 1
+ ; CHECK: st.local.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic i16 %b.add, ptr addrspace(5) %b syncscope("device") unordered, align 2
+
+ ; CHECK: ld.local.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load atomic i32, ptr addrspace(5) %c syncscope("device") unordered, align 4
+ %c.add = add i32 %c.load, 1
+ ; CHECK: st.local.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store atomic i32 %c.add, ptr addrspace(5) %c syncscope("device") unordered, align 4
+
+ ; CHECK: ld.local.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load atomic i64, ptr addrspace(5) %d syncscope("device") unordered, align 8
+ %d.add = add i64 %d.load, 1
+ ; CHECK: st.local.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store atomic i64 %d.add, ptr addrspace(5) %d syncscope("device") unordered, align 8
+
+ ; CHECK: ld.local.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %e.load = load atomic float, ptr addrspace(5) %e syncscope("device") unordered, align 4
+ %e.add = fadd float %e.load, 1.
+ ; CHECK: st.local.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ store atomic float %e.add, ptr addrspace(5) %e syncscope("device") unordered, align 4
+
+ ; CHECK: ld.local.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %f.load = load atomic double, ptr addrspace(5) %e syncscope("device") unordered, align 8
+ %f.add = fadd double %f.load, 1.
+ ; CHECK: st.local.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ store atomic double %f.add, ptr addrspace(5) %e syncscope("device") unordered, align 8
+
+ ret void
+}
+
+; CHECK-LABEL: local_unordered_volatile_gpu
+define void @local_unordered_volatile_gpu(ptr addrspace(5) %a, ptr addrspace(5) %b, ptr addrspace(5) %c, ptr addrspace(5) %d, ptr addrspace(5) %e) local_unnamed_addr {
+ ; CHECK: ld.local.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load atomic volatile i8, ptr addrspace(5) %a syncscope("device") unordered, align 1
+ %a.add = add i8 %a.load, 1
+ ; CHECK: st.local.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic volatile i8 %a.add, ptr addrspace(5) %a syncscope("device") unordered, align 1
+
+ ; CHECK: ld.local.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load atomic volatile i16, ptr addrspace(5) %b syncscope("device") unordered, align 2
+ %b.add = add i16 %b.load, 1
+ ; CHECK: st.local.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic volatile i16 %b.add, ptr addrspace(5) %b syncscope("device") unordered, align 2
+
+ ; CHECK: ld.local.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load atomic volatile i32, ptr addrspace(5) %c syncscope("device") unordered, align 4
+ %c.add = add i32 %c.load, 1
+ ; CHECK: st.local.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store atomic volatile i32 %c.add, ptr addrspace(5) %c syncscope("device") unordered, align 4
+
+ ; CHECK: ld.local.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load atomic volatile i64, ptr addrspace(5) %d syncscope("device") unordered, align 8
+ %d.add = add i64 %d.load, 1
+ ; CHECK: st.local.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store atomic volatile i64 %d.add, ptr addrspace(5) %d syncscope("device") unordered, align 8
+
+ ; CHECK: ld.local.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %e.load = load atomic volatile float, ptr addrspace(5) %e syncscope("device") unordered, align 4
+ %e.add = fadd float %e.load, 1.
+ ; CHECK: st.local.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ store atomic volatile float %e.add, ptr addrspace(5) %e syncscope("device") unordered, align 4
+
+ ; CHECK: ld.local.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %f.load = load atomic volatile double, ptr addrspace(5) %e syncscope("device") unordered, align 8
+ %f.add = fadd double %f.load, 1.
+ ; CHECK: st.local.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ store atomic volatile double %f.add, ptr addrspace(5) %e syncscope("device") unordered, align 8
+
+ ret void
+}
+
+; CHECK-LABEL: local_unordered_cta
+define void @local_unordered_cta(ptr addrspace(5) %a, ptr addrspace(5) %b, ptr addrspace(5) %c, ptr addrspace(5) %d, ptr addrspace(5) %e) local_unnamed_addr {
+ ; CHECK: ld.local.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load atomic i8, ptr addrspace(5) %a syncscope("block") unordered, align 1
+ %a.add = add i8 %a.load, 1
+ ; CHECK: st.local.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic i8 %a.add, ptr addrspace(5) %a syncscope("block") unordered, align 1
+
+ ; CHECK: ld.local.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load atomic i16, ptr addrspace(5) %b syncscope("block") unordered, align 2
+ %b.add = add i16 %b.load, 1
+ ; CHECK: st.local.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic i16 %b.add, ptr addrspace(5) %b syncscope("block") unordered, align 2
+
+ ; CHECK: ld.local.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load atomic i32, ptr addrspace(5) %c syncscope("block") unordered, align 4
+ %c.add = add i32 %c.load, 1
+ ; CHECK: st.local.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store atomic i32 %c.add, ptr addrspace(5) %c syncscope("block") unordered, align 4
+
+ ; CHECK: ld.local.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load atomic i64, ptr addrspace(5) %d syncscope("block") unordered, align 8
+ %d.add = add i64 %d.load, 1
+ ; CHECK: st.local.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store atomic i64 %d.add, ptr addrspace(5) %d syncscope("block") unordered, align 8
+
+ ; CHECK: ld.local.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %e.load = load atomic float, ptr addrspace(5) %e syncscope("block") unordered, align 4
+ %e.add = fadd float %e.load, 1.
+ ; CHECK: st.local.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ store atomic float %e.add, ptr addrspace(5) %e syncscope("block") unordered, align 4
+
+ ; CHECK: ld.local.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %f.load = load atomic double, ptr addrspace(5) %e syncscope("block") unordered, align 8
+ %f.add = fadd double %f.load, 1.
+ ; CHECK: st.local.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ store atomic double %f.add, ptr addrspace(5) %e syncscope("block") unordered, align 8
+
+ ret void
+}
+
+; CHECK-LABEL: local_unordered_volatile_cta
+define void @local_unordered_volatile_cta(ptr addrspace(5) %a, ptr addrspace(5) %b, ptr addrspace(5) %c, ptr addrspace(5) %d, ptr addrspace(5) %e) local_unnamed_addr {
+ ; CHECK: ld.local.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load atomic volatile i8, ptr addrspace(5) %a syncscope("block") unordered, align 1
+ %a.add = add i8 %a.load, 1
+ ; CHECK: st.local.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic volatile i8 %a.add, ptr addrspace(5) %a syncscope("block") unordered, align 1
+
+ ; CHECK: ld.local.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load atomic volatile i16, ptr addrspace(5) %b syncscope("block") unordered, align 2
+ %b.add = add i16 %b.load, 1
+ ; CHECK: st.local.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic volatile i16 %b.add, ptr addrspace(5) %b syncscope("block") unordered, align 2
+
+ ; CHECK: ld.local.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load atomic volatile i32, ptr addrspace(5) %c syncscope("block") unordered, align 4
+ %c.add = add i32 %c.load, 1
+ ; CHECK: st.local.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store atomic volatile i32 %c.add, ptr addrspace(5) %c syncscope("block") unordered, align 4
+
+ ; CHECK: ld.local.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load atomic volatile i64, ptr addrspace(5) %d syncscope("block") unordered, align 8
+ %d.add = add i64 %d.load, 1
+ ; CHECK: st.local.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store atomic volatile i64 %d.add, ptr addrspace(5) %d syncscope("block") unordered, align 8
+
+ ; CHECK: ld.local.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %e.load = load atomic volatile float, ptr addrspace(5) %e syncscope("block") unordered, align 4
+ %e.add = fadd float %e.load, 1.
+ ; CHECK: st.local.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ store atomic volatile float %e.add, ptr addrspace(5) %e syncscope("block") unordered, align 4
+
+ ; CHECK: ld.local.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %f.load = load atomic volatile double, ptr addrspace(5) %e syncscope("block") unordered, align 8
+ %f.add = fadd double %f.load, 1.
+ ; CHECK: st.local.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ store atomic volatile double %f.add, ptr addrspace(5) %e syncscope("block") unordered, align 8
+
+ ret void
+}
+
+; CHECK-LABEL: local_monotonic_gpu
+define void @local_monotonic_gpu(ptr addrspace(5) %a, ptr addrspace(5) %b, ptr addrspace(5) %c, ptr addrspace(5) %d, ptr addrspace(5) %e) local_unnamed_addr {
+ ; CHECK: ld.local.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load atomic i8, ptr addrspace(5) %a syncscope("device") monotonic, align 1
+ %a.add = add i8 %a.load, 1
+ ; CHECK: st.local.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic i8 %a.add, ptr addrspace(5) %a syncscope("device") monotonic, align 1
+
+ ; CHECK: ld.local.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load atomic i16, ptr addrspace(5) %b syncscope("device") monotonic, align 2
+ %b.add = add i16 %b.load, 1
+ ; CHECK: st.local.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic i16 %b.add, ptr addrspace(5) %b syncscope("device") monotonic, align 2
+
+ ; CHECK: ld.local.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load atomic i32, ptr addrspace(5) %c syncscope("device") monotonic, align 4
+ %c.add = add i32 %c.load, 1
+ ; CHECK: st.local.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store atomic i32 %c.add, ptr addrspace(5) %c syncscope("device") monotonic, align 4
+
+ ; CHECK: ld.local.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load atomic i64, ptr addrspace(5) %d syncscope("device") monotonic, align 8
+ %d.add = add i64 %d.load, 1
+ ; CHECK: st.local.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store atomic i64 %d.add, ptr addrspace(5) %d syncscope("device") monotonic, align 8
+
+ ; CHECK: ld.local.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %e.load = load atomic float, ptr addrspace(5) %e syncscope("device") monotonic, align 4
+ %e.add = fadd float %e.load, 1.
+ ; CHECK: st.local.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ store atomic float %e.add, ptr addrspace(5) %e syncscope("device") monotonic, align 4
+
+ ; CHECK: ld.local.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %f.load = load atomic double, ptr addrspace(5) %e syncscope("device") monotonic, align 8
+ %f.add = fadd double %f.load, 1.
+ ; CHECK: st.local.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ store atomic double %f.add, ptr addrspace(5) %e syncscope("device") monotonic, align 8
+
+ ret void
+}
+
+; CHECK-LABEL: local_monotonic_volatile_gpu
+define void @local_monotonic_volatile_gpu(ptr addrspace(5) %a, ptr addrspace(5) %b, ptr addrspace(5) %c, ptr addrspace(5) %d, ptr addrspace(5) %e) local_unnamed_addr {
+ ; CHECK: ld.local.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load atomic volatile i8, ptr addrspace(5) %a syncscope("device") monotonic, align 1
+ %a.add = add i8 %a.load, 1
+ ; CHECK: st.local.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic volatile i8 %a.add, ptr addrspace(5) %a syncscope("device") monotonic, align 1
+
+ ; CHECK: ld.local.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load atomic volatile i16, ptr addrspace(5) %b syncscope("device") monotonic, align 2
+ %b.add = add i16 %b.load, 1
+ ; CHECK: st.local.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic volatile i16 %b.add, ptr addrspace(5) %b syncscope("device") monotonic, align 2
+
+ ; CHECK: ld.local.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load atomic volatile i32, ptr addrspace(5) %c syncscope("device") monotonic, align 4
+ %c.add = add i32 %c.load, 1
+ ; CHECK: st.local.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store atomic volatile i32 %c.add, ptr addrspace(5) %c syncscope("device") monotonic, align 4
+
+ ; CHECK: ld.local.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load atomic volatile i64, ptr addrspace(5) %d syncscope("device") monotonic, align 8
+ %d.add = add i64 %d.load, 1
+ ; CHECK: st.local.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store atomic volatile i64 %d.add, ptr addrspace(5) %d syncscope("device") monotonic, align 8
+
+ ; CHECK: ld.local.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %e.load = load atomic volatile float, ptr addrspace(5) %e syncscope("device") monotonic, align 4
+ %e.add = fadd float %e.load, 1.
+ ; CHECK: st.local.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ store atomic volatile float %e.add, ptr addrspace(5) %e syncscope("device") monotonic, align 4
+
+ ; CHECK: ld.local.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %f.load = load atomic volatile double, ptr addrspace(5) %e syncscope("device") monotonic, align 8
+ %f.add = fadd double %f.load, 1.
+ ; CHECK: st.local.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ store atomic volatile double %f.add, ptr addrspace(5) %e syncscope("device") monotonic, align 8
+
+ ret void
+}
+
+; CHECK-LABEL: local_monotonic_cta
+define void @local_monotonic_cta(ptr addrspace(5) %a, ptr addrspace(5) %b, ptr addrspace(5) %c, ptr addrspace(5) %d, ptr addrspace(5) %e) local_unnamed_addr {
+ ; CHECK: ld.local.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load atomic i8, ptr addrspace(5) %a syncscope("block") monotonic, align 1
+ %a.add = add i8 %a.load, 1
+ ; CHECK: st.local.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic i8 %a.add, ptr addrspace(5) %a syncscope("block") monotonic, align 1
+
+ ; CHECK: ld.local.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load atomic i16, ptr addrspace(5) %b syncscope("block") monotonic, align 2
+ %b.add = add i16 %b.load, 1
+ ; CHECK: st.local.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic i16 %b.add, ptr addrspace(5) %b syncscope("block") monotonic, align 2
+
+ ; CHECK: ld.local.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load atomic i32, ptr addrspace(5) %c syncscope("block") monotonic, align 4
+ %c.add = add i32 %c.load, 1
+ ; CHECK: st.local.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store atomic i32 %c.add, ptr addrspace(5) %c syncscope("block") monotonic, align 4
+
+ ; CHECK: ld.local.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load atomic i64, ptr addrspace(5) %d syncscope("block") monotonic, align 8
+ %d.add = add i64 %d.load, 1
+ ; CHECK: st.local.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store atomic i64 %d.add, ptr addrspace(5) %d syncscope("block") monotonic, align 8
+
+ ; CHECK: ld.local.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %e.load = load atomic float, ptr addrspace(5) %e syncscope("block") monotonic, align 4
+ %e.add = fadd float %e.load, 1.
+ ; CHECK: st.local.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ store atomic float %e.add, ptr addrspace(5) %e syncscope("block") monotonic, align 4
+
+ ; CHECK: ld.local.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %f.load = load atomic double, ptr addrspace(5) %e syncscope("block") monotonic, align 8
+ %f.add = fadd double %f.load, 1.
+ ; CHECK: st.local.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ store atomic double %f.add, ptr addrspace(5) %e syncscope("block") monotonic, align 8
+
+ ret void
+}
+
+; CHECK-LABEL: local_monotonic_volatile_cta
+define void @local_monotonic_volatile_cta(ptr addrspace(5) %a, ptr addrspace(5) %b, ptr addrspace(5) %c, ptr addrspace(5) %d, ptr addrspace(5) %e) local_unnamed_addr {
+ ; CHECK: ld.local.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load atomic volatile i8, ptr addrspace(5) %a syncscope("block") monotonic, align 1
+ %a.add = add i8 %a.load, 1
+ ; CHECK: st.local.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic volatile i8 %a.add, ptr addrspace(5) %a syncscope("block") monotonic, align 1
+
+ ; CHECK: ld.local.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load atomic volatile i16, ptr addrspace(5) %b syncscope("block") monotonic, align 2
+ %b.add = add i16 %b.load, 1
+ ; CHECK: st.local.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic volatile i16 %b.add, ptr addrspace(5) %b syncscope("block") monotonic, align 2
+
+ ; CHECK: ld.local.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load atomic volatile i32, ptr addrspace(5) %c syncscope("block") monotonic, align 4
+ %c.add = add i32 %c.load, 1
+ ; CHECK: st.local.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store atomic volatile i32 %c.add, ptr addrspace(5) %c syncscope("block") monotonic, align 4
+
+ ; CHECK: ld.local.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load atomic volatile i64, ptr addrspace(5) %d syncscope("block") monotonic, align 8
+ %d.add = add i64 %d.load, 1
+ ; CHECK: st.local.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store atomic volatile i64 %d.add, ptr addrspace(5) %d syncscope("block") monotonic, align 8
+
+ ; CHECK: ld.local.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %e.load = load atomic volatile float, ptr addrspace(5) %e syncscope("block") monotonic, align 4
+ %e.add = fadd float %e.load, 1.
+ ; CHECK: st.local.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ store atomic volatile float %e.add, ptr addrspace(5) %e syncscope("block") monotonic, align 4
+
+ ; CHECK: ld.local.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %f.load = load atomic volatile double, ptr addrspace(5) %e syncscope("block") monotonic, align 8
+ %f.add = fadd double %f.load, 1.
+ ; CHECK: st.local.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ store atomic volatile double %f.add, ptr addrspace(5) %e syncscope("block") monotonic, align 8
+
+ ret void
+}
+
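+; Acquire/release atomics on local memory also fold to plain accesses; no ordering qualifiers are needed.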
+; CHECK-LABEL: local_acq_rel_sys
+define void @local_acq_rel_sys(ptr addrspace(5) %a, ptr addrspace(5) %b, ptr addrspace(5) %c, ptr addrspace(5) %d, ptr addrspace(5) %e) local_unnamed_addr {
; CHECK: ld.local.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
%a.load = load atomic i8, ptr addrspace(5) %a acquire, align 1
%a.add = add i8 %a.load, 1
@@ -604,7 +3070,7 @@ define void @local_acq_rel(ptr addrspace(5) %a, ptr addrspace(5) %b, ptr addrspa
; CHECK: ld.local.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
%e.load = load atomic float, ptr addrspace(5) %e acquire, align 4
- %e.add = fadd float %e.load, 1.0
+ %e.add = fadd float %e.load, 1.
; CHECK: st.local.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
store atomic float %e.add, ptr addrspace(5) %e release, align 4
@@ -617,11 +3083,8 @@ define void @local_acq_rel(ptr addrspace(5) %a, ptr addrspace(5) %b, ptr addrspa
ret void
}
-; CHECK-LABEL: local_acq_rel_volatile
-define void @local_acq_rel_volatile(ptr addrspace(5) %a, ptr addrspace(5) %b, ptr addrspace(5) %c, ptr addrspace(5) %d, ptr addrspace(5) %e) local_unnamed_addr {
- ; TODO: generate PTX that preserves Concurrent Forward Progress
- ; by using PTX atomic operations.
-
+; CHECK-LABEL: local_acq_rel_volatile_sys
+define void @local_acq_rel_volatile_sys(ptr addrspace(5) %a, ptr addrspace(5) %b, ptr addrspace(5) %c, ptr addrspace(5) %d, ptr addrspace(5) %e) local_unnamed_addr {
; CHECK: ld.local.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
%a.load = load atomic volatile i8, ptr addrspace(5) %a acquire, align 1
%a.add = add i8 %a.load, 1
@@ -648,7 +3111,7 @@ define void @local_acq_rel_volatile(ptr addrspace(5) %a, ptr addrspace(5) %b, pt
; CHECK: ld.local.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
%e.load = load atomic volatile float, ptr addrspace(5) %e acquire, align 4
- %e.add = fadd float %e.load, 1.0
+ %e.add = fadd float %e.load, 1.
; CHECK: st.local.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
store atomic volatile float %e.add, ptr addrspace(5) %e release, align 4
@@ -661,11 +3124,172 @@ define void @local_acq_rel_volatile(ptr addrspace(5) %a, ptr addrspace(5) %b, pt
ret void
}
-; CHECK-LABEL: local_seq_cst
-define void @local_seq_cst(ptr addrspace(5) %a, ptr addrspace(5) %b, ptr addrspace(5) %c, ptr addrspace(5) %d, ptr addrspace(5) %e) local_unnamed_addr {
- ; TODO: generate PTX that preserves Concurrent Forward Progress
- ; by using PTX atomic operations.
+; CHECK-LABEL: local_acq_rel_gpu
+define void @local_acq_rel_gpu(ptr addrspace(5) %a, ptr addrspace(5) %b, ptr addrspace(5) %c, ptr addrspace(5) %d, ptr addrspace(5) %e) local_unnamed_addr {
+ ; CHECK: ld.local.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load atomic i8, ptr addrspace(5) %a syncscope("device") acquire, align 1
+ %a.add = add i8 %a.load, 1
+ ; CHECK: st.local.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic i8 %a.add, ptr addrspace(5) %a syncscope("device") release, align 1
+
+ ; CHECK: ld.local.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load atomic i16, ptr addrspace(5) %b syncscope("device") acquire, align 2
+ %b.add = add i16 %b.load, 1
+ ; CHECK: st.local.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic i16 %b.add, ptr addrspace(5) %b syncscope("device") release, align 2
+
+ ; CHECK: ld.local.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load atomic i32, ptr addrspace(5) %c syncscope("device") acquire, align 4
+ %c.add = add i32 %c.load, 1
+ ; CHECK: st.local.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store atomic i32 %c.add, ptr addrspace(5) %c syncscope("device") release, align 4
+
+ ; CHECK: ld.local.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load atomic i64, ptr addrspace(5) %d syncscope("device") acquire, align 8
+ %d.add = add i64 %d.load, 1
+ ; CHECK: st.local.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store atomic i64 %d.add, ptr addrspace(5) %d syncscope("device") release, align 8
+
+ ; CHECK: ld.local.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %e.load = load atomic float, ptr addrspace(5) %e syncscope("device") acquire, align 4
+ %e.add = fadd float %e.load, 1.
+ ; CHECK: st.local.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ store atomic float %e.add, ptr addrspace(5) %e syncscope("device") release, align 4
+
+ ; CHECK: ld.local.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %f.load = load atomic double, ptr addrspace(5) %e syncscope("device") acquire, align 8
+ %f.add = fadd double %f.load, 1.
+ ; CHECK: st.local.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ store atomic double %f.add, ptr addrspace(5) %e syncscope("device") release, align 8
+
+ ret void
+}
+
+; CHECK-LABEL: local_acq_rel_volatile_gpu
+define void @local_acq_rel_volatile_gpu(ptr addrspace(5) %a, ptr addrspace(5) %b, ptr addrspace(5) %c, ptr addrspace(5) %d, ptr addrspace(5) %e) local_unnamed_addr {
+ ; CHECK: ld.local.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load atomic volatile i8, ptr addrspace(5) %a syncscope("device") acquire, align 1
+ %a.add = add i8 %a.load, 1
+ ; CHECK: st.local.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic volatile i8 %a.add, ptr addrspace(5) %a syncscope("device") release, align 1
+
+ ; CHECK: ld.local.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load atomic volatile i16, ptr addrspace(5) %b syncscope("device") acquire, align 2
+ %b.add = add i16 %b.load, 1
+ ; CHECK: st.local.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic volatile i16 %b.add, ptr addrspace(5) %b syncscope("device") release, align 2
+
+ ; CHECK: ld.local.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load atomic volatile i32, ptr addrspace(5) %c syncscope("device") acquire, align 4
+ %c.add = add i32 %c.load, 1
+ ; CHECK: st.local.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store atomic volatile i32 %c.add, ptr addrspace(5) %c syncscope("device") release, align 4
+
+ ; CHECK: ld.local.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load atomic volatile i64, ptr addrspace(5) %d syncscope("device") acquire, align 8
+ %d.add = add i64 %d.load, 1
+ ; CHECK: st.local.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store atomic volatile i64 %d.add, ptr addrspace(5) %d syncscope("device") release, align 8
+
+ ; CHECK: ld.local.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %e.load = load atomic volatile float, ptr addrspace(5) %e syncscope("device") acquire, align 4
+ %e.add = fadd float %e.load, 1.
+ ; CHECK: st.local.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ store atomic volatile float %e.add, ptr addrspace(5) %e syncscope("device") release, align 4
+
+ ; CHECK: ld.local.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %f.load = load atomic volatile double, ptr addrspace(5) %e syncscope("device") acquire, align 8
+ %f.add = fadd double %f.load, 1.
+ ; CHECK: st.local.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ store atomic volatile double %f.add, ptr addrspace(5) %e syncscope("device") release, align 8
+
+ ret void
+}
+
+; CHECK-LABEL: local_acq_rel_cta
+define void @local_acq_rel_cta(ptr addrspace(5) %a, ptr addrspace(5) %b, ptr addrspace(5) %c, ptr addrspace(5) %d, ptr addrspace(5) %e) local_unnamed_addr {
+ ; CHECK: ld.local.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load atomic i8, ptr addrspace(5) %a syncscope("block") acquire, align 1
+ %a.add = add i8 %a.load, 1
+ ; CHECK: st.local.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic i8 %a.add, ptr addrspace(5) %a syncscope("block") release, align 1
+
+ ; CHECK: ld.local.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load atomic i16, ptr addrspace(5) %b syncscope("block") acquire, align 2
+ %b.add = add i16 %b.load, 1
+ ; CHECK: st.local.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic i16 %b.add, ptr addrspace(5) %b syncscope("block") release, align 2
+
+ ; CHECK: ld.local.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load atomic i32, ptr addrspace(5) %c syncscope("block") acquire, align 4
+ %c.add = add i32 %c.load, 1
+ ; CHECK: st.local.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store atomic i32 %c.add, ptr addrspace(5) %c syncscope("block") release, align 4
+
+ ; CHECK: ld.local.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load atomic i64, ptr addrspace(5) %d syncscope("block") acquire, align 8
+ %d.add = add i64 %d.load, 1
+ ; CHECK: st.local.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store atomic i64 %d.add, ptr addrspace(5) %d syncscope("block") release, align 8
+
+ ; CHECK: ld.local.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %e.load = load atomic float, ptr addrspace(5) %e syncscope("block") acquire, align 4
+ %e.add = fadd float %e.load, 1.
+ ; CHECK: st.local.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ store atomic float %e.add, ptr addrspace(5) %e syncscope("block") release, align 4
+
+ ; CHECK: ld.local.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %f.load = load atomic double, ptr addrspace(5) %e syncscope("block") acquire, align 8
+ %f.add = fadd double %f.load, 1.
+ ; CHECK: st.local.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ store atomic double %f.add, ptr addrspace(5) %e syncscope("block") release, align 8
+
+ ret void
+}
+
+; CHECK-LABEL: local_acq_rel_volatile_cta
+define void @local_acq_rel_volatile_cta(ptr addrspace(5) %a, ptr addrspace(5) %b, ptr addrspace(5) %c, ptr addrspace(5) %d, ptr addrspace(5) %e) local_unnamed_addr {
+ ; CHECK: ld.local.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load atomic volatile i8, ptr addrspace(5) %a syncscope("block") acquire, align 1
+ %a.add = add i8 %a.load, 1
+ ; CHECK: st.local.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic volatile i8 %a.add, ptr addrspace(5) %a syncscope("block") release, align 1
+
+ ; CHECK: ld.local.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load atomic volatile i16, ptr addrspace(5) %b syncscope("block") acquire, align 2
+ %b.add = add i16 %b.load, 1
+ ; CHECK: st.local.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic volatile i16 %b.add, ptr addrspace(5) %b syncscope("block") release, align 2
+
+ ; CHECK: ld.local.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load atomic volatile i32, ptr addrspace(5) %c syncscope("block") acquire, align 4
+ %c.add = add i32 %c.load, 1
+ ; CHECK: st.local.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store atomic volatile i32 %c.add, ptr addrspace(5) %c syncscope("block") release, align 4
+
+ ; CHECK: ld.local.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load atomic volatile i64, ptr addrspace(5) %d syncscope("block") acquire, align 8
+ %d.add = add i64 %d.load, 1
+ ; CHECK: st.local.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store atomic volatile i64 %d.add, ptr addrspace(5) %d syncscope("block") release, align 8
+
+ ; CHECK: ld.local.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %e.load = load atomic volatile float, ptr addrspace(5) %e syncscope("block") acquire, align 4
+ %e.add = fadd float %e.load, 1.
+ ; CHECK: st.local.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ store atomic volatile float %e.add, ptr addrspace(5) %e syncscope("block") release, align 4
+
+ ; CHECK: ld.local.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %f.load = load atomic volatile double, ptr addrspace(5) %e syncscope("block") acquire, align 8
+ %f.add = fadd double %f.load, 1.
+ ; CHECK: st.local.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ store atomic volatile double %f.add, ptr addrspace(5) %e syncscope("block") release, align 8
+
+ ret void
+}
+
+; CHECK-LABEL: local_seq_cst_sys
+define void @local_seq_cst_sys(ptr addrspace(5) %a, ptr addrspace(5) %b, ptr addrspace(5) %c, ptr addrspace(5) %d, ptr addrspace(5) %e) local_unnamed_addr {
; CHECK: ld.local.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
%a.load = load atomic i8, ptr addrspace(5) %a seq_cst, align 1
%a.add = add i8 %a.load, 1
@@ -692,7 +3316,7 @@ define void @local_seq_cst(ptr addrspace(5) %a, ptr addrspace(5) %b, ptr addrspa
; CHECK: ld.local.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
%e.load = load atomic float, ptr addrspace(5) %e seq_cst, align 4
- %e.add = fadd float %e.load, 1.0
+ %e.add = fadd float %e.load, 1.
; CHECK: st.local.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
store atomic float %e.add, ptr addrspace(5) %e seq_cst, align 4
@@ -705,11 +3329,8 @@ define void @local_seq_cst(ptr addrspace(5) %a, ptr addrspace(5) %b, ptr addrspa
ret void
}
-; CHECK-LABEL: local_seq_cst_volatile
-define void @local_seq_cst_volatile(ptr addrspace(5) %a, ptr addrspace(5) %b, ptr addrspace(5) %c, ptr addrspace(5) %d, ptr addrspace(5) %e) local_unnamed_addr {
- ; TODO: generate PTX that preserves Concurrent Forward Progress
- ; by using PTX atomic operations.
-
+; CHECK-LABEL: local_seq_cst_volatile_sys
+define void @local_seq_cst_volatile_sys(ptr addrspace(5) %a, ptr addrspace(5) %b, ptr addrspace(5) %c, ptr addrspace(5) %d, ptr addrspace(5) %e) local_unnamed_addr {
; CHECK: ld.local.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
%a.load = load atomic volatile i8, ptr addrspace(5) %a seq_cst, align 1
%a.add = add i8 %a.load, 1
@@ -736,7 +3357,7 @@ define void @local_seq_cst_volatile(ptr addrspace(5) %a, ptr addrspace(5) %b, pt
; CHECK: ld.local.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
%e.load = load atomic volatile float, ptr addrspace(5) %e seq_cst, align 4
- %e.add = fadd float %e.load, 1.0
+ %e.add = fadd float %e.load, 1.
; CHECK: st.local.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
store atomic volatile float %e.add, ptr addrspace(5) %e seq_cst, align 4
@@ -746,10 +3367,169 @@ define void @local_seq_cst_volatile(ptr addrspace(5) %a, ptr addrspace(5) %b, pt
; CHECK: st.local.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
store atomic volatile double %f.add, ptr addrspace(5) %e seq_cst, align 8
- ; TODO: LLVM IR Verifier does not support atomics on vector types.
+ ret void
+}
+
+; CHECK-LABEL: local_seq_cst_gpu
+define void @local_seq_cst_gpu(ptr addrspace(5) %a, ptr addrspace(5) %b, ptr addrspace(5) %c, ptr addrspace(5) %d, ptr addrspace(5) %e) local_unnamed_addr {
+ ; CHECK: ld.local.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load atomic i8, ptr addrspace(5) %a syncscope("device") seq_cst, align 1
+ %a.add = add i8 %a.load, 1
+ ; CHECK: st.local.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic i8 %a.add, ptr addrspace(5) %a syncscope("device") seq_cst, align 1
+
+ ; CHECK: ld.local.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load atomic i16, ptr addrspace(5) %b syncscope("device") seq_cst, align 2
+ %b.add = add i16 %b.load, 1
+ ; CHECK: st.local.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic i16 %b.add, ptr addrspace(5) %b syncscope("device") seq_cst, align 2
+
+ ; CHECK: ld.local.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load atomic i32, ptr addrspace(5) %c syncscope("device") seq_cst, align 4
+ %c.add = add i32 %c.load, 1
+ ; CHECK: st.local.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store atomic i32 %c.add, ptr addrspace(5) %c syncscope("device") seq_cst, align 4
+
+ ; CHECK: ld.local.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load atomic i64, ptr addrspace(5) %d syncscope("device") seq_cst, align 8
+ %d.add = add i64 %d.load, 1
+ ; CHECK: st.local.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store atomic i64 %d.add, ptr addrspace(5) %d syncscope("device") seq_cst, align 8
+
+ ; CHECK: ld.local.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %e.load = load atomic float, ptr addrspace(5) %e syncscope("device") seq_cst, align 4
+ %e.add = fadd float %e.load, 1.
+ ; CHECK: st.local.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ store atomic float %e.add, ptr addrspace(5) %e syncscope("device") seq_cst, align 4
+
+ ; CHECK: ld.local.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %f.load = load atomic double, ptr addrspace(5) %e syncscope("device") seq_cst, align 8
+ %f.add = fadd double %f.load, 1.
+ ; CHECK: st.local.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ store atomic double %f.add, ptr addrspace(5) %e syncscope("device") seq_cst, align 8
ret void
}
-; TODO: add plain,atomic,volatile,atomic volatile tests
-; for .const and .param statespaces
\ No newline at end of file
+; CHECK-LABEL: local_seq_cst_volatile_gpu
+define void @local_seq_cst_volatile_gpu(ptr addrspace(5) %a, ptr addrspace(5) %b, ptr addrspace(5) %c, ptr addrspace(5) %d, ptr addrspace(5) %e) local_unnamed_addr {
+ ; CHECK: ld.local.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load atomic volatile i8, ptr addrspace(5) %a syncscope("device") seq_cst, align 1
+ %a.add = add i8 %a.load, 1
+ ; CHECK: st.local.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic volatile i8 %a.add, ptr addrspace(5) %a syncscope("device") seq_cst, align 1
+
+ ; CHECK: ld.local.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load atomic volatile i16, ptr addrspace(5) %b syncscope("device") seq_cst, align 2
+ %b.add = add i16 %b.load, 1
+ ; CHECK: st.local.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic volatile i16 %b.add, ptr addrspace(5) %b syncscope("device") seq_cst, align 2
+
+ ; CHECK: ld.local.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load atomic volatile i32, ptr addrspace(5) %c syncscope("device") seq_cst, align 4
+ %c.add = add i32 %c.load, 1
+ ; CHECK: st.local.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store atomic volatile i32 %c.add, ptr addrspace(5) %c syncscope("device") seq_cst, align 4
+
+ ; CHECK: ld.local.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load atomic volatile i64, ptr addrspace(5) %d syncscope("device") seq_cst, align 8
+ %d.add = add i64 %d.load, 1
+ ; CHECK: st.local.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store atomic volatile i64 %d.add, ptr addrspace(5) %d syncscope("device") seq_cst, align 8
+
+ ; CHECK: ld.local.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %e.load = load atomic volatile float, ptr addrspace(5) %e syncscope("device") seq_cst, align 4
+ %e.add = fadd float %e.load, 1.
+ ; CHECK: st.local.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ store atomic volatile float %e.add, ptr addrspace(5) %e syncscope("device") seq_cst, align 4
+
+ ; CHECK: ld.local.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %f.load = load atomic volatile double, ptr addrspace(5) %e syncscope("device") seq_cst, align 8
+ %f.add = fadd double %f.load, 1.
+ ; CHECK: st.local.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ store atomic volatile double %f.add, ptr addrspace(5) %e syncscope("device") seq_cst, align 8
+
+ ret void
+}
+
+; CHECK-LABEL: local_seq_cst_cta
+define void @local_seq_cst_cta(ptr addrspace(5) %a, ptr addrspace(5) %b, ptr addrspace(5) %c, ptr addrspace(5) %d, ptr addrspace(5) %e) local_unnamed_addr {
+ ; CHECK: ld.local.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load atomic i8, ptr addrspace(5) %a syncscope("block") seq_cst, align 1
+ %a.add = add i8 %a.load, 1
+ ; CHECK: st.local.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic i8 %a.add, ptr addrspace(5) %a syncscope("block") seq_cst, align 1
+
+ ; CHECK: ld.local.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load atomic i16, ptr addrspace(5) %b syncscope("block") seq_cst, align 2
+ %b.add = add i16 %b.load, 1
+ ; CHECK: st.local.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic i16 %b.add, ptr addrspace(5) %b syncscope("block") seq_cst, align 2
+
+ ; CHECK: ld.local.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load atomic i32, ptr addrspace(5) %c syncscope("block") seq_cst, align 4
+ %c.add = add i32 %c.load, 1
+ ; CHECK: st.local.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store atomic i32 %c.add, ptr addrspace(5) %c syncscope("block") seq_cst, align 4
+
+ ; CHECK: ld.local.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load atomic i64, ptr addrspace(5) %d syncscope("block") seq_cst, align 8
+ %d.add = add i64 %d.load, 1
+ ; CHECK: st.local.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store atomic i64 %d.add, ptr addrspace(5) %d syncscope("block") seq_cst, align 8
+
+ ; CHECK: ld.local.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %e.load = load atomic float, ptr addrspace(5) %e syncscope("block") seq_cst, align 4
+ %e.add = fadd float %e.load, 1.
+ ; CHECK: st.local.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ store atomic float %e.add, ptr addrspace(5) %e syncscope("block") seq_cst, align 4
+
+ ; CHECK: ld.local.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %f.load = load atomic double, ptr addrspace(5) %e syncscope("block") seq_cst, align 8
+ %f.add = fadd double %f.load, 1.
+ ; CHECK: st.local.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ store atomic double %f.add, ptr addrspace(5) %e syncscope("block") seq_cst, align 8
+
+ ret void
+}
+
+; CHECK-LABEL: local_seq_cst_volatile_cta
+define void @local_seq_cst_volatile_cta(ptr addrspace(5) %a, ptr addrspace(5) %b, ptr addrspace(5) %c, ptr addrspace(5) %d, ptr addrspace(5) %e) local_unnamed_addr {
+ ; CHECK: ld.local.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load atomic volatile i8, ptr addrspace(5) %a syncscope("block") seq_cst, align 1
+ %a.add = add i8 %a.load, 1
+ ; CHECK: st.local.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic volatile i8 %a.add, ptr addrspace(5) %a syncscope("block") seq_cst, align 1
+
+ ; CHECK: ld.local.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load atomic volatile i16, ptr addrspace(5) %b syncscope("block") seq_cst, align 2
+ %b.add = add i16 %b.load, 1
+ ; CHECK: st.local.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic volatile i16 %b.add, ptr addrspace(5) %b syncscope("block") seq_cst, align 2
+
+ ; CHECK: ld.local.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load atomic volatile i32, ptr addrspace(5) %c syncscope("block") seq_cst, align 4
+ %c.add = add i32 %c.load, 1
+ ; CHECK: st.local.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store atomic volatile i32 %c.add, ptr addrspace(5) %c syncscope("block") seq_cst, align 4
+
+ ; CHECK: ld.local.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load atomic volatile i64, ptr addrspace(5) %d syncscope("block") seq_cst, align 8
+ %d.add = add i64 %d.load, 1
+ ; CHECK: st.local.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store atomic volatile i64 %d.add, ptr addrspace(5) %d syncscope("block") seq_cst, align 8
+
+ ; CHECK: ld.local.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %e.load = load atomic volatile float, ptr addrspace(5) %e syncscope("block") seq_cst, align 4
+ %e.add = fadd float %e.load, 1.
+ ; CHECK: st.local.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ store atomic volatile float %e.add, ptr addrspace(5) %e syncscope("block") seq_cst, align 4
+
+ ; CHECK: ld.local.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %f.load = load atomic volatile double, ptr addrspace(5) %e syncscope("block") seq_cst, align 8
+ %f.add = fadd double %f.load, 1.
+ ; CHECK: st.local.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ store atomic volatile double %f.add, ptr addrspace(5) %e syncscope("block") seq_cst, align 8
+
+ ret void
+}
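+
+; Note: as the checks above show, every ordering and scope on the local
+; statespace currently lowers to a weak ld.local/st.local, since local
+; memory is private to a thread. A sketch, mirroring the checks above:
+;   %v = load atomic volatile i32, ptr addrspace(5) %p seq_cst, align 4
+; currently emits a plain:
+;   ld.local.u32 %r, [%rd]
+; Preserving Concurrent Forward Progress here is still a TODO.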
diff --git a/llvm/test/CodeGen/NVPTX/load-store-sm-90.ll b/llvm/test/CodeGen/NVPTX/load-store-sm-90.ll
new file mode 100644
index 0000000..645170d
--- /dev/null
+++ b/llvm/test/CodeGen/NVPTX/load-store-sm-90.ll
@@ -0,0 +1,1423 @@
+; RUN: llc < %s -march=nvptx64 -mcpu=sm_90 -mattr=+ptx78 | FileCheck %s
+; RUN: %if ptxas-12.2 %{ llc < %s -march=nvptx64 -mcpu=sm_90 -mattr=+ptx78 | %ptxas-verify -arch=sm_90 %}
+
+; TODO: fix "atomic load volatile acquire": generates "ld.acquire.sys;"
+; but should generate "ld.mmio.relaxed.sys; fence.acq_rel.sys;"
+; TODO: fix "atomic store volatile release": generates "st.release.sys;"
+; but should generate "fence.acq_rel.sys; st.mmio.relaxed.sys;"
+
+; TODO: fix "atomic load volatile seq_cst": generates "fence.sc.sys; ld.acquire.sys;"
+; but should generate "fence.sc.sys; ld.mmio.relaxed.sys; fence.acq_rel.sys;"
+; TODO: fix "atomic store volatile seq_cst": generates "fence.sc.sys; st.release.sys;"
+; but should generate "fence.sc.sys; st.mmio.relaxed.sys;"
+
+; TODO: add i1, <8 x i8>, and <6 x i8> vector tests.
+
+; TODO: add a test for vectors that exceed 128 bits in length.
+; Per https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#vectors
+; vectors cannot exceed 128 bits in length, i.e., .v4.u64 is not allowed.
+
+; TODO: generate PTX that preserves Concurrent Forward Progress
+; for atomic operations to the local statespace
+; by emitting atomic or volatile PTX operations.
+
+; TODO: decide how to expose atomic operations on vector types.
+
+; TODO: implement and test thread scope.
+
+; TODO: add weak, atomic, volatile, and atomic volatile tests
+; for .const and .param statespaces.
+
+; TODO: optimize .shared.sys into .shared.cta or .shared.cluster.
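+
+; For reference, the ordering -> PTX mapping exercised below (a summary of
+; the CHECK lines, not a normative statement):
+;   unordered/monotonic -> ld.relaxed.<scope> / st.relaxed.<scope>
+;   acquire/release     -> ld.acquire.<scope> / st.release.<scope>
+;   seq_cst             -> fence.sc.<scope> followed by the acquire/release form
+;   atomic volatile     -> conservatively widened to system scope
+;                          (ld.volatile/st.volatile, or the .sys forms)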
+
+;; generic statespace
+
+; CHECK-LABEL: generic_unordered_cluster
+define void @generic_unordered_cluster(ptr %a, ptr %b, ptr %c, ptr %d, ptr %e) local_unnamed_addr {
+ ; CHECK: ld.relaxed.cluster.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load atomic i8, ptr %a syncscope("cluster") unordered, align 1
+ %a.add = add i8 %a.load, 1
+ ; CHECK: st.relaxed.cluster.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic i8 %a.add, ptr %a syncscope("cluster") unordered, align 1
+
+ ; CHECK: ld.relaxed.cluster.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load atomic i16, ptr %b syncscope("cluster") unordered, align 2
+ %b.add = add i16 %b.load, 1
+ ; CHECK: st.relaxed.cluster.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic i16 %b.add, ptr %b syncscope("cluster") unordered, align 2
+
+ ; CHECK: ld.relaxed.cluster.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load atomic i32, ptr %c syncscope("cluster") unordered, align 4
+ %c.add = add i32 %c.load, 1
+ ; CHECK: st.relaxed.cluster.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store atomic i32 %c.add, ptr %c syncscope("cluster") unordered, align 4
+
+ ; CHECK: ld.relaxed.cluster.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load atomic i64, ptr %d syncscope("cluster") unordered, align 8
+ %d.add = add i64 %d.load, 1
+ ; CHECK: st.relaxed.cluster.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store atomic i64 %d.add, ptr %d syncscope("cluster") unordered, align 8
+
+ ; CHECK: ld.relaxed.cluster.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %e.load = load atomic float, ptr %e syncscope("cluster") unordered, align 4
+ %e.add = fadd float %e.load, 1.
+ ; CHECK: st.relaxed.cluster.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ store atomic float %e.add, ptr %e syncscope("cluster") unordered, align 4
+
+ ; CHECK: ld.relaxed.cluster.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %f.load = load atomic double, ptr %e syncscope("cluster") unordered, align 8
+ %f.add = fadd double %f.load, 1.
+ ; CHECK: st.relaxed.cluster.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ store atomic double %f.add, ptr %e syncscope("cluster") unordered, align 8
+
+ ret void
+}
+
+; CHECK-LABEL: generic_unordered_volatile_cluster
+define void @generic_unordered_volatile_cluster(ptr %a, ptr %b, ptr %c, ptr %d, ptr %e) local_unnamed_addr {
+ ; CHECK: ld.volatile.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load atomic volatile i8, ptr %a syncscope("cluster") unordered, align 1
+ %a.add = add i8 %a.load, 1
+ ; CHECK: st.volatile.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic volatile i8 %a.add, ptr %a syncscope("cluster") unordered, align 1
+
+ ; CHECK: ld.volatile.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load atomic volatile i16, ptr %b syncscope("cluster") unordered, align 2
+ %b.add = add i16 %b.load, 1
+ ; CHECK: st.volatile.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic volatile i16 %b.add, ptr %b syncscope("cluster") unordered, align 2
+
+ ; CHECK: ld.volatile.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load atomic volatile i32, ptr %c syncscope("cluster") unordered, align 4
+ %c.add = add i32 %c.load, 1
+ ; CHECK: st.volatile.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store atomic volatile i32 %c.add, ptr %c syncscope("cluster") unordered, align 4
+
+ ; CHECK: ld.volatile.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load atomic volatile i64, ptr %d syncscope("cluster") unordered, align 8
+ %d.add = add i64 %d.load, 1
+ ; CHECK: st.volatile.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store atomic volatile i64 %d.add, ptr %d syncscope("cluster") unordered, align 8
+
+ ; CHECK: ld.volatile.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %e.load = load atomic volatile float, ptr %e syncscope("cluster") unordered, align 4
+ %e.add = fadd float %e.load, 1.
+ ; CHECK: st.volatile.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ store atomic volatile float %e.add, ptr %e syncscope("cluster") unordered, align 4
+
+ ; CHECK: ld.volatile.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %f.load = load atomic volatile double, ptr %e syncscope("cluster") unordered, align 8
+ %f.add = fadd double %f.load, 1.
+ ; CHECK: st.volatile.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ store atomic volatile double %f.add, ptr %e syncscope("cluster") unordered, align 8
+
+ ret void
+}
+
+; CHECK-LABEL: generic_monotonic_cluster
+define void @generic_monotonic_cluster(ptr %a, ptr %b, ptr %c, ptr %d, ptr %e) local_unnamed_addr {
+ ; CHECK: ld.relaxed.cluster.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load atomic i8, ptr %a syncscope("cluster") monotonic, align 1
+ %a.add = add i8 %a.load, 1
+ ; CHECK: st.relaxed.cluster.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic i8 %a.add, ptr %a syncscope("cluster") monotonic, align 1
+
+ ; CHECK: ld.relaxed.cluster.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load atomic i16, ptr %b syncscope("cluster") monotonic, align 2
+ %b.add = add i16 %b.load, 1
+ ; CHECK: st.relaxed.cluster.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic i16 %b.add, ptr %b syncscope("cluster") monotonic, align 2
+
+ ; CHECK: ld.relaxed.cluster.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load atomic i32, ptr %c syncscope("cluster") monotonic, align 4
+ %c.add = add i32 %c.load, 1
+ ; CHECK: st.relaxed.cluster.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store atomic i32 %c.add, ptr %c syncscope("cluster") monotonic, align 4
+
+ ; CHECK: ld.relaxed.cluster.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load atomic i64, ptr %d syncscope("cluster") monotonic, align 8
+ %d.add = add i64 %d.load, 1
+ ; CHECK: st.relaxed.cluster.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store atomic i64 %d.add, ptr %d syncscope("cluster") monotonic, align 8
+
+ ; CHECK: ld.relaxed.cluster.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %e.load = load atomic float, ptr %e syncscope("cluster") monotonic, align 4
+ %e.add = fadd float %e.load, 1.
+ ; CHECK: st.relaxed.cluster.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ store atomic float %e.add, ptr %e syncscope("cluster") monotonic, align 4
+
+ ; CHECK: ld.relaxed.cluster.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %f.load = load atomic double, ptr %e syncscope("cluster") monotonic, align 8
+ %f.add = fadd double %f.load, 1.
+ ; CHECK: st.relaxed.cluster.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ store atomic double %f.add, ptr %e syncscope("cluster") monotonic, align 8
+
+ ret void
+}
+
+; CHECK-LABEL: generic_monotonic_volatile_cluster
+define void @generic_monotonic_volatile_cluster(ptr %a, ptr %b, ptr %c, ptr %d, ptr %e) local_unnamed_addr {
+ ; CHECK: ld.volatile.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load atomic volatile i8, ptr %a syncscope("cluster") monotonic, align 1
+ %a.add = add i8 %a.load, 1
+ ; CHECK: st.volatile.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic volatile i8 %a.add, ptr %a syncscope("cluster") monotonic, align 1
+
+ ; CHECK: ld.volatile.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load atomic volatile i16, ptr %b syncscope("cluster") monotonic, align 2
+ %b.add = add i16 %b.load, 1
+ ; CHECK: st.volatile.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic volatile i16 %b.add, ptr %b syncscope("cluster") monotonic, align 2
+
+ ; CHECK: ld.volatile.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load atomic volatile i32, ptr %c syncscope("cluster") monotonic, align 4
+ %c.add = add i32 %c.load, 1
+ ; CHECK: st.volatile.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store atomic volatile i32 %c.add, ptr %c syncscope("cluster") monotonic, align 4
+
+ ; CHECK: ld.volatile.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load atomic volatile i64, ptr %d syncscope("cluster") monotonic, align 8
+ %d.add = add i64 %d.load, 1
+ ; CHECK: st.volatile.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store atomic volatile i64 %d.add, ptr %d syncscope("cluster") monotonic, align 8
+
+ ; CHECK: ld.volatile.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %e.load = load atomic volatile float, ptr %e syncscope("cluster") monotonic, align 4
+ %e.add = fadd float %e.load, 1.
+ ; CHECK: st.volatile.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ store atomic volatile float %e.add, ptr %e syncscope("cluster") monotonic, align 4
+
+ ; CHECK: ld.volatile.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %f.load = load atomic volatile double, ptr %e syncscope("cluster") monotonic, align 8
+ %f.add = fadd double %f.load, 1.
+ ; CHECK: st.volatile.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ store atomic volatile double %f.add, ptr %e syncscope("cluster") monotonic, align 8
+
+ ret void
+}
+
+; CHECK-LABEL: generic_acq_rel_cluster
+define void @generic_acq_rel_cluster(ptr %a, ptr %b, ptr %c, ptr %d, ptr %e) local_unnamed_addr {
+ ; CHECK: ld.acquire.cluster.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load atomic i8, ptr %a syncscope("cluster") acquire, align 1
+ %a.add = add i8 %a.load, 1
+ ; CHECK: st.release.cluster.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic i8 %a.add, ptr %a syncscope("cluster") release, align 1
+
+ ; CHECK: ld.acquire.cluster.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load atomic i16, ptr %b syncscope("cluster") acquire, align 2
+ %b.add = add i16 %b.load, 1
+ ; CHECK: st.release.cluster.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic i16 %b.add, ptr %b syncscope("cluster") release, align 2
+
+ ; CHECK: ld.acquire.cluster.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load atomic i32, ptr %c syncscope("cluster") acquire, align 4
+ %c.add = add i32 %c.load, 1
+ ; CHECK: st.release.cluster.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store atomic i32 %c.add, ptr %c syncscope("cluster") release, align 4
+
+ ; CHECK: ld.acquire.cluster.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load atomic i64, ptr %d syncscope("cluster") acquire, align 8
+ %d.add = add i64 %d.load, 1
+ ; CHECK: st.release.cluster.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store atomic i64 %d.add, ptr %d syncscope("cluster") release, align 8
+
+ ; CHECK: ld.acquire.cluster.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %e.load = load atomic float, ptr %e syncscope("cluster") acquire, align 4
+ %e.add = fadd float %e.load, 1.
+ ; CHECK: st.release.cluster.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ store atomic float %e.add, ptr %e syncscope("cluster") release, align 4
+
+ ; CHECK: ld.acquire.cluster.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %f.load = load atomic double, ptr %e syncscope("cluster") acquire, align 8
+ %f.add = fadd double %f.load, 1.
+ ; CHECK: st.release.cluster.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ store atomic double %f.add, ptr %e syncscope("cluster") release, align 8
+
+ ret void
+}
+
+; CHECK-LABEL: generic_acq_rel_volatile_cluster
+define void @generic_acq_rel_volatile_cluster(ptr %a, ptr %b, ptr %c, ptr %d, ptr %e) local_unnamed_addr {
+ ; CHECK: ld.acquire.sys.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load atomic volatile i8, ptr %a syncscope("cluster") acquire, align 1
+ %a.add = add i8 %a.load, 1
+ ; CHECK: st.release.sys.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic volatile i8 %a.add, ptr %a syncscope("cluster") release, align 1
+
+ ; CHECK: ld.acquire.sys.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load atomic volatile i16, ptr %b syncscope("cluster") acquire, align 2
+ %b.add = add i16 %b.load, 1
+ ; CHECK: st.release.sys.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic volatile i16 %b.add, ptr %b syncscope("cluster") release, align 2
+
+ ; CHECK: ld.acquire.sys.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load atomic volatile i32, ptr %c syncscope("cluster") acquire, align 4
+ %c.add = add i32 %c.load, 1
+ ; CHECK: st.release.sys.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store atomic volatile i32 %c.add, ptr %c syncscope("cluster") release, align 4
+
+ ; CHECK: ld.acquire.sys.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load atomic volatile i64, ptr %d syncscope("cluster") acquire, align 8
+ %d.add = add i64 %d.load, 1
+ ; CHECK: st.release.sys.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store atomic volatile i64 %d.add, ptr %d syncscope("cluster") release, align 8
+
+ ; CHECK: ld.acquire.sys.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %e.load = load atomic volatile float, ptr %e syncscope("cluster") acquire, align 4
+ %e.add = fadd float %e.load, 1.
+ ; CHECK: st.release.sys.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ store atomic volatile float %e.add, ptr %e syncscope("cluster") release, align 4
+
+ ; CHECK: ld.acquire.sys.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %f.load = load atomic volatile double, ptr %e syncscope("cluster") acquire, align 8
+ %f.add = fadd double %f.load, 1.
+ ; CHECK: st.release.sys.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ store atomic volatile double %f.add, ptr %e syncscope("cluster") release, align 8
+
+ ret void
+}
+
+; CHECK-LABEL: generic_sc_cluster
+define void @generic_sc_cluster(ptr %a, ptr %b, ptr %c, ptr %d, ptr %e) local_unnamed_addr {
+ ; CHECK: fence.sc.cluster
+ ; CHECK: ld.acquire.cluster.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load atomic i8, ptr %a syncscope("cluster") seq_cst, align 1
+ %a.add = add i8 %a.load, 1
+ ; CHECK: fence.sc.cluster
+ ; CHECK: st.release.cluster.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic i8 %a.add, ptr %a syncscope("cluster") seq_cst, align 1
+
+ ; CHECK: fence.sc.cluster
+ ; CHECK: ld.acquire.cluster.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load atomic i16, ptr %b syncscope("cluster") seq_cst, align 2
+ %b.add = add i16 %b.load, 1
+ ; CHECK: fence.sc.cluster
+ ; CHECK: st.release.cluster.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic i16 %b.add, ptr %b syncscope("cluster") seq_cst, align 2
+
+ ; CHECK: fence.sc.cluster
+ ; CHECK: ld.acquire.cluster.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load atomic i32, ptr %c syncscope("cluster") seq_cst, align 4
+ %c.add = add i32 %c.load, 1
+ ; CHECK: fence.sc.cluster
+ ; CHECK: st.release.cluster.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store atomic i32 %c.add, ptr %c syncscope("cluster") seq_cst, align 4
+
+ ; CHECK: fence.sc.cluster
+ ; CHECK: ld.acquire.cluster.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load atomic i64, ptr %d syncscope("cluster") seq_cst, align 8
+ %d.add = add i64 %d.load, 1
+ ; CHECK: fence.sc.cluster
+ ; CHECK: st.release.cluster.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store atomic i64 %d.add, ptr %d syncscope("cluster") seq_cst, align 8
+
+ ; CHECK: fence.sc.cluster
+ ; CHECK: ld.acquire.cluster.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %e.load = load atomic float, ptr %e syncscope("cluster") seq_cst, align 4
+ %e.add = fadd float %e.load, 1.
+ ; CHECK: fence.sc.cluster
+ ; CHECK: st.release.cluster.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ store atomic float %e.add, ptr %e syncscope("cluster") seq_cst, align 4
+
+ ; CHECK: fence.sc.cluster
+ ; CHECK: ld.acquire.cluster.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %f.load = load atomic double, ptr %e syncscope("cluster") seq_cst, align 8
+ %f.add = fadd double %f.load, 1.
+ ; CHECK: fence.sc.cluster
+ ; CHECK: st.release.cluster.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ store atomic double %f.add, ptr %e syncscope("cluster") seq_cst, align 8
+
+ ret void
+}
+
+; CHECK-LABEL: generic_sc_volatile_cluster
+define void @generic_sc_volatile_cluster(ptr %a, ptr %b, ptr %c, ptr %d, ptr %e) local_unnamed_addr {
+ ; CHECK: fence.sc.sys
+ ; CHECK: ld.acquire.sys.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load atomic volatile i8, ptr %a syncscope("cluster") seq_cst, align 1
+ %a.add = add i8 %a.load, 1
+ ; CHECK: fence.sc.sys
+ ; CHECK: st.release.sys.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic volatile i8 %a.add, ptr %a syncscope("cluster") seq_cst, align 1
+
+ ; CHECK: fence.sc.sys
+ ; CHECK: ld.acquire.sys.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load atomic volatile i16, ptr %b syncscope("cluster") seq_cst, align 2
+ %b.add = add i16 %b.load, 1
+ ; CHECK: fence.sc.sys
+ ; CHECK: st.release.sys.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic volatile i16 %b.add, ptr %b syncscope("cluster") seq_cst, align 2
+
+ ; CHECK: fence.sc.sys
+ ; CHECK: ld.acquire.sys.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load atomic volatile i32, ptr %c syncscope("cluster") seq_cst, align 4
+ %c.add = add i32 %c.load, 1
+ ; CHECK: fence.sc.sys
+ ; CHECK: st.release.sys.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store atomic volatile i32 %c.add, ptr %c syncscope("cluster") seq_cst, align 4
+
+ ; CHECK: fence.sc.sys
+ ; CHECK: ld.acquire.sys.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load atomic volatile i64, ptr %d syncscope("cluster") seq_cst, align 8
+ %d.add = add i64 %d.load, 1
+ ; CHECK: fence.sc.sys
+ ; CHECK: st.release.sys.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store atomic volatile i64 %d.add, ptr %d syncscope("cluster") seq_cst, align 8
+
+ ; CHECK: fence.sc.sys
+ ; CHECK: ld.acquire.sys.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %e.load = load atomic volatile float, ptr %e syncscope("cluster") seq_cst, align 4
+ %e.add = fadd float %e.load, 1.
+ ; CHECK: fence.sc.sys
+ ; CHECK: st.release.sys.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ store atomic volatile float %e.add, ptr %e syncscope("cluster") seq_cst, align 4
+
+ ; CHECK: fence.sc.sys
+ ; CHECK: ld.acquire.sys.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %f.load = load atomic volatile double, ptr %e syncscope("cluster") seq_cst, align 8
+ %f.add = fadd double %f.load, 1.
+ ; CHECK: fence.sc.sys
+ ; CHECK: st.release.sys.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ store atomic volatile double %f.add, ptr %e syncscope("cluster") seq_cst, align 8
+
+ ret void
+}
+
+;; global statespace
+
+; CHECK-LABEL: global_unordered_cluster
+define void @global_unordered_cluster(ptr addrspace(1) %a, ptr addrspace(1) %b, ptr addrspace(1) %c, ptr addrspace(1) %d, ptr addrspace(1) %e) local_unnamed_addr {
+ ; CHECK: ld.relaxed.cluster.global.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load atomic i8, ptr addrspace(1) %a syncscope("cluster") unordered, align 1
+ %a.add = add i8 %a.load, 1
+ ; CHECK: st.relaxed.cluster.global.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic i8 %a.add, ptr addrspace(1) %a syncscope("cluster") unordered, align 1
+
+ ; CHECK: ld.relaxed.cluster.global.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load atomic i16, ptr addrspace(1) %b syncscope("cluster") unordered, align 2
+ %b.add = add i16 %b.load, 1
+ ; CHECK: st.relaxed.cluster.global.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic i16 %b.add, ptr addrspace(1) %b syncscope("cluster") unordered, align 2
+
+ ; CHECK: ld.relaxed.cluster.global.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load atomic i32, ptr addrspace(1) %c syncscope("cluster") unordered, align 4
+ %c.add = add i32 %c.load, 1
+ ; CHECK: st.relaxed.cluster.global.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store atomic i32 %c.add, ptr addrspace(1) %c syncscope("cluster") unordered, align 4
+
+ ; CHECK: ld.relaxed.cluster.global.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load atomic i64, ptr addrspace(1) %d syncscope("cluster") unordered, align 8
+ %d.add = add i64 %d.load, 1
+ ; CHECK: st.relaxed.cluster.global.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store atomic i64 %d.add, ptr addrspace(1) %d syncscope("cluster") unordered, align 8
+
+ ; CHECK: ld.relaxed.cluster.global.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %e.load = load atomic float, ptr addrspace(1) %e syncscope("cluster") unordered, align 4
+ %e.add = fadd float %e.load, 1.
+ ; CHECK: st.relaxed.cluster.global.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ store atomic float %e.add, ptr addrspace(1) %e syncscope("cluster") unordered, align 4
+
+ ; CHECK: ld.relaxed.cluster.global.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %f.load = load atomic double, ptr addrspace(1) %e syncscope("cluster") unordered, align 8
+ %f.add = fadd double %f.load, 1.
+ ; CHECK: st.relaxed.cluster.global.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ store atomic double %f.add, ptr addrspace(1) %e syncscope("cluster") unordered, align 8
+
+ ret void
+}
+
+; CHECK-LABEL: global_unordered_volatile_cluster
+define void @global_unordered_volatile_cluster(ptr addrspace(1) %a, ptr addrspace(1) %b, ptr addrspace(1) %c, ptr addrspace(1) %d, ptr addrspace(1) %e) local_unnamed_addr {
+ ; CHECK: ld.volatile.global.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load atomic volatile i8, ptr addrspace(1) %a syncscope("cluster") unordered, align 1
+ %a.add = add i8 %a.load, 1
+ ; CHECK: st.volatile.global.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic volatile i8 %a.add, ptr addrspace(1) %a syncscope("cluster") unordered, align 1
+
+ ; CHECK: ld.volatile.global.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load atomic volatile i16, ptr addrspace(1) %b syncscope("cluster") unordered, align 2
+ %b.add = add i16 %b.load, 1
+ ; CHECK: st.volatile.global.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic volatile i16 %b.add, ptr addrspace(1) %b syncscope("cluster") unordered, align 2
+
+ ; CHECK: ld.volatile.global.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load atomic volatile i32, ptr addrspace(1) %c syncscope("cluster") unordered, align 4
+ %c.add = add i32 %c.load, 1
+ ; CHECK: st.volatile.global.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store atomic volatile i32 %c.add, ptr addrspace(1) %c syncscope("cluster") unordered, align 4
+
+ ; CHECK: ld.volatile.global.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load atomic volatile i64, ptr addrspace(1) %d syncscope("cluster") unordered, align 8
+ %d.add = add i64 %d.load, 1
+ ; CHECK: st.volatile.global.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store atomic volatile i64 %d.add, ptr addrspace(1) %d syncscope("cluster") unordered, align 8
+
+ ; CHECK: ld.volatile.global.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %e.load = load atomic volatile float, ptr addrspace(1) %e syncscope("cluster") unordered, align 4
+ %e.add = fadd float %e.load, 1.
+ ; CHECK: st.volatile.global.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ store atomic volatile float %e.add, ptr addrspace(1) %e syncscope("cluster") unordered, align 4
+
+ ; CHECK: ld.volatile.global.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %f.load = load atomic volatile double, ptr addrspace(1) %e syncscope("cluster") unordered, align 8
+ %f.add = fadd double %f.load, 1.
+ ; CHECK: st.volatile.global.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ store atomic volatile double %f.add, ptr addrspace(1) %e syncscope("cluster") unordered, align 8
+
+ ret void
+}
+
+; CHECK-LABEL: global_monotonic_cluster
+define void @global_monotonic_cluster(ptr addrspace(1) %a, ptr addrspace(1) %b, ptr addrspace(1) %c, ptr addrspace(1) %d, ptr addrspace(1) %e) local_unnamed_addr {
+ ; CHECK: ld.relaxed.cluster.global.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load atomic i8, ptr addrspace(1) %a syncscope("cluster") monotonic, align 1
+ %a.add = add i8 %a.load, 1
+ ; CHECK: st.relaxed.cluster.global.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic i8 %a.add, ptr addrspace(1) %a syncscope("cluster") monotonic, align 1
+
+ ; CHECK: ld.relaxed.cluster.global.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load atomic i16, ptr addrspace(1) %b syncscope("cluster") monotonic, align 2
+ %b.add = add i16 %b.load, 1
+ ; CHECK: st.relaxed.cluster.global.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic i16 %b.add, ptr addrspace(1) %b syncscope("cluster") monotonic, align 2
+
+ ; CHECK: ld.relaxed.cluster.global.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load atomic i32, ptr addrspace(1) %c syncscope("cluster") monotonic, align 4
+ %c.add = add i32 %c.load, 1
+ ; CHECK: st.relaxed.cluster.global.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store atomic i32 %c.add, ptr addrspace(1) %c syncscope("cluster") monotonic, align 4
+
+ ; CHECK: ld.relaxed.cluster.global.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load atomic i64, ptr addrspace(1) %d syncscope("cluster") monotonic, align 8
+ %d.add = add i64 %d.load, 1
+ ; CHECK: st.relaxed.cluster.global.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store atomic i64 %d.add, ptr addrspace(1) %d syncscope("cluster") monotonic, align 8
+
+ ; CHECK: ld.relaxed.cluster.global.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %e.load = load atomic float, ptr addrspace(1) %e syncscope("cluster") monotonic, align 4
+ %e.add = fadd float %e.load, 1.
+ ; CHECK: st.relaxed.cluster.global.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ store atomic float %e.add, ptr addrspace(1) %e syncscope("cluster") monotonic, align 4
+
+ ; CHECK: ld.relaxed.cluster.global.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %f.load = load atomic double, ptr addrspace(1) %e syncscope("cluster") monotonic, align 8
+ %f.add = fadd double %f.load, 1.
+ ; CHECK: st.relaxed.cluster.global.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ store atomic double %f.add, ptr addrspace(1) %e syncscope("cluster") monotonic, align 8
+
+ ret void
+}
+
+; CHECK-LABEL: global_monotonic_volatile_cluster
+define void @global_monotonic_volatile_cluster(ptr addrspace(1) %a, ptr addrspace(1) %b, ptr addrspace(1) %c, ptr addrspace(1) %d, ptr addrspace(1) %e) local_unnamed_addr {
+ ; CHECK: ld.volatile.global.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load atomic volatile i8, ptr addrspace(1) %a syncscope("cluster") monotonic, align 1
+ %a.add = add i8 %a.load, 1
+ ; CHECK: st.volatile.global.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic volatile i8 %a.add, ptr addrspace(1) %a syncscope("cluster") monotonic, align 1
+
+ ; CHECK: ld.volatile.global.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load atomic volatile i16, ptr addrspace(1) %b syncscope("cluster") monotonic, align 2
+ %b.add = add i16 %b.load, 1
+ ; CHECK: st.volatile.global.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic volatile i16 %b.add, ptr addrspace(1) %b syncscope("cluster") monotonic, align 2
+
+ ; CHECK: ld.volatile.global.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load atomic volatile i32, ptr addrspace(1) %c syncscope("cluster") monotonic, align 4
+ %c.add = add i32 %c.load, 1
+ ; CHECK: st.volatile.global.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store atomic volatile i32 %c.add, ptr addrspace(1) %c syncscope("cluster") monotonic, align 4
+
+ ; CHECK: ld.volatile.global.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load atomic volatile i64, ptr addrspace(1) %d syncscope("cluster") monotonic, align 8
+ %d.add = add i64 %d.load, 1
+ ; CHECK: st.volatile.global.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store atomic volatile i64 %d.add, ptr addrspace(1) %d syncscope("cluster") monotonic, align 8
+
+ ; CHECK: ld.volatile.global.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %e.load = load atomic volatile float, ptr addrspace(1) %e syncscope("cluster") monotonic, align 4
+ %e.add = fadd float %e.load, 1.
+ ; CHECK: st.volatile.global.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ store atomic volatile float %e.add, ptr addrspace(1) %e syncscope("cluster") monotonic, align 4
+
+ ; CHECK: ld.volatile.global.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %f.load = load atomic volatile double, ptr addrspace(1) %e syncscope("cluster") monotonic, align 8
+ %f.add = fadd double %f.load, 1.
+ ; CHECK: st.volatile.global.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ store atomic volatile double %f.add, ptr addrspace(1) %e syncscope("cluster") monotonic, align 8
+
+ ret void
+}
+
+; CHECK-LABEL: global_acq_rel_cluster
+define void @global_acq_rel_cluster(ptr addrspace(1) %a, ptr addrspace(1) %b, ptr addrspace(1) %c, ptr addrspace(1) %d, ptr addrspace(1) %e) local_unnamed_addr {
+ ; CHECK: ld.acquire.cluster.global.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load atomic i8, ptr addrspace(1) %a syncscope("cluster") acquire, align 1
+ %a.add = add i8 %a.load, 1
+ ; CHECK: st.release.cluster.global.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic i8 %a.add, ptr addrspace(1) %a syncscope("cluster") release, align 1
+
+ ; CHECK: ld.acquire.cluster.global.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load atomic i16, ptr addrspace(1) %b syncscope("cluster") acquire, align 2
+ %b.add = add i16 %b.load, 1
+ ; CHECK: st.release.cluster.global.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic i16 %b.add, ptr addrspace(1) %b syncscope("cluster") release, align 2
+
+ ; CHECK: ld.acquire.cluster.global.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load atomic i32, ptr addrspace(1) %c syncscope("cluster") acquire, align 4
+ %c.add = add i32 %c.load, 1
+ ; CHECK: st.release.cluster.global.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store atomic i32 %c.add, ptr addrspace(1) %c syncscope("cluster") release, align 4
+
+ ; CHECK: ld.acquire.cluster.global.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load atomic i64, ptr addrspace(1) %d syncscope("cluster") acquire, align 8
+ %d.add = add i64 %d.load, 1
+ ; CHECK: st.release.cluster.global.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store atomic i64 %d.add, ptr addrspace(1) %d syncscope("cluster") release, align 8
+
+ ; CHECK: ld.acquire.cluster.global.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %e.load = load atomic float, ptr addrspace(1) %e syncscope("cluster") acquire, align 4
+ %e.add = fadd float %e.load, 1.
+ ; CHECK: st.release.cluster.global.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ store atomic float %e.add, ptr addrspace(1) %e syncscope("cluster") release, align 4
+
+ ; CHECK: ld.acquire.cluster.global.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %f.load = load atomic double, ptr addrspace(1) %e syncscope("cluster") acquire, align 8
+ %f.add = fadd double %f.load, 1.
+ ; CHECK: st.release.cluster.global.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ store atomic double %f.add, ptr addrspace(1) %e syncscope("cluster") release, align 8
+
+ ret void
+}
+
+; CHECK-LABEL: global_acq_rel_volatile_cluster
+define void @global_acq_rel_volatile_cluster(ptr addrspace(1) %a, ptr addrspace(1) %b, ptr addrspace(1) %c, ptr addrspace(1) %d, ptr addrspace(1) %e) local_unnamed_addr {
+ ; CHECK: ld.acquire.sys.global.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load atomic volatile i8, ptr addrspace(1) %a syncscope("cluster") acquire, align 1
+ %a.add = add i8 %a.load, 1
+ ; CHECK: st.release.sys.global.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic volatile i8 %a.add, ptr addrspace(1) %a syncscope("cluster") release, align 1
+
+ ; CHECK: ld.acquire.sys.global.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load atomic volatile i16, ptr addrspace(1) %b syncscope("cluster") acquire, align 2
+ %b.add = add i16 %b.load, 1
+ ; CHECK: st.release.sys.global.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic volatile i16 %b.add, ptr addrspace(1) %b syncscope("cluster") release, align 2
+
+ ; CHECK: ld.acquire.sys.global.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load atomic volatile i32, ptr addrspace(1) %c syncscope("cluster") acquire, align 4
+ %c.add = add i32 %c.load, 1
+ ; CHECK: st.release.sys.global.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store atomic volatile i32 %c.add, ptr addrspace(1) %c syncscope("cluster") release, align 4
+
+ ; CHECK: ld.acquire.sys.global.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load atomic volatile i64, ptr addrspace(1) %d syncscope("cluster") acquire, align 8
+ %d.add = add i64 %d.load, 1
+ ; CHECK: st.release.sys.global.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store atomic volatile i64 %d.add, ptr addrspace(1) %d syncscope("cluster") release, align 8
+
+ ; CHECK: ld.acquire.sys.global.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %e.load = load atomic volatile float, ptr addrspace(1) %e syncscope("cluster") acquire, align 4
+ %e.add = fadd float %e.load, 1.
+ ; CHECK: st.release.sys.global.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ store atomic volatile float %e.add, ptr addrspace(1) %e syncscope("cluster") release, align 4
+
+ ; CHECK: ld.acquire.sys.global.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %f.load = load atomic volatile double, ptr addrspace(1) %e syncscope("cluster") acquire, align 8
+ %f.add = fadd double %f.load, 1.
+ ; CHECK: st.release.sys.global.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ store atomic volatile double %f.add, ptr addrspace(1) %e syncscope("cluster") release, align 8
+
+ ret void
+}
+
+; CHECK-LABEL: global_seq_cst_cluster
+define void @global_seq_cst_cluster(ptr addrspace(1) %a, ptr addrspace(1) %b, ptr addrspace(1) %c, ptr addrspace(1) %d, ptr addrspace(1) %e) local_unnamed_addr {
+ ; CHECK: fence.sc.cluster
+ ; CHECK: ld.acquire.cluster.global.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load atomic i8, ptr addrspace(1) %a syncscope("cluster") seq_cst, align 1
+ %a.add = add i8 %a.load, 1
+ ; CHECK: fence.sc.cluster
+ ; CHECK: st.release.cluster.global.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic i8 %a.add, ptr addrspace(1) %a syncscope("cluster") seq_cst, align 1
+
+ ; CHECK: fence.sc.cluster
+ ; CHECK: ld.acquire.cluster.global.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load atomic i16, ptr addrspace(1) %b syncscope("cluster") seq_cst, align 2
+ %b.add = add i16 %b.load, 1
+ ; CHECK: fence.sc.cluster
+ ; CHECK: st.release.cluster.global.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic i16 %b.add, ptr addrspace(1) %b syncscope("cluster") seq_cst, align 2
+
+ ; CHECK: fence.sc.cluster
+ ; CHECK: ld.acquire.cluster.global.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load atomic i32, ptr addrspace(1) %c syncscope("cluster") seq_cst, align 4
+ %c.add = add i32 %c.load, 1
+ ; CHECK: fence.sc.cluster
+ ; CHECK: st.release.cluster.global.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store atomic i32 %c.add, ptr addrspace(1) %c syncscope("cluster") seq_cst, align 4
+
+ ; CHECK: fence.sc.cluster
+ ; CHECK: ld.acquire.cluster.global.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load atomic i64, ptr addrspace(1) %d syncscope("cluster") seq_cst, align 8
+ %d.add = add i64 %d.load, 1
+ ; CHECK: fence.sc.cluster
+ ; CHECK: st.release.cluster.global.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store atomic i64 %d.add, ptr addrspace(1) %d syncscope("cluster") seq_cst, align 8
+
+ ; CHECK: fence.sc.cluster
+ ; CHECK: ld.acquire.cluster.global.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %e.load = load atomic float, ptr addrspace(1) %e syncscope("cluster") seq_cst, align 4
+ %e.add = fadd float %e.load, 1.
+ ; CHECK: fence.sc.cluster
+ ; CHECK: st.release.cluster.global.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ store atomic float %e.add, ptr addrspace(1) %e syncscope("cluster") seq_cst, align 4
+
+ ; CHECK: fence.sc.cluster
+ ; CHECK: ld.acquire.cluster.global.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %f.load = load atomic double, ptr addrspace(1) %e syncscope("cluster") seq_cst, align 8
+ %f.add = fadd double %f.load, 1.
+ ; CHECK: fence.sc.cluster
+ ; CHECK: st.release.cluster.global.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ store atomic double %f.add, ptr addrspace(1) %e syncscope("cluster") seq_cst, align 8
+
+ ret void
+}
+
+; CHECK-LABEL: global_seq_cst_volatile_cluster
+define void @global_seq_cst_volatile_cluster(ptr addrspace(1) %a, ptr addrspace(1) %b, ptr addrspace(1) %c, ptr addrspace(1) %d, ptr addrspace(1) %e) local_unnamed_addr {
+ ; CHECK: fence.sc.sys
+ ; CHECK: ld.acquire.sys.global.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load atomic volatile i8, ptr addrspace(1) %a syncscope("cluster") seq_cst, align 1
+ %a.add = add i8 %a.load, 1
+ ; CHECK: fence.sc.sys
+ ; CHECK: st.release.sys.global.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic volatile i8 %a.add, ptr addrspace(1) %a syncscope("cluster") seq_cst, align 1
+
+ ; CHECK: fence.sc.sys
+ ; CHECK: ld.acquire.sys.global.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load atomic volatile i16, ptr addrspace(1) %b syncscope("cluster") seq_cst, align 2
+ %b.add = add i16 %b.load, 1
+ ; CHECK: fence.sc.sys
+ ; CHECK: st.release.sys.global.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic volatile i16 %b.add, ptr addrspace(1) %b syncscope("cluster") seq_cst, align 2
+
+ ; CHECK: fence.sc.sys
+ ; CHECK: ld.acquire.sys.global.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load atomic volatile i32, ptr addrspace(1) %c syncscope("cluster") seq_cst, align 4
+ %c.add = add i32 %c.load, 1
+ ; CHECK: fence.sc.sys
+ ; CHECK: st.release.sys.global.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store atomic volatile i32 %c.add, ptr addrspace(1) %c syncscope("cluster") seq_cst, align 4
+
+ ; CHECK: fence.sc.sys
+ ; CHECK: ld.acquire.sys.global.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load atomic volatile i64, ptr addrspace(1) %d syncscope("cluster") seq_cst, align 8
+ %d.add = add i64 %d.load, 1
+ ; CHECK: fence.sc.sys
+ ; CHECK: st.release.sys.global.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store atomic volatile i64 %d.add, ptr addrspace(1) %d syncscope("cluster") seq_cst, align 8
+
+ ; CHECK: fence.sc.sys
+ ; CHECK: ld.acquire.sys.global.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %e.load = load atomic volatile float, ptr addrspace(1) %e syncscope("cluster") seq_cst, align 4
+ %e.add = fadd float %e.load, 1.
+ ; CHECK: fence.sc.sys
+ ; CHECK: st.release.sys.global.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ store atomic volatile float %e.add, ptr addrspace(1) %e syncscope("cluster") seq_cst, align 4
+
+ ; CHECK: fence.sc.sys
+ ; CHECK: ld.acquire.sys.global.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %f.load = load atomic volatile double, ptr addrspace(1) %e syncscope("cluster") seq_cst, align 8
+ %f.add = fadd double %f.load, 1.
+ ; CHECK: fence.sc.sys
+ ; CHECK: st.release.sys.global.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ store atomic volatile double %f.add, ptr addrspace(1) %e syncscope("cluster") seq_cst, align 8
+
+ ret void
+}
+
+;; shared statespace
+
+; CHECK-LABEL: shared_unordered_cluster
+define void @shared_unordered_cluster(ptr addrspace(3) %a, ptr addrspace(3) %b, ptr addrspace(3) %c, ptr addrspace(3) %d, ptr addrspace(3) %e) local_unnamed_addr {
+ ; CHECK: ld.relaxed.cluster.shared.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load atomic i8, ptr addrspace(3) %a syncscope("cluster") unordered, align 1
+ %a.add = add i8 %a.load, 1
+ ; CHECK: st.relaxed.cluster.shared.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic i8 %a.add, ptr addrspace(3) %a syncscope("cluster") unordered, align 1
+
+ ; CHECK: ld.relaxed.cluster.shared.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load atomic i16, ptr addrspace(3) %b syncscope("cluster") unordered, align 2
+ %b.add = add i16 %b.load, 1
+ ; CHECK: st.relaxed.cluster.shared.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic i16 %b.add, ptr addrspace(3) %b syncscope("cluster") unordered, align 2
+
+ ; CHECK: ld.relaxed.cluster.shared.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load atomic i32, ptr addrspace(3) %c syncscope("cluster") unordered, align 4
+ %c.add = add i32 %c.load, 1
+ ; CHECK: st.relaxed.cluster.shared.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store atomic i32 %c.add, ptr addrspace(3) %c syncscope("cluster") unordered, align 4
+
+ ; CHECK: ld.relaxed.cluster.shared.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load atomic i64, ptr addrspace(3) %d syncscope("cluster") unordered, align 8
+ %d.add = add i64 %d.load, 1
+ ; CHECK: st.relaxed.cluster.shared.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store atomic i64 %d.add, ptr addrspace(3) %d syncscope("cluster") unordered, align 8
+
+ ; CHECK: ld.relaxed.cluster.shared.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %e.load = load atomic float, ptr addrspace(3) %e syncscope("cluster") unordered, align 4
+ %e.add = fadd float %e.load, 1.
+ ; CHECK: st.relaxed.cluster.shared.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ store atomic float %e.add, ptr addrspace(3) %e syncscope("cluster") unordered, align 4
+
+ ; CHECK: ld.relaxed.cluster.shared.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %f.load = load atomic double, ptr addrspace(3) %e syncscope("cluster") unordered, align 8
+ %f.add = fadd double %f.load, 1.
+ ; CHECK: st.relaxed.cluster.shared.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ store atomic double %f.add, ptr addrspace(3) %e syncscope("cluster") unordered, align 8
+
+ ret void
+}
+
+; CHECK-LABEL: shared_unordered_volatile_cluster
+define void @shared_unordered_volatile_cluster(ptr addrspace(3) %a, ptr addrspace(3) %b, ptr addrspace(3) %c, ptr addrspace(3) %d, ptr addrspace(3) %e) local_unnamed_addr {
+ ; CHECK: ld.volatile.shared.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load atomic volatile i8, ptr addrspace(3) %a syncscope("cluster") unordered, align 1
+ %a.add = add i8 %a.load, 1
+ ; CHECK: st.volatile.shared.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic volatile i8 %a.add, ptr addrspace(3) %a syncscope("cluster") unordered, align 1
+
+ ; CHECK: ld.volatile.shared.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load atomic volatile i16, ptr addrspace(3) %b syncscope("cluster") unordered, align 2
+ %b.add = add i16 %b.load, 1
+ ; CHECK: st.volatile.shared.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic volatile i16 %b.add, ptr addrspace(3) %b syncscope("cluster") unordered, align 2
+
+ ; CHECK: ld.volatile.shared.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load atomic volatile i32, ptr addrspace(3) %c syncscope("cluster") unordered, align 4
+ %c.add = add i32 %c.load, 1
+ ; CHECK: st.volatile.shared.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store atomic volatile i32 %c.add, ptr addrspace(3) %c syncscope("cluster") unordered, align 4
+
+ ; CHECK: ld.volatile.shared.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load atomic volatile i64, ptr addrspace(3) %d syncscope("cluster") unordered, align 8
+ %d.add = add i64 %d.load, 1
+ ; CHECK: st.volatile.shared.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store atomic volatile i64 %d.add, ptr addrspace(3) %d syncscope("cluster") unordered, align 8
+
+ ; CHECK: ld.volatile.shared.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %e.load = load atomic volatile float, ptr addrspace(3) %e syncscope("cluster") unordered, align 4
+ %e.add = fadd float %e.load, 1.
+ ; CHECK: st.volatile.shared.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ store atomic volatile float %e.add, ptr addrspace(3) %e syncscope("cluster") unordered, align 4
+
+ ; CHECK: ld.volatile.shared.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %f.load = load atomic volatile double, ptr addrspace(3) %e syncscope("cluster") unordered, align 8
+ %f.add = fadd double %f.load, 1.
+ ; CHECK: st.volatile.shared.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ store atomic volatile double %f.add, ptr addrspace(3) %e syncscope("cluster") unordered, align 8
+
+ ret void
+}
+
+; CHECK-LABEL: shared_monotonic_cluster
+define void @shared_monotonic_cluster(ptr addrspace(3) %a, ptr addrspace(3) %b, ptr addrspace(3) %c, ptr addrspace(3) %d, ptr addrspace(3) %e) local_unnamed_addr {
+ ; CHECK: ld.relaxed.cluster.shared.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load atomic i8, ptr addrspace(3) %a syncscope("cluster") monotonic, align 1
+ %a.add = add i8 %a.load, 1
+ ; CHECK: st.relaxed.cluster.shared.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic i8 %a.add, ptr addrspace(3) %a syncscope("cluster") monotonic, align 1
+
+ ; CHECK: ld.relaxed.cluster.shared.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load atomic i16, ptr addrspace(3) %b syncscope("cluster") monotonic, align 2
+ %b.add = add i16 %b.load, 1
+ ; CHECK: st.relaxed.cluster.shared.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic i16 %b.add, ptr addrspace(3) %b syncscope("cluster") monotonic, align 2
+
+ ; CHECK: ld.relaxed.cluster.shared.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load atomic i32, ptr addrspace(3) %c syncscope("cluster") monotonic, align 4
+ %c.add = add i32 %c.load, 1
+ ; CHECK: st.relaxed.cluster.shared.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store atomic i32 %c.add, ptr addrspace(3) %c syncscope("cluster") monotonic, align 4
+
+ ; CHECK: ld.relaxed.cluster.shared.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load atomic i64, ptr addrspace(3) %d syncscope("cluster") monotonic, align 8
+ %d.add = add i64 %d.load, 1
+ ; CHECK: st.relaxed.cluster.shared.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store atomic i64 %d.add, ptr addrspace(3) %d syncscope("cluster") monotonic, align 8
+
+ ; CHECK: ld.relaxed.cluster.shared.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %e.load = load atomic float, ptr addrspace(3) %e syncscope("cluster") monotonic, align 4
+ %e.add = fadd float %e.load, 1.
+ ; CHECK: st.relaxed.cluster.shared.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ store atomic float %e.add, ptr addrspace(3) %e syncscope("cluster") monotonic, align 4
+
+ ; CHECK: ld.relaxed.cluster.shared.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %f.load = load atomic double, ptr addrspace(3) %e syncscope("cluster") monotonic, align 8
+ %f.add = fadd double %f.load, 1.
+ ; CHECK: st.relaxed.cluster.shared.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ store atomic double %f.add, ptr addrspace(3) %e syncscope("cluster") monotonic, align 8
+
+ ret void
+}
+
+; CHECK-LABEL: shared_monotonic_volatile_cluster
+define void @shared_monotonic_volatile_cluster(ptr addrspace(3) %a, ptr addrspace(3) %b, ptr addrspace(3) %c, ptr addrspace(3) %d, ptr addrspace(3) %e) local_unnamed_addr {
+ ; CHECK: ld.volatile.shared.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load atomic volatile i8, ptr addrspace(3) %a syncscope("cluster") monotonic, align 1
+ %a.add = add i8 %a.load, 1
+ ; CHECK: st.volatile.shared.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic volatile i8 %a.add, ptr addrspace(3) %a syncscope("cluster") monotonic, align 1
+
+ ; CHECK: ld.volatile.shared.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load atomic volatile i16, ptr addrspace(3) %b syncscope("cluster") monotonic, align 2
+ %b.add = add i16 %b.load, 1
+ ; CHECK: st.volatile.shared.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic volatile i16 %b.add, ptr addrspace(3) %b syncscope("cluster") monotonic, align 2
+
+ ; CHECK: ld.volatile.shared.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load atomic volatile i32, ptr addrspace(3) %c syncscope("cluster") monotonic, align 4
+ %c.add = add i32 %c.load, 1
+ ; CHECK: st.volatile.shared.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store atomic volatile i32 %c.add, ptr addrspace(3) %c syncscope("cluster") monotonic, align 4
+
+ ; CHECK: ld.volatile.shared.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load atomic volatile i64, ptr addrspace(3) %d syncscope("cluster") monotonic, align 8
+ %d.add = add i64 %d.load, 1
+ ; CHECK: st.volatile.shared.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store atomic volatile i64 %d.add, ptr addrspace(3) %d syncscope("cluster") monotonic, align 8
+
+ ; CHECK: ld.volatile.shared.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %e.load = load atomic volatile float, ptr addrspace(3) %e syncscope("cluster") monotonic, align 4
+ %e.add = fadd float %e.load, 1.
+ ; CHECK: st.volatile.shared.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ store atomic volatile float %e.add, ptr addrspace(3) %e syncscope("cluster") monotonic, align 4
+
+ ; CHECK: ld.volatile.shared.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %f.load = load atomic volatile double, ptr addrspace(3) %e syncscope("cluster") monotonic, align 8
+ %f.add = fadd double %f.load, 1.
+ ; CHECK: st.volatile.shared.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ store atomic volatile double %f.add, ptr addrspace(3) %e syncscope("cluster") monotonic, align 8
+
+ ret void
+}
+
+; CHECK-LABEL: shared_acq_rel_cluster
+define void @shared_acq_rel_cluster(ptr addrspace(3) %a, ptr addrspace(3) %b, ptr addrspace(3) %c, ptr addrspace(3) %d, ptr addrspace(3) %e) local_unnamed_addr {
+ ; CHECK: ld.acquire.cluster.shared.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load atomic i8, ptr addrspace(3) %a syncscope("cluster") acquire, align 1
+ %a.add = add i8 %a.load, 1
+ ; CHECK: st.release.cluster.shared.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic i8 %a.add, ptr addrspace(3) %a syncscope("cluster") release, align 1
+
+ ; CHECK: ld.acquire.cluster.shared.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load atomic i16, ptr addrspace(3) %b syncscope("cluster") acquire, align 2
+ %b.add = add i16 %b.load, 1
+ ; CHECK: st.release.cluster.shared.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic i16 %b.add, ptr addrspace(3) %b syncscope("cluster") release, align 2
+
+ ; CHECK: ld.acquire.cluster.shared.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load atomic i32, ptr addrspace(3) %c syncscope("cluster") acquire, align 4
+ %c.add = add i32 %c.load, 1
+ ; CHECK: st.release.cluster.shared.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store atomic i32 %c.add, ptr addrspace(3) %c syncscope("cluster") release, align 4
+
+ ; CHECK: ld.acquire.cluster.shared.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load atomic i64, ptr addrspace(3) %d syncscope("cluster") acquire, align 8
+ %d.add = add i64 %d.load, 1
+ ; CHECK: st.release.cluster.shared.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store atomic i64 %d.add, ptr addrspace(3) %d syncscope("cluster") release, align 8
+
+ ; CHECK: ld.acquire.cluster.shared.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %e.load = load atomic float, ptr addrspace(3) %e syncscope("cluster") acquire, align 4
+ %e.add = fadd float %e.load, 1.
+ ; CHECK: st.release.cluster.shared.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ store atomic float %e.add, ptr addrspace(3) %e syncscope("cluster") release, align 4
+
+ ; CHECK: ld.acquire.cluster.shared.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %f.load = load atomic double, ptr addrspace(3) %e syncscope("cluster") acquire, align 8
+ %f.add = fadd double %f.load, 1.
+ ; CHECK: st.release.cluster.shared.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ store atomic double %f.add, ptr addrspace(3) %e syncscope("cluster") release, align 8
+
+ ret void
+}
+
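+; Unlike the non-volatile variant above, which keeps the .cluster scope,
+; volatile acquire/release atomics are expected to widen to .sys scope.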
+; CHECK-LABEL: shared_acq_rel_volatile_cluster
+define void @shared_acq_rel_volatile_cluster(ptr addrspace(3) %a, ptr addrspace(3) %b, ptr addrspace(3) %c, ptr addrspace(3) %d, ptr addrspace(3) %e) local_unnamed_addr {
+ ; CHECK: ld.acquire.sys.shared.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load atomic volatile i8, ptr addrspace(3) %a syncscope("cluster") acquire, align 1
+ %a.add = add i8 %a.load, 1
+ ; CHECK: st.release.sys.shared.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic volatile i8 %a.add, ptr addrspace(3) %a syncscope("cluster") release, align 1
+
+ ; CHECK: ld.acquire.sys.shared.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load atomic volatile i16, ptr addrspace(3) %b syncscope("cluster") acquire, align 2
+ %b.add = add i16 %b.load, 1
+ ; CHECK: st.release.sys.shared.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic volatile i16 %b.add, ptr addrspace(3) %b syncscope("cluster") release, align 2
+
+ ; CHECK: ld.acquire.sys.shared.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load atomic volatile i32, ptr addrspace(3) %c syncscope("cluster") acquire, align 4
+ %c.add = add i32 %c.load, 1
+ ; CHECK: st.release.sys.shared.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store atomic volatile i32 %c.add, ptr addrspace(3) %c syncscope("cluster") release, align 4
+
+ ; CHECK: ld.acquire.sys.shared.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load atomic volatile i64, ptr addrspace(3) %d syncscope("cluster") acquire, align 8
+ %d.add = add i64 %d.load, 1
+ ; CHECK: st.release.sys.shared.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store atomic volatile i64 %d.add, ptr addrspace(3) %d syncscope("cluster") release, align 8
+
+ ; CHECK: ld.acquire.sys.shared.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %e.load = load atomic volatile float, ptr addrspace(3) %e syncscope("cluster") acquire, align 4
+ %e.add = fadd float %e.load, 1.
+ ; CHECK: st.release.sys.shared.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ store atomic volatile float %e.add, ptr addrspace(3) %e syncscope("cluster") release, align 4
+
+ ; CHECK: ld.acquire.sys.shared.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %f.load = load atomic volatile double, ptr addrspace(3) %e syncscope("cluster") acquire, align 8
+ %f.add = fadd double %f.load, 1.
+ ; CHECK: st.release.sys.shared.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ store atomic volatile double %f.add, ptr addrspace(3) %e syncscope("cluster") release, align 8
+
+ ret void
+}
+
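+; seq_cst is expected to lower to a fence.sc at the requested scope,
+; followed by an acquire load or a release store.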
+; CHECK-LABEL: shared_seq_cst_cluster
+define void @shared_seq_cst_cluster(ptr addrspace(3) %a, ptr addrspace(3) %b, ptr addrspace(3) %c, ptr addrspace(3) %d, ptr addrspace(3) %e) local_unnamed_addr {
+ ; CHECK: fence.sc.cluster
+ ; CHECK: ld.acquire.cluster.shared.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load atomic i8, ptr addrspace(3) %a syncscope("cluster") seq_cst, align 1
+ %a.add = add i8 %a.load, 1
+ ; CHECK: fence.sc.cluster
+ ; CHECK: st.release.cluster.shared.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic i8 %a.add, ptr addrspace(3) %a syncscope("cluster") seq_cst, align 1
+
+ ; CHECK: fence.sc.cluster
+ ; CHECK: ld.acquire.cluster.shared.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load atomic i16, ptr addrspace(3) %b syncscope("cluster") seq_cst, align 2
+ %b.add = add i16 %b.load, 1
+ ; CHECK: fence.sc.cluster
+ ; CHECK: st.release.cluster.shared.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic i16 %b.add, ptr addrspace(3) %b syncscope("cluster") seq_cst, align 2
+
+ ; CHECK: fence.sc.cluster
+ ; CHECK: ld.acquire.cluster.shared.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load atomic i32, ptr addrspace(3) %c syncscope("cluster") seq_cst, align 4
+ %c.add = add i32 %c.load, 1
+ ; CHECK: fence.sc.cluster
+ ; CHECK: st.release.cluster.shared.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store atomic i32 %c.add, ptr addrspace(3) %c syncscope("cluster") seq_cst, align 4
+
+ ; CHECK: fence.sc.cluster
+ ; CHECK: ld.acquire.cluster.shared.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load atomic i64, ptr addrspace(3) %d syncscope("cluster") seq_cst, align 8
+ %d.add = add i64 %d.load, 1
+ ; CHECK: fence.sc.cluster
+ ; CHECK: st.release.cluster.shared.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store atomic i64 %d.add, ptr addrspace(3) %d syncscope("cluster") seq_cst, align 8
+
+ ; CHECK: fence.sc.cluster
+ ; CHECK: ld.acquire.cluster.shared.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %e.load = load atomic float, ptr addrspace(3) %e syncscope("cluster") seq_cst, align 4
+ %e.add = fadd float %e.load, 1.
+ ; CHECK: fence.sc.cluster
+ ; CHECK: st.release.cluster.shared.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ store atomic float %e.add, ptr addrspace(3) %e syncscope("cluster") seq_cst, align 4
+
+ ; CHECK: fence.sc.cluster
+ ; CHECK: ld.acquire.cluster.shared.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %f.load = load atomic double, ptr addrspace(3) %e syncscope("cluster") seq_cst, align 8
+ %f.add = fadd double %f.load, 1.
+ ; CHECK: fence.sc.cluster
+ ; CHECK: st.release.cluster.shared.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ store atomic double %f.add, ptr addrspace(3) %e syncscope("cluster") seq_cst, align 8
+
+ ret void
+}
+
+; CHECK-LABEL: shared_seq_cst_volatile_cluster
+define void @shared_seq_cst_volatile_cluster(ptr addrspace(3) %a, ptr addrspace(3) %b, ptr addrspace(3) %c, ptr addrspace(3) %d, ptr addrspace(3) %e) local_unnamed_addr {
+ ; CHECK: fence.sc.sys
+ ; CHECK: ld.acquire.sys.shared.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load atomic volatile i8, ptr addrspace(3) %a syncscope("cluster") seq_cst, align 1
+ %a.add = add i8 %a.load, 1
+ ; CHECK: fence.sc.sys
+ ; CHECK: st.release.sys.shared.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic volatile i8 %a.add, ptr addrspace(3) %a syncscope("cluster") seq_cst, align 1
+
+ ; CHECK: fence.sc.sys
+ ; CHECK: ld.acquire.sys.shared.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load atomic volatile i16, ptr addrspace(3) %b syncscope("cluster") seq_cst, align 2
+ %b.add = add i16 %b.load, 1
+ ; CHECK: fence.sc.sys
+ ; CHECK: st.release.sys.shared.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic volatile i16 %b.add, ptr addrspace(3) %b syncscope("cluster") seq_cst, align 2
+
+ ; CHECK: fence.sc.sys
+ ; CHECK: ld.acquire.sys.shared.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load atomic volatile i32, ptr addrspace(3) %c syncscope("cluster") seq_cst, align 4
+ %c.add = add i32 %c.load, 1
+ ; CHECK: fence.sc.sys
+ ; CHECK: st.release.sys.shared.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store atomic volatile i32 %c.add, ptr addrspace(3) %c syncscope("cluster") seq_cst, align 4
+
+ ; CHECK: fence.sc.sys
+ ; CHECK: ld.acquire.sys.shared.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load atomic volatile i64, ptr addrspace(3) %d syncscope("cluster") seq_cst, align 8
+ %d.add = add i64 %d.load, 1
+ ; CHECK: fence.sc.sys
+ ; CHECK: st.release.sys.shared.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store atomic volatile i64 %d.add, ptr addrspace(3) %d syncscope("cluster") seq_cst, align 8
+
+ ; CHECK: fence.sc.sys
+ ; CHECK: ld.acquire.sys.shared.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %e.load = load atomic volatile float, ptr addrspace(3) %e syncscope("cluster") seq_cst, align 4
+ %e.add = fadd float %e.load, 1.
+ ; CHECK: fence.sc.sys
+ ; CHECK: st.release.sys.shared.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ store atomic volatile float %e.add, ptr addrspace(3) %e syncscope("cluster") seq_cst, align 4
+
+ ; CHECK: fence.sc.sys
+ ; CHECK: ld.acquire.sys.shared.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %f.load = load atomic volatile double, ptr addrspace(3) %e syncscope("cluster") seq_cst, align 8
+ %f.add = fadd double %f.load, 1.
+ ; CHECK: fence.sc.sys
+ ; CHECK: st.release.sys.shared.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ store atomic volatile double %f.add, ptr addrspace(3) %e syncscope("cluster") seq_cst, align 8
+
+ ret void
+}
+
+;; local statespace
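+; Local memory is private to each thread, so atomic orderings, scopes, and
+; volatile have no observable effect there: the checks below expect plain
+; ld.local/st.local for every variant.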
+
+; CHECK-LABEL: local_unordered_cluster
+define void @local_unordered_cluster(ptr addrspace(5) %a, ptr addrspace(5) %b, ptr addrspace(5) %c, ptr addrspace(5) %d, ptr addrspace(5) %e) local_unnamed_addr {
+ ; CHECK: ld.local.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load atomic i8, ptr addrspace(5) %a syncscope("cluster") unordered, align 1
+ %a.add = add i8 %a.load, 1
+ ; CHECK: st.local.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic i8 %a.add, ptr addrspace(5) %a syncscope("cluster") unordered, align 1
+
+ ; CHECK: ld.local.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load atomic i16, ptr addrspace(5) %b syncscope("cluster") unordered, align 2
+ %b.add = add i16 %b.load, 1
+ ; CHECK: st.local.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic i16 %b.add, ptr addrspace(5) %b syncscope("cluster") unordered, align 2
+
+ ; CHECK: ld.local.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load atomic i32, ptr addrspace(5) %c syncscope("cluster") unordered, align 4
+ %c.add = add i32 %c.load, 1
+ ; CHECK: st.local.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store atomic i32 %c.add, ptr addrspace(5) %c syncscope("cluster") unordered, align 4
+
+ ; CHECK: ld.local.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load atomic i64, ptr addrspace(5) %d syncscope("cluster") unordered, align 8
+ %d.add = add i64 %d.load, 1
+ ; CHECK: st.local.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store atomic i64 %d.add, ptr addrspace(5) %d syncscope("cluster") unordered, align 8
+
+ ; CHECK: ld.local.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %e.load = load atomic float, ptr addrspace(5) %e syncscope("cluster") unordered, align 4
+ %e.add = fadd float %e.load, 1.
+ ; CHECK: st.local.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ store atomic float %e.add, ptr addrspace(5) %e syncscope("cluster") unordered, align 4
+
+ ; CHECK: ld.local.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %f.load = load atomic double, ptr addrspace(5) %e syncscope("cluster") unordered, align 8
+ %f.add = fadd double %f.load, 1.
+ ; CHECK: st.local.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ store atomic double %f.add, ptr addrspace(5) %e syncscope("cluster") unordered, align 8
+
+ ret void
+}
+
+; CHECK-LABEL: local_unordered_volatile_cluster
+define void @local_unordered_volatile_cluster(ptr addrspace(5) %a, ptr addrspace(5) %b, ptr addrspace(5) %c, ptr addrspace(5) %d, ptr addrspace(5) %e) local_unnamed_addr {
+ ; CHECK: ld.local.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load atomic volatile i8, ptr addrspace(5) %a syncscope("cluster") unordered, align 1
+ %a.add = add i8 %a.load, 1
+ ; CHECK: st.local.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic volatile i8 %a.add, ptr addrspace(5) %a syncscope("cluster") unordered, align 1
+
+ ; CHECK: ld.local.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load atomic volatile i16, ptr addrspace(5) %b syncscope("cluster") unordered, align 2
+ %b.add = add i16 %b.load, 1
+ ; CHECK: st.local.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic volatile i16 %b.add, ptr addrspace(5) %b syncscope("cluster") unordered, align 2
+
+ ; CHECK: ld.local.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load atomic volatile i32, ptr addrspace(5) %c syncscope("cluster") unordered, align 4
+ %c.add = add i32 %c.load, 1
+ ; CHECK: st.local.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store atomic volatile i32 %c.add, ptr addrspace(5) %c syncscope("cluster") unordered, align 4
+
+ ; CHECK: ld.local.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load atomic volatile i64, ptr addrspace(5) %d syncscope("cluster") unordered, align 8
+ %d.add = add i64 %d.load, 1
+ ; CHECK: st.local.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store atomic volatile i64 %d.add, ptr addrspace(5) %d syncscope("cluster") unordered, align 8
+
+ ; CHECK: ld.local.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %e.load = load atomic volatile float, ptr addrspace(5) %e syncscope("cluster") unordered, align 4
+ %e.add = fadd float %e.load, 1.
+ ; CHECK: st.local.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ store atomic volatile float %e.add, ptr addrspace(5) %e syncscope("cluster") unordered, align 4
+
+ ; CHECK: ld.local.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %f.load = load atomic volatile double, ptr addrspace(5) %e syncscope("cluster") unordered, align 8
+ %f.add = fadd double %f.load, 1.
+ ; CHECK: st.local.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ store atomic volatile double %f.add, ptr addrspace(5) %e syncscope("cluster") unordered, align 8
+
+ ret void
+}
+
+; CHECK-LABEL: local_monotonic_cluster
+define void @local_monotonic_cluster(ptr addrspace(5) %a, ptr addrspace(5) %b, ptr addrspace(5) %c, ptr addrspace(5) %d, ptr addrspace(5) %e) local_unnamed_addr {
+ ; CHECK: ld.local.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load atomic i8, ptr addrspace(5) %a syncscope("cluster") monotonic, align 1
+ %a.add = add i8 %a.load, 1
+ ; CHECK: st.local.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic i8 %a.add, ptr addrspace(5) %a syncscope("cluster") monotonic, align 1
+
+ ; CHECK: ld.local.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load atomic i16, ptr addrspace(5) %b syncscope("cluster") monotonic, align 2
+ %b.add = add i16 %b.load, 1
+ ; CHECK: st.local.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic i16 %b.add, ptr addrspace(5) %b syncscope("cluster") monotonic, align 2
+
+ ; CHECK: ld.local.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load atomic i32, ptr addrspace(5) %c syncscope("cluster") monotonic, align 4
+ %c.add = add i32 %c.load, 1
+ ; CHECK: st.local.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store atomic i32 %c.add, ptr addrspace(5) %c syncscope("cluster") monotonic, align 4
+
+ ; CHECK: ld.local.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load atomic i64, ptr addrspace(5) %d syncscope("cluster") monotonic, align 8
+ %d.add = add i64 %d.load, 1
+ ; CHECK: st.local.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store atomic i64 %d.add, ptr addrspace(5) %d syncscope("cluster") monotonic, align 8
+
+ ; CHECK: ld.local.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %e.load = load atomic float, ptr addrspace(5) %e syncscope("cluster") monotonic, align 4
+ %e.add = fadd float %e.load, 1.
+ ; CHECK: st.local.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ store atomic float %e.add, ptr addrspace(5) %e syncscope("cluster") monotonic, align 4
+
+ ; CHECK: ld.local.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %f.load = load atomic double, ptr addrspace(5) %e syncscope("cluster") monotonic, align 8
+ %f.add = fadd double %f.load, 1.
+ ; CHECK: st.local.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ store atomic double %f.add, ptr addrspace(5) %e syncscope("cluster") monotonic, align 8
+
+ ret void
+}
+
+; CHECK-LABEL: local_monotonic_volatile_cluster
+define void @local_monotonic_volatile_cluster(ptr addrspace(5) %a, ptr addrspace(5) %b, ptr addrspace(5) %c, ptr addrspace(5) %d, ptr addrspace(5) %e) local_unnamed_addr {
+ ; CHECK: ld.local.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load atomic volatile i8, ptr addrspace(5) %a syncscope("cluster") monotonic, align 1
+ %a.add = add i8 %a.load, 1
+ ; CHECK: st.local.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic volatile i8 %a.add, ptr addrspace(5) %a syncscope("cluster") monotonic, align 1
+
+ ; CHECK: ld.local.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load atomic volatile i16, ptr addrspace(5) %b syncscope("cluster") monotonic, align 2
+ %b.add = add i16 %b.load, 1
+ ; CHECK: st.local.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic volatile i16 %b.add, ptr addrspace(5) %b syncscope("cluster") monotonic, align 2
+
+ ; CHECK: ld.local.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load atomic volatile i32, ptr addrspace(5) %c syncscope("cluster") monotonic, align 4
+ %c.add = add i32 %c.load, 1
+ ; CHECK: st.local.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store atomic volatile i32 %c.add, ptr addrspace(5) %c syncscope("cluster") monotonic, align 4
+
+ ; CHECK: ld.local.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load atomic volatile i64, ptr addrspace(5) %d syncscope("cluster") monotonic, align 8
+ %d.add = add i64 %d.load, 1
+ ; CHECK: st.local.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store atomic volatile i64 %d.add, ptr addrspace(5) %d syncscope("cluster") monotonic, align 8
+
+ ; CHECK: ld.local.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %e.load = load atomic volatile float, ptr addrspace(5) %e syncscope("cluster") monotonic, align 4
+ %e.add = fadd float %e.load, 1.
+ ; CHECK: st.local.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ store atomic volatile float %e.add, ptr addrspace(5) %e syncscope("cluster") monotonic, align 4
+
+ ; CHECK: ld.local.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %f.load = load atomic volatile double, ptr addrspace(5) %e syncscope("cluster") monotonic, align 8
+ %f.add = fadd double %f.load, 1.
+ ; CHECK: st.local.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ store atomic volatile double %f.add, ptr addrspace(5) %e syncscope("cluster") monotonic, align 8
+
+ ret void
+}
+
+; CHECK-LABEL: local_acq_rel_cluster
+define void @local_acq_rel_cluster(ptr addrspace(5) %a, ptr addrspace(5) %b, ptr addrspace(5) %c, ptr addrspace(5) %d, ptr addrspace(5) %e) local_unnamed_addr {
+ ; CHECK: ld.local.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load atomic i8, ptr addrspace(5) %a syncscope("cluster") acquire, align 1
+ %a.add = add i8 %a.load, 1
+ ; CHECK: st.local.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic i8 %a.add, ptr addrspace(5) %a syncscope("cluster") release, align 1
+
+ ; CHECK: ld.local.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load atomic i16, ptr addrspace(5) %b syncscope("cluster") acquire, align 2
+ %b.add = add i16 %b.load, 1
+ ; CHECK: st.local.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic i16 %b.add, ptr addrspace(5) %b syncscope("cluster") release, align 2
+
+ ; CHECK: ld.local.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load atomic i32, ptr addrspace(5) %c syncscope("cluster") acquire, align 4
+ %c.add = add i32 %c.load, 1
+ ; CHECK: st.local.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store atomic i32 %c.add, ptr addrspace(5) %c syncscope("cluster") release, align 4
+
+ ; CHECK: ld.local.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load atomic i64, ptr addrspace(5) %d syncscope("cluster") acquire, align 8
+ %d.add = add i64 %d.load, 1
+ ; CHECK: st.local.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store atomic i64 %d.add, ptr addrspace(5) %d syncscope("cluster") release, align 8
+
+ ; CHECK: ld.local.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %e.load = load atomic float, ptr addrspace(5) %e syncscope("cluster") acquire, align 4
+ %e.add = fadd float %e.load, 1.
+ ; CHECK: st.local.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ store atomic float %e.add, ptr addrspace(5) %e syncscope("cluster") release, align 4
+
+ ; CHECK: ld.local.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %f.load = load atomic double, ptr addrspace(5) %e syncscope("cluster") acquire, align 8
+ %f.add = fadd double %f.load, 1.
+ ; CHECK: st.local.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ store atomic double %f.add, ptr addrspace(5) %e syncscope("cluster") release, align 8
+
+ ret void
+}
+
+; CHECK-LABEL: local_acq_rel_volatile_cluster
+define void @local_acq_rel_volatile_cluster(ptr addrspace(5) %a, ptr addrspace(5) %b, ptr addrspace(5) %c, ptr addrspace(5) %d, ptr addrspace(5) %e) local_unnamed_addr {
+ ; CHECK: ld.local.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load atomic volatile i8, ptr addrspace(5) %a syncscope("cluster") acquire, align 1
+ %a.add = add i8 %a.load, 1
+ ; CHECK: st.local.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic volatile i8 %a.add, ptr addrspace(5) %a syncscope("cluster") release, align 1
+
+ ; CHECK: ld.local.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load atomic volatile i16, ptr addrspace(5) %b syncscope("cluster") acquire, align 2
+ %b.add = add i16 %b.load, 1
+ ; CHECK: st.local.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic volatile i16 %b.add, ptr addrspace(5) %b syncscope("cluster") release, align 2
+
+ ; CHECK: ld.local.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load atomic volatile i32, ptr addrspace(5) %c syncscope("cluster") acquire, align 4
+ %c.add = add i32 %c.load, 1
+ ; CHECK: st.local.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store atomic volatile i32 %c.add, ptr addrspace(5) %c syncscope("cluster") release, align 4
+
+ ; CHECK: ld.local.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load atomic volatile i64, ptr addrspace(5) %d syncscope("cluster") acquire, align 8
+ %d.add = add i64 %d.load, 1
+ ; CHECK: st.local.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store atomic volatile i64 %d.add, ptr addrspace(5) %d syncscope("cluster") release, align 8
+
+ ; CHECK: ld.local.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %e.load = load atomic volatile float, ptr addrspace(5) %e syncscope("cluster") acquire, align 4
+ %e.add = fadd float %e.load, 1.
+ ; CHECK: st.local.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ store atomic volatile float %e.add, ptr addrspace(5) %e syncscope("cluster") release, align 4
+
+ ; CHECK: ld.local.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %f.load = load atomic volatile double, ptr addrspace(5) %e syncscope("cluster") acquire, align 8
+ %f.add = fadd double %f.load, 1.
+ ; CHECK: st.local.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ store atomic volatile double %f.add, ptr addrspace(5) %e syncscope("cluster") release, align 8
+
+ ret void
+}
+
+; CHECK-LABEL: local_seq_cst_cluster
+define void @local_seq_cst_cluster(ptr addrspace(5) %a, ptr addrspace(5) %b, ptr addrspace(5) %c, ptr addrspace(5) %d, ptr addrspace(5) %e) local_unnamed_addr {
+ ; CHECK: ld.local.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load atomic i8, ptr addrspace(5) %a syncscope("cluster") seq_cst, align 1
+ %a.add = add i8 %a.load, 1
+ ; CHECK: st.local.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic i8 %a.add, ptr addrspace(5) %a syncscope("cluster") seq_cst, align 1
+
+ ; CHECK: ld.local.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load atomic i16, ptr addrspace(5) %b syncscope("cluster") seq_cst, align 2
+ %b.add = add i16 %b.load, 1
+ ; CHECK: st.local.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic i16 %b.add, ptr addrspace(5) %b syncscope("cluster") seq_cst, align 2
+
+ ; CHECK: ld.local.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load atomic i32, ptr addrspace(5) %c syncscope("cluster") seq_cst, align 4
+ %c.add = add i32 %c.load, 1
+ ; CHECK: st.local.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store atomic i32 %c.add, ptr addrspace(5) %c syncscope("cluster") seq_cst, align 4
+
+ ; CHECK: ld.local.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load atomic i64, ptr addrspace(5) %d syncscope("cluster") seq_cst, align 8
+ %d.add = add i64 %d.load, 1
+ ; CHECK: st.local.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store atomic i64 %d.add, ptr addrspace(5) %d syncscope("cluster") seq_cst, align 8
+
+ ; CHECK: ld.local.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %e.load = load atomic float, ptr addrspace(5) %e syncscope("cluster") seq_cst, align 4
+ %e.add = fadd float %e.load, 1.
+ ; CHECK: st.local.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ store atomic float %e.add, ptr addrspace(5) %e syncscope("cluster") seq_cst, align 4
+
+ ; CHECK: ld.local.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %f.load = load atomic double, ptr addrspace(5) %e syncscope("cluster") seq_cst, align 8
+ %f.add = fadd double %f.load, 1.
+ ; CHECK: st.local.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ store atomic double %f.add, ptr addrspace(5) %e syncscope("cluster") seq_cst, align 8
+
+ ret void
+}
+
+; CHECK-LABEL: local_seq_cst_volatile_cluster
+define void @local_seq_cst_volatile_cluster(ptr addrspace(5) %a, ptr addrspace(5) %b, ptr addrspace(5) %c, ptr addrspace(5) %d, ptr addrspace(5) %e) local_unnamed_addr {
+ ; CHECK: ld.local.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %a.load = load atomic volatile i8, ptr addrspace(5) %a syncscope("cluster") seq_cst, align 1
+ %a.add = add i8 %a.load, 1
+ ; CHECK: st.local.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic volatile i8 %a.add, ptr addrspace(5) %a syncscope("cluster") seq_cst, align 1
+
+ ; CHECK: ld.local.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %b.load = load atomic volatile i16, ptr addrspace(5) %b syncscope("cluster") seq_cst, align 2
+ %b.add = add i16 %b.load, 1
+ ; CHECK: st.local.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
+ store atomic volatile i16 %b.add, ptr addrspace(5) %b syncscope("cluster") seq_cst, align 2
+
+ ; CHECK: ld.local.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %c.load = load atomic volatile i32, ptr addrspace(5) %c syncscope("cluster") seq_cst, align 4
+ %c.add = add i32 %c.load, 1
+ ; CHECK: st.local.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store atomic volatile i32 %c.add, ptr addrspace(5) %c syncscope("cluster") seq_cst, align 4
+
+ ; CHECK: ld.local.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %d.load = load atomic volatile i64, ptr addrspace(5) %d syncscope("cluster") seq_cst, align 8
+ %d.add = add i64 %d.load, 1
+ ; CHECK: st.local.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
+ store atomic volatile i64 %d.add, ptr addrspace(5) %d syncscope("cluster") seq_cst, align 8
+
+ ; CHECK: ld.local.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %e.load = load atomic volatile float, ptr addrspace(5) %e syncscope("cluster") seq_cst, align 4
+ %e.add = fadd float %e.load, 1.
+ ; CHECK: st.local.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
+ store atomic volatile float %e.add, ptr addrspace(5) %e syncscope("cluster") seq_cst, align 4
+
+ ; CHECK: ld.local.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %f.load = load atomic volatile double, ptr addrspace(5) %e syncscope("cluster") seq_cst, align 8
+ %f.add = fadd double %f.load, 1.
+ ; CHECK: st.local.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
+ store atomic volatile double %f.add, ptr addrspace(5) %e syncscope("cluster") seq_cst, align 8
+
+ ret void
+}
diff --git a/llvm/test/CodeGen/NVPTX/load-store.ll b/llvm/test/CodeGen/NVPTX/load-store.ll
index aac73f7..f922fd9 100644
--- a/llvm/test/CodeGen/NVPTX/load-store.ll
+++ b/llvm/test/CodeGen/NVPTX/load-store.ll
@@ -9,10 +9,21 @@
; Per https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#vectors
; vectors cannot exceed 128 bits in length, i.e., .v4.u64 is not allowed.
+; TODO: generate PTX that preserves Concurrent Forward Progress
+; for atomic operations on the local statespace
+; by emitting atomic or volatile operations.
+
+; TODO: design exposure for atomic operations on vector types.
+
+; TODO: add weak, atomic, volatile, and atomic volatile tests
+; for the .const and .param statespaces.
+
+; TODO: optimize .sys.shared into .cta.shared or .cluster.shared.
+
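+; In the tests below, plain (non-atomic, non-volatile) accesses are "weak" in
+; PTX terms, hence the *_weak naming. Atomic orderings are checked to lower as
+; follows: unordered and monotonic map to ld/st.volatile on SM60 and to
+; ld/st.relaxed.sys on SM70+; acquire and release map to ld.acquire and
+; st.release; seq_cst adds a fence.sc before an acquire load or release store.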
; generic statespace
-; CHECK-LABEL: generic_plain
-define void @generic_plain(ptr %a, ptr %b, ptr %c, ptr %d) local_unnamed_addr {
+; CHECK-LABEL: generic_weak
+define void @generic_weak(ptr %a, ptr %b, ptr %c, ptr %d) local_unnamed_addr {
; CHECK: ld.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
%a.load = load i8, ptr %a
%a.add = add i8 %a.load, 1
@@ -238,198 +249,198 @@ define void @generic_volatile(ptr %a, ptr %b, ptr %c, ptr %d) local_unnamed_addr
ret void
}
-; CHECK-LABEL: generic_monotonic
-define void @generic_monotonic(ptr %a, ptr %b, ptr %c, ptr %d, ptr %e) local_unnamed_addr {
+; CHECK-LABEL: generic_unordered_sys
+define void @generic_unordered_sys(ptr %a, ptr %b, ptr %c, ptr %d, ptr %e) local_unnamed_addr {
; SM60: ld.volatile.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
; SM70: ld.relaxed.sys.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
- %a.load = load atomic i8, ptr %a monotonic, align 1
+ %a.load = load atomic i8, ptr %a unordered, align 1
%a.add = add i8 %a.load, 1
; SM60: st.volatile.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
; SM70: st.relaxed.sys.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
- store atomic i8 %a.add, ptr %a monotonic, align 1
+ store atomic i8 %a.add, ptr %a unordered, align 1
; SM60: ld.volatile.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
; SM70: ld.relaxed.sys.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
- %b.load = load atomic i16, ptr %b monotonic, align 2
+ %b.load = load atomic i16, ptr %b unordered, align 2
%b.add = add i16 %b.load, 1
; SM60: st.volatile.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
; SM70: st.relaxed.sys.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
- store atomic i16 %b.add, ptr %b monotonic, align 2
+ store atomic i16 %b.add, ptr %b unordered, align 2
; SM60: ld.volatile.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
; SM70: ld.relaxed.sys.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
- %c.load = load atomic i32, ptr %c monotonic, align 4
+ %c.load = load atomic i32, ptr %c unordered, align 4
%c.add = add i32 %c.load, 1
; SM60: st.volatile.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
; SM70: st.relaxed.sys.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
- store atomic i32 %c.add, ptr %c monotonic, align 4
+ store atomic i32 %c.add, ptr %c unordered, align 4
; SM60: ld.volatile.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
; SM70: ld.relaxed.sys.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
- %d.load = load atomic i64, ptr %d monotonic, align 8
+ %d.load = load atomic i64, ptr %d unordered, align 8
%d.add = add i64 %d.load, 1
; SM60: st.volatile.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
; SM70: st.relaxed.sys.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
- store atomic i64 %d.add, ptr %d monotonic, align 8
+ store atomic i64 %d.add, ptr %d unordered, align 8
; SM60: ld.volatile.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
; SM70: ld.relaxed.sys.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
- %e.load = load atomic float, ptr %e monotonic, align 4
+ %e.load = load atomic float, ptr %e unordered, align 4
%e.add = fadd float %e.load, 1.0
; SM60: st.volatile.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
; SM70: st.relaxed.sys.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
- store atomic float %e.add, ptr %e monotonic, align 4
+ store atomic float %e.add, ptr %e unordered, align 4
; SM60: ld.volatile.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
; SM70: ld.relaxed.sys.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
- %f.load = load atomic double, ptr %e monotonic, align 8
+ %f.load = load atomic double, ptr %e unordered, align 8
%f.add = fadd double %f.load, 1.
; SM60: st.volatile.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
; SM70: st.relaxed.sys.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
- store atomic double %f.add, ptr %e monotonic, align 8
+ store atomic double %f.add, ptr %e unordered, align 8
ret void
}
-; CHECK-LABEL: generic_monotonic_volatile
-define void @generic_monotonic_volatile(ptr %a, ptr %b, ptr %c, ptr %d, ptr %e) local_unnamed_addr {
+; CHECK-LABEL: generic_unordered_volatile_sys
+define void @generic_unordered_volatile_sys(ptr %a, ptr %b, ptr %c, ptr %d, ptr %e) local_unnamed_addr {
; CHECK: ld.volatile.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
- %a.load = load atomic volatile i8, ptr %a monotonic, align 1
+ %a.load = load atomic volatile i8, ptr %a unordered, align 1
%a.add = add i8 %a.load, 1
; CHECK: st.volatile.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
- store atomic volatile i8 %a.add, ptr %a monotonic, align 1
+ store atomic volatile i8 %a.add, ptr %a unordered, align 1
; CHECK: ld.volatile.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
- %b.load = load atomic volatile i16, ptr %b monotonic, align 2
+ %b.load = load atomic volatile i16, ptr %b unordered, align 2
%b.add = add i16 %b.load, 1
; CHECK: st.volatile.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
- store atomic volatile i16 %b.add, ptr %b monotonic, align 2
+ store atomic volatile i16 %b.add, ptr %b unordered, align 2
; CHECK: ld.volatile.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
- %c.load = load atomic volatile i32, ptr %c monotonic, align 4
+ %c.load = load atomic volatile i32, ptr %c unordered, align 4
%c.add = add i32 %c.load, 1
; CHECK: st.volatile.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
- store atomic volatile i32 %c.add, ptr %c monotonic, align 4
+ store atomic volatile i32 %c.add, ptr %c unordered, align 4
; CHECK: ld.volatile.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
- %d.load = load atomic volatile i64, ptr %d monotonic, align 8
+ %d.load = load atomic volatile i64, ptr %d unordered, align 8
%d.add = add i64 %d.load, 1
; CHECK: st.volatile.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
- store atomic volatile i64 %d.add, ptr %d monotonic, align 8
+ store atomic volatile i64 %d.add, ptr %d unordered, align 8
; CHECK: ld.volatile.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
- %e.load = load atomic volatile float, ptr %e monotonic, align 4
+ %e.load = load atomic volatile float, ptr %e unordered, align 4
%e.add = fadd float %e.load, 1.0
; CHECK: st.volatile.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
- store atomic volatile float %e.add, ptr %e monotonic, align 4
+ store atomic volatile float %e.add, ptr %e unordered, align 4
; CHECK: ld.volatile.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
- %f.load = load atomic volatile double, ptr %e monotonic, align 8
+ %f.load = load atomic volatile double, ptr %e unordered, align 8
%f.add = fadd double %f.load, 1.
; CHECK: st.volatile.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
- store atomic volatile double %f.add, ptr %e monotonic, align 8
+ store atomic volatile double %f.add, ptr %e unordered, align 8
ret void
}
-; CHECK-LABEL: generic_unordered
-define void @generic_unordered(ptr %a, ptr %b, ptr %c, ptr %d, ptr %e) local_unnamed_addr {
+; CHECK-LABEL: generic_monotonic_sys
+define void @generic_monotonic_sys(ptr %a, ptr %b, ptr %c, ptr %d, ptr %e) local_unnamed_addr {
; SM60: ld.volatile.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
; SM70: ld.relaxed.sys.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
- %a.load = load atomic i8, ptr %a unordered, align 1
+ %a.load = load atomic i8, ptr %a monotonic, align 1
%a.add = add i8 %a.load, 1
; SM60: st.volatile.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
; SM70: st.relaxed.sys.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
- store atomic i8 %a.add, ptr %a unordered, align 1
+ store atomic i8 %a.add, ptr %a monotonic, align 1
; SM60: ld.volatile.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
; SM70: ld.relaxed.sys.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
- %b.load = load atomic i16, ptr %b unordered, align 2
+ %b.load = load atomic i16, ptr %b monotonic, align 2
%b.add = add i16 %b.load, 1
; SM60: st.volatile.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
; SM70: st.relaxed.sys.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
- store atomic i16 %b.add, ptr %b unordered, align 2
+ store atomic i16 %b.add, ptr %b monotonic, align 2
; SM60: ld.volatile.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
; SM70: ld.relaxed.sys.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
- %c.load = load atomic i32, ptr %c unordered, align 4
+ %c.load = load atomic i32, ptr %c monotonic, align 4
%c.add = add i32 %c.load, 1
; SM60: st.volatile.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
; SM70: st.relaxed.sys.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
- store atomic i32 %c.add, ptr %c unordered, align 4
+ store atomic i32 %c.add, ptr %c monotonic, align 4
; SM60: ld.volatile.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
; SM70: ld.relaxed.sys.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
- %d.load = load atomic i64, ptr %d unordered, align 8
+ %d.load = load atomic i64, ptr %d monotonic, align 8
%d.add = add i64 %d.load, 1
; SM60: st.volatile.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
; SM70: st.relaxed.sys.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
- store atomic i64 %d.add, ptr %d unordered, align 8
+ store atomic i64 %d.add, ptr %d monotonic, align 8
; SM60: ld.volatile.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
; SM70: ld.relaxed.sys.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
- %e.load = load atomic float, ptr %e unordered, align 4
- %e.add = fadd float %e.load, 1.0
+ %e.load = load atomic float, ptr %e monotonic, align 4
+ %e.add = fadd float %e.load, 1.
; SM60: st.volatile.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
; SM70: st.relaxed.sys.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
- store atomic float %e.add, ptr %e unordered, align 4
+ store atomic float %e.add, ptr %e monotonic, align 4
; SM60: ld.volatile.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
; SM70: ld.relaxed.sys.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
- %f.load = load atomic double, ptr %e unordered, align 8
+ %f.load = load atomic double, ptr %e monotonic, align 8
%f.add = fadd double %f.load, 1.
; SM60: st.volatile.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
; SM70: st.relaxed.sys.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
- store atomic double %f.add, ptr %e unordered, align 8
+ store atomic double %f.add, ptr %e monotonic, align 8
ret void
}
-; CHECK-LABEL: generic_unordered_volatile
-define void @generic_unordered_volatile(ptr %a, ptr %b, ptr %c, ptr %d, ptr %e) local_unnamed_addr {
+; CHECK-LABEL: generic_monotonic_volatile_sys
+define void @generic_monotonic_volatile_sys(ptr %a, ptr %b, ptr %c, ptr %d, ptr %e) local_unnamed_addr {
; CHECK: ld.volatile.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
- %a.load = load atomic volatile i8, ptr %a unordered, align 1
+ %a.load = load atomic volatile i8, ptr %a monotonic, align 1
%a.add = add i8 %a.load, 1
; CHECK: st.volatile.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
- store atomic volatile i8 %a.add, ptr %a unordered, align 1
+ store atomic volatile i8 %a.add, ptr %a monotonic, align 1
; CHECK: ld.volatile.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
- %b.load = load atomic volatile i16, ptr %b unordered, align 2
+ %b.load = load atomic volatile i16, ptr %b monotonic, align 2
%b.add = add i16 %b.load, 1
; CHECK: st.volatile.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
- store atomic volatile i16 %b.add, ptr %b unordered, align 2
+ store atomic volatile i16 %b.add, ptr %b monotonic, align 2
; CHECK: ld.volatile.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
- %c.load = load atomic volatile i32, ptr %c unordered, align 4
+ %c.load = load atomic volatile i32, ptr %c monotonic, align 4
%c.add = add i32 %c.load, 1
; CHECK: st.volatile.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
- store atomic volatile i32 %c.add, ptr %c unordered, align 4
+ store atomic volatile i32 %c.add, ptr %c monotonic, align 4
; CHECK: ld.volatile.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
- %d.load = load atomic volatile i64, ptr %d unordered, align 8
+ %d.load = load atomic volatile i64, ptr %d monotonic, align 8
%d.add = add i64 %d.load, 1
; CHECK: st.volatile.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
- store atomic volatile i64 %d.add, ptr %d unordered, align 8
+ store atomic volatile i64 %d.add, ptr %d monotonic, align 8
; CHECK: ld.volatile.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
- %e.load = load atomic volatile float, ptr %e unordered, align 4
- %e.add = fadd float %e.load, 1.0
+ %e.load = load atomic volatile float, ptr %e monotonic, align 4
+ %e.add = fadd float %e.load, 1.
; CHECK: st.volatile.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
- store atomic volatile float %e.add, ptr %e unordered, align 4
+ store atomic volatile float %e.add, ptr %e monotonic, align 4
; CHECK: ld.volatile.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
- %f.load = load atomic volatile double, ptr %e unordered, align 8
+ %f.load = load atomic volatile double, ptr %e monotonic, align 8
%f.add = fadd double %f.load, 1.
; CHECK: st.volatile.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
- store atomic volatile double %f.add, ptr %e unordered, align 8
+ store atomic volatile double %f.add, ptr %e monotonic, align 8
ret void
}
;; global statespace
-; CHECK-LABEL: global_plain
-define void @global_plain(ptr addrspace(1) %a, ptr addrspace(1) %b, ptr addrspace(1) %c, ptr addrspace(1) %d) local_unnamed_addr {
+; CHECK-LABEL: global_weak
+define void @global_weak(ptr addrspace(1) %a, ptr addrspace(1) %b, ptr addrspace(1) %c, ptr addrspace(1) %d) local_unnamed_addr {
; CHECK: ld.global.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
%a.load = load i8, ptr addrspace(1) %a
%a.add = add i8 %a.load, 1
@@ -630,222 +641,222 @@ define void @global_volatile(ptr addrspace(1) %a, ptr addrspace(1) %b, ptr addrs
ret void
}
-; CHECK-LABEL: global_monotonic
-define void @global_monotonic(ptr addrspace(1) %a, ptr addrspace(1) %b, ptr addrspace(1) %c, ptr addrspace(1) %d, ptr addrspace(1) %e) local_unnamed_addr {
+; CHECK-LABEL: global_unordered_sys
+define void @global_unordered_sys(ptr addrspace(1) %a, ptr addrspace(1) %b, ptr addrspace(1) %c, ptr addrspace(1) %d, ptr addrspace(1) %e) local_unnamed_addr {
; SM60: ld.volatile.global.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
; SM70: ld.relaxed.sys.global.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
- %a.load = load atomic i8, ptr addrspace(1) %a monotonic, align 1
+ %a.load = load atomic i8, ptr addrspace(1) %a unordered, align 1
%a.add = add i8 %a.load, 1
; SM60: st.volatile.global.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
; SM70: st.relaxed.sys.global.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
- store atomic i8 %a.add, ptr addrspace(1) %a monotonic, align 1
+ store atomic i8 %a.add, ptr addrspace(1) %a unordered, align 1
; SM60: ld.volatile.global.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
; SM70: ld.relaxed.sys.global.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
- %b.load = load atomic i16, ptr addrspace(1) %b monotonic, align 2
+ %b.load = load atomic i16, ptr addrspace(1) %b unordered, align 2
%b.add = add i16 %b.load, 1
; SM60: st.volatile.global.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
; SM70: st.relaxed.sys.global.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
- store atomic i16 %b.add, ptr addrspace(1) %b monotonic, align 2
+ store atomic i16 %b.add, ptr addrspace(1) %b unordered, align 2
; SM60: ld.volatile.global.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
; SM70: ld.relaxed.sys.global.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
- %c.load = load atomic i32, ptr addrspace(1) %c monotonic, align 4
+ %c.load = load atomic i32, ptr addrspace(1) %c unordered, align 4
%c.add = add i32 %c.load, 1
; SM60: st.volatile.global.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
; SM70: st.relaxed.sys.global.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
- store atomic i32 %c.add, ptr addrspace(1) %c monotonic, align 4
+ store atomic i32 %c.add, ptr addrspace(1) %c unordered, align 4
; SM60: ld.volatile.global.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
; SM70: ld.relaxed.sys.global.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
- %d.load = load atomic i64, ptr addrspace(1) %d monotonic, align 8
+ %d.load = load atomic i64, ptr addrspace(1) %d unordered, align 8
%d.add = add i64 %d.load, 1
; SM60: st.volatile.global.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
; SM70: st.relaxed.sys.global.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
- store atomic i64 %d.add, ptr addrspace(1) %d monotonic, align 8
+ store atomic i64 %d.add, ptr addrspace(1) %d unordered, align 8
; SM60: ld.volatile.global.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
; SM70: ld.relaxed.sys.global.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
- %e.load = load atomic float, ptr addrspace(1) %e monotonic, align 4
+ %e.load = load atomic float, ptr addrspace(1) %e unordered, align 4
%e.add = fadd float %e.load, 1.0
; SM60: st.volatile.global.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
; SM70: st.relaxed.sys.global.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
- store atomic float %e.add, ptr addrspace(1) %e monotonic, align 4
+ store atomic float %e.add, ptr addrspace(1) %e unordered, align 4
; SM60: ld.volatile.global.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
; SM70: ld.relaxed.sys.global.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
- %f.load = load atomic double, ptr addrspace(1) %e monotonic, align 8
+ %f.load = load atomic double, ptr addrspace(1) %e unordered, align 8
%f.add = fadd double %f.load, 1.
; SM60: st.volatile.global.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
; SM70: st.relaxed.sys.global.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
- store atomic double %f.add, ptr addrspace(1) %e monotonic, align 8
+ store atomic double %f.add, ptr addrspace(1) %e unordered, align 8
ret void
}
-; CHECK-LABEL: global_monotonic_volatile
-define void @global_monotonic_volatile(ptr addrspace(1) %a, ptr addrspace(1) %b, ptr addrspace(1) %c, ptr addrspace(1) %d, ptr addrspace(1) %e) local_unnamed_addr {
+; CHECK-LABEL: global_unordered_volatile_sys
+define void @global_unordered_volatile_sys(ptr addrspace(1) %a, ptr addrspace(1) %b, ptr addrspace(1) %c, ptr addrspace(1) %d, ptr addrspace(1) %e) local_unnamed_addr {
; SM60: ld.volatile.global.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
; SM70: ld.mmio.relaxed.sys.global.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
- %a.load = load atomic volatile i8, ptr addrspace(1) %a monotonic, align 1
+ %a.load = load atomic volatile i8, ptr addrspace(1) %a unordered, align 1
%a.add = add i8 %a.load, 1
; SM60: st.volatile.global.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
; SM70: st.mmio.relaxed.sys.global.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
- store atomic volatile i8 %a.add, ptr addrspace(1) %a monotonic, align 1
+ store atomic volatile i8 %a.add, ptr addrspace(1) %a unordered, align 1
; SM60: ld.volatile.global.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
; SM70: ld.mmio.relaxed.sys.global.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
- %b.load = load atomic volatile i16, ptr addrspace(1) %b monotonic, align 2
+ %b.load = load atomic volatile i16, ptr addrspace(1) %b unordered, align 2
%b.add = add i16 %b.load, 1
; SM60: st.volatile.global.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
; SM70: st.mmio.relaxed.sys.global.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
- store atomic volatile i16 %b.add, ptr addrspace(1) %b monotonic, align 2
+ store atomic volatile i16 %b.add, ptr addrspace(1) %b unordered, align 2
; SM60: ld.volatile.global.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
; SM70: ld.mmio.relaxed.sys.global.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
- %c.load = load atomic volatile i32, ptr addrspace(1) %c monotonic, align 4
+ %c.load = load atomic volatile i32, ptr addrspace(1) %c unordered, align 4
%c.add = add i32 %c.load, 1
; SM60: st.volatile.global.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
; SM70: st.mmio.relaxed.sys.global.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
- store atomic volatile i32 %c.add, ptr addrspace(1) %c monotonic, align 4
+ store atomic volatile i32 %c.add, ptr addrspace(1) %c unordered, align 4
; SM60: ld.volatile.global.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
; SM70: ld.mmio.relaxed.sys.global.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
- %d.load = load atomic volatile i64, ptr addrspace(1) %d monotonic, align 8
+ %d.load = load atomic volatile i64, ptr addrspace(1) %d unordered, align 8
%d.add = add i64 %d.load, 1
; SM60: st.volatile.global.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
; SM70: st.mmio.relaxed.sys.global.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
- store atomic volatile i64 %d.add, ptr addrspace(1) %d monotonic, align 8
+ store atomic volatile i64 %d.add, ptr addrspace(1) %d unordered, align 8
; SM60: ld.volatile.global.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
; SM70: ld.mmio.relaxed.sys.global.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
- %e.load = load atomic volatile float, ptr addrspace(1) %e monotonic, align 4
+ %e.load = load atomic volatile float, ptr addrspace(1) %e unordered, align 4
%e.add = fadd float %e.load, 1.0
; SM60: st.volatile.global.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
; SM70: st.mmio.relaxed.sys.global.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
- store atomic volatile float %e.add, ptr addrspace(1) %e monotonic, align 4
+ store atomic volatile float %e.add, ptr addrspace(1) %e unordered, align 4
; SM60: ld.volatile.global.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
; SM70: ld.mmio.relaxed.sys.global.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
- %f.load = load atomic volatile double, ptr addrspace(1) %e monotonic, align 8
+ %f.load = load atomic volatile double, ptr addrspace(1) %e unordered, align 8
%f.add = fadd double %f.load, 1.
; SM60: st.volatile.global.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
; SM70: st.mmio.relaxed.sys.global.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
- store atomic volatile double %f.add, ptr addrspace(1) %e monotonic, align 8
+ store atomic volatile double %f.add, ptr addrspace(1) %e unordered, align 8
ret void
}
-; CHECK-LABEL: global_unordered
-define void @global_unordered(ptr addrspace(1) %a, ptr addrspace(1) %b, ptr addrspace(1) %c, ptr addrspace(1) %d, ptr addrspace(1) %e) local_unnamed_addr {
+; CHECK-LABEL: global_monotonic_sys
+define void @global_monotonic_sys(ptr addrspace(1) %a, ptr addrspace(1) %b, ptr addrspace(1) %c, ptr addrspace(1) %d, ptr addrspace(1) %e) local_unnamed_addr {
; SM60: ld.volatile.global.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
; SM70: ld.relaxed.sys.global.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
- %a.load = load atomic i8, ptr addrspace(1) %a unordered, align 1
+ %a.load = load atomic i8, ptr addrspace(1) %a monotonic, align 1
%a.add = add i8 %a.load, 1
; SM60: st.volatile.global.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
; SM70: st.relaxed.sys.global.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
- store atomic i8 %a.add, ptr addrspace(1) %a unordered, align 1
+ store atomic i8 %a.add, ptr addrspace(1) %a monotonic, align 1
; SM60: ld.volatile.global.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
; SM70: ld.relaxed.sys.global.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
- %b.load = load atomic i16, ptr addrspace(1) %b unordered, align 2
+ %b.load = load atomic i16, ptr addrspace(1) %b monotonic, align 2
%b.add = add i16 %b.load, 1
; SM60: st.volatile.global.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
; SM70: st.relaxed.sys.global.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
- store atomic i16 %b.add, ptr addrspace(1) %b unordered, align 2
+ store atomic i16 %b.add, ptr addrspace(1) %b monotonic, align 2
; SM60: ld.volatile.global.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
; SM70: ld.relaxed.sys.global.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
- %c.load = load atomic i32, ptr addrspace(1) %c unordered, align 4
+ %c.load = load atomic i32, ptr addrspace(1) %c monotonic, align 4
%c.add = add i32 %c.load, 1
; SM60: st.volatile.global.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
; SM70: st.relaxed.sys.global.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
- store atomic i32 %c.add, ptr addrspace(1) %c unordered, align 4
+ store atomic i32 %c.add, ptr addrspace(1) %c monotonic, align 4
; SM60: ld.volatile.global.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
; SM70: ld.relaxed.sys.global.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
- %d.load = load atomic i64, ptr addrspace(1) %d unordered, align 8
+ %d.load = load atomic i64, ptr addrspace(1) %d monotonic, align 8
%d.add = add i64 %d.load, 1
; SM60: st.volatile.global.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
; SM70: st.relaxed.sys.global.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
- store atomic i64 %d.add, ptr addrspace(1) %d unordered, align 8
+ store atomic i64 %d.add, ptr addrspace(1) %d monotonic, align 8
; SM60: ld.volatile.global.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
; SM70: ld.relaxed.sys.global.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
- %e.load = load atomic float, ptr addrspace(1) %e unordered, align 4
- %e.add = fadd float %e.load, 1.0
+ %e.load = load atomic float, ptr addrspace(1) %e monotonic, align 4
+ %e.add = fadd float %e.load, 1.
; SM60: st.volatile.global.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
; SM70: st.relaxed.sys.global.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
- store atomic float %e.add, ptr addrspace(1) %e unordered, align 4
+ store atomic float %e.add, ptr addrspace(1) %e monotonic, align 4
; SM60: ld.volatile.global.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
; SM70: ld.relaxed.sys.global.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
- %f.load = load atomic double, ptr addrspace(1) %e unordered, align 8
+ %f.load = load atomic double, ptr addrspace(1) %e monotonic, align 8
%f.add = fadd double %f.load, 1.
; SM60: st.volatile.global.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
; SM70: st.relaxed.sys.global.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
- store atomic double %f.add, ptr addrspace(1) %e unordered, align 8
+ store atomic double %f.add, ptr addrspace(1) %e monotonic, align 8
ret void
}
-; CHECK-LABEL: global_unordered_volatile
-define void @global_unordered_volatile(ptr addrspace(1) %a, ptr addrspace(1) %b, ptr addrspace(1) %c, ptr addrspace(1) %d, ptr addrspace(1) %e) local_unnamed_addr {
+; CHECK-LABEL: global_monotonic_volatile_sys
+define void @global_monotonic_volatile_sys(ptr addrspace(1) %a, ptr addrspace(1) %b, ptr addrspace(1) %c, ptr addrspace(1) %d, ptr addrspace(1) %e) local_unnamed_addr {
; SM60: ld.volatile.global.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
; SM70: ld.mmio.relaxed.sys.global.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
- %a.load = load atomic volatile i8, ptr addrspace(1) %a unordered, align 1
+ %a.load = load atomic volatile i8, ptr addrspace(1) %a monotonic, align 1
%a.add = add i8 %a.load, 1
; SM60: st.volatile.global.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
; SM70: st.mmio.relaxed.sys.global.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
- store atomic volatile i8 %a.add, ptr addrspace(1) %a unordered, align 1
+ store atomic volatile i8 %a.add, ptr addrspace(1) %a monotonic, align 1
; SM60: ld.volatile.global.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
; SM70: ld.mmio.relaxed.sys.global.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
- %b.load = load atomic volatile i16, ptr addrspace(1) %b unordered, align 2
+ %b.load = load atomic volatile i16, ptr addrspace(1) %b monotonic, align 2
%b.add = add i16 %b.load, 1
; SM60: st.volatile.global.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
; SM70: st.mmio.relaxed.sys.global.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
- store atomic volatile i16 %b.add, ptr addrspace(1) %b unordered, align 2
+ store atomic volatile i16 %b.add, ptr addrspace(1) %b monotonic, align 2
; SM60: ld.volatile.global.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
; SM70: ld.mmio.relaxed.sys.global.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
- %c.load = load atomic volatile i32, ptr addrspace(1) %c unordered, align 4
+ %c.load = load atomic volatile i32, ptr addrspace(1) %c monotonic, align 4
%c.add = add i32 %c.load, 1
; SM60: st.volatile.global.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
; SM70: st.mmio.relaxed.sys.global.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
- store atomic volatile i32 %c.add, ptr addrspace(1) %c unordered, align 4
+ store atomic volatile i32 %c.add, ptr addrspace(1) %c monotonic, align 4
; SM60: ld.volatile.global.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
; SM70: ld.mmio.relaxed.sys.global.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
- %d.load = load atomic volatile i64, ptr addrspace(1) %d unordered, align 8
+ %d.load = load atomic volatile i64, ptr addrspace(1) %d monotonic, align 8
%d.add = add i64 %d.load, 1
; SM60: st.volatile.global.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
; SM70: st.mmio.relaxed.sys.global.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
- store atomic volatile i64 %d.add, ptr addrspace(1) %d unordered, align 8
+ store atomic volatile i64 %d.add, ptr addrspace(1) %d monotonic, align 8
; SM60: ld.volatile.global.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
; SM70: ld.mmio.relaxed.sys.global.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
- %e.load = load atomic volatile float, ptr addrspace(1) %e unordered, align 4
- %e.add = fadd float %e.load, 1.0
+ %e.load = load atomic volatile float, ptr addrspace(1) %e monotonic, align 4
+ %e.add = fadd float %e.load, 1.
; SM60: st.volatile.global.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
; SM70: st.mmio.relaxed.sys.global.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
- store atomic volatile float %e.add, ptr addrspace(1) %e unordered, align 4
+ store atomic volatile float %e.add, ptr addrspace(1) %e monotonic, align 4
; SM60: ld.volatile.global.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
; SM70: ld.mmio.relaxed.sys.global.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
- %f.load = load atomic volatile double, ptr addrspace(1) %e unordered, align 8
+ %f.load = load atomic volatile double, ptr addrspace(1) %e monotonic, align 8
%f.add = fadd double %f.load, 1.
; SM60: st.volatile.global.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
; SM70: st.mmio.relaxed.sys.global.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
- store atomic volatile double %f.add, ptr addrspace(1) %e unordered, align 8
+ store atomic volatile double %f.add, ptr addrspace(1) %e monotonic, align 8
ret void
}
;; shared statespace
-; CHECK-LABEL: shared_plain
-define void @shared_plain(ptr addrspace(3) %a, ptr addrspace(3) %b, ptr addrspace(3) %c, ptr addrspace(3) %d) local_unnamed_addr {
+; CHECK-LABEL: shared_weak
+define void @shared_weak(ptr addrspace(3) %a, ptr addrspace(3) %b, ptr addrspace(3) %c, ptr addrspace(3) %d) local_unnamed_addr {
; CHECK: ld.shared.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
%a.load = load i8, ptr addrspace(3) %a
%a.add = add i8 %a.load, 1
@@ -1046,202 +1057,198 @@ define void @shared_volatile(ptr addrspace(3) %a, ptr addrspace(3) %b, ptr addrs
ret void
}
-; CHECK-LABEL: shared_monotonic
-define void @shared_monotonic(ptr addrspace(3) %a, ptr addrspace(3) %b, ptr addrspace(3) %c, ptr addrspace(3) %d, ptr addrspace(3) %e) local_unnamed_addr {
- ; TODO: optimize .sys.shared to .cta.shared or .cluster.shared.
-
+; CHECK-LABEL: shared_unordered_sys
+define void @shared_unordered_sys(ptr addrspace(3) %a, ptr addrspace(3) %b, ptr addrspace(3) %c, ptr addrspace(3) %d, ptr addrspace(3) %e) local_unnamed_addr {
; SM60: ld.volatile.shared.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
; SM70: ld.relaxed.sys.shared.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
- %a.load = load atomic i8, ptr addrspace(3) %a monotonic, align 1
+ %a.load = load atomic i8, ptr addrspace(3) %a unordered, align 1
%a.add = add i8 %a.load, 1
; SM60: st.volatile.shared.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
; SM70: st.relaxed.sys.shared.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
- store atomic i8 %a.add, ptr addrspace(3) %a monotonic, align 1
+ store atomic i8 %a.add, ptr addrspace(3) %a unordered, align 1
; SM60: ld.volatile.shared.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
; SM70: ld.relaxed.sys.shared.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
- %b.load = load atomic i16, ptr addrspace(3) %b monotonic, align 2
+ %b.load = load atomic i16, ptr addrspace(3) %b unordered, align 2
%b.add = add i16 %b.load, 1
; SM60: st.volatile.shared.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
; SM70: st.relaxed.sys.shared.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
- store atomic i16 %b.add, ptr addrspace(3) %b monotonic, align 2
+ store atomic i16 %b.add, ptr addrspace(3) %b unordered, align 2
; SM60: ld.volatile.shared.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
; SM70: ld.relaxed.sys.shared.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
- %c.load = load atomic i32, ptr addrspace(3) %c monotonic, align 4
+ %c.load = load atomic i32, ptr addrspace(3) %c unordered, align 4
%c.add = add i32 %c.load, 1
; SM60: st.volatile.shared.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
; SM70: st.relaxed.sys.shared.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
- store atomic i32 %c.add, ptr addrspace(3) %c monotonic, align 4
+ store atomic i32 %c.add, ptr addrspace(3) %c unordered, align 4
; SM60: ld.volatile.shared.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
; SM70: ld.relaxed.sys.shared.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
- %d.load = load atomic i64, ptr addrspace(3) %d monotonic, align 8
+ %d.load = load atomic i64, ptr addrspace(3) %d unordered, align 8
%d.add = add i64 %d.load, 1
; SM60: st.volatile.shared.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
; SM70: st.relaxed.sys.shared.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
- store atomic i64 %d.add, ptr addrspace(3) %d monotonic, align 8
+ store atomic i64 %d.add, ptr addrspace(3) %d unordered, align 8
; SM60: ld.volatile.shared.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
; SM70: ld.relaxed.sys.shared.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
- %e.load = load atomic float, ptr addrspace(3) %e monotonic, align 4
+ %e.load = load atomic float, ptr addrspace(3) %e unordered, align 4
%e.add = fadd float %e.load, 1.0
; SM60: st.volatile.shared.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
; SM70: st.relaxed.sys.shared.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
- store atomic float %e.add, ptr addrspace(3) %e monotonic, align 4
+ store atomic float %e.add, ptr addrspace(3) %e unordered, align 4
; SM60: ld.volatile.shared.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
; SM70: ld.relaxed.sys.shared.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
- %f.load = load atomic double, ptr addrspace(3) %e monotonic, align 8
+ %f.load = load atomic double, ptr addrspace(3) %e unordered, align 8
%f.add = fadd double %f.load, 1.
; SM60: st.volatile.shared.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
; SM70: st.relaxed.sys.shared.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
- store atomic double %f.add, ptr addrspace(3) %e monotonic, align 8
+ store atomic double %f.add, ptr addrspace(3) %e unordered, align 8
ret void
}
-; CHECK-LABEL: shared_monotonic_volatile
-define void @shared_monotonic_volatile(ptr addrspace(3) %a, ptr addrspace(3) %b, ptr addrspace(3) %c, ptr addrspace(3) %d, ptr addrspace(3) %e) local_unnamed_addr {
+; CHECK-LABEL: shared_unordered_volatile_sys
+define void @shared_unordered_volatile_sys(ptr addrspace(3) %a, ptr addrspace(3) %b, ptr addrspace(3) %c, ptr addrspace(3) %d, ptr addrspace(3) %e) local_unnamed_addr {
; CHECK: ld.volatile.shared.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
- %a.load = load atomic volatile i8, ptr addrspace(3) %a monotonic, align 1
+ %a.load = load atomic volatile i8, ptr addrspace(3) %a unordered, align 1
%a.add = add i8 %a.load, 1
; CHECK: st.volatile.shared.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
- store atomic volatile i8 %a.add, ptr addrspace(3) %a monotonic, align 1
+ store atomic volatile i8 %a.add, ptr addrspace(3) %a unordered, align 1
; CHECK: ld.volatile.shared.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
- %b.load = load atomic volatile i16, ptr addrspace(3) %b monotonic, align 2
+ %b.load = load atomic volatile i16, ptr addrspace(3) %b unordered, align 2
%b.add = add i16 %b.load, 1
; CHECK: st.volatile.shared.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
- store atomic volatile i16 %b.add, ptr addrspace(3) %b monotonic, align 2
+ store atomic volatile i16 %b.add, ptr addrspace(3) %b unordered, align 2
; CHECK: ld.volatile.shared.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
- %c.load = load atomic volatile i32, ptr addrspace(3) %c monotonic, align 4
+ %c.load = load atomic volatile i32, ptr addrspace(3) %c unordered, align 4
%c.add = add i32 %c.load, 1
; CHECK: st.volatile.shared.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
- store atomic volatile i32 %c.add, ptr addrspace(3) %c monotonic, align 4
+ store atomic volatile i32 %c.add, ptr addrspace(3) %c unordered, align 4
; CHECK: ld.volatile.shared.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
- %d.load = load atomic volatile i64, ptr addrspace(3) %d monotonic, align 8
+ %d.load = load atomic volatile i64, ptr addrspace(3) %d unordered, align 8
%d.add = add i64 %d.load, 1
; CHECK: st.volatile.shared.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
- store atomic volatile i64 %d.add, ptr addrspace(3) %d monotonic, align 8
+ store atomic volatile i64 %d.add, ptr addrspace(3) %d unordered, align 8
; CHECK: ld.volatile.shared.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
- %e.load = load atomic volatile float, ptr addrspace(3) %e monotonic, align 4
+ %e.load = load atomic volatile float, ptr addrspace(3) %e unordered, align 4
%e.add = fadd float %e.load, 1.0
; CHECK: st.volatile.shared.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
- store atomic volatile float %e.add, ptr addrspace(3) %e monotonic, align 4
+ store atomic volatile float %e.add, ptr addrspace(3) %e unordered, align 4
; CHECK: ld.volatile.shared.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
- %f.load = load atomic volatile double, ptr addrspace(3) %e monotonic, align 8
+ %f.load = load atomic volatile double, ptr addrspace(3) %e unordered, align 8
%f.add = fadd double %f.load, 1.
; CHECK: st.volatile.shared.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
- store atomic volatile double %f.add, ptr addrspace(3) %e monotonic, align 8
+ store atomic volatile double %f.add, ptr addrspace(3) %e unordered, align 8
ret void
}
-; CHECK-LABEL: shared_unordered
-define void @shared_unordered(ptr addrspace(3) %a, ptr addrspace(3) %b, ptr addrspace(3) %c, ptr addrspace(3) %d, ptr addrspace(3) %e) local_unnamed_addr {
- ; TODO: optimize .sys.shared to .cta.shared or .cluster.shared.
-
+; CHECK-LABEL: shared_monotonic_sys
+define void @shared_monotonic_sys(ptr addrspace(3) %a, ptr addrspace(3) %b, ptr addrspace(3) %c, ptr addrspace(3) %d, ptr addrspace(3) %e) local_unnamed_addr {
; SM60: ld.volatile.shared.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
; SM70: ld.relaxed.sys.shared.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
- %a.load = load atomic i8, ptr addrspace(3) %a unordered, align 1
+ %a.load = load atomic i8, ptr addrspace(3) %a monotonic, align 1
%a.add = add i8 %a.load, 1
; SM60: st.volatile.shared.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
; SM70: st.relaxed.sys.shared.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
- store atomic i8 %a.add, ptr addrspace(3) %a unordered, align 1
+ store atomic i8 %a.add, ptr addrspace(3) %a monotonic, align 1
; SM60: ld.volatile.shared.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
; SM70: ld.relaxed.sys.shared.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
- %b.load = load atomic i16, ptr addrspace(3) %b unordered, align 2
+ %b.load = load atomic i16, ptr addrspace(3) %b monotonic, align 2
%b.add = add i16 %b.load, 1
; SM60: st.volatile.shared.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
; SM70: st.relaxed.sys.shared.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
- store atomic i16 %b.add, ptr addrspace(3) %b unordered, align 2
+ store atomic i16 %b.add, ptr addrspace(3) %b monotonic, align 2
; SM60: ld.volatile.shared.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
; SM70: ld.relaxed.sys.shared.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
- %c.load = load atomic i32, ptr addrspace(3) %c unordered, align 4
+ %c.load = load atomic i32, ptr addrspace(3) %c monotonic, align 4
%c.add = add i32 %c.load, 1
; SM60: st.volatile.shared.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
; SM70: st.relaxed.sys.shared.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
- store atomic i32 %c.add, ptr addrspace(3) %c unordered, align 4
+ store atomic i32 %c.add, ptr addrspace(3) %c monotonic, align 4
; SM60: ld.volatile.shared.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
; SM70: ld.relaxed.sys.shared.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
- %d.load = load atomic i64, ptr addrspace(3) %d unordered, align 8
+ %d.load = load atomic i64, ptr addrspace(3) %d monotonic, align 8
%d.add = add i64 %d.load, 1
; SM60: st.volatile.shared.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
; SM70: st.relaxed.sys.shared.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
- store atomic i64 %d.add, ptr addrspace(3) %d unordered, align 8
+ store atomic i64 %d.add, ptr addrspace(3) %d monotonic, align 8
; SM60: ld.volatile.shared.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
; SM70: ld.relaxed.sys.shared.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
- %e.load = load atomic float, ptr addrspace(3) %e unordered, align 4
- %e.add = fadd float %e.load, 1.0
+ %e.load = load atomic float, ptr addrspace(3) %e monotonic, align 4
+ %e.add = fadd float %e.load, 1.
; SM60: st.volatile.shared.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
; SM70: st.relaxed.sys.shared.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
- store atomic float %e.add, ptr addrspace(3) %e unordered, align 4
+ store atomic float %e.add, ptr addrspace(3) %e monotonic, align 4
; SM60: ld.volatile.shared.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
; SM70: ld.relaxed.sys.shared.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
- %f.load = load atomic double, ptr addrspace(3) %e unordered, align 8
+ %f.load = load atomic double, ptr addrspace(3) %e monotonic, align 8
%f.add = fadd double %f.load, 1.
; SM60: st.volatile.shared.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
; SM70: st.relaxed.sys.shared.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
- store atomic double %f.add, ptr addrspace(3) %e unordered, align 8
+ store atomic double %f.add, ptr addrspace(3) %e monotonic, align 8
ret void
}
-; CHECK-LABEL: shared_unordered_volatile
-define void @shared_unordered_volatile(ptr addrspace(3) %a, ptr addrspace(3) %b, ptr addrspace(3) %c, ptr addrspace(3) %d, ptr addrspace(3) %e) local_unnamed_addr {
+; CHECK-LABEL: shared_monotonic_volatile_sys
+define void @shared_monotonic_volatile_sys(ptr addrspace(3) %a, ptr addrspace(3) %b, ptr addrspace(3) %c, ptr addrspace(3) %d, ptr addrspace(3) %e) local_unnamed_addr {
; CHECK: ld.volatile.shared.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
- %a.load = load atomic volatile i8, ptr addrspace(3) %a unordered, align 1
+ %a.load = load atomic volatile i8, ptr addrspace(3) %a monotonic, align 1
%a.add = add i8 %a.load, 1
; CHECK: st.volatile.shared.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
- store atomic volatile i8 %a.add, ptr addrspace(3) %a unordered, align 1
+ store atomic volatile i8 %a.add, ptr addrspace(3) %a monotonic, align 1
; CHECK: ld.volatile.shared.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
- %b.load = load atomic volatile i16, ptr addrspace(3) %b unordered, align 2
+ %b.load = load atomic volatile i16, ptr addrspace(3) %b monotonic, align 2
%b.add = add i16 %b.load, 1
; CHECK: st.volatile.shared.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
- store atomic volatile i16 %b.add, ptr addrspace(3) %b unordered, align 2
+ store atomic volatile i16 %b.add, ptr addrspace(3) %b monotonic, align 2
; CHECK: ld.volatile.shared.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
- %c.load = load atomic volatile i32, ptr addrspace(3) %c unordered, align 4
+ %c.load = load atomic volatile i32, ptr addrspace(3) %c monotonic, align 4
%c.add = add i32 %c.load, 1
; CHECK: st.volatile.shared.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
- store atomic volatile i32 %c.add, ptr addrspace(3) %c unordered, align 4
+ store atomic volatile i32 %c.add, ptr addrspace(3) %c monotonic, align 4
; CHECK: ld.volatile.shared.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
- %d.load = load atomic volatile i64, ptr addrspace(3) %d unordered, align 8
+ %d.load = load atomic volatile i64, ptr addrspace(3) %d monotonic, align 8
%d.add = add i64 %d.load, 1
; CHECK: st.volatile.shared.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
- store atomic volatile i64 %d.add, ptr addrspace(3) %d unordered, align 8
+ store atomic volatile i64 %d.add, ptr addrspace(3) %d monotonic, align 8
; CHECK: ld.volatile.shared.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
- %e.load = load atomic volatile float, ptr addrspace(3) %e unordered, align 4
- %e.add = fadd float %e.load, 1.0
+ %e.load = load atomic volatile float, ptr addrspace(3) %e monotonic, align 4
+ %e.add = fadd float %e.load, 1.
; CHECK: st.volatile.shared.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
- store atomic volatile float %e.add, ptr addrspace(3) %e unordered, align 4
+ store atomic volatile float %e.add, ptr addrspace(3) %e monotonic, align 4
; CHECK: ld.volatile.shared.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
- %f.load = load atomic volatile double, ptr addrspace(3) %e unordered, align 8
+ %f.load = load atomic volatile double, ptr addrspace(3) %e monotonic, align 8
%f.add = fadd double %f.load, 1.
; CHECK: st.volatile.shared.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
- store atomic volatile double %f.add, ptr addrspace(3) %e unordered, align 8
+ store atomic volatile double %f.add, ptr addrspace(3) %e monotonic, align 8
ret void
}
;; local statespace
-; CHECK-LABEL: local_plain
-define void @local_plain(ptr addrspace(5) %a, ptr addrspace(5) %b, ptr addrspace(5) %c, ptr addrspace(5) %d) local_unnamed_addr {
+; CHECK-LABEL: local_weak
+define void @local_weak(ptr addrspace(5) %a, ptr addrspace(5) %b, ptr addrspace(5) %c, ptr addrspace(5) %d) local_unnamed_addr {
; CHECK: ld.local.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
%a.load = load i8, ptr addrspace(5) %a
%a.add = add i8 %a.load, 1
@@ -1343,9 +1350,6 @@ define void @local_plain(ptr addrspace(5) %a, ptr addrspace(5) %b, ptr addrspace
; CHECK-LABEL: local_volatile
define void @local_volatile(ptr addrspace(5) %a, ptr addrspace(5) %b, ptr addrspace(5) %c, ptr addrspace(5) %d) local_unnamed_addr {
- ; TODO: generate PTX that preserves Concurrent Forward Progress
- ; by using volatile operations.
-
; CHECK: ld.local.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
%a.load = load volatile i8, ptr addrspace(5) %a
%a.add = add i8 %a.load, 1
@@ -1445,175 +1449,166 @@ define void @local_volatile(ptr addrspace(5) %a, ptr addrspace(5) %b, ptr addrsp
ret void
}
-; CHECK-LABEL: local_monotonic
-define void @local_monotonic(ptr addrspace(5) %a, ptr addrspace(5) %b, ptr addrspace(5) %c, ptr addrspace(5) %d, ptr addrspace(5) %e) local_unnamed_addr {
- ; TODO: generate PTX that preserves Concurrent Forward Progress
- ; by using PTX atomic operations.
-
+; CHECK-LABEL: local_unordered_sys
+define void @local_unordered_sys(ptr addrspace(5) %a, ptr addrspace(5) %b, ptr addrspace(5) %c, ptr addrspace(5) %d, ptr addrspace(5) %e) local_unnamed_addr {
; CHECK: ld.local.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
- %a.load = load atomic i8, ptr addrspace(5) %a monotonic, align 1
+ %a.load = load atomic i8, ptr addrspace(5) %a unordered, align 1
%a.add = add i8 %a.load, 1
; CHECK: st.local.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
- store atomic i8 %a.add, ptr addrspace(5) %a monotonic, align 1
+ store atomic i8 %a.add, ptr addrspace(5) %a unordered, align 1
; CHECK: ld.local.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
- %b.load = load atomic i16, ptr addrspace(5) %b monotonic, align 2
+ %b.load = load atomic i16, ptr addrspace(5) %b unordered, align 2
%b.add = add i16 %b.load, 1
; CHECK: st.local.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
- store atomic i16 %b.add, ptr addrspace(5) %b monotonic, align 2
+ store atomic i16 %b.add, ptr addrspace(5) %b unordered, align 2
; CHECK: ld.local.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
- %c.load = load atomic i32, ptr addrspace(5) %c monotonic, align 4
+ %c.load = load atomic i32, ptr addrspace(5) %c unordered, align 4
%c.add = add i32 %c.load, 1
; CHECK: st.local.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
- store atomic i32 %c.add, ptr addrspace(5) %c monotonic, align 4
+ store atomic i32 %c.add, ptr addrspace(5) %c unordered, align 4
; CHECK: ld.local.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
- %d.load = load atomic i64, ptr addrspace(5) %d monotonic, align 8
+ %d.load = load atomic i64, ptr addrspace(5) %d unordered, align 8
%d.add = add i64 %d.load, 1
; CHECK: st.local.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
- store atomic i64 %d.add, ptr addrspace(5) %d monotonic, align 8
+ store atomic i64 %d.add, ptr addrspace(5) %d unordered, align 8
; CHECK: ld.local.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
- %e.load = load atomic float, ptr addrspace(5) %e monotonic, align 4
+ %e.load = load atomic float, ptr addrspace(5) %e unordered, align 4
%e.add = fadd float %e.load, 1.0
; CHECK: st.local.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
- store atomic float %e.add, ptr addrspace(5) %e monotonic, align 4
+ store atomic float %e.add, ptr addrspace(5) %e unordered, align 4
; CHECK: ld.local.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
- %f.load = load atomic double, ptr addrspace(5) %e monotonic, align 8
+ %f.load = load atomic double, ptr addrspace(5) %e unordered, align 8
%f.add = fadd double %f.load, 1.
; CHECK: st.local.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
- store atomic double %f.add, ptr addrspace(5) %e monotonic, align 8
+ store atomic double %f.add, ptr addrspace(5) %e unordered, align 8
ret void
}
-; CHECK-LABEL: local_monotonic_volatile
-define void @local_monotonic_volatile(ptr addrspace(5) %a, ptr addrspace(5) %b, ptr addrspace(5) %c, ptr addrspace(5) %d, ptr addrspace(5) %e) local_unnamed_addr {
- ; TODO: generate PTX that preserves Concurrent Forward Progress
- ; by generating atomic or volatile operations
-
+; CHECK-LABEL: local_unordered_volatile_sys
+define void @local_unordered_volatile_sys(ptr addrspace(5) %a, ptr addrspace(5) %b, ptr addrspace(5) %c, ptr addrspace(5) %d, ptr addrspace(5) %e) local_unnamed_addr {
; CHECK: ld.local.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
- %a.load = load atomic volatile i8, ptr addrspace(5) %a monotonic, align 1
+ %a.load = load atomic volatile i8, ptr addrspace(5) %a unordered, align 1
%a.add = add i8 %a.load, 1
; CHECK: st.local.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
- store atomic volatile i8 %a.add, ptr addrspace(5) %a monotonic, align 1
+ store atomic volatile i8 %a.add, ptr addrspace(5) %a unordered, align 1
; CHECK: ld.local.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
- %b.load = load atomic volatile i16, ptr addrspace(5) %b monotonic, align 2
+ %b.load = load atomic volatile i16, ptr addrspace(5) %b unordered, align 2
%b.add = add i16 %b.load, 1
; CHECK: st.local.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
- store atomic volatile i16 %b.add, ptr addrspace(5) %b monotonic, align 2
+ store atomic volatile i16 %b.add, ptr addrspace(5) %b unordered, align 2
; CHECK: ld.local.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
- %c.load = load atomic volatile i32, ptr addrspace(5) %c monotonic, align 4
+ %c.load = load atomic volatile i32, ptr addrspace(5) %c unordered, align 4
%c.add = add i32 %c.load, 1
; CHECK: st.local.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
- store atomic volatile i32 %c.add, ptr addrspace(5) %c monotonic, align 4
+ store atomic volatile i32 %c.add, ptr addrspace(5) %c unordered, align 4
; CHECK: ld.local.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
- %d.load = load atomic volatile i64, ptr addrspace(5) %d monotonic, align 8
+ %d.load = load atomic volatile i64, ptr addrspace(5) %d unordered, align 8
%d.add = add i64 %d.load, 1
; CHECK: st.local.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
- store atomic volatile i64 %d.add, ptr addrspace(5) %d monotonic, align 8
+ store atomic volatile i64 %d.add, ptr addrspace(5) %d unordered, align 8
; CHECK: ld.local.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
- %e.load = load atomic volatile float, ptr addrspace(5) %e monotonic, align 4
+ %e.load = load atomic volatile float, ptr addrspace(5) %e unordered, align 4
%e.add = fadd float %e.load, 1.0
; CHECK: st.local.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
- store atomic volatile float %e.add, ptr addrspace(5) %e monotonic, align 4
+ store atomic volatile float %e.add, ptr addrspace(5) %e unordered, align 4
; CHECK: ld.local.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
- %f.load = load atomic volatile double, ptr addrspace(5) %e monotonic, align 8
+ %f.load = load atomic volatile double, ptr addrspace(5) %e unordered, align 8
%f.add = fadd double %f.load, 1.
; CHECK: st.local.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
- store atomic volatile double %f.add, ptr addrspace(5) %e monotonic, align 8
+ store atomic volatile double %f.add, ptr addrspace(5) %e unordered, align 8
ret void
}
-; CHECK-LABEL: local_unordered
-define void @local_unordered(ptr addrspace(5) %a, ptr addrspace(5) %b, ptr addrspace(5) %c, ptr addrspace(5) %d, ptr addrspace(5) %e) local_unnamed_addr {
+; CHECK-LABEL: local_monotonic_sys
+define void @local_monotonic_sys(ptr addrspace(5) %a, ptr addrspace(5) %b, ptr addrspace(5) %c, ptr addrspace(5) %d, ptr addrspace(5) %e) local_unnamed_addr {
; CHECK: ld.local.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
- %a.load = load atomic i8, ptr addrspace(5) %a unordered, align 1
+ %a.load = load atomic i8, ptr addrspace(5) %a monotonic, align 1
%a.add = add i8 %a.load, 1
; CHECK: st.local.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
- store atomic i8 %a.add, ptr addrspace(5) %a unordered, align 1
+ store atomic i8 %a.add, ptr addrspace(5) %a monotonic, align 1
; CHECK: ld.local.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
- %b.load = load atomic i16, ptr addrspace(5) %b unordered, align 2
+ %b.load = load atomic i16, ptr addrspace(5) %b monotonic, align 2
%b.add = add i16 %b.load, 1
; CHECK: st.local.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
- store atomic i16 %b.add, ptr addrspace(5) %b unordered, align 2
+ store atomic i16 %b.add, ptr addrspace(5) %b monotonic, align 2
; CHECK: ld.local.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
- %c.load = load atomic i32, ptr addrspace(5) %c unordered, align 4
+ %c.load = load atomic i32, ptr addrspace(5) %c monotonic, align 4
%c.add = add i32 %c.load, 1
; CHECK: st.local.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
- store atomic i32 %c.add, ptr addrspace(5) %c unordered, align 4
+ store atomic i32 %c.add, ptr addrspace(5) %c monotonic, align 4
; CHECK: ld.local.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
- %d.load = load atomic i64, ptr addrspace(5) %d unordered, align 8
+ %d.load = load atomic i64, ptr addrspace(5) %d monotonic, align 8
%d.add = add i64 %d.load, 1
; CHECK: st.local.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
- store atomic i64 %d.add, ptr addrspace(5) %d unordered, align 8
+ store atomic i64 %d.add, ptr addrspace(5) %d monotonic, align 8
; CHECK: ld.local.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
- %e.load = load atomic float, ptr addrspace(5) %e unordered, align 4
- %e.add = fadd float %e.load, 1.0
+ %e.load = load atomic float, ptr addrspace(5) %e monotonic, align 4
+ %e.add = fadd float %e.load, 1.
; CHECK: st.local.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
- store atomic float %e.add, ptr addrspace(5) %e unordered, align 4
+ store atomic float %e.add, ptr addrspace(5) %e monotonic, align 4
; CHECK: ld.local.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
- %f.load = load atomic double, ptr addrspace(5) %e unordered, align 8
+ %f.load = load atomic double, ptr addrspace(5) %e monotonic, align 8
%f.add = fadd double %f.load, 1.
; CHECK: st.local.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
- store atomic double %f.add, ptr addrspace(5) %e unordered, align 8
+ store atomic double %f.add, ptr addrspace(5) %e monotonic, align 8
ret void
}
-; CHECK-LABEL: local_unordered_volatile
-define void @local_unordered_volatile(ptr addrspace(5) %a, ptr addrspace(5) %b, ptr addrspace(5) %c, ptr addrspace(5) %d, ptr addrspace(5) %e) local_unnamed_addr {
+; CHECK-LABEL: local_monotonic_volatile
+define void @local_monotonic_volatile(ptr addrspace(5) %a, ptr addrspace(5) %b, ptr addrspace(5) %c, ptr addrspace(5) %d, ptr addrspace(5) %e) local_unnamed_addr {
; CHECK: ld.local.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
- %a.load = load atomic volatile i8, ptr addrspace(5) %a unordered, align 1
+ %a.load = load atomic volatile i8, ptr addrspace(5) %a monotonic, align 1
%a.add = add i8 %a.load, 1
; CHECK: st.local.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
- store atomic volatile i8 %a.add, ptr addrspace(5) %a unordered, align 1
+ store atomic volatile i8 %a.add, ptr addrspace(5) %a monotonic, align 1
; CHECK: ld.local.u16 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
- %b.load = load atomic volatile i16, ptr addrspace(5) %b unordered, align 2
+ %b.load = load atomic volatile i16, ptr addrspace(5) %b monotonic, align 2
%b.add = add i16 %b.load, 1
; CHECK: st.local.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
- store atomic volatile i16 %b.add, ptr addrspace(5) %b unordered, align 2
+ store atomic volatile i16 %b.add, ptr addrspace(5) %b monotonic, align 2
; CHECK: ld.local.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
- %c.load = load atomic volatile i32, ptr addrspace(5) %c unordered, align 4
+ %c.load = load atomic volatile i32, ptr addrspace(5) %c monotonic, align 4
%c.add = add i32 %c.load, 1
; CHECK: st.local.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
- store atomic volatile i32 %c.add, ptr addrspace(5) %c unordered, align 4
+ store atomic volatile i32 %c.add, ptr addrspace(5) %c monotonic, align 4
; CHECK: ld.local.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
- %d.load = load atomic volatile i64, ptr addrspace(5) %d unordered, align 8
+ %d.load = load atomic volatile i64, ptr addrspace(5) %d monotonic, align 8
%d.add = add i64 %d.load, 1
; CHECK: st.local.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
- store atomic volatile i64 %d.add, ptr addrspace(5) %d unordered, align 8
+ store atomic volatile i64 %d.add, ptr addrspace(5) %d monotonic, align 8
; CHECK: ld.local.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
- %e.load = load atomic volatile float, ptr addrspace(5) %e unordered, align 4
- %e.add = fadd float %e.load, 1.0
+ %e.load = load atomic volatile float, ptr addrspace(5) %e monotonic, align 4
+ %e.add = fadd float %e.load, 1.
; CHECK: st.local.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
- store atomic volatile float %e.add, ptr addrspace(5) %e unordered, align 4
+ store atomic volatile float %e.add, ptr addrspace(5) %e monotonic, align 4
; CHECK: ld.local.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
- %f.load = load atomic volatile double, ptr addrspace(5) %e unordered, align 8
+ %f.load = load atomic volatile double, ptr addrspace(5) %e monotonic, align 8
%f.add = fadd double %f.load, 1.
; CHECK: st.local.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
- store atomic volatile double %f.add, ptr addrspace(5) %e unordered, align 8
+ store atomic volatile double %f.add, ptr addrspace(5) %e monotonic, align 8
ret void
}
-
-; TODO: add plain,atomic,volatile,atomic volatile tests
-; for .const and .param statespaces
\ No newline at end of file
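
The renamed tests above encode both the LLVM atomic ordering and the PTX scope they are expected to lower to: plain accesses become "weak", and unordered/monotonic atomics gain a "_sys" suffix, matching checks where sm_60 falls back to ld.volatile/st.volatile and sm_70+ uses ld.relaxed.sys/st.relaxed.sys (the .local statespace is thread-private, so its atomics stay plain ld.local/st.local). A minimal sketch in the same style, hypothetical and not part of this test file, showing the pattern for a single i32 access in the global statespace:

; Hypothetical single-access example: a monotonic i32 atomic in the
; global statespace, expected per the checks above to lower to
; ld.volatile.global on sm_60 and ld.relaxed.sys.global on sm_70+.
define void @global_monotonic_sys_i32(ptr addrspace(1) %p) {
  %v = load atomic i32, ptr addrspace(1) %p monotonic, align 4
  %v.add = add i32 %v, 1
  store atomic i32 %v.add, ptr addrspace(1) %p monotonic, align 4
  ret void
}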
diff --git a/llvm/test/CodeGen/NVPTX/rotate.ll b/llvm/test/CodeGen/NVPTX/rotate.ll
index 20c7ae5..9ec5bcd 100644
--- a/llvm/test/CodeGen/NVPTX/rotate.ll
+++ b/llvm/test/CodeGen/NVPTX/rotate.ll
@@ -9,26 +9,29 @@ declare i32 @llvm.nvvm.rotate.b32(i32, i32)
declare i64 @llvm.nvvm.rotate.b64(i64, i32)
declare i64 @llvm.nvvm.rotate.right.b64(i64, i32)
+declare i64 @llvm.fshl.i64(i64, i64, i64)
+declare i64 @llvm.fshr.i64(i64, i64, i64)
+declare i32 @llvm.fshl.i32(i32, i32, i32)
+declare i32 @llvm.fshr.i32(i32, i32, i32)
+
+
; SM20: rotate32
; SM35: rotate32
define i32 @rotate32(i32 %a, i32 %b) {
; SM20-LABEL: rotate32(
; SM20: {
-; SM20-NEXT: .reg .b32 %r<4>;
+; SM20-NEXT: .reg .b32 %r<9>;
; SM20-EMPTY:
; SM20-NEXT: // %bb.0:
; SM20-NEXT: ld.param.u32 %r1, [rotate32_param_0];
; SM20-NEXT: ld.param.u32 %r2, [rotate32_param_1];
-; SM20-NEXT: {
-; SM20-NEXT: .reg .b32 %lhs;
-; SM20-NEXT: .reg .b32 %rhs;
-; SM20-NEXT: .reg .b32 %amt2;
-; SM20-NEXT: shl.b32 %lhs, %r1, %r2;
-; SM20-NEXT: sub.s32 %amt2, 32, %r2;
-; SM20-NEXT: shr.b32 %rhs, %r1, %amt2;
-; SM20-NEXT: add.u32 %r3, %lhs, %rhs;
-; SM20-NEXT: }
-; SM20-NEXT: st.param.b32 [func_retval0+0], %r3;
+; SM20-NEXT: and.b32 %r3, %r2, 31;
+; SM20-NEXT: shl.b32 %r4, %r1, %r3;
+; SM20-NEXT: neg.s32 %r5, %r2;
+; SM20-NEXT: and.b32 %r6, %r5, 31;
+; SM20-NEXT: shr.u32 %r7, %r1, %r6;
+; SM20-NEXT: or.b32 %r8, %r4, %r7;
+; SM20-NEXT: st.param.b32 [func_retval0+0], %r8;
; SM20-NEXT: ret;
;
; SM35-LABEL: rotate32(
@@ -50,45 +53,36 @@ define i32 @rotate32(i32 %a, i32 %b) {
define i64 @rotate64(i64 %a, i32 %b) {
; SM20-LABEL: rotate64(
; SM20: {
-; SM20-NEXT: .reg .b32 %r<2>;
-; SM20-NEXT: .reg .b64 %rd<3>;
+; SM20-NEXT: .reg .b32 %r<5>;
+; SM20-NEXT: .reg .b64 %rd<5>;
; SM20-EMPTY:
; SM20-NEXT: // %bb.0:
; SM20-NEXT: ld.param.u64 %rd1, [rotate64_param_0];
; SM20-NEXT: ld.param.u32 %r1, [rotate64_param_1];
-; SM20-NEXT: {
-; SM20-NEXT: .reg .b64 %lhs;
-; SM20-NEXT: .reg .b64 %rhs;
-; SM20-NEXT: .reg .u32 %amt2;
-; SM20-NEXT: and.b32 %amt2, %r1, 63;
-; SM20-NEXT: shl.b64 %lhs, %rd1, %amt2;
-; SM20-NEXT: sub.u32 %amt2, 64, %amt2;
-; SM20-NEXT: shr.b64 %rhs, %rd1, %amt2;
-; SM20-NEXT: add.u64 %rd2, %lhs, %rhs;
-; SM20-NEXT: }
-; SM20-NEXT: st.param.b64 [func_retval0+0], %rd2;
+; SM20-NEXT: and.b32 %r2, %r1, 63;
+; SM20-NEXT: shl.b64 %rd2, %rd1, %r2;
+; SM20-NEXT: neg.s32 %r3, %r1;
+; SM20-NEXT: and.b32 %r4, %r3, 63;
+; SM20-NEXT: shr.u64 %rd3, %rd1, %r4;
+; SM20-NEXT: or.b64 %rd4, %rd2, %rd3;
+; SM20-NEXT: st.param.b64 [func_retval0+0], %rd4;
; SM20-NEXT: ret;
;
; SM35-LABEL: rotate64(
; SM35: {
-; SM35-NEXT: .reg .b32 %r<6>;
-; SM35-NEXT: .reg .b64 %rd<3>;
+; SM35-NEXT: .reg .b32 %r<5>;
+; SM35-NEXT: .reg .b64 %rd<5>;
; SM35-EMPTY:
; SM35-NEXT: // %bb.0:
; SM35-NEXT: ld.param.u64 %rd1, [rotate64_param_0];
-; SM35-NEXT: {
-; SM35-NEXT: .reg .b32 %dummy;
-; SM35-NEXT: mov.b64 {%dummy,%r1}, %rd1;
-; SM35-NEXT: }
-; SM35-NEXT: {
-; SM35-NEXT: .reg .b32 %dummy;
-; SM35-NEXT: mov.b64 {%r2,%dummy}, %rd1;
-; SM35-NEXT: }
-; SM35-NEXT: ld.param.u32 %r3, [rotate64_param_1];
-; SM35-NEXT: shf.l.wrap.b32 %r4, %r2, %r1, %r3;
-; SM35-NEXT: shf.l.wrap.b32 %r5, %r1, %r2, %r3;
-; SM35-NEXT: mov.b64 %rd2, {%r5, %r4};
-; SM35-NEXT: st.param.b64 [func_retval0+0], %rd2;
+; SM35-NEXT: ld.param.u32 %r1, [rotate64_param_1];
+; SM35-NEXT: and.b32 %r2, %r1, 63;
+; SM35-NEXT: shl.b64 %rd2, %rd1, %r2;
+; SM35-NEXT: neg.s32 %r3, %r1;
+; SM35-NEXT: and.b32 %r4, %r3, 63;
+; SM35-NEXT: shr.u64 %rd3, %rd1, %r4;
+; SM35-NEXT: or.b64 %rd4, %rd2, %rd3;
+; SM35-NEXT: st.param.b64 [func_retval0+0], %rd4;
; SM35-NEXT: ret;
%val = tail call i64 @llvm.nvvm.rotate.b64(i64 %a, i32 %b)
ret i64 %val
@@ -99,45 +93,36 @@ define i64 @rotate64(i64 %a, i32 %b) {
define i64 @rotateright64(i64 %a, i32 %b) {
; SM20-LABEL: rotateright64(
; SM20: {
-; SM20-NEXT: .reg .b32 %r<2>;
-; SM20-NEXT: .reg .b64 %rd<3>;
+; SM20-NEXT: .reg .b32 %r<5>;
+; SM20-NEXT: .reg .b64 %rd<5>;
; SM20-EMPTY:
; SM20-NEXT: // %bb.0:
; SM20-NEXT: ld.param.u64 %rd1, [rotateright64_param_0];
; SM20-NEXT: ld.param.u32 %r1, [rotateright64_param_1];
-; SM20-NEXT: {
-; SM20-NEXT: .reg .b64 %lhs;
-; SM20-NEXT: .reg .b64 %rhs;
-; SM20-NEXT: .reg .u32 %amt2;
-; SM20-NEXT: and.b32 %amt2, %r1, 63;
-; SM20-NEXT: shr.b64 %lhs, %rd1, %amt2;
-; SM20-NEXT: sub.u32 %amt2, 64, %amt2;
-; SM20-NEXT: shl.b64 %rhs, %rd1, %amt2;
-; SM20-NEXT: add.u64 %rd2, %lhs, %rhs;
-; SM20-NEXT: }
-; SM20-NEXT: st.param.b64 [func_retval0+0], %rd2;
+; SM20-NEXT: and.b32 %r2, %r1, 63;
+; SM20-NEXT: shr.u64 %rd2, %rd1, %r2;
+; SM20-NEXT: neg.s32 %r3, %r1;
+; SM20-NEXT: and.b32 %r4, %r3, 63;
+; SM20-NEXT: shl.b64 %rd3, %rd1, %r4;
+; SM20-NEXT: or.b64 %rd4, %rd2, %rd3;
+; SM20-NEXT: st.param.b64 [func_retval0+0], %rd4;
; SM20-NEXT: ret;
;
; SM35-LABEL: rotateright64(
; SM35: {
-; SM35-NEXT: .reg .b32 %r<6>;
-; SM35-NEXT: .reg .b64 %rd<3>;
+; SM35-NEXT: .reg .b32 %r<5>;
+; SM35-NEXT: .reg .b64 %rd<5>;
; SM35-EMPTY:
; SM35-NEXT: // %bb.0:
; SM35-NEXT: ld.param.u64 %rd1, [rotateright64_param_0];
-; SM35-NEXT: {
-; SM35-NEXT: .reg .b32 %dummy;
-; SM35-NEXT: mov.b64 {%r1,%dummy}, %rd1;
-; SM35-NEXT: }
-; SM35-NEXT: {
-; SM35-NEXT: .reg .b32 %dummy;
-; SM35-NEXT: mov.b64 {%dummy,%r2}, %rd1;
-; SM35-NEXT: }
-; SM35-NEXT: ld.param.u32 %r3, [rotateright64_param_1];
-; SM35-NEXT: shf.r.wrap.b32 %r4, %r2, %r1, %r3;
-; SM35-NEXT: shf.r.wrap.b32 %r5, %r1, %r2, %r3;
-; SM35-NEXT: mov.b64 %rd2, {%r5, %r4};
-; SM35-NEXT: st.param.b64 [func_retval0+0], %rd2;
+; SM35-NEXT: ld.param.u32 %r1, [rotateright64_param_1];
+; SM35-NEXT: and.b32 %r2, %r1, 63;
+; SM35-NEXT: shr.u64 %rd2, %rd1, %r2;
+; SM35-NEXT: neg.s32 %r3, %r1;
+; SM35-NEXT: and.b32 %r4, %r3, 63;
+; SM35-NEXT: shl.b64 %rd3, %rd1, %r4;
+; SM35-NEXT: or.b64 %rd4, %rd2, %rd3;
+; SM35-NEXT: st.param.b64 [func_retval0+0], %rd4;
; SM35-NEXT: ret;
%val = tail call i64 @llvm.nvvm.rotate.right.b64(i64 %a, i32 %b)
ret i64 %val
@@ -148,18 +133,14 @@ define i64 @rotateright64(i64 %a, i32 %b) {
define i32 @rotl0(i32 %x) {
; SM20-LABEL: rotl0(
; SM20: {
-; SM20-NEXT: .reg .b32 %r<3>;
+; SM20-NEXT: .reg .b32 %r<5>;
; SM20-EMPTY:
; SM20-NEXT: // %bb.0:
; SM20-NEXT: ld.param.u32 %r1, [rotl0_param_0];
-; SM20-NEXT: {
-; SM20-NEXT: .reg .b32 %lhs;
-; SM20-NEXT: .reg .b32 %rhs;
-; SM20-NEXT: shl.b32 %lhs, %r1, 8;
-; SM20-NEXT: shr.b32 %rhs, %r1, 24;
-; SM20-NEXT: add.u32 %r2, %lhs, %rhs;
-; SM20-NEXT: }
-; SM20-NEXT: st.param.b32 [func_retval0+0], %r2;
+; SM20-NEXT: shr.u32 %r2, %r1, 24;
+; SM20-NEXT: shl.b32 %r3, %r1, 8;
+; SM20-NEXT: or.b32 %r4, %r3, %r2;
+; SM20-NEXT: st.param.b32 [func_retval0+0], %r4;
; SM20-NEXT: ret;
;
; SM35-LABEL: rotl0(
@@ -177,51 +158,40 @@ define i32 @rotl0(i32 %x) {
ret i32 %t2
}
-declare i64 @llvm.fshl.i64(i64, i64, i64)
-declare i64 @llvm.fshr.i64(i64, i64, i64)
-
; SM35: rotl64
define i64 @rotl64(i64 %a, i64 %n) {
; SM20-LABEL: rotl64(
; SM20: {
-; SM20-NEXT: .reg .b32 %r<2>;
-; SM20-NEXT: .reg .b64 %rd<3>;
+; SM20-NEXT: .reg .b32 %r<5>;
+; SM20-NEXT: .reg .b64 %rd<5>;
; SM20-EMPTY:
; SM20-NEXT: // %bb.0:
; SM20-NEXT: ld.param.u64 %rd1, [rotl64_param_0];
; SM20-NEXT: ld.param.u32 %r1, [rotl64_param_1];
-; SM20-NEXT: {
-; SM20-NEXT: .reg .b64 %lhs;
-; SM20-NEXT: .reg .b64 %rhs;
-; SM20-NEXT: .reg .u32 %amt2;
-; SM20-NEXT: and.b32 %amt2, %r1, 63;
-; SM20-NEXT: shl.b64 %lhs, %rd1, %amt2;
-; SM20-NEXT: sub.u32 %amt2, 64, %amt2;
-; SM20-NEXT: shr.b64 %rhs, %rd1, %amt2;
-; SM20-NEXT: add.u64 %rd2, %lhs, %rhs;
-; SM20-NEXT: }
-; SM20-NEXT: st.param.b64 [func_retval0+0], %rd2;
+; SM20-NEXT: and.b32 %r2, %r1, 63;
+; SM20-NEXT: shl.b64 %rd2, %rd1, %r2;
+; SM20-NEXT: neg.s32 %r3, %r1;
+; SM20-NEXT: and.b32 %r4, %r3, 63;
+; SM20-NEXT: shr.u64 %rd3, %rd1, %r4;
+; SM20-NEXT: or.b64 %rd4, %rd2, %rd3;
+; SM20-NEXT: st.param.b64 [func_retval0+0], %rd4;
; SM20-NEXT: ret;
;
; SM35-LABEL: rotl64(
; SM35: {
-; SM35-NEXT: .reg .b32 %r<2>;
-; SM35-NEXT: .reg .b64 %rd<3>;
+; SM35-NEXT: .reg .b32 %r<5>;
+; SM35-NEXT: .reg .b64 %rd<5>;
; SM35-EMPTY:
; SM35-NEXT: // %bb.0:
; SM35-NEXT: ld.param.u64 %rd1, [rotl64_param_0];
; SM35-NEXT: ld.param.u32 %r1, [rotl64_param_1];
-; SM35-NEXT: {
-; SM35-NEXT: .reg .b64 %lhs;
-; SM35-NEXT: .reg .b64 %rhs;
-; SM35-NEXT: .reg .u32 %amt2;
-; SM35-NEXT: and.b32 %amt2, %r1, 63;
-; SM35-NEXT: shl.b64 %lhs, %rd1, %amt2;
-; SM35-NEXT: sub.u32 %amt2, 64, %amt2;
-; SM35-NEXT: shr.b64 %rhs, %rd1, %amt2;
-; SM35-NEXT: add.u64 %rd2, %lhs, %rhs;
-; SM35-NEXT: }
-; SM35-NEXT: st.param.b64 [func_retval0+0], %rd2;
+; SM35-NEXT: and.b32 %r2, %r1, 63;
+; SM35-NEXT: shl.b64 %rd2, %rd1, %r2;
+; SM35-NEXT: neg.s32 %r3, %r1;
+; SM35-NEXT: and.b32 %r4, %r3, 63;
+; SM35-NEXT: shr.u64 %rd3, %rd1, %r4;
+; SM35-NEXT: or.b64 %rd4, %rd2, %rd3;
+; SM35-NEXT: st.param.b64 [func_retval0+0], %rd4;
; SM35-NEXT: ret;
%val = tail call i64 @llvm.fshl.i64(i64 %a, i64 %a, i64 %n)
ret i64 %val
@@ -231,34 +201,26 @@ define i64 @rotl64(i64 %a, i64 %n) {
define i64 @rotl64_imm(i64 %a) {
; SM20-LABEL: rotl64_imm(
; SM20: {
-; SM20-NEXT: .reg .b64 %rd<3>;
+; SM20-NEXT: .reg .b64 %rd<5>;
; SM20-EMPTY:
; SM20-NEXT: // %bb.0:
; SM20-NEXT: ld.param.u64 %rd1, [rotl64_imm_param_0];
-; SM20-NEXT: {
-; SM20-NEXT: .reg .b64 %lhs;
-; SM20-NEXT: .reg .b64 %rhs;
-; SM20-NEXT: shl.b64 %lhs, %rd1, 2;
-; SM20-NEXT: shr.b64 %rhs, %rd1, 62;
-; SM20-NEXT: add.u64 %rd2, %lhs, %rhs;
-; SM20-NEXT: }
-; SM20-NEXT: st.param.b64 [func_retval0+0], %rd2;
+; SM20-NEXT: shr.u64 %rd2, %rd1, 62;
+; SM20-NEXT: shl.b64 %rd3, %rd1, 2;
+; SM20-NEXT: or.b64 %rd4, %rd3, %rd2;
+; SM20-NEXT: st.param.b64 [func_retval0+0], %rd4;
; SM20-NEXT: ret;
;
; SM35-LABEL: rotl64_imm(
; SM35: {
-; SM35-NEXT: .reg .b64 %rd<3>;
+; SM35-NEXT: .reg .b64 %rd<5>;
; SM35-EMPTY:
; SM35-NEXT: // %bb.0:
; SM35-NEXT: ld.param.u64 %rd1, [rotl64_imm_param_0];
-; SM35-NEXT: {
-; SM35-NEXT: .reg .b64 %lhs;
-; SM35-NEXT: .reg .b64 %rhs;
-; SM35-NEXT: shl.b64 %lhs, %rd1, 2;
-; SM35-NEXT: shr.b64 %rhs, %rd1, 62;
-; SM35-NEXT: add.u64 %rd2, %lhs, %rhs;
-; SM35-NEXT: }
-; SM35-NEXT: st.param.b64 [func_retval0+0], %rd2;
+; SM35-NEXT: shr.u64 %rd2, %rd1, 62;
+; SM35-NEXT: shl.b64 %rd3, %rd1, 2;
+; SM35-NEXT: or.b64 %rd4, %rd3, %rd2;
+; SM35-NEXT: st.param.b64 [func_retval0+0], %rd4;
; SM35-NEXT: ret;
%val = tail call i64 @llvm.fshl.i64(i64 %a, i64 %a, i64 66)
ret i64 %val
@@ -268,44 +230,36 @@ define i64 @rotl64_imm(i64 %a) {
define i64 @rotr64(i64 %a, i64 %n) {
; SM20-LABEL: rotr64(
; SM20: {
-; SM20-NEXT: .reg .b32 %r<2>;
-; SM20-NEXT: .reg .b64 %rd<3>;
+; SM20-NEXT: .reg .b32 %r<5>;
+; SM20-NEXT: .reg .b64 %rd<5>;
; SM20-EMPTY:
; SM20-NEXT: // %bb.0:
; SM20-NEXT: ld.param.u64 %rd1, [rotr64_param_0];
; SM20-NEXT: ld.param.u32 %r1, [rotr64_param_1];
-; SM20-NEXT: {
-; SM20-NEXT: .reg .b64 %lhs;
-; SM20-NEXT: .reg .b64 %rhs;
-; SM20-NEXT: .reg .u32 %amt2;
-; SM20-NEXT: and.b32 %amt2, %r1, 63;
-; SM20-NEXT: shr.b64 %lhs, %rd1, %amt2;
-; SM20-NEXT: sub.u32 %amt2, 64, %amt2;
-; SM20-NEXT: shl.b64 %rhs, %rd1, %amt2;
-; SM20-NEXT: add.u64 %rd2, %lhs, %rhs;
-; SM20-NEXT: }
-; SM20-NEXT: st.param.b64 [func_retval0+0], %rd2;
+; SM20-NEXT: and.b32 %r2, %r1, 63;
+; SM20-NEXT: shr.u64 %rd2, %rd1, %r2;
+; SM20-NEXT: neg.s32 %r3, %r1;
+; SM20-NEXT: and.b32 %r4, %r3, 63;
+; SM20-NEXT: shl.b64 %rd3, %rd1, %r4;
+; SM20-NEXT: or.b64 %rd4, %rd2, %rd3;
+; SM20-NEXT: st.param.b64 [func_retval0+0], %rd4;
; SM20-NEXT: ret;
;
; SM35-LABEL: rotr64(
; SM35: {
-; SM35-NEXT: .reg .b32 %r<2>;
-; SM35-NEXT: .reg .b64 %rd<3>;
+; SM35-NEXT: .reg .b32 %r<5>;
+; SM35-NEXT: .reg .b64 %rd<5>;
; SM35-EMPTY:
; SM35-NEXT: // %bb.0:
; SM35-NEXT: ld.param.u64 %rd1, [rotr64_param_0];
; SM35-NEXT: ld.param.u32 %r1, [rotr64_param_1];
-; SM35-NEXT: {
-; SM35-NEXT: .reg .b64 %lhs;
-; SM35-NEXT: .reg .b64 %rhs;
-; SM35-NEXT: .reg .u32 %amt2;
-; SM35-NEXT: and.b32 %amt2, %r1, 63;
-; SM35-NEXT: shr.b64 %lhs, %rd1, %amt2;
-; SM35-NEXT: sub.u32 %amt2, 64, %amt2;
-; SM35-NEXT: shl.b64 %rhs, %rd1, %amt2;
-; SM35-NEXT: add.u64 %rd2, %lhs, %rhs;
-; SM35-NEXT: }
-; SM35-NEXT: st.param.b64 [func_retval0+0], %rd2;
+; SM35-NEXT: and.b32 %r2, %r1, 63;
+; SM35-NEXT: shr.u64 %rd2, %rd1, %r2;
+; SM35-NEXT: neg.s32 %r3, %r1;
+; SM35-NEXT: and.b32 %r4, %r3, 63;
+; SM35-NEXT: shl.b64 %rd3, %rd1, %r4;
+; SM35-NEXT: or.b64 %rd4, %rd2, %rd3;
+; SM35-NEXT: st.param.b64 [func_retval0+0], %rd4;
; SM35-NEXT: ret;
%val = tail call i64 @llvm.fshr.i64(i64 %a, i64 %a, i64 %n)
ret i64 %val
@@ -315,35 +269,180 @@ define i64 @rotr64(i64 %a, i64 %n) {
define i64 @rotr64_imm(i64 %a) {
; SM20-LABEL: rotr64_imm(
; SM20: {
-; SM20-NEXT: .reg .b64 %rd<3>;
+; SM20-NEXT: .reg .b64 %rd<5>;
; SM20-EMPTY:
; SM20-NEXT: // %bb.0:
; SM20-NEXT: ld.param.u64 %rd1, [rotr64_imm_param_0];
-; SM20-NEXT: {
-; SM20-NEXT: .reg .b64 %lhs;
-; SM20-NEXT: .reg .b64 %rhs;
-; SM20-NEXT: shl.b64 %lhs, %rd1, 62;
-; SM20-NEXT: shr.b64 %rhs, %rd1, 2;
-; SM20-NEXT: add.u64 %rd2, %lhs, %rhs;
-; SM20-NEXT: }
-; SM20-NEXT: st.param.b64 [func_retval0+0], %rd2;
+; SM20-NEXT: shl.b64 %rd2, %rd1, 62;
+; SM20-NEXT: shr.u64 %rd3, %rd1, 2;
+; SM20-NEXT: or.b64 %rd4, %rd3, %rd2;
+; SM20-NEXT: st.param.b64 [func_retval0+0], %rd4;
; SM20-NEXT: ret;
;
; SM35-LABEL: rotr64_imm(
; SM35: {
-; SM35-NEXT: .reg .b64 %rd<3>;
+; SM35-NEXT: .reg .b64 %rd<5>;
; SM35-EMPTY:
; SM35-NEXT: // %bb.0:
; SM35-NEXT: ld.param.u64 %rd1, [rotr64_imm_param_0];
-; SM35-NEXT: {
-; SM35-NEXT: .reg .b64 %lhs;
-; SM35-NEXT: .reg .b64 %rhs;
-; SM35-NEXT: shl.b64 %lhs, %rd1, 62;
-; SM35-NEXT: shr.b64 %rhs, %rd1, 2;
-; SM35-NEXT: add.u64 %rd2, %lhs, %rhs;
-; SM35-NEXT: }
-; SM35-NEXT: st.param.b64 [func_retval0+0], %rd2;
+; SM35-NEXT: shl.b64 %rd2, %rd1, 62;
+; SM35-NEXT: shr.u64 %rd3, %rd1, 2;
+; SM35-NEXT: or.b64 %rd4, %rd3, %rd2;
+; SM35-NEXT: st.param.b64 [func_retval0+0], %rd4;
; SM35-NEXT: ret;
%val = tail call i64 @llvm.fshr.i64(i64 %a, i64 %a, i64 66)
ret i64 %val
}
+
+define i32 @funnel_shift_right_32(i32 %a, i32 %b, i32 %c) {
+; SM20-LABEL: funnel_shift_right_32(
+; SM20: {
+; SM20-NEXT: .reg .b32 %r<11>;
+; SM20-EMPTY:
+; SM20-NEXT: // %bb.0:
+; SM20-NEXT: ld.param.u32 %r1, [funnel_shift_right_32_param_0];
+; SM20-NEXT: ld.param.u32 %r2, [funnel_shift_right_32_param_2];
+; SM20-NEXT: and.b32 %r3, %r2, 31;
+; SM20-NEXT: ld.param.u32 %r4, [funnel_shift_right_32_param_1];
+; SM20-NEXT: shr.u32 %r5, %r4, %r3;
+; SM20-NEXT: shl.b32 %r6, %r1, 1;
+; SM20-NEXT: not.b32 %r7, %r2;
+; SM20-NEXT: and.b32 %r8, %r7, 31;
+; SM20-NEXT: shl.b32 %r9, %r6, %r8;
+; SM20-NEXT: or.b32 %r10, %r9, %r5;
+; SM20-NEXT: st.param.b32 [func_retval0+0], %r10;
+; SM20-NEXT: ret;
+;
+; SM35-LABEL: funnel_shift_right_32(
+; SM35: {
+; SM35-NEXT: .reg .b32 %r<5>;
+; SM35-EMPTY:
+; SM35-NEXT: // %bb.0:
+; SM35-NEXT: ld.param.u32 %r1, [funnel_shift_right_32_param_0];
+; SM35-NEXT: ld.param.u32 %r2, [funnel_shift_right_32_param_1];
+; SM35-NEXT: ld.param.u32 %r3, [funnel_shift_right_32_param_2];
+; SM35-NEXT: shf.r.wrap.b32 %r4, %r1, %r2, %r3;
+; SM35-NEXT: st.param.b32 [func_retval0+0], %r4;
+; SM35-NEXT: ret;
+ %val = call i32 @llvm.fshr.i32(i32 %a, i32 %b, i32 %c)
+ ret i32 %val
+}
+
+define i32 @funnel_shift_left_32(i32 %a, i32 %b, i32 %c) {
+; SM20-LABEL: funnel_shift_left_32(
+; SM20: {
+; SM20-NEXT: .reg .b32 %r<11>;
+; SM20-EMPTY:
+; SM20-NEXT: // %bb.0:
+; SM20-NEXT: ld.param.u32 %r1, [funnel_shift_left_32_param_0];
+; SM20-NEXT: ld.param.u32 %r2, [funnel_shift_left_32_param_2];
+; SM20-NEXT: and.b32 %r3, %r2, 31;
+; SM20-NEXT: shl.b32 %r4, %r1, %r3;
+; SM20-NEXT: ld.param.u32 %r5, [funnel_shift_left_32_param_1];
+; SM20-NEXT: shr.u32 %r6, %r5, 1;
+; SM20-NEXT: not.b32 %r7, %r2;
+; SM20-NEXT: and.b32 %r8, %r7, 31;
+; SM20-NEXT: shr.u32 %r9, %r6, %r8;
+; SM20-NEXT: or.b32 %r10, %r4, %r9;
+; SM20-NEXT: st.param.b32 [func_retval0+0], %r10;
+; SM20-NEXT: ret;
+;
+; SM35-LABEL: funnel_shift_left_32(
+; SM35: {
+; SM35-NEXT: .reg .b32 %r<5>;
+; SM35-EMPTY:
+; SM35-NEXT: // %bb.0:
+; SM35-NEXT: ld.param.u32 %r1, [funnel_shift_left_32_param_0];
+; SM35-NEXT: ld.param.u32 %r2, [funnel_shift_left_32_param_1];
+; SM35-NEXT: ld.param.u32 %r3, [funnel_shift_left_32_param_2];
+; SM35-NEXT: shf.l.wrap.b32 %r4, %r1, %r2, %r3;
+; SM35-NEXT: st.param.b32 [func_retval0+0], %r4;
+; SM35-NEXT: ret;
+ %val = call i32 @llvm.fshl.i32(i32 %a, i32 %b, i32 %c)
+ ret i32 %val
+}
+
+define i64 @funnel_shift_right_64(i64 %a, i64 %b, i64 %c) {
+; SM20-LABEL: funnel_shift_right_64(
+; SM20: {
+; SM20-NEXT: .reg .b32 %r<5>;
+; SM20-NEXT: .reg .b64 %rd<7>;
+; SM20-EMPTY:
+; SM20-NEXT: // %bb.0:
+; SM20-NEXT: ld.param.u64 %rd1, [funnel_shift_right_64_param_0];
+; SM20-NEXT: ld.param.u32 %r1, [funnel_shift_right_64_param_2];
+; SM20-NEXT: and.b32 %r2, %r1, 63;
+; SM20-NEXT: ld.param.u64 %rd2, [funnel_shift_right_64_param_1];
+; SM20-NEXT: shr.u64 %rd3, %rd2, %r2;
+; SM20-NEXT: shl.b64 %rd4, %rd1, 1;
+; SM20-NEXT: not.b32 %r3, %r1;
+; SM20-NEXT: and.b32 %r4, %r3, 63;
+; SM20-NEXT: shl.b64 %rd5, %rd4, %r4;
+; SM20-NEXT: or.b64 %rd6, %rd5, %rd3;
+; SM20-NEXT: st.param.b64 [func_retval0+0], %rd6;
+; SM20-NEXT: ret;
+;
+; SM35-LABEL: funnel_shift_right_64(
+; SM35: {
+; SM35-NEXT: .reg .b32 %r<5>;
+; SM35-NEXT: .reg .b64 %rd<7>;
+; SM35-EMPTY:
+; SM35-NEXT: // %bb.0:
+; SM35-NEXT: ld.param.u64 %rd1, [funnel_shift_right_64_param_0];
+; SM35-NEXT: ld.param.u32 %r1, [funnel_shift_right_64_param_2];
+; SM35-NEXT: and.b32 %r2, %r1, 63;
+; SM35-NEXT: ld.param.u64 %rd2, [funnel_shift_right_64_param_1];
+; SM35-NEXT: shr.u64 %rd3, %rd2, %r2;
+; SM35-NEXT: shl.b64 %rd4, %rd1, 1;
+; SM35-NEXT: not.b32 %r3, %r1;
+; SM35-NEXT: and.b32 %r4, %r3, 63;
+; SM35-NEXT: shl.b64 %rd5, %rd4, %r4;
+; SM35-NEXT: or.b64 %rd6, %rd5, %rd3;
+; SM35-NEXT: st.param.b64 [func_retval0+0], %rd6;
+; SM35-NEXT: ret;
+ %val = call i64 @llvm.fshr.i64(i64 %a, i64 %b, i64 %c)
+ ret i64 %val
+}
+
+define i64 @funnel_shift_left_64(i64 %a, i64 %b, i64 %c) {
+; SM20-LABEL: funnel_shift_left_64(
+; SM20: {
+; SM20-NEXT: .reg .b32 %r<5>;
+; SM20-NEXT: .reg .b64 %rd<7>;
+; SM20-EMPTY:
+; SM20-NEXT: // %bb.0:
+; SM20-NEXT: ld.param.u64 %rd1, [funnel_shift_left_64_param_0];
+; SM20-NEXT: ld.param.u32 %r1, [funnel_shift_left_64_param_2];
+; SM20-NEXT: and.b32 %r2, %r1, 63;
+; SM20-NEXT: shl.b64 %rd2, %rd1, %r2;
+; SM20-NEXT: ld.param.u64 %rd3, [funnel_shift_left_64_param_1];
+; SM20-NEXT: shr.u64 %rd4, %rd3, 1;
+; SM20-NEXT: not.b32 %r3, %r1;
+; SM20-NEXT: and.b32 %r4, %r3, 63;
+; SM20-NEXT: shr.u64 %rd5, %rd4, %r4;
+; SM20-NEXT: or.b64 %rd6, %rd2, %rd5;
+; SM20-NEXT: st.param.b64 [func_retval0+0], %rd6;
+; SM20-NEXT: ret;
+;
+; SM35-LABEL: funnel_shift_left_64(
+; SM35: {
+; SM35-NEXT: .reg .b32 %r<5>;
+; SM35-NEXT: .reg .b64 %rd<7>;
+; SM35-EMPTY:
+; SM35-NEXT: // %bb.0:
+; SM35-NEXT: ld.param.u64 %rd1, [funnel_shift_left_64_param_0];
+; SM35-NEXT: ld.param.u32 %r1, [funnel_shift_left_64_param_2];
+; SM35-NEXT: and.b32 %r2, %r1, 63;
+; SM35-NEXT: shl.b64 %rd2, %rd1, %r2;
+; SM35-NEXT: ld.param.u64 %rd3, [funnel_shift_left_64_param_1];
+; SM35-NEXT: shr.u64 %rd4, %rd3, 1;
+; SM35-NEXT: not.b32 %r3, %r1;
+; SM35-NEXT: and.b32 %r4, %r3, 63;
+; SM35-NEXT: shr.u64 %rd5, %rd4, %r4;
+; SM35-NEXT: or.b64 %rd6, %rd2, %rd5;
+; SM35-NEXT: st.param.b64 [func_retval0+0], %rd6;
+; SM35-NEXT: ret;
+ %val = call i64 @llvm.fshl.i64(i64 %a, i64 %b, i64 %c)
+ ret i64 %val
+}
+
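The regenerated checks in rotate.ll reflect rotates now being expanded through the generic funnel-shift lowering instead of the old inline PTX register blocks: for width w, rotl(x, n) == (x << (n & (w-1))) | (x >> (-n & (w-1))), which is exactly the and/shl/neg/and/shr/or sequence the SM20 checks match; only the 32-bit case on sm_35+ still selects a single shf.l.wrap/shf.r.wrap instruction. A sketch of the same reduction written directly in IR (hypothetical, assuming that identity):

; A 64-bit rotate-left is a funnel shift with both operands equal, so
; it legalizes to the shl/neg/and/shr/or sequence checked above
; (there is no 64-bit shf.l.wrap instruction to select instead).
declare i64 @llvm.fshl.i64(i64, i64, i64)

define i64 @rotl_via_fshl(i64 %x, i64 %n) {
  %r = tail call i64 @llvm.fshl.i64(i64 %x, i64 %x, i64 %n)
  ret i64 %r
}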
diff --git a/llvm/test/CodeGen/NVPTX/rotate_64.ll b/llvm/test/CodeGen/NVPTX/rotate_64.ll
index 64659ce..05fdb02 100644
--- a/llvm/test/CodeGen/NVPTX/rotate_64.ll
+++ b/llvm/test/CodeGen/NVPTX/rotate_64.ll
@@ -1,25 +1,38 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc < %s -march=nvptx64 | FileCheck %s
; RUN: %if ptxas %{ llc < %s -march=nvptx64 | %ptxas-verify %}
declare i64 @llvm.nvvm.rotate.b64(i64, i32)
declare i64 @llvm.nvvm.rotate.right.b64(i64, i32)
-; CHECK: rotate64
define i64 @rotate64(i64 %a, i32 %b) {
-; CHECK: shl.b64 [[LHS:%.*]], [[RD1:%.*]], 3;
-; CHECK: shr.b64 [[RHS:%.*]], [[RD1]], 61;
-; CHECK: add.u64 [[RD2:%.*]], [[LHS]], [[RHS]];
-; CHECK: ret
+; CHECK-LABEL: rotate64(
+; CHECK: {
+; CHECK-NEXT: .reg .b64 %rd<5>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.u64 %rd1, [rotate64_param_0];
+; CHECK-NEXT: shr.u64 %rd2, %rd1, 61;
+; CHECK-NEXT: shl.b64 %rd3, %rd1, 3;
+; CHECK-NEXT: or.b64 %rd4, %rd3, %rd2;
+; CHECK-NEXT: st.param.b64 [func_retval0+0], %rd4;
+; CHECK-NEXT: ret;
%val = tail call i64 @llvm.nvvm.rotate.b64(i64 %a, i32 3)
ret i64 %val
}
-; CHECK: rotateright64
define i64 @rotateright64(i64 %a, i32 %b) {
-; CHECK: shl.b64 [[LHS:%.*]], [[RD1:%.*]], 61;
-; CHECK: shr.b64 [[RHS:%.*]], [[RD1]], 3;
-; CHECK: add.u64 [[RD2:%.*]], [[LHS]], [[RHS]];
-; CHECK: ret
+; CHECK-LABEL: rotateright64(
+; CHECK: {
+; CHECK-NEXT: .reg .b64 %rd<5>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.u64 %rd1, [rotateright64_param_0];
+; CHECK-NEXT: shl.b64 %rd2, %rd1, 61;
+; CHECK-NEXT: shr.u64 %rd3, %rd1, 3;
+; CHECK-NEXT: or.b64 %rd4, %rd3, %rd2;
+; CHECK-NEXT: st.param.b64 [func_retval0+0], %rd4;
+; CHECK-NEXT: ret;
%val = tail call i64 @llvm.nvvm.rotate.right.b64(i64 %a, i32 3)
ret i64 %val
}
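
rotate_64.ll shows the constant-amount form of the same expansion: the llvm.nvvm.rotate intrinsics fold to two immediate shifts plus an or, since rotl64(x, 3) == (x << 3) | (x >> 61) and rotr64(x, 3) == (x >> 3) | (x << 61). A sketch of the equivalent funnel-shift form (hypothetical, mirroring the rotl64_imm test earlier):

; Constant-amount rotate: expected to compile to shl.b64 ... 3,
; shr.u64 ... 61, and or.b64, exactly as in the checks above.
declare i64 @llvm.fshl.i64(i64, i64, i64)

define i64 @rotl64_by_3(i64 %x) {
  %r = tail call i64 @llvm.fshl.i64(i64 %x, i64 %x, i64 3)
  ret i64 %r
}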
diff --git a/llvm/test/CodeGen/NVPTX/unreachable.ll b/llvm/test/CodeGen/NVPTX/unreachable.ll
index 011497c..f911890 100644
--- a/llvm/test/CodeGen/NVPTX/unreachable.ll
+++ b/llvm/test/CodeGen/NVPTX/unreachable.ll
@@ -1,18 +1,23 @@
-; RUN: llc < %s -march=nvptx -mcpu=sm_20 -verify-machineinstrs \
+; RUN: llc < %s -march=nvptx -mcpu=sm_20 -verify-machineinstrs -trap-unreachable=false \
; RUN: | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-NOTRAP
-; RUN: llc < %s -march=nvptx64 -mcpu=sm_20 -verify-machineinstrs \
+; RUN: llc < %s -march=nvptx64 -mcpu=sm_20 -verify-machineinstrs -trap-unreachable=false \
; RUN: | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-NOTRAP
-; RUN: llc < %s -march=nvptx -mcpu=sm_20 -verify-machineinstrs -trap-unreachable \
+; RUN: llc < %s -march=nvptx -mcpu=sm_20 -verify-machineinstrs -trap-unreachable -no-trap-after-noreturn \
+; RUN: | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-NOTRAP
+; RUN: llc < %s -march=nvptx64 -mcpu=sm_20 -verify-machineinstrs -trap-unreachable -no-trap-after-noreturn \
+; RUN: | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-NOTRAP
+; RUN: llc < %s -march=nvptx -mcpu=sm_20 -verify-machineinstrs -trap-unreachable -no-trap-after-noreturn=false \
; RUN: | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-TRAP
-; RUN: llc < %s -march=nvptx64 -mcpu=sm_20 -verify-machineinstrs -trap-unreachable \
+; RUN: llc < %s -march=nvptx64 -mcpu=sm_20 -verify-machineinstrs -trap-unreachable -no-trap-after-noreturn=false \
; RUN: | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-TRAP
; RUN: %if ptxas && !ptxas-12.0 %{ llc < %s -march=nvptx -mcpu=sm_20 -verify-machineinstrs | %ptxas-verify %}
; RUN: %if ptxas %{ llc < %s -march=nvptx64 -mcpu=sm_20 -verify-machineinstrs | %ptxas-verify %}
; CHECK: .extern .func throw
declare void @throw() #0
+declare void @llvm.trap() #0
-; CHECK: .entry kernel_func
+; CHECK-LABEL: .entry kernel_func
define void @kernel_func() {
; CHECK: call.uni
; CHECK: throw,
@@ -24,6 +29,17 @@ define void @kernel_func() {
unreachable
}
+; CHECK-LABEL: kernel_func_2
+define void @kernel_func_2() {
+; CHECK: trap; exit;
+ call void @llvm.trap()
+
+;; Make sure we avoid emitting two trap instructions.
+; CHECK-NOT: trap;
+; CHECK-NOT: exit;
+ unreachable
+}
+
attributes #0 = { noreturn }
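
The new RUN lines in unreachable.ll pin down how two llc flags interact: -trap-unreachable requests a trap instruction for unreachable, while -no-trap-after-noreturn suppresses that trap when the unreachable directly follows a call to a noreturn function, so the throw-then-unreachable kernel emits no trap under the NOTRAP configurations. kernel_func_2 additionally verifies that an explicit @llvm.trap is not followed by a second trap for the unreachable behind it. A minimal companion case (hypothetical, under the same flag assumptions) that should still receive a trap:

; With -trap-unreachable, a bare unreachable that does not follow a
; noreturn call lowers to a trap even when -no-trap-after-noreturn
; is in effect, since there is no preceding noreturn call to excuse it.
define void @kernel_func_3() {
  unreachable
}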
diff --git a/llvm/test/CodeGen/PowerPC/ctrloop-sh.ll b/llvm/test/CodeGen/PowerPC/ctrloop-sh.ll
index c48361e..72de456 100644
--- a/llvm/test/CodeGen/PowerPC/ctrloop-sh.ll
+++ b/llvm/test/CodeGen/PowerPC/ctrloop-sh.ll
@@ -8,58 +8,52 @@ define void @foo1(ptr %a, ptr readonly %b, ptr readonly %c) #0 {
; CHECK-LABEL: foo1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stwu 1, -64(1)
-; CHECK-NEXT: stw 28, 48(1) # 4-byte Folded Spill
-; CHECK-NEXT: li 8, 2048
; CHECK-NEXT: stw 29, 52(1) # 4-byte Folded Spill
-; CHECK-NEXT: li 6, 0
+; CHECK-NEXT: li 7, 2048
; CHECK-NEXT: stw 30, 56(1) # 4-byte Folded Spill
-; CHECK-NEXT: li 7, 7
-; CHECK-NEXT: mtctr 8
-; CHECK-NEXT: addi 8, 1, 16
+; CHECK-NEXT: li 6, 0
+; CHECK-NEXT: mtctr 7
+; CHECK-NEXT: addi 7, 1, 16
; CHECK-NEXT: .LBB0_1: # %for.body
; CHECK-NEXT: #
-; CHECK-NEXT: lwz 9, 0(4)
-; CHECK-NEXT: lwz 10, 4(4)
-; CHECK-NEXT: lwz 11, 8(4)
-; CHECK-NEXT: lwz 12, 12(4)
-; CHECK-NEXT: lwz 0, 12(5)
+; CHECK-NEXT: lwz 8, 0(4)
+; CHECK-NEXT: lwz 9, 4(4)
+; CHECK-NEXT: lwz 10, 8(4)
+; CHECK-NEXT: lwz 11, 12(4)
+; CHECK-NEXT: lwz 12, 12(5)
; CHECK-NEXT: stw 6, 44(1)
; CHECK-NEXT: stw 6, 40(1)
; CHECK-NEXT: stw 6, 36(1)
; CHECK-NEXT: stw 6, 32(1)
-; CHECK-NEXT: stw 12, 28(1)
-; CHECK-NEXT: clrlwi 12, 0, 29
-; CHECK-NEXT: stw 11, 24(1)
-; CHECK-NEXT: nand 11, 0, 7
-; CHECK-NEXT: stw 10, 20(1)
-; CHECK-NEXT: subfic 29, 12, 32
-; CHECK-NEXT: stw 9, 16(1)
-; CHECK-NEXT: rlwinm 9, 0, 29, 28, 31
-; CHECK-NEXT: lwzux 10, 9, 8
-; CHECK-NEXT: clrlwi 11, 11, 27
-; CHECK-NEXT: lwz 0, 8(9)
-; CHECK-NEXT: slw 10, 10, 12
-; CHECK-NEXT: lwz 30, 4(9)
-; CHECK-NEXT: lwz 9, 12(9)
-; CHECK-NEXT: slw 28, 30, 12
-; CHECK-NEXT: srw 30, 30, 29
-; CHECK-NEXT: srw 29, 9, 29
-; CHECK-NEXT: slw 9, 9, 12
-; CHECK-NEXT: slw 12, 0, 12
-; CHECK-NEXT: srwi 0, 0, 1
-; CHECK-NEXT: stw 9, 12(3)
-; CHECK-NEXT: or 9, 12, 29
-; CHECK-NEXT: srw 11, 0, 11
-; CHECK-NEXT: stw 9, 8(3)
-; CHECK-NEXT: or 9, 10, 30
-; CHECK-NEXT: stw 9, 0(3)
-; CHECK-NEXT: or 9, 28, 11
-; CHECK-NEXT: stw 9, 4(3)
+; CHECK-NEXT: stw 11, 28(1)
+; CHECK-NEXT: stw 10, 24(1)
+; CHECK-NEXT: clrlwi 10, 12, 27
+; CHECK-NEXT: stw 9, 20(1)
+; CHECK-NEXT: stw 8, 16(1)
+; CHECK-NEXT: rlwinm 8, 12, 29, 28, 29
+; CHECK-NEXT: lwzux 9, 8, 7
+; CHECK-NEXT: subfic 12, 10, 32
+; CHECK-NEXT: lwz 11, 8(8)
+; CHECK-NEXT: slw 9, 9, 10
+; CHECK-NEXT: lwz 0, 4(8)
+; CHECK-NEXT: lwz 8, 12(8)
+; CHECK-NEXT: srw 30, 11, 12
+; CHECK-NEXT: slw 29, 0, 10
+; CHECK-NEXT: srw 0, 0, 12
+; CHECK-NEXT: srw 12, 8, 12
+; CHECK-NEXT: slw 11, 11, 10
+; CHECK-NEXT: slw 8, 8, 10
+; CHECK-NEXT: stw 8, 12(3)
+; CHECK-NEXT: or 8, 11, 12
+; CHECK-NEXT: stw 8, 8(3)
+; CHECK-NEXT: or 8, 9, 0
+; CHECK-NEXT: stw 8, 0(3)
+; CHECK-NEXT: or 8, 29, 30
+; CHECK-NEXT: stw 8, 4(3)
; CHECK-NEXT: bdnz .LBB0_1
; CHECK-NEXT: # %bb.2: # %for.end
; CHECK-NEXT: lwz 30, 56(1) # 4-byte Folded Reload
; CHECK-NEXT: lwz 29, 52(1) # 4-byte Folded Reload
-; CHECK-NEXT: lwz 28, 48(1) # 4-byte Folded Reload
; CHECK-NEXT: addi 1, 1, 64
; CHECK-NEXT: blr
entry:
@@ -83,59 +77,53 @@ for.end: ; preds = %for.body
define void @foo2(ptr %a, ptr readonly %b, ptr readonly %c) #0 {
; CHECK-LABEL: foo2:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: stwu 1, -64(1)
-; CHECK-NEXT: stw 29, 52(1) # 4-byte Folded Spill
-; CHECK-NEXT: li 7, 2048
-; CHECK-NEXT: stw 30, 56(1) # 4-byte Folded Spill
-; CHECK-NEXT: li 6, 7
-; CHECK-NEXT: mtctr 7
-; CHECK-NEXT: addi 7, 1, 36
+; CHECK-NEXT: stwu 1, -48(1)
+; CHECK-NEXT: stw 30, 40(1) # 4-byte Folded Spill
+; CHECK-NEXT: li 6, 2048
+; CHECK-NEXT: mtctr 6
+; CHECK-NEXT: addi 6, 1, 24
; CHECK-NEXT: .LBB1_1: # %for.body
; CHECK-NEXT: #
-; CHECK-NEXT: lwz 8, 0(4)
-; CHECK-NEXT: lwz 10, 8(4)
-; CHECK-NEXT: lwz 12, 12(5)
-; CHECK-NEXT: lwz 9, 4(4)
-; CHECK-NEXT: lwz 11, 12(4)
-; CHECK-NEXT: stw 10, 44(1)
-; CHECK-NEXT: rlwinm 10, 12, 29, 28, 31
-; CHECK-NEXT: stw 8, 36(1)
-; CHECK-NEXT: srawi 8, 8, 31
-; CHECK-NEXT: stw 11, 48(1)
-; CHECK-NEXT: clrlwi 11, 12, 29
-; CHECK-NEXT: stw 9, 40(1)
-; CHECK-NEXT: nand 9, 12, 6
-; CHECK-NEXT: stw 8, 32(1)
-; CHECK-NEXT: subfic 30, 11, 32
+; CHECK-NEXT: lwz 7, 0(4)
+; CHECK-NEXT: lwz 8, 4(4)
+; CHECK-NEXT: lwz 11, 12(5)
+; CHECK-NEXT: lwz 9, 8(4)
+; CHECK-NEXT: lwz 10, 12(4)
; CHECK-NEXT: stw 8, 28(1)
-; CHECK-NEXT: clrlwi 9, 9, 27
-; CHECK-NEXT: stw 8, 24(1)
-; CHECK-NEXT: stw 8, 20(1)
-; CHECK-NEXT: sub 8, 7, 10
-; CHECK-NEXT: lwz 10, 4(8)
-; CHECK-NEXT: lwz 12, 8(8)
-; CHECK-NEXT: lwz 0, 0(8)
-; CHECK-NEXT: lwz 8, 12(8)
-; CHECK-NEXT: srw 29, 12, 11
-; CHECK-NEXT: slw 12, 12, 30
-; CHECK-NEXT: slw 30, 0, 30
-; CHECK-NEXT: srw 8, 8, 11
-; CHECK-NEXT: sraw 0, 0, 11
-; CHECK-NEXT: srw 11, 10, 11
-; CHECK-NEXT: slwi 10, 10, 1
-; CHECK-NEXT: or 8, 12, 8
-; CHECK-NEXT: slw 9, 10, 9
-; CHECK-NEXT: stw 8, 12(3)
-; CHECK-NEXT: or 8, 30, 11
-; CHECK-NEXT: stw 8, 4(3)
-; CHECK-NEXT: or 8, 29, 9
-; CHECK-NEXT: stw 0, 0(3)
-; CHECK-NEXT: stw 8, 8(3)
+; CHECK-NEXT: rlwinm 8, 11, 29, 28, 29
+; CHECK-NEXT: stw 7, 24(1)
+; CHECK-NEXT: srawi 7, 7, 31
+; CHECK-NEXT: stw 10, 36(1)
+; CHECK-NEXT: clrlwi 10, 11, 27
+; CHECK-NEXT: stw 9, 32(1)
+; CHECK-NEXT: subfic 12, 10, 32
+; CHECK-NEXT: stw 7, 20(1)
+; CHECK-NEXT: stw 7, 16(1)
+; CHECK-NEXT: stw 7, 12(1)
+; CHECK-NEXT: stw 7, 8(1)
+; CHECK-NEXT: sub 7, 6, 8
+; CHECK-NEXT: lwz 8, 4(7)
+; CHECK-NEXT: lwz 9, 0(7)
+; CHECK-NEXT: lwz 11, 12(7)
+; CHECK-NEXT: srw 0, 8, 10
+; CHECK-NEXT: lwz 7, 8(7)
+; CHECK-NEXT: slw 30, 9, 12
+; CHECK-NEXT: slw 8, 8, 12
+; CHECK-NEXT: srw 11, 11, 10
+; CHECK-NEXT: slw 12, 7, 12
+; CHECK-NEXT: srw 7, 7, 10
+; CHECK-NEXT: or 7, 8, 7
+; CHECK-NEXT: stw 7, 8(3)
+; CHECK-NEXT: or 7, 12, 11
+; CHECK-NEXT: sraw 9, 9, 10
+; CHECK-NEXT: stw 7, 12(3)
+; CHECK-NEXT: or 7, 30, 0
+; CHECK-NEXT: stw 9, 0(3)
+; CHECK-NEXT: stw 7, 4(3)
; CHECK-NEXT: bdnz .LBB1_1
; CHECK-NEXT: # %bb.2: # %for.end
-; CHECK-NEXT: lwz 30, 56(1) # 4-byte Folded Reload
-; CHECK-NEXT: lwz 29, 52(1) # 4-byte Folded Reload
-; CHECK-NEXT: addi 1, 1, 64
+; CHECK-NEXT: lwz 30, 40(1) # 4-byte Folded Reload
+; CHECK-NEXT: addi 1, 1, 48
; CHECK-NEXT: blr
entry:
br label %for.body
@@ -159,59 +147,53 @@ define void @foo3(ptr %a, ptr readonly %b, ptr readonly %c) #0 {
; CHECK-LABEL: foo3:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stwu 1, -64(1)
-; CHECK-NEXT: stw 28, 48(1) # 4-byte Folded Spill
-; CHECK-NEXT: li 8, 2048
; CHECK-NEXT: stw 29, 52(1) # 4-byte Folded Spill
-; CHECK-NEXT: li 6, 0
+; CHECK-NEXT: li 7, 2048
; CHECK-NEXT: stw 30, 56(1) # 4-byte Folded Spill
-; CHECK-NEXT: li 7, 7
-; CHECK-NEXT: mtctr 8
-; CHECK-NEXT: addi 8, 1, 32
+; CHECK-NEXT: li 6, 0
+; CHECK-NEXT: mtctr 7
+; CHECK-NEXT: addi 7, 1, 32
; CHECK-NEXT: .LBB2_1: # %for.body
; CHECK-NEXT: #
-; CHECK-NEXT: lwz 10, 4(4)
-; CHECK-NEXT: lwz 0, 12(5)
-; CHECK-NEXT: lwz 9, 0(4)
-; CHECK-NEXT: lwz 11, 8(4)
-; CHECK-NEXT: lwz 12, 12(4)
-; CHECK-NEXT: stw 10, 36(1)
-; CHECK-NEXT: rlwinm 10, 0, 29, 28, 31
+; CHECK-NEXT: lwz 8, 0(4)
+; CHECK-NEXT: lwz 12, 12(5)
+; CHECK-NEXT: lwz 9, 4(4)
+; CHECK-NEXT: lwz 10, 8(4)
+; CHECK-NEXT: lwz 11, 12(4)
+; CHECK-NEXT: stw 8, 32(1)
+; CHECK-NEXT: rlwinm 8, 12, 29, 28, 29
; CHECK-NEXT: stw 6, 28(1)
-; CHECK-NEXT: sub 10, 8, 10
+; CHECK-NEXT: sub 8, 7, 8
; CHECK-NEXT: stw 6, 24(1)
; CHECK-NEXT: stw 6, 20(1)
; CHECK-NEXT: stw 6, 16(1)
-; CHECK-NEXT: stw 12, 44(1)
-; CHECK-NEXT: clrlwi 12, 0, 29
-; CHECK-NEXT: stw 11, 40(1)
-; CHECK-NEXT: subfic 29, 12, 32
-; CHECK-NEXT: stw 9, 32(1)
-; CHECK-NEXT: nand 9, 0, 7
-; CHECK-NEXT: lwz 11, 4(10)
-; CHECK-NEXT: clrlwi 9, 9, 27
-; CHECK-NEXT: lwz 0, 8(10)
-; CHECK-NEXT: lwz 30, 0(10)
-; CHECK-NEXT: lwz 10, 12(10)
-; CHECK-NEXT: srw 28, 0, 12
-; CHECK-NEXT: slw 0, 0, 29
-; CHECK-NEXT: slw 29, 30, 29
-; CHECK-NEXT: srw 10, 10, 12
-; CHECK-NEXT: srw 30, 30, 12
-; CHECK-NEXT: srw 12, 11, 12
-; CHECK-NEXT: slwi 11, 11, 1
-; CHECK-NEXT: slw 9, 11, 9
-; CHECK-NEXT: or 10, 0, 10
-; CHECK-NEXT: stw 10, 12(3)
-; CHECK-NEXT: or 10, 29, 12
-; CHECK-NEXT: or 9, 28, 9
-; CHECK-NEXT: stw 30, 0(3)
-; CHECK-NEXT: stw 10, 4(3)
-; CHECK-NEXT: stw 9, 8(3)
+; CHECK-NEXT: stw 11, 44(1)
+; CHECK-NEXT: clrlwi 11, 12, 27
+; CHECK-NEXT: stw 10, 40(1)
+; CHECK-NEXT: subfic 0, 11, 32
+; CHECK-NEXT: stw 9, 36(1)
+; CHECK-NEXT: lwz 9, 4(8)
+; CHECK-NEXT: lwz 10, 0(8)
+; CHECK-NEXT: lwz 12, 12(8)
+; CHECK-NEXT: srw 30, 9, 11
+; CHECK-NEXT: lwz 8, 8(8)
+; CHECK-NEXT: slw 29, 10, 0
+; CHECK-NEXT: slw 9, 9, 0
+; CHECK-NEXT: srw 12, 12, 11
+; CHECK-NEXT: slw 0, 8, 0
+; CHECK-NEXT: srw 8, 8, 11
+; CHECK-NEXT: or 8, 9, 8
+; CHECK-NEXT: stw 8, 8(3)
+; CHECK-NEXT: or 8, 0, 12
+; CHECK-NEXT: srw 10, 10, 11
+; CHECK-NEXT: stw 8, 12(3)
+; CHECK-NEXT: or 8, 29, 30
+; CHECK-NEXT: stw 10, 0(3)
+; CHECK-NEXT: stw 8, 4(3)
; CHECK-NEXT: bdnz .LBB2_1
; CHECK-NEXT: # %bb.2: # %for.end
; CHECK-NEXT: lwz 30, 56(1) # 4-byte Folded Reload
; CHECK-NEXT: lwz 29, 52(1) # 4-byte Folded Reload
-; CHECK-NEXT: lwz 28, 48(1) # 4-byte Folded Reload
; CHECK-NEXT: addi 1, 1, 64
; CHECK-NEXT: blr
entry:
diff --git a/llvm/test/CodeGen/PowerPC/pr59074.ll b/llvm/test/CodeGen/PowerPC/pr59074.ll
index 3e328c6..d3ca113 100644
--- a/llvm/test/CodeGen/PowerPC/pr59074.ll
+++ b/llvm/test/CodeGen/PowerPC/pr59074.ll
@@ -32,37 +32,36 @@ define void @pr59074(ptr %0) {
; LE32-NEXT: li 7, 0
; LE32-NEXT: li 8, 12
; LE32-NEXT: xxswapd 0, 0
+; LE32-NEXT: rlwimi 5, 6, 0, 30, 28
; LE32-NEXT: addi 4, 4, -12
-; LE32-NEXT: rlwinm 9, 4, 29, 28, 31
-; LE32-NEXT: stxvd2x 0, 6, 5
+; LE32-NEXT: rlwinm 9, 4, 29, 28, 29
+; LE32-NEXT: stxvd2x 0, 0, 5
; LE32-NEXT: stw 7, 44(1)
; LE32-NEXT: stw 7, 40(1)
; LE32-NEXT: stw 7, 36(1)
; LE32-NEXT: stw 8, 16(1)
+; LE32-NEXT: clrlwi 4, 4, 27
; LE32-NEXT: lwzux 5, 9, 6
-; LE32-NEXT: li 6, 7
-; LE32-NEXT: lwz 7, 8(9)
-; LE32-NEXT: nand 6, 4, 6
-; LE32-NEXT: lwz 8, 4(9)
-; LE32-NEXT: clrlwi 4, 4, 29
-; LE32-NEXT: lwz 9, 12(9)
-; LE32-NEXT: clrlwi 6, 6, 27
+; LE32-NEXT: lwz 6, 8(9)
+; LE32-NEXT: lwz 7, 4(9)
+; LE32-NEXT: lwz 8, 12(9)
+; LE32-NEXT: xori 9, 4, 31
; LE32-NEXT: subfic 11, 4, 32
; LE32-NEXT: srw 5, 5, 4
-; LE32-NEXT: slwi 10, 7, 1
-; LE32-NEXT: srw 7, 7, 4
-; LE32-NEXT: slw 6, 10, 6
-; LE32-NEXT: srw 10, 8, 4
-; LE32-NEXT: slw 8, 8, 11
-; LE32-NEXT: slw 11, 9, 11
-; LE32-NEXT: srw 4, 9, 4
-; LE32-NEXT: or 5, 8, 5
-; LE32-NEXT: or 7, 11, 7
-; LE32-NEXT: or 6, 10, 6
+; LE32-NEXT: slwi 10, 6, 1
+; LE32-NEXT: srw 6, 6, 4
+; LE32-NEXT: slw 9, 10, 9
+; LE32-NEXT: srw 10, 7, 4
+; LE32-NEXT: slw 7, 7, 11
+; LE32-NEXT: slw 11, 8, 11
+; LE32-NEXT: srw 4, 8, 4
+; LE32-NEXT: or 5, 7, 5
+; LE32-NEXT: or 6, 11, 6
+; LE32-NEXT: or 7, 10, 9
; LE32-NEXT: stw 4, 12(3)
-; LE32-NEXT: stw 7, 8(3)
+; LE32-NEXT: stw 6, 8(3)
; LE32-NEXT: stw 5, 0(3)
-; LE32-NEXT: stw 6, 4(3)
+; LE32-NEXT: stw 7, 4(3)
; LE32-NEXT: addi 1, 1, 80
; LE32-NEXT: blr
;
@@ -89,37 +88,33 @@ define void @pr59074(ptr %0) {
; BE32-NEXT: li 6, 12
; BE32-NEXT: li 7, 0
; BE32-NEXT: addi 8, 1, -48
-; BE32-NEXT: li 10, 7
; BE32-NEXT: stxvw4x 0, 0, 5
-; BE32-NEXT: addi 4, 4, -12
; BE32-NEXT: stw 6, -36(1)
+; BE32-NEXT: addi 4, 4, -12
; BE32-NEXT: stw 7, -40(1)
; BE32-NEXT: stw 7, -44(1)
-; BE32-NEXT: rlwinm 9, 4, 29, 28, 31
; BE32-NEXT: stw 7, -48(1)
+; BE32-NEXT: rlwinm 9, 4, 29, 28, 29
+; BE32-NEXT: clrlwi 4, 4, 27
; BE32-NEXT: sub 5, 8, 9
-; BE32-NEXT: nand 6, 4, 10
-; BE32-NEXT: clrlwi 4, 4, 29
-; BE32-NEXT: clrlwi 6, 6, 27
-; BE32-NEXT: lwz 7, 4(5)
-; BE32-NEXT: lwz 8, 8(5)
-; BE32-NEXT: lwz 9, 0(5)
-; BE32-NEXT: lwz 5, 12(5)
-; BE32-NEXT: slwi 10, 7, 1
-; BE32-NEXT: srw 11, 8, 4
-; BE32-NEXT: srw 7, 7, 4
-; BE32-NEXT: srw 5, 5, 4
-; BE32-NEXT: slw 6, 10, 6
+; BE32-NEXT: lwz 6, 4(5)
+; BE32-NEXT: lwz 7, 0(5)
+; BE32-NEXT: lwz 8, 12(5)
+; BE32-NEXT: lwz 5, 8(5)
; BE32-NEXT: subfic 10, 4, 32
-; BE32-NEXT: srw 4, 9, 4
-; BE32-NEXT: slw 8, 8, 10
-; BE32-NEXT: slw 10, 9, 10
-; BE32-NEXT: or 6, 11, 6
-; BE32-NEXT: or 7, 10, 7
-; BE32-NEXT: or 5, 8, 5
+; BE32-NEXT: srw 9, 6, 4
+; BE32-NEXT: slw 11, 7, 10
+; BE32-NEXT: srw 8, 8, 4
+; BE32-NEXT: slw 6, 6, 10
+; BE32-NEXT: slw 10, 5, 10
+; BE32-NEXT: srw 5, 5, 4
+; BE32-NEXT: srw 4, 7, 4
+; BE32-NEXT: or 7, 11, 9
+; BE32-NEXT: or 8, 10, 8
+; BE32-NEXT: or 5, 6, 5
; BE32-NEXT: stw 4, 0(3)
-; BE32-NEXT: stw 6, 8(3)
-; BE32-NEXT: stw 5, 12(3)
+; BE32-NEXT: stw 5, 8(3)
+; BE32-NEXT: stw 8, 12(3)
; BE32-NEXT: stw 7, 4(3)
; BE32-NEXT: blr
entry:
diff --git a/llvm/test/CodeGen/PowerPC/wide-scalar-shift-by-byte-multiple-legalization.ll b/llvm/test/CodeGen/PowerPC/wide-scalar-shift-by-byte-multiple-legalization.ll
index f6fdb4a..4f1b7bd 100644
--- a/llvm/test/CodeGen/PowerPC/wide-scalar-shift-by-byte-multiple-legalization.ll
+++ b/llvm/test/CodeGen/PowerPC/wide-scalar-shift-by-byte-multiple-legalization.ll
@@ -233,9 +233,96 @@ define void @lshr_16bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; LE-32BIT-NEXT: lwz 9, 8(3)
; LE-32BIT-NEXT: lwz 3, 12(3)
; LE-32BIT-NEXT: lwz 4, 12(4)
+; LE-32BIT-NEXT: stw 6, 28(1)
+; LE-32BIT-NEXT: stw 6, 24(1)
+; LE-32BIT-NEXT: stw 6, 20(1)
+; LE-32BIT-NEXT: stw 6, 16(1)
+; LE-32BIT-NEXT: rlwinm 6, 4, 0, 28, 29
; LE-32BIT-NEXT: stw 3, 44(1)
; LE-32BIT-NEXT: addi 3, 1, 32
-; LE-32BIT-NEXT: clrlwi 4, 4, 28
+; LE-32BIT-NEXT: stw 9, 40(1)
+; LE-32BIT-NEXT: sub 3, 3, 6
+; LE-32BIT-NEXT: stw 8, 36(1)
+; LE-32BIT-NEXT: rlwinm 4, 4, 3, 27, 28
+; LE-32BIT-NEXT: stw 7, 32(1)
+; LE-32BIT-NEXT: subfic 9, 4, 32
+; LE-32BIT-NEXT: lwz 6, 4(3)
+; LE-32BIT-NEXT: lwz 7, 0(3)
+; LE-32BIT-NEXT: lwz 8, 12(3)
+; LE-32BIT-NEXT: srw 10, 6, 4
+; LE-32BIT-NEXT: lwz 3, 8(3)
+; LE-32BIT-NEXT: slw 11, 7, 9
+; LE-32BIT-NEXT: slw 6, 6, 9
+; LE-32BIT-NEXT: srw 8, 8, 4
+; LE-32BIT-NEXT: slw 9, 3, 9
+; LE-32BIT-NEXT: srw 3, 3, 4
+; LE-32BIT-NEXT: or 3, 6, 3
+; LE-32BIT-NEXT: stw 3, 8(5)
+; LE-32BIT-NEXT: or 3, 9, 8
+; LE-32BIT-NEXT: srw 4, 7, 4
+; LE-32BIT-NEXT: stw 3, 12(5)
+; LE-32BIT-NEXT: or 3, 11, 10
+; LE-32BIT-NEXT: stw 4, 0(5)
+; LE-32BIT-NEXT: stw 3, 4(5)
+; LE-32BIT-NEXT: addi 1, 1, 48
+; LE-32BIT-NEXT: blr
+ %src = load i128, ptr %src.ptr, align 1
+ %byteOff = load i128, ptr %byteOff.ptr, align 1
+ %bitOff = shl i128 %byteOff, 3
+ %res = lshr i128 %src, %bitOff
+ store i128 %res, ptr %dst, align 1
+ ret void
+}
+
+define void @lshr_16bytes_wordOff(ptr %src.ptr, ptr %wordOff.ptr, ptr %dst) nounwind {
+; LE-64BIT-LABEL: lshr_16bytes_wordOff:
+; LE-64BIT: # %bb.0:
+; LE-64BIT-NEXT: lwz 4, 0(4)
+; LE-64BIT-NEXT: ld 6, 8(3)
+; LE-64BIT-NEXT: ld 3, 0(3)
+; LE-64BIT-NEXT: slwi 4, 4, 5
+; LE-64BIT-NEXT: subfic 7, 4, 64
+; LE-64BIT-NEXT: srd 3, 3, 4
+; LE-64BIT-NEXT: sld 7, 6, 7
+; LE-64BIT-NEXT: or 3, 3, 7
+; LE-64BIT-NEXT: addi 7, 4, -64
+; LE-64BIT-NEXT: srd 4, 6, 4
+; LE-64BIT-NEXT: srd 7, 6, 7
+; LE-64BIT-NEXT: std 4, 8(5)
+; LE-64BIT-NEXT: or 3, 3, 7
+; LE-64BIT-NEXT: std 3, 0(5)
+; LE-64BIT-NEXT: blr
+;
+; BE-LABEL: lshr_16bytes_wordOff:
+; BE: # %bb.0:
+; BE-NEXT: lwz 4, 12(4)
+; BE-NEXT: ld 6, 0(3)
+; BE-NEXT: ld 3, 8(3)
+; BE-NEXT: slwi 4, 4, 5
+; BE-NEXT: subfic 7, 4, 64
+; BE-NEXT: srd 3, 3, 4
+; BE-NEXT: sld 7, 6, 7
+; BE-NEXT: addi 8, 4, -64
+; BE-NEXT: or 3, 3, 7
+; BE-NEXT: srd 7, 6, 8
+; BE-NEXT: srd 4, 6, 4
+; BE-NEXT: or 3, 3, 7
+; BE-NEXT: std 4, 0(5)
+; BE-NEXT: std 3, 8(5)
+; BE-NEXT: blr
+;
+; LE-32BIT-LABEL: lshr_16bytes_wordOff:
+; LE-32BIT: # %bb.0:
+; LE-32BIT-NEXT: stwu 1, -48(1)
+; LE-32BIT-NEXT: lwz 7, 0(3)
+; LE-32BIT-NEXT: li 6, 0
+; LE-32BIT-NEXT: lwz 8, 4(3)
+; LE-32BIT-NEXT: lwz 9, 8(3)
+; LE-32BIT-NEXT: lwz 3, 12(3)
+; LE-32BIT-NEXT: lwz 4, 12(4)
+; LE-32BIT-NEXT: stw 3, 44(1)
+; LE-32BIT-NEXT: addi 3, 1, 32
+; LE-32BIT-NEXT: rlwinm 4, 4, 2, 28, 29
; LE-32BIT-NEXT: stw 6, 28(1)
; LE-32BIT-NEXT: sub 3, 3, 4
; LE-32BIT-NEXT: stw 6, 24(1)
@@ -255,12 +342,13 @@ define void @lshr_16bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; LE-32BIT-NEXT: addi 1, 1, 48
; LE-32BIT-NEXT: blr
%src = load i128, ptr %src.ptr, align 1
- %byteOff = load i128, ptr %byteOff.ptr, align 1
- %bitOff = shl i128 %byteOff, 3
+ %wordOff = load i128, ptr %wordOff.ptr, align 1
+ %bitOff = shl i128 %wordOff, 5
%res = lshr i128 %src, %bitOff
store i128 %res, ptr %dst, align 1
ret void
}
+
define void @shl_16bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; LE-64BIT-LABEL: shl_16bytes:
; LE-64BIT: # %bb.0:
@@ -309,7 +397,93 @@ define void @shl_16bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; LE-32BIT-NEXT: lwz 4, 12(4)
; LE-32BIT-NEXT: stw 6, 44(1)
; LE-32BIT-NEXT: stw 6, 40(1)
-; LE-32BIT-NEXT: clrlwi 4, 4, 28
+; LE-32BIT-NEXT: stw 6, 36(1)
+; LE-32BIT-NEXT: stw 6, 32(1)
+; LE-32BIT-NEXT: rlwinm 6, 4, 0, 28, 29
+; LE-32BIT-NEXT: stw 3, 28(1)
+; LE-32BIT-NEXT: addi 3, 1, 16
+; LE-32BIT-NEXT: stw 9, 24(1)
+; LE-32BIT-NEXT: rlwinm 4, 4, 3, 27, 28
+; LE-32BIT-NEXT: stw 8, 20(1)
+; LE-32BIT-NEXT: subfic 8, 4, 32
+; LE-32BIT-NEXT: stw 7, 16(1)
+; LE-32BIT-NEXT: lwzux 3, 6, 3
+; LE-32BIT-NEXT: lwz 9, 4(6)
+; LE-32BIT-NEXT: slw 3, 3, 4
+; LE-32BIT-NEXT: lwz 7, 8(6)
+; LE-32BIT-NEXT: lwz 6, 12(6)
+; LE-32BIT-NEXT: slw 11, 9, 4
+; LE-32BIT-NEXT: srw 9, 9, 8
+; LE-32BIT-NEXT: srw 10, 7, 8
+; LE-32BIT-NEXT: srw 8, 6, 8
+; LE-32BIT-NEXT: slw 7, 7, 4
+; LE-32BIT-NEXT: slw 4, 6, 4
+; LE-32BIT-NEXT: or 3, 3, 9
+; LE-32BIT-NEXT: stw 4, 12(5)
+; LE-32BIT-NEXT: or 4, 7, 8
+; LE-32BIT-NEXT: stw 3, 0(5)
+; LE-32BIT-NEXT: or 3, 11, 10
+; LE-32BIT-NEXT: stw 4, 8(5)
+; LE-32BIT-NEXT: stw 3, 4(5)
+; LE-32BIT-NEXT: addi 1, 1, 48
+; LE-32BIT-NEXT: blr
+ %src = load i128, ptr %src.ptr, align 1
+ %byteOff = load i128, ptr %byteOff.ptr, align 1
+ %bitOff = shl i128 %byteOff, 3
+ %res = shl i128 %src, %bitOff
+ store i128 %res, ptr %dst, align 1
+ ret void
+}
+
+define void @shl_16bytes_wordOff(ptr %src.ptr, ptr %wordOff.ptr, ptr %dst) nounwind {
+; LE-64BIT-LABEL: shl_16bytes_wordOff:
+; LE-64BIT: # %bb.0:
+; LE-64BIT-NEXT: lwz 4, 0(4)
+; LE-64BIT-NEXT: ld 6, 0(3)
+; LE-64BIT-NEXT: ld 3, 8(3)
+; LE-64BIT-NEXT: slwi 4, 4, 5
+; LE-64BIT-NEXT: subfic 7, 4, 64
+; LE-64BIT-NEXT: sld 3, 3, 4
+; LE-64BIT-NEXT: srd 7, 6, 7
+; LE-64BIT-NEXT: or 3, 3, 7
+; LE-64BIT-NEXT: addi 7, 4, -64
+; LE-64BIT-NEXT: sld 4, 6, 4
+; LE-64BIT-NEXT: sld 7, 6, 7
+; LE-64BIT-NEXT: std 4, 0(5)
+; LE-64BIT-NEXT: or 3, 3, 7
+; LE-64BIT-NEXT: std 3, 8(5)
+; LE-64BIT-NEXT: blr
+;
+; BE-LABEL: shl_16bytes_wordOff:
+; BE: # %bb.0:
+; BE-NEXT: lwz 4, 12(4)
+; BE-NEXT: ld 6, 8(3)
+; BE-NEXT: ld 3, 0(3)
+; BE-NEXT: slwi 4, 4, 5
+; BE-NEXT: subfic 7, 4, 64
+; BE-NEXT: sld 3, 3, 4
+; BE-NEXT: srd 7, 6, 7
+; BE-NEXT: addi 8, 4, -64
+; BE-NEXT: or 3, 3, 7
+; BE-NEXT: sld 7, 6, 8
+; BE-NEXT: sld 4, 6, 4
+; BE-NEXT: or 3, 3, 7
+; BE-NEXT: std 4, 8(5)
+; BE-NEXT: std 3, 0(5)
+; BE-NEXT: blr
+;
+; LE-32BIT-LABEL: shl_16bytes_wordOff:
+; LE-32BIT: # %bb.0:
+; LE-32BIT-NEXT: stwu 1, -48(1)
+; LE-32BIT-NEXT: lwz 7, 0(3)
+; LE-32BIT-NEXT: li 6, 0
+; LE-32BIT-NEXT: lwz 8, 4(3)
+; LE-32BIT-NEXT: lwz 9, 8(3)
+; LE-32BIT-NEXT: lwz 3, 12(3)
+; LE-32BIT-NEXT: lwz 4, 12(4)
+; LE-32BIT-NEXT: stw 6, 44(1)
+; LE-32BIT-NEXT: stw 6, 40(1)
+; LE-32BIT-NEXT: rlwinm 4, 4, 2, 28, 29
; LE-32BIT-NEXT: stw 6, 36(1)
; LE-32BIT-NEXT: stw 6, 32(1)
; LE-32BIT-NEXT: stw 3, 28(1)
@@ -328,12 +502,13 @@ define void @shl_16bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; LE-32BIT-NEXT: addi 1, 1, 48
; LE-32BIT-NEXT: blr
%src = load i128, ptr %src.ptr, align 1
- %byteOff = load i128, ptr %byteOff.ptr, align 1
- %bitOff = shl i128 %byteOff, 3
+ %wordOff = load i128, ptr %wordOff.ptr, align 1
+ %bitOff = shl i128 %wordOff, 5
%res = shl i128 %src, %bitOff
store i128 %res, ptr %dst, align 1
ret void
}
+
define void @ashr_16bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; LE-64BIT-LABEL: ashr_16bytes:
; LE-64BIT: # %bb.0:
@@ -361,17 +536,17 @@ define void @ashr_16bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; BE-NEXT: slwi 4, 4, 3
; BE-NEXT: addi 7, 4, -64
; BE-NEXT: cmpwi 7, 1
-; BE-NEXT: blt 0, .LBB8_2
+; BE-NEXT: blt 0, .LBB10_2
; BE-NEXT: # %bb.1:
; BE-NEXT: srad 3, 6, 7
-; BE-NEXT: b .LBB8_3
-; BE-NEXT: .LBB8_2:
+; BE-NEXT: b .LBB10_3
+; BE-NEXT: .LBB10_2:
; BE-NEXT: ld 3, 8(3)
; BE-NEXT: subfic 7, 4, 64
; BE-NEXT: sld 7, 6, 7
; BE-NEXT: srd 3, 3, 4
; BE-NEXT: or 3, 3, 7
-; BE-NEXT: .LBB8_3:
+; BE-NEXT: .LBB10_3:
; BE-NEXT: srad 4, 6, 4
; BE-NEXT: std 3, 8(5)
; BE-NEXT: std 4, 0(5)
@@ -388,7 +563,100 @@ define void @ashr_16bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; LE-32BIT-NEXT: lwz 4, 12(4)
; LE-32BIT-NEXT: stw 3, 44(1)
; LE-32BIT-NEXT: srawi 3, 7, 31
-; LE-32BIT-NEXT: clrlwi 4, 4, 28
+; LE-32BIT-NEXT: stw 7, 32(1)
+; LE-32BIT-NEXT: rlwinm 7, 4, 0, 28, 29
+; LE-32BIT-NEXT: stw 9, 40(1)
+; LE-32BIT-NEXT: rlwinm 4, 4, 3, 27, 28
+; LE-32BIT-NEXT: stw 8, 36(1)
+; LE-32BIT-NEXT: subfic 9, 4, 32
+; LE-32BIT-NEXT: stw 3, 28(1)
+; LE-32BIT-NEXT: stw 3, 24(1)
+; LE-32BIT-NEXT: stw 3, 20(1)
+; LE-32BIT-NEXT: stw 3, 16(1)
+; LE-32BIT-NEXT: sub 3, 6, 7
+; LE-32BIT-NEXT: lwz 6, 4(3)
+; LE-32BIT-NEXT: lwz 7, 0(3)
+; LE-32BIT-NEXT: lwz 8, 12(3)
+; LE-32BIT-NEXT: srw 10, 6, 4
+; LE-32BIT-NEXT: lwz 3, 8(3)
+; LE-32BIT-NEXT: slw 11, 7, 9
+; LE-32BIT-NEXT: slw 6, 6, 9
+; LE-32BIT-NEXT: srw 8, 8, 4
+; LE-32BIT-NEXT: slw 9, 3, 9
+; LE-32BIT-NEXT: srw 3, 3, 4
+; LE-32BIT-NEXT: or 3, 6, 3
+; LE-32BIT-NEXT: stw 3, 8(5)
+; LE-32BIT-NEXT: or 3, 9, 8
+; LE-32BIT-NEXT: sraw 4, 7, 4
+; LE-32BIT-NEXT: stw 3, 12(5)
+; LE-32BIT-NEXT: or 3, 11, 10
+; LE-32BIT-NEXT: stw 4, 0(5)
+; LE-32BIT-NEXT: stw 3, 4(5)
+; LE-32BIT-NEXT: addi 1, 1, 48
+; LE-32BIT-NEXT: blr
+ %src = load i128, ptr %src.ptr, align 1
+ %byteOff = load i128, ptr %byteOff.ptr, align 1
+ %bitOff = shl i128 %byteOff, 3
+ %res = ashr i128 %src, %bitOff
+ store i128 %res, ptr %dst, align 1
+ ret void
+}
+
+define void @ashr_16bytes_wordOff(ptr %src.ptr, ptr %wordOff.ptr, ptr %dst) nounwind {
+; LE-64BIT-LABEL: ashr_16bytes_wordOff:
+; LE-64BIT: # %bb.0:
+; LE-64BIT-NEXT: lwz 4, 0(4)
+; LE-64BIT-NEXT: ld 6, 8(3)
+; LE-64BIT-NEXT: ld 3, 0(3)
+; LE-64BIT-NEXT: slwi 4, 4, 5
+; LE-64BIT-NEXT: subfic 7, 4, 64
+; LE-64BIT-NEXT: srd 3, 3, 4
+; LE-64BIT-NEXT: sld 7, 6, 7
+; LE-64BIT-NEXT: or 3, 3, 7
+; LE-64BIT-NEXT: addi 7, 4, -64
+; LE-64BIT-NEXT: srad 4, 6, 4
+; LE-64BIT-NEXT: cmpwi 7, 1
+; LE-64BIT-NEXT: srad 8, 6, 7
+; LE-64BIT-NEXT: std 4, 8(5)
+; LE-64BIT-NEXT: isellt 3, 3, 8
+; LE-64BIT-NEXT: std 3, 0(5)
+; LE-64BIT-NEXT: blr
+;
+; BE-LABEL: ashr_16bytes_wordOff:
+; BE: # %bb.0:
+; BE-NEXT: lwz 4, 12(4)
+; BE-NEXT: ld 6, 0(3)
+; BE-NEXT: slwi 4, 4, 5
+; BE-NEXT: addi 7, 4, -64
+; BE-NEXT: cmpwi 7, 1
+; BE-NEXT: blt 0, .LBB11_2
+; BE-NEXT: # %bb.1:
+; BE-NEXT: srad 3, 6, 7
+; BE-NEXT: b .LBB11_3
+; BE-NEXT: .LBB11_2:
+; BE-NEXT: ld 3, 8(3)
+; BE-NEXT: subfic 7, 4, 64
+; BE-NEXT: sld 7, 6, 7
+; BE-NEXT: srd 3, 3, 4
+; BE-NEXT: or 3, 3, 7
+; BE-NEXT: .LBB11_3:
+; BE-NEXT: srad 4, 6, 4
+; BE-NEXT: std 3, 8(5)
+; BE-NEXT: std 4, 0(5)
+; BE-NEXT: blr
+;
+; LE-32BIT-LABEL: ashr_16bytes_wordOff:
+; LE-32BIT: # %bb.0:
+; LE-32BIT-NEXT: stwu 1, -48(1)
+; LE-32BIT-NEXT: lwz 7, 0(3)
+; LE-32BIT-NEXT: addi 6, 1, 32
+; LE-32BIT-NEXT: lwz 8, 4(3)
+; LE-32BIT-NEXT: lwz 9, 8(3)
+; LE-32BIT-NEXT: lwz 3, 12(3)
+; LE-32BIT-NEXT: lwz 4, 12(4)
+; LE-32BIT-NEXT: stw 3, 44(1)
+; LE-32BIT-NEXT: srawi 3, 7, 31
+; LE-32BIT-NEXT: rlwinm 4, 4, 2, 28, 29
; LE-32BIT-NEXT: stw 9, 40(1)
; LE-32BIT-NEXT: stw 8, 36(1)
; LE-32BIT-NEXT: stw 7, 32(1)
@@ -408,8 +676,8 @@ define void @ashr_16bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; LE-32BIT-NEXT: addi 1, 1, 48
; LE-32BIT-NEXT: blr
%src = load i128, ptr %src.ptr, align 1
- %byteOff = load i128, ptr %byteOff.ptr, align 1
- %bitOff = shl i128 %byteOff, 3
+ %wordOff = load i128, ptr %wordOff.ptr, align 1
+ %bitOff = shl i128 %wordOff, 5
%res = ashr i128 %src, %bitOff
store i128 %res, ptr %dst, align 1
ret void
@@ -422,12 +690,324 @@ define void @lshr_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; LE-64BIT-NEXT: lxvd2x 1, 0, 3
; LE-64BIT-NEXT: xxlxor 2, 2, 2
; LE-64BIT-NEXT: addi 7, 1, -64
+; LE-64BIT-NEXT: li 8, 32
+; LE-64BIT-NEXT: lxvd2x 0, 3, 6
+; LE-64BIT-NEXT: lwz 3, 0(4)
+; LE-64BIT-NEXT: li 4, 48
+; LE-64BIT-NEXT: stxvd2x 2, 7, 4
+; LE-64BIT-NEXT: stxvd2x 2, 7, 8
+; LE-64BIT-NEXT: rlwinm 4, 3, 0, 27, 28
+; LE-64BIT-NEXT: rlwinm 3, 3, 3, 26, 28
+; LE-64BIT-NEXT: stxvd2x 0, 7, 6
+; LE-64BIT-NEXT: stxvd2x 1, 0, 7
+; LE-64BIT-NEXT: ldux 6, 4, 7
+; LE-64BIT-NEXT: subfic 7, 3, 64
+; LE-64BIT-NEXT: ld 8, 8(4)
+; LE-64BIT-NEXT: ld 9, 16(4)
+; LE-64BIT-NEXT: ld 4, 24(4)
+; LE-64BIT-NEXT: srd 6, 6, 3
+; LE-64BIT-NEXT: sld 10, 8, 7
+; LE-64BIT-NEXT: sld 11, 4, 7
+; LE-64BIT-NEXT: srd 8, 8, 3
+; LE-64BIT-NEXT: sld 7, 9, 7
+; LE-64BIT-NEXT: or 6, 10, 6
+; LE-64BIT-NEXT: srd 10, 9, 3
+; LE-64BIT-NEXT: srd 3, 4, 3
+; LE-64BIT-NEXT: or 7, 7, 8
+; LE-64BIT-NEXT: std 3, 24(5)
+; LE-64BIT-NEXT: or 3, 11, 10
+; LE-64BIT-NEXT: std 7, 8(5)
+; LE-64BIT-NEXT: std 6, 0(5)
+; LE-64BIT-NEXT: std 3, 16(5)
+; LE-64BIT-NEXT: blr
+;
+; BE-LABEL: lshr_32bytes:
+; BE: # %bb.0:
+; BE-NEXT: ld 6, 0(3)
+; BE-NEXT: ld 7, 8(3)
+; BE-NEXT: ld 8, 16(3)
+; BE-NEXT: ld 3, 24(3)
+; BE-NEXT: lwz 4, 28(4)
+; BE-NEXT: li 9, 0
+; BE-NEXT: addi 10, 1, -32
+; BE-NEXT: std 9, -40(1)
+; BE-NEXT: std 9, -48(1)
+; BE-NEXT: std 9, -56(1)
+; BE-NEXT: std 9, -64(1)
+; BE-NEXT: std 3, -8(1)
+; BE-NEXT: rlwinm 3, 4, 0, 27, 28
+; BE-NEXT: neg 3, 3
+; BE-NEXT: std 8, -16(1)
+; BE-NEXT: std 7, -24(1)
+; BE-NEXT: std 6, -32(1)
+; BE-NEXT: extsw 3, 3
+; BE-NEXT: ldux 3, 10, 3
+; BE-NEXT: rlwinm 4, 4, 3, 26, 28
+; BE-NEXT: subfic 9, 4, 64
+; BE-NEXT: ld 6, 8(10)
+; BE-NEXT: ld 7, 24(10)
+; BE-NEXT: ld 8, 16(10)
+; BE-NEXT: sld 10, 3, 9
+; BE-NEXT: srd 3, 3, 4
+; BE-NEXT: std 3, 0(5)
+; BE-NEXT: srd 11, 6, 4
+; BE-NEXT: srd 7, 7, 4
+; BE-NEXT: sld 6, 6, 9
+; BE-NEXT: sld 9, 8, 9
+; BE-NEXT: srd 8, 8, 4
+; BE-NEXT: or 10, 10, 11
+; BE-NEXT: or 7, 9, 7
+; BE-NEXT: or 6, 6, 8
+; BE-NEXT: std 6, 16(5)
+; BE-NEXT: std 7, 24(5)
+; BE-NEXT: std 10, 8(5)
+; BE-NEXT: blr
+;
+; LE-32BIT-LABEL: lshr_32bytes:
+; LE-32BIT: # %bb.0:
+; LE-32BIT-NEXT: stwu 1, -112(1)
+; LE-32BIT-NEXT: lwz 7, 0(3)
+; LE-32BIT-NEXT: li 6, 0
+; LE-32BIT-NEXT: lwz 8, 4(3)
+; LE-32BIT-NEXT: lwz 9, 8(3)
+; LE-32BIT-NEXT: lwz 10, 12(3)
+; LE-32BIT-NEXT: lwz 11, 16(3)
+; LE-32BIT-NEXT: lwz 12, 20(3)
+; LE-32BIT-NEXT: lwz 0, 24(3)
+; LE-32BIT-NEXT: lwz 3, 28(3)
+; LE-32BIT-NEXT: lwz 4, 28(4)
+; LE-32BIT-NEXT: stw 6, 44(1)
+; LE-32BIT-NEXT: stw 6, 40(1)
+; LE-32BIT-NEXT: stw 6, 36(1)
+; LE-32BIT-NEXT: stw 6, 32(1)
+; LE-32BIT-NEXT: stw 6, 28(1)
+; LE-32BIT-NEXT: stw 6, 24(1)
+; LE-32BIT-NEXT: stw 6, 20(1)
+; LE-32BIT-NEXT: stw 6, 16(1)
+; LE-32BIT-NEXT: rlwinm 6, 4, 0, 27, 29
+; LE-32BIT-NEXT: stw 3, 76(1)
+; LE-32BIT-NEXT: addi 3, 1, 48
+; LE-32BIT-NEXT: stw 25, 84(1) # 4-byte Folded Spill
+; LE-32BIT-NEXT: sub 3, 3, 6
+; LE-32BIT-NEXT: stw 26, 88(1) # 4-byte Folded Spill
+; LE-32BIT-NEXT: rlwinm 4, 4, 3, 27, 28
+; LE-32BIT-NEXT: stw 27, 92(1) # 4-byte Folded Spill
+; LE-32BIT-NEXT: stw 28, 96(1) # 4-byte Folded Spill
+; LE-32BIT-NEXT: stw 29, 100(1) # 4-byte Folded Spill
+; LE-32BIT-NEXT: stw 30, 104(1) # 4-byte Folded Spill
+; LE-32BIT-NEXT: stw 0, 72(1)
+; LE-32BIT-NEXT: subfic 0, 4, 32
+; LE-32BIT-NEXT: stw 12, 68(1)
+; LE-32BIT-NEXT: stw 11, 64(1)
+; LE-32BIT-NEXT: stw 10, 60(1)
+; LE-32BIT-NEXT: stw 9, 56(1)
+; LE-32BIT-NEXT: stw 8, 52(1)
+; LE-32BIT-NEXT: stw 7, 48(1)
+; LE-32BIT-NEXT: lwz 6, 4(3)
+; LE-32BIT-NEXT: lwz 7, 0(3)
+; LE-32BIT-NEXT: lwz 8, 12(3)
+; LE-32BIT-NEXT: srw 30, 6, 4
+; LE-32BIT-NEXT: lwz 9, 8(3)
+; LE-32BIT-NEXT: slw 29, 7, 0
+; LE-32BIT-NEXT: lwz 10, 20(3)
+; LE-32BIT-NEXT: srw 28, 8, 4
+; LE-32BIT-NEXT: lwz 11, 16(3)
+; LE-32BIT-NEXT: slw 27, 9, 0
+; LE-32BIT-NEXT: lwz 12, 28(3)
+; LE-32BIT-NEXT: slw 6, 6, 0
+; LE-32BIT-NEXT: lwz 3, 24(3)
+; LE-32BIT-NEXT: srw 26, 10, 4
+; LE-32BIT-NEXT: slw 25, 11, 0
+; LE-32BIT-NEXT: slw 8, 8, 0
+; LE-32BIT-NEXT: slw 10, 10, 0
+; LE-32BIT-NEXT: slw 0, 3, 0
+; LE-32BIT-NEXT: srw 3, 3, 4
+; LE-32BIT-NEXT: srw 12, 12, 4
+; LE-32BIT-NEXT: or 3, 10, 3
+; LE-32BIT-NEXT: srw 11, 11, 4
+; LE-32BIT-NEXT: stw 3, 24(5)
+; LE-32BIT-NEXT: or 3, 0, 12
+; LE-32BIT-NEXT: stw 3, 28(5)
+; LE-32BIT-NEXT: or 3, 8, 11
+; LE-32BIT-NEXT: srw 9, 9, 4
+; LE-32BIT-NEXT: stw 3, 16(5)
+; LE-32BIT-NEXT: or 3, 25, 26
+; LE-32BIT-NEXT: stw 3, 20(5)
+; LE-32BIT-NEXT: or 3, 6, 9
+; LE-32BIT-NEXT: stw 3, 8(5)
+; LE-32BIT-NEXT: or 3, 27, 28
+; LE-32BIT-NEXT: srw 4, 7, 4
+; LE-32BIT-NEXT: stw 3, 12(5)
+; LE-32BIT-NEXT: or 3, 29, 30
+; LE-32BIT-NEXT: stw 4, 0(5)
+; LE-32BIT-NEXT: stw 3, 4(5)
+; LE-32BIT-NEXT: lwz 30, 104(1) # 4-byte Folded Reload
+; LE-32BIT-NEXT: lwz 29, 100(1) # 4-byte Folded Reload
+; LE-32BIT-NEXT: lwz 28, 96(1) # 4-byte Folded Reload
+; LE-32BIT-NEXT: lwz 27, 92(1) # 4-byte Folded Reload
+; LE-32BIT-NEXT: lwz 26, 88(1) # 4-byte Folded Reload
+; LE-32BIT-NEXT: lwz 25, 84(1) # 4-byte Folded Reload
+; LE-32BIT-NEXT: addi 1, 1, 112
+; LE-32BIT-NEXT: blr
+ %src = load i256, ptr %src.ptr, align 1
+ %byteOff = load i256, ptr %byteOff.ptr, align 1
+ %bitOff = shl i256 %byteOff, 3
+ %res = lshr i256 %src, %bitOff
+ store i256 %res, ptr %dst, align 1
+ ret void
+}
+
+define void @lshr_32bytes_wordOff(ptr %src.ptr, ptr %wordOff.ptr, ptr %dst) nounwind {
+; LE-64BIT-LABEL: lshr_32bytes_wordOff:
+; LE-64BIT: # %bb.0:
+; LE-64BIT-NEXT: li 6, 16
+; LE-64BIT-NEXT: lxvd2x 1, 0, 3
+; LE-64BIT-NEXT: xxlxor 2, 2, 2
+; LE-64BIT-NEXT: addi 7, 1, -64
+; LE-64BIT-NEXT: li 8, 32
+; LE-64BIT-NEXT: lxvd2x 0, 3, 6
+; LE-64BIT-NEXT: lwz 3, 0(4)
+; LE-64BIT-NEXT: li 4, 48
+; LE-64BIT-NEXT: stxvd2x 2, 7, 4
+; LE-64BIT-NEXT: stxvd2x 2, 7, 8
+; LE-64BIT-NEXT: rlwinm 4, 3, 2, 27, 28
+; LE-64BIT-NEXT: rlwinm 3, 3, 5, 26, 26
+; LE-64BIT-NEXT: stxvd2x 0, 7, 6
+; LE-64BIT-NEXT: stxvd2x 1, 0, 7
+; LE-64BIT-NEXT: ldux 6, 4, 7
+; LE-64BIT-NEXT: subfic 7, 3, 64
+; LE-64BIT-NEXT: ld 8, 8(4)
+; LE-64BIT-NEXT: ld 9, 16(4)
+; LE-64BIT-NEXT: ld 4, 24(4)
+; LE-64BIT-NEXT: srd 6, 6, 3
+; LE-64BIT-NEXT: sld 10, 8, 7
+; LE-64BIT-NEXT: sld 11, 4, 7
+; LE-64BIT-NEXT: srd 8, 8, 3
+; LE-64BIT-NEXT: sld 7, 9, 7
+; LE-64BIT-NEXT: or 6, 10, 6
+; LE-64BIT-NEXT: srd 10, 9, 3
+; LE-64BIT-NEXT: srd 3, 4, 3
+; LE-64BIT-NEXT: or 7, 7, 8
+; LE-64BIT-NEXT: std 3, 24(5)
+; LE-64BIT-NEXT: or 3, 11, 10
+; LE-64BIT-NEXT: std 7, 8(5)
+; LE-64BIT-NEXT: std 6, 0(5)
+; LE-64BIT-NEXT: std 3, 16(5)
+; LE-64BIT-NEXT: blr
+;
+; BE-LABEL: lshr_32bytes_wordOff:
+; BE: # %bb.0:
+; BE-NEXT: ld 6, 0(3)
+; BE-NEXT: ld 7, 8(3)
+; BE-NEXT: ld 8, 16(3)
+; BE-NEXT: ld 3, 24(3)
+; BE-NEXT: lwz 4, 28(4)
+; BE-NEXT: li 9, 0
+; BE-NEXT: addi 10, 1, -32
+; BE-NEXT: std 9, -40(1)
+; BE-NEXT: std 9, -48(1)
+; BE-NEXT: std 9, -56(1)
+; BE-NEXT: std 9, -64(1)
+; BE-NEXT: std 3, -8(1)
+; BE-NEXT: rlwinm 3, 4, 2, 27, 28
+; BE-NEXT: neg 3, 3
+; BE-NEXT: std 8, -16(1)
+; BE-NEXT: std 7, -24(1)
+; BE-NEXT: std 6, -32(1)
+; BE-NEXT: extsw 3, 3
+; BE-NEXT: ldux 3, 10, 3
+; BE-NEXT: rlwinm 4, 4, 5, 26, 26
+; BE-NEXT: subfic 9, 4, 64
+; BE-NEXT: ld 6, 8(10)
+; BE-NEXT: ld 7, 24(10)
+; BE-NEXT: ld 8, 16(10)
+; BE-NEXT: sld 10, 3, 9
+; BE-NEXT: srd 3, 3, 4
+; BE-NEXT: std 3, 0(5)
+; BE-NEXT: srd 11, 6, 4
+; BE-NEXT: srd 7, 7, 4
+; BE-NEXT: sld 6, 6, 9
+; BE-NEXT: sld 9, 8, 9
+; BE-NEXT: srd 8, 8, 4
+; BE-NEXT: or 10, 10, 11
+; BE-NEXT: or 7, 9, 7
+; BE-NEXT: or 6, 6, 8
+; BE-NEXT: std 6, 16(5)
+; BE-NEXT: std 7, 24(5)
+; BE-NEXT: std 10, 8(5)
+; BE-NEXT: blr
+;
+; LE-32BIT-LABEL: lshr_32bytes_wordOff:
+; LE-32BIT: # %bb.0:
+; LE-32BIT-NEXT: stwu 1, -80(1)
+; LE-32BIT-NEXT: lwz 7, 0(3)
+; LE-32BIT-NEXT: li 6, 0
+; LE-32BIT-NEXT: lwz 8, 4(3)
+; LE-32BIT-NEXT: lwz 9, 8(3)
+; LE-32BIT-NEXT: lwz 10, 12(3)
+; LE-32BIT-NEXT: lwz 11, 16(3)
+; LE-32BIT-NEXT: lwz 12, 20(3)
+; LE-32BIT-NEXT: lwz 0, 24(3)
+; LE-32BIT-NEXT: lwz 3, 28(3)
+; LE-32BIT-NEXT: lwz 4, 28(4)
+; LE-32BIT-NEXT: stw 3, 76(1)
+; LE-32BIT-NEXT: addi 3, 1, 48
+; LE-32BIT-NEXT: rlwinm 4, 4, 2, 27, 29
+; LE-32BIT-NEXT: stw 6, 44(1)
+; LE-32BIT-NEXT: sub 3, 3, 4
+; LE-32BIT-NEXT: stw 6, 40(1)
+; LE-32BIT-NEXT: stw 6, 36(1)
+; LE-32BIT-NEXT: stw 6, 32(1)
+; LE-32BIT-NEXT: stw 6, 28(1)
+; LE-32BIT-NEXT: stw 6, 24(1)
+; LE-32BIT-NEXT: stw 6, 20(1)
+; LE-32BIT-NEXT: stw 6, 16(1)
+; LE-32BIT-NEXT: stw 0, 72(1)
+; LE-32BIT-NEXT: stw 12, 68(1)
+; LE-32BIT-NEXT: stw 11, 64(1)
+; LE-32BIT-NEXT: stw 10, 60(1)
+; LE-32BIT-NEXT: stw 9, 56(1)
+; LE-32BIT-NEXT: stw 8, 52(1)
+; LE-32BIT-NEXT: stw 7, 48(1)
+; LE-32BIT-NEXT: lwz 4, 4(3)
+; LE-32BIT-NEXT: lwz 6, 0(3)
+; LE-32BIT-NEXT: lwz 7, 12(3)
+; LE-32BIT-NEXT: lwz 8, 8(3)
+; LE-32BIT-NEXT: lwz 9, 20(3)
+; LE-32BIT-NEXT: lwz 10, 16(3)
+; LE-32BIT-NEXT: lwz 11, 24(3)
+; LE-32BIT-NEXT: lwz 3, 28(3)
+; LE-32BIT-NEXT: stw 11, 24(5)
+; LE-32BIT-NEXT: stw 3, 28(5)
+; LE-32BIT-NEXT: stw 10, 16(5)
+; LE-32BIT-NEXT: stw 9, 20(5)
+; LE-32BIT-NEXT: stw 8, 8(5)
+; LE-32BIT-NEXT: stw 7, 12(5)
+; LE-32BIT-NEXT: stw 6, 0(5)
+; LE-32BIT-NEXT: stw 4, 4(5)
+; LE-32BIT-NEXT: addi 1, 1, 80
+; LE-32BIT-NEXT: blr
+ %src = load i256, ptr %src.ptr, align 1
+ %wordOff = load i256, ptr %wordOff.ptr, align 1
+ %bitOff = shl i256 %wordOff, 5
+ %res = lshr i256 %src, %bitOff
+ store i256 %res, ptr %dst, align 1
+ ret void
+}
+
+define void @lshr_32bytes_dwordOff(ptr %src.ptr, ptr %dwordOff.ptr, ptr %dst) nounwind {
+; LE-64BIT-LABEL: lshr_32bytes_dwordOff:
+; LE-64BIT: # %bb.0:
+; LE-64BIT-NEXT: li 6, 16
+; LE-64BIT-NEXT: lxvd2x 1, 0, 3
+; LE-64BIT-NEXT: xxlxor 2, 2, 2
+; LE-64BIT-NEXT: addi 7, 1, -64
; LE-64BIT-NEXT: lxvd2x 0, 3, 6
; LE-64BIT-NEXT: lwz 3, 0(4)
; LE-64BIT-NEXT: li 4, 48
; LE-64BIT-NEXT: stxvd2x 2, 7, 4
; LE-64BIT-NEXT: li 4, 32
-; LE-64BIT-NEXT: clrldi 3, 3, 59
+; LE-64BIT-NEXT: rlwinm 3, 3, 3, 27, 28
; LE-64BIT-NEXT: stxvd2x 2, 7, 4
; LE-64BIT-NEXT: stxvd2x 0, 7, 6
; LE-64BIT-NEXT: stxvd2x 1, 0, 7
@@ -438,25 +1018,24 @@ define void @lshr_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; LE-64BIT-NEXT: stxvd2x 0, 0, 5
; LE-64BIT-NEXT: blr
;
-; BE-LABEL: lshr_32bytes:
+; BE-LABEL: lshr_32bytes_dwordOff:
; BE: # %bb.0:
-; BE-NEXT: ld 6, 0(3)
-; BE-NEXT: ld 7, 8(3)
-; BE-NEXT: ld 8, 16(3)
+; BE-NEXT: ld 7, 0(3)
+; BE-NEXT: ld 8, 8(3)
+; BE-NEXT: ld 9, 16(3)
; BE-NEXT: ld 3, 24(3)
; BE-NEXT: lwz 4, 28(4)
-; BE-NEXT: addi 9, 1, -64
-; BE-NEXT: li 10, 0
-; BE-NEXT: std 10, 24(9)
-; BE-NEXT: std 10, 16(9)
-; BE-NEXT: std 10, 8(9)
-; BE-NEXT: std 10, -64(1)
-; BE-NEXT: std 3, 56(9)
-; BE-NEXT: clrlwi 3, 4, 27
+; BE-NEXT: li 6, 0
+; BE-NEXT: std 6, -40(1)
+; BE-NEXT: std 6, -48(1)
+; BE-NEXT: std 6, -56(1)
+; BE-NEXT: std 6, -64(1)
+; BE-NEXT: std 3, -8(1)
+; BE-NEXT: rlwinm 3, 4, 3, 27, 28
; BE-NEXT: neg 3, 3
-; BE-NEXT: std 8, 48(9)
-; BE-NEXT: std 7, 40(9)
-; BE-NEXT: std 6, 32(9)
+; BE-NEXT: std 9, -16(1)
+; BE-NEXT: std 8, -24(1)
+; BE-NEXT: std 7, -32(1)
; BE-NEXT: extsw 3, 3
; BE-NEXT: addi 4, 1, -32
; BE-NEXT: ldux 3, 4, 3
@@ -469,7 +1048,7 @@ define void @lshr_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; BE-NEXT: std 6, 8(5)
; BE-NEXT: blr
;
-; LE-32BIT-LABEL: lshr_32bytes:
+; LE-32BIT-LABEL: lshr_32bytes_dwordOff:
; LE-32BIT: # %bb.0:
; LE-32BIT-NEXT: stwu 1, -80(1)
; LE-32BIT-NEXT: lwz 7, 0(3)
@@ -484,7 +1063,7 @@ define void @lshr_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; LE-32BIT-NEXT: lwz 4, 28(4)
; LE-32BIT-NEXT: stw 3, 76(1)
; LE-32BIT-NEXT: addi 3, 1, 48
-; LE-32BIT-NEXT: clrlwi 4, 4, 27
+; LE-32BIT-NEXT: rlwinm 4, 4, 3, 27, 28
; LE-32BIT-NEXT: stw 6, 44(1)
; LE-32BIT-NEXT: sub 3, 3, 4
; LE-32BIT-NEXT: stw 6, 40(1)
@@ -520,16 +1099,329 @@ define void @lshr_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; LE-32BIT-NEXT: addi 1, 1, 80
; LE-32BIT-NEXT: blr
%src = load i256, ptr %src.ptr, align 1
- %byteOff = load i256, ptr %byteOff.ptr, align 1
- %bitOff = shl i256 %byteOff, 3
+ %dwordOff = load i256, ptr %dwordOff.ptr, align 1
+ %bitOff = shl i256 %dwordOff, 6
%res = lshr i256 %src, %bitOff
store i256 %res, ptr %dst, align 1
ret void
}
+
define void @shl_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; LE-64BIT-LABEL: shl_32bytes:
; LE-64BIT: # %bb.0:
; LE-64BIT-NEXT: li 6, 16
+; LE-64BIT-NEXT: lwz 4, 0(4)
+; LE-64BIT-NEXT: xxlxor 2, 2, 2
+; LE-64BIT-NEXT: addi 7, 1, -64
+; LE-64BIT-NEXT: lxvd2x 1, 0, 3
+; LE-64BIT-NEXT: addi 8, 1, -32
+; LE-64BIT-NEXT: lxvd2x 0, 3, 6
+; LE-64BIT-NEXT: stxvd2x 2, 7, 6
+; LE-64BIT-NEXT: li 6, 48
+; LE-64BIT-NEXT: rlwinm 3, 4, 0, 27, 28
+; LE-64BIT-NEXT: rlwinm 4, 4, 3, 26, 28
+; LE-64BIT-NEXT: neg 3, 3
+; LE-64BIT-NEXT: stxvd2x 0, 7, 6
+; LE-64BIT-NEXT: li 6, 32
+; LE-64BIT-NEXT: extsw 3, 3
+; LE-64BIT-NEXT: stxvd2x 1, 7, 6
+; LE-64BIT-NEXT: stxvd2x 2, 0, 7
+; LE-64BIT-NEXT: subfic 6, 4, 64
+; LE-64BIT-NEXT: ldux 3, 8, 3
+; LE-64BIT-NEXT: ld 7, 16(8)
+; LE-64BIT-NEXT: ld 9, 24(8)
+; LE-64BIT-NEXT: ld 8, 8(8)
+; LE-64BIT-NEXT: srd 10, 7, 6
+; LE-64BIT-NEXT: sld 9, 9, 4
+; LE-64BIT-NEXT: sld 7, 7, 4
+; LE-64BIT-NEXT: or 9, 9, 10
+; LE-64BIT-NEXT: srd 10, 8, 6
+; LE-64BIT-NEXT: srd 6, 3, 6
+; LE-64BIT-NEXT: sld 8, 8, 4
+; LE-64BIT-NEXT: sld 3, 3, 4
+; LE-64BIT-NEXT: or 6, 8, 6
+; LE-64BIT-NEXT: std 3, 0(5)
+; LE-64BIT-NEXT: or 3, 7, 10
+; LE-64BIT-NEXT: std 9, 24(5)
+; LE-64BIT-NEXT: std 6, 8(5)
+; LE-64BIT-NEXT: std 3, 16(5)
+; LE-64BIT-NEXT: blr
+;
+; BE-LABEL: shl_32bytes:
+; BE: # %bb.0:
+; BE-NEXT: ld 6, 0(3)
+; BE-NEXT: ld 7, 8(3)
+; BE-NEXT: ld 8, 16(3)
+; BE-NEXT: ld 3, 24(3)
+; BE-NEXT: lwz 4, 28(4)
+; BE-NEXT: li 9, 0
+; BE-NEXT: addi 10, 1, -64
+; BE-NEXT: std 9, -8(1)
+; BE-NEXT: std 9, -16(1)
+; BE-NEXT: std 9, -24(1)
+; BE-NEXT: std 9, -32(1)
+; BE-NEXT: std 3, -40(1)
+; BE-NEXT: std 8, -48(1)
+; BE-NEXT: std 7, -56(1)
+; BE-NEXT: std 6, -64(1)
+; BE-NEXT: rlwinm 3, 4, 0, 27, 28
+; BE-NEXT: ldux 6, 3, 10
+; BE-NEXT: rlwinm 4, 4, 3, 26, 28
+; BE-NEXT: subfic 9, 4, 64
+; BE-NEXT: ld 7, 16(3)
+; BE-NEXT: ld 8, 8(3)
+; BE-NEXT: ld 3, 24(3)
+; BE-NEXT: sld 6, 6, 4
+; BE-NEXT: srd 10, 7, 9
+; BE-NEXT: sld 11, 8, 4
+; BE-NEXT: srd 8, 8, 9
+; BE-NEXT: srd 9, 3, 9
+; BE-NEXT: sld 7, 7, 4
+; BE-NEXT: sld 3, 3, 4
+; BE-NEXT: or 10, 11, 10
+; BE-NEXT: or 6, 6, 8
+; BE-NEXT: or 7, 7, 9
+; BE-NEXT: std 3, 24(5)
+; BE-NEXT: std 7, 16(5)
+; BE-NEXT: std 6, 0(5)
+; BE-NEXT: std 10, 8(5)
+; BE-NEXT: blr
+;
+; LE-32BIT-LABEL: shl_32bytes:
+; LE-32BIT: # %bb.0:
+; LE-32BIT-NEXT: stwu 1, -112(1)
+; LE-32BIT-NEXT: lwz 7, 0(3)
+; LE-32BIT-NEXT: li 6, 0
+; LE-32BIT-NEXT: lwz 8, 4(3)
+; LE-32BIT-NEXT: lwz 9, 8(3)
+; LE-32BIT-NEXT: lwz 10, 12(3)
+; LE-32BIT-NEXT: lwz 11, 16(3)
+; LE-32BIT-NEXT: lwz 12, 20(3)
+; LE-32BIT-NEXT: lwz 0, 24(3)
+; LE-32BIT-NEXT: lwz 3, 28(3)
+; LE-32BIT-NEXT: lwz 4, 28(4)
+; LE-32BIT-NEXT: stw 25, 84(1) # 4-byte Folded Spill
+; LE-32BIT-NEXT: stw 26, 88(1) # 4-byte Folded Spill
+; LE-32BIT-NEXT: stw 27, 92(1) # 4-byte Folded Spill
+; LE-32BIT-NEXT: stw 28, 96(1) # 4-byte Folded Spill
+; LE-32BIT-NEXT: stw 29, 100(1) # 4-byte Folded Spill
+; LE-32BIT-NEXT: stw 30, 104(1) # 4-byte Folded Spill
+; LE-32BIT-NEXT: stw 6, 76(1)
+; LE-32BIT-NEXT: stw 6, 72(1)
+; LE-32BIT-NEXT: stw 6, 68(1)
+; LE-32BIT-NEXT: stw 6, 64(1)
+; LE-32BIT-NEXT: stw 6, 60(1)
+; LE-32BIT-NEXT: stw 6, 56(1)
+; LE-32BIT-NEXT: stw 6, 52(1)
+; LE-32BIT-NEXT: stw 6, 48(1)
+; LE-32BIT-NEXT: rlwinm 6, 4, 0, 27, 29
+; LE-32BIT-NEXT: stw 3, 44(1)
+; LE-32BIT-NEXT: addi 3, 1, 16
+; LE-32BIT-NEXT: stw 0, 40(1)
+; LE-32BIT-NEXT: rlwinm 4, 4, 3, 27, 28
+; LE-32BIT-NEXT: stw 12, 36(1)
+; LE-32BIT-NEXT: subfic 12, 4, 32
+; LE-32BIT-NEXT: stw 11, 32(1)
+; LE-32BIT-NEXT: stw 10, 28(1)
+; LE-32BIT-NEXT: stw 9, 24(1)
+; LE-32BIT-NEXT: stw 8, 20(1)
+; LE-32BIT-NEXT: stw 7, 16(1)
+; LE-32BIT-NEXT: lwzux 3, 6, 3
+; LE-32BIT-NEXT: lwz 7, 8(6)
+; LE-32BIT-NEXT: slw 3, 3, 4
+; LE-32BIT-NEXT: lwz 8, 4(6)
+; LE-32BIT-NEXT: lwz 9, 16(6)
+; LE-32BIT-NEXT: srw 30, 7, 12
+; LE-32BIT-NEXT: lwz 10, 12(6)
+; LE-32BIT-NEXT: slw 29, 8, 4
+; LE-32BIT-NEXT: lwz 11, 24(6)
+; LE-32BIT-NEXT: srw 8, 8, 12
+; LE-32BIT-NEXT: lwz 0, 20(6)
+; LE-32BIT-NEXT: srw 28, 9, 12
+; LE-32BIT-NEXT: lwz 6, 28(6)
+; LE-32BIT-NEXT: slw 27, 10, 4
+; LE-32BIT-NEXT: srw 10, 10, 12
+; LE-32BIT-NEXT: slw 7, 7, 4
+; LE-32BIT-NEXT: srw 26, 11, 12
+; LE-32BIT-NEXT: slw 25, 0, 4
+; LE-32BIT-NEXT: srw 0, 0, 12
+; LE-32BIT-NEXT: slw 9, 9, 4
+; LE-32BIT-NEXT: srw 12, 6, 12
+; LE-32BIT-NEXT: slw 11, 11, 4
+; LE-32BIT-NEXT: slw 4, 6, 4
+; LE-32BIT-NEXT: stw 4, 28(5)
+; LE-32BIT-NEXT: or 4, 11, 12
+; LE-32BIT-NEXT: stw 4, 24(5)
+; LE-32BIT-NEXT: or 4, 9, 0
+; LE-32BIT-NEXT: stw 4, 16(5)
+; LE-32BIT-NEXT: or 4, 25, 26
+; LE-32BIT-NEXT: stw 4, 20(5)
+; LE-32BIT-NEXT: or 4, 7, 10
+; LE-32BIT-NEXT: or 3, 3, 8
+; LE-32BIT-NEXT: stw 4, 8(5)
+; LE-32BIT-NEXT: or 4, 27, 28
+; LE-32BIT-NEXT: stw 3, 0(5)
+; LE-32BIT-NEXT: or 3, 29, 30
+; LE-32BIT-NEXT: stw 4, 12(5)
+; LE-32BIT-NEXT: stw 3, 4(5)
+; LE-32BIT-NEXT: lwz 30, 104(1) # 4-byte Folded Reload
+; LE-32BIT-NEXT: lwz 29, 100(1) # 4-byte Folded Reload
+; LE-32BIT-NEXT: lwz 28, 96(1) # 4-byte Folded Reload
+; LE-32BIT-NEXT: lwz 27, 92(1) # 4-byte Folded Reload
+; LE-32BIT-NEXT: lwz 26, 88(1) # 4-byte Folded Reload
+; LE-32BIT-NEXT: lwz 25, 84(1) # 4-byte Folded Reload
+; LE-32BIT-NEXT: addi 1, 1, 112
+; LE-32BIT-NEXT: blr
+ %src = load i256, ptr %src.ptr, align 1
+ %byteOff = load i256, ptr %byteOff.ptr, align 1
+ %bitOff = shl i256 %byteOff, 3
+ %res = shl i256 %src, %bitOff
+ store i256 %res, ptr %dst, align 1
+ ret void
+}
+
+define void @shl_32bytes_wordOff(ptr %src.ptr, ptr %wordOff.ptr, ptr %dst) nounwind {
+; LE-64BIT-LABEL: shl_32bytes_wordOff:
+; LE-64BIT: # %bb.0:
+; LE-64BIT-NEXT: li 6, 16
+; LE-64BIT-NEXT: lwz 4, 0(4)
+; LE-64BIT-NEXT: xxlxor 2, 2, 2
+; LE-64BIT-NEXT: addi 7, 1, -64
+; LE-64BIT-NEXT: lxvd2x 1, 0, 3
+; LE-64BIT-NEXT: addi 8, 1, -32
+; LE-64BIT-NEXT: lxvd2x 0, 3, 6
+; LE-64BIT-NEXT: stxvd2x 2, 7, 6
+; LE-64BIT-NEXT: li 6, 48
+; LE-64BIT-NEXT: rlwinm 3, 4, 2, 27, 28
+; LE-64BIT-NEXT: rlwinm 4, 4, 5, 26, 26
+; LE-64BIT-NEXT: neg 3, 3
+; LE-64BIT-NEXT: stxvd2x 0, 7, 6
+; LE-64BIT-NEXT: li 6, 32
+; LE-64BIT-NEXT: extsw 3, 3
+; LE-64BIT-NEXT: stxvd2x 1, 7, 6
+; LE-64BIT-NEXT: stxvd2x 2, 0, 7
+; LE-64BIT-NEXT: subfic 6, 4, 64
+; LE-64BIT-NEXT: ldux 3, 8, 3
+; LE-64BIT-NEXT: ld 7, 16(8)
+; LE-64BIT-NEXT: ld 9, 24(8)
+; LE-64BIT-NEXT: ld 8, 8(8)
+; LE-64BIT-NEXT: srd 10, 7, 6
+; LE-64BIT-NEXT: sld 9, 9, 4
+; LE-64BIT-NEXT: sld 7, 7, 4
+; LE-64BIT-NEXT: or 9, 9, 10
+; LE-64BIT-NEXT: srd 10, 8, 6
+; LE-64BIT-NEXT: srd 6, 3, 6
+; LE-64BIT-NEXT: sld 8, 8, 4
+; LE-64BIT-NEXT: sld 3, 3, 4
+; LE-64BIT-NEXT: or 6, 8, 6
+; LE-64BIT-NEXT: std 3, 0(5)
+; LE-64BIT-NEXT: or 3, 7, 10
+; LE-64BIT-NEXT: std 9, 24(5)
+; LE-64BIT-NEXT: std 6, 8(5)
+; LE-64BIT-NEXT: std 3, 16(5)
+; LE-64BIT-NEXT: blr
+;
+; BE-LABEL: shl_32bytes_wordOff:
+; BE: # %bb.0:
+; BE-NEXT: ld 6, 0(3)
+; BE-NEXT: ld 7, 8(3)
+; BE-NEXT: ld 8, 16(3)
+; BE-NEXT: ld 3, 24(3)
+; BE-NEXT: lwz 4, 28(4)
+; BE-NEXT: li 9, 0
+; BE-NEXT: addi 10, 1, -64
+; BE-NEXT: std 9, -8(1)
+; BE-NEXT: std 9, -16(1)
+; BE-NEXT: std 9, -24(1)
+; BE-NEXT: std 9, -32(1)
+; BE-NEXT: std 3, -40(1)
+; BE-NEXT: std 8, -48(1)
+; BE-NEXT: std 7, -56(1)
+; BE-NEXT: std 6, -64(1)
+; BE-NEXT: rlwinm 3, 4, 2, 27, 28
+; BE-NEXT: ldux 6, 3, 10
+; BE-NEXT: rlwinm 4, 4, 5, 26, 26
+; BE-NEXT: subfic 9, 4, 64
+; BE-NEXT: ld 7, 16(3)
+; BE-NEXT: ld 8, 8(3)
+; BE-NEXT: ld 3, 24(3)
+; BE-NEXT: sld 6, 6, 4
+; BE-NEXT: srd 10, 7, 9
+; BE-NEXT: sld 11, 8, 4
+; BE-NEXT: srd 8, 8, 9
+; BE-NEXT: srd 9, 3, 9
+; BE-NEXT: sld 7, 7, 4
+; BE-NEXT: sld 3, 3, 4
+; BE-NEXT: or 10, 11, 10
+; BE-NEXT: or 6, 6, 8
+; BE-NEXT: or 7, 7, 9
+; BE-NEXT: std 3, 24(5)
+; BE-NEXT: std 7, 16(5)
+; BE-NEXT: std 6, 0(5)
+; BE-NEXT: std 10, 8(5)
+; BE-NEXT: blr
+;
+; LE-32BIT-LABEL: shl_32bytes_wordOff:
+; LE-32BIT: # %bb.0:
+; LE-32BIT-NEXT: stwu 1, -80(1)
+; LE-32BIT-NEXT: lwz 7, 0(3)
+; LE-32BIT-NEXT: li 6, 0
+; LE-32BIT-NEXT: lwz 8, 4(3)
+; LE-32BIT-NEXT: lwz 9, 8(3)
+; LE-32BIT-NEXT: lwz 10, 12(3)
+; LE-32BIT-NEXT: lwz 11, 16(3)
+; LE-32BIT-NEXT: lwz 12, 20(3)
+; LE-32BIT-NEXT: lwz 0, 24(3)
+; LE-32BIT-NEXT: lwz 3, 28(3)
+; LE-32BIT-NEXT: lwz 4, 28(4)
+; LE-32BIT-NEXT: stw 6, 76(1)
+; LE-32BIT-NEXT: stw 6, 72(1)
+; LE-32BIT-NEXT: rlwinm 4, 4, 2, 27, 29
+; LE-32BIT-NEXT: stw 6, 68(1)
+; LE-32BIT-NEXT: stw 6, 64(1)
+; LE-32BIT-NEXT: stw 6, 60(1)
+; LE-32BIT-NEXT: stw 6, 56(1)
+; LE-32BIT-NEXT: stw 6, 52(1)
+; LE-32BIT-NEXT: stw 6, 48(1)
+; LE-32BIT-NEXT: stw 3, 44(1)
+; LE-32BIT-NEXT: addi 3, 1, 16
+; LE-32BIT-NEXT: stw 0, 40(1)
+; LE-32BIT-NEXT: stw 12, 36(1)
+; LE-32BIT-NEXT: stw 11, 32(1)
+; LE-32BIT-NEXT: stw 10, 28(1)
+; LE-32BIT-NEXT: stw 9, 24(1)
+; LE-32BIT-NEXT: stw 8, 20(1)
+; LE-32BIT-NEXT: stw 7, 16(1)
+; LE-32BIT-NEXT: lwzux 3, 4, 3
+; LE-32BIT-NEXT: lwz 6, 4(4)
+; LE-32BIT-NEXT: lwz 7, 12(4)
+; LE-32BIT-NEXT: lwz 8, 8(4)
+; LE-32BIT-NEXT: lwz 9, 20(4)
+; LE-32BIT-NEXT: lwz 10, 16(4)
+; LE-32BIT-NEXT: lwz 11, 28(4)
+; LE-32BIT-NEXT: lwz 4, 24(4)
+; LE-32BIT-NEXT: stw 3, 0(5)
+; LE-32BIT-NEXT: stw 4, 24(5)
+; LE-32BIT-NEXT: stw 11, 28(5)
+; LE-32BIT-NEXT: stw 10, 16(5)
+; LE-32BIT-NEXT: stw 9, 20(5)
+; LE-32BIT-NEXT: stw 8, 8(5)
+; LE-32BIT-NEXT: stw 7, 12(5)
+; LE-32BIT-NEXT: stw 6, 4(5)
+; LE-32BIT-NEXT: addi 1, 1, 80
+; LE-32BIT-NEXT: blr
+ %src = load i256, ptr %src.ptr, align 1
+ %wordOff = load i256, ptr %wordOff.ptr, align 1
+ %bitOff = shl i256 %wordOff, 5
+ %res = shl i256 %src, %bitOff
+ store i256 %res, ptr %dst, align 1
+ ret void
+}
+
+define void @shl_32bytes_dwordOff(ptr %src.ptr, ptr %dwordOff.ptr, ptr %dst) nounwind {
+; LE-64BIT-LABEL: shl_32bytes_dwordOff:
+; LE-64BIT: # %bb.0:
+; LE-64BIT-NEXT: li 6, 16
; LE-64BIT-NEXT: lxvd2x 1, 0, 3
; LE-64BIT-NEXT: xxlxor 2, 2, 2
; LE-64BIT-NEXT: li 7, 48
@@ -537,7 +1429,7 @@ define void @shl_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; LE-64BIT-NEXT: lwz 3, 0(4)
; LE-64BIT-NEXT: addi 4, 1, -64
; LE-64BIT-NEXT: stxvd2x 2, 4, 6
-; LE-64BIT-NEXT: clrlwi 3, 3, 27
+; LE-64BIT-NEXT: rlwinm 3, 3, 3, 27, 28
; LE-64BIT-NEXT: stxvd2x 0, 4, 7
; LE-64BIT-NEXT: li 7, 32
; LE-64BIT-NEXT: neg 3, 3
@@ -552,25 +1444,25 @@ define void @shl_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; LE-64BIT-NEXT: stxvd2x 0, 0, 5
; LE-64BIT-NEXT: blr
;
-; BE-LABEL: shl_32bytes:
+; BE-LABEL: shl_32bytes_dwordOff:
; BE: # %bb.0:
-; BE-NEXT: ld 6, 0(3)
-; BE-NEXT: ld 7, 8(3)
-; BE-NEXT: ld 8, 16(3)
+; BE-NEXT: ld 7, 0(3)
+; BE-NEXT: ld 8, 8(3)
+; BE-NEXT: ld 9, 16(3)
; BE-NEXT: ld 3, 24(3)
; BE-NEXT: lwz 4, 28(4)
-; BE-NEXT: addi 9, 1, -64
-; BE-NEXT: li 10, 0
-; BE-NEXT: std 10, 56(9)
-; BE-NEXT: std 10, 48(9)
-; BE-NEXT: std 10, 40(9)
-; BE-NEXT: std 10, 32(9)
-; BE-NEXT: std 3, 24(9)
-; BE-NEXT: std 8, 16(9)
-; BE-NEXT: std 7, 8(9)
-; BE-NEXT: std 6, -64(1)
-; BE-NEXT: clrldi 3, 4, 59
-; BE-NEXT: ldux 4, 3, 9
+; BE-NEXT: li 6, 0
+; BE-NEXT: std 6, -8(1)
+; BE-NEXT: std 6, -16(1)
+; BE-NEXT: std 6, -24(1)
+; BE-NEXT: std 6, -32(1)
+; BE-NEXT: std 3, -40(1)
+; BE-NEXT: std 9, -48(1)
+; BE-NEXT: std 8, -56(1)
+; BE-NEXT: std 7, -64(1)
+; BE-NEXT: rlwinm 3, 4, 3, 27, 28
+; BE-NEXT: addi 4, 1, -64
+; BE-NEXT: ldux 4, 3, 4
; BE-NEXT: ld 6, 8(3)
; BE-NEXT: ld 7, 24(3)
; BE-NEXT: ld 3, 16(3)
@@ -580,7 +1472,7 @@ define void @shl_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; BE-NEXT: std 6, 8(5)
; BE-NEXT: blr
;
-; LE-32BIT-LABEL: shl_32bytes:
+; LE-32BIT-LABEL: shl_32bytes_dwordOff:
; LE-32BIT: # %bb.0:
; LE-32BIT-NEXT: stwu 1, -80(1)
; LE-32BIT-NEXT: lwz 7, 0(3)
@@ -595,7 +1487,7 @@ define void @shl_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; LE-32BIT-NEXT: lwz 4, 28(4)
; LE-32BIT-NEXT: stw 6, 76(1)
; LE-32BIT-NEXT: stw 6, 72(1)
-; LE-32BIT-NEXT: clrlwi 4, 4, 27
+; LE-32BIT-NEXT: rlwinm 4, 4, 3, 27, 28
; LE-32BIT-NEXT: stw 6, 68(1)
; LE-32BIT-NEXT: stw 6, 64(1)
; LE-32BIT-NEXT: stw 6, 60(1)
@@ -612,87 +1504,403 @@ define void @shl_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; LE-32BIT-NEXT: stw 8, 20(1)
; LE-32BIT-NEXT: stw 7, 16(1)
; LE-32BIT-NEXT: lwzux 3, 4, 3
-; LE-32BIT-NEXT: lwz 6, 4(4)
-; LE-32BIT-NEXT: lwz 7, 12(4)
-; LE-32BIT-NEXT: lwz 8, 8(4)
-; LE-32BIT-NEXT: lwz 9, 20(4)
-; LE-32BIT-NEXT: lwz 10, 16(4)
-; LE-32BIT-NEXT: lwz 11, 28(4)
-; LE-32BIT-NEXT: lwz 4, 24(4)
+; LE-32BIT-NEXT: lwz 6, 12(4)
+; LE-32BIT-NEXT: lwz 7, 8(4)
+; LE-32BIT-NEXT: lwz 8, 20(4)
+; LE-32BIT-NEXT: lwz 9, 16(4)
+; LE-32BIT-NEXT: lwz 10, 28(4)
+; LE-32BIT-NEXT: lwz 11, 24(4)
+; LE-32BIT-NEXT: ori 4, 4, 4
+; LE-32BIT-NEXT: lwz 4, 0(4)
; LE-32BIT-NEXT: stw 3, 0(5)
-; LE-32BIT-NEXT: stw 4, 24(5)
-; LE-32BIT-NEXT: stw 11, 28(5)
-; LE-32BIT-NEXT: stw 10, 16(5)
-; LE-32BIT-NEXT: stw 9, 20(5)
-; LE-32BIT-NEXT: stw 8, 8(5)
-; LE-32BIT-NEXT: stw 7, 12(5)
-; LE-32BIT-NEXT: stw 6, 4(5)
+; LE-32BIT-NEXT: stw 11, 24(5)
+; LE-32BIT-NEXT: stw 10, 28(5)
+; LE-32BIT-NEXT: stw 9, 16(5)
+; LE-32BIT-NEXT: stw 8, 20(5)
+; LE-32BIT-NEXT: stw 7, 8(5)
+; LE-32BIT-NEXT: stw 6, 12(5)
+; LE-32BIT-NEXT: stw 4, 4(5)
; LE-32BIT-NEXT: addi 1, 1, 80
; LE-32BIT-NEXT: blr
%src = load i256, ptr %src.ptr, align 1
- %byteOff = load i256, ptr %byteOff.ptr, align 1
- %bitOff = shl i256 %byteOff, 3
+ %dwordOff = load i256, ptr %dwordOff.ptr, align 1
+ %bitOff = shl i256 %dwordOff, 6
%res = shl i256 %src, %bitOff
store i256 %res, ptr %dst, align 1
ret void
}
+
+
define void @ashr_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; LE-64BIT-LABEL: ashr_32bytes:
; LE-64BIT: # %bb.0:
+; LE-64BIT-NEXT: ld 6, 24(3)
; LE-64BIT-NEXT: lxvd2x 0, 0, 3
-; LE-64BIT-NEXT: ld 6, 16(3)
-; LE-64BIT-NEXT: ld 3, 24(3)
+; LE-64BIT-NEXT: lwz 4, 0(4)
; LE-64BIT-NEXT: addi 7, 1, -64
+; LE-64BIT-NEXT: ld 3, 16(3)
+; LE-64BIT-NEXT: sradi 8, 6, 63
+; LE-64BIT-NEXT: rlwinm 9, 4, 0, 27, 28
+; LE-64BIT-NEXT: stxvd2x 0, 0, 7
+; LE-64BIT-NEXT: std 6, -40(1)
+; LE-64BIT-NEXT: std 3, -48(1)
+; LE-64BIT-NEXT: std 8, -8(1)
+; LE-64BIT-NEXT: std 8, -16(1)
+; LE-64BIT-NEXT: std 8, -24(1)
+; LE-64BIT-NEXT: std 8, -32(1)
+; LE-64BIT-NEXT: rlwinm 3, 4, 3, 26, 28
+; LE-64BIT-NEXT: ldux 4, 9, 7
+; LE-64BIT-NEXT: ld 7, 8(9)
+; LE-64BIT-NEXT: subfic 6, 3, 64
+; LE-64BIT-NEXT: ld 8, 16(9)
+; LE-64BIT-NEXT: ld 9, 24(9)
+; LE-64BIT-NEXT: srd 4, 4, 3
+; LE-64BIT-NEXT: sld 10, 7, 6
+; LE-64BIT-NEXT: sld 11, 9, 6
+; LE-64BIT-NEXT: srd 7, 7, 3
+; LE-64BIT-NEXT: sld 6, 8, 6
+; LE-64BIT-NEXT: or 4, 10, 4
+; LE-64BIT-NEXT: srd 10, 8, 3
+; LE-64BIT-NEXT: srad 3, 9, 3
+; LE-64BIT-NEXT: or 6, 6, 7
+; LE-64BIT-NEXT: std 3, 24(5)
+; LE-64BIT-NEXT: or 3, 11, 10
+; LE-64BIT-NEXT: std 6, 8(5)
+; LE-64BIT-NEXT: std 4, 0(5)
+; LE-64BIT-NEXT: std 3, 16(5)
+; LE-64BIT-NEXT: blr
+;
+; BE-LABEL: ashr_32bytes:
+; BE: # %bb.0:
+; BE-NEXT: ld 7, 0(3)
+; BE-NEXT: ld 8, 8(3)
+; BE-NEXT: ld 9, 16(3)
+; BE-NEXT: ld 3, 24(3)
+; BE-NEXT: lwz 4, 28(4)
+; BE-NEXT: addi 6, 1, -32
+; BE-NEXT: std 3, -8(1)
+; BE-NEXT: std 7, -32(1)
+; BE-NEXT: sradi 3, 7, 63
+; BE-NEXT: rlwinm 7, 4, 0, 27, 28
+; BE-NEXT: std 3, -40(1)
+; BE-NEXT: std 3, -48(1)
+; BE-NEXT: std 3, -56(1)
+; BE-NEXT: std 3, -64(1)
+; BE-NEXT: neg 3, 7
+; BE-NEXT: std 9, -16(1)
+; BE-NEXT: std 8, -24(1)
+; BE-NEXT: extsw 3, 3
+; BE-NEXT: ldux 3, 6, 3
+; BE-NEXT: rlwinm 4, 4, 3, 26, 28
+; BE-NEXT: subfic 9, 4, 64
+; BE-NEXT: ld 7, 8(6)
+; BE-NEXT: ld 8, 24(6)
+; BE-NEXT: ld 6, 16(6)
+; BE-NEXT: sld 10, 3, 9
+; BE-NEXT: srad 3, 3, 4
+; BE-NEXT: std 3, 0(5)
+; BE-NEXT: srd 11, 7, 4
+; BE-NEXT: srd 8, 8, 4
+; BE-NEXT: sld 7, 7, 9
+; BE-NEXT: sld 9, 6, 9
+; BE-NEXT: srd 6, 6, 4
+; BE-NEXT: or 10, 10, 11
+; BE-NEXT: or 8, 9, 8
+; BE-NEXT: or 6, 7, 6
+; BE-NEXT: std 6, 16(5)
+; BE-NEXT: std 8, 24(5)
+; BE-NEXT: std 10, 8(5)
+; BE-NEXT: blr
+;
+; LE-32BIT-LABEL: ashr_32bytes:
+; LE-32BIT: # %bb.0:
+; LE-32BIT-NEXT: stwu 1, -112(1)
+; LE-32BIT-NEXT: lwz 7, 0(3)
+; LE-32BIT-NEXT: addi 6, 1, 48
+; LE-32BIT-NEXT: lwz 8, 4(3)
+; LE-32BIT-NEXT: lwz 9, 8(3)
+; LE-32BIT-NEXT: lwz 10, 12(3)
+; LE-32BIT-NEXT: lwz 11, 16(3)
+; LE-32BIT-NEXT: lwz 12, 20(3)
+; LE-32BIT-NEXT: lwz 0, 24(3)
+; LE-32BIT-NEXT: lwz 3, 28(3)
+; LE-32BIT-NEXT: lwz 4, 28(4)
+; LE-32BIT-NEXT: stw 3, 76(1)
+; LE-32BIT-NEXT: srawi 3, 7, 31
+; LE-32BIT-NEXT: stw 7, 48(1)
+; LE-32BIT-NEXT: rlwinm 7, 4, 0, 27, 29
+; LE-32BIT-NEXT: stw 25, 84(1) # 4-byte Folded Spill
+; LE-32BIT-NEXT: rlwinm 4, 4, 3, 27, 28
+; LE-32BIT-NEXT: stw 26, 88(1) # 4-byte Folded Spill
+; LE-32BIT-NEXT: stw 27, 92(1) # 4-byte Folded Spill
+; LE-32BIT-NEXT: stw 28, 96(1) # 4-byte Folded Spill
+; LE-32BIT-NEXT: stw 29, 100(1) # 4-byte Folded Spill
+; LE-32BIT-NEXT: stw 30, 104(1) # 4-byte Folded Spill
+; LE-32BIT-NEXT: stw 0, 72(1)
+; LE-32BIT-NEXT: subfic 0, 4, 32
+; LE-32BIT-NEXT: stw 12, 68(1)
+; LE-32BIT-NEXT: stw 11, 64(1)
+; LE-32BIT-NEXT: stw 10, 60(1)
+; LE-32BIT-NEXT: stw 9, 56(1)
+; LE-32BIT-NEXT: stw 8, 52(1)
+; LE-32BIT-NEXT: stw 3, 44(1)
+; LE-32BIT-NEXT: stw 3, 40(1)
+; LE-32BIT-NEXT: stw 3, 36(1)
+; LE-32BIT-NEXT: stw 3, 32(1)
+; LE-32BIT-NEXT: stw 3, 28(1)
+; LE-32BIT-NEXT: stw 3, 24(1)
+; LE-32BIT-NEXT: stw 3, 20(1)
+; LE-32BIT-NEXT: stw 3, 16(1)
+; LE-32BIT-NEXT: sub 3, 6, 7
+; LE-32BIT-NEXT: lwz 6, 4(3)
+; LE-32BIT-NEXT: lwz 7, 0(3)
+; LE-32BIT-NEXT: lwz 8, 12(3)
+; LE-32BIT-NEXT: srw 30, 6, 4
+; LE-32BIT-NEXT: lwz 9, 8(3)
+; LE-32BIT-NEXT: slw 29, 7, 0
+; LE-32BIT-NEXT: lwz 10, 20(3)
+; LE-32BIT-NEXT: srw 28, 8, 4
+; LE-32BIT-NEXT: lwz 11, 16(3)
+; LE-32BIT-NEXT: slw 27, 9, 0
+; LE-32BIT-NEXT: lwz 12, 28(3)
+; LE-32BIT-NEXT: slw 6, 6, 0
+; LE-32BIT-NEXT: lwz 3, 24(3)
+; LE-32BIT-NEXT: srw 26, 10, 4
+; LE-32BIT-NEXT: slw 25, 11, 0
+; LE-32BIT-NEXT: slw 8, 8, 0
+; LE-32BIT-NEXT: slw 10, 10, 0
+; LE-32BIT-NEXT: slw 0, 3, 0
+; LE-32BIT-NEXT: srw 3, 3, 4
+; LE-32BIT-NEXT: srw 12, 12, 4
+; LE-32BIT-NEXT: or 3, 10, 3
+; LE-32BIT-NEXT: srw 11, 11, 4
+; LE-32BIT-NEXT: stw 3, 24(5)
+; LE-32BIT-NEXT: or 3, 0, 12
+; LE-32BIT-NEXT: stw 3, 28(5)
+; LE-32BIT-NEXT: or 3, 8, 11
+; LE-32BIT-NEXT: srw 9, 9, 4
+; LE-32BIT-NEXT: stw 3, 16(5)
+; LE-32BIT-NEXT: or 3, 25, 26
+; LE-32BIT-NEXT: stw 3, 20(5)
+; LE-32BIT-NEXT: or 3, 6, 9
+; LE-32BIT-NEXT: stw 3, 8(5)
+; LE-32BIT-NEXT: or 3, 27, 28
+; LE-32BIT-NEXT: sraw 4, 7, 4
+; LE-32BIT-NEXT: stw 3, 12(5)
+; LE-32BIT-NEXT: or 3, 29, 30
+; LE-32BIT-NEXT: stw 4, 0(5)
+; LE-32BIT-NEXT: stw 3, 4(5)
+; LE-32BIT-NEXT: lwz 30, 104(1) # 4-byte Folded Reload
+; LE-32BIT-NEXT: lwz 29, 100(1) # 4-byte Folded Reload
+; LE-32BIT-NEXT: lwz 28, 96(1) # 4-byte Folded Reload
+; LE-32BIT-NEXT: lwz 27, 92(1) # 4-byte Folded Reload
+; LE-32BIT-NEXT: lwz 26, 88(1) # 4-byte Folded Reload
+; LE-32BIT-NEXT: lwz 25, 84(1) # 4-byte Folded Reload
+; LE-32BIT-NEXT: addi 1, 1, 112
+; LE-32BIT-NEXT: blr
+ %src = load i256, ptr %src.ptr, align 1
+ %byteOff = load i256, ptr %byteOff.ptr, align 1
+ %bitOff = shl i256 %byteOff, 3
+ %res = ashr i256 %src, %bitOff
+ store i256 %res, ptr %dst, align 1
+ ret void
+}
+
+define void @ashr_32bytes_wordOff(ptr %src.ptr, ptr %wordOff.ptr, ptr %dst) nounwind {
+; LE-64BIT-LABEL: ashr_32bytes_wordOff:
+; LE-64BIT: # %bb.0:
+; LE-64BIT-NEXT: ld 6, 24(3)
+; LE-64BIT-NEXT: lxvd2x 0, 0, 3
; LE-64BIT-NEXT: lwz 4, 0(4)
-; LE-64BIT-NEXT: li 8, 16
-; LE-64BIT-NEXT: std 3, 24(7)
-; LE-64BIT-NEXT: sradi 3, 3, 63
-; LE-64BIT-NEXT: std 6, 16(7)
-; LE-64BIT-NEXT: std 3, 56(7)
-; LE-64BIT-NEXT: std 3, 48(7)
-; LE-64BIT-NEXT: std 3, 40(7)
-; LE-64BIT-NEXT: std 3, 32(7)
-; LE-64BIT-NEXT: clrldi 3, 4, 59
+; LE-64BIT-NEXT: addi 7, 1, -64
+; LE-64BIT-NEXT: ld 3, 16(3)
+; LE-64BIT-NEXT: sradi 8, 6, 63
+; LE-64BIT-NEXT: rlwinm 9, 4, 2, 27, 28
; LE-64BIT-NEXT: stxvd2x 0, 0, 7
-; LE-64BIT-NEXT: lxvd2x 0, 7, 3
-; LE-64BIT-NEXT: add 3, 7, 3
-; LE-64BIT-NEXT: lxvd2x 1, 3, 8
-; LE-64BIT-NEXT: stxvd2x 1, 5, 8
+; LE-64BIT-NEXT: std 6, -40(1)
+; LE-64BIT-NEXT: std 3, -48(1)
+; LE-64BIT-NEXT: std 8, -8(1)
+; LE-64BIT-NEXT: std 8, -16(1)
+; LE-64BIT-NEXT: std 8, -24(1)
+; LE-64BIT-NEXT: std 8, -32(1)
+; LE-64BIT-NEXT: rlwinm 3, 4, 5, 26, 26
+; LE-64BIT-NEXT: ldux 4, 9, 7
+; LE-64BIT-NEXT: ld 7, 8(9)
+; LE-64BIT-NEXT: subfic 6, 3, 64
+; LE-64BIT-NEXT: ld 8, 16(9)
+; LE-64BIT-NEXT: ld 9, 24(9)
+; LE-64BIT-NEXT: srd 4, 4, 3
+; LE-64BIT-NEXT: sld 10, 7, 6
+; LE-64BIT-NEXT: sld 11, 9, 6
+; LE-64BIT-NEXT: srd 7, 7, 3
+; LE-64BIT-NEXT: sld 6, 8, 6
+; LE-64BIT-NEXT: or 4, 10, 4
+; LE-64BIT-NEXT: srd 10, 8, 3
+; LE-64BIT-NEXT: srad 3, 9, 3
+; LE-64BIT-NEXT: or 6, 6, 7
+; LE-64BIT-NEXT: std 3, 24(5)
+; LE-64BIT-NEXT: or 3, 11, 10
+; LE-64BIT-NEXT: std 6, 8(5)
+; LE-64BIT-NEXT: std 4, 0(5)
+; LE-64BIT-NEXT: std 3, 16(5)
+; LE-64BIT-NEXT: blr
+;
+; BE-LABEL: ashr_32bytes_wordOff:
+; BE: # %bb.0:
+; BE-NEXT: ld 7, 0(3)
+; BE-NEXT: ld 8, 8(3)
+; BE-NEXT: ld 9, 16(3)
+; BE-NEXT: ld 3, 24(3)
+; BE-NEXT: lwz 4, 28(4)
+; BE-NEXT: addi 6, 1, -32
+; BE-NEXT: std 3, -8(1)
+; BE-NEXT: std 7, -32(1)
+; BE-NEXT: sradi 3, 7, 63
+; BE-NEXT: rlwinm 7, 4, 2, 27, 28
+; BE-NEXT: std 3, -40(1)
+; BE-NEXT: std 3, -48(1)
+; BE-NEXT: std 3, -56(1)
+; BE-NEXT: std 3, -64(1)
+; BE-NEXT: neg 3, 7
+; BE-NEXT: std 9, -16(1)
+; BE-NEXT: std 8, -24(1)
+; BE-NEXT: extsw 3, 3
+; BE-NEXT: ldux 3, 6, 3
+; BE-NEXT: rlwinm 4, 4, 5, 26, 26
+; BE-NEXT: subfic 9, 4, 64
+; BE-NEXT: ld 7, 8(6)
+; BE-NEXT: ld 8, 24(6)
+; BE-NEXT: ld 6, 16(6)
+; BE-NEXT: sld 10, 3, 9
+; BE-NEXT: srad 3, 3, 4
+; BE-NEXT: std 3, 0(5)
+; BE-NEXT: srd 11, 7, 4
+; BE-NEXT: srd 8, 8, 4
+; BE-NEXT: sld 7, 7, 9
+; BE-NEXT: sld 9, 6, 9
+; BE-NEXT: srd 6, 6, 4
+; BE-NEXT: or 10, 10, 11
+; BE-NEXT: or 8, 9, 8
+; BE-NEXT: or 6, 7, 6
+; BE-NEXT: std 6, 16(5)
+; BE-NEXT: std 8, 24(5)
+; BE-NEXT: std 10, 8(5)
+; BE-NEXT: blr
+;
+; LE-32BIT-LABEL: ashr_32bytes_wordOff:
+; LE-32BIT: # %bb.0:
+; LE-32BIT-NEXT: stwu 1, -80(1)
+; LE-32BIT-NEXT: lwz 7, 0(3)
+; LE-32BIT-NEXT: addi 6, 1, 48
+; LE-32BIT-NEXT: lwz 8, 4(3)
+; LE-32BIT-NEXT: lwz 9, 8(3)
+; LE-32BIT-NEXT: lwz 10, 12(3)
+; LE-32BIT-NEXT: lwz 11, 16(3)
+; LE-32BIT-NEXT: lwz 12, 20(3)
+; LE-32BIT-NEXT: lwz 0, 24(3)
+; LE-32BIT-NEXT: lwz 3, 28(3)
+; LE-32BIT-NEXT: lwz 4, 28(4)
+; LE-32BIT-NEXT: stw 3, 76(1)
+; LE-32BIT-NEXT: srawi 3, 7, 31
+; LE-32BIT-NEXT: rlwinm 4, 4, 2, 27, 29
+; LE-32BIT-NEXT: stw 0, 72(1)
+; LE-32BIT-NEXT: stw 12, 68(1)
+; LE-32BIT-NEXT: stw 11, 64(1)
+; LE-32BIT-NEXT: stw 10, 60(1)
+; LE-32BIT-NEXT: stw 9, 56(1)
+; LE-32BIT-NEXT: stw 8, 52(1)
+; LE-32BIT-NEXT: stw 7, 48(1)
+; LE-32BIT-NEXT: stw 3, 44(1)
+; LE-32BIT-NEXT: stw 3, 40(1)
+; LE-32BIT-NEXT: stw 3, 36(1)
+; LE-32BIT-NEXT: stw 3, 32(1)
+; LE-32BIT-NEXT: stw 3, 28(1)
+; LE-32BIT-NEXT: stw 3, 24(1)
+; LE-32BIT-NEXT: stw 3, 20(1)
+; LE-32BIT-NEXT: stw 3, 16(1)
+; LE-32BIT-NEXT: sub 3, 6, 4
+; LE-32BIT-NEXT: lwz 4, 4(3)
+; LE-32BIT-NEXT: lwz 6, 0(3)
+; LE-32BIT-NEXT: lwz 7, 12(3)
+; LE-32BIT-NEXT: lwz 8, 8(3)
+; LE-32BIT-NEXT: lwz 9, 20(3)
+; LE-32BIT-NEXT: lwz 10, 16(3)
+; LE-32BIT-NEXT: lwz 11, 24(3)
+; LE-32BIT-NEXT: lwz 3, 28(3)
+; LE-32BIT-NEXT: stw 11, 24(5)
+; LE-32BIT-NEXT: stw 3, 28(5)
+; LE-32BIT-NEXT: stw 10, 16(5)
+; LE-32BIT-NEXT: stw 9, 20(5)
+; LE-32BIT-NEXT: stw 8, 8(5)
+; LE-32BIT-NEXT: stw 7, 12(5)
+; LE-32BIT-NEXT: stw 6, 0(5)
+; LE-32BIT-NEXT: stw 4, 4(5)
+; LE-32BIT-NEXT: addi 1, 1, 80
+; LE-32BIT-NEXT: blr
+ %src = load i256, ptr %src.ptr, align 1
+ %wordOff = load i256, ptr %wordOff.ptr, align 1
+ %bitOff = shl i256 %wordOff, 5
+ %res = ashr i256 %src, %bitOff
+ store i256 %res, ptr %dst, align 1
+ ret void
+}
+
+define void @ashr_32bytes_dwordOff(ptr %src.ptr, ptr %dwordOff.ptr, ptr %dst) nounwind {
+; LE-64BIT-LABEL: ashr_32bytes_dwordOff:
+; LE-64BIT: # %bb.0:
+; LE-64BIT-NEXT: lxvd2x 0, 0, 3
+; LE-64BIT-NEXT: ld 6, 16(3)
+; LE-64BIT-NEXT: ld 7, 24(3)
+; LE-64BIT-NEXT: lwz 3, 0(4)
+; LE-64BIT-NEXT: addi 4, 1, -64
+; LE-64BIT-NEXT: rlwinm 3, 3, 3, 27, 28
+; LE-64BIT-NEXT: stxvd2x 0, 0, 4
+; LE-64BIT-NEXT: std 6, -48(1)
+; LE-64BIT-NEXT: sradi 6, 7, 63
+; LE-64BIT-NEXT: std 7, -40(1)
+; LE-64BIT-NEXT: std 6, -8(1)
+; LE-64BIT-NEXT: std 6, -16(1)
+; LE-64BIT-NEXT: std 6, -24(1)
+; LE-64BIT-NEXT: std 6, -32(1)
+; LE-64BIT-NEXT: lxvd2x 0, 4, 3
+; LE-64BIT-NEXT: add 3, 4, 3
+; LE-64BIT-NEXT: li 4, 16
+; LE-64BIT-NEXT: lxvd2x 1, 3, 4
+; LE-64BIT-NEXT: stxvd2x 1, 5, 4
; LE-64BIT-NEXT: stxvd2x 0, 0, 5
; LE-64BIT-NEXT: blr
;
-; BE-LABEL: ashr_32bytes:
+; BE-LABEL: ashr_32bytes_dwordOff:
; BE: # %bb.0:
; BE-NEXT: ld 7, 0(3)
; BE-NEXT: ld 8, 8(3)
; BE-NEXT: ld 9, 16(3)
; BE-NEXT: ld 3, 24(3)
; BE-NEXT: lwz 4, 28(4)
-; BE-NEXT: addi 6, 1, -64
-; BE-NEXT: std 3, 56(6)
+; BE-NEXT: addi 6, 1, -32
+; BE-NEXT: std 3, -8(1)
; BE-NEXT: sradi 3, 7, 63
-; BE-NEXT: clrlwi 4, 4, 27
-; BE-NEXT: std 3, 24(6)
-; BE-NEXT: std 3, 16(6)
-; BE-NEXT: std 3, 8(6)
+; BE-NEXT: rlwinm 4, 4, 3, 27, 28
+; BE-NEXT: std 3, -40(1)
+; BE-NEXT: std 3, -48(1)
+; BE-NEXT: std 3, -56(1)
; BE-NEXT: std 3, -64(1)
; BE-NEXT: neg 3, 4
-; BE-NEXT: std 9, 48(6)
-; BE-NEXT: std 8, 40(6)
-; BE-NEXT: std 7, 32(6)
+; BE-NEXT: std 9, -16(1)
+; BE-NEXT: std 8, -24(1)
+; BE-NEXT: std 7, -32(1)
; BE-NEXT: extsw 3, 3
-; BE-NEXT: addi 4, 1, -32
-; BE-NEXT: ldux 3, 4, 3
-; BE-NEXT: ld 6, 8(4)
-; BE-NEXT: ld 7, 24(4)
-; BE-NEXT: ld 4, 16(4)
+; BE-NEXT: ldux 3, 6, 3
+; BE-NEXT: ld 4, 8(6)
+; BE-NEXT: ld 7, 24(6)
+; BE-NEXT: ld 6, 16(6)
; BE-NEXT: std 3, 0(5)
-; BE-NEXT: std 4, 16(5)
+; BE-NEXT: std 6, 16(5)
; BE-NEXT: std 7, 24(5)
-; BE-NEXT: std 6, 8(5)
+; BE-NEXT: std 4, 8(5)
; BE-NEXT: blr
;
-; LE-32BIT-LABEL: ashr_32bytes:
+; LE-32BIT-LABEL: ashr_32bytes_dwordOff:
; LE-32BIT: # %bb.0:
; LE-32BIT-NEXT: stwu 1, -80(1)
; LE-32BIT-NEXT: lwz 7, 0(3)
@@ -707,7 +1915,7 @@ define void @ashr_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; LE-32BIT-NEXT: lwz 4, 28(4)
; LE-32BIT-NEXT: stw 3, 76(1)
; LE-32BIT-NEXT: srawi 3, 7, 31
-; LE-32BIT-NEXT: clrlwi 4, 4, 27
+; LE-32BIT-NEXT: rlwinm 4, 4, 3, 27, 28
; LE-32BIT-NEXT: stw 0, 72(1)
; LE-32BIT-NEXT: stw 12, 68(1)
; LE-32BIT-NEXT: stw 11, 64(1)
@@ -743,11 +1951,13 @@ define void @ashr_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; LE-32BIT-NEXT: addi 1, 1, 80
; LE-32BIT-NEXT: blr
%src = load i256, ptr %src.ptr, align 1
- %byteOff = load i256, ptr %byteOff.ptr, align 1
- %bitOff = shl i256 %byteOff, 3
+ %dwordOff = load i256, ptr %dwordOff.ptr, align 1
+ %bitOff = shl i256 %dwordOff, 6
%res = ashr i256 %src, %bitOff
store i256 %res, ptr %dst, align 1
ret void
}
+
+
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; LE: {{.*}}
diff --git a/llvm/test/CodeGen/PowerPC/wide-scalar-shift-legalization.ll b/llvm/test/CodeGen/PowerPC/wide-scalar-shift-legalization.ll
index 044ddf5..8e69547 100644
--- a/llvm/test/CodeGen/PowerPC/wide-scalar-shift-legalization.ll
+++ b/llvm/test/CodeGen/PowerPC/wide-scalar-shift-legalization.ll
@@ -209,45 +209,41 @@ define void @lshr_16bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; LE-32BIT-NEXT: stwu 1, -48(1)
; LE-32BIT-NEXT: lwz 7, 0(3)
; LE-32BIT-NEXT: li 6, 0
-; LE-32BIT-NEXT: lwz 4, 12(4)
; LE-32BIT-NEXT: lwz 8, 4(3)
; LE-32BIT-NEXT: lwz 9, 8(3)
; LE-32BIT-NEXT: lwz 3, 12(3)
+; LE-32BIT-NEXT: lwz 4, 12(4)
; LE-32BIT-NEXT: stw 6, 28(1)
; LE-32BIT-NEXT: stw 6, 24(1)
; LE-32BIT-NEXT: stw 6, 20(1)
; LE-32BIT-NEXT: stw 6, 16(1)
-; LE-32BIT-NEXT: addi 6, 1, 32
-; LE-32BIT-NEXT: stw 7, 32(1)
-; LE-32BIT-NEXT: rlwinm 7, 4, 29, 28, 31
+; LE-32BIT-NEXT: rlwinm 6, 4, 29, 28, 29
; LE-32BIT-NEXT: stw 3, 44(1)
-; LE-32BIT-NEXT: sub 6, 6, 7
+; LE-32BIT-NEXT: addi 3, 1, 32
; LE-32BIT-NEXT: stw 9, 40(1)
-; LE-32BIT-NEXT: li 3, 7
+; LE-32BIT-NEXT: sub 3, 3, 6
; LE-32BIT-NEXT: stw 8, 36(1)
-; LE-32BIT-NEXT: nand 3, 4, 3
-; LE-32BIT-NEXT: lwz 7, 4(6)
-; LE-32BIT-NEXT: clrlwi 4, 4, 29
-; LE-32BIT-NEXT: lwz 8, 8(6)
-; LE-32BIT-NEXT: subfic 10, 4, 32
-; LE-32BIT-NEXT: lwz 9, 0(6)
-; LE-32BIT-NEXT: clrlwi 3, 3, 27
-; LE-32BIT-NEXT: lwz 6, 12(6)
-; LE-32BIT-NEXT: srw 11, 8, 4
-; LE-32BIT-NEXT: slw 8, 8, 10
-; LE-32BIT-NEXT: slw 10, 9, 10
-; LE-32BIT-NEXT: srw 6, 6, 4
-; LE-32BIT-NEXT: srw 9, 9, 4
-; LE-32BIT-NEXT: srw 4, 7, 4
-; LE-32BIT-NEXT: slwi 7, 7, 1
-; LE-32BIT-NEXT: slw 3, 7, 3
-; LE-32BIT-NEXT: or 6, 8, 6
-; LE-32BIT-NEXT: or 4, 10, 4
-; LE-32BIT-NEXT: or 3, 11, 3
-; LE-32BIT-NEXT: stw 9, 0(5)
-; LE-32BIT-NEXT: stw 6, 12(5)
-; LE-32BIT-NEXT: stw 4, 4(5)
+; LE-32BIT-NEXT: clrlwi 4, 4, 27
+; LE-32BIT-NEXT: stw 7, 32(1)
+; LE-32BIT-NEXT: subfic 9, 4, 32
+; LE-32BIT-NEXT: lwz 6, 4(3)
+; LE-32BIT-NEXT: lwz 7, 0(3)
+; LE-32BIT-NEXT: lwz 8, 12(3)
+; LE-32BIT-NEXT: srw 10, 6, 4
+; LE-32BIT-NEXT: lwz 3, 8(3)
+; LE-32BIT-NEXT: slw 11, 7, 9
+; LE-32BIT-NEXT: slw 6, 6, 9
+; LE-32BIT-NEXT: srw 8, 8, 4
+; LE-32BIT-NEXT: slw 9, 3, 9
+; LE-32BIT-NEXT: srw 3, 3, 4
+; LE-32BIT-NEXT: or 3, 6, 3
; LE-32BIT-NEXT: stw 3, 8(5)
+; LE-32BIT-NEXT: or 3, 9, 8
+; LE-32BIT-NEXT: srw 4, 7, 4
+; LE-32BIT-NEXT: stw 3, 12(5)
+; LE-32BIT-NEXT: or 3, 11, 10
+; LE-32BIT-NEXT: stw 4, 0(5)
+; LE-32BIT-NEXT: stw 3, 4(5)
; LE-32BIT-NEXT: addi 1, 1, 48
; LE-32BIT-NEXT: blr
%src = load i128, ptr %src.ptr, align 1
@@ -304,34 +300,30 @@ define void @shl_16bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; LE-32BIT-NEXT: stw 6, 40(1)
; LE-32BIT-NEXT: stw 6, 36(1)
; LE-32BIT-NEXT: stw 6, 32(1)
-; LE-32BIT-NEXT: rlwinm 6, 4, 29, 28, 31
+; LE-32BIT-NEXT: rlwinm 6, 4, 29, 28, 29
; LE-32BIT-NEXT: stw 3, 28(1)
; LE-32BIT-NEXT: addi 3, 1, 16
; LE-32BIT-NEXT: stw 9, 24(1)
+; LE-32BIT-NEXT: clrlwi 4, 4, 27
; LE-32BIT-NEXT: stw 8, 20(1)
+; LE-32BIT-NEXT: subfic 8, 4, 32
; LE-32BIT-NEXT: stw 7, 16(1)
-; LE-32BIT-NEXT: li 7, 7
; LE-32BIT-NEXT: lwzux 3, 6, 3
-; LE-32BIT-NEXT: nand 7, 4, 7
-; LE-32BIT-NEXT: clrlwi 4, 4, 29
-; LE-32BIT-NEXT: subfic 10, 4, 32
-; LE-32BIT-NEXT: lwz 8, 8(6)
-; LE-32BIT-NEXT: clrlwi 7, 7, 27
; LE-32BIT-NEXT: lwz 9, 4(6)
; LE-32BIT-NEXT: slw 3, 3, 4
+; LE-32BIT-NEXT: lwz 7, 8(6)
; LE-32BIT-NEXT: lwz 6, 12(6)
; LE-32BIT-NEXT: slw 11, 9, 4
-; LE-32BIT-NEXT: srw 9, 9, 10
-; LE-32BIT-NEXT: srw 10, 6, 10
-; LE-32BIT-NEXT: slw 6, 6, 4
-; LE-32BIT-NEXT: slw 4, 8, 4
-; LE-32BIT-NEXT: srwi 8, 8, 1
-; LE-32BIT-NEXT: srw 7, 8, 7
+; LE-32BIT-NEXT: srw 9, 9, 8
+; LE-32BIT-NEXT: srw 10, 7, 8
+; LE-32BIT-NEXT: srw 8, 6, 8
+; LE-32BIT-NEXT: slw 7, 7, 4
+; LE-32BIT-NEXT: slw 4, 6, 4
; LE-32BIT-NEXT: or 3, 3, 9
-; LE-32BIT-NEXT: or 4, 4, 10
+; LE-32BIT-NEXT: stw 4, 12(5)
+; LE-32BIT-NEXT: or 4, 7, 8
; LE-32BIT-NEXT: stw 3, 0(5)
-; LE-32BIT-NEXT: or 3, 11, 7
-; LE-32BIT-NEXT: stw 6, 12(5)
+; LE-32BIT-NEXT: or 3, 11, 10
; LE-32BIT-NEXT: stw 4, 8(5)
; LE-32BIT-NEXT: stw 3, 4(5)
; LE-32BIT-NEXT: addi 1, 1, 48
@@ -387,46 +379,42 @@ define void @ashr_16bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; LE-32BIT: # %bb.0:
; LE-32BIT-NEXT: stwu 1, -48(1)
; LE-32BIT-NEXT: lwz 7, 0(3)
-; LE-32BIT-NEXT: li 6, 7
+; LE-32BIT-NEXT: addi 6, 1, 32
; LE-32BIT-NEXT: lwz 8, 4(3)
; LE-32BIT-NEXT: lwz 9, 8(3)
; LE-32BIT-NEXT: lwz 3, 12(3)
; LE-32BIT-NEXT: lwz 4, 12(4)
; LE-32BIT-NEXT: stw 3, 44(1)
; LE-32BIT-NEXT: srawi 3, 7, 31
-; LE-32BIT-NEXT: stw 8, 36(1)
-; LE-32BIT-NEXT: rlwinm 8, 4, 29, 28, 31
; LE-32BIT-NEXT: stw 7, 32(1)
-; LE-32BIT-NEXT: addi 7, 1, 32
+; LE-32BIT-NEXT: rlwinm 7, 4, 29, 28, 29
; LE-32BIT-NEXT: stw 9, 40(1)
-; LE-32BIT-NEXT: nand 6, 4, 6
+; LE-32BIT-NEXT: clrlwi 4, 4, 27
+; LE-32BIT-NEXT: stw 8, 36(1)
+; LE-32BIT-NEXT: subfic 9, 4, 32
; LE-32BIT-NEXT: stw 3, 28(1)
-; LE-32BIT-NEXT: clrlwi 4, 4, 29
; LE-32BIT-NEXT: stw 3, 24(1)
-; LE-32BIT-NEXT: subfic 10, 4, 32
; LE-32BIT-NEXT: stw 3, 20(1)
-; LE-32BIT-NEXT: clrlwi 6, 6, 27
; LE-32BIT-NEXT: stw 3, 16(1)
-; LE-32BIT-NEXT: sub 3, 7, 8
-; LE-32BIT-NEXT: lwz 7, 4(3)
-; LE-32BIT-NEXT: lwz 8, 8(3)
-; LE-32BIT-NEXT: lwz 9, 0(3)
-; LE-32BIT-NEXT: lwz 3, 12(3)
-; LE-32BIT-NEXT: srw 11, 8, 4
-; LE-32BIT-NEXT: slw 8, 8, 10
-; LE-32BIT-NEXT: slw 10, 9, 10
+; LE-32BIT-NEXT: sub 3, 6, 7
+; LE-32BIT-NEXT: lwz 6, 4(3)
+; LE-32BIT-NEXT: lwz 7, 0(3)
+; LE-32BIT-NEXT: lwz 8, 12(3)
+; LE-32BIT-NEXT: srw 10, 6, 4
+; LE-32BIT-NEXT: lwz 3, 8(3)
+; LE-32BIT-NEXT: slw 11, 7, 9
+; LE-32BIT-NEXT: slw 6, 6, 9
+; LE-32BIT-NEXT: srw 8, 8, 4
+; LE-32BIT-NEXT: slw 9, 3, 9
; LE-32BIT-NEXT: srw 3, 3, 4
-; LE-32BIT-NEXT: sraw 9, 9, 4
-; LE-32BIT-NEXT: srw 4, 7, 4
-; LE-32BIT-NEXT: slwi 7, 7, 1
-; LE-32BIT-NEXT: or 3, 8, 3
-; LE-32BIT-NEXT: slw 6, 7, 6
+; LE-32BIT-NEXT: or 3, 6, 3
+; LE-32BIT-NEXT: stw 3, 8(5)
+; LE-32BIT-NEXT: or 3, 9, 8
+; LE-32BIT-NEXT: sraw 4, 7, 4
; LE-32BIT-NEXT: stw 3, 12(5)
-; LE-32BIT-NEXT: or 3, 10, 4
+; LE-32BIT-NEXT: or 3, 11, 10
+; LE-32BIT-NEXT: stw 4, 0(5)
; LE-32BIT-NEXT: stw 3, 4(5)
-; LE-32BIT-NEXT: or 3, 11, 6
-; LE-32BIT-NEXT: stw 9, 0(5)
-; LE-32BIT-NEXT: stw 3, 8(5)
; LE-32BIT-NEXT: addi 1, 1, 48
; LE-32BIT-NEXT: blr
%src = load i128, ptr %src.ptr, align 1
@@ -449,32 +437,30 @@ define void @lshr_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; LE-64BIT-NEXT: li 4, 48
; LE-64BIT-NEXT: stxvd2x 2, 7, 4
; LE-64BIT-NEXT: stxvd2x 2, 7, 8
-; LE-64BIT-NEXT: rlwinm 4, 3, 29, 27, 31
+; LE-64BIT-NEXT: rlwinm 4, 3, 29, 27, 28
+; LE-64BIT-NEXT: clrlwi 3, 3, 26
; LE-64BIT-NEXT: stxvd2x 0, 7, 6
; LE-64BIT-NEXT: stxvd2x 1, 0, 7
-; LE-64BIT-NEXT: li 6, 7
-; LE-64BIT-NEXT: ldux 7, 4, 7
-; LE-64BIT-NEXT: ld 8, 16(4)
-; LE-64BIT-NEXT: nand 6, 3, 6
+; LE-64BIT-NEXT: xori 8, 3, 63
+; LE-64BIT-NEXT: ldux 6, 4, 7
+; LE-64BIT-NEXT: ld 7, 16(4)
; LE-64BIT-NEXT: ld 9, 8(4)
-; LE-64BIT-NEXT: clrlwi 3, 3, 29
; LE-64BIT-NEXT: ld 4, 24(4)
-; LE-64BIT-NEXT: clrlwi 6, 6, 26
+; LE-64BIT-NEXT: srd 6, 6, 3
+; LE-64BIT-NEXT: sldi 11, 7, 1
+; LE-64BIT-NEXT: srd 10, 9, 3
; LE-64BIT-NEXT: srd 7, 7, 3
-; LE-64BIT-NEXT: sldi 10, 8, 1
-; LE-64BIT-NEXT: srd 11, 9, 3
-; LE-64BIT-NEXT: srd 8, 8, 3
-; LE-64BIT-NEXT: sld 6, 10, 6
+; LE-64BIT-NEXT: sld 8, 11, 8
+; LE-64BIT-NEXT: or 8, 10, 8
; LE-64BIT-NEXT: subfic 10, 3, 64
; LE-64BIT-NEXT: srd 3, 4, 3
-; LE-64BIT-NEXT: or 6, 11, 6
; LE-64BIT-NEXT: sld 11, 4, 10
; LE-64BIT-NEXT: sld 9, 9, 10
; LE-64BIT-NEXT: std 3, 24(5)
-; LE-64BIT-NEXT: or 7, 9, 7
-; LE-64BIT-NEXT: or 3, 11, 8
-; LE-64BIT-NEXT: std 6, 8(5)
-; LE-64BIT-NEXT: std 7, 0(5)
+; LE-64BIT-NEXT: std 8, 8(5)
+; LE-64BIT-NEXT: or 6, 9, 6
+; LE-64BIT-NEXT: or 3, 11, 7
+; LE-64BIT-NEXT: std 6, 0(5)
; LE-64BIT-NEXT: std 3, 16(5)
; LE-64BIT-NEXT: blr
;
@@ -485,44 +471,39 @@ define void @lshr_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; BE-NEXT: ld 8, 16(3)
; BE-NEXT: ld 3, 24(3)
; BE-NEXT: lwz 4, 28(4)
-; BE-NEXT: addi 9, 1, -64
-; BE-NEXT: li 10, 0
-; BE-NEXT: addi 11, 1, -32
-; BE-NEXT: std 3, 56(9)
-; BE-NEXT: rlwinm 3, 4, 29, 27, 31
+; BE-NEXT: li 9, 0
+; BE-NEXT: addi 10, 1, -32
+; BE-NEXT: std 9, -40(1)
+; BE-NEXT: std 9, -48(1)
+; BE-NEXT: std 9, -56(1)
+; BE-NEXT: std 9, -64(1)
+; BE-NEXT: std 3, -8(1)
+; BE-NEXT: rlwinm 3, 4, 29, 27, 28
; BE-NEXT: neg 3, 3
-; BE-NEXT: std 10, 24(9)
-; BE-NEXT: std 10, 16(9)
-; BE-NEXT: std 10, 8(9)
-; BE-NEXT: std 10, -64(1)
-; BE-NEXT: std 8, 48(9)
-; BE-NEXT: std 7, 40(9)
-; BE-NEXT: std 6, 32(9)
+; BE-NEXT: std 8, -16(1)
+; BE-NEXT: std 7, -24(1)
+; BE-NEXT: std 6, -32(1)
; BE-NEXT: extsw 3, 3
-; BE-NEXT: ldux 3, 11, 3
-; BE-NEXT: li 6, 7
-; BE-NEXT: nand 6, 4, 6
-; BE-NEXT: clrlwi 4, 4, 29
-; BE-NEXT: clrlwi 6, 6, 26
-; BE-NEXT: ld 7, 8(11)
-; BE-NEXT: ld 8, 16(11)
-; BE-NEXT: ld 9, 24(11)
-; BE-NEXT: subfic 10, 4, 64
-; BE-NEXT: sldi 11, 7, 1
-; BE-NEXT: srd 7, 7, 4
-; BE-NEXT: srd 9, 9, 4
-; BE-NEXT: sld 6, 11, 6
-; BE-NEXT: sld 11, 3, 10
-; BE-NEXT: sld 10, 8, 10
-; BE-NEXT: srd 8, 8, 4
+; BE-NEXT: ldux 3, 10, 3
+; BE-NEXT: clrlwi 4, 4, 26
+; BE-NEXT: subfic 9, 4, 64
+; BE-NEXT: ld 6, 8(10)
+; BE-NEXT: ld 7, 24(10)
+; BE-NEXT: ld 8, 16(10)
+; BE-NEXT: sld 10, 3, 9
; BE-NEXT: srd 3, 3, 4
-; BE-NEXT: or 7, 11, 7
-; BE-NEXT: or 6, 8, 6
-; BE-NEXT: or 8, 10, 9
; BE-NEXT: std 3, 0(5)
-; BE-NEXT: std 8, 24(5)
-; BE-NEXT: std 7, 8(5)
+; BE-NEXT: srd 11, 6, 4
+; BE-NEXT: srd 7, 7, 4
+; BE-NEXT: sld 6, 6, 9
+; BE-NEXT: sld 9, 8, 9
+; BE-NEXT: srd 8, 8, 4
+; BE-NEXT: or 10, 10, 11
+; BE-NEXT: or 7, 9, 7
+; BE-NEXT: or 6, 6, 8
; BE-NEXT: std 6, 16(5)
+; BE-NEXT: std 7, 24(5)
+; BE-NEXT: std 10, 8(5)
; BE-NEXT: blr
;
; LE-32BIT-LABEL: lshr_32bytes:
@@ -538,7 +519,6 @@ define void @lshr_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; LE-32BIT-NEXT: lwz 0, 24(3)
; LE-32BIT-NEXT: lwz 3, 28(3)
; LE-32BIT-NEXT: lwz 4, 28(4)
-; LE-32BIT-NEXT: stw 6, 48(1)
; LE-32BIT-NEXT: stw 6, 44(1)
; LE-32BIT-NEXT: stw 6, 40(1)
; LE-32BIT-NEXT: stw 6, 36(1)
@@ -546,68 +526,65 @@ define void @lshr_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; LE-32BIT-NEXT: stw 6, 28(1)
; LE-32BIT-NEXT: stw 6, 24(1)
; LE-32BIT-NEXT: stw 6, 20(1)
-; LE-32BIT-NEXT: rlwinm 6, 4, 29, 27, 31
-; LE-32BIT-NEXT: stw 3, 80(1)
-; LE-32BIT-NEXT: addi 3, 1, 52
+; LE-32BIT-NEXT: stw 6, 16(1)
+; LE-32BIT-NEXT: rlwinm 6, 4, 29, 27, 29
+; LE-32BIT-NEXT: stw 3, 76(1)
+; LE-32BIT-NEXT: addi 3, 1, 48
; LE-32BIT-NEXT: stw 25, 84(1) # 4-byte Folded Spill
; LE-32BIT-NEXT: sub 3, 3, 6
; LE-32BIT-NEXT: stw 26, 88(1) # 4-byte Folded Spill
+; LE-32BIT-NEXT: clrlwi 4, 4, 27
; LE-32BIT-NEXT: stw 27, 92(1) # 4-byte Folded Spill
; LE-32BIT-NEXT: stw 28, 96(1) # 4-byte Folded Spill
; LE-32BIT-NEXT: stw 29, 100(1) # 4-byte Folded Spill
; LE-32BIT-NEXT: stw 30, 104(1) # 4-byte Folded Spill
-; LE-32BIT-NEXT: stw 0, 76(1)
-; LE-32BIT-NEXT: stw 12, 72(1)
-; LE-32BIT-NEXT: stw 11, 68(1)
-; LE-32BIT-NEXT: stw 10, 64(1)
-; LE-32BIT-NEXT: stw 9, 60(1)
-; LE-32BIT-NEXT: li 9, 7
-; LE-32BIT-NEXT: stw 8, 56(1)
-; LE-32BIT-NEXT: nand 9, 4, 9
-; LE-32BIT-NEXT: stw 7, 52(1)
-; LE-32BIT-NEXT: clrlwi 4, 4, 29
-; LE-32BIT-NEXT: lwz 6, 4(3)
; LE-32BIT-NEXT: subfic 30, 4, 32
-; LE-32BIT-NEXT: lwz 7, 8(3)
-; LE-32BIT-NEXT: clrlwi 9, 9, 27
-; LE-32BIT-NEXT: lwz 8, 12(3)
-; LE-32BIT-NEXT: slwi 29, 6, 1
-; LE-32BIT-NEXT: lwz 10, 16(3)
-; LE-32BIT-NEXT: srw 28, 7, 4
-; LE-32BIT-NEXT: lwz 11, 20(3)
-; LE-32BIT-NEXT: slwi 27, 8, 1
-; LE-32BIT-NEXT: lwz 12, 24(3)
+; LE-32BIT-NEXT: stw 0, 72(1)
+; LE-32BIT-NEXT: stw 12, 68(1)
+; LE-32BIT-NEXT: xori 12, 4, 31
+; LE-32BIT-NEXT: stw 11, 64(1)
+; LE-32BIT-NEXT: stw 10, 60(1)
+; LE-32BIT-NEXT: stw 9, 56(1)
+; LE-32BIT-NEXT: stw 8, 52(1)
+; LE-32BIT-NEXT: stw 7, 48(1)
+; LE-32BIT-NEXT: lwz 6, 8(3)
+; LE-32BIT-NEXT: lwz 7, 4(3)
+; LE-32BIT-NEXT: lwz 8, 0(3)
+; LE-32BIT-NEXT: srw 29, 6, 4
+; LE-32BIT-NEXT: lwz 9, 12(3)
+; LE-32BIT-NEXT: slw 6, 6, 30
+; LE-32BIT-NEXT: lwz 10, 20(3)
+; LE-32BIT-NEXT: slw 28, 8, 30
+; LE-32BIT-NEXT: lwz 11, 16(3)
+; LE-32BIT-NEXT: srw 27, 9, 4
+; LE-32BIT-NEXT: lwz 0, 28(3)
; LE-32BIT-NEXT: srw 26, 10, 4
-; LE-32BIT-NEXT: lwz 0, 0(3)
-; LE-32BIT-NEXT: srw 6, 6, 4
-; LE-32BIT-NEXT: lwz 3, 28(3)
-; LE-32BIT-NEXT: srw 25, 12, 4
-; LE-32BIT-NEXT: slw 12, 12, 30
-; LE-32BIT-NEXT: slw 7, 7, 30
-; LE-32BIT-NEXT: srw 3, 3, 4
+; LE-32BIT-NEXT: lwz 3, 24(3)
+; LE-32BIT-NEXT: slw 25, 11, 30
+; LE-32BIT-NEXT: slw 9, 9, 30
; LE-32BIT-NEXT: slw 10, 10, 30
-; LE-32BIT-NEXT: slw 30, 0, 30
-; LE-32BIT-NEXT: srw 8, 8, 4
+; LE-32BIT-NEXT: slw 30, 3, 30
+; LE-32BIT-NEXT: srw 3, 3, 4
; LE-32BIT-NEXT: srw 0, 0, 4
-; LE-32BIT-NEXT: srw 4, 11, 4
-; LE-32BIT-NEXT: or 3, 12, 3
+; LE-32BIT-NEXT: or 3, 10, 3
+; LE-32BIT-NEXT: srw 11, 11, 4
+; LE-32BIT-NEXT: stw 3, 24(5)
+; LE-32BIT-NEXT: or 3, 30, 0
; LE-32BIT-NEXT: stw 3, 28(5)
-; LE-32BIT-NEXT: or 3, 10, 4
-; LE-32BIT-NEXT: slwi 11, 11, 1
+; LE-32BIT-NEXT: or 3, 9, 11
+; LE-32BIT-NEXT: stw 3, 16(5)
+; LE-32BIT-NEXT: or 3, 25, 26
+; LE-32BIT-NEXT: srw 8, 8, 4
+; LE-32BIT-NEXT: srw 4, 7, 4
+; LE-32BIT-NEXT: slwi 7, 7, 1
; LE-32BIT-NEXT: stw 3, 20(5)
-; LE-32BIT-NEXT: or 3, 7, 8
-; LE-32BIT-NEXT: slw 29, 29, 9
-; LE-32BIT-NEXT: slw 27, 27, 9
-; LE-32BIT-NEXT: slw 9, 11, 9
+; LE-32BIT-NEXT: or 3, 6, 27
+; LE-32BIT-NEXT: slw 7, 7, 12
; LE-32BIT-NEXT: stw 3, 12(5)
-; LE-32BIT-NEXT: or 3, 30, 6
+; LE-32BIT-NEXT: or 3, 28, 4
; LE-32BIT-NEXT: stw 3, 4(5)
-; LE-32BIT-NEXT: or 3, 25, 9
-; LE-32BIT-NEXT: stw 3, 24(5)
-; LE-32BIT-NEXT: or 3, 26, 27
-; LE-32BIT-NEXT: stw 3, 16(5)
-; LE-32BIT-NEXT: or 3, 28, 29
-; LE-32BIT-NEXT: stw 0, 0(5)
+; LE-32BIT-NEXT: or 3, 29, 7
+; LE-32BIT-NEXT: stw 8, 0(5)
; LE-32BIT-NEXT: stw 3, 8(5)
; LE-32BIT-NEXT: lwz 30, 104(1) # 4-byte Folded Reload
; LE-32BIT-NEXT: lwz 29, 100(1) # 4-byte Folded Reload
@@ -635,37 +612,33 @@ define void @shl_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; LE-64BIT-NEXT: lxvd2x 0, 3, 6
; LE-64BIT-NEXT: stxvd2x 2, 7, 6
; LE-64BIT-NEXT: li 6, 48
-; LE-64BIT-NEXT: rlwinm 3, 4, 29, 27, 31
+; LE-64BIT-NEXT: rlwinm 3, 4, 29, 27, 28
+; LE-64BIT-NEXT: clrlwi 4, 4, 26
; LE-64BIT-NEXT: neg 3, 3
; LE-64BIT-NEXT: stxvd2x 0, 7, 6
; LE-64BIT-NEXT: li 6, 32
; LE-64BIT-NEXT: extsw 3, 3
; LE-64BIT-NEXT: stxvd2x 1, 7, 6
; LE-64BIT-NEXT: stxvd2x 2, 0, 7
-; LE-64BIT-NEXT: li 6, 7
+; LE-64BIT-NEXT: subfic 6, 4, 64
; LE-64BIT-NEXT: ldux 3, 8, 3
-; LE-64BIT-NEXT: ld 7, 8(8)
-; LE-64BIT-NEXT: nand 6, 4, 6
-; LE-64BIT-NEXT: ld 9, 16(8)
-; LE-64BIT-NEXT: clrlwi 4, 4, 29
-; LE-64BIT-NEXT: ld 8, 24(8)
-; LE-64BIT-NEXT: clrlwi 6, 6, 26
-; LE-64BIT-NEXT: rldicl 10, 7, 63, 1
-; LE-64BIT-NEXT: sld 8, 8, 4
+; LE-64BIT-NEXT: ld 7, 16(8)
+; LE-64BIT-NEXT: ld 9, 24(8)
+; LE-64BIT-NEXT: ld 8, 8(8)
+; LE-64BIT-NEXT: srd 10, 7, 6
+; LE-64BIT-NEXT: sld 9, 9, 4
; LE-64BIT-NEXT: sld 7, 7, 4
-; LE-64BIT-NEXT: srd 6, 10, 6
-; LE-64BIT-NEXT: sld 10, 9, 4
-; LE-64BIT-NEXT: or 6, 10, 6
-; LE-64BIT-NEXT: subfic 10, 4, 64
-; LE-64BIT-NEXT: srd 9, 9, 10
-; LE-64BIT-NEXT: srd 10, 3, 10
+; LE-64BIT-NEXT: or 9, 9, 10
+; LE-64BIT-NEXT: srd 10, 8, 6
+; LE-64BIT-NEXT: srd 6, 3, 6
+; LE-64BIT-NEXT: sld 8, 8, 4
; LE-64BIT-NEXT: sld 3, 3, 4
-; LE-64BIT-NEXT: std 6, 16(5)
-; LE-64BIT-NEXT: or 7, 7, 10
+; LE-64BIT-NEXT: or 6, 8, 6
; LE-64BIT-NEXT: std 3, 0(5)
-; LE-64BIT-NEXT: or 3, 8, 9
-; LE-64BIT-NEXT: std 7, 8(5)
-; LE-64BIT-NEXT: std 3, 24(5)
+; LE-64BIT-NEXT: or 3, 7, 10
+; LE-64BIT-NEXT: std 9, 24(5)
+; LE-64BIT-NEXT: std 6, 8(5)
+; LE-64BIT-NEXT: std 3, 16(5)
; LE-64BIT-NEXT: blr
;
; BE-LABEL: shl_32bytes:
@@ -675,41 +648,37 @@ define void @shl_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; BE-NEXT: ld 8, 16(3)
; BE-NEXT: ld 3, 24(3)
; BE-NEXT: lwz 4, 28(4)
-; BE-NEXT: addi 9, 1, -64
-; BE-NEXT: li 10, 0
-; BE-NEXT: std 10, 56(9)
-; BE-NEXT: std 10, 48(9)
-; BE-NEXT: std 10, 40(9)
-; BE-NEXT: std 10, 32(9)
-; BE-NEXT: std 3, 24(9)
-; BE-NEXT: std 8, 16(9)
-; BE-NEXT: std 7, 8(9)
+; BE-NEXT: li 9, 0
+; BE-NEXT: addi 10, 1, -64
+; BE-NEXT: std 9, -8(1)
+; BE-NEXT: std 9, -16(1)
+; BE-NEXT: std 9, -24(1)
+; BE-NEXT: std 9, -32(1)
+; BE-NEXT: std 3, -40(1)
+; BE-NEXT: std 8, -48(1)
+; BE-NEXT: std 7, -56(1)
; BE-NEXT: std 6, -64(1)
-; BE-NEXT: rlwinm 3, 4, 29, 27, 31
-; BE-NEXT: ldux 6, 3, 9
-; BE-NEXT: li 7, 7
-; BE-NEXT: nand 7, 4, 7
-; BE-NEXT: clrlwi 4, 4, 29
-; BE-NEXT: clrlwi 7, 7, 26
-; BE-NEXT: ld 8, 16(3)
-; BE-NEXT: ld 9, 8(3)
+; BE-NEXT: rlwinm 3, 4, 29, 27, 28
+; BE-NEXT: ldux 6, 3, 10
+; BE-NEXT: clrlwi 4, 4, 26
+; BE-NEXT: subfic 9, 4, 64
+; BE-NEXT: ld 7, 16(3)
+; BE-NEXT: ld 8, 8(3)
; BE-NEXT: ld 3, 24(3)
-; BE-NEXT: subfic 10, 4, 64
; BE-NEXT: sld 6, 6, 4
-; BE-NEXT: rldicl 11, 8, 63, 1
-; BE-NEXT: sld 8, 8, 4
-; BE-NEXT: srd 7, 11, 7
-; BE-NEXT: srd 11, 9, 10
-; BE-NEXT: sld 9, 9, 4
-; BE-NEXT: srd 10, 3, 10
+; BE-NEXT: srd 10, 7, 9
+; BE-NEXT: sld 11, 8, 4
+; BE-NEXT: srd 8, 8, 9
+; BE-NEXT: srd 9, 3, 9
+; BE-NEXT: sld 7, 7, 4
; BE-NEXT: sld 3, 3, 4
-; BE-NEXT: or 6, 6, 11
-; BE-NEXT: or 7, 9, 7
-; BE-NEXT: or 8, 8, 10
+; BE-NEXT: or 10, 11, 10
+; BE-NEXT: or 6, 6, 8
+; BE-NEXT: or 7, 7, 9
; BE-NEXT: std 3, 24(5)
-; BE-NEXT: std 8, 16(5)
+; BE-NEXT: std 7, 16(5)
; BE-NEXT: std 6, 0(5)
-; BE-NEXT: std 7, 8(5)
+; BE-NEXT: std 10, 8(5)
; BE-NEXT: blr
;
; LE-32BIT-LABEL: shl_32bytes:
@@ -731,7 +700,6 @@ define void @shl_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; LE-32BIT-NEXT: stw 28, 96(1) # 4-byte Folded Spill
; LE-32BIT-NEXT: stw 29, 100(1) # 4-byte Folded Spill
; LE-32BIT-NEXT: stw 30, 104(1) # 4-byte Folded Spill
-; LE-32BIT-NEXT: stw 6, 80(1)
; LE-32BIT-NEXT: stw 6, 76(1)
; LE-32BIT-NEXT: stw 6, 72(1)
; LE-32BIT-NEXT: stw 6, 68(1)
@@ -739,61 +707,56 @@ define void @shl_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; LE-32BIT-NEXT: stw 6, 60(1)
; LE-32BIT-NEXT: stw 6, 56(1)
; LE-32BIT-NEXT: stw 6, 52(1)
-; LE-32BIT-NEXT: rlwinm 6, 4, 29, 27, 31
-; LE-32BIT-NEXT: stw 3, 48(1)
-; LE-32BIT-NEXT: addi 3, 1, 20
-; LE-32BIT-NEXT: stw 0, 44(1)
-; LE-32BIT-NEXT: stw 12, 40(1)
-; LE-32BIT-NEXT: stw 11, 36(1)
-; LE-32BIT-NEXT: stw 10, 32(1)
-; LE-32BIT-NEXT: stw 9, 28(1)
-; LE-32BIT-NEXT: stw 8, 24(1)
-; LE-32BIT-NEXT: li 8, 7
-; LE-32BIT-NEXT: stw 7, 20(1)
-; LE-32BIT-NEXT: nand 8, 4, 8
+; LE-32BIT-NEXT: stw 6, 48(1)
+; LE-32BIT-NEXT: rlwinm 6, 4, 29, 27, 29
+; LE-32BIT-NEXT: stw 3, 44(1)
+; LE-32BIT-NEXT: addi 3, 1, 16
+; LE-32BIT-NEXT: stw 0, 40(1)
+; LE-32BIT-NEXT: clrlwi 4, 4, 27
+; LE-32BIT-NEXT: stw 12, 36(1)
+; LE-32BIT-NEXT: subfic 12, 4, 32
+; LE-32BIT-NEXT: stw 11, 32(1)
+; LE-32BIT-NEXT: stw 10, 28(1)
+; LE-32BIT-NEXT: stw 9, 24(1)
+; LE-32BIT-NEXT: stw 8, 20(1)
+; LE-32BIT-NEXT: stw 7, 16(1)
; LE-32BIT-NEXT: lwzux 3, 6, 3
-; LE-32BIT-NEXT: clrlwi 4, 4, 29
-; LE-32BIT-NEXT: subfic 0, 4, 32
-; LE-32BIT-NEXT: clrlwi 8, 8, 27
; LE-32BIT-NEXT: lwz 7, 8(6)
; LE-32BIT-NEXT: slw 3, 3, 4
-; LE-32BIT-NEXT: lwz 9, 4(6)
-; LE-32BIT-NEXT: lwz 10, 16(6)
-; LE-32BIT-NEXT: srwi 29, 7, 1
-; LE-32BIT-NEXT: lwz 11, 12(6)
-; LE-32BIT-NEXT: slw 28, 9, 4
-; LE-32BIT-NEXT: lwz 12, 24(6)
-; LE-32BIT-NEXT: srwi 27, 10, 1
-; LE-32BIT-NEXT: lwz 30, 20(6)
-; LE-32BIT-NEXT: slw 26, 11, 4
+; LE-32BIT-NEXT: lwz 8, 4(6)
+; LE-32BIT-NEXT: lwz 9, 16(6)
+; LE-32BIT-NEXT: srw 30, 7, 12
+; LE-32BIT-NEXT: lwz 10, 12(6)
+; LE-32BIT-NEXT: slw 29, 8, 4
+; LE-32BIT-NEXT: lwz 11, 24(6)
+; LE-32BIT-NEXT: srw 8, 8, 12
+; LE-32BIT-NEXT: lwz 0, 20(6)
+; LE-32BIT-NEXT: srw 28, 9, 12
; LE-32BIT-NEXT: lwz 6, 28(6)
-; LE-32BIT-NEXT: srw 9, 9, 0
-; LE-32BIT-NEXT: slw 25, 30, 4
-; LE-32BIT-NEXT: srw 11, 11, 0
+; LE-32BIT-NEXT: slw 27, 10, 4
+; LE-32BIT-NEXT: srw 10, 10, 12
; LE-32BIT-NEXT: slw 7, 7, 4
-; LE-32BIT-NEXT: srw 30, 30, 0
-; LE-32BIT-NEXT: slw 10, 10, 4
-; LE-32BIT-NEXT: srw 0, 6, 0
-; LE-32BIT-NEXT: slw 6, 6, 4
-; LE-32BIT-NEXT: slw 4, 12, 4
-; LE-32BIT-NEXT: srwi 12, 12, 1
-; LE-32BIT-NEXT: srw 29, 29, 8
-; LE-32BIT-NEXT: srw 27, 27, 8
-; LE-32BIT-NEXT: srw 8, 12, 8
-; LE-32BIT-NEXT: or 3, 3, 9
-; LE-32BIT-NEXT: or 4, 4, 0
-; LE-32BIT-NEXT: stw 3, 0(5)
-; LE-32BIT-NEXT: or 3, 25, 8
+; LE-32BIT-NEXT: srw 26, 11, 12
+; LE-32BIT-NEXT: slw 25, 0, 4
+; LE-32BIT-NEXT: srw 0, 0, 12
+; LE-32BIT-NEXT: slw 9, 9, 4
+; LE-32BIT-NEXT: srw 12, 6, 12
+; LE-32BIT-NEXT: slw 11, 11, 4
+; LE-32BIT-NEXT: slw 4, 6, 4
+; LE-32BIT-NEXT: stw 4, 28(5)
+; LE-32BIT-NEXT: or 4, 11, 12
; LE-32BIT-NEXT: stw 4, 24(5)
-; LE-32BIT-NEXT: or 4, 10, 30
-; LE-32BIT-NEXT: stw 3, 20(5)
-; LE-32BIT-NEXT: or 3, 26, 27
+; LE-32BIT-NEXT: or 4, 9, 0
; LE-32BIT-NEXT: stw 4, 16(5)
-; LE-32BIT-NEXT: or 4, 7, 11
-; LE-32BIT-NEXT: stw 3, 12(5)
-; LE-32BIT-NEXT: or 3, 28, 29
-; LE-32BIT-NEXT: stw 6, 28(5)
+; LE-32BIT-NEXT: or 4, 25, 26
+; LE-32BIT-NEXT: stw 4, 20(5)
+; LE-32BIT-NEXT: or 4, 7, 10
+; LE-32BIT-NEXT: or 3, 3, 8
; LE-32BIT-NEXT: stw 4, 8(5)
+; LE-32BIT-NEXT: or 4, 27, 28
+; LE-32BIT-NEXT: stw 3, 0(5)
+; LE-32BIT-NEXT: or 3, 29, 30
+; LE-32BIT-NEXT: stw 4, 12(5)
; LE-32BIT-NEXT: stw 3, 4(5)
; LE-32BIT-NEXT: lwz 30, 104(1) # 4-byte Folded Reload
; LE-32BIT-NEXT: lwz 29, 100(1) # 4-byte Folded Reload
@@ -812,98 +775,91 @@ define void @shl_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
define void @ashr_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; LE-64BIT-LABEL: ashr_32bytes:
; LE-64BIT: # %bb.0:
-; LE-64BIT-NEXT: lxvd2x 0, 0, 3
; LE-64BIT-NEXT: ld 6, 24(3)
+; LE-64BIT-NEXT: lxvd2x 0, 0, 3
; LE-64BIT-NEXT: lwz 4, 0(4)
; LE-64BIT-NEXT: addi 7, 1, -64
; LE-64BIT-NEXT: ld 3, 16(3)
; LE-64BIT-NEXT: sradi 8, 6, 63
-; LE-64BIT-NEXT: rlwinm 9, 4, 29, 27, 31
-; LE-64BIT-NEXT: std 6, 24(7)
-; LE-64BIT-NEXT: std 3, 16(7)
-; LE-64BIT-NEXT: li 3, 7
-; LE-64BIT-NEXT: std 8, 56(7)
-; LE-64BIT-NEXT: std 8, 48(7)
-; LE-64BIT-NEXT: std 8, 40(7)
-; LE-64BIT-NEXT: std 8, 32(7)
+; LE-64BIT-NEXT: rlwinm 9, 4, 29, 27, 28
+; LE-64BIT-NEXT: clrlwi 4, 4, 26
; LE-64BIT-NEXT: stxvd2x 0, 0, 7
-; LE-64BIT-NEXT: nand 3, 4, 3
-; LE-64BIT-NEXT: clrlwi 4, 4, 29
-; LE-64BIT-NEXT: ldux 6, 9, 7
-; LE-64BIT-NEXT: ld 7, 16(9)
+; LE-64BIT-NEXT: std 6, -40(1)
+; LE-64BIT-NEXT: std 3, -48(1)
+; LE-64BIT-NEXT: std 8, -8(1)
+; LE-64BIT-NEXT: std 8, -16(1)
+; LE-64BIT-NEXT: std 8, -24(1)
+; LE-64BIT-NEXT: std 8, -32(1)
+; LE-64BIT-NEXT: ldux 3, 9, 7
+; LE-64BIT-NEXT: xori 7, 4, 63
+; LE-64BIT-NEXT: ld 6, 16(9)
; LE-64BIT-NEXT: ld 8, 8(9)
-; LE-64BIT-NEXT: clrlwi 3, 3, 26
; LE-64BIT-NEXT: ld 9, 24(9)
+; LE-64BIT-NEXT: srd 3, 3, 4
+; LE-64BIT-NEXT: sldi 11, 6, 1
+; LE-64BIT-NEXT: srd 10, 8, 4
; LE-64BIT-NEXT: srd 6, 6, 4
-; LE-64BIT-NEXT: sldi 10, 7, 1
-; LE-64BIT-NEXT: srd 11, 8, 4
-; LE-64BIT-NEXT: srd 7, 7, 4
-; LE-64BIT-NEXT: sld 3, 10, 3
+; LE-64BIT-NEXT: sld 7, 11, 7
+; LE-64BIT-NEXT: or 7, 10, 7
; LE-64BIT-NEXT: subfic 10, 4, 64
; LE-64BIT-NEXT: srad 4, 9, 4
-; LE-64BIT-NEXT: or 3, 11, 3
-; LE-64BIT-NEXT: sld 11, 9, 10
; LE-64BIT-NEXT: sld 8, 8, 10
+; LE-64BIT-NEXT: sld 11, 9, 10
; LE-64BIT-NEXT: std 4, 24(5)
-; LE-64BIT-NEXT: or 6, 8, 6
-; LE-64BIT-NEXT: or 4, 11, 7
-; LE-64BIT-NEXT: std 3, 8(5)
-; LE-64BIT-NEXT: std 6, 0(5)
-; LE-64BIT-NEXT: std 4, 16(5)
+; LE-64BIT-NEXT: std 7, 8(5)
+; LE-64BIT-NEXT: or 3, 8, 3
+; LE-64BIT-NEXT: std 3, 0(5)
+; LE-64BIT-NEXT: or 3, 11, 6
+; LE-64BIT-NEXT: std 3, 16(5)
; LE-64BIT-NEXT: blr
;
; BE-LABEL: ashr_32bytes:
; BE: # %bb.0:
-; BE-NEXT: ld 6, 0(3)
-; BE-NEXT: ld 7, 8(3)
-; BE-NEXT: ld 8, 16(3)
+; BE-NEXT: ld 7, 0(3)
+; BE-NEXT: ld 8, 8(3)
+; BE-NEXT: ld 9, 16(3)
; BE-NEXT: ld 3, 24(3)
; BE-NEXT: lwz 4, 28(4)
-; BE-NEXT: addi 9, 1, -64
-; BE-NEXT: addi 10, 1, -32
-; BE-NEXT: std 3, 56(9)
-; BE-NEXT: std 6, 32(9)
-; BE-NEXT: sradi 3, 6, 63
-; BE-NEXT: rlwinm 6, 4, 29, 27, 31
-; BE-NEXT: std 3, 24(9)
-; BE-NEXT: std 3, 16(9)
-; BE-NEXT: std 3, 8(9)
+; BE-NEXT: addi 6, 1, -32
+; BE-NEXT: std 3, -8(1)
+; BE-NEXT: std 7, -32(1)
+; BE-NEXT: sradi 3, 7, 63
+; BE-NEXT: rlwinm 7, 4, 29, 27, 28
+; BE-NEXT: std 3, -40(1)
+; BE-NEXT: std 3, -48(1)
+; BE-NEXT: std 3, -56(1)
; BE-NEXT: std 3, -64(1)
-; BE-NEXT: neg 3, 6
-; BE-NEXT: std 8, 48(9)
-; BE-NEXT: std 7, 40(9)
+; BE-NEXT: neg 3, 7
+; BE-NEXT: std 9, -16(1)
+; BE-NEXT: std 8, -24(1)
; BE-NEXT: extsw 3, 3
-; BE-NEXT: ldux 3, 10, 3
-; BE-NEXT: li 6, 7
-; BE-NEXT: nand 6, 4, 6
-; BE-NEXT: clrlwi 4, 4, 29
-; BE-NEXT: clrlwi 6, 6, 26
-; BE-NEXT: ld 7, 8(10)
-; BE-NEXT: ld 8, 16(10)
-; BE-NEXT: ld 9, 24(10)
-; BE-NEXT: subfic 10, 4, 64
-; BE-NEXT: sldi 11, 7, 1
-; BE-NEXT: srd 7, 7, 4
-; BE-NEXT: srd 9, 9, 4
-; BE-NEXT: sld 6, 11, 6
-; BE-NEXT: sld 11, 3, 10
-; BE-NEXT: sld 10, 8, 10
-; BE-NEXT: srd 8, 8, 4
+; BE-NEXT: ldux 3, 6, 3
+; BE-NEXT: clrlwi 4, 4, 26
+; BE-NEXT: subfic 9, 4, 64
+; BE-NEXT: ld 7, 8(6)
+; BE-NEXT: ld 8, 24(6)
+; BE-NEXT: ld 6, 16(6)
+; BE-NEXT: sld 10, 3, 9
; BE-NEXT: srad 3, 3, 4
-; BE-NEXT: or 7, 11, 7
-; BE-NEXT: or 6, 8, 6
-; BE-NEXT: or 8, 10, 9
; BE-NEXT: std 3, 0(5)
-; BE-NEXT: std 8, 24(5)
-; BE-NEXT: std 7, 8(5)
+; BE-NEXT: srd 11, 7, 4
+; BE-NEXT: srd 8, 8, 4
+; BE-NEXT: sld 7, 7, 9
+; BE-NEXT: sld 9, 6, 9
+; BE-NEXT: srd 6, 6, 4
+; BE-NEXT: or 10, 10, 11
+; BE-NEXT: or 8, 9, 8
+; BE-NEXT: or 6, 7, 6
; BE-NEXT: std 6, 16(5)
+; BE-NEXT: std 8, 24(5)
+; BE-NEXT: std 10, 8(5)
; BE-NEXT: blr
;
; LE-32BIT-LABEL: ashr_32bytes:
; LE-32BIT: # %bb.0:
; LE-32BIT-NEXT: stwu 1, -112(1)
; LE-32BIT-NEXT: lwz 7, 0(3)
-; LE-32BIT-NEXT: addi 6, 1, 52
+; LE-32BIT-NEXT: addi 6, 1, 48
; LE-32BIT-NEXT: lwz 8, 4(3)
; LE-32BIT-NEXT: lwz 9, 8(3)
; LE-32BIT-NEXT: lwz 10, 12(3)
@@ -912,76 +868,72 @@ define void @ashr_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; LE-32BIT-NEXT: lwz 0, 24(3)
; LE-32BIT-NEXT: lwz 3, 28(3)
; LE-32BIT-NEXT: lwz 4, 28(4)
-; LE-32BIT-NEXT: stw 3, 80(1)
+; LE-32BIT-NEXT: stw 3, 76(1)
; LE-32BIT-NEXT: srawi 3, 7, 31
-; LE-32BIT-NEXT: stw 7, 52(1)
-; LE-32BIT-NEXT: rlwinm 7, 4, 29, 27, 31
+; LE-32BIT-NEXT: stw 7, 48(1)
+; LE-32BIT-NEXT: rlwinm 7, 4, 29, 27, 29
; LE-32BIT-NEXT: stw 25, 84(1) # 4-byte Folded Spill
+; LE-32BIT-NEXT: clrlwi 4, 4, 27
; LE-32BIT-NEXT: stw 26, 88(1) # 4-byte Folded Spill
; LE-32BIT-NEXT: stw 27, 92(1) # 4-byte Folded Spill
; LE-32BIT-NEXT: stw 28, 96(1) # 4-byte Folded Spill
; LE-32BIT-NEXT: stw 29, 100(1) # 4-byte Folded Spill
; LE-32BIT-NEXT: stw 30, 104(1) # 4-byte Folded Spill
-; LE-32BIT-NEXT: stw 0, 76(1)
-; LE-32BIT-NEXT: stw 12, 72(1)
-; LE-32BIT-NEXT: stw 11, 68(1)
-; LE-32BIT-NEXT: stw 10, 64(1)
-; LE-32BIT-NEXT: stw 9, 60(1)
-; LE-32BIT-NEXT: li 9, 7
-; LE-32BIT-NEXT: stw 8, 56(1)
-; LE-32BIT-NEXT: nand 9, 4, 9
-; LE-32BIT-NEXT: stw 3, 48(1)
-; LE-32BIT-NEXT: clrlwi 4, 4, 29
-; LE-32BIT-NEXT: stw 3, 44(1)
; LE-32BIT-NEXT: subfic 30, 4, 32
+; LE-32BIT-NEXT: stw 0, 72(1)
+; LE-32BIT-NEXT: stw 12, 68(1)
+; LE-32BIT-NEXT: xori 12, 4, 31
+; LE-32BIT-NEXT: stw 11, 64(1)
+; LE-32BIT-NEXT: stw 10, 60(1)
+; LE-32BIT-NEXT: stw 9, 56(1)
+; LE-32BIT-NEXT: stw 8, 52(1)
+; LE-32BIT-NEXT: stw 3, 44(1)
; LE-32BIT-NEXT: stw 3, 40(1)
-; LE-32BIT-NEXT: clrlwi 9, 9, 27
; LE-32BIT-NEXT: stw 3, 36(1)
; LE-32BIT-NEXT: stw 3, 32(1)
; LE-32BIT-NEXT: stw 3, 28(1)
; LE-32BIT-NEXT: stw 3, 24(1)
; LE-32BIT-NEXT: stw 3, 20(1)
+; LE-32BIT-NEXT: stw 3, 16(1)
; LE-32BIT-NEXT: sub 3, 6, 7
-; LE-32BIT-NEXT: lwz 6, 4(3)
-; LE-32BIT-NEXT: lwz 7, 8(3)
-; LE-32BIT-NEXT: lwz 8, 12(3)
-; LE-32BIT-NEXT: slwi 29, 6, 1
-; LE-32BIT-NEXT: lwz 10, 16(3)
-; LE-32BIT-NEXT: srw 28, 7, 4
-; LE-32BIT-NEXT: lwz 11, 20(3)
-; LE-32BIT-NEXT: slwi 27, 8, 1
-; LE-32BIT-NEXT: lwz 12, 24(3)
+; LE-32BIT-NEXT: lwz 6, 8(3)
+; LE-32BIT-NEXT: lwz 7, 4(3)
+; LE-32BIT-NEXT: lwz 8, 0(3)
+; LE-32BIT-NEXT: srw 29, 6, 4
+; LE-32BIT-NEXT: lwz 9, 12(3)
+; LE-32BIT-NEXT: slw 6, 6, 30
+; LE-32BIT-NEXT: lwz 10, 20(3)
+; LE-32BIT-NEXT: slw 28, 8, 30
+; LE-32BIT-NEXT: lwz 11, 16(3)
+; LE-32BIT-NEXT: srw 27, 9, 4
+; LE-32BIT-NEXT: lwz 0, 28(3)
; LE-32BIT-NEXT: srw 26, 10, 4
-; LE-32BIT-NEXT: lwz 0, 0(3)
-; LE-32BIT-NEXT: srw 6, 6, 4
-; LE-32BIT-NEXT: lwz 3, 28(3)
-; LE-32BIT-NEXT: srw 25, 12, 4
-; LE-32BIT-NEXT: slw 12, 12, 30
-; LE-32BIT-NEXT: slw 7, 7, 30
-; LE-32BIT-NEXT: srw 3, 3, 4
+; LE-32BIT-NEXT: lwz 3, 24(3)
+; LE-32BIT-NEXT: slw 25, 11, 30
+; LE-32BIT-NEXT: slw 9, 9, 30
; LE-32BIT-NEXT: slw 10, 10, 30
-; LE-32BIT-NEXT: slw 30, 0, 30
-; LE-32BIT-NEXT: srw 8, 8, 4
-; LE-32BIT-NEXT: sraw 0, 0, 4
-; LE-32BIT-NEXT: srw 4, 11, 4
-; LE-32BIT-NEXT: or 3, 12, 3
+; LE-32BIT-NEXT: slw 30, 3, 30
+; LE-32BIT-NEXT: srw 3, 3, 4
+; LE-32BIT-NEXT: srw 0, 0, 4
+; LE-32BIT-NEXT: or 3, 10, 3
+; LE-32BIT-NEXT: srw 11, 11, 4
+; LE-32BIT-NEXT: stw 3, 24(5)
+; LE-32BIT-NEXT: or 3, 30, 0
; LE-32BIT-NEXT: stw 3, 28(5)
-; LE-32BIT-NEXT: or 3, 10, 4
-; LE-32BIT-NEXT: slwi 11, 11, 1
+; LE-32BIT-NEXT: or 3, 9, 11
+; LE-32BIT-NEXT: stw 3, 16(5)
+; LE-32BIT-NEXT: or 3, 25, 26
+; LE-32BIT-NEXT: sraw 8, 8, 4
+; LE-32BIT-NEXT: srw 4, 7, 4
+; LE-32BIT-NEXT: slwi 7, 7, 1
; LE-32BIT-NEXT: stw 3, 20(5)
-; LE-32BIT-NEXT: or 3, 7, 8
-; LE-32BIT-NEXT: slw 29, 29, 9
-; LE-32BIT-NEXT: slw 27, 27, 9
-; LE-32BIT-NEXT: slw 9, 11, 9
+; LE-32BIT-NEXT: or 3, 6, 27
+; LE-32BIT-NEXT: slw 7, 7, 12
; LE-32BIT-NEXT: stw 3, 12(5)
-; LE-32BIT-NEXT: or 3, 30, 6
+; LE-32BIT-NEXT: or 3, 28, 4
; LE-32BIT-NEXT: stw 3, 4(5)
-; LE-32BIT-NEXT: or 3, 25, 9
-; LE-32BIT-NEXT: stw 3, 24(5)
-; LE-32BIT-NEXT: or 3, 26, 27
-; LE-32BIT-NEXT: stw 3, 16(5)
-; LE-32BIT-NEXT: or 3, 28, 29
-; LE-32BIT-NEXT: stw 0, 0(5)
+; LE-32BIT-NEXT: or 3, 29, 7
+; LE-32BIT-NEXT: stw 8, 0(5)
; LE-32BIT-NEXT: stw 3, 8(5)
; LE-32BIT-NEXT: lwz 30, 104(1) # 4-byte Folded Reload
; LE-32BIT-NEXT: lwz 29, 100(1) # 4-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll b/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll
index d1c98f8..abf8936 100644
--- a/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll
@@ -5424,8 +5424,8 @@ for.cond.cleanup: ; preds = %vector.body
ret void
}
-define void @sink_splat_select(ptr nocapture %a, i32 signext %x) {
-; CHECK-LABEL: sink_splat_select:
+define void @sink_splat_select_op1(ptr nocapture %a, i32 signext %x) {
+; CHECK-LABEL: sink_splat_select_op1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lui a2, 1
; CHECK-NEXT: add a2, a0, a2
@@ -5460,3 +5460,41 @@ vector.body: ; preds = %vector.body, %entry
for.cond.cleanup: ; preds = %vector.body
ret void
}
+
+define void @sink_splat_select_op2(ptr nocapture %a, i32 signext %x) {
+; CHECK-LABEL: sink_splat_select_op2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vmv.v.x v8, a1
+; CHECK-NEXT: lui a1, 1
+; CHECK-NEXT: add a1, a0, a1
+; CHECK-NEXT: li a2, 42
+; CHECK-NEXT: .LBB118_1: # %vector.body
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: vle32.v v9, (a0)
+; CHECK-NEXT: vmseq.vx v0, v9, a2
+; CHECK-NEXT: vmerge.vvm v9, v8, v9, v0
+; CHECK-NEXT: vse32.v v9, (a0)
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: bne a0, a1, .LBB118_1
+; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
+; CHECK-NEXT: ret
+entry:
+ %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0
+ %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %entry
+ %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
+ %0 = getelementptr inbounds i32, ptr %a, i64 %index
+ %load = load <4 x i32>, ptr %0, align 4
+ %cond = icmp eq <4 x i32> %load, splat (i32 42)
+ %1 = select <4 x i1> %cond, <4 x i32> %load, <4 x i32> %broadcast.splat
+ store <4 x i32> %1, ptr %0, align 4
+ %index.next = add nuw i64 %index, 4
+ %2 = icmp eq i64 %index.next, 1024
+ br i1 %2, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup: ; preds = %vector.body
+ ret void
+}
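+; Here the splat feeds the false arm of the select (operand 2), so the
+; expected code materializes it once with vmv.v.x before the loop, and the
+; loop body picks between the loaded vector and the splat via vmerge.vvm
+; under the vmseq.vx mask.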
diff --git a/llvm/test/CodeGen/RISCV/shifts.ll b/llvm/test/CodeGen/RISCV/shifts.ll
index f61cbfd..5ba8755 100644
--- a/llvm/test/CodeGen/RISCV/shifts.ll
+++ b/llvm/test/CodeGen/RISCV/shifts.ll
@@ -157,106 +157,33 @@ define i128 @lshr128(i128 %a, i128 %b) nounwind {
; RV32I-NEXT: lw a4, 4(a1)
; RV32I-NEXT: lw a5, 8(a1)
; RV32I-NEXT: lw a1, 12(a1)
-; RV32I-NEXT: sb zero, 31(sp)
-; RV32I-NEXT: sb zero, 30(sp)
-; RV32I-NEXT: sb zero, 29(sp)
-; RV32I-NEXT: sb zero, 28(sp)
-; RV32I-NEXT: sb zero, 27(sp)
-; RV32I-NEXT: sb zero, 26(sp)
-; RV32I-NEXT: sb zero, 25(sp)
-; RV32I-NEXT: sb zero, 24(sp)
-; RV32I-NEXT: sb zero, 23(sp)
-; RV32I-NEXT: sb zero, 22(sp)
-; RV32I-NEXT: sb zero, 21(sp)
-; RV32I-NEXT: sb zero, 20(sp)
-; RV32I-NEXT: sb zero, 19(sp)
-; RV32I-NEXT: sb zero, 18(sp)
-; RV32I-NEXT: sb zero, 17(sp)
-; RV32I-NEXT: sb zero, 16(sp)
-; RV32I-NEXT: sb a1, 12(sp)
-; RV32I-NEXT: sb a5, 8(sp)
-; RV32I-NEXT: sb a4, 4(sp)
-; RV32I-NEXT: sb a3, 0(sp)
-; RV32I-NEXT: srli a6, a1, 24
-; RV32I-NEXT: sb a6, 15(sp)
-; RV32I-NEXT: srli a6, a1, 16
-; RV32I-NEXT: sb a6, 14(sp)
-; RV32I-NEXT: srli a1, a1, 8
-; RV32I-NEXT: sb a1, 13(sp)
-; RV32I-NEXT: srli a1, a5, 24
-; RV32I-NEXT: sb a1, 11(sp)
-; RV32I-NEXT: srli a1, a5, 16
-; RV32I-NEXT: sb a1, 10(sp)
-; RV32I-NEXT: srli a5, a5, 8
-; RV32I-NEXT: sb a5, 9(sp)
-; RV32I-NEXT: srli a1, a4, 24
-; RV32I-NEXT: sb a1, 7(sp)
-; RV32I-NEXT: srli a1, a4, 16
-; RV32I-NEXT: sb a1, 6(sp)
-; RV32I-NEXT: srli a4, a4, 8
-; RV32I-NEXT: sb a4, 5(sp)
-; RV32I-NEXT: srli a1, a3, 24
-; RV32I-NEXT: sb a1, 3(sp)
-; RV32I-NEXT: srli a1, a3, 16
-; RV32I-NEXT: sb a1, 2(sp)
-; RV32I-NEXT: srli a3, a3, 8
-; RV32I-NEXT: sb a3, 1(sp)
-; RV32I-NEXT: slli a1, a2, 25
-; RV32I-NEXT: srli a1, a1, 28
+; RV32I-NEXT: sw zero, 28(sp)
+; RV32I-NEXT: sw zero, 24(sp)
+; RV32I-NEXT: sw zero, 20(sp)
+; RV32I-NEXT: sw zero, 16(sp)
+; RV32I-NEXT: sw a1, 12(sp)
+; RV32I-NEXT: sw a5, 8(sp)
+; RV32I-NEXT: sw a4, 4(sp)
+; RV32I-NEXT: sw a3, 0(sp)
+; RV32I-NEXT: srli a1, a2, 3
+; RV32I-NEXT: andi a1, a1, 12
; RV32I-NEXT: mv a3, sp
; RV32I-NEXT: add a1, a3, a1
-; RV32I-NEXT: lbu a3, 1(a1)
-; RV32I-NEXT: lbu a4, 0(a1)
-; RV32I-NEXT: lbu a5, 2(a1)
-; RV32I-NEXT: lbu a6, 3(a1)
-; RV32I-NEXT: slli a3, a3, 8
-; RV32I-NEXT: or a3, a3, a4
-; RV32I-NEXT: slli a5, a5, 16
-; RV32I-NEXT: slli a6, a6, 24
-; RV32I-NEXT: or a4, a6, a5
-; RV32I-NEXT: or a3, a4, a3
-; RV32I-NEXT: andi a2, a2, 7
+; RV32I-NEXT: lw a3, 0(a1)
+; RV32I-NEXT: lw a4, 4(a1)
; RV32I-NEXT: srl a3, a3, a2
-; RV32I-NEXT: lbu a4, 5(a1)
-; RV32I-NEXT: lbu a5, 4(a1)
-; RV32I-NEXT: lbu a6, 6(a1)
-; RV32I-NEXT: lbu a7, 7(a1)
-; RV32I-NEXT: slli a4, a4, 8
-; RV32I-NEXT: or a4, a4, a5
-; RV32I-NEXT: slli a6, a6, 16
-; RV32I-NEXT: slli a7, a7, 24
-; RV32I-NEXT: or a5, a7, a6
-; RV32I-NEXT: or a4, a5, a4
; RV32I-NEXT: slli a5, a4, 1
-; RV32I-NEXT: xori a6, a2, 31
+; RV32I-NEXT: andi a6, a2, 31
+; RV32I-NEXT: xori a6, a6, 31
+; RV32I-NEXT: lw a7, 8(a1)
; RV32I-NEXT: sll a5, a5, a6
; RV32I-NEXT: or a3, a3, a5
; RV32I-NEXT: srl a4, a4, a2
-; RV32I-NEXT: lbu a5, 9(a1)
-; RV32I-NEXT: lbu a7, 8(a1)
-; RV32I-NEXT: lbu t0, 10(a1)
-; RV32I-NEXT: lbu t1, 11(a1)
-; RV32I-NEXT: slli a5, a5, 8
-; RV32I-NEXT: or a5, a5, a7
-; RV32I-NEXT: slli t0, t0, 16
-; RV32I-NEXT: slli t1, t1, 24
-; RV32I-NEXT: or a7, t1, t0
-; RV32I-NEXT: or a5, a7, a5
-; RV32I-NEXT: slli a7, a5, 1
-; RV32I-NEXT: not t0, a2
-; RV32I-NEXT: lbu t1, 13(a1)
-; RV32I-NEXT: sll a7, a7, t0
-; RV32I-NEXT: or a4, a4, a7
-; RV32I-NEXT: lbu a7, 12(a1)
-; RV32I-NEXT: slli t1, t1, 8
-; RV32I-NEXT: lbu t0, 14(a1)
-; RV32I-NEXT: lbu a1, 15(a1)
-; RV32I-NEXT: or a7, t1, a7
-; RV32I-NEXT: srl a5, a5, a2
-; RV32I-NEXT: slli t0, t0, 16
-; RV32I-NEXT: slli a1, a1, 24
-; RV32I-NEXT: or a1, a1, t0
-; RV32I-NEXT: or a1, a1, a7
+; RV32I-NEXT: slli a5, a7, 1
+; RV32I-NEXT: lw a1, 12(a1)
+; RV32I-NEXT: sll a5, a5, a6
+; RV32I-NEXT: or a4, a4, a5
+; RV32I-NEXT: srl a5, a7, a2
; RV32I-NEXT: slli a7, a1, 1
; RV32I-NEXT: sll a6, a7, a6
; RV32I-NEXT: or a5, a5, a6
@@ -299,110 +226,34 @@ define i128 @ashr128(i128 %a, i128 %b) nounwind {
; RV32I-NEXT: lw a4, 8(a1)
; RV32I-NEXT: lw a5, 4(a1)
; RV32I-NEXT: lw a1, 0(a1)
-; RV32I-NEXT: sb a3, 12(sp)
-; RV32I-NEXT: sb a4, 8(sp)
-; RV32I-NEXT: sb a5, 4(sp)
-; RV32I-NEXT: sb a1, 0(sp)
-; RV32I-NEXT: srai a6, a3, 31
-; RV32I-NEXT: sb a6, 28(sp)
-; RV32I-NEXT: sb a6, 24(sp)
-; RV32I-NEXT: sb a6, 20(sp)
-; RV32I-NEXT: sb a6, 16(sp)
-; RV32I-NEXT: srli a7, a3, 24
-; RV32I-NEXT: sb a7, 15(sp)
-; RV32I-NEXT: srli a7, a3, 16
-; RV32I-NEXT: sb a7, 14(sp)
-; RV32I-NEXT: srli a3, a3, 8
-; RV32I-NEXT: sb a3, 13(sp)
-; RV32I-NEXT: srli a3, a4, 24
-; RV32I-NEXT: sb a3, 11(sp)
-; RV32I-NEXT: srli a3, a4, 16
-; RV32I-NEXT: sb a3, 10(sp)
-; RV32I-NEXT: srli a4, a4, 8
-; RV32I-NEXT: sb a4, 9(sp)
-; RV32I-NEXT: srli a3, a5, 24
-; RV32I-NEXT: sb a3, 7(sp)
-; RV32I-NEXT: srli a3, a5, 16
-; RV32I-NEXT: sb a3, 6(sp)
-; RV32I-NEXT: srli a5, a5, 8
-; RV32I-NEXT: sb a5, 5(sp)
-; RV32I-NEXT: srli a3, a1, 24
-; RV32I-NEXT: sb a3, 3(sp)
-; RV32I-NEXT: srli a3, a1, 16
-; RV32I-NEXT: sb a3, 2(sp)
-; RV32I-NEXT: srli a1, a1, 8
-; RV32I-NEXT: sb a1, 1(sp)
-; RV32I-NEXT: srli a1, a6, 24
-; RV32I-NEXT: sb a1, 31(sp)
-; RV32I-NEXT: srli a3, a6, 16
-; RV32I-NEXT: sb a3, 30(sp)
-; RV32I-NEXT: srli a4, a6, 8
-; RV32I-NEXT: sb a4, 29(sp)
-; RV32I-NEXT: sb a1, 27(sp)
-; RV32I-NEXT: sb a3, 26(sp)
-; RV32I-NEXT: sb a4, 25(sp)
-; RV32I-NEXT: sb a1, 23(sp)
-; RV32I-NEXT: sb a3, 22(sp)
-; RV32I-NEXT: sb a4, 21(sp)
-; RV32I-NEXT: sb a1, 19(sp)
-; RV32I-NEXT: sb a3, 18(sp)
-; RV32I-NEXT: sb a4, 17(sp)
-; RV32I-NEXT: slli a1, a2, 25
-; RV32I-NEXT: srli a1, a1, 28
+; RV32I-NEXT: sw a3, 12(sp)
+; RV32I-NEXT: sw a4, 8(sp)
+; RV32I-NEXT: sw a5, 4(sp)
+; RV32I-NEXT: sw a1, 0(sp)
+; RV32I-NEXT: srai a3, a3, 31
+; RV32I-NEXT: sw a3, 28(sp)
+; RV32I-NEXT: sw a3, 24(sp)
+; RV32I-NEXT: sw a3, 20(sp)
+; RV32I-NEXT: sw a3, 16(sp)
+; RV32I-NEXT: srli a1, a2, 3
+; RV32I-NEXT: andi a1, a1, 12
; RV32I-NEXT: mv a3, sp
; RV32I-NEXT: add a1, a3, a1
-; RV32I-NEXT: lbu a3, 1(a1)
-; RV32I-NEXT: lbu a4, 0(a1)
-; RV32I-NEXT: lbu a5, 2(a1)
-; RV32I-NEXT: lbu a6, 3(a1)
-; RV32I-NEXT: slli a3, a3, 8
-; RV32I-NEXT: or a3, a3, a4
-; RV32I-NEXT: slli a5, a5, 16
-; RV32I-NEXT: slli a6, a6, 24
-; RV32I-NEXT: or a4, a6, a5
-; RV32I-NEXT: or a3, a4, a3
-; RV32I-NEXT: andi a2, a2, 7
+; RV32I-NEXT: lw a3, 0(a1)
+; RV32I-NEXT: lw a4, 4(a1)
; RV32I-NEXT: srl a3, a3, a2
-; RV32I-NEXT: lbu a4, 5(a1)
-; RV32I-NEXT: lbu a5, 4(a1)
-; RV32I-NEXT: lbu a6, 6(a1)
-; RV32I-NEXT: lbu a7, 7(a1)
-; RV32I-NEXT: slli a4, a4, 8
-; RV32I-NEXT: or a4, a4, a5
-; RV32I-NEXT: slli a6, a6, 16
-; RV32I-NEXT: slli a7, a7, 24
-; RV32I-NEXT: or a5, a7, a6
-; RV32I-NEXT: or a4, a5, a4
; RV32I-NEXT: slli a5, a4, 1
-; RV32I-NEXT: xori a6, a2, 31
+; RV32I-NEXT: andi a6, a2, 31
+; RV32I-NEXT: xori a6, a6, 31
+; RV32I-NEXT: lw a7, 8(a1)
; RV32I-NEXT: sll a5, a5, a6
; RV32I-NEXT: or a3, a3, a5
; RV32I-NEXT: srl a4, a4, a2
-; RV32I-NEXT: lbu a5, 9(a1)
-; RV32I-NEXT: lbu a7, 8(a1)
-; RV32I-NEXT: lbu t0, 10(a1)
-; RV32I-NEXT: lbu t1, 11(a1)
-; RV32I-NEXT: slli a5, a5, 8
-; RV32I-NEXT: or a5, a5, a7
-; RV32I-NEXT: slli t0, t0, 16
-; RV32I-NEXT: slli t1, t1, 24
-; RV32I-NEXT: or a7, t1, t0
-; RV32I-NEXT: or a5, a7, a5
-; RV32I-NEXT: slli a7, a5, 1
-; RV32I-NEXT: not t0, a2
-; RV32I-NEXT: lbu t1, 13(a1)
-; RV32I-NEXT: sll a7, a7, t0
-; RV32I-NEXT: or a4, a4, a7
-; RV32I-NEXT: lbu a7, 12(a1)
-; RV32I-NEXT: slli t1, t1, 8
-; RV32I-NEXT: lbu t0, 14(a1)
-; RV32I-NEXT: lbu a1, 15(a1)
-; RV32I-NEXT: or a7, t1, a7
-; RV32I-NEXT: srl a5, a5, a2
-; RV32I-NEXT: slli t0, t0, 16
-; RV32I-NEXT: slli a1, a1, 24
-; RV32I-NEXT: or a1, a1, t0
-; RV32I-NEXT: or a1, a1, a7
+; RV32I-NEXT: slli a5, a7, 1
+; RV32I-NEXT: lw a1, 12(a1)
+; RV32I-NEXT: sll a5, a5, a6
+; RV32I-NEXT: or a4, a4, a5
+; RV32I-NEXT: srl a5, a7, a2
; RV32I-NEXT: slli a7, a1, 1
; RV32I-NEXT: sll a6, a7, a6
; RV32I-NEXT: or a5, a5, a6
@@ -445,114 +296,41 @@ define i128 @shl128(i128 %a, i128 %b) nounwind {
; RV32I-NEXT: lw a4, 4(a1)
; RV32I-NEXT: lw a5, 8(a1)
; RV32I-NEXT: lw a1, 12(a1)
-; RV32I-NEXT: sb zero, 15(sp)
-; RV32I-NEXT: sb zero, 14(sp)
-; RV32I-NEXT: sb zero, 13(sp)
-; RV32I-NEXT: sb zero, 12(sp)
-; RV32I-NEXT: sb zero, 11(sp)
-; RV32I-NEXT: sb zero, 10(sp)
-; RV32I-NEXT: sb zero, 9(sp)
-; RV32I-NEXT: sb zero, 8(sp)
-; RV32I-NEXT: sb zero, 7(sp)
-; RV32I-NEXT: sb zero, 6(sp)
-; RV32I-NEXT: sb zero, 5(sp)
-; RV32I-NEXT: sb zero, 4(sp)
-; RV32I-NEXT: sb zero, 3(sp)
-; RV32I-NEXT: sb zero, 2(sp)
-; RV32I-NEXT: sb zero, 1(sp)
-; RV32I-NEXT: sb zero, 0(sp)
-; RV32I-NEXT: sb a1, 28(sp)
-; RV32I-NEXT: sb a5, 24(sp)
-; RV32I-NEXT: sb a4, 20(sp)
-; RV32I-NEXT: sb a3, 16(sp)
-; RV32I-NEXT: srli a6, a1, 24
-; RV32I-NEXT: sb a6, 31(sp)
-; RV32I-NEXT: srli a6, a1, 16
-; RV32I-NEXT: sb a6, 30(sp)
-; RV32I-NEXT: srli a1, a1, 8
-; RV32I-NEXT: sb a1, 29(sp)
-; RV32I-NEXT: srli a1, a5, 24
-; RV32I-NEXT: sb a1, 27(sp)
-; RV32I-NEXT: srli a1, a5, 16
-; RV32I-NEXT: sb a1, 26(sp)
-; RV32I-NEXT: srli a5, a5, 8
-; RV32I-NEXT: sb a5, 25(sp)
-; RV32I-NEXT: srli a1, a4, 24
-; RV32I-NEXT: sb a1, 23(sp)
-; RV32I-NEXT: srli a1, a4, 16
-; RV32I-NEXT: sb a1, 22(sp)
-; RV32I-NEXT: srli a4, a4, 8
-; RV32I-NEXT: sb a4, 21(sp)
-; RV32I-NEXT: srli a1, a3, 24
-; RV32I-NEXT: sb a1, 19(sp)
-; RV32I-NEXT: srli a1, a3, 16
-; RV32I-NEXT: sb a1, 18(sp)
-; RV32I-NEXT: srli a3, a3, 8
-; RV32I-NEXT: sb a3, 17(sp)
-; RV32I-NEXT: slli a1, a2, 25
-; RV32I-NEXT: srli a1, a1, 28
+; RV32I-NEXT: sw zero, 12(sp)
+; RV32I-NEXT: sw zero, 8(sp)
+; RV32I-NEXT: sw zero, 4(sp)
+; RV32I-NEXT: sw zero, 0(sp)
+; RV32I-NEXT: sw a1, 28(sp)
+; RV32I-NEXT: sw a5, 24(sp)
+; RV32I-NEXT: sw a4, 20(sp)
+; RV32I-NEXT: sw a3, 16(sp)
+; RV32I-NEXT: srli a1, a2, 3
+; RV32I-NEXT: andi a1, a1, 12
; RV32I-NEXT: addi a3, sp, 16
-; RV32I-NEXT: sub a1, a3, a1
-; RV32I-NEXT: lbu a3, 5(a1)
-; RV32I-NEXT: lbu a4, 4(a1)
-; RV32I-NEXT: lbu a5, 6(a1)
-; RV32I-NEXT: lbu a6, 7(a1)
-; RV32I-NEXT: slli a3, a3, 8
-; RV32I-NEXT: or a3, a3, a4
-; RV32I-NEXT: slli a5, a5, 16
-; RV32I-NEXT: slli a6, a6, 24
-; RV32I-NEXT: or a4, a6, a5
-; RV32I-NEXT: or a3, a4, a3
-; RV32I-NEXT: andi a2, a2, 7
-; RV32I-NEXT: sll a4, a3, a2
-; RV32I-NEXT: lbu a5, 1(a1)
-; RV32I-NEXT: lbu a6, 0(a1)
-; RV32I-NEXT: lbu a7, 2(a1)
-; RV32I-NEXT: lbu t0, 3(a1)
-; RV32I-NEXT: slli a5, a5, 8
-; RV32I-NEXT: or a5, a5, a6
-; RV32I-NEXT: slli a7, a7, 16
-; RV32I-NEXT: slli t0, t0, 24
-; RV32I-NEXT: or a6, t0, a7
-; RV32I-NEXT: or a5, a6, a5
-; RV32I-NEXT: srli a6, a5, 1
-; RV32I-NEXT: xori a7, a2, 31
+; RV32I-NEXT: sub a3, a3, a1
+; RV32I-NEXT: lw a1, 4(a3)
+; RV32I-NEXT: lw a4, 0(a3)
+; RV32I-NEXT: sll a5, a1, a2
+; RV32I-NEXT: srli a6, a4, 1
+; RV32I-NEXT: andi a7, a2, 31
+; RV32I-NEXT: lw t0, 8(a3)
+; RV32I-NEXT: xori a7, a7, 31
; RV32I-NEXT: srl a6, a6, a7
-; RV32I-NEXT: or a4, a4, a6
-; RV32I-NEXT: lbu a6, 9(a1)
-; RV32I-NEXT: lbu t0, 8(a1)
-; RV32I-NEXT: lbu t1, 10(a1)
-; RV32I-NEXT: lbu t2, 11(a1)
-; RV32I-NEXT: slli a6, a6, 8
-; RV32I-NEXT: or a6, a6, t0
-; RV32I-NEXT: slli t1, t1, 16
-; RV32I-NEXT: slli t2, t2, 24
-; RV32I-NEXT: or t0, t2, t1
-; RV32I-NEXT: or a6, t0, a6
-; RV32I-NEXT: sll t0, a6, a2
-; RV32I-NEXT: srli a3, a3, 1
-; RV32I-NEXT: not t1, a2
-; RV32I-NEXT: srl a3, a3, t1
-; RV32I-NEXT: or a3, t0, a3
-; RV32I-NEXT: lbu t0, 13(a1)
-; RV32I-NEXT: lbu t1, 12(a1)
-; RV32I-NEXT: lbu t2, 14(a1)
-; RV32I-NEXT: lbu a1, 15(a1)
-; RV32I-NEXT: slli t0, t0, 8
-; RV32I-NEXT: or t0, t0, t1
-; RV32I-NEXT: slli t2, t2, 16
-; RV32I-NEXT: slli a1, a1, 24
-; RV32I-NEXT: or a1, a1, t2
-; RV32I-NEXT: or a1, a1, t0
-; RV32I-NEXT: sll a1, a1, a2
-; RV32I-NEXT: srli a6, a6, 1
+; RV32I-NEXT: or a5, a5, a6
+; RV32I-NEXT: sll a6, t0, a2
+; RV32I-NEXT: lw a3, 12(a3)
+; RV32I-NEXT: srli a1, a1, 1
+; RV32I-NEXT: srl a1, a1, a7
+; RV32I-NEXT: or a1, a6, a1
+; RV32I-NEXT: sll a3, a3, a2
+; RV32I-NEXT: srli a6, t0, 1
; RV32I-NEXT: srl a6, a6, a7
-; RV32I-NEXT: or a1, a1, a6
-; RV32I-NEXT: sll a2, a5, a2
+; RV32I-NEXT: or a3, a3, a6
+; RV32I-NEXT: sll a2, a4, a2
; RV32I-NEXT: sw a2, 0(a0)
-; RV32I-NEXT: sw a1, 12(a0)
-; RV32I-NEXT: sw a3, 8(a0)
-; RV32I-NEXT: sw a4, 4(a0)
+; RV32I-NEXT: sw a3, 12(a0)
+; RV32I-NEXT: sw a1, 8(a0)
+; RV32I-NEXT: sw a5, 4(a0)
; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
diff --git a/llvm/test/CodeGen/RISCV/wide-scalar-shift-by-byte-multiple-legalization.ll b/llvm/test/CodeGen/RISCV/wide-scalar-shift-by-byte-multiple-legalization.ll
index b0d4353..29fe0a7 100644
--- a/llvm/test/CodeGen/RISCV/wide-scalar-shift-by-byte-multiple-legalization.ll
+++ b/llvm/test/CodeGen/RISCV/wide-scalar-shift-by-byte-multiple-legalization.ll
@@ -723,98 +723,117 @@ define void @lshr_16bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
;
; RV32I-LABEL: lshr_16bytes:
; RV32I: # %bb.0:
-; RV32I-NEXT: addi sp, sp, -48
-; RV32I-NEXT: sw s0, 44(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s1, 40(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s2, 36(sp) # 4-byte Folded Spill
-; RV32I-NEXT: lbu a3, 0(a0)
-; RV32I-NEXT: lbu a4, 1(a0)
+; RV32I-NEXT: addi sp, sp, -32
+; RV32I-NEXT: lbu a3, 1(a0)
+; RV32I-NEXT: lbu a4, 0(a0)
; RV32I-NEXT: lbu a5, 2(a0)
; RV32I-NEXT: lbu a6, 3(a0)
-; RV32I-NEXT: lbu a7, 4(a0)
-; RV32I-NEXT: lbu t0, 5(a0)
-; RV32I-NEXT: lbu t1, 6(a0)
-; RV32I-NEXT: lbu t2, 7(a0)
-; RV32I-NEXT: lbu t3, 8(a0)
-; RV32I-NEXT: lbu t4, 9(a0)
-; RV32I-NEXT: lbu t5, 10(a0)
-; RV32I-NEXT: lbu t6, 11(a0)
-; RV32I-NEXT: lbu s0, 12(a0)
-; RV32I-NEXT: lbu s1, 13(a0)
-; RV32I-NEXT: lbu s2, 14(a0)
+; RV32I-NEXT: slli a3, a3, 8
+; RV32I-NEXT: or a3, a3, a4
+; RV32I-NEXT: slli a5, a5, 16
+; RV32I-NEXT: slli a6, a6, 24
+; RV32I-NEXT: or a4, a6, a5
+; RV32I-NEXT: or a3, a4, a3
+; RV32I-NEXT: lbu a4, 5(a0)
+; RV32I-NEXT: lbu a5, 4(a0)
+; RV32I-NEXT: lbu a6, 6(a0)
+; RV32I-NEXT: lbu a7, 7(a0)
+; RV32I-NEXT: slli a4, a4, 8
+; RV32I-NEXT: or a4, a4, a5
+; RV32I-NEXT: slli a6, a6, 16
+; RV32I-NEXT: slli a7, a7, 24
+; RV32I-NEXT: or a5, a7, a6
+; RV32I-NEXT: or a4, a5, a4
+; RV32I-NEXT: lbu a5, 9(a0)
+; RV32I-NEXT: lbu a6, 8(a0)
+; RV32I-NEXT: lbu a7, 10(a0)
+; RV32I-NEXT: lbu t0, 11(a0)
+; RV32I-NEXT: slli a5, a5, 8
+; RV32I-NEXT: or a5, a5, a6
+; RV32I-NEXT: slli a7, a7, 16
+; RV32I-NEXT: slli t0, t0, 24
+; RV32I-NEXT: or a6, t0, a7
+; RV32I-NEXT: or a5, a6, a5
+; RV32I-NEXT: lbu a6, 13(a0)
+; RV32I-NEXT: lbu a7, 12(a0)
+; RV32I-NEXT: lbu t0, 14(a0)
; RV32I-NEXT: lbu a0, 15(a0)
-; RV32I-NEXT: lbu a1, 0(a1)
-; RV32I-NEXT: sb zero, 35(sp)
-; RV32I-NEXT: sb zero, 34(sp)
-; RV32I-NEXT: sb zero, 33(sp)
-; RV32I-NEXT: sb zero, 32(sp)
-; RV32I-NEXT: sb zero, 31(sp)
-; RV32I-NEXT: sb zero, 30(sp)
-; RV32I-NEXT: sb zero, 29(sp)
-; RV32I-NEXT: sb zero, 28(sp)
-; RV32I-NEXT: sb zero, 27(sp)
-; RV32I-NEXT: sb zero, 26(sp)
-; RV32I-NEXT: sb zero, 25(sp)
-; RV32I-NEXT: sb zero, 24(sp)
-; RV32I-NEXT: sb zero, 23(sp)
-; RV32I-NEXT: sb zero, 22(sp)
-; RV32I-NEXT: sb zero, 21(sp)
-; RV32I-NEXT: sb zero, 20(sp)
-; RV32I-NEXT: sb a0, 19(sp)
-; RV32I-NEXT: sb s2, 18(sp)
-; RV32I-NEXT: sb s1, 17(sp)
-; RV32I-NEXT: sb s0, 16(sp)
-; RV32I-NEXT: sb t6, 15(sp)
-; RV32I-NEXT: sb t5, 14(sp)
-; RV32I-NEXT: sb t4, 13(sp)
-; RV32I-NEXT: sb t3, 12(sp)
-; RV32I-NEXT: sb t2, 11(sp)
-; RV32I-NEXT: sb t1, 10(sp)
-; RV32I-NEXT: sb t0, 9(sp)
-; RV32I-NEXT: sb a7, 8(sp)
-; RV32I-NEXT: sb a6, 7(sp)
-; RV32I-NEXT: sb a5, 6(sp)
-; RV32I-NEXT: sb a4, 5(sp)
-; RV32I-NEXT: sb a3, 4(sp)
-; RV32I-NEXT: andi a1, a1, 15
-; RV32I-NEXT: addi a0, sp, 4
-; RV32I-NEXT: add a0, a0, a1
-; RV32I-NEXT: lbu a1, 5(a0)
-; RV32I-NEXT: lbu a3, 4(a0)
-; RV32I-NEXT: lbu a4, 7(a0)
-; RV32I-NEXT: lbu a5, 6(a0)
-; RV32I-NEXT: lbu a6, 1(a0)
-; RV32I-NEXT: lbu a7, 0(a0)
-; RV32I-NEXT: lbu t0, 3(a0)
-; RV32I-NEXT: lbu t1, 2(a0)
-; RV32I-NEXT: lbu t2, 13(a0)
-; RV32I-NEXT: lbu t3, 12(a0)
-; RV32I-NEXT: lbu t4, 15(a0)
-; RV32I-NEXT: lbu t5, 14(a0)
-; RV32I-NEXT: lbu t6, 10(a0)
-; RV32I-NEXT: lbu s0, 11(a0)
-; RV32I-NEXT: lbu s1, 8(a0)
-; RV32I-NEXT: lbu a0, 9(a0)
-; RV32I-NEXT: sb t6, 10(a2)
-; RV32I-NEXT: sb s0, 11(a2)
-; RV32I-NEXT: sb s1, 8(a2)
-; RV32I-NEXT: sb a0, 9(a2)
-; RV32I-NEXT: sb t5, 14(a2)
-; RV32I-NEXT: sb t4, 15(a2)
-; RV32I-NEXT: sb t3, 12(a2)
-; RV32I-NEXT: sb t2, 13(a2)
-; RV32I-NEXT: sb t1, 2(a2)
-; RV32I-NEXT: sb t0, 3(a2)
+; RV32I-NEXT: slli a6, a6, 8
+; RV32I-NEXT: or a6, a6, a7
+; RV32I-NEXT: slli t0, t0, 16
+; RV32I-NEXT: slli a0, a0, 24
+; RV32I-NEXT: or a0, a0, t0
+; RV32I-NEXT: or a0, a0, a6
+; RV32I-NEXT: lbu a6, 1(a1)
+; RV32I-NEXT: lbu a7, 0(a1)
+; RV32I-NEXT: lbu t0, 2(a1)
+; RV32I-NEXT: lbu a1, 3(a1)
+; RV32I-NEXT: slli a6, a6, 8
+; RV32I-NEXT: or a6, a6, a7
+; RV32I-NEXT: slli t0, t0, 16
+; RV32I-NEXT: slli a1, a1, 24
+; RV32I-NEXT: or a1, a1, t0
+; RV32I-NEXT: or a1, a1, a6
+; RV32I-NEXT: sw zero, 28(sp)
+; RV32I-NEXT: sw zero, 24(sp)
+; RV32I-NEXT: sw zero, 20(sp)
+; RV32I-NEXT: sw zero, 16(sp)
+; RV32I-NEXT: sw a0, 12(sp)
+; RV32I-NEXT: sw a5, 8(sp)
+; RV32I-NEXT: sw a4, 4(sp)
+; RV32I-NEXT: sw a3, 0(sp)
+; RV32I-NEXT: andi a0, a1, 12
+; RV32I-NEXT: mv a3, sp
+; RV32I-NEXT: add a0, a3, a0
+; RV32I-NEXT: lw a3, 4(a0)
+; RV32I-NEXT: slli a1, a1, 3
+; RV32I-NEXT: srl a4, a3, a1
+; RV32I-NEXT: lw a5, 8(a0)
+; RV32I-NEXT: andi a6, a1, 24
+; RV32I-NEXT: xori a6, a6, 31
+; RV32I-NEXT: lw a7, 0(a0)
+; RV32I-NEXT: slli t0, a5, 1
+; RV32I-NEXT: sll t0, t0, a6
+; RV32I-NEXT: or t0, a4, t0
+; RV32I-NEXT: srl a7, a7, a1
+; RV32I-NEXT: slli a3, a3, 1
+; RV32I-NEXT: lw a0, 12(a0)
+; RV32I-NEXT: sll a3, a3, a6
+; RV32I-NEXT: or a3, a7, a3
+; RV32I-NEXT: srl a5, a5, a1
+; RV32I-NEXT: slli t1, a0, 1
+; RV32I-NEXT: sll a6, t1, a6
+; RV32I-NEXT: or a6, a5, a6
+; RV32I-NEXT: srl a0, a0, a1
+; RV32I-NEXT: sb a5, 8(a2)
+; RV32I-NEXT: sb a0, 12(a2)
; RV32I-NEXT: sb a7, 0(a2)
-; RV32I-NEXT: sb a6, 1(a2)
-; RV32I-NEXT: sb a5, 6(a2)
-; RV32I-NEXT: sb a4, 7(a2)
-; RV32I-NEXT: sb a3, 4(a2)
-; RV32I-NEXT: sb a1, 5(a2)
-; RV32I-NEXT: lw s0, 44(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s1, 40(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s2, 36(sp) # 4-byte Folded Reload
-; RV32I-NEXT: addi sp, sp, 48
+; RV32I-NEXT: sb a4, 4(a2)
+; RV32I-NEXT: srli a1, a0, 16
+; RV32I-NEXT: sb a1, 14(a2)
+; RV32I-NEXT: srli a1, a0, 24
+; RV32I-NEXT: sb a1, 15(a2)
+; RV32I-NEXT: srli a0, a0, 8
+; RV32I-NEXT: sb a0, 13(a2)
+; RV32I-NEXT: srli a0, a6, 16
+; RV32I-NEXT: sb a0, 10(a2)
+; RV32I-NEXT: srli a0, a6, 24
+; RV32I-NEXT: sb a0, 11(a2)
+; RV32I-NEXT: srli a0, a6, 8
+; RV32I-NEXT: sb a0, 9(a2)
+; RV32I-NEXT: srli a0, a3, 16
+; RV32I-NEXT: sb a0, 2(a2)
+; RV32I-NEXT: srli a0, a3, 24
+; RV32I-NEXT: sb a0, 3(a2)
+; RV32I-NEXT: srli a3, a3, 8
+; RV32I-NEXT: sb a3, 1(a2)
+; RV32I-NEXT: srli a0, t0, 16
+; RV32I-NEXT: sb a0, 6(a2)
+; RV32I-NEXT: srli a0, t0, 24
+; RV32I-NEXT: sb a0, 7(a2)
+; RV32I-NEXT: srli a0, t0, 8
+; RV32I-NEXT: sb a0, 5(a2)
+; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
%src = load i128, ptr %src.ptr, align 1
%byteOff = load i128, ptr %byteOff.ptr, align 1
@@ -823,6 +842,222 @@ define void @lshr_16bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
store i128 %res, ptr %dst, align 1
ret void
}
+
+define void @lshr_16bytes_wordOff(ptr %src.ptr, ptr %wordOff.ptr, ptr %dst) nounwind {
+; RV64I-LABEL: lshr_16bytes_wordOff:
+; RV64I: # %bb.0:
+; RV64I-NEXT: lbu a3, 9(a0)
+; RV64I-NEXT: lbu a4, 8(a0)
+; RV64I-NEXT: lbu a5, 10(a0)
+; RV64I-NEXT: lbu a6, 11(a0)
+; RV64I-NEXT: slli a3, a3, 8
+; RV64I-NEXT: or a3, a3, a4
+; RV64I-NEXT: slli a5, a5, 16
+; RV64I-NEXT: slli a6, a6, 24
+; RV64I-NEXT: or a4, a6, a5
+; RV64I-NEXT: or a3, a4, a3
+; RV64I-NEXT: lbu a4, 13(a0)
+; RV64I-NEXT: lbu a5, 12(a0)
+; RV64I-NEXT: lbu a6, 14(a0)
+; RV64I-NEXT: lbu a7, 15(a0)
+; RV64I-NEXT: slli a4, a4, 8
+; RV64I-NEXT: or a4, a4, a5
+; RV64I-NEXT: slli a6, a6, 16
+; RV64I-NEXT: slli a7, a7, 24
+; RV64I-NEXT: or a5, a7, a6
+; RV64I-NEXT: or a4, a5, a4
+; RV64I-NEXT: slli a4, a4, 32
+; RV64I-NEXT: or a3, a4, a3
+; RV64I-NEXT: lbu a4, 5(a1)
+; RV64I-NEXT: lbu a5, 4(a1)
+; RV64I-NEXT: lbu a6, 6(a1)
+; RV64I-NEXT: lbu a7, 7(a1)
+; RV64I-NEXT: slli a4, a4, 8
+; RV64I-NEXT: or a4, a4, a5
+; RV64I-NEXT: slli a6, a6, 16
+; RV64I-NEXT: slli a7, a7, 24
+; RV64I-NEXT: or a5, a7, a6
+; RV64I-NEXT: or a4, a5, a4
+; RV64I-NEXT: lbu a5, 1(a1)
+; RV64I-NEXT: lbu a6, 0(a1)
+; RV64I-NEXT: lbu a7, 2(a1)
+; RV64I-NEXT: lbu a1, 3(a1)
+; RV64I-NEXT: slli a5, a5, 8
+; RV64I-NEXT: or a5, a5, a6
+; RV64I-NEXT: slli a7, a7, 16
+; RV64I-NEXT: slli a1, a1, 24
+; RV64I-NEXT: or a1, a1, a7
+; RV64I-NEXT: or a1, a1, a5
+; RV64I-NEXT: slli a1, a1, 5
+; RV64I-NEXT: slli a4, a4, 37
+; RV64I-NEXT: or a5, a4, a1
+; RV64I-NEXT: addi a4, a5, -64
+; RV64I-NEXT: srl a1, a3, a5
+; RV64I-NEXT: bltz a4, .LBB7_2
+; RV64I-NEXT: # %bb.1:
+; RV64I-NEXT: mv a0, a1
+; RV64I-NEXT: j .LBB7_3
+; RV64I-NEXT: .LBB7_2:
+; RV64I-NEXT: lbu a6, 1(a0)
+; RV64I-NEXT: lbu a7, 0(a0)
+; RV64I-NEXT: lbu t0, 2(a0)
+; RV64I-NEXT: lbu t1, 3(a0)
+; RV64I-NEXT: slli a6, a6, 8
+; RV64I-NEXT: or a6, a6, a7
+; RV64I-NEXT: slli t0, t0, 16
+; RV64I-NEXT: slli t1, t1, 24
+; RV64I-NEXT: or a7, t1, t0
+; RV64I-NEXT: or a6, a7, a6
+; RV64I-NEXT: lbu a7, 5(a0)
+; RV64I-NEXT: lbu t0, 4(a0)
+; RV64I-NEXT: lbu t1, 6(a0)
+; RV64I-NEXT: lbu a0, 7(a0)
+; RV64I-NEXT: slli a7, a7, 8
+; RV64I-NEXT: or a7, a7, t0
+; RV64I-NEXT: slli t1, t1, 16
+; RV64I-NEXT: slli a0, a0, 24
+; RV64I-NEXT: or a0, a0, t1
+; RV64I-NEXT: or a0, a0, a7
+; RV64I-NEXT: slli a0, a0, 32
+; RV64I-NEXT: or a0, a0, a6
+; RV64I-NEXT: srl a0, a0, a5
+; RV64I-NEXT: not a5, a5
+; RV64I-NEXT: slli a3, a3, 1
+; RV64I-NEXT: sll a3, a3, a5
+; RV64I-NEXT: or a0, a0, a3
+; RV64I-NEXT: .LBB7_3:
+; RV64I-NEXT: srai a4, a4, 63
+; RV64I-NEXT: and a1, a4, a1
+; RV64I-NEXT: sb a1, 8(a2)
+; RV64I-NEXT: srli a3, a1, 56
+; RV64I-NEXT: sb a3, 15(a2)
+; RV64I-NEXT: srli a3, a1, 48
+; RV64I-NEXT: sb a3, 14(a2)
+; RV64I-NEXT: srli a3, a1, 40
+; RV64I-NEXT: sb a3, 13(a2)
+; RV64I-NEXT: srli a3, a1, 32
+; RV64I-NEXT: sb a3, 12(a2)
+; RV64I-NEXT: srli a3, a1, 24
+; RV64I-NEXT: sb a3, 11(a2)
+; RV64I-NEXT: srli a3, a1, 16
+; RV64I-NEXT: sb a3, 10(a2)
+; RV64I-NEXT: srli a1, a1, 8
+; RV64I-NEXT: sb a1, 9(a2)
+; RV64I-NEXT: sb a0, 0(a2)
+; RV64I-NEXT: srli a1, a0, 56
+; RV64I-NEXT: sb a1, 7(a2)
+; RV64I-NEXT: srli a1, a0, 48
+; RV64I-NEXT: sb a1, 6(a2)
+; RV64I-NEXT: srli a1, a0, 40
+; RV64I-NEXT: sb a1, 5(a2)
+; RV64I-NEXT: srli a1, a0, 32
+; RV64I-NEXT: sb a1, 4(a2)
+; RV64I-NEXT: srli a1, a0, 24
+; RV64I-NEXT: sb a1, 3(a2)
+; RV64I-NEXT: srli a1, a0, 16
+; RV64I-NEXT: sb a1, 2(a2)
+; RV64I-NEXT: srli a0, a0, 8
+; RV64I-NEXT: sb a0, 1(a2)
+; RV64I-NEXT: ret
+;
+; RV32I-LABEL: lshr_16bytes_wordOff:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi sp, sp, -32
+; RV32I-NEXT: lbu a3, 1(a0)
+; RV32I-NEXT: lbu a4, 0(a0)
+; RV32I-NEXT: lbu a5, 2(a0)
+; RV32I-NEXT: lbu a6, 3(a0)
+; RV32I-NEXT: slli a3, a3, 8
+; RV32I-NEXT: or a3, a3, a4
+; RV32I-NEXT: slli a5, a5, 16
+; RV32I-NEXT: slli a6, a6, 24
+; RV32I-NEXT: or a4, a6, a5
+; RV32I-NEXT: or a3, a4, a3
+; RV32I-NEXT: lbu a4, 5(a0)
+; RV32I-NEXT: lbu a5, 4(a0)
+; RV32I-NEXT: lbu a6, 6(a0)
+; RV32I-NEXT: lbu a7, 7(a0)
+; RV32I-NEXT: slli a4, a4, 8
+; RV32I-NEXT: or a4, a4, a5
+; RV32I-NEXT: slli a6, a6, 16
+; RV32I-NEXT: slli a7, a7, 24
+; RV32I-NEXT: or a5, a7, a6
+; RV32I-NEXT: or a4, a5, a4
+; RV32I-NEXT: lbu a5, 9(a0)
+; RV32I-NEXT: lbu a6, 8(a0)
+; RV32I-NEXT: lbu a7, 10(a0)
+; RV32I-NEXT: lbu t0, 11(a0)
+; RV32I-NEXT: slli a5, a5, 8
+; RV32I-NEXT: or a5, a5, a6
+; RV32I-NEXT: slli a7, a7, 16
+; RV32I-NEXT: slli t0, t0, 24
+; RV32I-NEXT: or a6, t0, a7
+; RV32I-NEXT: or a5, a6, a5
+; RV32I-NEXT: lbu a6, 13(a0)
+; RV32I-NEXT: lbu a7, 12(a0)
+; RV32I-NEXT: lbu t0, 14(a0)
+; RV32I-NEXT: lbu a0, 15(a0)
+; RV32I-NEXT: slli a6, a6, 8
+; RV32I-NEXT: or a6, a6, a7
+; RV32I-NEXT: slli t0, t0, 16
+; RV32I-NEXT: slli a0, a0, 24
+; RV32I-NEXT: or a0, a0, t0
+; RV32I-NEXT: or a0, a0, a6
+; RV32I-NEXT: lbu a1, 0(a1)
+; RV32I-NEXT: sw zero, 28(sp)
+; RV32I-NEXT: sw zero, 24(sp)
+; RV32I-NEXT: sw zero, 20(sp)
+; RV32I-NEXT: sw zero, 16(sp)
+; RV32I-NEXT: sw a0, 12(sp)
+; RV32I-NEXT: sw a5, 8(sp)
+; RV32I-NEXT: sw a4, 4(sp)
+; RV32I-NEXT: sw a3, 0(sp)
+; RV32I-NEXT: slli a1, a1, 2
+; RV32I-NEXT: andi a1, a1, 12
+; RV32I-NEXT: mv a0, sp
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: lw a1, 8(a0)
+; RV32I-NEXT: lw a3, 12(a0)
+; RV32I-NEXT: lw a4, 0(a0)
+; RV32I-NEXT: lw a0, 4(a0)
+; RV32I-NEXT: sb a1, 8(a2)
+; RV32I-NEXT: sb a3, 12(a2)
+; RV32I-NEXT: sb a4, 0(a2)
+; RV32I-NEXT: sb a0, 4(a2)
+; RV32I-NEXT: srli a5, a1, 16
+; RV32I-NEXT: sb a5, 10(a2)
+; RV32I-NEXT: srli a5, a1, 24
+; RV32I-NEXT: sb a5, 11(a2)
+; RV32I-NEXT: srli a1, a1, 8
+; RV32I-NEXT: sb a1, 9(a2)
+; RV32I-NEXT: srli a1, a3, 16
+; RV32I-NEXT: sb a1, 14(a2)
+; RV32I-NEXT: srli a1, a3, 24
+; RV32I-NEXT: sb a1, 15(a2)
+; RV32I-NEXT: srli a3, a3, 8
+; RV32I-NEXT: sb a3, 13(a2)
+; RV32I-NEXT: srli a1, a4, 16
+; RV32I-NEXT: sb a1, 2(a2)
+; RV32I-NEXT: srli a1, a4, 24
+; RV32I-NEXT: sb a1, 3(a2)
+; RV32I-NEXT: srli a4, a4, 8
+; RV32I-NEXT: sb a4, 1(a2)
+; RV32I-NEXT: srli a1, a0, 16
+; RV32I-NEXT: sb a1, 6(a2)
+; RV32I-NEXT: srli a1, a0, 24
+; RV32I-NEXT: sb a1, 7(a2)
+; RV32I-NEXT: srli a0, a0, 8
+; RV32I-NEXT: sb a0, 5(a2)
+; RV32I-NEXT: addi sp, sp, 32
+; RV32I-NEXT: ret
+ %src = load i128, ptr %src.ptr, align 1
+ %wordOff = load i128, ptr %wordOff.ptr, align 1
+ %bitOff = shl i128 %wordOff, 5
+ %res = lshr i128 %src, %bitOff
+ store i128 %res, ptr %dst, align 1
+ ret void
+}
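+; With a word offset the i128 shift amount is always a multiple of 32, so the
+; RV32I expansion needs no bit-level shifting: it indexes the spilled words
+; with "slli a1, a1, 2" / "andi a1, a1, 12" and copies four aligned words out.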
+
define void @shl_16bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; RV64I-LABEL: shl_16bytes:
; RV64I: # %bb.0:
@@ -873,11 +1108,11 @@ define void @shl_16bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; RV64I-NEXT: or a5, a4, a1
; RV64I-NEXT: addi a4, a5, -64
; RV64I-NEXT: sll a1, a3, a5
-; RV64I-NEXT: bltz a4, .LBB7_2
+; RV64I-NEXT: bltz a4, .LBB8_2
; RV64I-NEXT: # %bb.1:
; RV64I-NEXT: mv a0, a1
-; RV64I-NEXT: j .LBB7_3
-; RV64I-NEXT: .LBB7_2:
+; RV64I-NEXT: j .LBB8_3
+; RV64I-NEXT: .LBB8_2:
; RV64I-NEXT: lbu a6, 9(a0)
; RV64I-NEXT: lbu a7, 8(a0)
; RV64I-NEXT: lbu t0, 10(a0)
@@ -905,7 +1140,7 @@ define void @shl_16bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; RV64I-NEXT: srli a3, a3, 1
; RV64I-NEXT: srl a3, a3, a5
; RV64I-NEXT: or a0, a0, a3
-; RV64I-NEXT: .LBB7_3:
+; RV64I-NEXT: .LBB8_3:
; RV64I-NEXT: srai a4, a4, 63
; RV64I-NEXT: and a1, a4, a1
; RV64I-NEXT: sb a1, 0(a2)
@@ -942,98 +1177,117 @@ define void @shl_16bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
;
; RV32I-LABEL: shl_16bytes:
; RV32I: # %bb.0:
-; RV32I-NEXT: addi sp, sp, -48
-; RV32I-NEXT: sw s0, 44(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s1, 40(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s2, 36(sp) # 4-byte Folded Spill
-; RV32I-NEXT: lbu a3, 0(a0)
-; RV32I-NEXT: lbu a4, 1(a0)
+; RV32I-NEXT: addi sp, sp, -32
+; RV32I-NEXT: lbu a3, 1(a0)
+; RV32I-NEXT: lbu a4, 0(a0)
; RV32I-NEXT: lbu a5, 2(a0)
; RV32I-NEXT: lbu a6, 3(a0)
-; RV32I-NEXT: lbu a7, 4(a0)
-; RV32I-NEXT: lbu t0, 5(a0)
-; RV32I-NEXT: lbu t1, 6(a0)
-; RV32I-NEXT: lbu t2, 7(a0)
-; RV32I-NEXT: lbu t3, 8(a0)
-; RV32I-NEXT: lbu t4, 9(a0)
-; RV32I-NEXT: lbu t5, 10(a0)
-; RV32I-NEXT: lbu t6, 11(a0)
-; RV32I-NEXT: lbu s0, 12(a0)
-; RV32I-NEXT: lbu s1, 13(a0)
-; RV32I-NEXT: lbu s2, 14(a0)
+; RV32I-NEXT: slli a3, a3, 8
+; RV32I-NEXT: or a3, a3, a4
+; RV32I-NEXT: slli a5, a5, 16
+; RV32I-NEXT: slli a6, a6, 24
+; RV32I-NEXT: or a4, a6, a5
+; RV32I-NEXT: or a3, a4, a3
+; RV32I-NEXT: lbu a4, 5(a0)
+; RV32I-NEXT: lbu a5, 4(a0)
+; RV32I-NEXT: lbu a6, 6(a0)
+; RV32I-NEXT: lbu a7, 7(a0)
+; RV32I-NEXT: slli a4, a4, 8
+; RV32I-NEXT: or a4, a4, a5
+; RV32I-NEXT: slli a6, a6, 16
+; RV32I-NEXT: slli a7, a7, 24
+; RV32I-NEXT: or a5, a7, a6
+; RV32I-NEXT: or a4, a5, a4
+; RV32I-NEXT: lbu a5, 9(a0)
+; RV32I-NEXT: lbu a6, 8(a0)
+; RV32I-NEXT: lbu a7, 10(a0)
+; RV32I-NEXT: lbu t0, 11(a0)
+; RV32I-NEXT: slli a5, a5, 8
+; RV32I-NEXT: or a5, a5, a6
+; RV32I-NEXT: slli a7, a7, 16
+; RV32I-NEXT: slli t0, t0, 24
+; RV32I-NEXT: or a6, t0, a7
+; RV32I-NEXT: or a5, a6, a5
+; RV32I-NEXT: lbu a6, 13(a0)
+; RV32I-NEXT: lbu a7, 12(a0)
+; RV32I-NEXT: lbu t0, 14(a0)
; RV32I-NEXT: lbu a0, 15(a0)
-; RV32I-NEXT: lbu a1, 0(a1)
-; RV32I-NEXT: sb zero, 19(sp)
-; RV32I-NEXT: sb zero, 18(sp)
-; RV32I-NEXT: sb zero, 17(sp)
-; RV32I-NEXT: sb zero, 16(sp)
-; RV32I-NEXT: sb zero, 15(sp)
-; RV32I-NEXT: sb zero, 14(sp)
-; RV32I-NEXT: sb zero, 13(sp)
-; RV32I-NEXT: sb zero, 12(sp)
-; RV32I-NEXT: sb zero, 11(sp)
-; RV32I-NEXT: sb zero, 10(sp)
-; RV32I-NEXT: sb zero, 9(sp)
-; RV32I-NEXT: sb zero, 8(sp)
-; RV32I-NEXT: sb zero, 7(sp)
-; RV32I-NEXT: sb zero, 6(sp)
-; RV32I-NEXT: sb zero, 5(sp)
-; RV32I-NEXT: sb zero, 4(sp)
-; RV32I-NEXT: sb a0, 35(sp)
-; RV32I-NEXT: sb s2, 34(sp)
-; RV32I-NEXT: sb s1, 33(sp)
-; RV32I-NEXT: sb s0, 32(sp)
-; RV32I-NEXT: sb t6, 31(sp)
-; RV32I-NEXT: sb t5, 30(sp)
-; RV32I-NEXT: sb t4, 29(sp)
-; RV32I-NEXT: sb t3, 28(sp)
-; RV32I-NEXT: sb t2, 27(sp)
-; RV32I-NEXT: sb t1, 26(sp)
-; RV32I-NEXT: sb t0, 25(sp)
-; RV32I-NEXT: sb a7, 24(sp)
-; RV32I-NEXT: sb a6, 23(sp)
-; RV32I-NEXT: sb a5, 22(sp)
-; RV32I-NEXT: sb a4, 21(sp)
-; RV32I-NEXT: sb a3, 20(sp)
-; RV32I-NEXT: andi a1, a1, 15
-; RV32I-NEXT: addi a0, sp, 20
-; RV32I-NEXT: sub a0, a0, a1
-; RV32I-NEXT: lbu a1, 5(a0)
-; RV32I-NEXT: lbu a3, 4(a0)
-; RV32I-NEXT: lbu a4, 7(a0)
-; RV32I-NEXT: lbu a5, 6(a0)
-; RV32I-NEXT: lbu a6, 1(a0)
-; RV32I-NEXT: lbu a7, 0(a0)
-; RV32I-NEXT: lbu t0, 3(a0)
-; RV32I-NEXT: lbu t1, 2(a0)
-; RV32I-NEXT: lbu t2, 13(a0)
-; RV32I-NEXT: lbu t3, 12(a0)
-; RV32I-NEXT: lbu t4, 15(a0)
-; RV32I-NEXT: lbu t5, 14(a0)
-; RV32I-NEXT: lbu t6, 10(a0)
-; RV32I-NEXT: lbu s0, 11(a0)
-; RV32I-NEXT: lbu s1, 8(a0)
-; RV32I-NEXT: lbu a0, 9(a0)
-; RV32I-NEXT: sb t6, 10(a2)
-; RV32I-NEXT: sb s0, 11(a2)
-; RV32I-NEXT: sb s1, 8(a2)
+; RV32I-NEXT: slli a6, a6, 8
+; RV32I-NEXT: or a6, a6, a7
+; RV32I-NEXT: slli t0, t0, 16
+; RV32I-NEXT: slli a0, a0, 24
+; RV32I-NEXT: or a0, a0, t0
+; RV32I-NEXT: or a0, a0, a6
+; RV32I-NEXT: lbu a6, 1(a1)
+; RV32I-NEXT: lbu a7, 0(a1)
+; RV32I-NEXT: lbu t0, 2(a1)
+; RV32I-NEXT: lbu a1, 3(a1)
+; RV32I-NEXT: slli a6, a6, 8
+; RV32I-NEXT: or a6, a6, a7
+; RV32I-NEXT: slli t0, t0, 16
+; RV32I-NEXT: slli a1, a1, 24
+; RV32I-NEXT: or a1, a1, t0
+; RV32I-NEXT: or a1, a1, a6
+; RV32I-NEXT: sw zero, 12(sp)
+; RV32I-NEXT: sw zero, 8(sp)
+; RV32I-NEXT: sw zero, 4(sp)
+; RV32I-NEXT: sw zero, 0(sp)
+; RV32I-NEXT: sw a0, 28(sp)
+; RV32I-NEXT: sw a5, 24(sp)
+; RV32I-NEXT: sw a4, 20(sp)
+; RV32I-NEXT: sw a3, 16(sp)
+; RV32I-NEXT: andi a0, a1, 12
+; RV32I-NEXT: addi a3, sp, 16
+; RV32I-NEXT: sub a3, a3, a0
+; RV32I-NEXT: lw a0, 4(a3)
+; RV32I-NEXT: slli a1, a1, 3
+; RV32I-NEXT: lw a4, 0(a3)
+; RV32I-NEXT: sll a5, a0, a1
+; RV32I-NEXT: andi a6, a1, 24
+; RV32I-NEXT: xori a6, a6, 31
+; RV32I-NEXT: srli a7, a4, 1
+; RV32I-NEXT: lw t0, 12(a3)
+; RV32I-NEXT: lw a3, 8(a3)
+; RV32I-NEXT: srl a7, a7, a6
+; RV32I-NEXT: or a7, a5, a7
+; RV32I-NEXT: sll t0, t0, a1
+; RV32I-NEXT: srli t1, a3, 1
+; RV32I-NEXT: srl t1, t1, a6
+; RV32I-NEXT: or t1, t0, t1
+; RV32I-NEXT: sll a3, a3, a1
+; RV32I-NEXT: srli a0, a0, 1
+; RV32I-NEXT: srl a0, a0, a6
+; RV32I-NEXT: or a0, a3, a0
+; RV32I-NEXT: sll a1, a4, a1
+; RV32I-NEXT: sb a1, 0(a2)
+; RV32I-NEXT: srli a3, a3, 24
+; RV32I-NEXT: sb a3, 11(a2)
+; RV32I-NEXT: srli a3, t0, 24
+; RV32I-NEXT: sb a3, 15(a2)
+; RV32I-NEXT: srli a3, a1, 16
+; RV32I-NEXT: sb a3, 2(a2)
+; RV32I-NEXT: srli a3, a1, 24
+; RV32I-NEXT: sb a3, 3(a2)
+; RV32I-NEXT: srli a1, a1, 8
+; RV32I-NEXT: sb a1, 1(a2)
+; RV32I-NEXT: srli a5, a5, 24
+; RV32I-NEXT: sb a5, 7(a2)
+; RV32I-NEXT: sb a0, 8(a2)
+; RV32I-NEXT: sb t1, 12(a2)
+; RV32I-NEXT: sb a7, 4(a2)
+; RV32I-NEXT: srli a1, a0, 16
+; RV32I-NEXT: sb a1, 10(a2)
+; RV32I-NEXT: srli a0, a0, 8
; RV32I-NEXT: sb a0, 9(a2)
-; RV32I-NEXT: sb t5, 14(a2)
-; RV32I-NEXT: sb t4, 15(a2)
-; RV32I-NEXT: sb t3, 12(a2)
-; RV32I-NEXT: sb t2, 13(a2)
-; RV32I-NEXT: sb t1, 2(a2)
-; RV32I-NEXT: sb t0, 3(a2)
-; RV32I-NEXT: sb a7, 0(a2)
-; RV32I-NEXT: sb a6, 1(a2)
-; RV32I-NEXT: sb a5, 6(a2)
-; RV32I-NEXT: sb a4, 7(a2)
-; RV32I-NEXT: sb a3, 4(a2)
-; RV32I-NEXT: sb a1, 5(a2)
-; RV32I-NEXT: lw s0, 44(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s1, 40(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s2, 36(sp) # 4-byte Folded Reload
-; RV32I-NEXT: addi sp, sp, 48
+; RV32I-NEXT: srli a0, t1, 16
+; RV32I-NEXT: sb a0, 14(a2)
+; RV32I-NEXT: srli a0, t1, 8
+; RV32I-NEXT: sb a0, 13(a2)
+; RV32I-NEXT: srli a0, a7, 16
+; RV32I-NEXT: sb a0, 6(a2)
+; RV32I-NEXT: srli a0, a7, 8
+; RV32I-NEXT: sb a0, 5(a2)
+; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
%src = load i128, ptr %src.ptr, align 1
%byteOff = load i128, ptr %byteOff.ptr, align 1
@@ -1042,6 +1296,223 @@ define void @shl_16bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
store i128 %res, ptr %dst, align 1
ret void
}
+
+define void @shl_16bytes_wordOff(ptr %src.ptr, ptr %wordOff.ptr, ptr %dst) nounwind {
+; RV64I-LABEL: shl_16bytes_wordOff:
+; RV64I: # %bb.0:
+; RV64I-NEXT: lbu a3, 1(a0)
+; RV64I-NEXT: lbu a4, 0(a0)
+; RV64I-NEXT: lbu a5, 2(a0)
+; RV64I-NEXT: lbu a6, 3(a0)
+; RV64I-NEXT: slli a3, a3, 8
+; RV64I-NEXT: or a3, a3, a4
+; RV64I-NEXT: slli a5, a5, 16
+; RV64I-NEXT: slli a6, a6, 24
+; RV64I-NEXT: or a4, a6, a5
+; RV64I-NEXT: or a3, a4, a3
+; RV64I-NEXT: lbu a4, 5(a0)
+; RV64I-NEXT: lbu a5, 4(a0)
+; RV64I-NEXT: lbu a6, 6(a0)
+; RV64I-NEXT: lbu a7, 7(a0)
+; RV64I-NEXT: slli a4, a4, 8
+; RV64I-NEXT: or a4, a4, a5
+; RV64I-NEXT: slli a6, a6, 16
+; RV64I-NEXT: slli a7, a7, 24
+; RV64I-NEXT: or a5, a7, a6
+; RV64I-NEXT: or a4, a5, a4
+; RV64I-NEXT: slli a4, a4, 32
+; RV64I-NEXT: or a3, a4, a3
+; RV64I-NEXT: lbu a4, 5(a1)
+; RV64I-NEXT: lbu a5, 4(a1)
+; RV64I-NEXT: lbu a6, 6(a1)
+; RV64I-NEXT: lbu a7, 7(a1)
+; RV64I-NEXT: slli a4, a4, 8
+; RV64I-NEXT: or a4, a4, a5
+; RV64I-NEXT: slli a6, a6, 16
+; RV64I-NEXT: slli a7, a7, 24
+; RV64I-NEXT: or a5, a7, a6
+; RV64I-NEXT: or a4, a5, a4
+; RV64I-NEXT: lbu a5, 1(a1)
+; RV64I-NEXT: lbu a6, 0(a1)
+; RV64I-NEXT: lbu a7, 2(a1)
+; RV64I-NEXT: lbu a1, 3(a1)
+; RV64I-NEXT: slli a5, a5, 8
+; RV64I-NEXT: or a5, a5, a6
+; RV64I-NEXT: slli a7, a7, 16
+; RV64I-NEXT: slli a1, a1, 24
+; RV64I-NEXT: or a1, a1, a7
+; RV64I-NEXT: or a1, a1, a5
+; RV64I-NEXT: slli a1, a1, 5
+; RV64I-NEXT: slli a4, a4, 37
+; RV64I-NEXT: or a5, a4, a1
+; RV64I-NEXT: addi a4, a5, -64
+; RV64I-NEXT: sll a1, a3, a5
+; RV64I-NEXT: bltz a4, .LBB9_2
+; RV64I-NEXT: # %bb.1:
+; RV64I-NEXT: mv a0, a1
+; RV64I-NEXT: j .LBB9_3
+; RV64I-NEXT: .LBB9_2:
+; RV64I-NEXT: lbu a6, 9(a0)
+; RV64I-NEXT: lbu a7, 8(a0)
+; RV64I-NEXT: lbu t0, 10(a0)
+; RV64I-NEXT: lbu t1, 11(a0)
+; RV64I-NEXT: slli a6, a6, 8
+; RV64I-NEXT: or a6, a6, a7
+; RV64I-NEXT: slli t0, t0, 16
+; RV64I-NEXT: slli t1, t1, 24
+; RV64I-NEXT: or a7, t1, t0
+; RV64I-NEXT: or a6, a7, a6
+; RV64I-NEXT: lbu a7, 13(a0)
+; RV64I-NEXT: lbu t0, 12(a0)
+; RV64I-NEXT: lbu t1, 14(a0)
+; RV64I-NEXT: lbu a0, 15(a0)
+; RV64I-NEXT: slli a7, a7, 8
+; RV64I-NEXT: or a7, a7, t0
+; RV64I-NEXT: slli t1, t1, 16
+; RV64I-NEXT: slli a0, a0, 24
+; RV64I-NEXT: or a0, a0, t1
+; RV64I-NEXT: or a0, a0, a7
+; RV64I-NEXT: slli a0, a0, 32
+; RV64I-NEXT: or a0, a0, a6
+; RV64I-NEXT: sll a0, a0, a5
+; RV64I-NEXT: not a5, a5
+; RV64I-NEXT: srli a3, a3, 1
+; RV64I-NEXT: srl a3, a3, a5
+; RV64I-NEXT: or a0, a0, a3
+; RV64I-NEXT: .LBB9_3:
+; RV64I-NEXT: srai a4, a4, 63
+; RV64I-NEXT: and a1, a4, a1
+; RV64I-NEXT: sb a1, 0(a2)
+; RV64I-NEXT: sb a0, 8(a2)
+; RV64I-NEXT: srli a3, a1, 56
+; RV64I-NEXT: sb a3, 7(a2)
+; RV64I-NEXT: srli a3, a1, 48
+; RV64I-NEXT: sb a3, 6(a2)
+; RV64I-NEXT: srli a3, a1, 40
+; RV64I-NEXT: sb a3, 5(a2)
+; RV64I-NEXT: srli a3, a1, 32
+; RV64I-NEXT: sb a3, 4(a2)
+; RV64I-NEXT: srli a3, a1, 24
+; RV64I-NEXT: sb a3, 3(a2)
+; RV64I-NEXT: srli a3, a1, 16
+; RV64I-NEXT: sb a3, 2(a2)
+; RV64I-NEXT: srli a1, a1, 8
+; RV64I-NEXT: sb a1, 1(a2)
+; RV64I-NEXT: srli a1, a0, 56
+; RV64I-NEXT: sb a1, 15(a2)
+; RV64I-NEXT: srli a1, a0, 48
+; RV64I-NEXT: sb a1, 14(a2)
+; RV64I-NEXT: srli a1, a0, 40
+; RV64I-NEXT: sb a1, 13(a2)
+; RV64I-NEXT: srli a1, a0, 32
+; RV64I-NEXT: sb a1, 12(a2)
+; RV64I-NEXT: srli a1, a0, 24
+; RV64I-NEXT: sb a1, 11(a2)
+; RV64I-NEXT: srli a1, a0, 16
+; RV64I-NEXT: sb a1, 10(a2)
+; RV64I-NEXT: srli a0, a0, 8
+; RV64I-NEXT: sb a0, 9(a2)
+; RV64I-NEXT: ret
+;
+; RV32I-LABEL: shl_16bytes_wordOff:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi sp, sp, -32
+; RV32I-NEXT: lbu a3, 1(a0)
+; RV32I-NEXT: lbu a4, 0(a0)
+; RV32I-NEXT: lbu a5, 2(a0)
+; RV32I-NEXT: lbu a6, 3(a0)
+; RV32I-NEXT: slli a3, a3, 8
+; RV32I-NEXT: or a3, a3, a4
+; RV32I-NEXT: slli a5, a5, 16
+; RV32I-NEXT: slli a6, a6, 24
+; RV32I-NEXT: or a4, a6, a5
+; RV32I-NEXT: or a3, a4, a3
+; RV32I-NEXT: lbu a4, 5(a0)
+; RV32I-NEXT: lbu a5, 4(a0)
+; RV32I-NEXT: lbu a6, 6(a0)
+; RV32I-NEXT: lbu a7, 7(a0)
+; RV32I-NEXT: slli a4, a4, 8
+; RV32I-NEXT: or a4, a4, a5
+; RV32I-NEXT: slli a6, a6, 16
+; RV32I-NEXT: slli a7, a7, 24
+; RV32I-NEXT: or a5, a7, a6
+; RV32I-NEXT: or a4, a5, a4
+; RV32I-NEXT: lbu a5, 9(a0)
+; RV32I-NEXT: lbu a6, 8(a0)
+; RV32I-NEXT: lbu a7, 10(a0)
+; RV32I-NEXT: lbu t0, 11(a0)
+; RV32I-NEXT: slli a5, a5, 8
+; RV32I-NEXT: or a5, a5, a6
+; RV32I-NEXT: slli a7, a7, 16
+; RV32I-NEXT: slli t0, t0, 24
+; RV32I-NEXT: or a6, t0, a7
+; RV32I-NEXT: or a5, a6, a5
+; RV32I-NEXT: lbu a6, 13(a0)
+; RV32I-NEXT: lbu a7, 12(a0)
+; RV32I-NEXT: lbu t0, 14(a0)
+; RV32I-NEXT: lbu a0, 15(a0)
+; RV32I-NEXT: slli a6, a6, 8
+; RV32I-NEXT: or a6, a6, a7
+; RV32I-NEXT: slli t0, t0, 16
+; RV32I-NEXT: slli a0, a0, 24
+; RV32I-NEXT: or a0, a0, t0
+; RV32I-NEXT: or a0, a0, a6
+; RV32I-NEXT: lbu a1, 0(a1)
+; RV32I-NEXT: sw zero, 12(sp)
+; RV32I-NEXT: sw zero, 8(sp)
+; RV32I-NEXT: sw zero, 4(sp)
+; RV32I-NEXT: sw zero, 0(sp)
+; RV32I-NEXT: sw a0, 28(sp)
+; RV32I-NEXT: sw a5, 24(sp)
+; RV32I-NEXT: sw a4, 20(sp)
+; RV32I-NEXT: sw a3, 16(sp)
+; RV32I-NEXT: slli a1, a1, 2
+; RV32I-NEXT: andi a1, a1, 12
+; RV32I-NEXT: addi a0, sp, 16
+; RV32I-NEXT: sub a0, a0, a1
+; RV32I-NEXT: lw a1, 8(a0)
+; RV32I-NEXT: lw a3, 12(a0)
+; RV32I-NEXT: lw a4, 0(a0)
+; RV32I-NEXT: lw a0, 4(a0)
+; RV32I-NEXT: sb a1, 8(a2)
+; RV32I-NEXT: sb a3, 12(a2)
+; RV32I-NEXT: sb a4, 0(a2)
+; RV32I-NEXT: sb a0, 4(a2)
+; RV32I-NEXT: srli a5, a1, 16
+; RV32I-NEXT: sb a5, 10(a2)
+; RV32I-NEXT: srli a5, a1, 24
+; RV32I-NEXT: sb a5, 11(a2)
+; RV32I-NEXT: srli a1, a1, 8
+; RV32I-NEXT: sb a1, 9(a2)
+; RV32I-NEXT: srli a1, a3, 16
+; RV32I-NEXT: sb a1, 14(a2)
+; RV32I-NEXT: srli a1, a3, 24
+; RV32I-NEXT: sb a1, 15(a2)
+; RV32I-NEXT: srli a3, a3, 8
+; RV32I-NEXT: sb a3, 13(a2)
+; RV32I-NEXT: srli a1, a4, 16
+; RV32I-NEXT: sb a1, 2(a2)
+; RV32I-NEXT: srli a1, a4, 24
+; RV32I-NEXT: sb a1, 3(a2)
+; RV32I-NEXT: srli a4, a4, 8
+; RV32I-NEXT: sb a4, 1(a2)
+; RV32I-NEXT: srli a1, a0, 16
+; RV32I-NEXT: sb a1, 6(a2)
+; RV32I-NEXT: srli a1, a0, 24
+; RV32I-NEXT: sb a1, 7(a2)
+; RV32I-NEXT: srli a0, a0, 8
+; RV32I-NEXT: sb a0, 5(a2)
+; RV32I-NEXT: addi sp, sp, 32
+; RV32I-NEXT: ret
+ %src = load i128, ptr %src.ptr, align 1
+ %wordOff = load i128, ptr %wordOff.ptr, align 1
+ %bitOff = shl i128 %wordOff, 5
+ %res = shl i128 %src, %bitOff
+ store i128 %res, ptr %dst, align 1
+ ret void
+}
+
define void @ashr_16bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; RV64I-LABEL: ashr_16bytes:
; RV64I: # %bb.0:
@@ -1092,13 +1563,13 @@ define void @ashr_16bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; RV64I-NEXT: or a5, a5, a1
; RV64I-NEXT: addi a6, a5, -64
; RV64I-NEXT: sra a1, a3, a5
-; RV64I-NEXT: bltz a6, .LBB8_2
+; RV64I-NEXT: bltz a6, .LBB10_2
; RV64I-NEXT: # %bb.1:
; RV64I-NEXT: sraiw a3, a4, 31
; RV64I-NEXT: mv a0, a1
; RV64I-NEXT: mv a1, a3
-; RV64I-NEXT: j .LBB8_3
-; RV64I-NEXT: .LBB8_2:
+; RV64I-NEXT: j .LBB10_3
+; RV64I-NEXT: .LBB10_2:
; RV64I-NEXT: lbu a4, 1(a0)
; RV64I-NEXT: lbu a6, 0(a0)
; RV64I-NEXT: lbu a7, 2(a0)
@@ -1126,7 +1597,7 @@ define void @ashr_16bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; RV64I-NEXT: slli a3, a3, 1
; RV64I-NEXT: sll a3, a3, a4
; RV64I-NEXT: or a0, a0, a3
-; RV64I-NEXT: .LBB8_3:
+; RV64I-NEXT: .LBB10_3:
; RV64I-NEXT: sb a1, 8(a2)
; RV64I-NEXT: srli a3, a1, 56
; RV64I-NEXT: sb a3, 15(a2)
@@ -1161,105 +1632,118 @@ define void @ashr_16bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
;
; RV32I-LABEL: ashr_16bytes:
; RV32I: # %bb.0:
-; RV32I-NEXT: addi sp, sp, -48
-; RV32I-NEXT: sw s0, 44(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s1, 40(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s2, 36(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s3, 32(sp) # 4-byte Folded Spill
-; RV32I-NEXT: lbu a3, 15(a0)
-; RV32I-NEXT: slli a4, a3, 24
-; RV32I-NEXT: lbu a5, 0(a0)
-; RV32I-NEXT: lbu a6, 1(a0)
-; RV32I-NEXT: lbu a7, 2(a0)
-; RV32I-NEXT: lbu t0, 3(a0)
-; RV32I-NEXT: lbu t1, 4(a0)
-; RV32I-NEXT: lbu t2, 5(a0)
-; RV32I-NEXT: lbu t3, 6(a0)
-; RV32I-NEXT: lbu t4, 7(a0)
-; RV32I-NEXT: lbu t5, 8(a0)
-; RV32I-NEXT: lbu t6, 9(a0)
-; RV32I-NEXT: lbu s0, 10(a0)
-; RV32I-NEXT: lbu s1, 11(a0)
-; RV32I-NEXT: lbu s2, 12(a0)
-; RV32I-NEXT: lbu s3, 14(a0)
-; RV32I-NEXT: lbu a0, 13(a0)
-; RV32I-NEXT: lbu a1, 0(a1)
-; RV32I-NEXT: sb a3, 15(sp)
-; RV32I-NEXT: sb s3, 14(sp)
-; RV32I-NEXT: sb a0, 13(sp)
-; RV32I-NEXT: sb s2, 12(sp)
-; RV32I-NEXT: sb s1, 11(sp)
-; RV32I-NEXT: sb s0, 10(sp)
-; RV32I-NEXT: sb t6, 9(sp)
-; RV32I-NEXT: sb t5, 8(sp)
-; RV32I-NEXT: sb t4, 7(sp)
-; RV32I-NEXT: sb t3, 6(sp)
-; RV32I-NEXT: sb t2, 5(sp)
-; RV32I-NEXT: sb t1, 4(sp)
-; RV32I-NEXT: sb t0, 3(sp)
-; RV32I-NEXT: sb a7, 2(sp)
-; RV32I-NEXT: sb a6, 1(sp)
-; RV32I-NEXT: sb a5, 0(sp)
-; RV32I-NEXT: srai a4, a4, 31
-; RV32I-NEXT: sb a4, 28(sp)
-; RV32I-NEXT: sb a4, 24(sp)
-; RV32I-NEXT: sb a4, 20(sp)
-; RV32I-NEXT: sb a4, 16(sp)
-; RV32I-NEXT: srli a0, a4, 24
-; RV32I-NEXT: sb a0, 31(sp)
-; RV32I-NEXT: srli a3, a4, 16
-; RV32I-NEXT: sb a3, 30(sp)
-; RV32I-NEXT: srli a4, a4, 8
-; RV32I-NEXT: sb a4, 29(sp)
-; RV32I-NEXT: sb a0, 27(sp)
-; RV32I-NEXT: sb a3, 26(sp)
-; RV32I-NEXT: sb a4, 25(sp)
-; RV32I-NEXT: sb a0, 23(sp)
-; RV32I-NEXT: sb a3, 22(sp)
-; RV32I-NEXT: sb a4, 21(sp)
-; RV32I-NEXT: sb a0, 19(sp)
-; RV32I-NEXT: sb a3, 18(sp)
-; RV32I-NEXT: sb a4, 17(sp)
-; RV32I-NEXT: andi a1, a1, 15
-; RV32I-NEXT: mv a0, sp
-; RV32I-NEXT: add a0, a0, a1
-; RV32I-NEXT: lbu a1, 5(a0)
-; RV32I-NEXT: lbu a3, 4(a0)
-; RV32I-NEXT: lbu a4, 7(a0)
-; RV32I-NEXT: lbu a5, 6(a0)
-; RV32I-NEXT: lbu a6, 1(a0)
-; RV32I-NEXT: lbu a7, 0(a0)
-; RV32I-NEXT: lbu t0, 3(a0)
-; RV32I-NEXT: lbu t1, 2(a0)
-; RV32I-NEXT: lbu t2, 13(a0)
-; RV32I-NEXT: lbu t3, 12(a0)
-; RV32I-NEXT: lbu t4, 15(a0)
-; RV32I-NEXT: lbu t5, 14(a0)
-; RV32I-NEXT: lbu t6, 10(a0)
-; RV32I-NEXT: lbu s0, 11(a0)
-; RV32I-NEXT: lbu s1, 8(a0)
-; RV32I-NEXT: lbu a0, 9(a0)
-; RV32I-NEXT: sb t6, 10(a2)
-; RV32I-NEXT: sb s0, 11(a2)
-; RV32I-NEXT: sb s1, 8(a2)
-; RV32I-NEXT: sb a0, 9(a2)
-; RV32I-NEXT: sb t5, 14(a2)
-; RV32I-NEXT: sb t4, 15(a2)
-; RV32I-NEXT: sb t3, 12(a2)
-; RV32I-NEXT: sb t2, 13(a2)
-; RV32I-NEXT: sb t1, 2(a2)
-; RV32I-NEXT: sb t0, 3(a2)
+; RV32I-NEXT: addi sp, sp, -32
+; RV32I-NEXT: lbu a3, 1(a0)
+; RV32I-NEXT: lbu a4, 0(a0)
+; RV32I-NEXT: lbu a5, 2(a0)
+; RV32I-NEXT: lbu a6, 3(a0)
+; RV32I-NEXT: slli a3, a3, 8
+; RV32I-NEXT: or a3, a3, a4
+; RV32I-NEXT: slli a5, a5, 16
+; RV32I-NEXT: slli a6, a6, 24
+; RV32I-NEXT: or a4, a6, a5
+; RV32I-NEXT: or a3, a4, a3
+; RV32I-NEXT: lbu a4, 5(a0)
+; RV32I-NEXT: lbu a5, 4(a0)
+; RV32I-NEXT: lbu a6, 6(a0)
+; RV32I-NEXT: lbu a7, 7(a0)
+; RV32I-NEXT: slli a4, a4, 8
+; RV32I-NEXT: or a4, a4, a5
+; RV32I-NEXT: slli a6, a6, 16
+; RV32I-NEXT: slli a7, a7, 24
+; RV32I-NEXT: or a5, a7, a6
+; RV32I-NEXT: or a4, a5, a4
+; RV32I-NEXT: lbu a5, 9(a0)
+; RV32I-NEXT: lbu a6, 8(a0)
+; RV32I-NEXT: lbu a7, 10(a0)
+; RV32I-NEXT: lbu t0, 11(a0)
+; RV32I-NEXT: slli a5, a5, 8
+; RV32I-NEXT: or a5, a5, a6
+; RV32I-NEXT: slli a7, a7, 16
+; RV32I-NEXT: slli t0, t0, 24
+; RV32I-NEXT: or a6, t0, a7
+; RV32I-NEXT: or a5, a6, a5
+; RV32I-NEXT: lbu a6, 13(a0)
+; RV32I-NEXT: lbu a7, 12(a0)
+; RV32I-NEXT: lbu t0, 14(a0)
+; RV32I-NEXT: lbu a0, 15(a0)
+; RV32I-NEXT: slli a6, a6, 8
+; RV32I-NEXT: or a6, a6, a7
+; RV32I-NEXT: slli t0, t0, 16
+; RV32I-NEXT: slli a0, a0, 24
+; RV32I-NEXT: or a7, a0, t0
+; RV32I-NEXT: or a6, a7, a6
+; RV32I-NEXT: lbu a7, 1(a1)
+; RV32I-NEXT: lbu t0, 0(a1)
+; RV32I-NEXT: lbu t1, 2(a1)
+; RV32I-NEXT: lbu a1, 3(a1)
+; RV32I-NEXT: slli a7, a7, 8
+; RV32I-NEXT: or a7, a7, t0
+; RV32I-NEXT: slli t1, t1, 16
+; RV32I-NEXT: slli a1, a1, 24
+; RV32I-NEXT: or a1, a1, t1
+; RV32I-NEXT: or a1, a1, a7
+; RV32I-NEXT: srai a0, a0, 31
+; RV32I-NEXT: sw a0, 28(sp)
+; RV32I-NEXT: sw a0, 24(sp)
+; RV32I-NEXT: sw a0, 20(sp)
+; RV32I-NEXT: sw a0, 16(sp)
+; RV32I-NEXT: sw a6, 12(sp)
+; RV32I-NEXT: sw a5, 8(sp)
+; RV32I-NEXT: sw a4, 4(sp)
+; RV32I-NEXT: sw a3, 0(sp)
+; RV32I-NEXT: andi a0, a1, 12
+; RV32I-NEXT: mv a3, sp
+; RV32I-NEXT: add a0, a3, a0
+; RV32I-NEXT: lw a3, 4(a0)
+; RV32I-NEXT: slli a1, a1, 3
+; RV32I-NEXT: srl a4, a3, a1
+; RV32I-NEXT: lw a5, 8(a0)
+; RV32I-NEXT: andi a6, a1, 24
+; RV32I-NEXT: xori a6, a6, 31
+; RV32I-NEXT: lw a7, 0(a0)
+; RV32I-NEXT: slli t0, a5, 1
+; RV32I-NEXT: sll t0, t0, a6
+; RV32I-NEXT: or t0, a4, t0
+; RV32I-NEXT: srl a7, a7, a1
+; RV32I-NEXT: slli a3, a3, 1
+; RV32I-NEXT: lw a0, 12(a0)
+; RV32I-NEXT: sll a3, a3, a6
+; RV32I-NEXT: or a3, a7, a3
+; RV32I-NEXT: srl a5, a5, a1
+; RV32I-NEXT: slli t1, a0, 1
+; RV32I-NEXT: sll a6, t1, a6
+; RV32I-NEXT: or a6, a5, a6
+; RV32I-NEXT: sra a0, a0, a1
+; RV32I-NEXT: sb a5, 8(a2)
+; RV32I-NEXT: sb a0, 12(a2)
; RV32I-NEXT: sb a7, 0(a2)
-; RV32I-NEXT: sb a6, 1(a2)
-; RV32I-NEXT: sb a5, 6(a2)
-; RV32I-NEXT: sb a4, 7(a2)
-; RV32I-NEXT: sb a3, 4(a2)
-; RV32I-NEXT: sb a1, 5(a2)
-; RV32I-NEXT: lw s0, 44(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s1, 40(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s2, 36(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s3, 32(sp) # 4-byte Folded Reload
-; RV32I-NEXT: addi sp, sp, 48
+; RV32I-NEXT: sb a4, 4(a2)
+; RV32I-NEXT: srli a1, a0, 16
+; RV32I-NEXT: sb a1, 14(a2)
+; RV32I-NEXT: srli a1, a0, 24
+; RV32I-NEXT: sb a1, 15(a2)
+; RV32I-NEXT: srli a0, a0, 8
+; RV32I-NEXT: sb a0, 13(a2)
+; RV32I-NEXT: srli a0, a6, 16
+; RV32I-NEXT: sb a0, 10(a2)
+; RV32I-NEXT: srli a0, a6, 24
+; RV32I-NEXT: sb a0, 11(a2)
+; RV32I-NEXT: srli a0, a6, 8
+; RV32I-NEXT: sb a0, 9(a2)
+; RV32I-NEXT: srli a0, a3, 16
+; RV32I-NEXT: sb a0, 2(a2)
+; RV32I-NEXT: srli a0, a3, 24
+; RV32I-NEXT: sb a0, 3(a2)
+; RV32I-NEXT: srli a3, a3, 8
+; RV32I-NEXT: sb a3, 1(a2)
+; RV32I-NEXT: srli a0, t0, 16
+; RV32I-NEXT: sb a0, 6(a2)
+; RV32I-NEXT: srli a0, t0, 24
+; RV32I-NEXT: sb a0, 7(a2)
+; RV32I-NEXT: srli a0, t0, 8
+; RV32I-NEXT: sb a0, 5(a2)
+; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
%src = load i128, ptr %src.ptr, align 1
%byteOff = load i128, ptr %byteOff.ptr, align 1
@@ -1269,441 +1753,645 @@ define void @ashr_16bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
ret void
}
+
+define void @ashr_16bytes_wordOff(ptr %src.ptr, ptr %wordOff.ptr, ptr %dst) nounwind {
+; RV64I-LABEL: ashr_16bytes_wordOff:
+; RV64I: # %bb.0:
+; RV64I-NEXT: lbu a3, 9(a0)
+; RV64I-NEXT: lbu a4, 8(a0)
+; RV64I-NEXT: lbu a5, 10(a0)
+; RV64I-NEXT: lbu a6, 11(a0)
+; RV64I-NEXT: slli a3, a3, 8
+; RV64I-NEXT: or a3, a3, a4
+; RV64I-NEXT: slli a5, a5, 16
+; RV64I-NEXT: slli a6, a6, 24
+; RV64I-NEXT: or a4, a6, a5
+; RV64I-NEXT: or a3, a4, a3
+; RV64I-NEXT: lbu a4, 13(a0)
+; RV64I-NEXT: lbu a5, 12(a0)
+; RV64I-NEXT: lbu a6, 14(a0)
+; RV64I-NEXT: lbu a7, 15(a0)
+; RV64I-NEXT: slli a4, a4, 8
+; RV64I-NEXT: or a4, a4, a5
+; RV64I-NEXT: slli a6, a6, 16
+; RV64I-NEXT: slli a7, a7, 24
+; RV64I-NEXT: or a5, a7, a6
+; RV64I-NEXT: or a4, a5, a4
+; RV64I-NEXT: slli a5, a4, 32
+; RV64I-NEXT: or a3, a5, a3
+; RV64I-NEXT: lbu a5, 5(a1)
+; RV64I-NEXT: lbu a6, 4(a1)
+; RV64I-NEXT: lbu a7, 6(a1)
+; RV64I-NEXT: lbu t0, 7(a1)
+; RV64I-NEXT: slli a5, a5, 8
+; RV64I-NEXT: or a5, a5, a6
+; RV64I-NEXT: slli a7, a7, 16
+; RV64I-NEXT: slli t0, t0, 24
+; RV64I-NEXT: or a6, t0, a7
+; RV64I-NEXT: or a5, a6, a5
+; RV64I-NEXT: lbu a6, 1(a1)
+; RV64I-NEXT: lbu a7, 0(a1)
+; RV64I-NEXT: lbu t0, 2(a1)
+; RV64I-NEXT: lbu a1, 3(a1)
+; RV64I-NEXT: slli a6, a6, 8
+; RV64I-NEXT: or a6, a6, a7
+; RV64I-NEXT: slli t0, t0, 16
+; RV64I-NEXT: slli a1, a1, 24
+; RV64I-NEXT: or a1, a1, t0
+; RV64I-NEXT: or a1, a1, a6
+; RV64I-NEXT: slli a1, a1, 5
+; RV64I-NEXT: slli a5, a5, 37
+; RV64I-NEXT: or a5, a5, a1
+; RV64I-NEXT: addi a6, a5, -64
+; RV64I-NEXT: sra a1, a3, a5
+; RV64I-NEXT: bltz a6, .LBB11_2
+; RV64I-NEXT: # %bb.1:
+; RV64I-NEXT: sraiw a3, a4, 31
+; RV64I-NEXT: mv a0, a1
+; RV64I-NEXT: mv a1, a3
+; RV64I-NEXT: j .LBB11_3
+; RV64I-NEXT: .LBB11_2:
+; RV64I-NEXT: lbu a4, 1(a0)
+; RV64I-NEXT: lbu a6, 0(a0)
+; RV64I-NEXT: lbu a7, 2(a0)
+; RV64I-NEXT: lbu t0, 3(a0)
+; RV64I-NEXT: slli a4, a4, 8
+; RV64I-NEXT: or a4, a4, a6
+; RV64I-NEXT: slli a7, a7, 16
+; RV64I-NEXT: slli t0, t0, 24
+; RV64I-NEXT: or a6, t0, a7
+; RV64I-NEXT: or a4, a6, a4
+; RV64I-NEXT: lbu a6, 5(a0)
+; RV64I-NEXT: lbu a7, 4(a0)
+; RV64I-NEXT: lbu t0, 6(a0)
+; RV64I-NEXT: lbu a0, 7(a0)
+; RV64I-NEXT: slli a6, a6, 8
+; RV64I-NEXT: or a6, a6, a7
+; RV64I-NEXT: slli t0, t0, 16
+; RV64I-NEXT: slli a0, a0, 24
+; RV64I-NEXT: or a0, a0, t0
+; RV64I-NEXT: or a0, a0, a6
+; RV64I-NEXT: slli a0, a0, 32
+; RV64I-NEXT: or a0, a0, a4
+; RV64I-NEXT: srl a0, a0, a5
+; RV64I-NEXT: not a4, a5
+; RV64I-NEXT: slli a3, a3, 1
+; RV64I-NEXT: sll a3, a3, a4
+; RV64I-NEXT: or a0, a0, a3
+; RV64I-NEXT: .LBB11_3:
+; RV64I-NEXT: sb a1, 8(a2)
+; RV64I-NEXT: srli a3, a1, 56
+; RV64I-NEXT: sb a3, 15(a2)
+; RV64I-NEXT: srli a3, a1, 48
+; RV64I-NEXT: sb a3, 14(a2)
+; RV64I-NEXT: srli a3, a1, 40
+; RV64I-NEXT: sb a3, 13(a2)
+; RV64I-NEXT: srli a3, a1, 32
+; RV64I-NEXT: sb a3, 12(a2)
+; RV64I-NEXT: srli a3, a1, 24
+; RV64I-NEXT: sb a3, 11(a2)
+; RV64I-NEXT: srli a3, a1, 16
+; RV64I-NEXT: sb a3, 10(a2)
+; RV64I-NEXT: srli a1, a1, 8
+; RV64I-NEXT: sb a1, 9(a2)
+; RV64I-NEXT: sb a0, 0(a2)
+; RV64I-NEXT: srli a1, a0, 56
+; RV64I-NEXT: sb a1, 7(a2)
+; RV64I-NEXT: srli a1, a0, 48
+; RV64I-NEXT: sb a1, 6(a2)
+; RV64I-NEXT: srli a1, a0, 40
+; RV64I-NEXT: sb a1, 5(a2)
+; RV64I-NEXT: srli a1, a0, 32
+; RV64I-NEXT: sb a1, 4(a2)
+; RV64I-NEXT: srli a1, a0, 24
+; RV64I-NEXT: sb a1, 3(a2)
+; RV64I-NEXT: srli a1, a0, 16
+; RV64I-NEXT: sb a1, 2(a2)
+; RV64I-NEXT: srli a0, a0, 8
+; RV64I-NEXT: sb a0, 1(a2)
+; RV64I-NEXT: ret
+;
+; RV32I-LABEL: ashr_16bytes_wordOff:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi sp, sp, -32
+; RV32I-NEXT: lbu a3, 1(a0)
+; RV32I-NEXT: lbu a4, 0(a0)
+; RV32I-NEXT: lbu a5, 2(a0)
+; RV32I-NEXT: lbu a6, 3(a0)
+; RV32I-NEXT: slli a3, a3, 8
+; RV32I-NEXT: or a3, a3, a4
+; RV32I-NEXT: slli a5, a5, 16
+; RV32I-NEXT: slli a6, a6, 24
+; RV32I-NEXT: or a4, a6, a5
+; RV32I-NEXT: or a3, a4, a3
+; RV32I-NEXT: lbu a4, 5(a0)
+; RV32I-NEXT: lbu a5, 4(a0)
+; RV32I-NEXT: lbu a6, 6(a0)
+; RV32I-NEXT: lbu a7, 7(a0)
+; RV32I-NEXT: slli a4, a4, 8
+; RV32I-NEXT: or a4, a4, a5
+; RV32I-NEXT: slli a6, a6, 16
+; RV32I-NEXT: slli a7, a7, 24
+; RV32I-NEXT: or a5, a7, a6
+; RV32I-NEXT: or a4, a5, a4
+; RV32I-NEXT: lbu a5, 9(a0)
+; RV32I-NEXT: lbu a6, 8(a0)
+; RV32I-NEXT: lbu a7, 10(a0)
+; RV32I-NEXT: lbu t0, 11(a0)
+; RV32I-NEXT: slli a5, a5, 8
+; RV32I-NEXT: or a5, a5, a6
+; RV32I-NEXT: slli a7, a7, 16
+; RV32I-NEXT: slli t0, t0, 24
+; RV32I-NEXT: or a6, t0, a7
+; RV32I-NEXT: or a5, a6, a5
+; RV32I-NEXT: lbu a6, 13(a0)
+; RV32I-NEXT: lbu a7, 12(a0)
+; RV32I-NEXT: lbu t0, 14(a0)
+; RV32I-NEXT: lbu a0, 15(a0)
+; RV32I-NEXT: slli a6, a6, 8
+; RV32I-NEXT: or a6, a6, a7
+; RV32I-NEXT: slli t0, t0, 16
+; RV32I-NEXT: slli a0, a0, 24
+; RV32I-NEXT: or a7, a0, t0
+; RV32I-NEXT: or a6, a7, a6
+; RV32I-NEXT: lbu a1, 0(a1)
+; RV32I-NEXT: srai a0, a0, 31
+; RV32I-NEXT: sw a0, 28(sp)
+; RV32I-NEXT: sw a0, 24(sp)
+; RV32I-NEXT: sw a0, 20(sp)
+; RV32I-NEXT: sw a0, 16(sp)
+; RV32I-NEXT: sw a6, 12(sp)
+; RV32I-NEXT: sw a5, 8(sp)
+; RV32I-NEXT: sw a4, 4(sp)
+; RV32I-NEXT: sw a3, 0(sp)
+; RV32I-NEXT: slli a1, a1, 2
+; RV32I-NEXT: andi a1, a1, 12
+; RV32I-NEXT: mv a0, sp
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: lw a1, 8(a0)
+; RV32I-NEXT: lw a3, 12(a0)
+; RV32I-NEXT: lw a4, 0(a0)
+; RV32I-NEXT: lw a0, 4(a0)
+; RV32I-NEXT: sb a1, 8(a2)
+; RV32I-NEXT: sb a3, 12(a2)
+; RV32I-NEXT: sb a4, 0(a2)
+; RV32I-NEXT: sb a0, 4(a2)
+; RV32I-NEXT: srli a5, a1, 16
+; RV32I-NEXT: sb a5, 10(a2)
+; RV32I-NEXT: srli a5, a1, 24
+; RV32I-NEXT: sb a5, 11(a2)
+; RV32I-NEXT: srli a1, a1, 8
+; RV32I-NEXT: sb a1, 9(a2)
+; RV32I-NEXT: srli a1, a3, 16
+; RV32I-NEXT: sb a1, 14(a2)
+; RV32I-NEXT: srli a1, a3, 24
+; RV32I-NEXT: sb a1, 15(a2)
+; RV32I-NEXT: srli a3, a3, 8
+; RV32I-NEXT: sb a3, 13(a2)
+; RV32I-NEXT: srli a1, a4, 16
+; RV32I-NEXT: sb a1, 2(a2)
+; RV32I-NEXT: srli a1, a4, 24
+; RV32I-NEXT: sb a1, 3(a2)
+; RV32I-NEXT: srli a4, a4, 8
+; RV32I-NEXT: sb a4, 1(a2)
+; RV32I-NEXT: srli a1, a0, 16
+; RV32I-NEXT: sb a1, 6(a2)
+; RV32I-NEXT: srli a1, a0, 24
+; RV32I-NEXT: sb a1, 7(a2)
+; RV32I-NEXT: srli a0, a0, 8
+; RV32I-NEXT: sb a0, 5(a2)
+; RV32I-NEXT: addi sp, sp, 32
+; RV32I-NEXT: ret
+ %src = load i128, ptr %src.ptr, align 1
+ %wordOff = load i128, ptr %wordOff.ptr, align 1
+ %bitOff = shl i128 %wordOff, 5
+ %res = ashr i128 %src, %bitOff
+ store i128 %res, ptr %dst, align 1
+ ret void
+}
+
define void @lshr_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; RV64I-LABEL: lshr_32bytes:
; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -224
-; RV64I-NEXT: sd ra, 216(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s0, 208(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s1, 200(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s2, 192(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s3, 184(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s4, 176(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s5, 168(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s6, 160(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s7, 152(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s8, 144(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s9, 136(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s10, 128(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s11, 120(sp) # 8-byte Folded Spill
-; RV64I-NEXT: lbu a3, 0(a0)
-; RV64I-NEXT: sd a3, 48(sp) # 8-byte Folded Spill
+; RV64I-NEXT: addi sp, sp, -64
; RV64I-NEXT: lbu a3, 1(a0)
-; RV64I-NEXT: sd a3, 40(sp) # 8-byte Folded Spill
-; RV64I-NEXT: lbu a3, 2(a0)
-; RV64I-NEXT: sd a3, 32(sp) # 8-byte Folded Spill
-; RV64I-NEXT: lbu a3, 3(a0)
-; RV64I-NEXT: sd a3, 24(sp) # 8-byte Folded Spill
-; RV64I-NEXT: lbu a3, 4(a0)
-; RV64I-NEXT: sd a3, 16(sp) # 8-byte Folded Spill
-; RV64I-NEXT: lbu a3, 5(a0)
-; RV64I-NEXT: sd a3, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: lbu t1, 6(a0)
-; RV64I-NEXT: lbu t2, 7(a0)
-; RV64I-NEXT: lbu t3, 8(a0)
-; RV64I-NEXT: lbu t4, 9(a0)
-; RV64I-NEXT: lbu t5, 10(a0)
-; RV64I-NEXT: lbu t6, 11(a0)
-; RV64I-NEXT: lbu s0, 12(a0)
-; RV64I-NEXT: lbu s1, 13(a0)
-; RV64I-NEXT: lbu s2, 14(a0)
-; RV64I-NEXT: lbu s3, 15(a0)
-; RV64I-NEXT: lbu s4, 16(a0)
-; RV64I-NEXT: lbu s5, 17(a0)
-; RV64I-NEXT: lbu s6, 18(a0)
-; RV64I-NEXT: lbu s7, 19(a0)
-; RV64I-NEXT: lbu s8, 20(a0)
-; RV64I-NEXT: lbu s9, 21(a0)
-; RV64I-NEXT: lbu s10, 22(a0)
-; RV64I-NEXT: lbu s11, 23(a0)
-; RV64I-NEXT: lbu ra, 24(a0)
-; RV64I-NEXT: lbu t0, 25(a0)
-; RV64I-NEXT: lbu a7, 26(a0)
-; RV64I-NEXT: lbu a6, 27(a0)
-; RV64I-NEXT: lbu a5, 28(a0)
-; RV64I-NEXT: lbu a3, 31(a0)
-; RV64I-NEXT: lbu a4, 30(a0)
-; RV64I-NEXT: lbu a0, 29(a0)
-; RV64I-NEXT: lbu a1, 0(a1)
-; RV64I-NEXT: sb a3, 87(sp)
-; RV64I-NEXT: sb a4, 86(sp)
-; RV64I-NEXT: sb a0, 85(sp)
-; RV64I-NEXT: sb a5, 84(sp)
-; RV64I-NEXT: sb a6, 83(sp)
-; RV64I-NEXT: sb a7, 82(sp)
-; RV64I-NEXT: sb zero, 119(sp)
-; RV64I-NEXT: sb zero, 118(sp)
-; RV64I-NEXT: sb zero, 117(sp)
-; RV64I-NEXT: sb zero, 116(sp)
-; RV64I-NEXT: sb zero, 115(sp)
-; RV64I-NEXT: sb zero, 114(sp)
-; RV64I-NEXT: sb zero, 113(sp)
-; RV64I-NEXT: sb zero, 112(sp)
-; RV64I-NEXT: sb zero, 111(sp)
-; RV64I-NEXT: sb zero, 110(sp)
-; RV64I-NEXT: sb zero, 109(sp)
-; RV64I-NEXT: sb zero, 108(sp)
-; RV64I-NEXT: sb zero, 107(sp)
-; RV64I-NEXT: sb zero, 106(sp)
-; RV64I-NEXT: sb zero, 105(sp)
-; RV64I-NEXT: sb zero, 104(sp)
-; RV64I-NEXT: sb zero, 103(sp)
-; RV64I-NEXT: sb zero, 102(sp)
-; RV64I-NEXT: sb zero, 101(sp)
-; RV64I-NEXT: sb zero, 100(sp)
-; RV64I-NEXT: sb zero, 99(sp)
-; RV64I-NEXT: sb zero, 98(sp)
-; RV64I-NEXT: sb zero, 97(sp)
-; RV64I-NEXT: sb zero, 96(sp)
-; RV64I-NEXT: sb zero, 95(sp)
-; RV64I-NEXT: sb zero, 94(sp)
-; RV64I-NEXT: sb zero, 93(sp)
-; RV64I-NEXT: sb zero, 92(sp)
-; RV64I-NEXT: sb zero, 91(sp)
-; RV64I-NEXT: sb zero, 90(sp)
-; RV64I-NEXT: sb zero, 89(sp)
-; RV64I-NEXT: sb zero, 88(sp)
-; RV64I-NEXT: sb t0, 81(sp)
-; RV64I-NEXT: sb ra, 80(sp)
-; RV64I-NEXT: sb s11, 79(sp)
-; RV64I-NEXT: sb s10, 78(sp)
-; RV64I-NEXT: sb s9, 77(sp)
-; RV64I-NEXT: sb s8, 76(sp)
-; RV64I-NEXT: sb s7, 75(sp)
-; RV64I-NEXT: sb s6, 74(sp)
-; RV64I-NEXT: sb s5, 73(sp)
-; RV64I-NEXT: sb s4, 72(sp)
-; RV64I-NEXT: sb s3, 71(sp)
-; RV64I-NEXT: sb s2, 70(sp)
-; RV64I-NEXT: sb s1, 69(sp)
-; RV64I-NEXT: sb s0, 68(sp)
-; RV64I-NEXT: sb t6, 67(sp)
-; RV64I-NEXT: sb t5, 66(sp)
-; RV64I-NEXT: sb t4, 65(sp)
-; RV64I-NEXT: sb t3, 64(sp)
-; RV64I-NEXT: sb t2, 63(sp)
-; RV64I-NEXT: sb t1, 62(sp)
-; RV64I-NEXT: ld a0, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: sb a0, 61(sp)
-; RV64I-NEXT: ld a0, 16(sp) # 8-byte Folded Reload
-; RV64I-NEXT: sb a0, 60(sp)
-; RV64I-NEXT: ld a0, 24(sp) # 8-byte Folded Reload
-; RV64I-NEXT: sb a0, 59(sp)
-; RV64I-NEXT: ld a0, 32(sp) # 8-byte Folded Reload
-; RV64I-NEXT: sb a0, 58(sp)
-; RV64I-NEXT: ld a0, 40(sp) # 8-byte Folded Reload
-; RV64I-NEXT: sb a0, 57(sp)
-; RV64I-NEXT: ld a0, 48(sp) # 8-byte Folded Reload
-; RV64I-NEXT: sb a0, 56(sp)
-; RV64I-NEXT: andi a1, a1, 31
-; RV64I-NEXT: addi a0, sp, 56
-; RV64I-NEXT: add a6, a0, a1
-; RV64I-NEXT: lbu a0, 8(a6)
-; RV64I-NEXT: sd a0, 48(sp) # 8-byte Folded Spill
-; RV64I-NEXT: lbu a0, 9(a6)
-; RV64I-NEXT: sd a0, 40(sp) # 8-byte Folded Spill
-; RV64I-NEXT: lbu a0, 10(a6)
-; RV64I-NEXT: sd a0, 32(sp) # 8-byte Folded Spill
-; RV64I-NEXT: lbu a0, 11(a6)
-; RV64I-NEXT: sd a0, 24(sp) # 8-byte Folded Spill
-; RV64I-NEXT: lbu a0, 12(a6)
-; RV64I-NEXT: sd a0, 16(sp) # 8-byte Folded Spill
-; RV64I-NEXT: lbu a7, 13(a6)
-; RV64I-NEXT: lbu t0, 14(a6)
-; RV64I-NEXT: lbu t1, 15(a6)
-; RV64I-NEXT: lbu t2, 0(a6)
-; RV64I-NEXT: lbu t3, 1(a6)
-; RV64I-NEXT: lbu t4, 2(a6)
-; RV64I-NEXT: lbu t5, 3(a6)
-; RV64I-NEXT: lbu t6, 4(a6)
-; RV64I-NEXT: lbu s0, 5(a6)
-; RV64I-NEXT: lbu s1, 6(a6)
-; RV64I-NEXT: lbu s2, 7(a6)
-; RV64I-NEXT: lbu s3, 24(a6)
-; RV64I-NEXT: lbu s4, 25(a6)
-; RV64I-NEXT: lbu s5, 26(a6)
-; RV64I-NEXT: lbu s6, 27(a6)
-; RV64I-NEXT: lbu s7, 28(a6)
-; RV64I-NEXT: lbu s8, 29(a6)
-; RV64I-NEXT: lbu s9, 30(a6)
-; RV64I-NEXT: lbu s10, 31(a6)
-; RV64I-NEXT: lbu s11, 16(a6)
-; RV64I-NEXT: lbu ra, 17(a6)
-; RV64I-NEXT: lbu a5, 18(a6)
-; RV64I-NEXT: lbu a4, 19(a6)
-; RV64I-NEXT: lbu a0, 23(a6)
-; RV64I-NEXT: lbu a1, 22(a6)
-; RV64I-NEXT: lbu a3, 21(a6)
-; RV64I-NEXT: lbu a6, 20(a6)
-; RV64I-NEXT: sb a0, 23(a2)
+; RV64I-NEXT: lbu a4, 0(a0)
+; RV64I-NEXT: lbu a5, 2(a0)
+; RV64I-NEXT: lbu a6, 3(a0)
+; RV64I-NEXT: slli a3, a3, 8
+; RV64I-NEXT: or a3, a3, a4
+; RV64I-NEXT: slli a5, a5, 16
+; RV64I-NEXT: slli a6, a6, 24
+; RV64I-NEXT: or a4, a6, a5
+; RV64I-NEXT: or a3, a4, a3
+; RV64I-NEXT: lbu a4, 5(a0)
+; RV64I-NEXT: lbu a5, 4(a0)
+; RV64I-NEXT: lbu a6, 6(a0)
+; RV64I-NEXT: lbu a7, 7(a0)
+; RV64I-NEXT: slli a4, a4, 8
+; RV64I-NEXT: or a4, a4, a5
+; RV64I-NEXT: slli a6, a6, 16
+; RV64I-NEXT: slli a7, a7, 24
+; RV64I-NEXT: or a5, a7, a6
+; RV64I-NEXT: or a4, a5, a4
+; RV64I-NEXT: slli a4, a4, 32
+; RV64I-NEXT: or a3, a4, a3
+; RV64I-NEXT: lbu a4, 9(a0)
+; RV64I-NEXT: lbu a5, 8(a0)
+; RV64I-NEXT: lbu a6, 10(a0)
+; RV64I-NEXT: lbu a7, 11(a0)
+; RV64I-NEXT: slli a4, a4, 8
+; RV64I-NEXT: or a4, a4, a5
+; RV64I-NEXT: slli a6, a6, 16
+; RV64I-NEXT: slli a7, a7, 24
+; RV64I-NEXT: or a5, a7, a6
+; RV64I-NEXT: or a4, a5, a4
+; RV64I-NEXT: lbu a5, 13(a0)
+; RV64I-NEXT: lbu a6, 12(a0)
+; RV64I-NEXT: lbu a7, 14(a0)
+; RV64I-NEXT: lbu t0, 15(a0)
+; RV64I-NEXT: slli a5, a5, 8
+; RV64I-NEXT: or a5, a5, a6
+; RV64I-NEXT: slli a7, a7, 16
+; RV64I-NEXT: slli t0, t0, 24
+; RV64I-NEXT: or a6, t0, a7
+; RV64I-NEXT: or a5, a6, a5
+; RV64I-NEXT: slli a5, a5, 32
+; RV64I-NEXT: or a4, a5, a4
+; RV64I-NEXT: lbu a5, 17(a0)
+; RV64I-NEXT: lbu a6, 16(a0)
+; RV64I-NEXT: lbu a7, 18(a0)
+; RV64I-NEXT: lbu t0, 19(a0)
+; RV64I-NEXT: slli a5, a5, 8
+; RV64I-NEXT: or a5, a5, a6
+; RV64I-NEXT: slli a7, a7, 16
+; RV64I-NEXT: slli t0, t0, 24
+; RV64I-NEXT: or a6, t0, a7
+; RV64I-NEXT: or a5, a6, a5
+; RV64I-NEXT: lbu a6, 21(a0)
+; RV64I-NEXT: lbu a7, 20(a0)
+; RV64I-NEXT: lbu t0, 22(a0)
+; RV64I-NEXT: lbu t1, 23(a0)
+; RV64I-NEXT: slli a6, a6, 8
+; RV64I-NEXT: or a6, a6, a7
+; RV64I-NEXT: slli t0, t0, 16
+; RV64I-NEXT: slli t1, t1, 24
+; RV64I-NEXT: or a7, t1, t0
+; RV64I-NEXT: or a6, a7, a6
+; RV64I-NEXT: slli a6, a6, 32
+; RV64I-NEXT: or a5, a6, a5
+; RV64I-NEXT: lbu a6, 25(a0)
+; RV64I-NEXT: lbu a7, 24(a0)
+; RV64I-NEXT: lbu t0, 26(a0)
+; RV64I-NEXT: lbu t1, 27(a0)
+; RV64I-NEXT: slli a6, a6, 8
+; RV64I-NEXT: or a6, a6, a7
+; RV64I-NEXT: slli t0, t0, 16
+; RV64I-NEXT: slli t1, t1, 24
+; RV64I-NEXT: or a7, t1, t0
+; RV64I-NEXT: or a6, a7, a6
+; RV64I-NEXT: lbu a7, 29(a0)
+; RV64I-NEXT: lbu t0, 28(a0)
+; RV64I-NEXT: lbu t1, 30(a0)
+; RV64I-NEXT: lbu a0, 31(a0)
+; RV64I-NEXT: slli a7, a7, 8
+; RV64I-NEXT: or a7, a7, t0
+; RV64I-NEXT: slli t1, t1, 16
+; RV64I-NEXT: slli a0, a0, 24
+; RV64I-NEXT: or a0, a0, t1
+; RV64I-NEXT: or a0, a0, a7
+; RV64I-NEXT: slli a0, a0, 32
+; RV64I-NEXT: or a0, a0, a6
+; RV64I-NEXT: lbu a6, 1(a1)
+; RV64I-NEXT: lbu a7, 0(a1)
+; RV64I-NEXT: lbu t0, 2(a1)
+; RV64I-NEXT: lbu t1, 3(a1)
+; RV64I-NEXT: slli a6, a6, 8
+; RV64I-NEXT: or a6, a6, a7
+; RV64I-NEXT: slli t0, t0, 16
+; RV64I-NEXT: slli t1, t1, 24
+; RV64I-NEXT: or a7, t1, t0
+; RV64I-NEXT: or a6, a7, a6
+; RV64I-NEXT: lbu a7, 5(a1)
+; RV64I-NEXT: lbu t0, 4(a1)
+; RV64I-NEXT: lbu t1, 6(a1)
+; RV64I-NEXT: lbu a1, 7(a1)
+; RV64I-NEXT: slli a7, a7, 8
+; RV64I-NEXT: or a7, a7, t0
+; RV64I-NEXT: slli t1, t1, 16
+; RV64I-NEXT: slli a1, a1, 24
+; RV64I-NEXT: or a1, a1, t1
+; RV64I-NEXT: or a1, a1, a7
+; RV64I-NEXT: slli a1, a1, 32
+; RV64I-NEXT: or a1, a1, a6
+; RV64I-NEXT: sd zero, 56(sp)
+; RV64I-NEXT: sd zero, 48(sp)
+; RV64I-NEXT: sd zero, 40(sp)
+; RV64I-NEXT: sd zero, 32(sp)
+; RV64I-NEXT: sd a0, 24(sp)
+; RV64I-NEXT: sd a5, 16(sp)
+; RV64I-NEXT: sd a4, 8(sp)
+; RV64I-NEXT: sd a3, 0(sp)
+; RV64I-NEXT: andi a0, a1, 24
+; RV64I-NEXT: mv a3, sp
+; RV64I-NEXT: add a3, a3, a0
+; RV64I-NEXT: ld a4, 8(a3)
+; RV64I-NEXT: slli a1, a1, 3
+; RV64I-NEXT: srl a5, a4, a1
+; RV64I-NEXT: ld a6, 16(a3)
+; RV64I-NEXT: andi a0, a1, 56
+; RV64I-NEXT: xori a7, a0, 63
+; RV64I-NEXT: ld t0, 0(a3)
+; RV64I-NEXT: slli a0, a6, 1
+; RV64I-NEXT: sll a0, a0, a7
+; RV64I-NEXT: or a0, a5, a0
+; RV64I-NEXT: srl t0, t0, a1
+; RV64I-NEXT: slli a4, a4, 1
+; RV64I-NEXT: ld a3, 24(a3)
+; RV64I-NEXT: sll a4, a4, a7
+; RV64I-NEXT: or a4, t0, a4
+; RV64I-NEXT: srl a6, a6, a1
+; RV64I-NEXT: slli t1, a3, 1
+; RV64I-NEXT: sll a7, t1, a7
+; RV64I-NEXT: or a7, a6, a7
+; RV64I-NEXT: srl a1, a3, a1
+; RV64I-NEXT: sb a6, 16(a2)
+; RV64I-NEXT: sb a1, 24(a2)
+; RV64I-NEXT: sb t0, 0(a2)
+; RV64I-NEXT: sb a5, 8(a2)
+; RV64I-NEXT: srli a3, a1, 56
+; RV64I-NEXT: sb a3, 31(a2)
+; RV64I-NEXT: srli a3, a1, 48
+; RV64I-NEXT: sb a3, 30(a2)
+; RV64I-NEXT: srli a3, a1, 40
+; RV64I-NEXT: sb a3, 29(a2)
+; RV64I-NEXT: srli a3, a1, 32
+; RV64I-NEXT: sb a3, 28(a2)
+; RV64I-NEXT: srli a3, a1, 24
+; RV64I-NEXT: sb a3, 27(a2)
+; RV64I-NEXT: srli a3, a1, 16
+; RV64I-NEXT: sb a3, 26(a2)
+; RV64I-NEXT: srli a1, a1, 8
+; RV64I-NEXT: sb a1, 25(a2)
+; RV64I-NEXT: srli a1, a7, 56
+; RV64I-NEXT: sb a1, 23(a2)
+; RV64I-NEXT: srli a1, a7, 48
; RV64I-NEXT: sb a1, 22(a2)
-; RV64I-NEXT: sb a3, 21(a2)
-; RV64I-NEXT: sb a6, 20(a2)
-; RV64I-NEXT: sb a4, 19(a2)
-; RV64I-NEXT: sb a5, 18(a2)
-; RV64I-NEXT: sb ra, 17(a2)
-; RV64I-NEXT: sb s11, 16(a2)
-; RV64I-NEXT: sb s10, 31(a2)
-; RV64I-NEXT: sb s9, 30(a2)
-; RV64I-NEXT: sb s8, 29(a2)
-; RV64I-NEXT: sb s7, 28(a2)
-; RV64I-NEXT: sb s6, 27(a2)
-; RV64I-NEXT: sb s5, 26(a2)
-; RV64I-NEXT: sb s4, 25(a2)
-; RV64I-NEXT: sb s3, 24(a2)
-; RV64I-NEXT: sb s2, 7(a2)
-; RV64I-NEXT: sb s1, 6(a2)
-; RV64I-NEXT: sb s0, 5(a2)
-; RV64I-NEXT: sb t6, 4(a2)
-; RV64I-NEXT: sb t5, 3(a2)
-; RV64I-NEXT: sb t4, 2(a2)
-; RV64I-NEXT: sb t3, 1(a2)
-; RV64I-NEXT: sb t2, 0(a2)
-; RV64I-NEXT: sb t1, 15(a2)
-; RV64I-NEXT: sb t0, 14(a2)
-; RV64I-NEXT: sb a7, 13(a2)
-; RV64I-NEXT: ld a0, 16(sp) # 8-byte Folded Reload
-; RV64I-NEXT: sb a0, 12(a2)
-; RV64I-NEXT: ld a0, 24(sp) # 8-byte Folded Reload
-; RV64I-NEXT: sb a0, 11(a2)
-; RV64I-NEXT: ld a0, 32(sp) # 8-byte Folded Reload
-; RV64I-NEXT: sb a0, 10(a2)
-; RV64I-NEXT: ld a0, 40(sp) # 8-byte Folded Reload
+; RV64I-NEXT: srli a1, a7, 40
+; RV64I-NEXT: sb a1, 21(a2)
+; RV64I-NEXT: srli a1, a7, 32
+; RV64I-NEXT: sb a1, 20(a2)
+; RV64I-NEXT: srli a1, a7, 24
+; RV64I-NEXT: sb a1, 19(a2)
+; RV64I-NEXT: srli a1, a7, 16
+; RV64I-NEXT: sb a1, 18(a2)
+; RV64I-NEXT: srli a1, a7, 8
+; RV64I-NEXT: sb a1, 17(a2)
+; RV64I-NEXT: srli a1, a4, 56
+; RV64I-NEXT: sb a1, 7(a2)
+; RV64I-NEXT: srli a1, a4, 48
+; RV64I-NEXT: sb a1, 6(a2)
+; RV64I-NEXT: srli a1, a4, 40
+; RV64I-NEXT: sb a1, 5(a2)
+; RV64I-NEXT: srli a1, a4, 32
+; RV64I-NEXT: sb a1, 4(a2)
+; RV64I-NEXT: srli a1, a4, 24
+; RV64I-NEXT: sb a1, 3(a2)
+; RV64I-NEXT: srli a1, a4, 16
+; RV64I-NEXT: sb a1, 2(a2)
+; RV64I-NEXT: srli a4, a4, 8
+; RV64I-NEXT: sb a4, 1(a2)
+; RV64I-NEXT: srli a1, a0, 56
+; RV64I-NEXT: sb a1, 15(a2)
+; RV64I-NEXT: srli a1, a0, 48
+; RV64I-NEXT: sb a1, 14(a2)
+; RV64I-NEXT: srli a1, a0, 40
+; RV64I-NEXT: sb a1, 13(a2)
+; RV64I-NEXT: srli a1, a0, 32
+; RV64I-NEXT: sb a1, 12(a2)
+; RV64I-NEXT: srli a1, a0, 24
+; RV64I-NEXT: sb a1, 11(a2)
+; RV64I-NEXT: srli a1, a0, 16
+; RV64I-NEXT: sb a1, 10(a2)
+; RV64I-NEXT: srli a0, a0, 8
; RV64I-NEXT: sb a0, 9(a2)
-; RV64I-NEXT: ld a0, 48(sp) # 8-byte Folded Reload
-; RV64I-NEXT: sb a0, 8(a2)
-; RV64I-NEXT: ld ra, 216(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s0, 208(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s1, 200(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s2, 192(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s3, 184(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s4, 176(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s5, 168(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s6, 160(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s7, 152(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s8, 144(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s9, 136(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s10, 128(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s11, 120(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 224
+; RV64I-NEXT: addi sp, sp, 64
; RV64I-NEXT: ret
;
; RV32I-LABEL: lshr_32bytes:
; RV32I: # %bb.0:
-; RV32I-NEXT: addi sp, sp, -144
-; RV32I-NEXT: sw ra, 140(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s0, 136(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s1, 132(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s2, 128(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s3, 124(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s4, 120(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s5, 116(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s6, 112(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s7, 108(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s8, 104(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s9, 100(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s10, 96(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s11, 92(sp) # 4-byte Folded Spill
-; RV32I-NEXT: lbu a3, 0(a0)
-; RV32I-NEXT: sw a3, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT: addi sp, sp, -80
+; RV32I-NEXT: sw s0, 76(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s1, 72(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s2, 68(sp) # 4-byte Folded Spill
; RV32I-NEXT: lbu a3, 1(a0)
-; RV32I-NEXT: sw a3, 20(sp) # 4-byte Folded Spill
-; RV32I-NEXT: lbu a3, 2(a0)
-; RV32I-NEXT: sw a3, 16(sp) # 4-byte Folded Spill
-; RV32I-NEXT: lbu a3, 3(a0)
-; RV32I-NEXT: sw a3, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: lbu a3, 4(a0)
-; RV32I-NEXT: sw a3, 8(sp) # 4-byte Folded Spill
-; RV32I-NEXT: lbu a3, 5(a0)
-; RV32I-NEXT: sw a3, 4(sp) # 4-byte Folded Spill
-; RV32I-NEXT: lbu t1, 6(a0)
-; RV32I-NEXT: lbu t2, 7(a0)
-; RV32I-NEXT: lbu t3, 8(a0)
-; RV32I-NEXT: lbu t4, 9(a0)
-; RV32I-NEXT: lbu t5, 10(a0)
-; RV32I-NEXT: lbu t6, 11(a0)
-; RV32I-NEXT: lbu s0, 12(a0)
-; RV32I-NEXT: lbu s1, 13(a0)
-; RV32I-NEXT: lbu s2, 14(a0)
-; RV32I-NEXT: lbu s3, 15(a0)
-; RV32I-NEXT: lbu s4, 16(a0)
-; RV32I-NEXT: lbu s5, 17(a0)
-; RV32I-NEXT: lbu s6, 18(a0)
-; RV32I-NEXT: lbu s7, 19(a0)
-; RV32I-NEXT: lbu s8, 20(a0)
-; RV32I-NEXT: lbu s9, 21(a0)
-; RV32I-NEXT: lbu s10, 22(a0)
-; RV32I-NEXT: lbu s11, 23(a0)
-; RV32I-NEXT: lbu ra, 24(a0)
-; RV32I-NEXT: lbu t0, 25(a0)
-; RV32I-NEXT: lbu a7, 26(a0)
-; RV32I-NEXT: lbu a6, 27(a0)
-; RV32I-NEXT: lbu a5, 28(a0)
-; RV32I-NEXT: lbu a3, 31(a0)
-; RV32I-NEXT: lbu a4, 30(a0)
-; RV32I-NEXT: lbu a0, 29(a0)
-; RV32I-NEXT: lbu a1, 0(a1)
-; RV32I-NEXT: sb a3, 59(sp)
-; RV32I-NEXT: sb a4, 58(sp)
-; RV32I-NEXT: sb a0, 57(sp)
-; RV32I-NEXT: sb a5, 56(sp)
-; RV32I-NEXT: sb a6, 55(sp)
-; RV32I-NEXT: sb a7, 54(sp)
-; RV32I-NEXT: sb zero, 91(sp)
-; RV32I-NEXT: sb zero, 90(sp)
-; RV32I-NEXT: sb zero, 89(sp)
-; RV32I-NEXT: sb zero, 88(sp)
-; RV32I-NEXT: sb zero, 87(sp)
-; RV32I-NEXT: sb zero, 86(sp)
-; RV32I-NEXT: sb zero, 85(sp)
-; RV32I-NEXT: sb zero, 84(sp)
-; RV32I-NEXT: sb zero, 83(sp)
-; RV32I-NEXT: sb zero, 82(sp)
-; RV32I-NEXT: sb zero, 81(sp)
-; RV32I-NEXT: sb zero, 80(sp)
-; RV32I-NEXT: sb zero, 79(sp)
-; RV32I-NEXT: sb zero, 78(sp)
-; RV32I-NEXT: sb zero, 77(sp)
-; RV32I-NEXT: sb zero, 76(sp)
-; RV32I-NEXT: sb zero, 75(sp)
-; RV32I-NEXT: sb zero, 74(sp)
-; RV32I-NEXT: sb zero, 73(sp)
-; RV32I-NEXT: sb zero, 72(sp)
-; RV32I-NEXT: sb zero, 71(sp)
-; RV32I-NEXT: sb zero, 70(sp)
-; RV32I-NEXT: sb zero, 69(sp)
-; RV32I-NEXT: sb zero, 68(sp)
-; RV32I-NEXT: sb zero, 67(sp)
-; RV32I-NEXT: sb zero, 66(sp)
-; RV32I-NEXT: sb zero, 65(sp)
-; RV32I-NEXT: sb zero, 64(sp)
-; RV32I-NEXT: sb zero, 63(sp)
-; RV32I-NEXT: sb zero, 62(sp)
-; RV32I-NEXT: sb zero, 61(sp)
-; RV32I-NEXT: sb zero, 60(sp)
-; RV32I-NEXT: sb t0, 53(sp)
-; RV32I-NEXT: sb ra, 52(sp)
-; RV32I-NEXT: sb s11, 51(sp)
-; RV32I-NEXT: sb s10, 50(sp)
-; RV32I-NEXT: sb s9, 49(sp)
-; RV32I-NEXT: sb s8, 48(sp)
-; RV32I-NEXT: sb s7, 47(sp)
-; RV32I-NEXT: sb s6, 46(sp)
-; RV32I-NEXT: sb s5, 45(sp)
-; RV32I-NEXT: sb s4, 44(sp)
-; RV32I-NEXT: sb s3, 43(sp)
-; RV32I-NEXT: sb s2, 42(sp)
-; RV32I-NEXT: sb s1, 41(sp)
-; RV32I-NEXT: sb s0, 40(sp)
-; RV32I-NEXT: sb t6, 39(sp)
-; RV32I-NEXT: sb t5, 38(sp)
-; RV32I-NEXT: sb t4, 37(sp)
-; RV32I-NEXT: sb t3, 36(sp)
-; RV32I-NEXT: sb t2, 35(sp)
-; RV32I-NEXT: sb t1, 34(sp)
-; RV32I-NEXT: lw a0, 4(sp) # 4-byte Folded Reload
-; RV32I-NEXT: sb a0, 33(sp)
-; RV32I-NEXT: lw a0, 8(sp) # 4-byte Folded Reload
-; RV32I-NEXT: sb a0, 32(sp)
-; RV32I-NEXT: lw a0, 12(sp) # 4-byte Folded Reload
-; RV32I-NEXT: sb a0, 31(sp)
-; RV32I-NEXT: lw a0, 16(sp) # 4-byte Folded Reload
-; RV32I-NEXT: sb a0, 30(sp)
-; RV32I-NEXT: lw a0, 20(sp) # 4-byte Folded Reload
-; RV32I-NEXT: sb a0, 29(sp)
-; RV32I-NEXT: lw a0, 24(sp) # 4-byte Folded Reload
-; RV32I-NEXT: sb a0, 28(sp)
-; RV32I-NEXT: andi a1, a1, 31
-; RV32I-NEXT: addi a0, sp, 28
-; RV32I-NEXT: add a6, a0, a1
-; RV32I-NEXT: lbu a0, 6(a6)
-; RV32I-NEXT: sw a0, 24(sp) # 4-byte Folded Spill
-; RV32I-NEXT: lbu a0, 7(a6)
-; RV32I-NEXT: sw a0, 20(sp) # 4-byte Folded Spill
-; RV32I-NEXT: lbu a0, 4(a6)
-; RV32I-NEXT: sw a0, 16(sp) # 4-byte Folded Spill
-; RV32I-NEXT: lbu a0, 5(a6)
-; RV32I-NEXT: sw a0, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: lbu a0, 0(a6)
-; RV32I-NEXT: sw a0, 8(sp) # 4-byte Folded Spill
-; RV32I-NEXT: lbu a7, 1(a6)
-; RV32I-NEXT: lbu t0, 2(a6)
-; RV32I-NEXT: lbu t1, 3(a6)
-; RV32I-NEXT: lbu t2, 14(a6)
-; RV32I-NEXT: lbu t3, 15(a6)
-; RV32I-NEXT: lbu t4, 12(a6)
-; RV32I-NEXT: lbu t5, 13(a6)
-; RV32I-NEXT: lbu t6, 10(a6)
-; RV32I-NEXT: lbu s0, 11(a6)
-; RV32I-NEXT: lbu s1, 8(a6)
-; RV32I-NEXT: lbu s2, 9(a6)
-; RV32I-NEXT: lbu s3, 22(a6)
-; RV32I-NEXT: lbu s4, 23(a6)
-; RV32I-NEXT: lbu s5, 20(a6)
-; RV32I-NEXT: lbu s6, 21(a6)
-; RV32I-NEXT: lbu s7, 18(a6)
-; RV32I-NEXT: lbu s8, 19(a6)
-; RV32I-NEXT: lbu s9, 16(a6)
-; RV32I-NEXT: lbu s10, 17(a6)
-; RV32I-NEXT: lbu s11, 30(a6)
-; RV32I-NEXT: lbu ra, 31(a6)
-; RV32I-NEXT: lbu a5, 28(a6)
-; RV32I-NEXT: lbu a4, 29(a6)
-; RV32I-NEXT: lbu a0, 25(a6)
-; RV32I-NEXT: lbu a1, 24(a6)
-; RV32I-NEXT: lbu a3, 27(a6)
-; RV32I-NEXT: lbu a6, 26(a6)
-; RV32I-NEXT: sb a0, 25(a2)
-; RV32I-NEXT: sb a1, 24(a2)
-; RV32I-NEXT: sb a3, 27(a2)
-; RV32I-NEXT: sb a6, 26(a2)
-; RV32I-NEXT: sb a4, 29(a2)
+; RV32I-NEXT: lbu a4, 0(a0)
+; RV32I-NEXT: lbu a5, 2(a0)
+; RV32I-NEXT: lbu a6, 3(a0)
+; RV32I-NEXT: slli a3, a3, 8
+; RV32I-NEXT: or a3, a3, a4
+; RV32I-NEXT: slli a5, a5, 16
+; RV32I-NEXT: slli a6, a6, 24
+; RV32I-NEXT: or a4, a6, a5
+; RV32I-NEXT: or a3, a4, a3
+; RV32I-NEXT: lbu a4, 5(a0)
+; RV32I-NEXT: lbu a5, 4(a0)
+; RV32I-NEXT: lbu a6, 6(a0)
+; RV32I-NEXT: lbu a7, 7(a0)
+; RV32I-NEXT: slli a4, a4, 8
+; RV32I-NEXT: or a4, a4, a5
+; RV32I-NEXT: slli a6, a6, 16
+; RV32I-NEXT: slli a7, a7, 24
+; RV32I-NEXT: or a5, a7, a6
+; RV32I-NEXT: or a4, a5, a4
+; RV32I-NEXT: lbu a5, 9(a0)
+; RV32I-NEXT: lbu a6, 8(a0)
+; RV32I-NEXT: lbu a7, 10(a0)
+; RV32I-NEXT: lbu t0, 11(a0)
+; RV32I-NEXT: slli a5, a5, 8
+; RV32I-NEXT: or a5, a5, a6
+; RV32I-NEXT: slli a7, a7, 16
+; RV32I-NEXT: slli t0, t0, 24
+; RV32I-NEXT: or a6, t0, a7
+; RV32I-NEXT: or a5, a6, a5
+; RV32I-NEXT: lbu a6, 13(a0)
+; RV32I-NEXT: lbu a7, 12(a0)
+; RV32I-NEXT: lbu t0, 14(a0)
+; RV32I-NEXT: lbu t1, 15(a0)
+; RV32I-NEXT: slli a6, a6, 8
+; RV32I-NEXT: or a6, a6, a7
+; RV32I-NEXT: slli t0, t0, 16
+; RV32I-NEXT: slli t1, t1, 24
+; RV32I-NEXT: or a7, t1, t0
+; RV32I-NEXT: or a6, a7, a6
+; RV32I-NEXT: lbu a7, 17(a0)
+; RV32I-NEXT: lbu t0, 16(a0)
+; RV32I-NEXT: lbu t1, 18(a0)
+; RV32I-NEXT: lbu t2, 19(a0)
+; RV32I-NEXT: slli a7, a7, 8
+; RV32I-NEXT: or a7, a7, t0
+; RV32I-NEXT: slli t1, t1, 16
+; RV32I-NEXT: slli t2, t2, 24
+; RV32I-NEXT: or t0, t2, t1
+; RV32I-NEXT: or a7, t0, a7
+; RV32I-NEXT: lbu t0, 21(a0)
+; RV32I-NEXT: lbu t1, 20(a0)
+; RV32I-NEXT: lbu t2, 22(a0)
+; RV32I-NEXT: lbu t3, 23(a0)
+; RV32I-NEXT: slli t0, t0, 8
+; RV32I-NEXT: or t0, t0, t1
+; RV32I-NEXT: slli t2, t2, 16
+; RV32I-NEXT: slli t3, t3, 24
+; RV32I-NEXT: or t1, t3, t2
+; RV32I-NEXT: or t0, t1, t0
+; RV32I-NEXT: lbu t1, 25(a0)
+; RV32I-NEXT: lbu t2, 24(a0)
+; RV32I-NEXT: lbu t3, 26(a0)
+; RV32I-NEXT: lbu t4, 27(a0)
+; RV32I-NEXT: slli t1, t1, 8
+; RV32I-NEXT: or t1, t1, t2
+; RV32I-NEXT: slli t3, t3, 16
+; RV32I-NEXT: slli t4, t4, 24
+; RV32I-NEXT: or t2, t4, t3
+; RV32I-NEXT: or t1, t2, t1
+; RV32I-NEXT: lbu t2, 29(a0)
+; RV32I-NEXT: lbu t3, 28(a0)
+; RV32I-NEXT: lbu t4, 30(a0)
+; RV32I-NEXT: lbu a0, 31(a0)
+; RV32I-NEXT: slli t2, t2, 8
+; RV32I-NEXT: or t2, t2, t3
+; RV32I-NEXT: slli t4, t4, 16
+; RV32I-NEXT: slli a0, a0, 24
+; RV32I-NEXT: or a0, a0, t4
+; RV32I-NEXT: or a0, a0, t2
+; RV32I-NEXT: lbu t2, 1(a1)
+; RV32I-NEXT: lbu t3, 0(a1)
+; RV32I-NEXT: lbu t4, 2(a1)
+; RV32I-NEXT: lbu a1, 3(a1)
+; RV32I-NEXT: slli t2, t2, 8
+; RV32I-NEXT: or t2, t2, t3
+; RV32I-NEXT: slli t4, t4, 16
+; RV32I-NEXT: slli a1, a1, 24
+; RV32I-NEXT: or a1, a1, t4
+; RV32I-NEXT: or a1, a1, t2
+; RV32I-NEXT: sw zero, 60(sp)
+; RV32I-NEXT: sw zero, 56(sp)
+; RV32I-NEXT: sw zero, 52(sp)
+; RV32I-NEXT: sw zero, 48(sp)
+; RV32I-NEXT: sw zero, 44(sp)
+; RV32I-NEXT: sw zero, 40(sp)
+; RV32I-NEXT: sw zero, 36(sp)
+; RV32I-NEXT: sw zero, 32(sp)
+; RV32I-NEXT: sw a0, 28(sp)
+; RV32I-NEXT: sw t1, 24(sp)
+; RV32I-NEXT: sw t0, 20(sp)
+; RV32I-NEXT: sw a7, 16(sp)
+; RV32I-NEXT: sw a6, 12(sp)
+; RV32I-NEXT: sw a5, 8(sp)
+; RV32I-NEXT: sw a4, 4(sp)
+; RV32I-NEXT: sw a3, 0(sp)
+; RV32I-NEXT: andi a0, a1, 28
+; RV32I-NEXT: mv a3, sp
+; RV32I-NEXT: add a5, a3, a0
+; RV32I-NEXT: lw a3, 4(a5)
+; RV32I-NEXT: slli a6, a1, 3
+; RV32I-NEXT: srl a4, a3, a6
+; RV32I-NEXT: lw a7, 8(a5)
+; RV32I-NEXT: andi a0, a6, 24
+; RV32I-NEXT: xori t0, a0, 31
+; RV32I-NEXT: lw a1, 0(a5)
+; RV32I-NEXT: slli a0, a7, 1
+; RV32I-NEXT: sll a0, a0, t0
+; RV32I-NEXT: or a0, a4, a0
+; RV32I-NEXT: srl t1, a1, a6
+; RV32I-NEXT: slli a3, a3, 1
+; RV32I-NEXT: lw t2, 12(a5)
+; RV32I-NEXT: lw t3, 16(a5)
+; RV32I-NEXT: sll a1, a3, t0
+; RV32I-NEXT: or a1, t1, a1
+; RV32I-NEXT: srl t4, t2, a6
+; RV32I-NEXT: slli a3, t3, 1
+; RV32I-NEXT: sll a3, a3, t0
+; RV32I-NEXT: or a3, t4, a3
+; RV32I-NEXT: srl a7, a7, a6
+; RV32I-NEXT: slli t2, t2, 1
+; RV32I-NEXT: lw t5, 20(a5)
+; RV32I-NEXT: lw t6, 24(a5)
+; RV32I-NEXT: sll t2, t2, t0
+; RV32I-NEXT: or t2, a7, t2
+; RV32I-NEXT: srl s0, t5, a6
+; RV32I-NEXT: slli s1, t6, 1
+; RV32I-NEXT: sll s1, s1, t0
+; RV32I-NEXT: or s1, s0, s1
+; RV32I-NEXT: srl t3, t3, a6
+; RV32I-NEXT: slli t5, t5, 1
+; RV32I-NEXT: lw a5, 28(a5)
+; RV32I-NEXT: sll t5, t5, t0
+; RV32I-NEXT: or t5, t3, t5
+; RV32I-NEXT: srl t6, t6, a6
+; RV32I-NEXT: slli s2, a5, 1
+; RV32I-NEXT: sll t0, s2, t0
+; RV32I-NEXT: or t0, t6, t0
+; RV32I-NEXT: srl a5, a5, a6
+; RV32I-NEXT: sb t6, 24(a2)
; RV32I-NEXT: sb a5, 28(a2)
-; RV32I-NEXT: sb ra, 31(a2)
-; RV32I-NEXT: sb s11, 30(a2)
-; RV32I-NEXT: sb s10, 17(a2)
-; RV32I-NEXT: sb s9, 16(a2)
-; RV32I-NEXT: sb s8, 19(a2)
-; RV32I-NEXT: sb s7, 18(a2)
-; RV32I-NEXT: sb s6, 21(a2)
-; RV32I-NEXT: sb s5, 20(a2)
-; RV32I-NEXT: sb s4, 23(a2)
-; RV32I-NEXT: sb s3, 22(a2)
-; RV32I-NEXT: sb s2, 9(a2)
-; RV32I-NEXT: sb s1, 8(a2)
-; RV32I-NEXT: sb s0, 11(a2)
-; RV32I-NEXT: sb t6, 10(a2)
-; RV32I-NEXT: sb t5, 13(a2)
+; RV32I-NEXT: sb t3, 16(a2)
+; RV32I-NEXT: sb s0, 20(a2)
+; RV32I-NEXT: sb a7, 8(a2)
; RV32I-NEXT: sb t4, 12(a2)
-; RV32I-NEXT: sb t3, 15(a2)
-; RV32I-NEXT: sb t2, 14(a2)
-; RV32I-NEXT: sb t1, 3(a2)
-; RV32I-NEXT: sb t0, 2(a2)
-; RV32I-NEXT: sb a7, 1(a2)
-; RV32I-NEXT: lw a0, 8(sp) # 4-byte Folded Reload
-; RV32I-NEXT: sb a0, 0(a2)
-; RV32I-NEXT: lw a0, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT: sb t1, 0(a2)
+; RV32I-NEXT: sb a4, 4(a2)
+; RV32I-NEXT: srli a4, a5, 24
+; RV32I-NEXT: sb a4, 31(a2)
+; RV32I-NEXT: srli a4, a5, 16
+; RV32I-NEXT: sb a4, 30(a2)
+; RV32I-NEXT: srli a5, a5, 8
+; RV32I-NEXT: sb a5, 29(a2)
+; RV32I-NEXT: srli a4, t0, 24
+; RV32I-NEXT: sb a4, 27(a2)
+; RV32I-NEXT: srli a4, t0, 16
+; RV32I-NEXT: sb a4, 26(a2)
+; RV32I-NEXT: srli a4, t0, 8
+; RV32I-NEXT: sb a4, 25(a2)
+; RV32I-NEXT: srli a4, t5, 24
+; RV32I-NEXT: sb a4, 19(a2)
+; RV32I-NEXT: srli a4, t5, 16
+; RV32I-NEXT: sb a4, 18(a2)
+; RV32I-NEXT: srli a4, t5, 8
+; RV32I-NEXT: sb a4, 17(a2)
+; RV32I-NEXT: srli a4, s1, 24
+; RV32I-NEXT: sb a4, 23(a2)
+; RV32I-NEXT: srli a4, s1, 16
+; RV32I-NEXT: sb a4, 22(a2)
+; RV32I-NEXT: srli s1, s1, 8
+; RV32I-NEXT: sb s1, 21(a2)
+; RV32I-NEXT: srli a4, t2, 24
+; RV32I-NEXT: sb a4, 11(a2)
+; RV32I-NEXT: srli a4, t2, 16
+; RV32I-NEXT: sb a4, 10(a2)
+; RV32I-NEXT: srli a4, t2, 8
+; RV32I-NEXT: sb a4, 9(a2)
+; RV32I-NEXT: srli a4, a3, 24
+; RV32I-NEXT: sb a4, 15(a2)
+; RV32I-NEXT: srli a4, a3, 16
+; RV32I-NEXT: sb a4, 14(a2)
+; RV32I-NEXT: srli a3, a3, 8
+; RV32I-NEXT: sb a3, 13(a2)
+; RV32I-NEXT: srli a3, a1, 24
+; RV32I-NEXT: sb a3, 3(a2)
+; RV32I-NEXT: srli a3, a1, 16
+; RV32I-NEXT: sb a3, 2(a2)
+; RV32I-NEXT: srli a1, a1, 8
+; RV32I-NEXT: sb a1, 1(a2)
+; RV32I-NEXT: srli a1, a0, 24
+; RV32I-NEXT: sb a1, 7(a2)
+; RV32I-NEXT: srli a1, a0, 16
+; RV32I-NEXT: sb a1, 6(a2)
+; RV32I-NEXT: srli a0, a0, 8
; RV32I-NEXT: sb a0, 5(a2)
-; RV32I-NEXT: lw a0, 16(sp) # 4-byte Folded Reload
-; RV32I-NEXT: sb a0, 4(a2)
-; RV32I-NEXT: lw a0, 20(sp) # 4-byte Folded Reload
-; RV32I-NEXT: sb a0, 7(a2)
-; RV32I-NEXT: lw a0, 24(sp) # 4-byte Folded Reload
-; RV32I-NEXT: sb a0, 6(a2)
-; RV32I-NEXT: lw ra, 140(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s0, 136(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s1, 132(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s2, 128(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s3, 124(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s4, 120(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s5, 116(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s6, 112(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s7, 108(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s8, 104(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s9, 100(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s10, 96(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s11, 92(sp) # 4-byte Folded Reload
-; RV32I-NEXT: addi sp, sp, 144
+; RV32I-NEXT: lw s0, 76(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s1, 72(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s2, 68(sp) # 4-byte Folded Reload
+; RV32I-NEXT: addi sp, sp, 80
; RV32I-NEXT: ret
%src = load i256, ptr %src.ptr, align 1
%byteOff = load i256, ptr %byteOff.ptr, align 1
@@ -1712,441 +2400,1167 @@ define void @lshr_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
store i256 %res, ptr %dst, align 1
ret void
}
-define void @shl_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
-; RV64I-LABEL: shl_32bytes:
+
+define void @lshr_32bytes_wordOff(ptr %src.ptr, ptr %wordOff.ptr, ptr %dst) nounwind {
+; RV64I-LABEL: lshr_32bytes_wordOff:
; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -224
-; RV64I-NEXT: sd ra, 216(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s0, 208(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s1, 200(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s2, 192(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s3, 184(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s4, 176(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s5, 168(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s6, 160(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s7, 152(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s8, 144(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s9, 136(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s10, 128(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s11, 120(sp) # 8-byte Folded Spill
-; RV64I-NEXT: lbu a3, 0(a0)
-; RV64I-NEXT: sd a3, 48(sp) # 8-byte Folded Spill
+; RV64I-NEXT: addi sp, sp, -64
; RV64I-NEXT: lbu a3, 1(a0)
-; RV64I-NEXT: sd a3, 40(sp) # 8-byte Folded Spill
-; RV64I-NEXT: lbu a3, 2(a0)
-; RV64I-NEXT: sd a3, 32(sp) # 8-byte Folded Spill
-; RV64I-NEXT: lbu a3, 3(a0)
-; RV64I-NEXT: sd a3, 24(sp) # 8-byte Folded Spill
-; RV64I-NEXT: lbu a3, 4(a0)
-; RV64I-NEXT: sd a3, 16(sp) # 8-byte Folded Spill
-; RV64I-NEXT: lbu a3, 5(a0)
-; RV64I-NEXT: sd a3, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: lbu t1, 6(a0)
-; RV64I-NEXT: lbu t2, 7(a0)
-; RV64I-NEXT: lbu t3, 8(a0)
-; RV64I-NEXT: lbu t4, 9(a0)
-; RV64I-NEXT: lbu t5, 10(a0)
-; RV64I-NEXT: lbu t6, 11(a0)
-; RV64I-NEXT: lbu s0, 12(a0)
-; RV64I-NEXT: lbu s1, 13(a0)
-; RV64I-NEXT: lbu s2, 14(a0)
-; RV64I-NEXT: lbu s3, 15(a0)
-; RV64I-NEXT: lbu s4, 16(a0)
-; RV64I-NEXT: lbu s5, 17(a0)
-; RV64I-NEXT: lbu s6, 18(a0)
-; RV64I-NEXT: lbu s7, 19(a0)
-; RV64I-NEXT: lbu s8, 20(a0)
-; RV64I-NEXT: lbu s9, 21(a0)
-; RV64I-NEXT: lbu s10, 22(a0)
-; RV64I-NEXT: lbu s11, 23(a0)
-; RV64I-NEXT: lbu ra, 24(a0)
-; RV64I-NEXT: lbu t0, 25(a0)
-; RV64I-NEXT: lbu a7, 26(a0)
-; RV64I-NEXT: lbu a6, 27(a0)
-; RV64I-NEXT: lbu a5, 28(a0)
-; RV64I-NEXT: lbu a3, 31(a0)
-; RV64I-NEXT: lbu a4, 30(a0)
-; RV64I-NEXT: lbu a0, 29(a0)
-; RV64I-NEXT: lbu a1, 0(a1)
-; RV64I-NEXT: sb a3, 119(sp)
-; RV64I-NEXT: sb a4, 118(sp)
-; RV64I-NEXT: sb a0, 117(sp)
-; RV64I-NEXT: sb a5, 116(sp)
-; RV64I-NEXT: sb a6, 115(sp)
-; RV64I-NEXT: sb a7, 114(sp)
-; RV64I-NEXT: sb zero, 87(sp)
-; RV64I-NEXT: sb zero, 86(sp)
-; RV64I-NEXT: sb zero, 85(sp)
-; RV64I-NEXT: sb zero, 84(sp)
-; RV64I-NEXT: sb zero, 83(sp)
-; RV64I-NEXT: sb zero, 82(sp)
-; RV64I-NEXT: sb zero, 81(sp)
-; RV64I-NEXT: sb zero, 80(sp)
-; RV64I-NEXT: sb zero, 79(sp)
-; RV64I-NEXT: sb zero, 78(sp)
-; RV64I-NEXT: sb zero, 77(sp)
-; RV64I-NEXT: sb zero, 76(sp)
-; RV64I-NEXT: sb zero, 75(sp)
-; RV64I-NEXT: sb zero, 74(sp)
-; RV64I-NEXT: sb zero, 73(sp)
-; RV64I-NEXT: sb zero, 72(sp)
-; RV64I-NEXT: sb zero, 71(sp)
-; RV64I-NEXT: sb zero, 70(sp)
-; RV64I-NEXT: sb zero, 69(sp)
-; RV64I-NEXT: sb zero, 68(sp)
-; RV64I-NEXT: sb zero, 67(sp)
-; RV64I-NEXT: sb zero, 66(sp)
-; RV64I-NEXT: sb zero, 65(sp)
-; RV64I-NEXT: sb zero, 64(sp)
-; RV64I-NEXT: sb zero, 63(sp)
-; RV64I-NEXT: sb zero, 62(sp)
-; RV64I-NEXT: sb zero, 61(sp)
-; RV64I-NEXT: sb zero, 60(sp)
-; RV64I-NEXT: sb zero, 59(sp)
-; RV64I-NEXT: sb zero, 58(sp)
-; RV64I-NEXT: sb zero, 57(sp)
-; RV64I-NEXT: sb zero, 56(sp)
-; RV64I-NEXT: sb t0, 113(sp)
-; RV64I-NEXT: sb ra, 112(sp)
-; RV64I-NEXT: sb s11, 111(sp)
-; RV64I-NEXT: sb s10, 110(sp)
-; RV64I-NEXT: sb s9, 109(sp)
-; RV64I-NEXT: sb s8, 108(sp)
-; RV64I-NEXT: sb s7, 107(sp)
-; RV64I-NEXT: sb s6, 106(sp)
-; RV64I-NEXT: sb s5, 105(sp)
-; RV64I-NEXT: sb s4, 104(sp)
-; RV64I-NEXT: sb s3, 103(sp)
-; RV64I-NEXT: sb s2, 102(sp)
-; RV64I-NEXT: sb s1, 101(sp)
-; RV64I-NEXT: sb s0, 100(sp)
-; RV64I-NEXT: sb t6, 99(sp)
-; RV64I-NEXT: sb t5, 98(sp)
-; RV64I-NEXT: sb t4, 97(sp)
-; RV64I-NEXT: sb t3, 96(sp)
-; RV64I-NEXT: sb t2, 95(sp)
-; RV64I-NEXT: sb t1, 94(sp)
-; RV64I-NEXT: ld a0, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: sb a0, 93(sp)
-; RV64I-NEXT: ld a0, 16(sp) # 8-byte Folded Reload
-; RV64I-NEXT: sb a0, 92(sp)
-; RV64I-NEXT: ld a0, 24(sp) # 8-byte Folded Reload
-; RV64I-NEXT: sb a0, 91(sp)
-; RV64I-NEXT: ld a0, 32(sp) # 8-byte Folded Reload
-; RV64I-NEXT: sb a0, 90(sp)
-; RV64I-NEXT: ld a0, 40(sp) # 8-byte Folded Reload
-; RV64I-NEXT: sb a0, 89(sp)
-; RV64I-NEXT: ld a0, 48(sp) # 8-byte Folded Reload
-; RV64I-NEXT: sb a0, 88(sp)
-; RV64I-NEXT: andi a1, a1, 31
-; RV64I-NEXT: addi a0, sp, 88
-; RV64I-NEXT: sub a6, a0, a1
-; RV64I-NEXT: lbu a0, 8(a6)
-; RV64I-NEXT: sd a0, 48(sp) # 8-byte Folded Spill
-; RV64I-NEXT: lbu a0, 9(a6)
-; RV64I-NEXT: sd a0, 40(sp) # 8-byte Folded Spill
-; RV64I-NEXT: lbu a0, 10(a6)
-; RV64I-NEXT: sd a0, 32(sp) # 8-byte Folded Spill
-; RV64I-NEXT: lbu a0, 11(a6)
-; RV64I-NEXT: sd a0, 24(sp) # 8-byte Folded Spill
-; RV64I-NEXT: lbu a0, 12(a6)
-; RV64I-NEXT: sd a0, 16(sp) # 8-byte Folded Spill
-; RV64I-NEXT: lbu a7, 13(a6)
-; RV64I-NEXT: lbu t0, 14(a6)
-; RV64I-NEXT: lbu t1, 15(a6)
-; RV64I-NEXT: lbu t2, 0(a6)
-; RV64I-NEXT: lbu t3, 1(a6)
-; RV64I-NEXT: lbu t4, 2(a6)
-; RV64I-NEXT: lbu t5, 3(a6)
-; RV64I-NEXT: lbu t6, 4(a6)
-; RV64I-NEXT: lbu s0, 5(a6)
-; RV64I-NEXT: lbu s1, 6(a6)
-; RV64I-NEXT: lbu s2, 7(a6)
-; RV64I-NEXT: lbu s3, 24(a6)
-; RV64I-NEXT: lbu s4, 25(a6)
-; RV64I-NEXT: lbu s5, 26(a6)
-; RV64I-NEXT: lbu s6, 27(a6)
-; RV64I-NEXT: lbu s7, 28(a6)
-; RV64I-NEXT: lbu s8, 29(a6)
-; RV64I-NEXT: lbu s9, 30(a6)
-; RV64I-NEXT: lbu s10, 31(a6)
-; RV64I-NEXT: lbu s11, 16(a6)
-; RV64I-NEXT: lbu ra, 17(a6)
-; RV64I-NEXT: lbu a5, 18(a6)
-; RV64I-NEXT: lbu a4, 19(a6)
-; RV64I-NEXT: lbu a0, 23(a6)
-; RV64I-NEXT: lbu a1, 22(a6)
-; RV64I-NEXT: lbu a3, 21(a6)
-; RV64I-NEXT: lbu a6, 20(a6)
-; RV64I-NEXT: sb a0, 23(a2)
-; RV64I-NEXT: sb a1, 22(a2)
-; RV64I-NEXT: sb a3, 21(a2)
-; RV64I-NEXT: sb a6, 20(a2)
-; RV64I-NEXT: sb a4, 19(a2)
+; RV64I-NEXT: lbu a4, 0(a0)
+; RV64I-NEXT: lbu a5, 2(a0)
+; RV64I-NEXT: lbu a6, 3(a0)
+; RV64I-NEXT: slli a3, a3, 8
+; RV64I-NEXT: or a3, a3, a4
+; RV64I-NEXT: slli a5, a5, 16
+; RV64I-NEXT: slli a6, a6, 24
+; RV64I-NEXT: or a4, a6, a5
+; RV64I-NEXT: or a3, a4, a3
+; RV64I-NEXT: lbu a4, 5(a0)
+; RV64I-NEXT: lbu a5, 4(a0)
+; RV64I-NEXT: lbu a6, 6(a0)
+; RV64I-NEXT: lbu a7, 7(a0)
+; RV64I-NEXT: slli a4, a4, 8
+; RV64I-NEXT: or a4, a4, a5
+; RV64I-NEXT: slli a6, a6, 16
+; RV64I-NEXT: slli a7, a7, 24
+; RV64I-NEXT: or a5, a7, a6
+; RV64I-NEXT: or a4, a5, a4
+; RV64I-NEXT: slli a4, a4, 32
+; RV64I-NEXT: or a3, a4, a3
+; RV64I-NEXT: lbu a4, 9(a0)
+; RV64I-NEXT: lbu a5, 8(a0)
+; RV64I-NEXT: lbu a6, 10(a0)
+; RV64I-NEXT: lbu a7, 11(a0)
+; RV64I-NEXT: slli a4, a4, 8
+; RV64I-NEXT: or a4, a4, a5
+; RV64I-NEXT: slli a6, a6, 16
+; RV64I-NEXT: slli a7, a7, 24
+; RV64I-NEXT: or a5, a7, a6
+; RV64I-NEXT: or a4, a5, a4
+; RV64I-NEXT: lbu a5, 13(a0)
+; RV64I-NEXT: lbu a6, 12(a0)
+; RV64I-NEXT: lbu a7, 14(a0)
+; RV64I-NEXT: lbu t0, 15(a0)
+; RV64I-NEXT: slli a5, a5, 8
+; RV64I-NEXT: or a5, a5, a6
+; RV64I-NEXT: slli a7, a7, 16
+; RV64I-NEXT: slli t0, t0, 24
+; RV64I-NEXT: or a6, t0, a7
+; RV64I-NEXT: or a5, a6, a5
+; RV64I-NEXT: slli a5, a5, 32
+; RV64I-NEXT: or a4, a5, a4
+; RV64I-NEXT: lbu a5, 17(a0)
+; RV64I-NEXT: lbu a6, 16(a0)
+; RV64I-NEXT: lbu a7, 18(a0)
+; RV64I-NEXT: lbu t0, 19(a0)
+; RV64I-NEXT: slli a5, a5, 8
+; RV64I-NEXT: or a5, a5, a6
+; RV64I-NEXT: slli a7, a7, 16
+; RV64I-NEXT: slli t0, t0, 24
+; RV64I-NEXT: or a6, t0, a7
+; RV64I-NEXT: or a5, a6, a5
+; RV64I-NEXT: lbu a6, 21(a0)
+; RV64I-NEXT: lbu a7, 20(a0)
+; RV64I-NEXT: lbu t0, 22(a0)
+; RV64I-NEXT: lbu t1, 23(a0)
+; RV64I-NEXT: slli a6, a6, 8
+; RV64I-NEXT: or a6, a6, a7
+; RV64I-NEXT: slli t0, t0, 16
+; RV64I-NEXT: slli t1, t1, 24
+; RV64I-NEXT: or a7, t1, t0
+; RV64I-NEXT: or a6, a7, a6
+; RV64I-NEXT: slli a6, a6, 32
+; RV64I-NEXT: or a5, a6, a5
+; RV64I-NEXT: lbu a6, 25(a0)
+; RV64I-NEXT: lbu a7, 24(a0)
+; RV64I-NEXT: lbu t0, 26(a0)
+; RV64I-NEXT: lbu t1, 27(a0)
+; RV64I-NEXT: slli a6, a6, 8
+; RV64I-NEXT: or a6, a6, a7
+; RV64I-NEXT: slli t0, t0, 16
+; RV64I-NEXT: slli t1, t1, 24
+; RV64I-NEXT: or a7, t1, t0
+; RV64I-NEXT: or a6, a7, a6
+; RV64I-NEXT: lbu a7, 29(a0)
+; RV64I-NEXT: lbu t0, 28(a0)
+; RV64I-NEXT: lbu t1, 30(a0)
+; RV64I-NEXT: lbu a0, 31(a0)
+; RV64I-NEXT: slli a7, a7, 8
+; RV64I-NEXT: or a7, a7, t0
+; RV64I-NEXT: slli t1, t1, 16
+; RV64I-NEXT: slli a0, a0, 24
+; RV64I-NEXT: or a0, a0, t1
+; RV64I-NEXT: or a0, a0, a7
+; RV64I-NEXT: slli a0, a0, 32
+; RV64I-NEXT: or a0, a0, a6
+; RV64I-NEXT: lbu a6, 1(a1)
+; RV64I-NEXT: lbu a7, 0(a1)
+; RV64I-NEXT: lbu t0, 2(a1)
+; RV64I-NEXT: lbu t1, 3(a1)
+; RV64I-NEXT: slli a6, a6, 8
+; RV64I-NEXT: or a6, a6, a7
+; RV64I-NEXT: slli t0, t0, 16
+; RV64I-NEXT: slli t1, t1, 24
+; RV64I-NEXT: or a7, t1, t0
+; RV64I-NEXT: or a6, a7, a6
+; RV64I-NEXT: lbu a7, 5(a1)
+; RV64I-NEXT: lbu t0, 4(a1)
+; RV64I-NEXT: lbu t1, 6(a1)
+; RV64I-NEXT: lbu a1, 7(a1)
+; RV64I-NEXT: slli a7, a7, 8
+; RV64I-NEXT: or a7, a7, t0
+; RV64I-NEXT: slli t1, t1, 16
+; RV64I-NEXT: slli a1, a1, 24
+; RV64I-NEXT: or a1, a1, t1
+; RV64I-NEXT: or a1, a1, a7
+; RV64I-NEXT: slli a1, a1, 32
+; RV64I-NEXT: or a1, a1, a6
+; RV64I-NEXT: sd zero, 56(sp)
+; RV64I-NEXT: sd zero, 48(sp)
+; RV64I-NEXT: sd zero, 40(sp)
+; RV64I-NEXT: sd zero, 32(sp)
+; RV64I-NEXT: sd a0, 24(sp)
+; RV64I-NEXT: sd a5, 16(sp)
+; RV64I-NEXT: sd a4, 8(sp)
+; RV64I-NEXT: sd a3, 0(sp)
+; RV64I-NEXT: slli a0, a1, 2
+; RV64I-NEXT: andi a0, a0, 24
+; RV64I-NEXT: mv a3, sp
+; RV64I-NEXT: add a3, a3, a0
+; RV64I-NEXT: ld a4, 8(a3)
+; RV64I-NEXT: slli a5, a1, 5
+; RV64I-NEXT: srl a1, a4, a5
+; RV64I-NEXT: ld a6, 16(a3)
+; RV64I-NEXT: andi a0, a5, 32
+; RV64I-NEXT: xori a7, a0, 63
+; RV64I-NEXT: ld t0, 0(a3)
+; RV64I-NEXT: slli a0, a6, 1
+; RV64I-NEXT: sll a0, a0, a7
+; RV64I-NEXT: or a0, a1, a0
+; RV64I-NEXT: srl t0, t0, a5
+; RV64I-NEXT: slli a4, a4, 1
+; RV64I-NEXT: ld a3, 24(a3)
+; RV64I-NEXT: sll a4, a4, a7
+; RV64I-NEXT: or a4, t0, a4
+; RV64I-NEXT: srl a6, a6, a5
+; RV64I-NEXT: slli t1, a3, 1
+; RV64I-NEXT: sll a7, t1, a7
+; RV64I-NEXT: or a7, a6, a7
+; RV64I-NEXT: srl a3, a3, a5
+; RV64I-NEXT: sb a6, 16(a2)
+; RV64I-NEXT: sb a3, 24(a2)
+; RV64I-NEXT: sb t0, 0(a2)
+; RV64I-NEXT: sb a1, 8(a2)
+; RV64I-NEXT: srli a5, a6, 24
+; RV64I-NEXT: sb a5, 19(a2)
+; RV64I-NEXT: srli a5, a6, 16
; RV64I-NEXT: sb a5, 18(a2)
-; RV64I-NEXT: sb ra, 17(a2)
-; RV64I-NEXT: sb s11, 16(a2)
-; RV64I-NEXT: sb s10, 31(a2)
-; RV64I-NEXT: sb s9, 30(a2)
-; RV64I-NEXT: sb s8, 29(a2)
-; RV64I-NEXT: sb s7, 28(a2)
-; RV64I-NEXT: sb s6, 27(a2)
-; RV64I-NEXT: sb s5, 26(a2)
-; RV64I-NEXT: sb s4, 25(a2)
-; RV64I-NEXT: sb s3, 24(a2)
-; RV64I-NEXT: sb s2, 7(a2)
-; RV64I-NEXT: sb s1, 6(a2)
-; RV64I-NEXT: sb s0, 5(a2)
-; RV64I-NEXT: sb t6, 4(a2)
-; RV64I-NEXT: sb t5, 3(a2)
-; RV64I-NEXT: sb t4, 2(a2)
-; RV64I-NEXT: sb t3, 1(a2)
-; RV64I-NEXT: sb t2, 0(a2)
-; RV64I-NEXT: sb t1, 15(a2)
-; RV64I-NEXT: sb t0, 14(a2)
-; RV64I-NEXT: sb a7, 13(a2)
-; RV64I-NEXT: ld a0, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT: srli a5, a6, 8
+; RV64I-NEXT: sb a5, 17(a2)
+; RV64I-NEXT: srli a5, a3, 56
+; RV64I-NEXT: sb a5, 31(a2)
+; RV64I-NEXT: srli a5, a3, 48
+; RV64I-NEXT: sb a5, 30(a2)
+; RV64I-NEXT: srli a5, a3, 40
+; RV64I-NEXT: sb a5, 29(a2)
+; RV64I-NEXT: srli a5, a3, 32
+; RV64I-NEXT: sb a5, 28(a2)
+; RV64I-NEXT: srli a5, a3, 24
+; RV64I-NEXT: sb a5, 27(a2)
+; RV64I-NEXT: srli a5, a3, 16
+; RV64I-NEXT: sb a5, 26(a2)
+; RV64I-NEXT: srli a3, a3, 8
+; RV64I-NEXT: sb a3, 25(a2)
+; RV64I-NEXT: srli a3, t0, 24
+; RV64I-NEXT: sb a3, 3(a2)
+; RV64I-NEXT: srli a3, t0, 16
+; RV64I-NEXT: sb a3, 2(a2)
+; RV64I-NEXT: srli a3, t0, 8
+; RV64I-NEXT: sb a3, 1(a2)
+; RV64I-NEXT: srli a3, a1, 24
+; RV64I-NEXT: sb a3, 11(a2)
+; RV64I-NEXT: srli a3, a1, 16
+; RV64I-NEXT: sb a3, 10(a2)
+; RV64I-NEXT: srli a1, a1, 8
+; RV64I-NEXT: sb a1, 9(a2)
+; RV64I-NEXT: srli a1, a7, 56
+; RV64I-NEXT: sb a1, 23(a2)
+; RV64I-NEXT: srli a1, a7, 48
+; RV64I-NEXT: sb a1, 22(a2)
+; RV64I-NEXT: srli a1, a7, 40
+; RV64I-NEXT: sb a1, 21(a2)
+; RV64I-NEXT: srli a1, a7, 32
+; RV64I-NEXT: sb a1, 20(a2)
+; RV64I-NEXT: srli a1, a4, 56
+; RV64I-NEXT: sb a1, 7(a2)
+; RV64I-NEXT: srli a1, a4, 48
+; RV64I-NEXT: sb a1, 6(a2)
+; RV64I-NEXT: srli a1, a4, 40
+; RV64I-NEXT: sb a1, 5(a2)
+; RV64I-NEXT: srli a4, a4, 32
+; RV64I-NEXT: sb a4, 4(a2)
+; RV64I-NEXT: srli a1, a0, 56
+; RV64I-NEXT: sb a1, 15(a2)
+; RV64I-NEXT: srli a1, a0, 48
+; RV64I-NEXT: sb a1, 14(a2)
+; RV64I-NEXT: srli a1, a0, 40
+; RV64I-NEXT: sb a1, 13(a2)
+; RV64I-NEXT: srli a0, a0, 32
; RV64I-NEXT: sb a0, 12(a2)
-; RV64I-NEXT: ld a0, 24(sp) # 8-byte Folded Reload
-; RV64I-NEXT: sb a0, 11(a2)
-; RV64I-NEXT: ld a0, 32(sp) # 8-byte Folded Reload
-; RV64I-NEXT: sb a0, 10(a2)
-; RV64I-NEXT: ld a0, 40(sp) # 8-byte Folded Reload
+; RV64I-NEXT: addi sp, sp, 64
+; RV64I-NEXT: ret
+;
+; RV32I-LABEL: lshr_32bytes_wordOff:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi sp, sp, -64
+; RV32I-NEXT: lbu a3, 1(a0)
+; RV32I-NEXT: lbu a4, 0(a0)
+; RV32I-NEXT: lbu a5, 2(a0)
+; RV32I-NEXT: lbu a6, 3(a0)
+; RV32I-NEXT: slli a3, a3, 8
+; RV32I-NEXT: or a3, a3, a4
+; RV32I-NEXT: slli a5, a5, 16
+; RV32I-NEXT: slli a6, a6, 24
+; RV32I-NEXT: or a4, a6, a5
+; RV32I-NEXT: or a3, a4, a3
+; RV32I-NEXT: lbu a4, 5(a0)
+; RV32I-NEXT: lbu a5, 4(a0)
+; RV32I-NEXT: lbu a6, 6(a0)
+; RV32I-NEXT: lbu a7, 7(a0)
+; RV32I-NEXT: slli a4, a4, 8
+; RV32I-NEXT: or a4, a4, a5
+; RV32I-NEXT: slli a6, a6, 16
+; RV32I-NEXT: slli a7, a7, 24
+; RV32I-NEXT: or a5, a7, a6
+; RV32I-NEXT: or a4, a5, a4
+; RV32I-NEXT: lbu a5, 9(a0)
+; RV32I-NEXT: lbu a6, 8(a0)
+; RV32I-NEXT: lbu a7, 10(a0)
+; RV32I-NEXT: lbu t0, 11(a0)
+; RV32I-NEXT: slli a5, a5, 8
+; RV32I-NEXT: or a5, a5, a6
+; RV32I-NEXT: slli a7, a7, 16
+; RV32I-NEXT: slli t0, t0, 24
+; RV32I-NEXT: or a6, t0, a7
+; RV32I-NEXT: or a5, a6, a5
+; RV32I-NEXT: lbu a6, 13(a0)
+; RV32I-NEXT: lbu a7, 12(a0)
+; RV32I-NEXT: lbu t0, 14(a0)
+; RV32I-NEXT: lbu t1, 15(a0)
+; RV32I-NEXT: slli a6, a6, 8
+; RV32I-NEXT: or a6, a6, a7
+; RV32I-NEXT: slli t0, t0, 16
+; RV32I-NEXT: slli t1, t1, 24
+; RV32I-NEXT: or a7, t1, t0
+; RV32I-NEXT: or a6, a7, a6
+; RV32I-NEXT: lbu a7, 17(a0)
+; RV32I-NEXT: lbu t0, 16(a0)
+; RV32I-NEXT: lbu t1, 18(a0)
+; RV32I-NEXT: lbu t2, 19(a0)
+; RV32I-NEXT: slli a7, a7, 8
+; RV32I-NEXT: or a7, a7, t0
+; RV32I-NEXT: slli t1, t1, 16
+; RV32I-NEXT: slli t2, t2, 24
+; RV32I-NEXT: or t0, t2, t1
+; RV32I-NEXT: or a7, t0, a7
+; RV32I-NEXT: lbu t0, 21(a0)
+; RV32I-NEXT: lbu t1, 20(a0)
+; RV32I-NEXT: lbu t2, 22(a0)
+; RV32I-NEXT: lbu t3, 23(a0)
+; RV32I-NEXT: slli t0, t0, 8
+; RV32I-NEXT: or t0, t0, t1
+; RV32I-NEXT: slli t2, t2, 16
+; RV32I-NEXT: slli t3, t3, 24
+; RV32I-NEXT: or t1, t3, t2
+; RV32I-NEXT: or t0, t1, t0
+; RV32I-NEXT: lbu t1, 25(a0)
+; RV32I-NEXT: lbu t2, 24(a0)
+; RV32I-NEXT: lbu t3, 26(a0)
+; RV32I-NEXT: lbu t4, 27(a0)
+; RV32I-NEXT: slli t1, t1, 8
+; RV32I-NEXT: or t1, t1, t2
+; RV32I-NEXT: slli t3, t3, 16
+; RV32I-NEXT: slli t4, t4, 24
+; RV32I-NEXT: or t2, t4, t3
+; RV32I-NEXT: or t1, t2, t1
+; RV32I-NEXT: lbu t2, 29(a0)
+; RV32I-NEXT: lbu t3, 28(a0)
+; RV32I-NEXT: lbu t4, 30(a0)
+; RV32I-NEXT: lbu a0, 31(a0)
+; RV32I-NEXT: slli t2, t2, 8
+; RV32I-NEXT: or t2, t2, t3
+; RV32I-NEXT: slli t4, t4, 16
+; RV32I-NEXT: slli a0, a0, 24
+; RV32I-NEXT: or a0, a0, t4
+; RV32I-NEXT: or a0, a0, t2
+; RV32I-NEXT: lbu a1, 0(a1)
+; RV32I-NEXT: sw zero, 60(sp)
+; RV32I-NEXT: sw zero, 56(sp)
+; RV32I-NEXT: sw zero, 52(sp)
+; RV32I-NEXT: sw zero, 48(sp)
+; RV32I-NEXT: sw zero, 44(sp)
+; RV32I-NEXT: sw zero, 40(sp)
+; RV32I-NEXT: sw zero, 36(sp)
+; RV32I-NEXT: sw zero, 32(sp)
+; RV32I-NEXT: sw a0, 28(sp)
+; RV32I-NEXT: sw t1, 24(sp)
+; RV32I-NEXT: sw t0, 20(sp)
+; RV32I-NEXT: sw a7, 16(sp)
+; RV32I-NEXT: sw a6, 12(sp)
+; RV32I-NEXT: sw a5, 8(sp)
+; RV32I-NEXT: sw a4, 4(sp)
+; RV32I-NEXT: sw a3, 0(sp)
+; RV32I-NEXT: slli a1, a1, 2
+; RV32I-NEXT: andi a1, a1, 28
+; RV32I-NEXT: mv a0, sp
+; RV32I-NEXT: add a3, a0, a1
+; RV32I-NEXT: lw a0, 4(a3)
+; RV32I-NEXT: lw a1, 0(a3)
+; RV32I-NEXT: lw a4, 12(a3)
+; RV32I-NEXT: lw a5, 8(a3)
+; RV32I-NEXT: lw a6, 24(a3)
+; RV32I-NEXT: lw a7, 28(a3)
+; RV32I-NEXT: lw t0, 16(a3)
+; RV32I-NEXT: lw a3, 20(a3)
+; RV32I-NEXT: sb a6, 24(a2)
+; RV32I-NEXT: sb a7, 28(a2)
+; RV32I-NEXT: sb t0, 16(a2)
+; RV32I-NEXT: sb a3, 20(a2)
+; RV32I-NEXT: sb a5, 8(a2)
+; RV32I-NEXT: sb a4, 12(a2)
+; RV32I-NEXT: sb a1, 0(a2)
+; RV32I-NEXT: sb a0, 4(a2)
+; RV32I-NEXT: srli t1, a6, 24
+; RV32I-NEXT: sb t1, 27(a2)
+; RV32I-NEXT: srli t1, a6, 16
+; RV32I-NEXT: sb t1, 26(a2)
+; RV32I-NEXT: srli a6, a6, 8
+; RV32I-NEXT: sb a6, 25(a2)
+; RV32I-NEXT: srli a6, a7, 24
+; RV32I-NEXT: sb a6, 31(a2)
+; RV32I-NEXT: srli a6, a7, 16
+; RV32I-NEXT: sb a6, 30(a2)
+; RV32I-NEXT: srli a6, a7, 8
+; RV32I-NEXT: sb a6, 29(a2)
+; RV32I-NEXT: srli a6, t0, 24
+; RV32I-NEXT: sb a6, 19(a2)
+; RV32I-NEXT: srli a6, t0, 16
+; RV32I-NEXT: sb a6, 18(a2)
+; RV32I-NEXT: srli a6, t0, 8
+; RV32I-NEXT: sb a6, 17(a2)
+; RV32I-NEXT: srli a6, a3, 24
+; RV32I-NEXT: sb a6, 23(a2)
+; RV32I-NEXT: srli a6, a3, 16
+; RV32I-NEXT: sb a6, 22(a2)
+; RV32I-NEXT: srli a3, a3, 8
+; RV32I-NEXT: sb a3, 21(a2)
+; RV32I-NEXT: srli a3, a5, 24
+; RV32I-NEXT: sb a3, 11(a2)
+; RV32I-NEXT: srli a3, a5, 16
+; RV32I-NEXT: sb a3, 10(a2)
+; RV32I-NEXT: srli a5, a5, 8
+; RV32I-NEXT: sb a5, 9(a2)
+; RV32I-NEXT: srli a3, a4, 24
+; RV32I-NEXT: sb a3, 15(a2)
+; RV32I-NEXT: srli a3, a4, 16
+; RV32I-NEXT: sb a3, 14(a2)
+; RV32I-NEXT: srli a4, a4, 8
+; RV32I-NEXT: sb a4, 13(a2)
+; RV32I-NEXT: srli a3, a1, 24
+; RV32I-NEXT: sb a3, 3(a2)
+; RV32I-NEXT: srli a3, a1, 16
+; RV32I-NEXT: sb a3, 2(a2)
+; RV32I-NEXT: srli a1, a1, 8
+; RV32I-NEXT: sb a1, 1(a2)
+; RV32I-NEXT: srli a1, a0, 24
+; RV32I-NEXT: sb a1, 7(a2)
+; RV32I-NEXT: srli a1, a0, 16
+; RV32I-NEXT: sb a1, 6(a2)
+; RV32I-NEXT: srli a0, a0, 8
+; RV32I-NEXT: sb a0, 5(a2)
+; RV32I-NEXT: addi sp, sp, 64
+; RV32I-NEXT: ret
+ %src = load i256, ptr %src.ptr, align 1
+ %wordOff = load i256, ptr %wordOff.ptr, align 1
+ %bitOff = shl i256 %wordOff, 5
+ %res = lshr i256 %src, %bitOff
+ store i256 %res, ptr %dst, align 1
+ ret void
+}
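+
The new `_wordOff` variant scales the loaded offset by 32 bits (`shl i256 %wordOff, 5` in the IR above), so the shift amount is always a whole number of 32-bit words. On RV32I that reduces the whole operation to an indexed copy out of the stack buffer (the `slli a1, a1, 2` / `andi a1, a1, 28` sequence), with no srl/sll pairs at all; RV64I still needs one pair, since a 32-bit-granular offset can straddle a 64-bit register (the `andi a0, a5, 32` / `xori a7, a0, 63` lines). A sketch of the RV32 path, again assuming a little-endian host and with illustrative names:

#include <stdint.h>
#include <string.h>

static void lshr_32bytes_wordOff_sketch(const uint8_t src[32],
                                        uint32_t wordOff, uint8_t dst[32]) {
    uint32_t buf[16] = {0};               /* sw zero, 32..60(sp)        */
    memcpy(buf, src, 32);                 /* assembled words at 0..28(sp) */
    memcpy(dst, buf + (wordOff & 7), 32); /* slli 2 / andi 28 indexing  */
}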
+
+define void @lshr_32bytes_dwordOff(ptr %src.ptr, ptr %dwordOff.ptr, ptr %dst) nounwind {
+; RV64I-LABEL: lshr_32bytes_dwordOff:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -64
+; RV64I-NEXT: lbu a3, 1(a0)
+; RV64I-NEXT: lbu a4, 0(a0)
+; RV64I-NEXT: lbu a5, 2(a0)
+; RV64I-NEXT: lbu a6, 3(a0)
+; RV64I-NEXT: slli a3, a3, 8
+; RV64I-NEXT: or a3, a3, a4
+; RV64I-NEXT: slli a5, a5, 16
+; RV64I-NEXT: slli a6, a6, 24
+; RV64I-NEXT: or a4, a6, a5
+; RV64I-NEXT: or a3, a4, a3
+; RV64I-NEXT: lbu a4, 5(a0)
+; RV64I-NEXT: lbu a5, 4(a0)
+; RV64I-NEXT: lbu a6, 6(a0)
+; RV64I-NEXT: lbu a7, 7(a0)
+; RV64I-NEXT: slli a4, a4, 8
+; RV64I-NEXT: or a4, a4, a5
+; RV64I-NEXT: slli a6, a6, 16
+; RV64I-NEXT: slli a7, a7, 24
+; RV64I-NEXT: or a5, a7, a6
+; RV64I-NEXT: or a4, a5, a4
+; RV64I-NEXT: slli a4, a4, 32
+; RV64I-NEXT: or a3, a4, a3
+; RV64I-NEXT: lbu a4, 9(a0)
+; RV64I-NEXT: lbu a5, 8(a0)
+; RV64I-NEXT: lbu a6, 10(a0)
+; RV64I-NEXT: lbu a7, 11(a0)
+; RV64I-NEXT: slli a4, a4, 8
+; RV64I-NEXT: or a4, a4, a5
+; RV64I-NEXT: slli a6, a6, 16
+; RV64I-NEXT: slli a7, a7, 24
+; RV64I-NEXT: or a5, a7, a6
+; RV64I-NEXT: or a4, a5, a4
+; RV64I-NEXT: lbu a5, 13(a0)
+; RV64I-NEXT: lbu a6, 12(a0)
+; RV64I-NEXT: lbu a7, 14(a0)
+; RV64I-NEXT: lbu t0, 15(a0)
+; RV64I-NEXT: slli a5, a5, 8
+; RV64I-NEXT: or a5, a5, a6
+; RV64I-NEXT: slli a7, a7, 16
+; RV64I-NEXT: slli t0, t0, 24
+; RV64I-NEXT: or a6, t0, a7
+; RV64I-NEXT: or a5, a6, a5
+; RV64I-NEXT: slli a5, a5, 32
+; RV64I-NEXT: or a4, a5, a4
+; RV64I-NEXT: lbu a5, 17(a0)
+; RV64I-NEXT: lbu a6, 16(a0)
+; RV64I-NEXT: lbu a7, 18(a0)
+; RV64I-NEXT: lbu t0, 19(a0)
+; RV64I-NEXT: slli a5, a5, 8
+; RV64I-NEXT: or a5, a5, a6
+; RV64I-NEXT: slli a7, a7, 16
+; RV64I-NEXT: slli t0, t0, 24
+; RV64I-NEXT: or a6, t0, a7
+; RV64I-NEXT: or a5, a6, a5
+; RV64I-NEXT: lbu a6, 21(a0)
+; RV64I-NEXT: lbu a7, 20(a0)
+; RV64I-NEXT: lbu t0, 22(a0)
+; RV64I-NEXT: lbu t1, 23(a0)
+; RV64I-NEXT: slli a6, a6, 8
+; RV64I-NEXT: or a6, a6, a7
+; RV64I-NEXT: slli t0, t0, 16
+; RV64I-NEXT: slli t1, t1, 24
+; RV64I-NEXT: or a7, t1, t0
+; RV64I-NEXT: or a6, a7, a6
+; RV64I-NEXT: slli a6, a6, 32
+; RV64I-NEXT: or a5, a6, a5
+; RV64I-NEXT: lbu a6, 25(a0)
+; RV64I-NEXT: lbu a7, 24(a0)
+; RV64I-NEXT: lbu t0, 26(a0)
+; RV64I-NEXT: lbu t1, 27(a0)
+; RV64I-NEXT: slli a6, a6, 8
+; RV64I-NEXT: or a6, a6, a7
+; RV64I-NEXT: slli t0, t0, 16
+; RV64I-NEXT: slli t1, t1, 24
+; RV64I-NEXT: or a7, t1, t0
+; RV64I-NEXT: or a6, a7, a6
+; RV64I-NEXT: lbu a7, 29(a0)
+; RV64I-NEXT: lbu t0, 28(a0)
+; RV64I-NEXT: lbu t1, 30(a0)
+; RV64I-NEXT: lbu a0, 31(a0)
+; RV64I-NEXT: slli a7, a7, 8
+; RV64I-NEXT: or a7, a7, t0
+; RV64I-NEXT: slli t1, t1, 16
+; RV64I-NEXT: slli a0, a0, 24
+; RV64I-NEXT: or a0, a0, t1
+; RV64I-NEXT: or a0, a0, a7
+; RV64I-NEXT: slli a0, a0, 32
+; RV64I-NEXT: or a0, a0, a6
+; RV64I-NEXT: lbu a1, 0(a1)
+; RV64I-NEXT: sd zero, 56(sp)
+; RV64I-NEXT: sd zero, 48(sp)
+; RV64I-NEXT: sd zero, 40(sp)
+; RV64I-NEXT: sd zero, 32(sp)
+; RV64I-NEXT: sd a0, 24(sp)
+; RV64I-NEXT: sd a5, 16(sp)
+; RV64I-NEXT: sd a4, 8(sp)
+; RV64I-NEXT: sd a3, 0(sp)
+; RV64I-NEXT: slli a1, a1, 3
+; RV64I-NEXT: andi a1, a1, 24
+; RV64I-NEXT: mv a0, sp
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: ld a1, 16(a0)
+; RV64I-NEXT: ld a3, 24(a0)
+; RV64I-NEXT: ld a4, 0(a0)
+; RV64I-NEXT: ld a0, 8(a0)
+; RV64I-NEXT: sb a1, 16(a2)
+; RV64I-NEXT: sb a3, 24(a2)
+; RV64I-NEXT: sb a4, 0(a2)
+; RV64I-NEXT: sb a0, 8(a2)
+; RV64I-NEXT: srli a5, a1, 56
+; RV64I-NEXT: sb a5, 23(a2)
+; RV64I-NEXT: srli a5, a1, 48
+; RV64I-NEXT: sb a5, 22(a2)
+; RV64I-NEXT: srli a5, a1, 40
+; RV64I-NEXT: sb a5, 21(a2)
+; RV64I-NEXT: srli a5, a1, 32
+; RV64I-NEXT: sb a5, 20(a2)
+; RV64I-NEXT: srli a5, a1, 24
+; RV64I-NEXT: sb a5, 19(a2)
+; RV64I-NEXT: srli a5, a1, 16
+; RV64I-NEXT: sb a5, 18(a2)
+; RV64I-NEXT: srli a1, a1, 8
+; RV64I-NEXT: sb a1, 17(a2)
+; RV64I-NEXT: srli a1, a3, 56
+; RV64I-NEXT: sb a1, 31(a2)
+; RV64I-NEXT: srli a1, a3, 48
+; RV64I-NEXT: sb a1, 30(a2)
+; RV64I-NEXT: srli a1, a3, 40
+; RV64I-NEXT: sb a1, 29(a2)
+; RV64I-NEXT: srli a1, a3, 32
+; RV64I-NEXT: sb a1, 28(a2)
+; RV64I-NEXT: srli a1, a3, 24
+; RV64I-NEXT: sb a1, 27(a2)
+; RV64I-NEXT: srli a1, a3, 16
+; RV64I-NEXT: sb a1, 26(a2)
+; RV64I-NEXT: srli a3, a3, 8
+; RV64I-NEXT: sb a3, 25(a2)
+; RV64I-NEXT: srli a1, a4, 56
+; RV64I-NEXT: sb a1, 7(a2)
+; RV64I-NEXT: srli a1, a4, 48
+; RV64I-NEXT: sb a1, 6(a2)
+; RV64I-NEXT: srli a1, a4, 40
+; RV64I-NEXT: sb a1, 5(a2)
+; RV64I-NEXT: srli a1, a4, 32
+; RV64I-NEXT: sb a1, 4(a2)
+; RV64I-NEXT: srli a1, a4, 24
+; RV64I-NEXT: sb a1, 3(a2)
+; RV64I-NEXT: srli a1, a4, 16
+; RV64I-NEXT: sb a1, 2(a2)
+; RV64I-NEXT: srli a4, a4, 8
+; RV64I-NEXT: sb a4, 1(a2)
+; RV64I-NEXT: srli a1, a0, 56
+; RV64I-NEXT: sb a1, 15(a2)
+; RV64I-NEXT: srli a1, a0, 48
+; RV64I-NEXT: sb a1, 14(a2)
+; RV64I-NEXT: srli a1, a0, 40
+; RV64I-NEXT: sb a1, 13(a2)
+; RV64I-NEXT: srli a1, a0, 32
+; RV64I-NEXT: sb a1, 12(a2)
+; RV64I-NEXT: srli a1, a0, 24
+; RV64I-NEXT: sb a1, 11(a2)
+; RV64I-NEXT: srli a1, a0, 16
+; RV64I-NEXT: sb a1, 10(a2)
+; RV64I-NEXT: srli a0, a0, 8
; RV64I-NEXT: sb a0, 9(a2)
-; RV64I-NEXT: ld a0, 48(sp) # 8-byte Folded Reload
+; RV64I-NEXT: addi sp, sp, 64
+; RV64I-NEXT: ret
+;
+; RV32I-LABEL: lshr_32bytes_dwordOff:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi sp, sp, -64
+; RV32I-NEXT: lbu a3, 1(a0)
+; RV32I-NEXT: lbu a4, 0(a0)
+; RV32I-NEXT: lbu a5, 2(a0)
+; RV32I-NEXT: lbu a6, 3(a0)
+; RV32I-NEXT: slli a3, a3, 8
+; RV32I-NEXT: or a3, a3, a4
+; RV32I-NEXT: slli a5, a5, 16
+; RV32I-NEXT: slli a6, a6, 24
+; RV32I-NEXT: or a4, a6, a5
+; RV32I-NEXT: or a3, a4, a3
+; RV32I-NEXT: lbu a4, 5(a0)
+; RV32I-NEXT: lbu a5, 4(a0)
+; RV32I-NEXT: lbu a6, 6(a0)
+; RV32I-NEXT: lbu a7, 7(a0)
+; RV32I-NEXT: slli a4, a4, 8
+; RV32I-NEXT: or a4, a4, a5
+; RV32I-NEXT: slli a6, a6, 16
+; RV32I-NEXT: slli a7, a7, 24
+; RV32I-NEXT: or a5, a7, a6
+; RV32I-NEXT: or a4, a5, a4
+; RV32I-NEXT: lbu a5, 9(a0)
+; RV32I-NEXT: lbu a6, 8(a0)
+; RV32I-NEXT: lbu a7, 10(a0)
+; RV32I-NEXT: lbu t0, 11(a0)
+; RV32I-NEXT: slli a5, a5, 8
+; RV32I-NEXT: or a5, a5, a6
+; RV32I-NEXT: slli a7, a7, 16
+; RV32I-NEXT: slli t0, t0, 24
+; RV32I-NEXT: or a6, t0, a7
+; RV32I-NEXT: or a5, a6, a5
+; RV32I-NEXT: lbu a6, 13(a0)
+; RV32I-NEXT: lbu a7, 12(a0)
+; RV32I-NEXT: lbu t0, 14(a0)
+; RV32I-NEXT: lbu t1, 15(a0)
+; RV32I-NEXT: slli a6, a6, 8
+; RV32I-NEXT: or a6, a6, a7
+; RV32I-NEXT: slli t0, t0, 16
+; RV32I-NEXT: slli t1, t1, 24
+; RV32I-NEXT: or a7, t1, t0
+; RV32I-NEXT: or a6, a7, a6
+; RV32I-NEXT: lbu a7, 17(a0)
+; RV32I-NEXT: lbu t0, 16(a0)
+; RV32I-NEXT: lbu t1, 18(a0)
+; RV32I-NEXT: lbu t2, 19(a0)
+; RV32I-NEXT: slli a7, a7, 8
+; RV32I-NEXT: or a7, a7, t0
+; RV32I-NEXT: slli t1, t1, 16
+; RV32I-NEXT: slli t2, t2, 24
+; RV32I-NEXT: or t0, t2, t1
+; RV32I-NEXT: or a7, t0, a7
+; RV32I-NEXT: lbu t0, 21(a0)
+; RV32I-NEXT: lbu t1, 20(a0)
+; RV32I-NEXT: lbu t2, 22(a0)
+; RV32I-NEXT: lbu t3, 23(a0)
+; RV32I-NEXT: slli t0, t0, 8
+; RV32I-NEXT: or t0, t0, t1
+; RV32I-NEXT: slli t2, t2, 16
+; RV32I-NEXT: slli t3, t3, 24
+; RV32I-NEXT: or t1, t3, t2
+; RV32I-NEXT: or t0, t1, t0
+; RV32I-NEXT: lbu t1, 25(a0)
+; RV32I-NEXT: lbu t2, 24(a0)
+; RV32I-NEXT: lbu t3, 26(a0)
+; RV32I-NEXT: lbu t4, 27(a0)
+; RV32I-NEXT: slli t1, t1, 8
+; RV32I-NEXT: or t1, t1, t2
+; RV32I-NEXT: slli t3, t3, 16
+; RV32I-NEXT: slli t4, t4, 24
+; RV32I-NEXT: or t2, t4, t3
+; RV32I-NEXT: or t1, t2, t1
+; RV32I-NEXT: lbu t2, 29(a0)
+; RV32I-NEXT: lbu t3, 28(a0)
+; RV32I-NEXT: lbu t4, 30(a0)
+; RV32I-NEXT: lbu a0, 31(a0)
+; RV32I-NEXT: slli t2, t2, 8
+; RV32I-NEXT: or t2, t2, t3
+; RV32I-NEXT: slli t4, t4, 16
+; RV32I-NEXT: slli a0, a0, 24
+; RV32I-NEXT: or a0, a0, t4
+; RV32I-NEXT: or a0, a0, t2
+; RV32I-NEXT: lbu a1, 0(a1)
+; RV32I-NEXT: sw zero, 60(sp)
+; RV32I-NEXT: sw zero, 56(sp)
+; RV32I-NEXT: sw zero, 52(sp)
+; RV32I-NEXT: sw zero, 48(sp)
+; RV32I-NEXT: sw zero, 44(sp)
+; RV32I-NEXT: sw zero, 40(sp)
+; RV32I-NEXT: sw zero, 36(sp)
+; RV32I-NEXT: sw zero, 32(sp)
+; RV32I-NEXT: sw a0, 28(sp)
+; RV32I-NEXT: sw t1, 24(sp)
+; RV32I-NEXT: sw t0, 20(sp)
+; RV32I-NEXT: sw a7, 16(sp)
+; RV32I-NEXT: sw a6, 12(sp)
+; RV32I-NEXT: sw a5, 8(sp)
+; RV32I-NEXT: sw a4, 4(sp)
+; RV32I-NEXT: sw a3, 0(sp)
+; RV32I-NEXT: slli a1, a1, 3
+; RV32I-NEXT: andi a1, a1, 24
+; RV32I-NEXT: mv a0, sp
+; RV32I-NEXT: add a3, a0, a1
+; RV32I-NEXT: lw a0, 4(a3)
+; RV32I-NEXT: lw a1, 0(a3)
+; RV32I-NEXT: lw a4, 12(a3)
+; RV32I-NEXT: lw a5, 8(a3)
+; RV32I-NEXT: lw a6, 24(a3)
+; RV32I-NEXT: lw a7, 28(a3)
+; RV32I-NEXT: lw t0, 16(a3)
+; RV32I-NEXT: lw a3, 20(a3)
+; RV32I-NEXT: sb a6, 24(a2)
+; RV32I-NEXT: sb a7, 28(a2)
+; RV32I-NEXT: sb t0, 16(a2)
+; RV32I-NEXT: sb a3, 20(a2)
+; RV32I-NEXT: sb a5, 8(a2)
+; RV32I-NEXT: sb a4, 12(a2)
+; RV32I-NEXT: sb a1, 0(a2)
+; RV32I-NEXT: sb a0, 4(a2)
+; RV32I-NEXT: srli t1, a6, 24
+; RV32I-NEXT: sb t1, 27(a2)
+; RV32I-NEXT: srli t1, a6, 16
+; RV32I-NEXT: sb t1, 26(a2)
+; RV32I-NEXT: srli a6, a6, 8
+; RV32I-NEXT: sb a6, 25(a2)
+; RV32I-NEXT: srli a6, a7, 24
+; RV32I-NEXT: sb a6, 31(a2)
+; RV32I-NEXT: srli a6, a7, 16
+; RV32I-NEXT: sb a6, 30(a2)
+; RV32I-NEXT: srli a6, a7, 8
+; RV32I-NEXT: sb a6, 29(a2)
+; RV32I-NEXT: srli a6, t0, 24
+; RV32I-NEXT: sb a6, 19(a2)
+; RV32I-NEXT: srli a6, t0, 16
+; RV32I-NEXT: sb a6, 18(a2)
+; RV32I-NEXT: srli a6, t0, 8
+; RV32I-NEXT: sb a6, 17(a2)
+; RV32I-NEXT: srli a6, a3, 24
+; RV32I-NEXT: sb a6, 23(a2)
+; RV32I-NEXT: srli a6, a3, 16
+; RV32I-NEXT: sb a6, 22(a2)
+; RV32I-NEXT: srli a3, a3, 8
+; RV32I-NEXT: sb a3, 21(a2)
+; RV32I-NEXT: srli a3, a5, 24
+; RV32I-NEXT: sb a3, 11(a2)
+; RV32I-NEXT: srli a3, a5, 16
+; RV32I-NEXT: sb a3, 10(a2)
+; RV32I-NEXT: srli a5, a5, 8
+; RV32I-NEXT: sb a5, 9(a2)
+; RV32I-NEXT: srli a3, a4, 24
+; RV32I-NEXT: sb a3, 15(a2)
+; RV32I-NEXT: srli a3, a4, 16
+; RV32I-NEXT: sb a3, 14(a2)
+; RV32I-NEXT: srli a4, a4, 8
+; RV32I-NEXT: sb a4, 13(a2)
+; RV32I-NEXT: srli a3, a1, 24
+; RV32I-NEXT: sb a3, 3(a2)
+; RV32I-NEXT: srli a3, a1, 16
+; RV32I-NEXT: sb a3, 2(a2)
+; RV32I-NEXT: srli a1, a1, 8
+; RV32I-NEXT: sb a1, 1(a2)
+; RV32I-NEXT: srli a1, a0, 24
+; RV32I-NEXT: sb a1, 7(a2)
+; RV32I-NEXT: srli a1, a0, 16
+; RV32I-NEXT: sb a1, 6(a2)
+; RV32I-NEXT: srli a0, a0, 8
+; RV32I-NEXT: sb a0, 5(a2)
+; RV32I-NEXT: addi sp, sp, 64
+; RV32I-NEXT: ret
+ %src = load i256, ptr %src.ptr, align 1
+ %dwordOff = load i256, ptr %dwordOff.ptr, align 1
+ %bitOff = shl i256 %dwordOff, 6
+ %res = lshr i256 %src, %bitOff
+ store i256 %res, ptr %dst, align 1
+ ret void
+}
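+
`_dwordOff` scales by 64 bits (`shl i256 %dwordOff, 6`), making the shift dword-aligned, so even RV64I degenerates to a pure indexed copy (`slli a1, a1, 3` / `andi a1, a1, 24`): for example, dwordOff = 2 selects buffer bytes 16..47, i.e. src[16..31] into dst[0..15] and zeros into dst[16..31]. The same sketch one granularity level up, under the same assumptions (hypothetical name, little-endian host):

#include <stdint.h>
#include <string.h>

static void lshr_32bytes_dwordOff_sketch(const uint8_t src[32],
                                         uint64_t dwordOff, uint8_t dst[32]) {
    uint64_t buf[8] = {0};                 /* sd zero, 32..56(sp)       */
    memcpy(buf, src, 32);
    memcpy(dst, buf + (dwordOff & 3), 32); /* slli 3 / andi 24 indexing */
}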
+
+define void @shl_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
+; RV64I-LABEL: shl_32bytes:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -64
+; RV64I-NEXT: lbu a3, 1(a0)
+; RV64I-NEXT: lbu a4, 0(a0)
+; RV64I-NEXT: lbu a5, 2(a0)
+; RV64I-NEXT: lbu a6, 3(a0)
+; RV64I-NEXT: slli a3, a3, 8
+; RV64I-NEXT: or a3, a3, a4
+; RV64I-NEXT: slli a5, a5, 16
+; RV64I-NEXT: slli a6, a6, 24
+; RV64I-NEXT: or a4, a6, a5
+; RV64I-NEXT: or a3, a4, a3
+; RV64I-NEXT: lbu a4, 5(a0)
+; RV64I-NEXT: lbu a5, 4(a0)
+; RV64I-NEXT: lbu a6, 6(a0)
+; RV64I-NEXT: lbu a7, 7(a0)
+; RV64I-NEXT: slli a4, a4, 8
+; RV64I-NEXT: or a4, a4, a5
+; RV64I-NEXT: slli a6, a6, 16
+; RV64I-NEXT: slli a7, a7, 24
+; RV64I-NEXT: or a5, a7, a6
+; RV64I-NEXT: or a4, a5, a4
+; RV64I-NEXT: slli a4, a4, 32
+; RV64I-NEXT: or a3, a4, a3
+; RV64I-NEXT: lbu a4, 9(a0)
+; RV64I-NEXT: lbu a5, 8(a0)
+; RV64I-NEXT: lbu a6, 10(a0)
+; RV64I-NEXT: lbu a7, 11(a0)
+; RV64I-NEXT: slli a4, a4, 8
+; RV64I-NEXT: or a4, a4, a5
+; RV64I-NEXT: slli a6, a6, 16
+; RV64I-NEXT: slli a7, a7, 24
+; RV64I-NEXT: or a5, a7, a6
+; RV64I-NEXT: or a4, a5, a4
+; RV64I-NEXT: lbu a5, 13(a0)
+; RV64I-NEXT: lbu a6, 12(a0)
+; RV64I-NEXT: lbu a7, 14(a0)
+; RV64I-NEXT: lbu t0, 15(a0)
+; RV64I-NEXT: slli a5, a5, 8
+; RV64I-NEXT: or a5, a5, a6
+; RV64I-NEXT: slli a7, a7, 16
+; RV64I-NEXT: slli t0, t0, 24
+; RV64I-NEXT: or a6, t0, a7
+; RV64I-NEXT: or a5, a6, a5
+; RV64I-NEXT: slli a5, a5, 32
+; RV64I-NEXT: or a4, a5, a4
+; RV64I-NEXT: lbu a5, 17(a0)
+; RV64I-NEXT: lbu a6, 16(a0)
+; RV64I-NEXT: lbu a7, 18(a0)
+; RV64I-NEXT: lbu t0, 19(a0)
+; RV64I-NEXT: slli a5, a5, 8
+; RV64I-NEXT: or a5, a5, a6
+; RV64I-NEXT: slli a7, a7, 16
+; RV64I-NEXT: slli t0, t0, 24
+; RV64I-NEXT: or a6, t0, a7
+; RV64I-NEXT: or a5, a6, a5
+; RV64I-NEXT: lbu a6, 21(a0)
+; RV64I-NEXT: lbu a7, 20(a0)
+; RV64I-NEXT: lbu t0, 22(a0)
+; RV64I-NEXT: lbu t1, 23(a0)
+; RV64I-NEXT: slli a6, a6, 8
+; RV64I-NEXT: or a6, a6, a7
+; RV64I-NEXT: slli t0, t0, 16
+; RV64I-NEXT: slli t1, t1, 24
+; RV64I-NEXT: or a7, t1, t0
+; RV64I-NEXT: or a6, a7, a6
+; RV64I-NEXT: slli a6, a6, 32
+; RV64I-NEXT: or a5, a6, a5
+; RV64I-NEXT: lbu a6, 25(a0)
+; RV64I-NEXT: lbu a7, 24(a0)
+; RV64I-NEXT: lbu t0, 26(a0)
+; RV64I-NEXT: lbu t1, 27(a0)
+; RV64I-NEXT: slli a6, a6, 8
+; RV64I-NEXT: or a6, a6, a7
+; RV64I-NEXT: slli t0, t0, 16
+; RV64I-NEXT: slli t1, t1, 24
+; RV64I-NEXT: or a7, t1, t0
+; RV64I-NEXT: or a6, a7, a6
+; RV64I-NEXT: lbu a7, 29(a0)
+; RV64I-NEXT: lbu t0, 28(a0)
+; RV64I-NEXT: lbu t1, 30(a0)
+; RV64I-NEXT: lbu a0, 31(a0)
+; RV64I-NEXT: slli a7, a7, 8
+; RV64I-NEXT: or a7, a7, t0
+; RV64I-NEXT: slli t1, t1, 16
+; RV64I-NEXT: slli a0, a0, 24
+; RV64I-NEXT: or a0, a0, t1
+; RV64I-NEXT: or a0, a0, a7
+; RV64I-NEXT: slli a0, a0, 32
+; RV64I-NEXT: or a0, a0, a6
+; RV64I-NEXT: lbu a6, 1(a1)
+; RV64I-NEXT: lbu a7, 0(a1)
+; RV64I-NEXT: lbu t0, 2(a1)
+; RV64I-NEXT: lbu t1, 3(a1)
+; RV64I-NEXT: slli a6, a6, 8
+; RV64I-NEXT: or a6, a6, a7
+; RV64I-NEXT: slli t0, t0, 16
+; RV64I-NEXT: slli t1, t1, 24
+; RV64I-NEXT: or a7, t1, t0
+; RV64I-NEXT: or a6, a7, a6
+; RV64I-NEXT: lbu a7, 5(a1)
+; RV64I-NEXT: lbu t0, 4(a1)
+; RV64I-NEXT: lbu t1, 6(a1)
+; RV64I-NEXT: lbu a1, 7(a1)
+; RV64I-NEXT: slli a7, a7, 8
+; RV64I-NEXT: or a7, a7, t0
+; RV64I-NEXT: slli t1, t1, 16
+; RV64I-NEXT: slli a1, a1, 24
+; RV64I-NEXT: or a1, a1, t1
+; RV64I-NEXT: or a1, a1, a7
+; RV64I-NEXT: slli a1, a1, 32
+; RV64I-NEXT: or a1, a1, a6
+; RV64I-NEXT: sd zero, 24(sp)
+; RV64I-NEXT: sd zero, 16(sp)
+; RV64I-NEXT: sd zero, 8(sp)
+; RV64I-NEXT: sd zero, 0(sp)
+; RV64I-NEXT: sd a0, 56(sp)
+; RV64I-NEXT: sd a5, 48(sp)
+; RV64I-NEXT: sd a4, 40(sp)
+; RV64I-NEXT: sd a3, 32(sp)
+; RV64I-NEXT: andi a0, a1, 24
+; RV64I-NEXT: addi a3, sp, 32
+; RV64I-NEXT: sub a3, a3, a0
+; RV64I-NEXT: ld a4, 8(a3)
+; RV64I-NEXT: slli a1, a1, 3
+; RV64I-NEXT: ld a5, 0(a3)
+; RV64I-NEXT: sll a6, a4, a1
+; RV64I-NEXT: andi a0, a1, 56
+; RV64I-NEXT: xori a7, a0, 63
+; RV64I-NEXT: srli a0, a5, 1
+; RV64I-NEXT: ld t0, 24(a3)
+; RV64I-NEXT: ld a3, 16(a3)
+; RV64I-NEXT: srl a0, a0, a7
+; RV64I-NEXT: or a0, a6, a0
+; RV64I-NEXT: sll t0, t0, a1
+; RV64I-NEXT: srli t1, a3, 1
+; RV64I-NEXT: srl t1, t1, a7
+; RV64I-NEXT: or t1, t0, t1
+; RV64I-NEXT: sll a3, a3, a1
+; RV64I-NEXT: srli a4, a4, 1
+; RV64I-NEXT: srl a4, a4, a7
+; RV64I-NEXT: or a4, a3, a4
+; RV64I-NEXT: sll a1, a5, a1
+; RV64I-NEXT: sb a1, 0(a2)
+; RV64I-NEXT: srli a3, a3, 56
+; RV64I-NEXT: sb a3, 23(a2)
+; RV64I-NEXT: srli a3, t0, 56
+; RV64I-NEXT: sb a3, 31(a2)
+; RV64I-NEXT: srli a3, a1, 56
+; RV64I-NEXT: sb a3, 7(a2)
+; RV64I-NEXT: srli a3, a1, 48
+; RV64I-NEXT: sb a3, 6(a2)
+; RV64I-NEXT: srli a3, a1, 40
+; RV64I-NEXT: sb a3, 5(a2)
+; RV64I-NEXT: srli a3, a1, 32
+; RV64I-NEXT: sb a3, 4(a2)
+; RV64I-NEXT: srli a3, a1, 24
+; RV64I-NEXT: sb a3, 3(a2)
+; RV64I-NEXT: srli a3, a1, 16
+; RV64I-NEXT: sb a3, 2(a2)
+; RV64I-NEXT: srli a1, a1, 8
+; RV64I-NEXT: sb a1, 1(a2)
+; RV64I-NEXT: srli a1, a6, 56
+; RV64I-NEXT: sb a1, 15(a2)
+; RV64I-NEXT: sb a4, 16(a2)
+; RV64I-NEXT: sb t1, 24(a2)
; RV64I-NEXT: sb a0, 8(a2)
-; RV64I-NEXT: ld ra, 216(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s0, 208(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s1, 200(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s2, 192(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s3, 184(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s4, 176(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s5, 168(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s6, 160(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s7, 152(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s8, 144(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s9, 136(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s10, 128(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s11, 120(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 224
+; RV64I-NEXT: srli a1, a4, 48
+; RV64I-NEXT: sb a1, 22(a2)
+; RV64I-NEXT: srli a1, a4, 40
+; RV64I-NEXT: sb a1, 21(a2)
+; RV64I-NEXT: srli a1, a4, 32
+; RV64I-NEXT: sb a1, 20(a2)
+; RV64I-NEXT: srli a1, a4, 24
+; RV64I-NEXT: sb a1, 19(a2)
+; RV64I-NEXT: srli a1, a4, 16
+; RV64I-NEXT: sb a1, 18(a2)
+; RV64I-NEXT: srli a4, a4, 8
+; RV64I-NEXT: sb a4, 17(a2)
+; RV64I-NEXT: srli a1, t1, 48
+; RV64I-NEXT: sb a1, 30(a2)
+; RV64I-NEXT: srli a1, t1, 40
+; RV64I-NEXT: sb a1, 29(a2)
+; RV64I-NEXT: srli a1, t1, 32
+; RV64I-NEXT: sb a1, 28(a2)
+; RV64I-NEXT: srli a1, t1, 24
+; RV64I-NEXT: sb a1, 27(a2)
+; RV64I-NEXT: srli a1, t1, 16
+; RV64I-NEXT: sb a1, 26(a2)
+; RV64I-NEXT: srli a1, t1, 8
+; RV64I-NEXT: sb a1, 25(a2)
+; RV64I-NEXT: srli a1, a0, 48
+; RV64I-NEXT: sb a1, 14(a2)
+; RV64I-NEXT: srli a1, a0, 40
+; RV64I-NEXT: sb a1, 13(a2)
+; RV64I-NEXT: srli a1, a0, 32
+; RV64I-NEXT: sb a1, 12(a2)
+; RV64I-NEXT: srli a1, a0, 24
+; RV64I-NEXT: sb a1, 11(a2)
+; RV64I-NEXT: srli a1, a0, 16
+; RV64I-NEXT: sb a1, 10(a2)
+; RV64I-NEXT: srli a0, a0, 8
+; RV64I-NEXT: sb a0, 9(a2)
+; RV64I-NEXT: addi sp, sp, 64
; RV64I-NEXT: ret
;
; RV32I-LABEL: shl_32bytes:
; RV32I: # %bb.0:
-; RV32I-NEXT: addi sp, sp, -144
-; RV32I-NEXT: sw ra, 140(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s0, 136(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s1, 132(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s2, 128(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s3, 124(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s4, 120(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s5, 116(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s6, 112(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s7, 108(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s8, 104(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s9, 100(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s10, 96(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s11, 92(sp) # 4-byte Folded Spill
-; RV32I-NEXT: lbu a3, 0(a0)
-; RV32I-NEXT: sw a3, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT: addi sp, sp, -80
+; RV32I-NEXT: sw s0, 76(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s1, 72(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s2, 68(sp) # 4-byte Folded Spill
; RV32I-NEXT: lbu a3, 1(a0)
-; RV32I-NEXT: sw a3, 20(sp) # 4-byte Folded Spill
-; RV32I-NEXT: lbu a3, 2(a0)
-; RV32I-NEXT: sw a3, 16(sp) # 4-byte Folded Spill
-; RV32I-NEXT: lbu a3, 3(a0)
-; RV32I-NEXT: sw a3, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: lbu a3, 4(a0)
-; RV32I-NEXT: sw a3, 8(sp) # 4-byte Folded Spill
-; RV32I-NEXT: lbu a3, 5(a0)
-; RV32I-NEXT: sw a3, 4(sp) # 4-byte Folded Spill
-; RV32I-NEXT: lbu t1, 6(a0)
-; RV32I-NEXT: lbu t2, 7(a0)
-; RV32I-NEXT: lbu t3, 8(a0)
-; RV32I-NEXT: lbu t4, 9(a0)
-; RV32I-NEXT: lbu t5, 10(a0)
-; RV32I-NEXT: lbu t6, 11(a0)
-; RV32I-NEXT: lbu s0, 12(a0)
-; RV32I-NEXT: lbu s1, 13(a0)
-; RV32I-NEXT: lbu s2, 14(a0)
-; RV32I-NEXT: lbu s3, 15(a0)
-; RV32I-NEXT: lbu s4, 16(a0)
-; RV32I-NEXT: lbu s5, 17(a0)
-; RV32I-NEXT: lbu s6, 18(a0)
-; RV32I-NEXT: lbu s7, 19(a0)
-; RV32I-NEXT: lbu s8, 20(a0)
-; RV32I-NEXT: lbu s9, 21(a0)
-; RV32I-NEXT: lbu s10, 22(a0)
-; RV32I-NEXT: lbu s11, 23(a0)
-; RV32I-NEXT: lbu ra, 24(a0)
-; RV32I-NEXT: lbu t0, 25(a0)
-; RV32I-NEXT: lbu a7, 26(a0)
-; RV32I-NEXT: lbu a6, 27(a0)
-; RV32I-NEXT: lbu a5, 28(a0)
-; RV32I-NEXT: lbu a3, 31(a0)
-; RV32I-NEXT: lbu a4, 30(a0)
-; RV32I-NEXT: lbu a0, 29(a0)
-; RV32I-NEXT: lbu a1, 0(a1)
-; RV32I-NEXT: sb a3, 91(sp)
-; RV32I-NEXT: sb a4, 90(sp)
-; RV32I-NEXT: sb a0, 89(sp)
-; RV32I-NEXT: sb a5, 88(sp)
-; RV32I-NEXT: sb a6, 87(sp)
-; RV32I-NEXT: sb a7, 86(sp)
-; RV32I-NEXT: sb zero, 59(sp)
-; RV32I-NEXT: sb zero, 58(sp)
-; RV32I-NEXT: sb zero, 57(sp)
-; RV32I-NEXT: sb zero, 56(sp)
-; RV32I-NEXT: sb zero, 55(sp)
-; RV32I-NEXT: sb zero, 54(sp)
-; RV32I-NEXT: sb zero, 53(sp)
-; RV32I-NEXT: sb zero, 52(sp)
-; RV32I-NEXT: sb zero, 51(sp)
-; RV32I-NEXT: sb zero, 50(sp)
-; RV32I-NEXT: sb zero, 49(sp)
-; RV32I-NEXT: sb zero, 48(sp)
-; RV32I-NEXT: sb zero, 47(sp)
-; RV32I-NEXT: sb zero, 46(sp)
-; RV32I-NEXT: sb zero, 45(sp)
-; RV32I-NEXT: sb zero, 44(sp)
-; RV32I-NEXT: sb zero, 43(sp)
-; RV32I-NEXT: sb zero, 42(sp)
-; RV32I-NEXT: sb zero, 41(sp)
-; RV32I-NEXT: sb zero, 40(sp)
-; RV32I-NEXT: sb zero, 39(sp)
-; RV32I-NEXT: sb zero, 38(sp)
-; RV32I-NEXT: sb zero, 37(sp)
-; RV32I-NEXT: sb zero, 36(sp)
-; RV32I-NEXT: sb zero, 35(sp)
-; RV32I-NEXT: sb zero, 34(sp)
-; RV32I-NEXT: sb zero, 33(sp)
-; RV32I-NEXT: sb zero, 32(sp)
-; RV32I-NEXT: sb zero, 31(sp)
-; RV32I-NEXT: sb zero, 30(sp)
-; RV32I-NEXT: sb zero, 29(sp)
-; RV32I-NEXT: sb zero, 28(sp)
-; RV32I-NEXT: sb t0, 85(sp)
-; RV32I-NEXT: sb ra, 84(sp)
-; RV32I-NEXT: sb s11, 83(sp)
-; RV32I-NEXT: sb s10, 82(sp)
-; RV32I-NEXT: sb s9, 81(sp)
-; RV32I-NEXT: sb s8, 80(sp)
-; RV32I-NEXT: sb s7, 79(sp)
-; RV32I-NEXT: sb s6, 78(sp)
-; RV32I-NEXT: sb s5, 77(sp)
-; RV32I-NEXT: sb s4, 76(sp)
-; RV32I-NEXT: sb s3, 75(sp)
-; RV32I-NEXT: sb s2, 74(sp)
-; RV32I-NEXT: sb s1, 73(sp)
-; RV32I-NEXT: sb s0, 72(sp)
-; RV32I-NEXT: sb t6, 71(sp)
-; RV32I-NEXT: sb t5, 70(sp)
-; RV32I-NEXT: sb t4, 69(sp)
-; RV32I-NEXT: sb t3, 68(sp)
-; RV32I-NEXT: sb t2, 67(sp)
-; RV32I-NEXT: sb t1, 66(sp)
-; RV32I-NEXT: lw a0, 4(sp) # 4-byte Folded Reload
-; RV32I-NEXT: sb a0, 65(sp)
-; RV32I-NEXT: lw a0, 8(sp) # 4-byte Folded Reload
-; RV32I-NEXT: sb a0, 64(sp)
-; RV32I-NEXT: lw a0, 12(sp) # 4-byte Folded Reload
-; RV32I-NEXT: sb a0, 63(sp)
-; RV32I-NEXT: lw a0, 16(sp) # 4-byte Folded Reload
-; RV32I-NEXT: sb a0, 62(sp)
-; RV32I-NEXT: lw a0, 20(sp) # 4-byte Folded Reload
-; RV32I-NEXT: sb a0, 61(sp)
-; RV32I-NEXT: lw a0, 24(sp) # 4-byte Folded Reload
-; RV32I-NEXT: sb a0, 60(sp)
-; RV32I-NEXT: andi a1, a1, 31
-; RV32I-NEXT: addi a0, sp, 60
-; RV32I-NEXT: sub a6, a0, a1
-; RV32I-NEXT: lbu a0, 6(a6)
-; RV32I-NEXT: sw a0, 24(sp) # 4-byte Folded Spill
-; RV32I-NEXT: lbu a0, 7(a6)
-; RV32I-NEXT: sw a0, 20(sp) # 4-byte Folded Spill
-; RV32I-NEXT: lbu a0, 4(a6)
-; RV32I-NEXT: sw a0, 16(sp) # 4-byte Folded Spill
-; RV32I-NEXT: lbu a0, 5(a6)
-; RV32I-NEXT: sw a0, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: lbu a0, 0(a6)
-; RV32I-NEXT: sw a0, 8(sp) # 4-byte Folded Spill
-; RV32I-NEXT: lbu a7, 1(a6)
-; RV32I-NEXT: lbu t0, 2(a6)
-; RV32I-NEXT: lbu t1, 3(a6)
-; RV32I-NEXT: lbu t2, 14(a6)
-; RV32I-NEXT: lbu t3, 15(a6)
-; RV32I-NEXT: lbu t4, 12(a6)
-; RV32I-NEXT: lbu t5, 13(a6)
-; RV32I-NEXT: lbu t6, 10(a6)
-; RV32I-NEXT: lbu s0, 11(a6)
-; RV32I-NEXT: lbu s1, 8(a6)
-; RV32I-NEXT: lbu s2, 9(a6)
-; RV32I-NEXT: lbu s3, 22(a6)
-; RV32I-NEXT: lbu s4, 23(a6)
-; RV32I-NEXT: lbu s5, 20(a6)
-; RV32I-NEXT: lbu s6, 21(a6)
-; RV32I-NEXT: lbu s7, 18(a6)
-; RV32I-NEXT: lbu s8, 19(a6)
-; RV32I-NEXT: lbu s9, 16(a6)
-; RV32I-NEXT: lbu s10, 17(a6)
-; RV32I-NEXT: lbu s11, 30(a6)
-; RV32I-NEXT: lbu ra, 31(a6)
-; RV32I-NEXT: lbu a5, 28(a6)
-; RV32I-NEXT: lbu a4, 29(a6)
-; RV32I-NEXT: lbu a0, 25(a6)
-; RV32I-NEXT: lbu a1, 24(a6)
-; RV32I-NEXT: lbu a3, 27(a6)
-; RV32I-NEXT: lbu a6, 26(a6)
-; RV32I-NEXT: sb a0, 25(a2)
-; RV32I-NEXT: sb a1, 24(a2)
-; RV32I-NEXT: sb a3, 27(a2)
-; RV32I-NEXT: sb a6, 26(a2)
+; RV32I-NEXT: lbu a4, 0(a0)
+; RV32I-NEXT: lbu a5, 2(a0)
+; RV32I-NEXT: lbu a6, 3(a0)
+; RV32I-NEXT: slli a3, a3, 8
+; RV32I-NEXT: or a3, a3, a4
+; RV32I-NEXT: slli a5, a5, 16
+; RV32I-NEXT: slli a6, a6, 24
+; RV32I-NEXT: or a4, a6, a5
+; RV32I-NEXT: or a3, a4, a3
+; RV32I-NEXT: lbu a4, 5(a0)
+; RV32I-NEXT: lbu a5, 4(a0)
+; RV32I-NEXT: lbu a6, 6(a0)
+; RV32I-NEXT: lbu a7, 7(a0)
+; RV32I-NEXT: slli a4, a4, 8
+; RV32I-NEXT: or a4, a4, a5
+; RV32I-NEXT: slli a6, a6, 16
+; RV32I-NEXT: slli a7, a7, 24
+; RV32I-NEXT: or a5, a7, a6
+; RV32I-NEXT: or a4, a5, a4
+; RV32I-NEXT: lbu a5, 9(a0)
+; RV32I-NEXT: lbu a6, 8(a0)
+; RV32I-NEXT: lbu a7, 10(a0)
+; RV32I-NEXT: lbu t0, 11(a0)
+; RV32I-NEXT: slli a5, a5, 8
+; RV32I-NEXT: or a5, a5, a6
+; RV32I-NEXT: slli a7, a7, 16
+; RV32I-NEXT: slli t0, t0, 24
+; RV32I-NEXT: or a6, t0, a7
+; RV32I-NEXT: or a5, a6, a5
+; RV32I-NEXT: lbu a6, 13(a0)
+; RV32I-NEXT: lbu a7, 12(a0)
+; RV32I-NEXT: lbu t0, 14(a0)
+; RV32I-NEXT: lbu t1, 15(a0)
+; RV32I-NEXT: slli a6, a6, 8
+; RV32I-NEXT: or a6, a6, a7
+; RV32I-NEXT: slli t0, t0, 16
+; RV32I-NEXT: slli t1, t1, 24
+; RV32I-NEXT: or a7, t1, t0
+; RV32I-NEXT: or a6, a7, a6
+; RV32I-NEXT: lbu a7, 17(a0)
+; RV32I-NEXT: lbu t0, 16(a0)
+; RV32I-NEXT: lbu t1, 18(a0)
+; RV32I-NEXT: lbu t2, 19(a0)
+; RV32I-NEXT: slli a7, a7, 8
+; RV32I-NEXT: or a7, a7, t0
+; RV32I-NEXT: slli t1, t1, 16
+; RV32I-NEXT: slli t2, t2, 24
+; RV32I-NEXT: or t0, t2, t1
+; RV32I-NEXT: or a7, t0, a7
+; RV32I-NEXT: lbu t0, 21(a0)
+; RV32I-NEXT: lbu t1, 20(a0)
+; RV32I-NEXT: lbu t2, 22(a0)
+; RV32I-NEXT: lbu t3, 23(a0)
+; RV32I-NEXT: slli t0, t0, 8
+; RV32I-NEXT: or t0, t0, t1
+; RV32I-NEXT: slli t2, t2, 16
+; RV32I-NEXT: slli t3, t3, 24
+; RV32I-NEXT: or t1, t3, t2
+; RV32I-NEXT: or t0, t1, t0
+; RV32I-NEXT: lbu t1, 25(a0)
+; RV32I-NEXT: lbu t2, 24(a0)
+; RV32I-NEXT: lbu t3, 26(a0)
+; RV32I-NEXT: lbu t4, 27(a0)
+; RV32I-NEXT: slli t1, t1, 8
+; RV32I-NEXT: or t1, t1, t2
+; RV32I-NEXT: slli t3, t3, 16
+; RV32I-NEXT: slli t4, t4, 24
+; RV32I-NEXT: or t2, t4, t3
+; RV32I-NEXT: or t1, t2, t1
+; RV32I-NEXT: lbu t2, 29(a0)
+; RV32I-NEXT: lbu t3, 28(a0)
+; RV32I-NEXT: lbu t4, 30(a0)
+; RV32I-NEXT: lbu a0, 31(a0)
+; RV32I-NEXT: slli t2, t2, 8
+; RV32I-NEXT: or t2, t2, t3
+; RV32I-NEXT: slli t4, t4, 16
+; RV32I-NEXT: slli a0, a0, 24
+; RV32I-NEXT: or a0, a0, t4
+; RV32I-NEXT: or a0, a0, t2
+; RV32I-NEXT: lbu t2, 1(a1)
+; RV32I-NEXT: lbu t3, 0(a1)
+; RV32I-NEXT: lbu t4, 2(a1)
+; RV32I-NEXT: lbu a1, 3(a1)
+; RV32I-NEXT: slli t2, t2, 8
+; RV32I-NEXT: or t2, t2, t3
+; RV32I-NEXT: slli t4, t4, 16
+; RV32I-NEXT: slli a1, a1, 24
+; RV32I-NEXT: or a1, a1, t4
+; RV32I-NEXT: or a1, a1, t2
+; RV32I-NEXT: sw zero, 28(sp)
+; RV32I-NEXT: sw zero, 24(sp)
+; RV32I-NEXT: sw zero, 20(sp)
+; RV32I-NEXT: sw zero, 16(sp)
+; RV32I-NEXT: sw zero, 12(sp)
+; RV32I-NEXT: sw zero, 8(sp)
+; RV32I-NEXT: sw zero, 4(sp)
+; RV32I-NEXT: sw zero, 0(sp)
+; RV32I-NEXT: sw a0, 60(sp)
+; RV32I-NEXT: sw t1, 56(sp)
+; RV32I-NEXT: sw t0, 52(sp)
+; RV32I-NEXT: sw a7, 48(sp)
+; RV32I-NEXT: sw a6, 44(sp)
+; RV32I-NEXT: sw a5, 40(sp)
+; RV32I-NEXT: sw a4, 36(sp)
+; RV32I-NEXT: sw a3, 32(sp)
+; RV32I-NEXT: andi a0, a1, 28
+; RV32I-NEXT: addi a3, sp, 32
+; RV32I-NEXT: sub a6, a3, a0
+; RV32I-NEXT: lw a3, 4(a6)
+; RV32I-NEXT: slli a7, a1, 3
+; RV32I-NEXT: lw t0, 0(a6)
+; RV32I-NEXT: sll a4, a3, a7
+; RV32I-NEXT: andi a0, a7, 24
+; RV32I-NEXT: xori t1, a0, 31
+; RV32I-NEXT: srli a0, t0, 1
+; RV32I-NEXT: lw t2, 12(a6)
+; RV32I-NEXT: lw a5, 8(a6)
+; RV32I-NEXT: srl a0, a0, t1
+; RV32I-NEXT: or a0, a4, a0
+; RV32I-NEXT: sll t3, t2, a7
+; RV32I-NEXT: srli a1, a5, 1
+; RV32I-NEXT: srl a1, a1, t1
+; RV32I-NEXT: or a1, t3, a1
+; RV32I-NEXT: sll t4, a5, a7
+; RV32I-NEXT: srli a3, a3, 1
+; RV32I-NEXT: lw t5, 20(a6)
+; RV32I-NEXT: lw t6, 16(a6)
+; RV32I-NEXT: srl a3, a3, t1
+; RV32I-NEXT: or a3, t4, a3
+; RV32I-NEXT: sll s0, t5, a7
+; RV32I-NEXT: srli a5, t6, 1
+; RV32I-NEXT: srl a5, a5, t1
+; RV32I-NEXT: or a5, s0, a5
+; RV32I-NEXT: sll t6, t6, a7
+; RV32I-NEXT: srli t2, t2, 1
+; RV32I-NEXT: lw s1, 28(a6)
+; RV32I-NEXT: lw a6, 24(a6)
+; RV32I-NEXT: srl t2, t2, t1
+; RV32I-NEXT: or t2, t6, t2
+; RV32I-NEXT: sll s1, s1, a7
+; RV32I-NEXT: srli s2, a6, 1
+; RV32I-NEXT: srl s2, s2, t1
+; RV32I-NEXT: or s2, s1, s2
+; RV32I-NEXT: sll a6, a6, a7
+; RV32I-NEXT: srli t5, t5, 1
+; RV32I-NEXT: srl t1, t5, t1
+; RV32I-NEXT: or t1, a6, t1
+; RV32I-NEXT: sll a7, t0, a7
+; RV32I-NEXT: sb a7, 0(a2)
+; RV32I-NEXT: srli a6, a6, 24
+; RV32I-NEXT: sb a6, 27(a2)
+; RV32I-NEXT: srli s1, s1, 24
+; RV32I-NEXT: sb s1, 31(a2)
+; RV32I-NEXT: srli a6, t6, 24
+; RV32I-NEXT: sb a6, 19(a2)
+; RV32I-NEXT: srli s0, s0, 24
+; RV32I-NEXT: sb s0, 23(a2)
+; RV32I-NEXT: srli a6, t4, 24
+; RV32I-NEXT: sb a6, 11(a2)
+; RV32I-NEXT: srli a6, t3, 24
+; RV32I-NEXT: sb a6, 15(a2)
+; RV32I-NEXT: srli a6, a7, 24
+; RV32I-NEXT: sb a6, 3(a2)
+; RV32I-NEXT: srli a6, a7, 16
+; RV32I-NEXT: sb a6, 2(a2)
+; RV32I-NEXT: srli a6, a7, 8
+; RV32I-NEXT: sb a6, 1(a2)
+; RV32I-NEXT: srli a4, a4, 24
+; RV32I-NEXT: sb a4, 7(a2)
+; RV32I-NEXT: sb t1, 24(a2)
+; RV32I-NEXT: sb s2, 28(a2)
+; RV32I-NEXT: sb t2, 16(a2)
+; RV32I-NEXT: sb a5, 20(a2)
+; RV32I-NEXT: sb a3, 8(a2)
+; RV32I-NEXT: sb a1, 12(a2)
+; RV32I-NEXT: sb a0, 4(a2)
+; RV32I-NEXT: srli a4, t1, 16
+; RV32I-NEXT: sb a4, 26(a2)
+; RV32I-NEXT: srli a4, t1, 8
+; RV32I-NEXT: sb a4, 25(a2)
+; RV32I-NEXT: srli a4, s2, 16
+; RV32I-NEXT: sb a4, 30(a2)
+; RV32I-NEXT: srli a4, s2, 8
; RV32I-NEXT: sb a4, 29(a2)
-; RV32I-NEXT: sb a5, 28(a2)
-; RV32I-NEXT: sb ra, 31(a2)
-; RV32I-NEXT: sb s11, 30(a2)
-; RV32I-NEXT: sb s10, 17(a2)
-; RV32I-NEXT: sb s9, 16(a2)
-; RV32I-NEXT: sb s8, 19(a2)
-; RV32I-NEXT: sb s7, 18(a2)
-; RV32I-NEXT: sb s6, 21(a2)
-; RV32I-NEXT: sb s5, 20(a2)
-; RV32I-NEXT: sb s4, 23(a2)
-; RV32I-NEXT: sb s3, 22(a2)
-; RV32I-NEXT: sb s2, 9(a2)
-; RV32I-NEXT: sb s1, 8(a2)
-; RV32I-NEXT: sb s0, 11(a2)
-; RV32I-NEXT: sb t6, 10(a2)
-; RV32I-NEXT: sb t5, 13(a2)
-; RV32I-NEXT: sb t4, 12(a2)
-; RV32I-NEXT: sb t3, 15(a2)
-; RV32I-NEXT: sb t2, 14(a2)
-; RV32I-NEXT: sb t1, 3(a2)
-; RV32I-NEXT: sb t0, 2(a2)
-; RV32I-NEXT: sb a7, 1(a2)
-; RV32I-NEXT: lw a0, 8(sp) # 4-byte Folded Reload
-; RV32I-NEXT: sb a0, 0(a2)
-; RV32I-NEXT: lw a0, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT: srli a4, t2, 16
+; RV32I-NEXT: sb a4, 18(a2)
+; RV32I-NEXT: srli a4, t2, 8
+; RV32I-NEXT: sb a4, 17(a2)
+; RV32I-NEXT: srli a4, a5, 16
+; RV32I-NEXT: sb a4, 22(a2)
+; RV32I-NEXT: srli a5, a5, 8
+; RV32I-NEXT: sb a5, 21(a2)
+; RV32I-NEXT: srli a4, a3, 16
+; RV32I-NEXT: sb a4, 10(a2)
+; RV32I-NEXT: srli a3, a3, 8
+; RV32I-NEXT: sb a3, 9(a2)
+; RV32I-NEXT: srli a3, a1, 16
+; RV32I-NEXT: sb a3, 14(a2)
+; RV32I-NEXT: srli a1, a1, 8
+; RV32I-NEXT: sb a1, 13(a2)
+; RV32I-NEXT: srli a1, a0, 16
+; RV32I-NEXT: sb a1, 6(a2)
+; RV32I-NEXT: srli a0, a0, 8
; RV32I-NEXT: sb a0, 5(a2)
-; RV32I-NEXT: lw a0, 16(sp) # 4-byte Folded Reload
-; RV32I-NEXT: sb a0, 4(a2)
-; RV32I-NEXT: lw a0, 20(sp) # 4-byte Folded Reload
-; RV32I-NEXT: sb a0, 7(a2)
-; RV32I-NEXT: lw a0, 24(sp) # 4-byte Folded Reload
-; RV32I-NEXT: sb a0, 6(a2)
-; RV32I-NEXT: lw ra, 140(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s0, 136(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s1, 132(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s2, 128(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s3, 124(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s4, 120(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s5, 116(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s6, 112(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s7, 108(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s8, 104(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s9, 100(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s10, 96(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s11, 92(sp) # 4-byte Folded Reload
-; RV32I-NEXT: addi sp, sp, 144
+; RV32I-NEXT: lw s0, 76(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s1, 72(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s2, 68(sp) # 4-byte Folded Reload
+; RV32I-NEXT: addi sp, sp, 80
; RV32I-NEXT: ret
%src = load i256, ptr %src.ptr, align 1
%byteOff = load i256, ptr %byteOff.ptr, align 1
@@ -2155,457 +3569,1169 @@ define void @shl_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
store i256 %res, ptr %dst, align 1
ret void
}
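
For the left shift the buffer layout is mirrored: the zeros go below the data (`sw zero, 0..28(sp)`, data at 32..60(sp)) and the load address is formed by subtracting the masked offset from the data base (the `sub a3, a3, a0` / `sub a6, a3, a0` lines), so shifting left by N bytes pulls N bytes of zeros in from the bottom of the buffer. A sketch under the same assumptions as before (little-endian host, illustrative names, simplified bit handling):

#include <stdint.h>
#include <string.h>

static void shl_32bytes_sketch(const uint8_t src[32], uint64_t byteOff,
                               uint8_t dst[32]) {
    uint64_t buf[8] = {0};                /* sd zero, 0..24(sp): zeros below */
    memcpy(buf + 4, src, 32);             /* data stored at 32..56(sp)       */
    unsigned bit = (unsigned)((byteOff & 31) * 8);
    const uint64_t *p = buf + 4 - bit / 64;  /* "sub" indexing toward zeros  */
    unsigned rem = bit % 64;
    uint64_t out[4];
    for (int i = 0; i < 4; i++) {
        uint64_t hi = p[i] << rem;
        uint64_t lo = rem ? p[i - 1] >> (64 - rem) : 0; /* srli 1 / srl idiom */
        out[i] = hi | lo;
    }
    memcpy(dst, out, 32);
}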
-define void @ashr_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
-; RV64I-LABEL: ashr_32bytes:
+
+define void @shl_32bytes_wordOff(ptr %src.ptr, ptr %wordOff.ptr, ptr %dst) nounwind {
+; RV64I-LABEL: shl_32bytes_wordOff:
; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -224
-; RV64I-NEXT: sd ra, 216(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s0, 208(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s1, 200(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s2, 192(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s3, 184(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s4, 176(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s5, 168(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s6, 160(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s7, 152(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s8, 144(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s9, 136(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s10, 128(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s11, 120(sp) # 8-byte Folded Spill
-; RV64I-NEXT: mv t0, a1
-; RV64I-NEXT: lbu t1, 31(a0)
-; RV64I-NEXT: lbu a1, 0(a0)
-; RV64I-NEXT: sd a1, 48(sp) # 8-byte Folded Spill
-; RV64I-NEXT: lbu a1, 1(a0)
-; RV64I-NEXT: sd a1, 40(sp) # 8-byte Folded Spill
-; RV64I-NEXT: lbu a1, 2(a0)
-; RV64I-NEXT: sd a1, 32(sp) # 8-byte Folded Spill
-; RV64I-NEXT: lbu a1, 3(a0)
-; RV64I-NEXT: sd a1, 24(sp) # 8-byte Folded Spill
-; RV64I-NEXT: lbu a1, 4(a0)
-; RV64I-NEXT: sd a1, 16(sp) # 8-byte Folded Spill
-; RV64I-NEXT: lbu a1, 5(a0)
-; RV64I-NEXT: sd a1, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: lbu t2, 6(a0)
-; RV64I-NEXT: lbu t3, 7(a0)
-; RV64I-NEXT: lbu t4, 8(a0)
-; RV64I-NEXT: lbu t5, 9(a0)
-; RV64I-NEXT: lbu t6, 10(a0)
-; RV64I-NEXT: lbu s0, 11(a0)
-; RV64I-NEXT: lbu s1, 12(a0)
-; RV64I-NEXT: lbu s2, 13(a0)
-; RV64I-NEXT: lbu s3, 14(a0)
-; RV64I-NEXT: lbu s4, 15(a0)
-; RV64I-NEXT: lbu s5, 16(a0)
-; RV64I-NEXT: lbu s6, 17(a0)
-; RV64I-NEXT: lbu s7, 18(a0)
-; RV64I-NEXT: lbu s8, 19(a0)
-; RV64I-NEXT: lbu s9, 20(a0)
-; RV64I-NEXT: lbu s10, 21(a0)
-; RV64I-NEXT: lbu s11, 22(a0)
-; RV64I-NEXT: lbu ra, 23(a0)
+; RV64I-NEXT: addi sp, sp, -64
+; RV64I-NEXT: lbu a3, 1(a0)
+; RV64I-NEXT: lbu a4, 0(a0)
+; RV64I-NEXT: lbu a5, 2(a0)
+; RV64I-NEXT: lbu a6, 3(a0)
+; RV64I-NEXT: slli a3, a3, 8
+; RV64I-NEXT: or a3, a3, a4
+; RV64I-NEXT: slli a5, a5, 16
+; RV64I-NEXT: slli a6, a6, 24
+; RV64I-NEXT: or a4, a6, a5
+; RV64I-NEXT: or a3, a4, a3
+; RV64I-NEXT: lbu a4, 5(a0)
+; RV64I-NEXT: lbu a5, 4(a0)
+; RV64I-NEXT: lbu a6, 6(a0)
+; RV64I-NEXT: lbu a7, 7(a0)
+; RV64I-NEXT: slli a4, a4, 8
+; RV64I-NEXT: or a4, a4, a5
+; RV64I-NEXT: slli a6, a6, 16
+; RV64I-NEXT: slli a7, a7, 24
+; RV64I-NEXT: or a5, a7, a6
+; RV64I-NEXT: or a4, a5, a4
+; RV64I-NEXT: slli a4, a4, 32
+; RV64I-NEXT: or a3, a4, a3
+; RV64I-NEXT: lbu a4, 9(a0)
+; RV64I-NEXT: lbu a5, 8(a0)
+; RV64I-NEXT: lbu a6, 10(a0)
+; RV64I-NEXT: lbu a7, 11(a0)
+; RV64I-NEXT: slli a4, a4, 8
+; RV64I-NEXT: or a4, a4, a5
+; RV64I-NEXT: slli a6, a6, 16
+; RV64I-NEXT: slli a7, a7, 24
+; RV64I-NEXT: or a5, a7, a6
+; RV64I-NEXT: or a4, a5, a4
+; RV64I-NEXT: lbu a5, 13(a0)
+; RV64I-NEXT: lbu a6, 12(a0)
+; RV64I-NEXT: lbu a7, 14(a0)
+; RV64I-NEXT: lbu t0, 15(a0)
+; RV64I-NEXT: slli a5, a5, 8
+; RV64I-NEXT: or a5, a5, a6
+; RV64I-NEXT: slli a7, a7, 16
+; RV64I-NEXT: slli t0, t0, 24
+; RV64I-NEXT: or a6, t0, a7
+; RV64I-NEXT: or a5, a6, a5
+; RV64I-NEXT: slli a5, a5, 32
+; RV64I-NEXT: or a4, a5, a4
+; RV64I-NEXT: lbu a5, 17(a0)
+; RV64I-NEXT: lbu a6, 16(a0)
+; RV64I-NEXT: lbu a7, 18(a0)
+; RV64I-NEXT: lbu t0, 19(a0)
+; RV64I-NEXT: slli a5, a5, 8
+; RV64I-NEXT: or a5, a5, a6
+; RV64I-NEXT: slli a7, a7, 16
+; RV64I-NEXT: slli t0, t0, 24
+; RV64I-NEXT: or a6, t0, a7
+; RV64I-NEXT: or a5, a6, a5
+; RV64I-NEXT: lbu a6, 21(a0)
+; RV64I-NEXT: lbu a7, 20(a0)
+; RV64I-NEXT: lbu t0, 22(a0)
+; RV64I-NEXT: lbu t1, 23(a0)
+; RV64I-NEXT: slli a6, a6, 8
+; RV64I-NEXT: or a6, a6, a7
+; RV64I-NEXT: slli t0, t0, 16
+; RV64I-NEXT: slli t1, t1, 24
+; RV64I-NEXT: or a7, t1, t0
+; RV64I-NEXT: or a6, a7, a6
+; RV64I-NEXT: slli a6, a6, 32
+; RV64I-NEXT: or a5, a6, a5
+; RV64I-NEXT: lbu a6, 25(a0)
; RV64I-NEXT: lbu a7, 24(a0)
+; RV64I-NEXT: lbu t0, 26(a0)
+; RV64I-NEXT: lbu t1, 27(a0)
+; RV64I-NEXT: slli a6, a6, 8
+; RV64I-NEXT: or a6, a6, a7
+; RV64I-NEXT: slli t0, t0, 16
+; RV64I-NEXT: slli t1, t1, 24
+; RV64I-NEXT: or a7, t1, t0
+; RV64I-NEXT: or a6, a7, a6
+; RV64I-NEXT: lbu a7, 29(a0)
+; RV64I-NEXT: lbu t0, 28(a0)
+; RV64I-NEXT: lbu t1, 30(a0)
+; RV64I-NEXT: lbu a0, 31(a0)
+; RV64I-NEXT: slli a7, a7, 8
+; RV64I-NEXT: or a7, a7, t0
+; RV64I-NEXT: slli t1, t1, 16
+; RV64I-NEXT: slli a0, a0, 24
+; RV64I-NEXT: or a0, a0, t1
+; RV64I-NEXT: or a0, a0, a7
+; RV64I-NEXT: slli a0, a0, 32
+; RV64I-NEXT: or a0, a0, a6
+; RV64I-NEXT: lbu a6, 1(a1)
+; RV64I-NEXT: lbu a7, 0(a1)
+; RV64I-NEXT: lbu t0, 2(a1)
+; RV64I-NEXT: lbu t1, 3(a1)
+; RV64I-NEXT: slli a6, a6, 8
+; RV64I-NEXT: or a6, a6, a7
+; RV64I-NEXT: slli t0, t0, 16
+; RV64I-NEXT: slli t1, t1, 24
+; RV64I-NEXT: or a7, t1, t0
+; RV64I-NEXT: or a6, a7, a6
+; RV64I-NEXT: lbu a7, 5(a1)
+; RV64I-NEXT: lbu t0, 4(a1)
+; RV64I-NEXT: lbu t1, 6(a1)
+; RV64I-NEXT: lbu a1, 7(a1)
+; RV64I-NEXT: slli a7, a7, 8
+; RV64I-NEXT: or a7, a7, t0
+; RV64I-NEXT: slli t1, t1, 16
+; RV64I-NEXT: slli a1, a1, 24
+; RV64I-NEXT: or a1, a1, t1
+; RV64I-NEXT: or a1, a1, a7
+; RV64I-NEXT: slli a1, a1, 32
+; RV64I-NEXT: or a1, a1, a6
+; RV64I-NEXT: sd zero, 24(sp)
+; RV64I-NEXT: sd zero, 16(sp)
+; RV64I-NEXT: sd zero, 8(sp)
+; RV64I-NEXT: sd zero, 0(sp)
+; RV64I-NEXT: sd a0, 56(sp)
+; RV64I-NEXT: sd a5, 48(sp)
+; RV64I-NEXT: sd a4, 40(sp)
+; RV64I-NEXT: sd a3, 32(sp)
+; RV64I-NEXT: slli a0, a1, 2
+; RV64I-NEXT: andi a0, a0, 24
+; RV64I-NEXT: addi a3, sp, 32
+; RV64I-NEXT: sub a0, a3, a0
+; RV64I-NEXT: ld a4, 8(a0)
+; RV64I-NEXT: slli a5, a1, 5
+; RV64I-NEXT: ld a6, 0(a0)
+; RV64I-NEXT: sll a3, a4, a5
+; RV64I-NEXT: andi a1, a5, 32
+; RV64I-NEXT: xori a7, a1, 63
+; RV64I-NEXT: srli a1, a6, 1
+; RV64I-NEXT: ld t0, 24(a0)
+; RV64I-NEXT: ld t1, 16(a0)
+; RV64I-NEXT: srl a0, a1, a7
+; RV64I-NEXT: or a0, a3, a0
+; RV64I-NEXT: sll t0, t0, a5
+; RV64I-NEXT: srli a1, t1, 1
+; RV64I-NEXT: srl a1, a1, a7
+; RV64I-NEXT: or a1, t0, a1
+; RV64I-NEXT: sll t1, t1, a5
+; RV64I-NEXT: srli a4, a4, 1
+; RV64I-NEXT: srl a4, a4, a7
+; RV64I-NEXT: or a4, t1, a4
+; RV64I-NEXT: sll a5, a6, a5
+; RV64I-NEXT: sb a5, 0(a2)
+; RV64I-NEXT: srli a6, t1, 56
+; RV64I-NEXT: sb a6, 23(a2)
+; RV64I-NEXT: srli a6, t1, 48
+; RV64I-NEXT: sb a6, 22(a2)
+; RV64I-NEXT: srli a6, t1, 40
+; RV64I-NEXT: sb a6, 21(a2)
+; RV64I-NEXT: srli a6, t1, 32
+; RV64I-NEXT: sb a6, 20(a2)
+; RV64I-NEXT: srli a6, t0, 56
+; RV64I-NEXT: sb a6, 31(a2)
+; RV64I-NEXT: srli a6, t0, 48
+; RV64I-NEXT: sb a6, 30(a2)
+; RV64I-NEXT: srli a6, t0, 40
+; RV64I-NEXT: sb a6, 29(a2)
+; RV64I-NEXT: srli a6, t0, 32
+; RV64I-NEXT: sb a6, 28(a2)
+; RV64I-NEXT: srli a6, a5, 56
+; RV64I-NEXT: sb a6, 7(a2)
+; RV64I-NEXT: srli a6, a5, 48
+; RV64I-NEXT: sb a6, 6(a2)
+; RV64I-NEXT: srli a6, a5, 40
+; RV64I-NEXT: sb a6, 5(a2)
+; RV64I-NEXT: srli a6, a5, 32
+; RV64I-NEXT: sb a6, 4(a2)
+; RV64I-NEXT: srli a6, a5, 24
+; RV64I-NEXT: sb a6, 3(a2)
+; RV64I-NEXT: srli a6, a5, 16
+; RV64I-NEXT: sb a6, 2(a2)
+; RV64I-NEXT: srli a5, a5, 8
+; RV64I-NEXT: sb a5, 1(a2)
+; RV64I-NEXT: srli a5, a3, 56
+; RV64I-NEXT: sb a5, 15(a2)
+; RV64I-NEXT: srli a5, a3, 48
+; RV64I-NEXT: sb a5, 14(a2)
+; RV64I-NEXT: srli a5, a3, 40
+; RV64I-NEXT: sb a5, 13(a2)
+; RV64I-NEXT: srli a3, a3, 32
+; RV64I-NEXT: sb a3, 12(a2)
+; RV64I-NEXT: sb a4, 16(a2)
+; RV64I-NEXT: sb a1, 24(a2)
+; RV64I-NEXT: sb a0, 8(a2)
+; RV64I-NEXT: srli a3, a4, 24
+; RV64I-NEXT: sb a3, 19(a2)
+; RV64I-NEXT: srli a3, a4, 16
+; RV64I-NEXT: sb a3, 18(a2)
+; RV64I-NEXT: srli a4, a4, 8
+; RV64I-NEXT: sb a4, 17(a2)
+; RV64I-NEXT: srli a3, a1, 24
+; RV64I-NEXT: sb a3, 27(a2)
+; RV64I-NEXT: srli a3, a1, 16
+; RV64I-NEXT: sb a3, 26(a2)
+; RV64I-NEXT: srli a1, a1, 8
+; RV64I-NEXT: sb a1, 25(a2)
+; RV64I-NEXT: srli a1, a0, 24
+; RV64I-NEXT: sb a1, 11(a2)
+; RV64I-NEXT: srli a1, a0, 16
+; RV64I-NEXT: sb a1, 10(a2)
+; RV64I-NEXT: srli a0, a0, 8
+; RV64I-NEXT: sb a0, 9(a2)
+; RV64I-NEXT: addi sp, sp, 64
+; RV64I-NEXT: ret
+;
+; RV32I-LABEL: shl_32bytes_wordOff:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi sp, sp, -64
+; RV32I-NEXT: lbu a3, 1(a0)
+; RV32I-NEXT: lbu a4, 0(a0)
+; RV32I-NEXT: lbu a5, 2(a0)
+; RV32I-NEXT: lbu a6, 3(a0)
+; RV32I-NEXT: slli a3, a3, 8
+; RV32I-NEXT: or a3, a3, a4
+; RV32I-NEXT: slli a5, a5, 16
+; RV32I-NEXT: slli a6, a6, 24
+; RV32I-NEXT: or a4, a6, a5
+; RV32I-NEXT: or a3, a4, a3
+; RV32I-NEXT: lbu a4, 5(a0)
+; RV32I-NEXT: lbu a5, 4(a0)
+; RV32I-NEXT: lbu a6, 6(a0)
+; RV32I-NEXT: lbu a7, 7(a0)
+; RV32I-NEXT: slli a4, a4, 8
+; RV32I-NEXT: or a4, a4, a5
+; RV32I-NEXT: slli a6, a6, 16
+; RV32I-NEXT: slli a7, a7, 24
+; RV32I-NEXT: or a5, a7, a6
+; RV32I-NEXT: or a4, a5, a4
+; RV32I-NEXT: lbu a5, 9(a0)
+; RV32I-NEXT: lbu a6, 8(a0)
+; RV32I-NEXT: lbu a7, 10(a0)
+; RV32I-NEXT: lbu t0, 11(a0)
+; RV32I-NEXT: slli a5, a5, 8
+; RV32I-NEXT: or a5, a5, a6
+; RV32I-NEXT: slli a7, a7, 16
+; RV32I-NEXT: slli t0, t0, 24
+; RV32I-NEXT: or a6, t0, a7
+; RV32I-NEXT: or a5, a6, a5
+; RV32I-NEXT: lbu a6, 13(a0)
+; RV32I-NEXT: lbu a7, 12(a0)
+; RV32I-NEXT: lbu t0, 14(a0)
+; RV32I-NEXT: lbu t1, 15(a0)
+; RV32I-NEXT: slli a6, a6, 8
+; RV32I-NEXT: or a6, a6, a7
+; RV32I-NEXT: slli t0, t0, 16
+; RV32I-NEXT: slli t1, t1, 24
+; RV32I-NEXT: or a7, t1, t0
+; RV32I-NEXT: or a6, a7, a6
+; RV32I-NEXT: lbu a7, 17(a0)
+; RV32I-NEXT: lbu t0, 16(a0)
+; RV32I-NEXT: lbu t1, 18(a0)
+; RV32I-NEXT: lbu t2, 19(a0)
+; RV32I-NEXT: slli a7, a7, 8
+; RV32I-NEXT: or a7, a7, t0
+; RV32I-NEXT: slli t1, t1, 16
+; RV32I-NEXT: slli t2, t2, 24
+; RV32I-NEXT: or t0, t2, t1
+; RV32I-NEXT: or a7, t0, a7
+; RV32I-NEXT: lbu t0, 21(a0)
+; RV32I-NEXT: lbu t1, 20(a0)
+; RV32I-NEXT: lbu t2, 22(a0)
+; RV32I-NEXT: lbu t3, 23(a0)
+; RV32I-NEXT: slli t0, t0, 8
+; RV32I-NEXT: or t0, t0, t1
+; RV32I-NEXT: slli t2, t2, 16
+; RV32I-NEXT: slli t3, t3, 24
+; RV32I-NEXT: or t1, t3, t2
+; RV32I-NEXT: or t0, t1, t0
+; RV32I-NEXT: lbu t1, 25(a0)
+; RV32I-NEXT: lbu t2, 24(a0)
+; RV32I-NEXT: lbu t3, 26(a0)
+; RV32I-NEXT: lbu t4, 27(a0)
+; RV32I-NEXT: slli t1, t1, 8
+; RV32I-NEXT: or t1, t1, t2
+; RV32I-NEXT: slli t3, t3, 16
+; RV32I-NEXT: slli t4, t4, 24
+; RV32I-NEXT: or t2, t4, t3
+; RV32I-NEXT: or t1, t2, t1
+; RV32I-NEXT: lbu t2, 29(a0)
+; RV32I-NEXT: lbu t3, 28(a0)
+; RV32I-NEXT: lbu t4, 30(a0)
+; RV32I-NEXT: lbu a0, 31(a0)
+; RV32I-NEXT: slli t2, t2, 8
+; RV32I-NEXT: or t2, t2, t3
+; RV32I-NEXT: slli t4, t4, 16
+; RV32I-NEXT: slli a0, a0, 24
+; RV32I-NEXT: or a0, a0, t4
+; RV32I-NEXT: or a0, a0, t2
+; RV32I-NEXT: lbu a1, 0(a1)
+; RV32I-NEXT: sw zero, 28(sp)
+; RV32I-NEXT: sw zero, 24(sp)
+; RV32I-NEXT: sw zero, 20(sp)
+; RV32I-NEXT: sw zero, 16(sp)
+; RV32I-NEXT: sw zero, 12(sp)
+; RV32I-NEXT: sw zero, 8(sp)
+; RV32I-NEXT: sw zero, 4(sp)
+; RV32I-NEXT: sw zero, 0(sp)
+; RV32I-NEXT: sw a0, 60(sp)
+; RV32I-NEXT: sw t1, 56(sp)
+; RV32I-NEXT: sw t0, 52(sp)
+; RV32I-NEXT: sw a7, 48(sp)
+; RV32I-NEXT: sw a6, 44(sp)
+; RV32I-NEXT: sw a5, 40(sp)
+; RV32I-NEXT: sw a4, 36(sp)
+; RV32I-NEXT: sw a3, 32(sp)
+; RV32I-NEXT: slli a1, a1, 2
+; RV32I-NEXT: andi a1, a1, 28
+; RV32I-NEXT: addi a0, sp, 32
+; RV32I-NEXT: sub a3, a0, a1
+; RV32I-NEXT: lw a0, 4(a3)
+; RV32I-NEXT: lw a1, 0(a3)
+; RV32I-NEXT: lw a4, 12(a3)
+; RV32I-NEXT: lw a5, 8(a3)
+; RV32I-NEXT: lw a6, 24(a3)
+; RV32I-NEXT: lw a7, 28(a3)
+; RV32I-NEXT: lw t0, 16(a3)
+; RV32I-NEXT: lw a3, 20(a3)
+; RV32I-NEXT: sb a6, 24(a2)
+; RV32I-NEXT: sb a7, 28(a2)
+; RV32I-NEXT: sb t0, 16(a2)
+; RV32I-NEXT: sb a3, 20(a2)
+; RV32I-NEXT: sb a5, 8(a2)
+; RV32I-NEXT: sb a4, 12(a2)
+; RV32I-NEXT: sb a1, 0(a2)
+; RV32I-NEXT: sb a0, 4(a2)
+; RV32I-NEXT: srli t1, a6, 24
+; RV32I-NEXT: sb t1, 27(a2)
+; RV32I-NEXT: srli t1, a6, 16
+; RV32I-NEXT: sb t1, 26(a2)
+; RV32I-NEXT: srli a6, a6, 8
+; RV32I-NEXT: sb a6, 25(a2)
+; RV32I-NEXT: srli a6, a7, 24
+; RV32I-NEXT: sb a6, 31(a2)
+; RV32I-NEXT: srli a6, a7, 16
+; RV32I-NEXT: sb a6, 30(a2)
+; RV32I-NEXT: srli a6, a7, 8
+; RV32I-NEXT: sb a6, 29(a2)
+; RV32I-NEXT: srli a6, t0, 24
+; RV32I-NEXT: sb a6, 19(a2)
+; RV32I-NEXT: srli a6, t0, 16
+; RV32I-NEXT: sb a6, 18(a2)
+; RV32I-NEXT: srli a6, t0, 8
+; RV32I-NEXT: sb a6, 17(a2)
+; RV32I-NEXT: srli a6, a3, 24
+; RV32I-NEXT: sb a6, 23(a2)
+; RV32I-NEXT: srli a6, a3, 16
+; RV32I-NEXT: sb a6, 22(a2)
+; RV32I-NEXT: srli a3, a3, 8
+; RV32I-NEXT: sb a3, 21(a2)
+; RV32I-NEXT: srli a3, a5, 24
+; RV32I-NEXT: sb a3, 11(a2)
+; RV32I-NEXT: srli a3, a5, 16
+; RV32I-NEXT: sb a3, 10(a2)
+; RV32I-NEXT: srli a5, a5, 8
+; RV32I-NEXT: sb a5, 9(a2)
+; RV32I-NEXT: srli a3, a4, 24
+; RV32I-NEXT: sb a3, 15(a2)
+; RV32I-NEXT: srli a3, a4, 16
+; RV32I-NEXT: sb a3, 14(a2)
+; RV32I-NEXT: srli a4, a4, 8
+; RV32I-NEXT: sb a4, 13(a2)
+; RV32I-NEXT: srli a3, a1, 24
+; RV32I-NEXT: sb a3, 3(a2)
+; RV32I-NEXT: srli a3, a1, 16
+; RV32I-NEXT: sb a3, 2(a2)
+; RV32I-NEXT: srli a1, a1, 8
+; RV32I-NEXT: sb a1, 1(a2)
+; RV32I-NEXT: srli a1, a0, 24
+; RV32I-NEXT: sb a1, 7(a2)
+; RV32I-NEXT: srli a1, a0, 16
+; RV32I-NEXT: sb a1, 6(a2)
+; RV32I-NEXT: srli a0, a0, 8
+; RV32I-NEXT: sb a0, 5(a2)
+; RV32I-NEXT: addi sp, sp, 64
+; RV32I-NEXT: ret
+ %src = load i256, ptr %src.ptr, align 1
+ %wordOff = load i256, ptr %wordOff.ptr, align 1
+ %bitOff = shl i256 %wordOff, 5
+ %res = shl i256 %src, %bitOff
+ store i256 %res, ptr %dst, align 1
+ ret void
+}
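+; shl_32bytes_wordOff shifts by a word-granular amount: the IR scales the
+; offset to bits with `shl i256 %wordOff, 5` (bitOff = wordOff * 32). RV64I
+; still stitches adjacent dwords together with sll/srl pairs, while RV32I
+; degenerates to a word-indexed copy out of the zero-padded stack buffer
+; (`slli a1, a1, 2` / `andi a1, a1, 28`).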
+
+define void @shl_32bytes_dwordOff(ptr %src.ptr, ptr %dwordOff.ptr, ptr %dst) nounwind {
+; RV64I-LABEL: shl_32bytes_dwordOff:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -64
+; RV64I-NEXT: lbu a3, 1(a0)
+; RV64I-NEXT: lbu a4, 0(a0)
+; RV64I-NEXT: lbu a5, 2(a0)
+; RV64I-NEXT: lbu a6, 3(a0)
+; RV64I-NEXT: slli a3, a3, 8
+; RV64I-NEXT: or a3, a3, a4
+; RV64I-NEXT: slli a5, a5, 16
+; RV64I-NEXT: slli a6, a6, 24
+; RV64I-NEXT: or a4, a6, a5
+; RV64I-NEXT: or a3, a4, a3
+; RV64I-NEXT: lbu a4, 5(a0)
+; RV64I-NEXT: lbu a5, 4(a0)
+; RV64I-NEXT: lbu a6, 6(a0)
+; RV64I-NEXT: lbu a7, 7(a0)
+; RV64I-NEXT: slli a4, a4, 8
+; RV64I-NEXT: or a4, a4, a5
+; RV64I-NEXT: slli a6, a6, 16
+; RV64I-NEXT: slli a7, a7, 24
+; RV64I-NEXT: or a5, a7, a6
+; RV64I-NEXT: or a4, a5, a4
+; RV64I-NEXT: slli a4, a4, 32
+; RV64I-NEXT: or a3, a4, a3
+; RV64I-NEXT: lbu a4, 9(a0)
+; RV64I-NEXT: lbu a5, 8(a0)
+; RV64I-NEXT: lbu a6, 10(a0)
+; RV64I-NEXT: lbu a7, 11(a0)
+; RV64I-NEXT: slli a4, a4, 8
+; RV64I-NEXT: or a4, a4, a5
+; RV64I-NEXT: slli a6, a6, 16
+; RV64I-NEXT: slli a7, a7, 24
+; RV64I-NEXT: or a5, a7, a6
+; RV64I-NEXT: or a4, a5, a4
+; RV64I-NEXT: lbu a5, 13(a0)
+; RV64I-NEXT: lbu a6, 12(a0)
+; RV64I-NEXT: lbu a7, 14(a0)
+; RV64I-NEXT: lbu t0, 15(a0)
+; RV64I-NEXT: slli a5, a5, 8
+; RV64I-NEXT: or a5, a5, a6
+; RV64I-NEXT: slli a7, a7, 16
+; RV64I-NEXT: slli t0, t0, 24
+; RV64I-NEXT: or a6, t0, a7
+; RV64I-NEXT: or a5, a6, a5
+; RV64I-NEXT: slli a5, a5, 32
+; RV64I-NEXT: or a4, a5, a4
+; RV64I-NEXT: lbu a5, 17(a0)
+; RV64I-NEXT: lbu a6, 16(a0)
+; RV64I-NEXT: lbu a7, 18(a0)
+; RV64I-NEXT: lbu t0, 19(a0)
+; RV64I-NEXT: slli a5, a5, 8
+; RV64I-NEXT: or a5, a5, a6
+; RV64I-NEXT: slli a7, a7, 16
+; RV64I-NEXT: slli t0, t0, 24
+; RV64I-NEXT: or a6, t0, a7
+; RV64I-NEXT: or a5, a6, a5
+; RV64I-NEXT: lbu a6, 21(a0)
+; RV64I-NEXT: lbu a7, 20(a0)
+; RV64I-NEXT: lbu t0, 22(a0)
+; RV64I-NEXT: lbu t1, 23(a0)
+; RV64I-NEXT: slli a6, a6, 8
+; RV64I-NEXT: or a6, a6, a7
+; RV64I-NEXT: slli t0, t0, 16
+; RV64I-NEXT: slli t1, t1, 24
+; RV64I-NEXT: or a7, t1, t0
+; RV64I-NEXT: or a6, a7, a6
+; RV64I-NEXT: slli a6, a6, 32
+; RV64I-NEXT: or a5, a6, a5
; RV64I-NEXT: lbu a6, 25(a0)
-; RV64I-NEXT: lbu a5, 26(a0)
-; RV64I-NEXT: lbu a4, 27(a0)
-; RV64I-NEXT: lbu a1, 30(a0)
-; RV64I-NEXT: lbu a3, 29(a0)
-; RV64I-NEXT: lbu a0, 28(a0)
-; RV64I-NEXT: lbu t0, 0(t0)
-; RV64I-NEXT: sb a1, 86(sp)
-; RV64I-NEXT: sb a3, 85(sp)
-; RV64I-NEXT: sb a0, 84(sp)
-; RV64I-NEXT: sb a4, 83(sp)
-; RV64I-NEXT: sb a5, 82(sp)
-; RV64I-NEXT: sb a6, 81(sp)
-; RV64I-NEXT: sb t1, 87(sp)
-; RV64I-NEXT: slli t1, t1, 56
-; RV64I-NEXT: sb a7, 80(sp)
-; RV64I-NEXT: sb ra, 79(sp)
-; RV64I-NEXT: sb s11, 78(sp)
-; RV64I-NEXT: sb s10, 77(sp)
-; RV64I-NEXT: sb s9, 76(sp)
-; RV64I-NEXT: sb s8, 75(sp)
-; RV64I-NEXT: sb s7, 74(sp)
-; RV64I-NEXT: sb s6, 73(sp)
-; RV64I-NEXT: sb s5, 72(sp)
-; RV64I-NEXT: sb s4, 71(sp)
-; RV64I-NEXT: sb s3, 70(sp)
-; RV64I-NEXT: sb s2, 69(sp)
-; RV64I-NEXT: sb s1, 68(sp)
-; RV64I-NEXT: sb s0, 67(sp)
-; RV64I-NEXT: sb t6, 66(sp)
-; RV64I-NEXT: sb t5, 65(sp)
-; RV64I-NEXT: sb t4, 64(sp)
-; RV64I-NEXT: sb t3, 63(sp)
-; RV64I-NEXT: sb t2, 62(sp)
-; RV64I-NEXT: ld a0, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: sb a0, 61(sp)
-; RV64I-NEXT: ld a0, 16(sp) # 8-byte Folded Reload
-; RV64I-NEXT: sb a0, 60(sp)
-; RV64I-NEXT: ld a0, 24(sp) # 8-byte Folded Reload
-; RV64I-NEXT: sb a0, 59(sp)
-; RV64I-NEXT: ld a0, 32(sp) # 8-byte Folded Reload
-; RV64I-NEXT: sb a0, 58(sp)
-; RV64I-NEXT: ld a0, 40(sp) # 8-byte Folded Reload
-; RV64I-NEXT: sb a0, 57(sp)
-; RV64I-NEXT: ld a0, 48(sp) # 8-byte Folded Reload
-; RV64I-NEXT: sb a0, 56(sp)
-; RV64I-NEXT: srai a0, t1, 63
-; RV64I-NEXT: sb a0, 112(sp)
-; RV64I-NEXT: sb a0, 104(sp)
-; RV64I-NEXT: sb a0, 96(sp)
-; RV64I-NEXT: sb a0, 88(sp)
+; RV64I-NEXT: lbu a7, 24(a0)
+; RV64I-NEXT: lbu t0, 26(a0)
+; RV64I-NEXT: lbu t1, 27(a0)
+; RV64I-NEXT: slli a6, a6, 8
+; RV64I-NEXT: or a6, a6, a7
+; RV64I-NEXT: slli t0, t0, 16
+; RV64I-NEXT: slli t1, t1, 24
+; RV64I-NEXT: or a7, t1, t0
+; RV64I-NEXT: or a6, a7, a6
+; RV64I-NEXT: lbu a7, 29(a0)
+; RV64I-NEXT: lbu t0, 28(a0)
+; RV64I-NEXT: lbu t1, 30(a0)
+; RV64I-NEXT: lbu a0, 31(a0)
+; RV64I-NEXT: slli a7, a7, 8
+; RV64I-NEXT: or a7, a7, t0
+; RV64I-NEXT: slli t1, t1, 16
+; RV64I-NEXT: slli a0, a0, 24
+; RV64I-NEXT: or a0, a0, t1
+; RV64I-NEXT: or a0, a0, a7
+; RV64I-NEXT: slli a0, a0, 32
+; RV64I-NEXT: or a0, a0, a6
+; RV64I-NEXT: lbu a1, 0(a1)
+; RV64I-NEXT: sd zero, 24(sp)
+; RV64I-NEXT: sd zero, 16(sp)
+; RV64I-NEXT: sd zero, 8(sp)
+; RV64I-NEXT: sd zero, 0(sp)
+; RV64I-NEXT: sd a0, 56(sp)
+; RV64I-NEXT: sd a5, 48(sp)
+; RV64I-NEXT: sd a4, 40(sp)
+; RV64I-NEXT: sd a3, 32(sp)
+; RV64I-NEXT: slli a1, a1, 3
+; RV64I-NEXT: andi a1, a1, 24
+; RV64I-NEXT: addi a0, sp, 32
+; RV64I-NEXT: sub a0, a0, a1
+; RV64I-NEXT: ld a1, 16(a0)
+; RV64I-NEXT: ld a3, 24(a0)
+; RV64I-NEXT: ld a4, 0(a0)
+; RV64I-NEXT: ld a0, 8(a0)
+; RV64I-NEXT: sb a1, 16(a2)
+; RV64I-NEXT: sb a3, 24(a2)
+; RV64I-NEXT: sb a4, 0(a2)
+; RV64I-NEXT: sb a0, 8(a2)
+; RV64I-NEXT: srli a5, a1, 56
+; RV64I-NEXT: sb a5, 23(a2)
+; RV64I-NEXT: srli a5, a1, 48
+; RV64I-NEXT: sb a5, 22(a2)
+; RV64I-NEXT: srli a5, a1, 40
+; RV64I-NEXT: sb a5, 21(a2)
+; RV64I-NEXT: srli a5, a1, 32
+; RV64I-NEXT: sb a5, 20(a2)
+; RV64I-NEXT: srli a5, a1, 24
+; RV64I-NEXT: sb a5, 19(a2)
+; RV64I-NEXT: srli a5, a1, 16
+; RV64I-NEXT: sb a5, 18(a2)
+; RV64I-NEXT: srli a1, a1, 8
+; RV64I-NEXT: sb a1, 17(a2)
+; RV64I-NEXT: srli a1, a3, 56
+; RV64I-NEXT: sb a1, 31(a2)
+; RV64I-NEXT: srli a1, a3, 48
+; RV64I-NEXT: sb a1, 30(a2)
+; RV64I-NEXT: srli a1, a3, 40
+; RV64I-NEXT: sb a1, 29(a2)
+; RV64I-NEXT: srli a1, a3, 32
+; RV64I-NEXT: sb a1, 28(a2)
+; RV64I-NEXT: srli a1, a3, 24
+; RV64I-NEXT: sb a1, 27(a2)
+; RV64I-NEXT: srli a1, a3, 16
+; RV64I-NEXT: sb a1, 26(a2)
+; RV64I-NEXT: srli a3, a3, 8
+; RV64I-NEXT: sb a3, 25(a2)
+; RV64I-NEXT: srli a1, a4, 56
+; RV64I-NEXT: sb a1, 7(a2)
+; RV64I-NEXT: srli a1, a4, 48
+; RV64I-NEXT: sb a1, 6(a2)
+; RV64I-NEXT: srli a1, a4, 40
+; RV64I-NEXT: sb a1, 5(a2)
+; RV64I-NEXT: srli a1, a4, 32
+; RV64I-NEXT: sb a1, 4(a2)
+; RV64I-NEXT: srli a1, a4, 24
+; RV64I-NEXT: sb a1, 3(a2)
+; RV64I-NEXT: srli a1, a4, 16
+; RV64I-NEXT: sb a1, 2(a2)
+; RV64I-NEXT: srli a4, a4, 8
+; RV64I-NEXT: sb a4, 1(a2)
; RV64I-NEXT: srli a1, a0, 56
-; RV64I-NEXT: sb a1, 119(sp)
-; RV64I-NEXT: srli a3, a0, 48
-; RV64I-NEXT: sb a3, 118(sp)
-; RV64I-NEXT: srli a4, a0, 40
-; RV64I-NEXT: sb a4, 117(sp)
-; RV64I-NEXT: srli a5, a0, 32
-; RV64I-NEXT: sb a5, 116(sp)
-; RV64I-NEXT: srli a6, a0, 24
-; RV64I-NEXT: sb a6, 115(sp)
-; RV64I-NEXT: srli a7, a0, 16
-; RV64I-NEXT: sb a7, 114(sp)
+; RV64I-NEXT: sb a1, 15(a2)
+; RV64I-NEXT: srli a1, a0, 48
+; RV64I-NEXT: sb a1, 14(a2)
+; RV64I-NEXT: srli a1, a0, 40
+; RV64I-NEXT: sb a1, 13(a2)
+; RV64I-NEXT: srli a1, a0, 32
+; RV64I-NEXT: sb a1, 12(a2)
+; RV64I-NEXT: srli a1, a0, 24
+; RV64I-NEXT: sb a1, 11(a2)
+; RV64I-NEXT: srli a1, a0, 16
+; RV64I-NEXT: sb a1, 10(a2)
; RV64I-NEXT: srli a0, a0, 8
-; RV64I-NEXT: sb a0, 113(sp)
-; RV64I-NEXT: sb a1, 111(sp)
-; RV64I-NEXT: sb a3, 110(sp)
-; RV64I-NEXT: sb a4, 109(sp)
-; RV64I-NEXT: sb a5, 108(sp)
-; RV64I-NEXT: sb a6, 107(sp)
-; RV64I-NEXT: sb a7, 106(sp)
-; RV64I-NEXT: sb a0, 105(sp)
-; RV64I-NEXT: sb a1, 103(sp)
-; RV64I-NEXT: sb a3, 102(sp)
-; RV64I-NEXT: sb a4, 101(sp)
-; RV64I-NEXT: sb a5, 100(sp)
-; RV64I-NEXT: sb a6, 99(sp)
-; RV64I-NEXT: sb a7, 98(sp)
-; RV64I-NEXT: sb a0, 97(sp)
-; RV64I-NEXT: sb a1, 95(sp)
-; RV64I-NEXT: sb a3, 94(sp)
-; RV64I-NEXT: sb a4, 93(sp)
-; RV64I-NEXT: sb a5, 92(sp)
-; RV64I-NEXT: sb a6, 91(sp)
-; RV64I-NEXT: sb a7, 90(sp)
-; RV64I-NEXT: sb a0, 89(sp)
-; RV64I-NEXT: andi a0, t0, 31
-; RV64I-NEXT: addi a1, sp, 56
-; RV64I-NEXT: add a6, a1, a0
-; RV64I-NEXT: lbu a0, 8(a6)
-; RV64I-NEXT: sd a0, 48(sp) # 8-byte Folded Spill
-; RV64I-NEXT: lbu a0, 9(a6)
-; RV64I-NEXT: sd a0, 40(sp) # 8-byte Folded Spill
-; RV64I-NEXT: lbu a0, 10(a6)
-; RV64I-NEXT: sd a0, 32(sp) # 8-byte Folded Spill
-; RV64I-NEXT: lbu a0, 11(a6)
-; RV64I-NEXT: sd a0, 24(sp) # 8-byte Folded Spill
-; RV64I-NEXT: lbu a0, 12(a6)
-; RV64I-NEXT: sd a0, 16(sp) # 8-byte Folded Spill
-; RV64I-NEXT: lbu a7, 13(a6)
-; RV64I-NEXT: lbu t0, 14(a6)
-; RV64I-NEXT: lbu t1, 15(a6)
-; RV64I-NEXT: lbu t2, 0(a6)
-; RV64I-NEXT: lbu t3, 1(a6)
-; RV64I-NEXT: lbu t4, 2(a6)
-; RV64I-NEXT: lbu t5, 3(a6)
-; RV64I-NEXT: lbu t6, 4(a6)
-; RV64I-NEXT: lbu s0, 5(a6)
-; RV64I-NEXT: lbu s1, 6(a6)
-; RV64I-NEXT: lbu s2, 7(a6)
-; RV64I-NEXT: lbu s3, 24(a6)
-; RV64I-NEXT: lbu s4, 25(a6)
-; RV64I-NEXT: lbu s5, 26(a6)
-; RV64I-NEXT: lbu s6, 27(a6)
-; RV64I-NEXT: lbu s7, 28(a6)
-; RV64I-NEXT: lbu s8, 29(a6)
-; RV64I-NEXT: lbu s9, 30(a6)
-; RV64I-NEXT: lbu s10, 31(a6)
-; RV64I-NEXT: lbu s11, 16(a6)
-; RV64I-NEXT: lbu ra, 17(a6)
-; RV64I-NEXT: lbu a5, 18(a6)
-; RV64I-NEXT: lbu a4, 19(a6)
-; RV64I-NEXT: lbu a0, 23(a6)
-; RV64I-NEXT: lbu a1, 22(a6)
-; RV64I-NEXT: lbu a3, 21(a6)
-; RV64I-NEXT: lbu a6, 20(a6)
-; RV64I-NEXT: sb a0, 23(a2)
-; RV64I-NEXT: sb a1, 22(a2)
-; RV64I-NEXT: sb a3, 21(a2)
-; RV64I-NEXT: sb a6, 20(a2)
-; RV64I-NEXT: sb a4, 19(a2)
-; RV64I-NEXT: sb a5, 18(a2)
-; RV64I-NEXT: sb ra, 17(a2)
-; RV64I-NEXT: sb s11, 16(a2)
-; RV64I-NEXT: sb s10, 31(a2)
-; RV64I-NEXT: sb s9, 30(a2)
-; RV64I-NEXT: sb s8, 29(a2)
-; RV64I-NEXT: sb s7, 28(a2)
-; RV64I-NEXT: sb s6, 27(a2)
-; RV64I-NEXT: sb s5, 26(a2)
-; RV64I-NEXT: sb s4, 25(a2)
-; RV64I-NEXT: sb s3, 24(a2)
-; RV64I-NEXT: sb s2, 7(a2)
-; RV64I-NEXT: sb s1, 6(a2)
-; RV64I-NEXT: sb s0, 5(a2)
-; RV64I-NEXT: sb t6, 4(a2)
-; RV64I-NEXT: sb t5, 3(a2)
-; RV64I-NEXT: sb t4, 2(a2)
-; RV64I-NEXT: sb t3, 1(a2)
-; RV64I-NEXT: sb t2, 0(a2)
-; RV64I-NEXT: sb t1, 15(a2)
-; RV64I-NEXT: sb t0, 14(a2)
-; RV64I-NEXT: sb a7, 13(a2)
-; RV64I-NEXT: ld a0, 16(sp) # 8-byte Folded Reload
-; RV64I-NEXT: sb a0, 12(a2)
-; RV64I-NEXT: ld a0, 24(sp) # 8-byte Folded Reload
-; RV64I-NEXT: sb a0, 11(a2)
-; RV64I-NEXT: ld a0, 32(sp) # 8-byte Folded Reload
-; RV64I-NEXT: sb a0, 10(a2)
-; RV64I-NEXT: ld a0, 40(sp) # 8-byte Folded Reload
; RV64I-NEXT: sb a0, 9(a2)
-; RV64I-NEXT: ld a0, 48(sp) # 8-byte Folded Reload
-; RV64I-NEXT: sb a0, 8(a2)
-; RV64I-NEXT: ld ra, 216(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s0, 208(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s1, 200(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s2, 192(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s3, 184(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s4, 176(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s5, 168(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s6, 160(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s7, 152(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s8, 144(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s9, 136(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s10, 128(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s11, 120(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 224
+; RV64I-NEXT: addi sp, sp, 64
; RV64I-NEXT: ret
;
-; RV32I-LABEL: ashr_32bytes:
+; RV32I-LABEL: shl_32bytes_dwordOff:
; RV32I: # %bb.0:
-; RV32I-NEXT: addi sp, sp, -144
-; RV32I-NEXT: sw ra, 140(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s0, 136(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s1, 132(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s2, 128(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s3, 124(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s4, 120(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s5, 116(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s6, 112(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s7, 108(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s8, 104(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s9, 100(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s10, 96(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s11, 92(sp) # 4-byte Folded Spill
-; RV32I-NEXT: mv t0, a1
-; RV32I-NEXT: lbu t1, 31(a0)
-; RV32I-NEXT: lbu a1, 0(a0)
-; RV32I-NEXT: sw a1, 24(sp) # 4-byte Folded Spill
-; RV32I-NEXT: lbu a1, 1(a0)
-; RV32I-NEXT: sw a1, 20(sp) # 4-byte Folded Spill
-; RV32I-NEXT: lbu a1, 2(a0)
-; RV32I-NEXT: sw a1, 16(sp) # 4-byte Folded Spill
-; RV32I-NEXT: lbu a1, 3(a0)
-; RV32I-NEXT: sw a1, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: lbu a1, 4(a0)
-; RV32I-NEXT: sw a1, 8(sp) # 4-byte Folded Spill
-; RV32I-NEXT: lbu a1, 5(a0)
-; RV32I-NEXT: sw a1, 4(sp) # 4-byte Folded Spill
-; RV32I-NEXT: lbu t2, 6(a0)
-; RV32I-NEXT: lbu t3, 7(a0)
-; RV32I-NEXT: lbu t4, 8(a0)
-; RV32I-NEXT: lbu t5, 9(a0)
-; RV32I-NEXT: lbu t6, 10(a0)
-; RV32I-NEXT: lbu s0, 11(a0)
-; RV32I-NEXT: lbu s1, 12(a0)
-; RV32I-NEXT: lbu s2, 13(a0)
-; RV32I-NEXT: lbu s3, 14(a0)
-; RV32I-NEXT: lbu s4, 15(a0)
-; RV32I-NEXT: lbu s5, 16(a0)
-; RV32I-NEXT: lbu s6, 17(a0)
-; RV32I-NEXT: lbu s7, 18(a0)
-; RV32I-NEXT: lbu s8, 19(a0)
-; RV32I-NEXT: lbu s9, 20(a0)
-; RV32I-NEXT: lbu s10, 21(a0)
-; RV32I-NEXT: lbu s11, 22(a0)
-; RV32I-NEXT: lbu ra, 23(a0)
-; RV32I-NEXT: lbu a7, 24(a0)
-; RV32I-NEXT: lbu a6, 25(a0)
-; RV32I-NEXT: lbu a5, 26(a0)
-; RV32I-NEXT: lbu a4, 27(a0)
-; RV32I-NEXT: lbu a1, 30(a0)
-; RV32I-NEXT: lbu a3, 29(a0)
-; RV32I-NEXT: lbu a0, 28(a0)
-; RV32I-NEXT: lbu t0, 0(t0)
-; RV32I-NEXT: sb a1, 58(sp)
-; RV32I-NEXT: sb a3, 57(sp)
-; RV32I-NEXT: sb a0, 56(sp)
-; RV32I-NEXT: sb a4, 55(sp)
-; RV32I-NEXT: sb a5, 54(sp)
-; RV32I-NEXT: sb a6, 53(sp)
-; RV32I-NEXT: sb t1, 59(sp)
+; RV32I-NEXT: addi sp, sp, -64
+; RV32I-NEXT: lbu a3, 1(a0)
+; RV32I-NEXT: lbu a4, 0(a0)
+; RV32I-NEXT: lbu a5, 2(a0)
+; RV32I-NEXT: lbu a6, 3(a0)
+; RV32I-NEXT: slli a3, a3, 8
+; RV32I-NEXT: or a3, a3, a4
+; RV32I-NEXT: slli a5, a5, 16
+; RV32I-NEXT: slli a6, a6, 24
+; RV32I-NEXT: or a4, a6, a5
+; RV32I-NEXT: or a3, a4, a3
+; RV32I-NEXT: lbu a4, 5(a0)
+; RV32I-NEXT: lbu a5, 4(a0)
+; RV32I-NEXT: lbu a6, 6(a0)
+; RV32I-NEXT: lbu a7, 7(a0)
+; RV32I-NEXT: slli a4, a4, 8
+; RV32I-NEXT: or a4, a4, a5
+; RV32I-NEXT: slli a6, a6, 16
+; RV32I-NEXT: slli a7, a7, 24
+; RV32I-NEXT: or a5, a7, a6
+; RV32I-NEXT: or a4, a5, a4
+; RV32I-NEXT: lbu a5, 9(a0)
+; RV32I-NEXT: lbu a6, 8(a0)
+; RV32I-NEXT: lbu a7, 10(a0)
+; RV32I-NEXT: lbu t0, 11(a0)
+; RV32I-NEXT: slli a5, a5, 8
+; RV32I-NEXT: or a5, a5, a6
+; RV32I-NEXT: slli a7, a7, 16
+; RV32I-NEXT: slli t0, t0, 24
+; RV32I-NEXT: or a6, t0, a7
+; RV32I-NEXT: or a5, a6, a5
+; RV32I-NEXT: lbu a6, 13(a0)
+; RV32I-NEXT: lbu a7, 12(a0)
+; RV32I-NEXT: lbu t0, 14(a0)
+; RV32I-NEXT: lbu t1, 15(a0)
+; RV32I-NEXT: slli a6, a6, 8
+; RV32I-NEXT: or a6, a6, a7
+; RV32I-NEXT: slli t0, t0, 16
; RV32I-NEXT: slli t1, t1, 24
-; RV32I-NEXT: sb a7, 52(sp)
-; RV32I-NEXT: sb ra, 51(sp)
-; RV32I-NEXT: sb s11, 50(sp)
-; RV32I-NEXT: sb s10, 49(sp)
-; RV32I-NEXT: sb s9, 48(sp)
-; RV32I-NEXT: sb s8, 47(sp)
-; RV32I-NEXT: sb s7, 46(sp)
-; RV32I-NEXT: sb s6, 45(sp)
-; RV32I-NEXT: sb s5, 44(sp)
-; RV32I-NEXT: sb s4, 43(sp)
-; RV32I-NEXT: sb s3, 42(sp)
-; RV32I-NEXT: sb s2, 41(sp)
-; RV32I-NEXT: sb s1, 40(sp)
-; RV32I-NEXT: sb s0, 39(sp)
-; RV32I-NEXT: sb t6, 38(sp)
-; RV32I-NEXT: sb t5, 37(sp)
-; RV32I-NEXT: sb t4, 36(sp)
-; RV32I-NEXT: sb t3, 35(sp)
-; RV32I-NEXT: sb t2, 34(sp)
-; RV32I-NEXT: lw a0, 4(sp) # 4-byte Folded Reload
-; RV32I-NEXT: sb a0, 33(sp)
-; RV32I-NEXT: lw a0, 8(sp) # 4-byte Folded Reload
-; RV32I-NEXT: sb a0, 32(sp)
-; RV32I-NEXT: lw a0, 12(sp) # 4-byte Folded Reload
-; RV32I-NEXT: sb a0, 31(sp)
-; RV32I-NEXT: lw a0, 16(sp) # 4-byte Folded Reload
-; RV32I-NEXT: sb a0, 30(sp)
-; RV32I-NEXT: lw a0, 20(sp) # 4-byte Folded Reload
-; RV32I-NEXT: sb a0, 29(sp)
-; RV32I-NEXT: lw a0, 24(sp) # 4-byte Folded Reload
-; RV32I-NEXT: sb a0, 28(sp)
-; RV32I-NEXT: srai a0, t1, 31
-; RV32I-NEXT: sb a0, 88(sp)
-; RV32I-NEXT: sb a0, 84(sp)
-; RV32I-NEXT: sb a0, 80(sp)
-; RV32I-NEXT: sb a0, 76(sp)
-; RV32I-NEXT: sb a0, 72(sp)
-; RV32I-NEXT: sb a0, 68(sp)
-; RV32I-NEXT: sb a0, 64(sp)
-; RV32I-NEXT: sb a0, 60(sp)
+; RV32I-NEXT: or a7, t1, t0
+; RV32I-NEXT: or a6, a7, a6
+; RV32I-NEXT: lbu a7, 17(a0)
+; RV32I-NEXT: lbu t0, 16(a0)
+; RV32I-NEXT: lbu t1, 18(a0)
+; RV32I-NEXT: lbu t2, 19(a0)
+; RV32I-NEXT: slli a7, a7, 8
+; RV32I-NEXT: or a7, a7, t0
+; RV32I-NEXT: slli t1, t1, 16
+; RV32I-NEXT: slli t2, t2, 24
+; RV32I-NEXT: or t0, t2, t1
+; RV32I-NEXT: or a7, t0, a7
+; RV32I-NEXT: lbu t0, 21(a0)
+; RV32I-NEXT: lbu t1, 20(a0)
+; RV32I-NEXT: lbu t2, 22(a0)
+; RV32I-NEXT: lbu t3, 23(a0)
+; RV32I-NEXT: slli t0, t0, 8
+; RV32I-NEXT: or t0, t0, t1
+; RV32I-NEXT: slli t2, t2, 16
+; RV32I-NEXT: slli t3, t3, 24
+; RV32I-NEXT: or t1, t3, t2
+; RV32I-NEXT: or t0, t1, t0
+; RV32I-NEXT: lbu t1, 25(a0)
+; RV32I-NEXT: lbu t2, 24(a0)
+; RV32I-NEXT: lbu t3, 26(a0)
+; RV32I-NEXT: lbu t4, 27(a0)
+; RV32I-NEXT: slli t1, t1, 8
+; RV32I-NEXT: or t1, t1, t2
+; RV32I-NEXT: slli t3, t3, 16
+; RV32I-NEXT: slli t4, t4, 24
+; RV32I-NEXT: or t2, t4, t3
+; RV32I-NEXT: or t1, t2, t1
+; RV32I-NEXT: lbu t2, 29(a0)
+; RV32I-NEXT: lbu t3, 28(a0)
+; RV32I-NEXT: lbu t4, 30(a0)
+; RV32I-NEXT: lbu a0, 31(a0)
+; RV32I-NEXT: slli t2, t2, 8
+; RV32I-NEXT: or t2, t2, t3
+; RV32I-NEXT: slli t4, t4, 16
+; RV32I-NEXT: slli a0, a0, 24
+; RV32I-NEXT: or a0, a0, t4
+; RV32I-NEXT: or a0, a0, t2
+; RV32I-NEXT: lbu a1, 0(a1)
+; RV32I-NEXT: sw zero, 28(sp)
+; RV32I-NEXT: sw zero, 24(sp)
+; RV32I-NEXT: sw zero, 20(sp)
+; RV32I-NEXT: sw zero, 16(sp)
+; RV32I-NEXT: sw zero, 12(sp)
+; RV32I-NEXT: sw zero, 8(sp)
+; RV32I-NEXT: sw zero, 4(sp)
+; RV32I-NEXT: sw zero, 0(sp)
+; RV32I-NEXT: sw a0, 60(sp)
+; RV32I-NEXT: sw t1, 56(sp)
+; RV32I-NEXT: sw t0, 52(sp)
+; RV32I-NEXT: sw a7, 48(sp)
+; RV32I-NEXT: sw a6, 44(sp)
+; RV32I-NEXT: sw a5, 40(sp)
+; RV32I-NEXT: sw a4, 36(sp)
+; RV32I-NEXT: sw a3, 32(sp)
+; RV32I-NEXT: slli a1, a1, 3
+; RV32I-NEXT: andi a1, a1, 24
+; RV32I-NEXT: addi a0, sp, 32
+; RV32I-NEXT: sub a3, a0, a1
+; RV32I-NEXT: lw a0, 4(a3)
+; RV32I-NEXT: lw a1, 0(a3)
+; RV32I-NEXT: lw a4, 12(a3)
+; RV32I-NEXT: lw a5, 8(a3)
+; RV32I-NEXT: lw a6, 24(a3)
+; RV32I-NEXT: lw a7, 28(a3)
+; RV32I-NEXT: lw t0, 16(a3)
+; RV32I-NEXT: lw a3, 20(a3)
+; RV32I-NEXT: sb a6, 24(a2)
+; RV32I-NEXT: sb a7, 28(a2)
+; RV32I-NEXT: sb t0, 16(a2)
+; RV32I-NEXT: sb a3, 20(a2)
+; RV32I-NEXT: sb a5, 8(a2)
+; RV32I-NEXT: sb a4, 12(a2)
+; RV32I-NEXT: sb a1, 0(a2)
+; RV32I-NEXT: sb a0, 4(a2)
+; RV32I-NEXT: srli t1, a6, 24
+; RV32I-NEXT: sb t1, 27(a2)
+; RV32I-NEXT: srli t1, a6, 16
+; RV32I-NEXT: sb t1, 26(a2)
+; RV32I-NEXT: srli a6, a6, 8
+; RV32I-NEXT: sb a6, 25(a2)
+; RV32I-NEXT: srli a6, a7, 24
+; RV32I-NEXT: sb a6, 31(a2)
+; RV32I-NEXT: srli a6, a7, 16
+; RV32I-NEXT: sb a6, 30(a2)
+; RV32I-NEXT: srli a6, a7, 8
+; RV32I-NEXT: sb a6, 29(a2)
+; RV32I-NEXT: srli a6, t0, 24
+; RV32I-NEXT: sb a6, 19(a2)
+; RV32I-NEXT: srli a6, t0, 16
+; RV32I-NEXT: sb a6, 18(a2)
+; RV32I-NEXT: srli a6, t0, 8
+; RV32I-NEXT: sb a6, 17(a2)
+; RV32I-NEXT: srli a6, a3, 24
+; RV32I-NEXT: sb a6, 23(a2)
+; RV32I-NEXT: srli a6, a3, 16
+; RV32I-NEXT: sb a6, 22(a2)
+; RV32I-NEXT: srli a3, a3, 8
+; RV32I-NEXT: sb a3, 21(a2)
+; RV32I-NEXT: srli a3, a5, 24
+; RV32I-NEXT: sb a3, 11(a2)
+; RV32I-NEXT: srli a3, a5, 16
+; RV32I-NEXT: sb a3, 10(a2)
+; RV32I-NEXT: srli a5, a5, 8
+; RV32I-NEXT: sb a5, 9(a2)
+; RV32I-NEXT: srli a3, a4, 24
+; RV32I-NEXT: sb a3, 15(a2)
+; RV32I-NEXT: srli a3, a4, 16
+; RV32I-NEXT: sb a3, 14(a2)
+; RV32I-NEXT: srli a4, a4, 8
+; RV32I-NEXT: sb a4, 13(a2)
+; RV32I-NEXT: srli a3, a1, 24
+; RV32I-NEXT: sb a3, 3(a2)
+; RV32I-NEXT: srli a3, a1, 16
+; RV32I-NEXT: sb a3, 2(a2)
+; RV32I-NEXT: srli a1, a1, 8
+; RV32I-NEXT: sb a1, 1(a2)
; RV32I-NEXT: srli a1, a0, 24
-; RV32I-NEXT: sb a1, 91(sp)
-; RV32I-NEXT: srli a3, a0, 16
-; RV32I-NEXT: sb a3, 90(sp)
+; RV32I-NEXT: sb a1, 7(a2)
+; RV32I-NEXT: srli a1, a0, 16
+; RV32I-NEXT: sb a1, 6(a2)
; RV32I-NEXT: srli a0, a0, 8
-; RV32I-NEXT: sb a0, 89(sp)
-; RV32I-NEXT: sb a1, 87(sp)
-; RV32I-NEXT: sb a3, 86(sp)
-; RV32I-NEXT: sb a0, 85(sp)
-; RV32I-NEXT: sb a1, 83(sp)
-; RV32I-NEXT: sb a3, 82(sp)
-; RV32I-NEXT: sb a0, 81(sp)
-; RV32I-NEXT: sb a1, 79(sp)
-; RV32I-NEXT: sb a3, 78(sp)
-; RV32I-NEXT: sb a0, 77(sp)
-; RV32I-NEXT: sb a1, 75(sp)
-; RV32I-NEXT: sb a3, 74(sp)
-; RV32I-NEXT: sb a0, 73(sp)
-; RV32I-NEXT: sb a1, 71(sp)
-; RV32I-NEXT: sb a3, 70(sp)
-; RV32I-NEXT: sb a0, 69(sp)
-; RV32I-NEXT: sb a1, 67(sp)
-; RV32I-NEXT: sb a3, 66(sp)
-; RV32I-NEXT: sb a0, 65(sp)
-; RV32I-NEXT: sb a1, 63(sp)
-; RV32I-NEXT: sb a3, 62(sp)
-; RV32I-NEXT: sb a0, 61(sp)
-; RV32I-NEXT: andi a0, t0, 31
-; RV32I-NEXT: addi a1, sp, 28
-; RV32I-NEXT: add a6, a1, a0
-; RV32I-NEXT: lbu a0, 6(a6)
-; RV32I-NEXT: sw a0, 24(sp) # 4-byte Folded Spill
-; RV32I-NEXT: lbu a0, 7(a6)
-; RV32I-NEXT: sw a0, 20(sp) # 4-byte Folded Spill
-; RV32I-NEXT: lbu a0, 4(a6)
-; RV32I-NEXT: sw a0, 16(sp) # 4-byte Folded Spill
-; RV32I-NEXT: lbu a0, 5(a6)
-; RV32I-NEXT: sw a0, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: lbu a0, 0(a6)
-; RV32I-NEXT: sw a0, 8(sp) # 4-byte Folded Spill
-; RV32I-NEXT: lbu a7, 1(a6)
-; RV32I-NEXT: lbu t0, 2(a6)
-; RV32I-NEXT: lbu t1, 3(a6)
-; RV32I-NEXT: lbu t2, 14(a6)
-; RV32I-NEXT: lbu t3, 15(a6)
-; RV32I-NEXT: lbu t4, 12(a6)
-; RV32I-NEXT: lbu t5, 13(a6)
-; RV32I-NEXT: lbu t6, 10(a6)
-; RV32I-NEXT: lbu s0, 11(a6)
-; RV32I-NEXT: lbu s1, 8(a6)
-; RV32I-NEXT: lbu s2, 9(a6)
-; RV32I-NEXT: lbu s3, 22(a6)
-; RV32I-NEXT: lbu s4, 23(a6)
-; RV32I-NEXT: lbu s5, 20(a6)
-; RV32I-NEXT: lbu s6, 21(a6)
-; RV32I-NEXT: lbu s7, 18(a6)
-; RV32I-NEXT: lbu s8, 19(a6)
-; RV32I-NEXT: lbu s9, 16(a6)
-; RV32I-NEXT: lbu s10, 17(a6)
-; RV32I-NEXT: lbu s11, 30(a6)
-; RV32I-NEXT: lbu ra, 31(a6)
-; RV32I-NEXT: lbu a5, 28(a6)
-; RV32I-NEXT: lbu a4, 29(a6)
-; RV32I-NEXT: lbu a0, 25(a6)
-; RV32I-NEXT: lbu a1, 24(a6)
-; RV32I-NEXT: lbu a3, 27(a6)
-; RV32I-NEXT: lbu a6, 26(a6)
-; RV32I-NEXT: sb a0, 25(a2)
-; RV32I-NEXT: sb a1, 24(a2)
-; RV32I-NEXT: sb a3, 27(a2)
-; RV32I-NEXT: sb a6, 26(a2)
-; RV32I-NEXT: sb a4, 29(a2)
+; RV32I-NEXT: sb a0, 5(a2)
+; RV32I-NEXT: addi sp, sp, 64
+; RV32I-NEXT: ret
+ %src = load i256, ptr %src.ptr, align 1
+ %dwordOff = load i256, ptr %dwordOff.ptr, align 1
+ %bitOff = shl i256 %dwordOff, 6
+ %res = shl i256 %src, %bitOff
+ store i256 %res, ptr %dst, align 1
+ ret void
+}
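+; shl_32bytes_dwordOff scales the offset with `shl i256 %dwordOff, 6`
+; (bitOff = dwordOff * 64), so the shift amount is always a whole number of
+; 64-bit elements and both lowerings become a plain element copy out of the
+; zero-padded buffer (`slli a1, a1, 3` / `andi a1, a1, 24`), with no
+; cross-element bit shifting at all.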
+
+define void @ashr_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
+; RV64I-LABEL: ashr_32bytes:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -64
+; RV64I-NEXT: lbu a3, 1(a0)
+; RV64I-NEXT: lbu a4, 0(a0)
+; RV64I-NEXT: lbu a5, 2(a0)
+; RV64I-NEXT: lbu a6, 3(a0)
+; RV64I-NEXT: slli a3, a3, 8
+; RV64I-NEXT: or a3, a3, a4
+; RV64I-NEXT: slli a5, a5, 16
+; RV64I-NEXT: slli a6, a6, 24
+; RV64I-NEXT: or a4, a6, a5
+; RV64I-NEXT: or a3, a4, a3
+; RV64I-NEXT: lbu a4, 5(a0)
+; RV64I-NEXT: lbu a5, 4(a0)
+; RV64I-NEXT: lbu a6, 6(a0)
+; RV64I-NEXT: lbu a7, 7(a0)
+; RV64I-NEXT: slli a4, a4, 8
+; RV64I-NEXT: or a4, a4, a5
+; RV64I-NEXT: slli a6, a6, 16
+; RV64I-NEXT: slli a7, a7, 24
+; RV64I-NEXT: or a5, a7, a6
+; RV64I-NEXT: or a4, a5, a4
+; RV64I-NEXT: slli a4, a4, 32
+; RV64I-NEXT: or a3, a4, a3
+; RV64I-NEXT: lbu a4, 9(a0)
+; RV64I-NEXT: lbu a5, 8(a0)
+; RV64I-NEXT: lbu a6, 10(a0)
+; RV64I-NEXT: lbu a7, 11(a0)
+; RV64I-NEXT: slli a4, a4, 8
+; RV64I-NEXT: or a4, a4, a5
+; RV64I-NEXT: slli a6, a6, 16
+; RV64I-NEXT: slli a7, a7, 24
+; RV64I-NEXT: or a5, a7, a6
+; RV64I-NEXT: or a4, a5, a4
+; RV64I-NEXT: lbu a5, 13(a0)
+; RV64I-NEXT: lbu a6, 12(a0)
+; RV64I-NEXT: lbu a7, 14(a0)
+; RV64I-NEXT: lbu t0, 15(a0)
+; RV64I-NEXT: slli a5, a5, 8
+; RV64I-NEXT: or a5, a5, a6
+; RV64I-NEXT: slli a7, a7, 16
+; RV64I-NEXT: slli t0, t0, 24
+; RV64I-NEXT: or a6, t0, a7
+; RV64I-NEXT: or a5, a6, a5
+; RV64I-NEXT: slli a5, a5, 32
+; RV64I-NEXT: or a4, a5, a4
+; RV64I-NEXT: lbu a5, 17(a0)
+; RV64I-NEXT: lbu a6, 16(a0)
+; RV64I-NEXT: lbu a7, 18(a0)
+; RV64I-NEXT: lbu t0, 19(a0)
+; RV64I-NEXT: slli a5, a5, 8
+; RV64I-NEXT: or a5, a5, a6
+; RV64I-NEXT: slli a7, a7, 16
+; RV64I-NEXT: slli t0, t0, 24
+; RV64I-NEXT: or a6, t0, a7
+; RV64I-NEXT: or a5, a6, a5
+; RV64I-NEXT: lbu a6, 21(a0)
+; RV64I-NEXT: lbu a7, 20(a0)
+; RV64I-NEXT: lbu t0, 22(a0)
+; RV64I-NEXT: lbu t1, 23(a0)
+; RV64I-NEXT: slli a6, a6, 8
+; RV64I-NEXT: or a6, a6, a7
+; RV64I-NEXT: slli t0, t0, 16
+; RV64I-NEXT: slli t1, t1, 24
+; RV64I-NEXT: or a7, t1, t0
+; RV64I-NEXT: or a6, a7, a6
+; RV64I-NEXT: slli a6, a6, 32
+; RV64I-NEXT: or a5, a6, a5
+; RV64I-NEXT: lbu a6, 25(a0)
+; RV64I-NEXT: lbu a7, 24(a0)
+; RV64I-NEXT: lbu t0, 26(a0)
+; RV64I-NEXT: lbu t1, 27(a0)
+; RV64I-NEXT: slli a6, a6, 8
+; RV64I-NEXT: or a6, a6, a7
+; RV64I-NEXT: slli t0, t0, 16
+; RV64I-NEXT: slli t1, t1, 24
+; RV64I-NEXT: or a7, t1, t0
+; RV64I-NEXT: or a6, a7, a6
+; RV64I-NEXT: lbu a7, 29(a0)
+; RV64I-NEXT: lbu t0, 28(a0)
+; RV64I-NEXT: lbu t1, 30(a0)
+; RV64I-NEXT: lbu a0, 31(a0)
+; RV64I-NEXT: slli a7, a7, 8
+; RV64I-NEXT: or a7, a7, t0
+; RV64I-NEXT: slli t1, t1, 16
+; RV64I-NEXT: slli a0, a0, 24
+; RV64I-NEXT: or a0, a0, t1
+; RV64I-NEXT: or a0, a0, a7
+; RV64I-NEXT: slli a7, a0, 32
+; RV64I-NEXT: or a6, a7, a6
+; RV64I-NEXT: lbu a7, 1(a1)
+; RV64I-NEXT: lbu t0, 0(a1)
+; RV64I-NEXT: lbu t1, 2(a1)
+; RV64I-NEXT: lbu t2, 3(a1)
+; RV64I-NEXT: slli a7, a7, 8
+; RV64I-NEXT: or a7, a7, t0
+; RV64I-NEXT: slli t1, t1, 16
+; RV64I-NEXT: slli t2, t2, 24
+; RV64I-NEXT: or t0, t2, t1
+; RV64I-NEXT: or a7, t0, a7
+; RV64I-NEXT: lbu t0, 5(a1)
+; RV64I-NEXT: lbu t1, 4(a1)
+; RV64I-NEXT: lbu t2, 6(a1)
+; RV64I-NEXT: lbu a1, 7(a1)
+; RV64I-NEXT: slli t0, t0, 8
+; RV64I-NEXT: or t0, t0, t1
+; RV64I-NEXT: slli t2, t2, 16
+; RV64I-NEXT: slli a1, a1, 24
+; RV64I-NEXT: or a1, a1, t2
+; RV64I-NEXT: or a1, a1, t0
+; RV64I-NEXT: slli a1, a1, 32
+; RV64I-NEXT: or a1, a1, a7
+; RV64I-NEXT: sraiw a0, a0, 31
+; RV64I-NEXT: sd a0, 56(sp)
+; RV64I-NEXT: sd a0, 48(sp)
+; RV64I-NEXT: sd a0, 40(sp)
+; RV64I-NEXT: sd a0, 32(sp)
+; RV64I-NEXT: sd a6, 24(sp)
+; RV64I-NEXT: sd a5, 16(sp)
+; RV64I-NEXT: sd a4, 8(sp)
+; RV64I-NEXT: sd a3, 0(sp)
+; RV64I-NEXT: andi a0, a1, 24
+; RV64I-NEXT: mv a3, sp
+; RV64I-NEXT: add a3, a3, a0
+; RV64I-NEXT: ld a4, 8(a3)
+; RV64I-NEXT: slli a1, a1, 3
+; RV64I-NEXT: srl a5, a4, a1
+; RV64I-NEXT: ld a6, 16(a3)
+; RV64I-NEXT: andi a0, a1, 56
+; RV64I-NEXT: xori a7, a0, 63
+; RV64I-NEXT: ld t0, 0(a3)
+; RV64I-NEXT: slli a0, a6, 1
+; RV64I-NEXT: sll a0, a0, a7
+; RV64I-NEXT: or a0, a5, a0
+; RV64I-NEXT: srl t0, t0, a1
+; RV64I-NEXT: slli a4, a4, 1
+; RV64I-NEXT: ld a3, 24(a3)
+; RV64I-NEXT: sll a4, a4, a7
+; RV64I-NEXT: or a4, t0, a4
+; RV64I-NEXT: srl a6, a6, a1
+; RV64I-NEXT: slli t1, a3, 1
+; RV64I-NEXT: sll a7, t1, a7
+; RV64I-NEXT: or a7, a6, a7
+; RV64I-NEXT: sra a1, a3, a1
+; RV64I-NEXT: sb a6, 16(a2)
+; RV64I-NEXT: sb a1, 24(a2)
+; RV64I-NEXT: sb t0, 0(a2)
+; RV64I-NEXT: sb a5, 8(a2)
+; RV64I-NEXT: srli a3, a1, 56
+; RV64I-NEXT: sb a3, 31(a2)
+; RV64I-NEXT: srli a3, a1, 48
+; RV64I-NEXT: sb a3, 30(a2)
+; RV64I-NEXT: srli a3, a1, 40
+; RV64I-NEXT: sb a3, 29(a2)
+; RV64I-NEXT: srli a3, a1, 32
+; RV64I-NEXT: sb a3, 28(a2)
+; RV64I-NEXT: srli a3, a1, 24
+; RV64I-NEXT: sb a3, 27(a2)
+; RV64I-NEXT: srli a3, a1, 16
+; RV64I-NEXT: sb a3, 26(a2)
+; RV64I-NEXT: srli a1, a1, 8
+; RV64I-NEXT: sb a1, 25(a2)
+; RV64I-NEXT: srli a1, a7, 56
+; RV64I-NEXT: sb a1, 23(a2)
+; RV64I-NEXT: srli a1, a7, 48
+; RV64I-NEXT: sb a1, 22(a2)
+; RV64I-NEXT: srli a1, a7, 40
+; RV64I-NEXT: sb a1, 21(a2)
+; RV64I-NEXT: srli a1, a7, 32
+; RV64I-NEXT: sb a1, 20(a2)
+; RV64I-NEXT: srli a1, a7, 24
+; RV64I-NEXT: sb a1, 19(a2)
+; RV64I-NEXT: srli a1, a7, 16
+; RV64I-NEXT: sb a1, 18(a2)
+; RV64I-NEXT: srli a1, a7, 8
+; RV64I-NEXT: sb a1, 17(a2)
+; RV64I-NEXT: srli a1, a4, 56
+; RV64I-NEXT: sb a1, 7(a2)
+; RV64I-NEXT: srli a1, a4, 48
+; RV64I-NEXT: sb a1, 6(a2)
+; RV64I-NEXT: srli a1, a4, 40
+; RV64I-NEXT: sb a1, 5(a2)
+; RV64I-NEXT: srli a1, a4, 32
+; RV64I-NEXT: sb a1, 4(a2)
+; RV64I-NEXT: srli a1, a4, 24
+; RV64I-NEXT: sb a1, 3(a2)
+; RV64I-NEXT: srli a1, a4, 16
+; RV64I-NEXT: sb a1, 2(a2)
+; RV64I-NEXT: srli a4, a4, 8
+; RV64I-NEXT: sb a4, 1(a2)
+; RV64I-NEXT: srli a1, a0, 56
+; RV64I-NEXT: sb a1, 15(a2)
+; RV64I-NEXT: srli a1, a0, 48
+; RV64I-NEXT: sb a1, 14(a2)
+; RV64I-NEXT: srli a1, a0, 40
+; RV64I-NEXT: sb a1, 13(a2)
+; RV64I-NEXT: srli a1, a0, 32
+; RV64I-NEXT: sb a1, 12(a2)
+; RV64I-NEXT: srli a1, a0, 24
+; RV64I-NEXT: sb a1, 11(a2)
+; RV64I-NEXT: srli a1, a0, 16
+; RV64I-NEXT: sb a1, 10(a2)
+; RV64I-NEXT: srli a0, a0, 8
+; RV64I-NEXT: sb a0, 9(a2)
+; RV64I-NEXT: addi sp, sp, 64
+; RV64I-NEXT: ret
+;
+; RV32I-LABEL: ashr_32bytes:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi sp, sp, -80
+; RV32I-NEXT: sw s0, 76(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s1, 72(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s2, 68(sp) # 4-byte Folded Spill
+; RV32I-NEXT: lbu a3, 1(a0)
+; RV32I-NEXT: lbu a4, 0(a0)
+; RV32I-NEXT: lbu a5, 2(a0)
+; RV32I-NEXT: lbu a6, 3(a0)
+; RV32I-NEXT: slli a3, a3, 8
+; RV32I-NEXT: or a3, a3, a4
+; RV32I-NEXT: slli a5, a5, 16
+; RV32I-NEXT: slli a6, a6, 24
+; RV32I-NEXT: or a4, a6, a5
+; RV32I-NEXT: or a3, a4, a3
+; RV32I-NEXT: lbu a4, 5(a0)
+; RV32I-NEXT: lbu a5, 4(a0)
+; RV32I-NEXT: lbu a6, 6(a0)
+; RV32I-NEXT: lbu a7, 7(a0)
+; RV32I-NEXT: slli a4, a4, 8
+; RV32I-NEXT: or a4, a4, a5
+; RV32I-NEXT: slli a6, a6, 16
+; RV32I-NEXT: slli a7, a7, 24
+; RV32I-NEXT: or a5, a7, a6
+; RV32I-NEXT: or a4, a5, a4
+; RV32I-NEXT: lbu a5, 9(a0)
+; RV32I-NEXT: lbu a6, 8(a0)
+; RV32I-NEXT: lbu a7, 10(a0)
+; RV32I-NEXT: lbu t0, 11(a0)
+; RV32I-NEXT: slli a5, a5, 8
+; RV32I-NEXT: or a5, a5, a6
+; RV32I-NEXT: slli a7, a7, 16
+; RV32I-NEXT: slli t0, t0, 24
+; RV32I-NEXT: or a6, t0, a7
+; RV32I-NEXT: or a5, a6, a5
+; RV32I-NEXT: lbu a6, 13(a0)
+; RV32I-NEXT: lbu a7, 12(a0)
+; RV32I-NEXT: lbu t0, 14(a0)
+; RV32I-NEXT: lbu t1, 15(a0)
+; RV32I-NEXT: slli a6, a6, 8
+; RV32I-NEXT: or a6, a6, a7
+; RV32I-NEXT: slli t0, t0, 16
+; RV32I-NEXT: slli t1, t1, 24
+; RV32I-NEXT: or a7, t1, t0
+; RV32I-NEXT: or a6, a7, a6
+; RV32I-NEXT: lbu a7, 17(a0)
+; RV32I-NEXT: lbu t0, 16(a0)
+; RV32I-NEXT: lbu t1, 18(a0)
+; RV32I-NEXT: lbu t2, 19(a0)
+; RV32I-NEXT: slli a7, a7, 8
+; RV32I-NEXT: or a7, a7, t0
+; RV32I-NEXT: slli t1, t1, 16
+; RV32I-NEXT: slli t2, t2, 24
+; RV32I-NEXT: or t0, t2, t1
+; RV32I-NEXT: or a7, t0, a7
+; RV32I-NEXT: lbu t0, 21(a0)
+; RV32I-NEXT: lbu t1, 20(a0)
+; RV32I-NEXT: lbu t2, 22(a0)
+; RV32I-NEXT: lbu t3, 23(a0)
+; RV32I-NEXT: slli t0, t0, 8
+; RV32I-NEXT: or t0, t0, t1
+; RV32I-NEXT: slli t2, t2, 16
+; RV32I-NEXT: slli t3, t3, 24
+; RV32I-NEXT: or t1, t3, t2
+; RV32I-NEXT: or t0, t1, t0
+; RV32I-NEXT: lbu t1, 25(a0)
+; RV32I-NEXT: lbu t2, 24(a0)
+; RV32I-NEXT: lbu t3, 26(a0)
+; RV32I-NEXT: lbu t4, 27(a0)
+; RV32I-NEXT: slli t1, t1, 8
+; RV32I-NEXT: or t1, t1, t2
+; RV32I-NEXT: slli t3, t3, 16
+; RV32I-NEXT: slli t4, t4, 24
+; RV32I-NEXT: or t2, t4, t3
+; RV32I-NEXT: or t1, t2, t1
+; RV32I-NEXT: lbu t2, 29(a0)
+; RV32I-NEXT: lbu t3, 28(a0)
+; RV32I-NEXT: lbu t4, 30(a0)
+; RV32I-NEXT: lbu a0, 31(a0)
+; RV32I-NEXT: slli t2, t2, 8
+; RV32I-NEXT: or t2, t2, t3
+; RV32I-NEXT: slli t4, t4, 16
+; RV32I-NEXT: slli a0, a0, 24
+; RV32I-NEXT: or t3, a0, t4
+; RV32I-NEXT: or t2, t3, t2
+; RV32I-NEXT: lbu t3, 1(a1)
+; RV32I-NEXT: lbu t4, 0(a1)
+; RV32I-NEXT: lbu t5, 2(a1)
+; RV32I-NEXT: lbu a1, 3(a1)
+; RV32I-NEXT: slli t3, t3, 8
+; RV32I-NEXT: or t3, t3, t4
+; RV32I-NEXT: slli t5, t5, 16
+; RV32I-NEXT: slli a1, a1, 24
+; RV32I-NEXT: or a1, a1, t5
+; RV32I-NEXT: or a1, a1, t3
+; RV32I-NEXT: srai a0, a0, 31
+; RV32I-NEXT: sw a0, 60(sp)
+; RV32I-NEXT: sw a0, 56(sp)
+; RV32I-NEXT: sw a0, 52(sp)
+; RV32I-NEXT: sw a0, 48(sp)
+; RV32I-NEXT: sw a0, 44(sp)
+; RV32I-NEXT: sw a0, 40(sp)
+; RV32I-NEXT: sw a0, 36(sp)
+; RV32I-NEXT: sw a0, 32(sp)
+; RV32I-NEXT: sw t2, 28(sp)
+; RV32I-NEXT: sw t1, 24(sp)
+; RV32I-NEXT: sw t0, 20(sp)
+; RV32I-NEXT: sw a7, 16(sp)
+; RV32I-NEXT: sw a6, 12(sp)
+; RV32I-NEXT: sw a5, 8(sp)
+; RV32I-NEXT: sw a4, 4(sp)
+; RV32I-NEXT: sw a3, 0(sp)
+; RV32I-NEXT: andi a0, a1, 28
+; RV32I-NEXT: mv a3, sp
+; RV32I-NEXT: add a5, a3, a0
+; RV32I-NEXT: lw a3, 4(a5)
+; RV32I-NEXT: slli a6, a1, 3
+; RV32I-NEXT: srl a4, a3, a6
+; RV32I-NEXT: lw a7, 8(a5)
+; RV32I-NEXT: andi a0, a6, 24
+; RV32I-NEXT: xori t0, a0, 31
+; RV32I-NEXT: lw a1, 0(a5)
+; RV32I-NEXT: slli a0, a7, 1
+; RV32I-NEXT: sll a0, a0, t0
+; RV32I-NEXT: or a0, a4, a0
+; RV32I-NEXT: srl t1, a1, a6
+; RV32I-NEXT: slli a3, a3, 1
+; RV32I-NEXT: lw t2, 12(a5)
+; RV32I-NEXT: lw t3, 16(a5)
+; RV32I-NEXT: sll a1, a3, t0
+; RV32I-NEXT: or a1, t1, a1
+; RV32I-NEXT: srl t4, t2, a6
+; RV32I-NEXT: slli a3, t3, 1
+; RV32I-NEXT: sll a3, a3, t0
+; RV32I-NEXT: or a3, t4, a3
+; RV32I-NEXT: srl a7, a7, a6
+; RV32I-NEXT: slli t2, t2, 1
+; RV32I-NEXT: lw t5, 20(a5)
+; RV32I-NEXT: lw t6, 24(a5)
+; RV32I-NEXT: sll t2, t2, t0
+; RV32I-NEXT: or t2, a7, t2
+; RV32I-NEXT: srl s0, t5, a6
+; RV32I-NEXT: slli s1, t6, 1
+; RV32I-NEXT: sll s1, s1, t0
+; RV32I-NEXT: or s1, s0, s1
+; RV32I-NEXT: srl t3, t3, a6
+; RV32I-NEXT: slli t5, t5, 1
+; RV32I-NEXT: lw a5, 28(a5)
+; RV32I-NEXT: sll t5, t5, t0
+; RV32I-NEXT: or t5, t3, t5
+; RV32I-NEXT: srl t6, t6, a6
+; RV32I-NEXT: slli s2, a5, 1
+; RV32I-NEXT: sll t0, s2, t0
+; RV32I-NEXT: or t0, t6, t0
+; RV32I-NEXT: sra a5, a5, a6
+; RV32I-NEXT: sb t6, 24(a2)
; RV32I-NEXT: sb a5, 28(a2)
-; RV32I-NEXT: sb ra, 31(a2)
-; RV32I-NEXT: sb s11, 30(a2)
-; RV32I-NEXT: sb s10, 17(a2)
-; RV32I-NEXT: sb s9, 16(a2)
-; RV32I-NEXT: sb s8, 19(a2)
-; RV32I-NEXT: sb s7, 18(a2)
-; RV32I-NEXT: sb s6, 21(a2)
-; RV32I-NEXT: sb s5, 20(a2)
-; RV32I-NEXT: sb s4, 23(a2)
-; RV32I-NEXT: sb s3, 22(a2)
-; RV32I-NEXT: sb s2, 9(a2)
-; RV32I-NEXT: sb s1, 8(a2)
-; RV32I-NEXT: sb s0, 11(a2)
-; RV32I-NEXT: sb t6, 10(a2)
-; RV32I-NEXT: sb t5, 13(a2)
+; RV32I-NEXT: sb t3, 16(a2)
+; RV32I-NEXT: sb s0, 20(a2)
+; RV32I-NEXT: sb a7, 8(a2)
; RV32I-NEXT: sb t4, 12(a2)
-; RV32I-NEXT: sb t3, 15(a2)
-; RV32I-NEXT: sb t2, 14(a2)
-; RV32I-NEXT: sb t1, 3(a2)
-; RV32I-NEXT: sb t0, 2(a2)
-; RV32I-NEXT: sb a7, 1(a2)
-; RV32I-NEXT: lw a0, 8(sp) # 4-byte Folded Reload
-; RV32I-NEXT: sb a0, 0(a2)
-; RV32I-NEXT: lw a0, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT: sb t1, 0(a2)
+; RV32I-NEXT: sb a4, 4(a2)
+; RV32I-NEXT: srli a4, a5, 24
+; RV32I-NEXT: sb a4, 31(a2)
+; RV32I-NEXT: srli a4, a5, 16
+; RV32I-NEXT: sb a4, 30(a2)
+; RV32I-NEXT: srli a5, a5, 8
+; RV32I-NEXT: sb a5, 29(a2)
+; RV32I-NEXT: srli a4, t0, 24
+; RV32I-NEXT: sb a4, 27(a2)
+; RV32I-NEXT: srli a4, t0, 16
+; RV32I-NEXT: sb a4, 26(a2)
+; RV32I-NEXT: srli a4, t0, 8
+; RV32I-NEXT: sb a4, 25(a2)
+; RV32I-NEXT: srli a4, t5, 24
+; RV32I-NEXT: sb a4, 19(a2)
+; RV32I-NEXT: srli a4, t5, 16
+; RV32I-NEXT: sb a4, 18(a2)
+; RV32I-NEXT: srli a4, t5, 8
+; RV32I-NEXT: sb a4, 17(a2)
+; RV32I-NEXT: srli a4, s1, 24
+; RV32I-NEXT: sb a4, 23(a2)
+; RV32I-NEXT: srli a4, s1, 16
+; RV32I-NEXT: sb a4, 22(a2)
+; RV32I-NEXT: srli s1, s1, 8
+; RV32I-NEXT: sb s1, 21(a2)
+; RV32I-NEXT: srli a4, t2, 24
+; RV32I-NEXT: sb a4, 11(a2)
+; RV32I-NEXT: srli a4, t2, 16
+; RV32I-NEXT: sb a4, 10(a2)
+; RV32I-NEXT: srli a4, t2, 8
+; RV32I-NEXT: sb a4, 9(a2)
+; RV32I-NEXT: srli a4, a3, 24
+; RV32I-NEXT: sb a4, 15(a2)
+; RV32I-NEXT: srli a4, a3, 16
+; RV32I-NEXT: sb a4, 14(a2)
+; RV32I-NEXT: srli a3, a3, 8
+; RV32I-NEXT: sb a3, 13(a2)
+; RV32I-NEXT: srli a3, a1, 24
+; RV32I-NEXT: sb a3, 3(a2)
+; RV32I-NEXT: srli a3, a1, 16
+; RV32I-NEXT: sb a3, 2(a2)
+; RV32I-NEXT: srli a1, a1, 8
+; RV32I-NEXT: sb a1, 1(a2)
+; RV32I-NEXT: srli a1, a0, 24
+; RV32I-NEXT: sb a1, 7(a2)
+; RV32I-NEXT: srli a1, a0, 16
+; RV32I-NEXT: sb a1, 6(a2)
+; RV32I-NEXT: srli a0, a0, 8
; RV32I-NEXT: sb a0, 5(a2)
-; RV32I-NEXT: lw a0, 16(sp) # 4-byte Folded Reload
-; RV32I-NEXT: sb a0, 4(a2)
-; RV32I-NEXT: lw a0, 20(sp) # 4-byte Folded Reload
-; RV32I-NEXT: sb a0, 7(a2)
-; RV32I-NEXT: lw a0, 24(sp) # 4-byte Folded Reload
-; RV32I-NEXT: sb a0, 6(a2)
-; RV32I-NEXT: lw ra, 140(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s0, 136(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s1, 132(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s2, 128(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s3, 124(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s4, 120(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s5, 116(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s6, 112(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s7, 108(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s8, 104(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s9, 100(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s10, 96(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s11, 92(sp) # 4-byte Folded Reload
-; RV32I-NEXT: addi sp, sp, 144
+; RV32I-NEXT: lw s0, 76(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s1, 72(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s2, 68(sp) # 4-byte Folded Reload
+; RV32I-NEXT: addi sp, sp, 80
; RV32I-NEXT: ret
%src = load i256, ptr %src.ptr, align 1
%byteOff = load i256, ptr %byteOff.ptr, align 1
@@ -2614,3 +4740,744 @@ define void @ashr_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
store i256 %res, ptr %dst, align 1
ret void
}
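+; ashr_32bytes differs from the logical shifts only in the fill value: the
+; top word is sign-extended (`sraiw a0, a0, 31` on RV64I, `srai` on RV32I)
+; and replicated across the upper half of the stack buffer, after which the
+; byte-granular amount is split into an element index and a residual bit
+; shift that is stitched across elements with srl/sll pairs.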
+
+define void @ashr_32bytes_wordOff(ptr %src.ptr, ptr %wordOff.ptr, ptr %dst) nounwind {
+; RV64I-LABEL: ashr_32bytes_wordOff:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -64
+; RV64I-NEXT: lbu a3, 1(a0)
+; RV64I-NEXT: lbu a4, 0(a0)
+; RV64I-NEXT: lbu a5, 2(a0)
+; RV64I-NEXT: lbu a6, 3(a0)
+; RV64I-NEXT: slli a3, a3, 8
+; RV64I-NEXT: or a3, a3, a4
+; RV64I-NEXT: slli a5, a5, 16
+; RV64I-NEXT: slli a6, a6, 24
+; RV64I-NEXT: or a4, a6, a5
+; RV64I-NEXT: or a3, a4, a3
+; RV64I-NEXT: lbu a4, 5(a0)
+; RV64I-NEXT: lbu a5, 4(a0)
+; RV64I-NEXT: lbu a6, 6(a0)
+; RV64I-NEXT: lbu a7, 7(a0)
+; RV64I-NEXT: slli a4, a4, 8
+; RV64I-NEXT: or a4, a4, a5
+; RV64I-NEXT: slli a6, a6, 16
+; RV64I-NEXT: slli a7, a7, 24
+; RV64I-NEXT: or a5, a7, a6
+; RV64I-NEXT: or a4, a5, a4
+; RV64I-NEXT: slli a4, a4, 32
+; RV64I-NEXT: or a3, a4, a3
+; RV64I-NEXT: lbu a4, 9(a0)
+; RV64I-NEXT: lbu a5, 8(a0)
+; RV64I-NEXT: lbu a6, 10(a0)
+; RV64I-NEXT: lbu a7, 11(a0)
+; RV64I-NEXT: slli a4, a4, 8
+; RV64I-NEXT: or a4, a4, a5
+; RV64I-NEXT: slli a6, a6, 16
+; RV64I-NEXT: slli a7, a7, 24
+; RV64I-NEXT: or a5, a7, a6
+; RV64I-NEXT: or a4, a5, a4
+; RV64I-NEXT: lbu a5, 13(a0)
+; RV64I-NEXT: lbu a6, 12(a0)
+; RV64I-NEXT: lbu a7, 14(a0)
+; RV64I-NEXT: lbu t0, 15(a0)
+; RV64I-NEXT: slli a5, a5, 8
+; RV64I-NEXT: or a5, a5, a6
+; RV64I-NEXT: slli a7, a7, 16
+; RV64I-NEXT: slli t0, t0, 24
+; RV64I-NEXT: or a6, t0, a7
+; RV64I-NEXT: or a5, a6, a5
+; RV64I-NEXT: slli a5, a5, 32
+; RV64I-NEXT: or a4, a5, a4
+; RV64I-NEXT: lbu a5, 17(a0)
+; RV64I-NEXT: lbu a6, 16(a0)
+; RV64I-NEXT: lbu a7, 18(a0)
+; RV64I-NEXT: lbu t0, 19(a0)
+; RV64I-NEXT: slli a5, a5, 8
+; RV64I-NEXT: or a5, a5, a6
+; RV64I-NEXT: slli a7, a7, 16
+; RV64I-NEXT: slli t0, t0, 24
+; RV64I-NEXT: or a6, t0, a7
+; RV64I-NEXT: or a5, a6, a5
+; RV64I-NEXT: lbu a6, 21(a0)
+; RV64I-NEXT: lbu a7, 20(a0)
+; RV64I-NEXT: lbu t0, 22(a0)
+; RV64I-NEXT: lbu t1, 23(a0)
+; RV64I-NEXT: slli a6, a6, 8
+; RV64I-NEXT: or a6, a6, a7
+; RV64I-NEXT: slli t0, t0, 16
+; RV64I-NEXT: slli t1, t1, 24
+; RV64I-NEXT: or a7, t1, t0
+; RV64I-NEXT: or a6, a7, a6
+; RV64I-NEXT: slli a6, a6, 32
+; RV64I-NEXT: or a5, a6, a5
+; RV64I-NEXT: lbu a6, 25(a0)
+; RV64I-NEXT: lbu a7, 24(a0)
+; RV64I-NEXT: lbu t0, 26(a0)
+; RV64I-NEXT: lbu t1, 27(a0)
+; RV64I-NEXT: slli a6, a6, 8
+; RV64I-NEXT: or a6, a6, a7
+; RV64I-NEXT: slli t0, t0, 16
+; RV64I-NEXT: slli t1, t1, 24
+; RV64I-NEXT: or a7, t1, t0
+; RV64I-NEXT: or a6, a7, a6
+; RV64I-NEXT: lbu a7, 29(a0)
+; RV64I-NEXT: lbu t0, 28(a0)
+; RV64I-NEXT: lbu t1, 30(a0)
+; RV64I-NEXT: lbu a0, 31(a0)
+; RV64I-NEXT: slli a7, a7, 8
+; RV64I-NEXT: or a7, a7, t0
+; RV64I-NEXT: slli t1, t1, 16
+; RV64I-NEXT: slli a0, a0, 24
+; RV64I-NEXT: or a0, a0, t1
+; RV64I-NEXT: or a0, a0, a7
+; RV64I-NEXT: slli a7, a0, 32
+; RV64I-NEXT: or a6, a7, a6
+; RV64I-NEXT: lbu a7, 1(a1)
+; RV64I-NEXT: lbu t0, 0(a1)
+; RV64I-NEXT: lbu t1, 2(a1)
+; RV64I-NEXT: lbu t2, 3(a1)
+; RV64I-NEXT: slli a7, a7, 8
+; RV64I-NEXT: or a7, a7, t0
+; RV64I-NEXT: slli t1, t1, 16
+; RV64I-NEXT: slli t2, t2, 24
+; RV64I-NEXT: or t0, t2, t1
+; RV64I-NEXT: or a7, t0, a7
+; RV64I-NEXT: lbu t0, 5(a1)
+; RV64I-NEXT: lbu t1, 4(a1)
+; RV64I-NEXT: lbu t2, 6(a1)
+; RV64I-NEXT: lbu a1, 7(a1)
+; RV64I-NEXT: slli t0, t0, 8
+; RV64I-NEXT: or t0, t0, t1
+; RV64I-NEXT: slli t2, t2, 16
+; RV64I-NEXT: slli a1, a1, 24
+; RV64I-NEXT: or a1, a1, t2
+; RV64I-NEXT: or a1, a1, t0
+; RV64I-NEXT: slli a1, a1, 32
+; RV64I-NEXT: or a1, a1, a7
+; RV64I-NEXT: sraiw a0, a0, 31
+; RV64I-NEXT: sd a0, 56(sp)
+; RV64I-NEXT: sd a0, 48(sp)
+; RV64I-NEXT: sd a0, 40(sp)
+; RV64I-NEXT: sd a0, 32(sp)
+; RV64I-NEXT: sd a6, 24(sp)
+; RV64I-NEXT: sd a5, 16(sp)
+; RV64I-NEXT: sd a4, 8(sp)
+; RV64I-NEXT: sd a3, 0(sp)
+; RV64I-NEXT: slli a0, a1, 2
+; RV64I-NEXT: andi a0, a0, 24
+; RV64I-NEXT: mv a3, sp
+; RV64I-NEXT: add a3, a3, a0
+; RV64I-NEXT: ld a4, 8(a3)
+; RV64I-NEXT: slli a5, a1, 5
+; RV64I-NEXT: srl a1, a4, a5
+; RV64I-NEXT: ld a6, 16(a3)
+; RV64I-NEXT: andi a0, a5, 32
+; RV64I-NEXT: xori a7, a0, 63
+; RV64I-NEXT: ld t0, 0(a3)
+; RV64I-NEXT: slli a0, a6, 1
+; RV64I-NEXT: sll a0, a0, a7
+; RV64I-NEXT: or a0, a1, a0
+; RV64I-NEXT: srl t0, t0, a5
+; RV64I-NEXT: slli a4, a4, 1
+; RV64I-NEXT: ld a3, 24(a3)
+; RV64I-NEXT: sll a4, a4, a7
+; RV64I-NEXT: or a4, t0, a4
+; RV64I-NEXT: srl a6, a6, a5
+; RV64I-NEXT: slli t1, a3, 1
+; RV64I-NEXT: sll a7, t1, a7
+; RV64I-NEXT: or a7, a6, a7
+; RV64I-NEXT: sra a3, a3, a5
+; RV64I-NEXT: sb a6, 16(a2)
+; RV64I-NEXT: sb a3, 24(a2)
+; RV64I-NEXT: sb t0, 0(a2)
+; RV64I-NEXT: sb a1, 8(a2)
+; RV64I-NEXT: srli a5, a6, 24
+; RV64I-NEXT: sb a5, 19(a2)
+; RV64I-NEXT: srli a5, a6, 16
+; RV64I-NEXT: sb a5, 18(a2)
+; RV64I-NEXT: srli a5, a6, 8
+; RV64I-NEXT: sb a5, 17(a2)
+; RV64I-NEXT: srli a5, a3, 56
+; RV64I-NEXT: sb a5, 31(a2)
+; RV64I-NEXT: srli a5, a3, 48
+; RV64I-NEXT: sb a5, 30(a2)
+; RV64I-NEXT: srli a5, a3, 40
+; RV64I-NEXT: sb a5, 29(a2)
+; RV64I-NEXT: srli a5, a3, 32
+; RV64I-NEXT: sb a5, 28(a2)
+; RV64I-NEXT: srli a5, a3, 24
+; RV64I-NEXT: sb a5, 27(a2)
+; RV64I-NEXT: srli a5, a3, 16
+; RV64I-NEXT: sb a5, 26(a2)
+; RV64I-NEXT: srli a3, a3, 8
+; RV64I-NEXT: sb a3, 25(a2)
+; RV64I-NEXT: srli a3, t0, 24
+; RV64I-NEXT: sb a3, 3(a2)
+; RV64I-NEXT: srli a3, t0, 16
+; RV64I-NEXT: sb a3, 2(a2)
+; RV64I-NEXT: srli a3, t0, 8
+; RV64I-NEXT: sb a3, 1(a2)
+; RV64I-NEXT: srli a3, a1, 24
+; RV64I-NEXT: sb a3, 11(a2)
+; RV64I-NEXT: srli a3, a1, 16
+; RV64I-NEXT: sb a3, 10(a2)
+; RV64I-NEXT: srli a1, a1, 8
+; RV64I-NEXT: sb a1, 9(a2)
+; RV64I-NEXT: srli a1, a7, 56
+; RV64I-NEXT: sb a1, 23(a2)
+; RV64I-NEXT: srli a1, a7, 48
+; RV64I-NEXT: sb a1, 22(a2)
+; RV64I-NEXT: srli a1, a7, 40
+; RV64I-NEXT: sb a1, 21(a2)
+; RV64I-NEXT: srli a1, a7, 32
+; RV64I-NEXT: sb a1, 20(a2)
+; RV64I-NEXT: srli a1, a4, 56
+; RV64I-NEXT: sb a1, 7(a2)
+; RV64I-NEXT: srli a1, a4, 48
+; RV64I-NEXT: sb a1, 6(a2)
+; RV64I-NEXT: srli a1, a4, 40
+; RV64I-NEXT: sb a1, 5(a2)
+; RV64I-NEXT: srli a4, a4, 32
+; RV64I-NEXT: sb a4, 4(a2)
+; RV64I-NEXT: srli a1, a0, 56
+; RV64I-NEXT: sb a1, 15(a2)
+; RV64I-NEXT: srli a1, a0, 48
+; RV64I-NEXT: sb a1, 14(a2)
+; RV64I-NEXT: srli a1, a0, 40
+; RV64I-NEXT: sb a1, 13(a2)
+; RV64I-NEXT: srli a0, a0, 32
+; RV64I-NEXT: sb a0, 12(a2)
+; RV64I-NEXT: addi sp, sp, 64
+; RV64I-NEXT: ret
+;
+; RV32I-LABEL: ashr_32bytes_wordOff:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi sp, sp, -64
+; RV32I-NEXT: lbu a3, 1(a0)
+; RV32I-NEXT: lbu a4, 0(a0)
+; RV32I-NEXT: lbu a5, 2(a0)
+; RV32I-NEXT: lbu a6, 3(a0)
+; RV32I-NEXT: slli a3, a3, 8
+; RV32I-NEXT: or a3, a3, a4
+; RV32I-NEXT: slli a5, a5, 16
+; RV32I-NEXT: slli a6, a6, 24
+; RV32I-NEXT: or a4, a6, a5
+; RV32I-NEXT: or a3, a4, a3
+; RV32I-NEXT: lbu a4, 5(a0)
+; RV32I-NEXT: lbu a5, 4(a0)
+; RV32I-NEXT: lbu a6, 6(a0)
+; RV32I-NEXT: lbu a7, 7(a0)
+; RV32I-NEXT: slli a4, a4, 8
+; RV32I-NEXT: or a4, a4, a5
+; RV32I-NEXT: slli a6, a6, 16
+; RV32I-NEXT: slli a7, a7, 24
+; RV32I-NEXT: or a5, a7, a6
+; RV32I-NEXT: or a4, a5, a4
+; RV32I-NEXT: lbu a5, 9(a0)
+; RV32I-NEXT: lbu a6, 8(a0)
+; RV32I-NEXT: lbu a7, 10(a0)
+; RV32I-NEXT: lbu t0, 11(a0)
+; RV32I-NEXT: slli a5, a5, 8
+; RV32I-NEXT: or a5, a5, a6
+; RV32I-NEXT: slli a7, a7, 16
+; RV32I-NEXT: slli t0, t0, 24
+; RV32I-NEXT: or a6, t0, a7
+; RV32I-NEXT: or a5, a6, a5
+; RV32I-NEXT: lbu a6, 13(a0)
+; RV32I-NEXT: lbu a7, 12(a0)
+; RV32I-NEXT: lbu t0, 14(a0)
+; RV32I-NEXT: lbu t1, 15(a0)
+; RV32I-NEXT: slli a6, a6, 8
+; RV32I-NEXT: or a6, a6, a7
+; RV32I-NEXT: slli t0, t0, 16
+; RV32I-NEXT: slli t1, t1, 24
+; RV32I-NEXT: or a7, t1, t0
+; RV32I-NEXT: or a6, a7, a6
+; RV32I-NEXT: lbu a7, 17(a0)
+; RV32I-NEXT: lbu t0, 16(a0)
+; RV32I-NEXT: lbu t1, 18(a0)
+; RV32I-NEXT: lbu t2, 19(a0)
+; RV32I-NEXT: slli a7, a7, 8
+; RV32I-NEXT: or a7, a7, t0
+; RV32I-NEXT: slli t1, t1, 16
+; RV32I-NEXT: slli t2, t2, 24
+; RV32I-NEXT: or t0, t2, t1
+; RV32I-NEXT: or a7, t0, a7
+; RV32I-NEXT: lbu t0, 21(a0)
+; RV32I-NEXT: lbu t1, 20(a0)
+; RV32I-NEXT: lbu t2, 22(a0)
+; RV32I-NEXT: lbu t3, 23(a0)
+; RV32I-NEXT: slli t0, t0, 8
+; RV32I-NEXT: or t0, t0, t1
+; RV32I-NEXT: slli t2, t2, 16
+; RV32I-NEXT: slli t3, t3, 24
+; RV32I-NEXT: or t1, t3, t2
+; RV32I-NEXT: or t0, t1, t0
+; RV32I-NEXT: lbu t1, 25(a0)
+; RV32I-NEXT: lbu t2, 24(a0)
+; RV32I-NEXT: lbu t3, 26(a0)
+; RV32I-NEXT: lbu t4, 27(a0)
+; RV32I-NEXT: slli t1, t1, 8
+; RV32I-NEXT: or t1, t1, t2
+; RV32I-NEXT: slli t3, t3, 16
+; RV32I-NEXT: slli t4, t4, 24
+; RV32I-NEXT: or t2, t4, t3
+; RV32I-NEXT: or t1, t2, t1
+; RV32I-NEXT: lbu t2, 29(a0)
+; RV32I-NEXT: lbu t3, 28(a0)
+; RV32I-NEXT: lbu t4, 30(a0)
+; RV32I-NEXT: lbu a0, 31(a0)
+; RV32I-NEXT: slli t2, t2, 8
+; RV32I-NEXT: or t2, t2, t3
+; RV32I-NEXT: slli t4, t4, 16
+; RV32I-NEXT: slli a0, a0, 24
+; RV32I-NEXT: or t3, a0, t4
+; RV32I-NEXT: or t2, t3, t2
+; RV32I-NEXT: lbu a1, 0(a1)
+; RV32I-NEXT: srai a0, a0, 31
+; RV32I-NEXT: sw a0, 60(sp)
+; RV32I-NEXT: sw a0, 56(sp)
+; RV32I-NEXT: sw a0, 52(sp)
+; RV32I-NEXT: sw a0, 48(sp)
+; RV32I-NEXT: sw a0, 44(sp)
+; RV32I-NEXT: sw a0, 40(sp)
+; RV32I-NEXT: sw a0, 36(sp)
+; RV32I-NEXT: sw a0, 32(sp)
+; RV32I-NEXT: sw t2, 28(sp)
+; RV32I-NEXT: sw t1, 24(sp)
+; RV32I-NEXT: sw t0, 20(sp)
+; RV32I-NEXT: sw a7, 16(sp)
+; RV32I-NEXT: sw a6, 12(sp)
+; RV32I-NEXT: sw a5, 8(sp)
+; RV32I-NEXT: sw a4, 4(sp)
+; RV32I-NEXT: sw a3, 0(sp)
+; RV32I-NEXT: slli a1, a1, 2
+; RV32I-NEXT: andi a1, a1, 28
+; RV32I-NEXT: mv a0, sp
+; RV32I-NEXT: add a3, a0, a1
+; RV32I-NEXT: lw a0, 4(a3)
+; RV32I-NEXT: lw a1, 0(a3)
+; RV32I-NEXT: lw a4, 12(a3)
+; RV32I-NEXT: lw a5, 8(a3)
+; RV32I-NEXT: lw a6, 24(a3)
+; RV32I-NEXT: lw a7, 28(a3)
+; RV32I-NEXT: lw t0, 16(a3)
+; RV32I-NEXT: lw a3, 20(a3)
+; RV32I-NEXT: sb a6, 24(a2)
+; RV32I-NEXT: sb a7, 28(a2)
+; RV32I-NEXT: sb t0, 16(a2)
+; RV32I-NEXT: sb a3, 20(a2)
+; RV32I-NEXT: sb a5, 8(a2)
+; RV32I-NEXT: sb a4, 12(a2)
+; RV32I-NEXT: sb a1, 0(a2)
+; RV32I-NEXT: sb a0, 4(a2)
+; RV32I-NEXT: srli t1, a6, 24
+; RV32I-NEXT: sb t1, 27(a2)
+; RV32I-NEXT: srli t1, a6, 16
+; RV32I-NEXT: sb t1, 26(a2)
+; RV32I-NEXT: srli a6, a6, 8
+; RV32I-NEXT: sb a6, 25(a2)
+; RV32I-NEXT: srli a6, a7, 24
+; RV32I-NEXT: sb a6, 31(a2)
+; RV32I-NEXT: srli a6, a7, 16
+; RV32I-NEXT: sb a6, 30(a2)
+; RV32I-NEXT: srli a6, a7, 8
+; RV32I-NEXT: sb a6, 29(a2)
+; RV32I-NEXT: srli a6, t0, 24
+; RV32I-NEXT: sb a6, 19(a2)
+; RV32I-NEXT: srli a6, t0, 16
+; RV32I-NEXT: sb a6, 18(a2)
+; RV32I-NEXT: srli a6, t0, 8
+; RV32I-NEXT: sb a6, 17(a2)
+; RV32I-NEXT: srli a6, a3, 24
+; RV32I-NEXT: sb a6, 23(a2)
+; RV32I-NEXT: srli a6, a3, 16
+; RV32I-NEXT: sb a6, 22(a2)
+; RV32I-NEXT: srli a3, a3, 8
+; RV32I-NEXT: sb a3, 21(a2)
+; RV32I-NEXT: srli a3, a5, 24
+; RV32I-NEXT: sb a3, 11(a2)
+; RV32I-NEXT: srli a3, a5, 16
+; RV32I-NEXT: sb a3, 10(a2)
+; RV32I-NEXT: srli a5, a5, 8
+; RV32I-NEXT: sb a5, 9(a2)
+; RV32I-NEXT: srli a3, a4, 24
+; RV32I-NEXT: sb a3, 15(a2)
+; RV32I-NEXT: srli a3, a4, 16
+; RV32I-NEXT: sb a3, 14(a2)
+; RV32I-NEXT: srli a4, a4, 8
+; RV32I-NEXT: sb a4, 13(a2)
+; RV32I-NEXT: srli a3, a1, 24
+; RV32I-NEXT: sb a3, 3(a2)
+; RV32I-NEXT: srli a3, a1, 16
+; RV32I-NEXT: sb a3, 2(a2)
+; RV32I-NEXT: srli a1, a1, 8
+; RV32I-NEXT: sb a1, 1(a2)
+; RV32I-NEXT: srli a1, a0, 24
+; RV32I-NEXT: sb a1, 7(a2)
+; RV32I-NEXT: srli a1, a0, 16
+; RV32I-NEXT: sb a1, 6(a2)
+; RV32I-NEXT: srli a0, a0, 8
+; RV32I-NEXT: sb a0, 5(a2)
+; RV32I-NEXT: addi sp, sp, 64
+; RV32I-NEXT: ret
+ %src = load i256, ptr %src.ptr, align 1
+ %wordOff = load i256, ptr %wordOff.ptr, align 1
+ %bitOff = shl i256 %wordOff, 5
+ %res = ashr i256 %src, %bitOff
+ store i256 %res, ptr %dst, align 1
+ ret void
+}
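+; ashr_32bytes_wordOff again scales the offset by 32 (`shl i256 %wordOff, 5`),
+; so the residual in-element shift on RV64I is either 0 or 32
+; (`andi a0, a5, 32`), and RV32I collapses to a word-indexed copy from the
+; sign-filled buffer.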
+
+define void @ashr_32bytes_dwordOff(ptr %src.ptr, ptr %dwordOff.ptr, ptr %dst) nounwind {
+; RV64I-LABEL: ashr_32bytes_dwordOff:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -64
+; RV64I-NEXT: lbu a3, 1(a0)
+; RV64I-NEXT: lbu a4, 0(a0)
+; RV64I-NEXT: lbu a5, 2(a0)
+; RV64I-NEXT: lbu a6, 3(a0)
+; RV64I-NEXT: slli a3, a3, 8
+; RV64I-NEXT: or a3, a3, a4
+; RV64I-NEXT: slli a5, a5, 16
+; RV64I-NEXT: slli a6, a6, 24
+; RV64I-NEXT: or a4, a6, a5
+; RV64I-NEXT: or a3, a4, a3
+; RV64I-NEXT: lbu a4, 5(a0)
+; RV64I-NEXT: lbu a5, 4(a0)
+; RV64I-NEXT: lbu a6, 6(a0)
+; RV64I-NEXT: lbu a7, 7(a0)
+; RV64I-NEXT: slli a4, a4, 8
+; RV64I-NEXT: or a4, a4, a5
+; RV64I-NEXT: slli a6, a6, 16
+; RV64I-NEXT: slli a7, a7, 24
+; RV64I-NEXT: or a5, a7, a6
+; RV64I-NEXT: or a4, a5, a4
+; RV64I-NEXT: slli a4, a4, 32
+; RV64I-NEXT: or a3, a4, a3
+; RV64I-NEXT: lbu a4, 9(a0)
+; RV64I-NEXT: lbu a5, 8(a0)
+; RV64I-NEXT: lbu a6, 10(a0)
+; RV64I-NEXT: lbu a7, 11(a0)
+; RV64I-NEXT: slli a4, a4, 8
+; RV64I-NEXT: or a4, a4, a5
+; RV64I-NEXT: slli a6, a6, 16
+; RV64I-NEXT: slli a7, a7, 24
+; RV64I-NEXT: or a5, a7, a6
+; RV64I-NEXT: or a4, a5, a4
+; RV64I-NEXT: lbu a5, 13(a0)
+; RV64I-NEXT: lbu a6, 12(a0)
+; RV64I-NEXT: lbu a7, 14(a0)
+; RV64I-NEXT: lbu t0, 15(a0)
+; RV64I-NEXT: slli a5, a5, 8
+; RV64I-NEXT: or a5, a5, a6
+; RV64I-NEXT: slli a7, a7, 16
+; RV64I-NEXT: slli t0, t0, 24
+; RV64I-NEXT: or a6, t0, a7
+; RV64I-NEXT: or a5, a6, a5
+; RV64I-NEXT: slli a5, a5, 32
+; RV64I-NEXT: or a4, a5, a4
+; RV64I-NEXT: lbu a5, 17(a0)
+; RV64I-NEXT: lbu a6, 16(a0)
+; RV64I-NEXT: lbu a7, 18(a0)
+; RV64I-NEXT: lbu t0, 19(a0)
+; RV64I-NEXT: slli a5, a5, 8
+; RV64I-NEXT: or a5, a5, a6
+; RV64I-NEXT: slli a7, a7, 16
+; RV64I-NEXT: slli t0, t0, 24
+; RV64I-NEXT: or a6, t0, a7
+; RV64I-NEXT: or a5, a6, a5
+; RV64I-NEXT: lbu a6, 21(a0)
+; RV64I-NEXT: lbu a7, 20(a0)
+; RV64I-NEXT: lbu t0, 22(a0)
+; RV64I-NEXT: lbu t1, 23(a0)
+; RV64I-NEXT: slli a6, a6, 8
+; RV64I-NEXT: or a6, a6, a7
+; RV64I-NEXT: slli t0, t0, 16
+; RV64I-NEXT: slli t1, t1, 24
+; RV64I-NEXT: or a7, t1, t0
+; RV64I-NEXT: or a6, a7, a6
+; RV64I-NEXT: slli a6, a6, 32
+; RV64I-NEXT: or a5, a6, a5
+; RV64I-NEXT: lbu a6, 25(a0)
+; RV64I-NEXT: lbu a7, 24(a0)
+; RV64I-NEXT: lbu t0, 26(a0)
+; RV64I-NEXT: lbu t1, 27(a0)
+; RV64I-NEXT: slli a6, a6, 8
+; RV64I-NEXT: or a6, a6, a7
+; RV64I-NEXT: slli t0, t0, 16
+; RV64I-NEXT: slli t1, t1, 24
+; RV64I-NEXT: or a7, t1, t0
+; RV64I-NEXT: or a6, a7, a6
+; RV64I-NEXT: lbu a7, 29(a0)
+; RV64I-NEXT: lbu t0, 28(a0)
+; RV64I-NEXT: lbu t1, 30(a0)
+; RV64I-NEXT: lbu a0, 31(a0)
+; RV64I-NEXT: slli a7, a7, 8
+; RV64I-NEXT: or a7, a7, t0
+; RV64I-NEXT: slli t1, t1, 16
+; RV64I-NEXT: slli a0, a0, 24
+; RV64I-NEXT: or a0, a0, t1
+; RV64I-NEXT: or a0, a0, a7
+; RV64I-NEXT: slli a7, a0, 32
+; RV64I-NEXT: or a6, a7, a6
+; RV64I-NEXT: lbu a1, 0(a1)
+; RV64I-NEXT: sraiw a0, a0, 31
+; RV64I-NEXT: sd a0, 56(sp)
+; RV64I-NEXT: sd a0, 48(sp)
+; RV64I-NEXT: sd a0, 40(sp)
+; RV64I-NEXT: sd a0, 32(sp)
+; RV64I-NEXT: sd a6, 24(sp)
+; RV64I-NEXT: sd a5, 16(sp)
+; RV64I-NEXT: sd a4, 8(sp)
+; RV64I-NEXT: sd a3, 0(sp)
+; RV64I-NEXT: slli a1, a1, 3
+; RV64I-NEXT: andi a1, a1, 24
+; RV64I-NEXT: mv a0, sp
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: ld a1, 16(a0)
+; RV64I-NEXT: ld a3, 24(a0)
+; RV64I-NEXT: ld a4, 0(a0)
+; RV64I-NEXT: ld a0, 8(a0)
+; RV64I-NEXT: sb a1, 16(a2)
+; RV64I-NEXT: sb a3, 24(a2)
+; RV64I-NEXT: sb a4, 0(a2)
+; RV64I-NEXT: sb a0, 8(a2)
+; RV64I-NEXT: srli a5, a1, 56
+; RV64I-NEXT: sb a5, 23(a2)
+; RV64I-NEXT: srli a5, a1, 48
+; RV64I-NEXT: sb a5, 22(a2)
+; RV64I-NEXT: srli a5, a1, 40
+; RV64I-NEXT: sb a5, 21(a2)
+; RV64I-NEXT: srli a5, a1, 32
+; RV64I-NEXT: sb a5, 20(a2)
+; RV64I-NEXT: srli a5, a1, 24
+; RV64I-NEXT: sb a5, 19(a2)
+; RV64I-NEXT: srli a5, a1, 16
+; RV64I-NEXT: sb a5, 18(a2)
+; RV64I-NEXT: srli a1, a1, 8
+; RV64I-NEXT: sb a1, 17(a2)
+; RV64I-NEXT: srli a1, a3, 56
+; RV64I-NEXT: sb a1, 31(a2)
+; RV64I-NEXT: srli a1, a3, 48
+; RV64I-NEXT: sb a1, 30(a2)
+; RV64I-NEXT: srli a1, a3, 40
+; RV64I-NEXT: sb a1, 29(a2)
+; RV64I-NEXT: srli a1, a3, 32
+; RV64I-NEXT: sb a1, 28(a2)
+; RV64I-NEXT: srli a1, a3, 24
+; RV64I-NEXT: sb a1, 27(a2)
+; RV64I-NEXT: srli a1, a3, 16
+; RV64I-NEXT: sb a1, 26(a2)
+; RV64I-NEXT: srli a3, a3, 8
+; RV64I-NEXT: sb a3, 25(a2)
+; RV64I-NEXT: srli a1, a4, 56
+; RV64I-NEXT: sb a1, 7(a2)
+; RV64I-NEXT: srli a1, a4, 48
+; RV64I-NEXT: sb a1, 6(a2)
+; RV64I-NEXT: srli a1, a4, 40
+; RV64I-NEXT: sb a1, 5(a2)
+; RV64I-NEXT: srli a1, a4, 32
+; RV64I-NEXT: sb a1, 4(a2)
+; RV64I-NEXT: srli a1, a4, 24
+; RV64I-NEXT: sb a1, 3(a2)
+; RV64I-NEXT: srli a1, a4, 16
+; RV64I-NEXT: sb a1, 2(a2)
+; RV64I-NEXT: srli a4, a4, 8
+; RV64I-NEXT: sb a4, 1(a2)
+; RV64I-NEXT: srli a1, a0, 56
+; RV64I-NEXT: sb a1, 15(a2)
+; RV64I-NEXT: srli a1, a0, 48
+; RV64I-NEXT: sb a1, 14(a2)
+; RV64I-NEXT: srli a1, a0, 40
+; RV64I-NEXT: sb a1, 13(a2)
+; RV64I-NEXT: srli a1, a0, 32
+; RV64I-NEXT: sb a1, 12(a2)
+; RV64I-NEXT: srli a1, a0, 24
+; RV64I-NEXT: sb a1, 11(a2)
+; RV64I-NEXT: srli a1, a0, 16
+; RV64I-NEXT: sb a1, 10(a2)
+; RV64I-NEXT: srli a0, a0, 8
+; RV64I-NEXT: sb a0, 9(a2)
+; RV64I-NEXT: addi sp, sp, 64
+; RV64I-NEXT: ret
+;
+; RV32I-LABEL: ashr_32bytes_dwordOff:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi sp, sp, -64
+; RV32I-NEXT: lbu a3, 1(a0)
+; RV32I-NEXT: lbu a4, 0(a0)
+; RV32I-NEXT: lbu a5, 2(a0)
+; RV32I-NEXT: lbu a6, 3(a0)
+; RV32I-NEXT: slli a3, a3, 8
+; RV32I-NEXT: or a3, a3, a4
+; RV32I-NEXT: slli a5, a5, 16
+; RV32I-NEXT: slli a6, a6, 24
+; RV32I-NEXT: or a4, a6, a5
+; RV32I-NEXT: or a3, a4, a3
+; RV32I-NEXT: lbu a4, 5(a0)
+; RV32I-NEXT: lbu a5, 4(a0)
+; RV32I-NEXT: lbu a6, 6(a0)
+; RV32I-NEXT: lbu a7, 7(a0)
+; RV32I-NEXT: slli a4, a4, 8
+; RV32I-NEXT: or a4, a4, a5
+; RV32I-NEXT: slli a6, a6, 16
+; RV32I-NEXT: slli a7, a7, 24
+; RV32I-NEXT: or a5, a7, a6
+; RV32I-NEXT: or a4, a5, a4
+; RV32I-NEXT: lbu a5, 9(a0)
+; RV32I-NEXT: lbu a6, 8(a0)
+; RV32I-NEXT: lbu a7, 10(a0)
+; RV32I-NEXT: lbu t0, 11(a0)
+; RV32I-NEXT: slli a5, a5, 8
+; RV32I-NEXT: or a5, a5, a6
+; RV32I-NEXT: slli a7, a7, 16
+; RV32I-NEXT: slli t0, t0, 24
+; RV32I-NEXT: or a6, t0, a7
+; RV32I-NEXT: or a5, a6, a5
+; RV32I-NEXT: lbu a6, 13(a0)
+; RV32I-NEXT: lbu a7, 12(a0)
+; RV32I-NEXT: lbu t0, 14(a0)
+; RV32I-NEXT: lbu t1, 15(a0)
+; RV32I-NEXT: slli a6, a6, 8
+; RV32I-NEXT: or a6, a6, a7
+; RV32I-NEXT: slli t0, t0, 16
+; RV32I-NEXT: slli t1, t1, 24
+; RV32I-NEXT: or a7, t1, t0
+; RV32I-NEXT: or a6, a7, a6
+; RV32I-NEXT: lbu a7, 17(a0)
+; RV32I-NEXT: lbu t0, 16(a0)
+; RV32I-NEXT: lbu t1, 18(a0)
+; RV32I-NEXT: lbu t2, 19(a0)
+; RV32I-NEXT: slli a7, a7, 8
+; RV32I-NEXT: or a7, a7, t0
+; RV32I-NEXT: slli t1, t1, 16
+; RV32I-NEXT: slli t2, t2, 24
+; RV32I-NEXT: or t0, t2, t1
+; RV32I-NEXT: or a7, t0, a7
+; RV32I-NEXT: lbu t0, 21(a0)
+; RV32I-NEXT: lbu t1, 20(a0)
+; RV32I-NEXT: lbu t2, 22(a0)
+; RV32I-NEXT: lbu t3, 23(a0)
+; RV32I-NEXT: slli t0, t0, 8
+; RV32I-NEXT: or t0, t0, t1
+; RV32I-NEXT: slli t2, t2, 16
+; RV32I-NEXT: slli t3, t3, 24
+; RV32I-NEXT: or t1, t3, t2
+; RV32I-NEXT: or t0, t1, t0
+; RV32I-NEXT: lbu t1, 25(a0)
+; RV32I-NEXT: lbu t2, 24(a0)
+; RV32I-NEXT: lbu t3, 26(a0)
+; RV32I-NEXT: lbu t4, 27(a0)
+; RV32I-NEXT: slli t1, t1, 8
+; RV32I-NEXT: or t1, t1, t2
+; RV32I-NEXT: slli t3, t3, 16
+; RV32I-NEXT: slli t4, t4, 24
+; RV32I-NEXT: or t2, t4, t3
+; RV32I-NEXT: or t1, t2, t1
+; RV32I-NEXT: lbu t2, 29(a0)
+; RV32I-NEXT: lbu t3, 28(a0)
+; RV32I-NEXT: lbu t4, 30(a0)
+; RV32I-NEXT: lbu a0, 31(a0)
+; RV32I-NEXT: slli t2, t2, 8
+; RV32I-NEXT: or t2, t2, t3
+; RV32I-NEXT: slli t4, t4, 16
+; RV32I-NEXT: slli a0, a0, 24
+; RV32I-NEXT: or t3, a0, t4
+; RV32I-NEXT: or t2, t3, t2
+; RV32I-NEXT: lbu a1, 0(a1)
+; RV32I-NEXT: srai a0, a0, 31
+; RV32I-NEXT: sw a0, 60(sp)
+; RV32I-NEXT: sw a0, 56(sp)
+; RV32I-NEXT: sw a0, 52(sp)
+; RV32I-NEXT: sw a0, 48(sp)
+; RV32I-NEXT: sw a0, 44(sp)
+; RV32I-NEXT: sw a0, 40(sp)
+; RV32I-NEXT: sw a0, 36(sp)
+; RV32I-NEXT: sw a0, 32(sp)
+; RV32I-NEXT: sw t2, 28(sp)
+; RV32I-NEXT: sw t1, 24(sp)
+; RV32I-NEXT: sw t0, 20(sp)
+; RV32I-NEXT: sw a7, 16(sp)
+; RV32I-NEXT: sw a6, 12(sp)
+; RV32I-NEXT: sw a5, 8(sp)
+; RV32I-NEXT: sw a4, 4(sp)
+; RV32I-NEXT: sw a3, 0(sp)
+; RV32I-NEXT: slli a1, a1, 3
+; RV32I-NEXT: andi a1, a1, 24
+; RV32I-NEXT: mv a0, sp
+; RV32I-NEXT: add a3, a0, a1
+; RV32I-NEXT: lw a0, 4(a3)
+; RV32I-NEXT: lw a1, 0(a3)
+; RV32I-NEXT: lw a4, 12(a3)
+; RV32I-NEXT: lw a5, 8(a3)
+; RV32I-NEXT: lw a6, 24(a3)
+; RV32I-NEXT: lw a7, 28(a3)
+; RV32I-NEXT: lw t0, 16(a3)
+; RV32I-NEXT: lw a3, 20(a3)
+; RV32I-NEXT: sb a6, 24(a2)
+; RV32I-NEXT: sb a7, 28(a2)
+; RV32I-NEXT: sb t0, 16(a2)
+; RV32I-NEXT: sb a3, 20(a2)
+; RV32I-NEXT: sb a5, 8(a2)
+; RV32I-NEXT: sb a4, 12(a2)
+; RV32I-NEXT: sb a1, 0(a2)
+; RV32I-NEXT: sb a0, 4(a2)
+; RV32I-NEXT: srli t1, a6, 24
+; RV32I-NEXT: sb t1, 27(a2)
+; RV32I-NEXT: srli t1, a6, 16
+; RV32I-NEXT: sb t1, 26(a2)
+; RV32I-NEXT: srli a6, a6, 8
+; RV32I-NEXT: sb a6, 25(a2)
+; RV32I-NEXT: srli a6, a7, 24
+; RV32I-NEXT: sb a6, 31(a2)
+; RV32I-NEXT: srli a6, a7, 16
+; RV32I-NEXT: sb a6, 30(a2)
+; RV32I-NEXT: srli a6, a7, 8
+; RV32I-NEXT: sb a6, 29(a2)
+; RV32I-NEXT: srli a6, t0, 24
+; RV32I-NEXT: sb a6, 19(a2)
+; RV32I-NEXT: srli a6, t0, 16
+; RV32I-NEXT: sb a6, 18(a2)
+; RV32I-NEXT: srli a6, t0, 8
+; RV32I-NEXT: sb a6, 17(a2)
+; RV32I-NEXT: srli a6, a3, 24
+; RV32I-NEXT: sb a6, 23(a2)
+; RV32I-NEXT: srli a6, a3, 16
+; RV32I-NEXT: sb a6, 22(a2)
+; RV32I-NEXT: srli a3, a3, 8
+; RV32I-NEXT: sb a3, 21(a2)
+; RV32I-NEXT: srli a3, a5, 24
+; RV32I-NEXT: sb a3, 11(a2)
+; RV32I-NEXT: srli a3, a5, 16
+; RV32I-NEXT: sb a3, 10(a2)
+; RV32I-NEXT: srli a5, a5, 8
+; RV32I-NEXT: sb a5, 9(a2)
+; RV32I-NEXT: srli a3, a4, 24
+; RV32I-NEXT: sb a3, 15(a2)
+; RV32I-NEXT: srli a3, a4, 16
+; RV32I-NEXT: sb a3, 14(a2)
+; RV32I-NEXT: srli a4, a4, 8
+; RV32I-NEXT: sb a4, 13(a2)
+; RV32I-NEXT: srli a3, a1, 24
+; RV32I-NEXT: sb a3, 3(a2)
+; RV32I-NEXT: srli a3, a1, 16
+; RV32I-NEXT: sb a3, 2(a2)
+; RV32I-NEXT: srli a1, a1, 8
+; RV32I-NEXT: sb a1, 1(a2)
+; RV32I-NEXT: srli a1, a0, 24
+; RV32I-NEXT: sb a1, 7(a2)
+; RV32I-NEXT: srli a1, a0, 16
+; RV32I-NEXT: sb a1, 6(a2)
+; RV32I-NEXT: srli a0, a0, 8
+; RV32I-NEXT: sb a0, 5(a2)
+; RV32I-NEXT: addi sp, sp, 64
+; RV32I-NEXT: ret
+ %src = load i256, ptr %src.ptr, align 1
+ %dwordOff = load i256, ptr %dwordOff.ptr, align 1
+ %bitOff = shl i256 %dwordOff, 6
+ %res = ashr i256 %src, %bitOff
+ store i256 %res, ptr %dst, align 1
+ ret void
+}
diff --git a/llvm/test/CodeGen/RISCV/wide-scalar-shift-legalization.ll b/llvm/test/CodeGen/RISCV/wide-scalar-shift-legalization.ll
index a601256..7e879b1 100644
--- a/llvm/test/CodeGen/RISCV/wide-scalar-shift-legalization.ll
+++ b/llvm/test/CodeGen/RISCV/wide-scalar-shift-legalization.ll
@@ -704,164 +704,117 @@ define void @lshr_16bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
;
; RV32I-LABEL: lshr_16bytes:
; RV32I: # %bb.0:
-; RV32I-NEXT: addi sp, sp, -64
-; RV32I-NEXT: sw s0, 60(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s1, 56(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s2, 52(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s3, 48(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s4, 44(sp) # 4-byte Folded Spill
-; RV32I-NEXT: lbu a3, 0(a0)
-; RV32I-NEXT: lbu a4, 1(a0)
+; RV32I-NEXT: addi sp, sp, -32
+; RV32I-NEXT: lbu a3, 1(a0)
+; RV32I-NEXT: lbu a4, 0(a0)
; RV32I-NEXT: lbu a5, 2(a0)
; RV32I-NEXT: lbu a6, 3(a0)
-; RV32I-NEXT: lbu a7, 4(a0)
-; RV32I-NEXT: lbu t0, 5(a0)
-; RV32I-NEXT: lbu t1, 6(a0)
-; RV32I-NEXT: lbu t2, 7(a0)
-; RV32I-NEXT: lbu t3, 8(a0)
-; RV32I-NEXT: lbu t4, 9(a0)
-; RV32I-NEXT: lbu t5, 10(a0)
-; RV32I-NEXT: lbu t6, 11(a0)
-; RV32I-NEXT: lbu s0, 1(a1)
-; RV32I-NEXT: lbu s1, 0(a1)
-; RV32I-NEXT: lbu s2, 12(a0)
-; RV32I-NEXT: lbu s3, 13(a0)
-; RV32I-NEXT: slli s0, s0, 8
-; RV32I-NEXT: or s0, s0, s1
-; RV32I-NEXT: lbu s1, 2(a1)
-; RV32I-NEXT: lbu a1, 3(a1)
-; RV32I-NEXT: lbu s4, 14(a0)
-; RV32I-NEXT: lbu a0, 15(a0)
-; RV32I-NEXT: slli s1, s1, 16
-; RV32I-NEXT: slli a1, a1, 24
-; RV32I-NEXT: or a1, a1, s1
-; RV32I-NEXT: or a1, a1, s0
-; RV32I-NEXT: sb zero, 43(sp)
-; RV32I-NEXT: sb zero, 42(sp)
-; RV32I-NEXT: sb zero, 41(sp)
-; RV32I-NEXT: sb zero, 40(sp)
-; RV32I-NEXT: sb zero, 39(sp)
-; RV32I-NEXT: sb zero, 38(sp)
-; RV32I-NEXT: sb zero, 37(sp)
-; RV32I-NEXT: sb zero, 36(sp)
-; RV32I-NEXT: sb zero, 35(sp)
-; RV32I-NEXT: sb zero, 34(sp)
-; RV32I-NEXT: sb zero, 33(sp)
-; RV32I-NEXT: sb zero, 32(sp)
-; RV32I-NEXT: sb zero, 31(sp)
-; RV32I-NEXT: sb zero, 30(sp)
-; RV32I-NEXT: sb zero, 29(sp)
-; RV32I-NEXT: sb zero, 28(sp)
-; RV32I-NEXT: sb a0, 27(sp)
-; RV32I-NEXT: sb s4, 26(sp)
-; RV32I-NEXT: sb s3, 25(sp)
-; RV32I-NEXT: sb s2, 24(sp)
-; RV32I-NEXT: sb t6, 23(sp)
-; RV32I-NEXT: sb t5, 22(sp)
-; RV32I-NEXT: sb t4, 21(sp)
-; RV32I-NEXT: sb t3, 20(sp)
-; RV32I-NEXT: sb t2, 19(sp)
-; RV32I-NEXT: sb t1, 18(sp)
-; RV32I-NEXT: sb t0, 17(sp)
-; RV32I-NEXT: sb a7, 16(sp)
-; RV32I-NEXT: sb a6, 15(sp)
-; RV32I-NEXT: sb a5, 14(sp)
-; RV32I-NEXT: sb a4, 13(sp)
-; RV32I-NEXT: sb a3, 12(sp)
-; RV32I-NEXT: slli a0, a1, 25
-; RV32I-NEXT: srli a0, a0, 28
-; RV32I-NEXT: addi a3, sp, 12
-; RV32I-NEXT: add a3, a3, a0
-; RV32I-NEXT: lbu a0, 5(a3)
-; RV32I-NEXT: lbu a4, 4(a3)
-; RV32I-NEXT: lbu a5, 6(a3)
-; RV32I-NEXT: lbu a6, 7(a3)
-; RV32I-NEXT: slli a0, a0, 8
-; RV32I-NEXT: or a0, a0, a4
+; RV32I-NEXT: slli a3, a3, 8
+; RV32I-NEXT: or a3, a3, a4
; RV32I-NEXT: slli a5, a5, 16
; RV32I-NEXT: slli a6, a6, 24
; RV32I-NEXT: or a4, a6, a5
-; RV32I-NEXT: or a5, a4, a0
-; RV32I-NEXT: andi a4, a1, 7
-; RV32I-NEXT: srl a0, a5, a4
-; RV32I-NEXT: lbu a1, 9(a3)
-; RV32I-NEXT: lbu a6, 8(a3)
-; RV32I-NEXT: lbu a7, 10(a3)
-; RV32I-NEXT: lbu t0, 11(a3)
-; RV32I-NEXT: slli a1, a1, 8
-; RV32I-NEXT: or a1, a1, a6
+; RV32I-NEXT: or a3, a4, a3
+; RV32I-NEXT: lbu a4, 5(a0)
+; RV32I-NEXT: lbu a5, 4(a0)
+; RV32I-NEXT: lbu a6, 6(a0)
+; RV32I-NEXT: lbu a7, 7(a0)
+; RV32I-NEXT: slli a4, a4, 8
+; RV32I-NEXT: or a4, a4, a5
+; RV32I-NEXT: slli a6, a6, 16
+; RV32I-NEXT: slli a7, a7, 24
+; RV32I-NEXT: or a5, a7, a6
+; RV32I-NEXT: or a4, a5, a4
+; RV32I-NEXT: lbu a5, 9(a0)
+; RV32I-NEXT: lbu a6, 8(a0)
+; RV32I-NEXT: lbu a7, 10(a0)
+; RV32I-NEXT: lbu t0, 11(a0)
+; RV32I-NEXT: slli a5, a5, 8
+; RV32I-NEXT: or a5, a5, a6
; RV32I-NEXT: slli a7, a7, 16
; RV32I-NEXT: slli t0, t0, 24
; RV32I-NEXT: or a6, t0, a7
-; RV32I-NEXT: or a6, a6, a1
-; RV32I-NEXT: slli a1, a6, 1
-; RV32I-NEXT: not a7, a4
-; RV32I-NEXT: sll a1, a1, a7
-; RV32I-NEXT: or a1, a0, a1
-; RV32I-NEXT: lbu a7, 1(a3)
-; RV32I-NEXT: lbu t0, 0(a3)
-; RV32I-NEXT: lbu t1, 2(a3)
-; RV32I-NEXT: lbu t2, 3(a3)
-; RV32I-NEXT: slli a7, a7, 8
-; RV32I-NEXT: or a7, a7, t0
-; RV32I-NEXT: slli t1, t1, 16
-; RV32I-NEXT: slli t2, t2, 24
-; RV32I-NEXT: or t0, t2, t1
-; RV32I-NEXT: or a7, t0, a7
-; RV32I-NEXT: srl a7, a7, a4
-; RV32I-NEXT: slli a5, a5, 1
-; RV32I-NEXT: xori t0, a4, 31
-; RV32I-NEXT: sll a5, a5, t0
-; RV32I-NEXT: or a5, a7, a5
-; RV32I-NEXT: srl a6, a6, a4
-; RV32I-NEXT: lbu t1, 13(a3)
-; RV32I-NEXT: lbu t2, 12(a3)
-; RV32I-NEXT: lbu t3, 14(a3)
-; RV32I-NEXT: lbu a3, 15(a3)
-; RV32I-NEXT: slli t1, t1, 8
-; RV32I-NEXT: or t1, t1, t2
-; RV32I-NEXT: slli t3, t3, 16
-; RV32I-NEXT: slli a3, a3, 24
-; RV32I-NEXT: or a3, a3, t3
-; RV32I-NEXT: or a3, a3, t1
-; RV32I-NEXT: slli t1, a3, 1
-; RV32I-NEXT: sll t0, t1, t0
-; RV32I-NEXT: or t0, a6, t0
-; RV32I-NEXT: srl a3, a3, a4
-; RV32I-NEXT: sb a6, 8(a2)
-; RV32I-NEXT: sb a3, 12(a2)
-; RV32I-NEXT: sb a7, 0(a2)
-; RV32I-NEXT: sb a0, 4(a2)
-; RV32I-NEXT: srli a4, a6, 16
-; RV32I-NEXT: sb a4, 10(a2)
-; RV32I-NEXT: srli a4, a6, 8
-; RV32I-NEXT: sb a4, 9(a2)
-; RV32I-NEXT: srli a4, a3, 16
-; RV32I-NEXT: sb a4, 14(a2)
-; RV32I-NEXT: srli a4, a3, 24
-; RV32I-NEXT: sb a4, 15(a2)
-; RV32I-NEXT: srli a3, a3, 8
-; RV32I-NEXT: sb a3, 13(a2)
-; RV32I-NEXT: srli a3, a7, 16
-; RV32I-NEXT: sb a3, 2(a2)
-; RV32I-NEXT: srli a3, a7, 8
-; RV32I-NEXT: sb a3, 1(a2)
-; RV32I-NEXT: srli a3, a0, 16
-; RV32I-NEXT: sb a3, 6(a2)
+; RV32I-NEXT: or a5, a6, a5
+; RV32I-NEXT: lbu a6, 13(a0)
+; RV32I-NEXT: lbu a7, 12(a0)
+; RV32I-NEXT: lbu t0, 14(a0)
+; RV32I-NEXT: lbu a0, 15(a0)
+; RV32I-NEXT: slli a6, a6, 8
+; RV32I-NEXT: or a6, a6, a7
+; RV32I-NEXT: slli t0, t0, 16
+; RV32I-NEXT: slli a0, a0, 24
+; RV32I-NEXT: or a0, a0, t0
+; RV32I-NEXT: or a0, a0, a6
+; RV32I-NEXT: lbu a6, 1(a1)
+; RV32I-NEXT: lbu a7, 0(a1)
+; RV32I-NEXT: lbu t0, 2(a1)
+; RV32I-NEXT: lbu a1, 3(a1)
+; RV32I-NEXT: slli a6, a6, 8
+; RV32I-NEXT: or a6, a6, a7
+; RV32I-NEXT: slli t0, t0, 16
+; RV32I-NEXT: slli a1, a1, 24
+; RV32I-NEXT: or a1, a1, t0
+; RV32I-NEXT: or a1, a1, a6
+; RV32I-NEXT: sw zero, 28(sp)
+; RV32I-NEXT: sw zero, 24(sp)
+; RV32I-NEXT: sw zero, 20(sp)
+; RV32I-NEXT: sw zero, 16(sp)
+; RV32I-NEXT: sw a0, 12(sp)
+; RV32I-NEXT: sw a5, 8(sp)
+; RV32I-NEXT: sw a4, 4(sp)
+; RV32I-NEXT: sw a3, 0(sp)
+; RV32I-NEXT: srli a0, a1, 3
+; RV32I-NEXT: andi a0, a0, 12
+; RV32I-NEXT: mv a3, sp
+; RV32I-NEXT: add a0, a3, a0
+; RV32I-NEXT: lw a3, 4(a0)
+; RV32I-NEXT: srl a4, a3, a1
+; RV32I-NEXT: lw a5, 8(a0)
+; RV32I-NEXT: andi a6, a1, 31
+; RV32I-NEXT: xori a6, a6, 31
+; RV32I-NEXT: lw a7, 0(a0)
+; RV32I-NEXT: slli t0, a5, 1
+; RV32I-NEXT: sll t0, t0, a6
+; RV32I-NEXT: or a4, a4, t0
+; RV32I-NEXT: srl a7, a7, a1
+; RV32I-NEXT: slli a3, a3, 1
+; RV32I-NEXT: lw a0, 12(a0)
+; RV32I-NEXT: sll a3, a3, a6
+; RV32I-NEXT: or a3, a7, a3
+; RV32I-NEXT: srl a5, a5, a1
+; RV32I-NEXT: slli a7, a0, 1
+; RV32I-NEXT: sll a6, a7, a6
+; RV32I-NEXT: or a5, a5, a6
+; RV32I-NEXT: srl a0, a0, a1
+; RV32I-NEXT: sb a0, 12(a2)
+; RV32I-NEXT: srli a1, a0, 16
+; RV32I-NEXT: sb a1, 14(a2)
+; RV32I-NEXT: srli a1, a0, 24
+; RV32I-NEXT: sb a1, 15(a2)
; RV32I-NEXT: srli a0, a0, 8
-; RV32I-NEXT: sb a0, 5(a2)
-; RV32I-NEXT: srli a0, t0, 24
+; RV32I-NEXT: sb a0, 13(a2)
+; RV32I-NEXT: sb a5, 8(a2)
+; RV32I-NEXT: sb a3, 0(a2)
+; RV32I-NEXT: sb a4, 4(a2)
+; RV32I-NEXT: srli a0, a5, 16
+; RV32I-NEXT: sb a0, 10(a2)
+; RV32I-NEXT: srli a0, a5, 24
; RV32I-NEXT: sb a0, 11(a2)
-; RV32I-NEXT: srli a5, a5, 24
-; RV32I-NEXT: sb a5, 3(a2)
-; RV32I-NEXT: srli a1, a1, 24
-; RV32I-NEXT: sb a1, 7(a2)
-; RV32I-NEXT: lw s0, 60(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s1, 56(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s2, 52(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s3, 48(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s4, 44(sp) # 4-byte Folded Reload
-; RV32I-NEXT: addi sp, sp, 64
+; RV32I-NEXT: srli a5, a5, 8
+; RV32I-NEXT: sb a5, 9(a2)
+; RV32I-NEXT: srli a0, a3, 16
+; RV32I-NEXT: sb a0, 2(a2)
+; RV32I-NEXT: srli a0, a3, 24
+; RV32I-NEXT: sb a0, 3(a2)
+; RV32I-NEXT: srli a3, a3, 8
+; RV32I-NEXT: sb a3, 1(a2)
+; RV32I-NEXT: srli a0, a4, 16
+; RV32I-NEXT: sb a0, 6(a2)
+; RV32I-NEXT: srli a0, a4, 24
+; RV32I-NEXT: sb a0, 7(a2)
+; RV32I-NEXT: srli a4, a4, 8
+; RV32I-NEXT: sb a4, 5(a2)
+; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
%src = load i128, ptr %src.ptr, align 1
%bitOff = load i128, ptr %bitOff.ptr, align 1
@@ -987,164 +940,117 @@ define void @shl_16bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
;
; RV32I-LABEL: shl_16bytes:
; RV32I: # %bb.0:
-; RV32I-NEXT: addi sp, sp, -64
-; RV32I-NEXT: sw s0, 60(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s1, 56(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s2, 52(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s3, 48(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s4, 44(sp) # 4-byte Folded Spill
-; RV32I-NEXT: lbu a3, 0(a0)
-; RV32I-NEXT: lbu a4, 1(a0)
+; RV32I-NEXT: addi sp, sp, -32
+; RV32I-NEXT: lbu a3, 1(a0)
+; RV32I-NEXT: lbu a4, 0(a0)
; RV32I-NEXT: lbu a5, 2(a0)
; RV32I-NEXT: lbu a6, 3(a0)
-; RV32I-NEXT: lbu a7, 4(a0)
-; RV32I-NEXT: lbu t0, 5(a0)
-; RV32I-NEXT: lbu t1, 6(a0)
-; RV32I-NEXT: lbu t2, 7(a0)
-; RV32I-NEXT: lbu t3, 8(a0)
-; RV32I-NEXT: lbu t4, 9(a0)
-; RV32I-NEXT: lbu t5, 10(a0)
-; RV32I-NEXT: lbu t6, 11(a0)
-; RV32I-NEXT: lbu s0, 1(a1)
-; RV32I-NEXT: lbu s1, 0(a1)
-; RV32I-NEXT: lbu s2, 12(a0)
-; RV32I-NEXT: lbu s3, 13(a0)
-; RV32I-NEXT: slli s0, s0, 8
-; RV32I-NEXT: or s0, s0, s1
-; RV32I-NEXT: lbu s1, 2(a1)
-; RV32I-NEXT: lbu a1, 3(a1)
-; RV32I-NEXT: lbu s4, 14(a0)
-; RV32I-NEXT: lbu a0, 15(a0)
-; RV32I-NEXT: slli s1, s1, 16
-; RV32I-NEXT: slli a1, a1, 24
-; RV32I-NEXT: or a1, a1, s1
-; RV32I-NEXT: or a1, a1, s0
-; RV32I-NEXT: sb zero, 27(sp)
-; RV32I-NEXT: sb zero, 26(sp)
-; RV32I-NEXT: sb zero, 25(sp)
-; RV32I-NEXT: sb zero, 24(sp)
-; RV32I-NEXT: sb zero, 23(sp)
-; RV32I-NEXT: sb zero, 22(sp)
-; RV32I-NEXT: sb zero, 21(sp)
-; RV32I-NEXT: sb zero, 20(sp)
-; RV32I-NEXT: sb zero, 19(sp)
-; RV32I-NEXT: sb zero, 18(sp)
-; RV32I-NEXT: sb zero, 17(sp)
-; RV32I-NEXT: sb zero, 16(sp)
-; RV32I-NEXT: sb zero, 15(sp)
-; RV32I-NEXT: sb zero, 14(sp)
-; RV32I-NEXT: sb zero, 13(sp)
-; RV32I-NEXT: sb zero, 12(sp)
-; RV32I-NEXT: sb a0, 43(sp)
-; RV32I-NEXT: sb s4, 42(sp)
-; RV32I-NEXT: sb s3, 41(sp)
-; RV32I-NEXT: sb s2, 40(sp)
-; RV32I-NEXT: sb t6, 39(sp)
-; RV32I-NEXT: sb t5, 38(sp)
-; RV32I-NEXT: sb t4, 37(sp)
-; RV32I-NEXT: sb t3, 36(sp)
-; RV32I-NEXT: sb t2, 35(sp)
-; RV32I-NEXT: sb t1, 34(sp)
-; RV32I-NEXT: sb t0, 33(sp)
-; RV32I-NEXT: sb a7, 32(sp)
-; RV32I-NEXT: sb a6, 31(sp)
-; RV32I-NEXT: sb a5, 30(sp)
-; RV32I-NEXT: sb a4, 29(sp)
-; RV32I-NEXT: sb a3, 28(sp)
-; RV32I-NEXT: slli a0, a1, 25
-; RV32I-NEXT: srli a0, a0, 28
-; RV32I-NEXT: addi a3, sp, 28
-; RV32I-NEXT: sub a3, a3, a0
-; RV32I-NEXT: lbu a0, 5(a3)
-; RV32I-NEXT: lbu a4, 4(a3)
-; RV32I-NEXT: lbu a5, 6(a3)
-; RV32I-NEXT: lbu a6, 7(a3)
-; RV32I-NEXT: slli a0, a0, 8
-; RV32I-NEXT: or a0, a0, a4
+; RV32I-NEXT: slli a3, a3, 8
+; RV32I-NEXT: or a3, a3, a4
; RV32I-NEXT: slli a5, a5, 16
; RV32I-NEXT: slli a6, a6, 24
; RV32I-NEXT: or a4, a6, a5
-; RV32I-NEXT: or a5, a4, a0
-; RV32I-NEXT: andi a4, a1, 7
-; RV32I-NEXT: sll a0, a5, a4
-; RV32I-NEXT: lbu a1, 1(a3)
-; RV32I-NEXT: lbu a6, 0(a3)
-; RV32I-NEXT: lbu a7, 2(a3)
-; RV32I-NEXT: lbu t0, 3(a3)
-; RV32I-NEXT: slli a1, a1, 8
-; RV32I-NEXT: or a1, a1, a6
+; RV32I-NEXT: or a3, a4, a3
+; RV32I-NEXT: lbu a4, 5(a0)
+; RV32I-NEXT: lbu a5, 4(a0)
+; RV32I-NEXT: lbu a6, 6(a0)
+; RV32I-NEXT: lbu a7, 7(a0)
+; RV32I-NEXT: slli a4, a4, 8
+; RV32I-NEXT: or a4, a4, a5
+; RV32I-NEXT: slli a6, a6, 16
+; RV32I-NEXT: slli a7, a7, 24
+; RV32I-NEXT: or a5, a7, a6
+; RV32I-NEXT: or a4, a5, a4
+; RV32I-NEXT: lbu a5, 9(a0)
+; RV32I-NEXT: lbu a6, 8(a0)
+; RV32I-NEXT: lbu a7, 10(a0)
+; RV32I-NEXT: lbu t0, 11(a0)
+; RV32I-NEXT: slli a5, a5, 8
+; RV32I-NEXT: or a5, a5, a6
; RV32I-NEXT: slli a7, a7, 16
; RV32I-NEXT: slli t0, t0, 24
; RV32I-NEXT: or a6, t0, a7
-; RV32I-NEXT: or a6, a6, a1
-; RV32I-NEXT: srli a1, a6, 1
-; RV32I-NEXT: xori a7, a4, 31
-; RV32I-NEXT: srl a1, a1, a7
-; RV32I-NEXT: or a1, a0, a1
-; RV32I-NEXT: lbu t0, 13(a3)
-; RV32I-NEXT: lbu t1, 12(a3)
-; RV32I-NEXT: lbu t2, 14(a3)
-; RV32I-NEXT: lbu t3, 15(a3)
-; RV32I-NEXT: slli t0, t0, 8
-; RV32I-NEXT: or t0, t0, t1
-; RV32I-NEXT: slli t2, t2, 16
-; RV32I-NEXT: slli t3, t3, 24
-; RV32I-NEXT: or t1, t3, t2
-; RV32I-NEXT: or t0, t1, t0
-; RV32I-NEXT: sll t0, t0, a4
-; RV32I-NEXT: lbu t1, 9(a3)
-; RV32I-NEXT: lbu t2, 8(a3)
-; RV32I-NEXT: lbu t3, 10(a3)
-; RV32I-NEXT: lbu a3, 11(a3)
-; RV32I-NEXT: slli t1, t1, 8
-; RV32I-NEXT: or t1, t1, t2
-; RV32I-NEXT: slli t3, t3, 16
-; RV32I-NEXT: slli a3, a3, 24
-; RV32I-NEXT: or a3, a3, t3
-; RV32I-NEXT: or a3, a3, t1
-; RV32I-NEXT: srli t1, a3, 1
-; RV32I-NEXT: srl a7, t1, a7
-; RV32I-NEXT: or a7, t0, a7
-; RV32I-NEXT: sll a3, a3, a4
-; RV32I-NEXT: srli a5, a5, 1
-; RV32I-NEXT: not t1, a4
-; RV32I-NEXT: srl a5, a5, t1
-; RV32I-NEXT: or a5, a3, a5
-; RV32I-NEXT: sll a4, a6, a4
-; RV32I-NEXT: sb a4, 0(a2)
-; RV32I-NEXT: srli a6, a3, 16
-; RV32I-NEXT: sb a6, 10(a2)
-; RV32I-NEXT: srli a6, a3, 24
-; RV32I-NEXT: sb a6, 11(a2)
-; RV32I-NEXT: srli a3, a3, 8
-; RV32I-NEXT: sb a3, 9(a2)
-; RV32I-NEXT: srli a3, t0, 16
-; RV32I-NEXT: sb a3, 14(a2)
-; RV32I-NEXT: srli a3, t0, 24
-; RV32I-NEXT: sb a3, 15(a2)
-; RV32I-NEXT: srli a3, t0, 8
-; RV32I-NEXT: sb a3, 13(a2)
-; RV32I-NEXT: srli a3, a4, 16
+; RV32I-NEXT: or a5, a6, a5
+; RV32I-NEXT: lbu a6, 13(a0)
+; RV32I-NEXT: lbu a7, 12(a0)
+; RV32I-NEXT: lbu t0, 14(a0)
+; RV32I-NEXT: lbu a0, 15(a0)
+; RV32I-NEXT: slli a6, a6, 8
+; RV32I-NEXT: or a6, a6, a7
+; RV32I-NEXT: slli t0, t0, 16
+; RV32I-NEXT: slli a0, a0, 24
+; RV32I-NEXT: or a0, a0, t0
+; RV32I-NEXT: or a0, a0, a6
+; RV32I-NEXT: lbu a6, 1(a1)
+; RV32I-NEXT: lbu a7, 0(a1)
+; RV32I-NEXT: lbu t0, 2(a1)
+; RV32I-NEXT: lbu a1, 3(a1)
+; RV32I-NEXT: slli a6, a6, 8
+; RV32I-NEXT: or a6, a6, a7
+; RV32I-NEXT: slli t0, t0, 16
+; RV32I-NEXT: slli a1, a1, 24
+; RV32I-NEXT: or a1, a1, t0
+; RV32I-NEXT: or a1, a1, a6
+; RV32I-NEXT: sw zero, 12(sp)
+; RV32I-NEXT: sw zero, 8(sp)
+; RV32I-NEXT: sw zero, 4(sp)
+; RV32I-NEXT: sw zero, 0(sp)
+; RV32I-NEXT: sw a0, 28(sp)
+; RV32I-NEXT: sw a5, 24(sp)
+; RV32I-NEXT: sw a4, 20(sp)
+; RV32I-NEXT: sw a3, 16(sp)
+; RV32I-NEXT: srli a0, a1, 3
+; RV32I-NEXT: andi a0, a0, 12
+; RV32I-NEXT: addi a3, sp, 16
+; RV32I-NEXT: sub a3, a3, a0
+; RV32I-NEXT: lw a0, 4(a3)
+; RV32I-NEXT: lw a4, 0(a3)
+; RV32I-NEXT: sll a5, a0, a1
+; RV32I-NEXT: andi a6, a1, 31
+; RV32I-NEXT: xori a6, a6, 31
+; RV32I-NEXT: srli a7, a4, 1
+; RV32I-NEXT: lw t0, 12(a3)
+; RV32I-NEXT: lw a3, 8(a3)
+; RV32I-NEXT: srl a7, a7, a6
+; RV32I-NEXT: or a5, a5, a7
+; RV32I-NEXT: sll a7, t0, a1
+; RV32I-NEXT: srli t0, a3, 1
+; RV32I-NEXT: srl t0, t0, a6
+; RV32I-NEXT: or a7, a7, t0
+; RV32I-NEXT: sll a3, a3, a1
+; RV32I-NEXT: srli a0, a0, 1
+; RV32I-NEXT: srl a0, a0, a6
+; RV32I-NEXT: or a0, a3, a0
+; RV32I-NEXT: sll a1, a4, a1
+; RV32I-NEXT: sb a1, 0(a2)
+; RV32I-NEXT: srli a3, a1, 16
; RV32I-NEXT: sb a3, 2(a2)
-; RV32I-NEXT: srli a3, a4, 24
+; RV32I-NEXT: srli a3, a1, 24
; RV32I-NEXT: sb a3, 3(a2)
-; RV32I-NEXT: srli a4, a4, 8
-; RV32I-NEXT: sb a4, 1(a2)
-; RV32I-NEXT: srli a3, a0, 16
-; RV32I-NEXT: sb a3, 6(a2)
-; RV32I-NEXT: srli a3, a0, 24
-; RV32I-NEXT: sb a3, 7(a2)
-; RV32I-NEXT: srli a0, a0, 8
-; RV32I-NEXT: sb a0, 5(a2)
-; RV32I-NEXT: sb a5, 8(a2)
+; RV32I-NEXT: srli a1, a1, 8
+; RV32I-NEXT: sb a1, 1(a2)
+; RV32I-NEXT: sb a0, 8(a2)
; RV32I-NEXT: sb a7, 12(a2)
-; RV32I-NEXT: sb a1, 4(a2)
-; RV32I-NEXT: lw s0, 60(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s1, 56(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s2, 52(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s3, 48(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s4, 44(sp) # 4-byte Folded Reload
-; RV32I-NEXT: addi sp, sp, 64
+; RV32I-NEXT: sb a5, 4(a2)
+; RV32I-NEXT: srli a1, a0, 16
+; RV32I-NEXT: sb a1, 10(a2)
+; RV32I-NEXT: srli a1, a0, 24
+; RV32I-NEXT: sb a1, 11(a2)
+; RV32I-NEXT: srli a0, a0, 8
+; RV32I-NEXT: sb a0, 9(a2)
+; RV32I-NEXT: srli a0, a7, 16
+; RV32I-NEXT: sb a0, 14(a2)
+; RV32I-NEXT: srli a0, a7, 24
+; RV32I-NEXT: sb a0, 15(a2)
+; RV32I-NEXT: srli a0, a7, 8
+; RV32I-NEXT: sb a0, 13(a2)
+; RV32I-NEXT: srli a0, a5, 16
+; RV32I-NEXT: sb a0, 6(a2)
+; RV32I-NEXT: srli a0, a5, 24
+; RV32I-NEXT: sb a0, 7(a2)
+; RV32I-NEXT: srli a5, a5, 8
+; RV32I-NEXT: sb a5, 5(a2)
+; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
%src = load i128, ptr %src.ptr, align 1
%bitOff = load i128, ptr %bitOff.ptr, align 1
@@ -1270,171 +1176,118 @@ define void @ashr_16bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
;
; RV32I-LABEL: ashr_16bytes:
; RV32I: # %bb.0:
-; RV32I-NEXT: addi sp, sp, -64
-; RV32I-NEXT: sw s0, 60(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s1, 56(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s2, 52(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s3, 48(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s4, 44(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s5, 40(sp) # 4-byte Folded Spill
-; RV32I-NEXT: lbu a3, 15(a0)
-; RV32I-NEXT: slli a4, a3, 24
-; RV32I-NEXT: lbu a5, 0(a0)
-; RV32I-NEXT: lbu a6, 1(a0)
-; RV32I-NEXT: lbu a7, 2(a0)
-; RV32I-NEXT: lbu t0, 3(a0)
-; RV32I-NEXT: lbu t1, 4(a0)
-; RV32I-NEXT: lbu t2, 5(a0)
-; RV32I-NEXT: lbu t3, 6(a0)
-; RV32I-NEXT: lbu t4, 7(a0)
-; RV32I-NEXT: lbu t5, 8(a0)
-; RV32I-NEXT: lbu t6, 9(a0)
-; RV32I-NEXT: lbu s0, 10(a0)
-; RV32I-NEXT: lbu s1, 1(a1)
-; RV32I-NEXT: lbu s2, 0(a1)
-; RV32I-NEXT: lbu s3, 11(a0)
-; RV32I-NEXT: lbu s4, 12(a0)
-; RV32I-NEXT: slli s1, s1, 8
-; RV32I-NEXT: or s1, s1, s2
-; RV32I-NEXT: lbu s2, 2(a1)
-; RV32I-NEXT: lbu a1, 3(a1)
-; RV32I-NEXT: lbu s5, 13(a0)
-; RV32I-NEXT: lbu a0, 14(a0)
-; RV32I-NEXT: slli s2, s2, 16
-; RV32I-NEXT: slli a1, a1, 24
-; RV32I-NEXT: or a1, a1, s2
-; RV32I-NEXT: or a1, a1, s1
-; RV32I-NEXT: sb a3, 23(sp)
-; RV32I-NEXT: sb a0, 22(sp)
-; RV32I-NEXT: sb s5, 21(sp)
-; RV32I-NEXT: sb s4, 20(sp)
-; RV32I-NEXT: sb s3, 19(sp)
-; RV32I-NEXT: sb s0, 18(sp)
-; RV32I-NEXT: sb t6, 17(sp)
-; RV32I-NEXT: sb t5, 16(sp)
-; RV32I-NEXT: sb t4, 15(sp)
-; RV32I-NEXT: sb t3, 14(sp)
-; RV32I-NEXT: sb t2, 13(sp)
-; RV32I-NEXT: sb t1, 12(sp)
-; RV32I-NEXT: sb t0, 11(sp)
-; RV32I-NEXT: sb a7, 10(sp)
-; RV32I-NEXT: sb a6, 9(sp)
-; RV32I-NEXT: sb a5, 8(sp)
-; RV32I-NEXT: srai a4, a4, 31
-; RV32I-NEXT: sb a4, 36(sp)
-; RV32I-NEXT: sb a4, 32(sp)
-; RV32I-NEXT: sb a4, 28(sp)
-; RV32I-NEXT: sb a4, 24(sp)
-; RV32I-NEXT: srli a0, a4, 24
-; RV32I-NEXT: sb a0, 39(sp)
-; RV32I-NEXT: srli a3, a4, 16
-; RV32I-NEXT: sb a3, 38(sp)
-; RV32I-NEXT: srli a4, a4, 8
-; RV32I-NEXT: sb a4, 37(sp)
-; RV32I-NEXT: sb a0, 35(sp)
-; RV32I-NEXT: sb a3, 34(sp)
-; RV32I-NEXT: sb a4, 33(sp)
-; RV32I-NEXT: sb a0, 31(sp)
-; RV32I-NEXT: sb a3, 30(sp)
-; RV32I-NEXT: sb a4, 29(sp)
-; RV32I-NEXT: sb a0, 27(sp)
-; RV32I-NEXT: sb a3, 26(sp)
-; RV32I-NEXT: sb a4, 25(sp)
-; RV32I-NEXT: slli a0, a1, 25
-; RV32I-NEXT: srli a0, a0, 28
-; RV32I-NEXT: addi a3, sp, 8
-; RV32I-NEXT: add a3, a3, a0
-; RV32I-NEXT: lbu a0, 5(a3)
-; RV32I-NEXT: lbu a4, 4(a3)
-; RV32I-NEXT: lbu a5, 6(a3)
-; RV32I-NEXT: lbu a6, 7(a3)
-; RV32I-NEXT: slli a0, a0, 8
-; RV32I-NEXT: or a0, a0, a4
+; RV32I-NEXT: addi sp, sp, -32
+; RV32I-NEXT: lbu a3, 1(a0)
+; RV32I-NEXT: lbu a4, 0(a0)
+; RV32I-NEXT: lbu a5, 2(a0)
+; RV32I-NEXT: lbu a6, 3(a0)
+; RV32I-NEXT: slli a3, a3, 8
+; RV32I-NEXT: or a3, a3, a4
; RV32I-NEXT: slli a5, a5, 16
; RV32I-NEXT: slli a6, a6, 24
; RV32I-NEXT: or a4, a6, a5
-; RV32I-NEXT: or a5, a4, a0
-; RV32I-NEXT: andi a4, a1, 7
-; RV32I-NEXT: srl a0, a5, a4
-; RV32I-NEXT: lbu a1, 9(a3)
-; RV32I-NEXT: lbu a6, 8(a3)
-; RV32I-NEXT: lbu a7, 10(a3)
-; RV32I-NEXT: lbu t0, 11(a3)
-; RV32I-NEXT: slli a1, a1, 8
-; RV32I-NEXT: or a1, a1, a6
+; RV32I-NEXT: or a3, a4, a3
+; RV32I-NEXT: lbu a4, 5(a0)
+; RV32I-NEXT: lbu a5, 4(a0)
+; RV32I-NEXT: lbu a6, 6(a0)
+; RV32I-NEXT: lbu a7, 7(a0)
+; RV32I-NEXT: slli a4, a4, 8
+; RV32I-NEXT: or a4, a4, a5
+; RV32I-NEXT: slli a6, a6, 16
+; RV32I-NEXT: slli a7, a7, 24
+; RV32I-NEXT: or a5, a7, a6
+; RV32I-NEXT: or a4, a5, a4
+; RV32I-NEXT: lbu a5, 9(a0)
+; RV32I-NEXT: lbu a6, 8(a0)
+; RV32I-NEXT: lbu a7, 10(a0)
+; RV32I-NEXT: lbu t0, 11(a0)
+; RV32I-NEXT: slli a5, a5, 8
+; RV32I-NEXT: or a5, a5, a6
; RV32I-NEXT: slli a7, a7, 16
; RV32I-NEXT: slli t0, t0, 24
; RV32I-NEXT: or a6, t0, a7
-; RV32I-NEXT: or a6, a6, a1
-; RV32I-NEXT: slli a1, a6, 1
-; RV32I-NEXT: not a7, a4
-; RV32I-NEXT: sll a1, a1, a7
-; RV32I-NEXT: or a1, a0, a1
-; RV32I-NEXT: lbu a7, 1(a3)
-; RV32I-NEXT: lbu t0, 0(a3)
-; RV32I-NEXT: lbu t1, 2(a3)
-; RV32I-NEXT: lbu t2, 3(a3)
+; RV32I-NEXT: or a5, a6, a5
+; RV32I-NEXT: lbu a6, 13(a0)
+; RV32I-NEXT: lbu a7, 12(a0)
+; RV32I-NEXT: lbu t0, 14(a0)
+; RV32I-NEXT: lbu a0, 15(a0)
+; RV32I-NEXT: slli a6, a6, 8
+; RV32I-NEXT: or a6, a6, a7
+; RV32I-NEXT: slli t0, t0, 16
+; RV32I-NEXT: slli a0, a0, 24
+; RV32I-NEXT: or a7, a0, t0
+; RV32I-NEXT: or a6, a7, a6
+; RV32I-NEXT: lbu a7, 1(a1)
+; RV32I-NEXT: lbu t0, 0(a1)
+; RV32I-NEXT: lbu t1, 2(a1)
+; RV32I-NEXT: lbu a1, 3(a1)
; RV32I-NEXT: slli a7, a7, 8
; RV32I-NEXT: or a7, a7, t0
; RV32I-NEXT: slli t1, t1, 16
-; RV32I-NEXT: slli t2, t2, 24
-; RV32I-NEXT: or t0, t2, t1
-; RV32I-NEXT: or a7, t0, a7
-; RV32I-NEXT: srl a7, a7, a4
-; RV32I-NEXT: slli a5, a5, 1
-; RV32I-NEXT: xori t0, a4, 31
-; RV32I-NEXT: sll a5, a5, t0
-; RV32I-NEXT: or a5, a7, a5
-; RV32I-NEXT: srl a6, a6, a4
-; RV32I-NEXT: lbu t1, 13(a3)
-; RV32I-NEXT: lbu t2, 12(a3)
-; RV32I-NEXT: lbu t3, 14(a3)
-; RV32I-NEXT: lbu a3, 15(a3)
-; RV32I-NEXT: slli t1, t1, 8
-; RV32I-NEXT: or t1, t1, t2
-; RV32I-NEXT: slli t3, t3, 16
-; RV32I-NEXT: slli a3, a3, 24
-; RV32I-NEXT: or a3, a3, t3
-; RV32I-NEXT: or a3, a3, t1
-; RV32I-NEXT: slli t1, a3, 1
-; RV32I-NEXT: sll t0, t1, t0
-; RV32I-NEXT: or t0, a6, t0
-; RV32I-NEXT: sra a3, a3, a4
-; RV32I-NEXT: sb a6, 8(a2)
-; RV32I-NEXT: sb a3, 12(a2)
-; RV32I-NEXT: sb a7, 0(a2)
-; RV32I-NEXT: sb a0, 4(a2)
-; RV32I-NEXT: srli a4, a6, 16
-; RV32I-NEXT: sb a4, 10(a2)
-; RV32I-NEXT: srli a4, a6, 8
-; RV32I-NEXT: sb a4, 9(a2)
-; RV32I-NEXT: srli a4, a3, 16
-; RV32I-NEXT: sb a4, 14(a2)
-; RV32I-NEXT: srli a4, a3, 24
-; RV32I-NEXT: sb a4, 15(a2)
-; RV32I-NEXT: srli a3, a3, 8
-; RV32I-NEXT: sb a3, 13(a2)
-; RV32I-NEXT: srli a3, a7, 16
-; RV32I-NEXT: sb a3, 2(a2)
-; RV32I-NEXT: srli a3, a7, 8
-; RV32I-NEXT: sb a3, 1(a2)
-; RV32I-NEXT: srli a3, a0, 16
-; RV32I-NEXT: sb a3, 6(a2)
+; RV32I-NEXT: slli a1, a1, 24
+; RV32I-NEXT: or a1, a1, t1
+; RV32I-NEXT: or a1, a1, a7
+; RV32I-NEXT: srai a0, a0, 31
+; RV32I-NEXT: sw a0, 28(sp)
+; RV32I-NEXT: sw a0, 24(sp)
+; RV32I-NEXT: sw a0, 20(sp)
+; RV32I-NEXT: sw a0, 16(sp)
+; RV32I-NEXT: sw a6, 12(sp)
+; RV32I-NEXT: sw a5, 8(sp)
+; RV32I-NEXT: sw a4, 4(sp)
+; RV32I-NEXT: sw a3, 0(sp)
+; RV32I-NEXT: srli a0, a1, 3
+; RV32I-NEXT: andi a0, a0, 12
+; RV32I-NEXT: mv a3, sp
+; RV32I-NEXT: add a0, a3, a0
+; RV32I-NEXT: lw a3, 4(a0)
+; RV32I-NEXT: srl a4, a3, a1
+; RV32I-NEXT: lw a5, 8(a0)
+; RV32I-NEXT: andi a6, a1, 31
+; RV32I-NEXT: xori a6, a6, 31
+; RV32I-NEXT: lw a7, 0(a0)
+; RV32I-NEXT: slli t0, a5, 1
+; RV32I-NEXT: sll t0, t0, a6
+; RV32I-NEXT: or a4, a4, t0
+; RV32I-NEXT: srl a7, a7, a1
+; RV32I-NEXT: slli a3, a3, 1
+; RV32I-NEXT: lw a0, 12(a0)
+; RV32I-NEXT: sll a3, a3, a6
+; RV32I-NEXT: or a3, a7, a3
+; RV32I-NEXT: srl a5, a5, a1
+; RV32I-NEXT: slli a7, a0, 1
+; RV32I-NEXT: sll a6, a7, a6
+; RV32I-NEXT: or a5, a5, a6
+; RV32I-NEXT: sra a0, a0, a1
+; RV32I-NEXT: sb a0, 12(a2)
+; RV32I-NEXT: srli a1, a0, 16
+; RV32I-NEXT: sb a1, 14(a2)
+; RV32I-NEXT: srli a1, a0, 24
+; RV32I-NEXT: sb a1, 15(a2)
; RV32I-NEXT: srli a0, a0, 8
-; RV32I-NEXT: sb a0, 5(a2)
-; RV32I-NEXT: srli a0, t0, 24
+; RV32I-NEXT: sb a0, 13(a2)
+; RV32I-NEXT: sb a5, 8(a2)
+; RV32I-NEXT: sb a3, 0(a2)
+; RV32I-NEXT: sb a4, 4(a2)
+; RV32I-NEXT: srli a0, a5, 16
+; RV32I-NEXT: sb a0, 10(a2)
+; RV32I-NEXT: srli a0, a5, 24
; RV32I-NEXT: sb a0, 11(a2)
-; RV32I-NEXT: srli a5, a5, 24
-; RV32I-NEXT: sb a5, 3(a2)
-; RV32I-NEXT: srli a1, a1, 24
-; RV32I-NEXT: sb a1, 7(a2)
-; RV32I-NEXT: lw s0, 60(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s1, 56(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s2, 52(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s3, 48(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s4, 44(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s5, 40(sp) # 4-byte Folded Reload
-; RV32I-NEXT: addi sp, sp, 64
+; RV32I-NEXT: srli a5, a5, 8
+; RV32I-NEXT: sb a5, 9(a2)
+; RV32I-NEXT: srli a0, a3, 16
+; RV32I-NEXT: sb a0, 2(a2)
+; RV32I-NEXT: srli a0, a3, 24
+; RV32I-NEXT: sb a0, 3(a2)
+; RV32I-NEXT: srli a3, a3, 8
+; RV32I-NEXT: sb a3, 1(a2)
+; RV32I-NEXT: srli a0, a4, 16
+; RV32I-NEXT: sb a0, 6(a2)
+; RV32I-NEXT: srli a0, a4, 24
+; RV32I-NEXT: sb a0, 7(a2)
+; RV32I-NEXT: srli a4, a4, 8
+; RV32I-NEXT: sb a4, 5(a2)
+; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
%src = load i128, ptr %src.ptr, align 1
%bitOff = load i128, ptr %bitOff.ptr, align 1
@@ -1446,191 +1299,43 @@ define void @ashr_16bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
define void @lshr_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; RV64I-LABEL: lshr_32bytes:
; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -224
-; RV64I-NEXT: sd ra, 216(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s0, 208(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s1, 200(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s2, 192(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s3, 184(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s4, 176(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s5, 168(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s6, 160(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s7, 152(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s8, 144(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s9, 136(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s10, 128(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s11, 120(sp) # 8-byte Folded Spill
-; RV64I-NEXT: lbu a3, 0(a0)
-; RV64I-NEXT: sd a3, 48(sp) # 8-byte Folded Spill
+; RV64I-NEXT: addi sp, sp, -64
; RV64I-NEXT: lbu a3, 1(a0)
-; RV64I-NEXT: sd a3, 40(sp) # 8-byte Folded Spill
-; RV64I-NEXT: lbu a3, 2(a0)
-; RV64I-NEXT: sd a3, 32(sp) # 8-byte Folded Spill
-; RV64I-NEXT: lbu a3, 3(a0)
-; RV64I-NEXT: sd a3, 24(sp) # 8-byte Folded Spill
-; RV64I-NEXT: lbu a3, 4(a0)
-; RV64I-NEXT: sd a3, 16(sp) # 8-byte Folded Spill
-; RV64I-NEXT: lbu a3, 5(a0)
-; RV64I-NEXT: sd a3, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: lbu t1, 6(a0)
-; RV64I-NEXT: lbu t2, 7(a0)
-; RV64I-NEXT: lbu t3, 8(a0)
-; RV64I-NEXT: lbu t4, 9(a0)
-; RV64I-NEXT: lbu t5, 10(a0)
-; RV64I-NEXT: lbu t6, 11(a0)
-; RV64I-NEXT: lbu s0, 12(a0)
-; RV64I-NEXT: lbu s1, 13(a0)
-; RV64I-NEXT: lbu s2, 14(a0)
-; RV64I-NEXT: lbu s3, 15(a0)
-; RV64I-NEXT: lbu s4, 16(a0)
-; RV64I-NEXT: lbu s5, 17(a0)
-; RV64I-NEXT: lbu s6, 18(a0)
-; RV64I-NEXT: lbu s7, 19(a0)
-; RV64I-NEXT: lbu s8, 20(a0)
-; RV64I-NEXT: lbu s9, 1(a1)
-; RV64I-NEXT: lbu s10, 0(a1)
-; RV64I-NEXT: lbu s11, 2(a1)
-; RV64I-NEXT: lbu ra, 3(a1)
-; RV64I-NEXT: slli s9, s9, 8
-; RV64I-NEXT: or s9, s9, s10
-; RV64I-NEXT: slli s11, s11, 16
-; RV64I-NEXT: slli ra, ra, 24
-; RV64I-NEXT: lbu s10, 5(a1)
-; RV64I-NEXT: or s11, ra, s11
-; RV64I-NEXT: or s11, s11, s9
-; RV64I-NEXT: lbu s9, 4(a1)
-; RV64I-NEXT: slli s10, s10, 8
-; RV64I-NEXT: lbu ra, 6(a1)
-; RV64I-NEXT: lbu a1, 7(a1)
-; RV64I-NEXT: or s10, s10, s9
-; RV64I-NEXT: lbu s9, 21(a0)
-; RV64I-NEXT: slli ra, ra, 16
-; RV64I-NEXT: slli a1, a1, 24
-; RV64I-NEXT: or a1, a1, ra
-; RV64I-NEXT: lbu ra, 22(a0)
-; RV64I-NEXT: or a1, a1, s10
-; RV64I-NEXT: lbu s10, 23(a0)
-; RV64I-NEXT: slli a1, a1, 32
-; RV64I-NEXT: or t0, a1, s11
-; RV64I-NEXT: lbu s11, 24(a0)
-; RV64I-NEXT: lbu a7, 25(a0)
-; RV64I-NEXT: lbu a6, 26(a0)
-; RV64I-NEXT: lbu a5, 27(a0)
-; RV64I-NEXT: lbu a1, 31(a0)
-; RV64I-NEXT: lbu a3, 30(a0)
-; RV64I-NEXT: lbu a4, 29(a0)
-; RV64I-NEXT: lbu a0, 28(a0)
-; RV64I-NEXT: sb a1, 87(sp)
-; RV64I-NEXT: sb a3, 86(sp)
-; RV64I-NEXT: sb a4, 85(sp)
-; RV64I-NEXT: sb a0, 84(sp)
-; RV64I-NEXT: sb a5, 83(sp)
-; RV64I-NEXT: sb a6, 82(sp)
-; RV64I-NEXT: sb a7, 81(sp)
-; RV64I-NEXT: sb s11, 80(sp)
-; RV64I-NEXT: sb s10, 79(sp)
-; RV64I-NEXT: sb ra, 78(sp)
-; RV64I-NEXT: sb s9, 77(sp)
-; RV64I-NEXT: sb s8, 76(sp)
-; RV64I-NEXT: sb s7, 75(sp)
-; RV64I-NEXT: sb s6, 74(sp)
-; RV64I-NEXT: sb s5, 73(sp)
-; RV64I-NEXT: sb s4, 72(sp)
-; RV64I-NEXT: sb s3, 71(sp)
-; RV64I-NEXT: sb s2, 70(sp)
-; RV64I-NEXT: sb s1, 69(sp)
-; RV64I-NEXT: sb s0, 68(sp)
-; RV64I-NEXT: sb t6, 67(sp)
-; RV64I-NEXT: sb t5, 66(sp)
-; RV64I-NEXT: sb t4, 65(sp)
-; RV64I-NEXT: sb zero, 119(sp)
-; RV64I-NEXT: sb zero, 118(sp)
-; RV64I-NEXT: sb zero, 117(sp)
-; RV64I-NEXT: sb zero, 116(sp)
-; RV64I-NEXT: sb zero, 115(sp)
-; RV64I-NEXT: sb zero, 114(sp)
-; RV64I-NEXT: sb zero, 113(sp)
-; RV64I-NEXT: sb zero, 112(sp)
-; RV64I-NEXT: sb zero, 111(sp)
-; RV64I-NEXT: sb zero, 110(sp)
-; RV64I-NEXT: sb zero, 109(sp)
-; RV64I-NEXT: sb zero, 108(sp)
-; RV64I-NEXT: sb zero, 107(sp)
-; RV64I-NEXT: sb zero, 106(sp)
-; RV64I-NEXT: sb zero, 105(sp)
-; RV64I-NEXT: sb zero, 104(sp)
-; RV64I-NEXT: sb zero, 103(sp)
-; RV64I-NEXT: sb zero, 102(sp)
-; RV64I-NEXT: sb zero, 101(sp)
-; RV64I-NEXT: sb zero, 100(sp)
-; RV64I-NEXT: sb zero, 99(sp)
-; RV64I-NEXT: sb zero, 98(sp)
-; RV64I-NEXT: sb zero, 97(sp)
-; RV64I-NEXT: sb zero, 96(sp)
-; RV64I-NEXT: sb zero, 95(sp)
-; RV64I-NEXT: sb zero, 94(sp)
-; RV64I-NEXT: sb zero, 93(sp)
-; RV64I-NEXT: sb zero, 92(sp)
-; RV64I-NEXT: sb zero, 91(sp)
-; RV64I-NEXT: sb zero, 90(sp)
-; RV64I-NEXT: sb zero, 89(sp)
-; RV64I-NEXT: sb zero, 88(sp)
-; RV64I-NEXT: sb t3, 64(sp)
-; RV64I-NEXT: sb t2, 63(sp)
-; RV64I-NEXT: sb t1, 62(sp)
-; RV64I-NEXT: ld a0, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: sb a0, 61(sp)
-; RV64I-NEXT: ld a0, 16(sp) # 8-byte Folded Reload
-; RV64I-NEXT: sb a0, 60(sp)
-; RV64I-NEXT: ld a0, 24(sp) # 8-byte Folded Reload
-; RV64I-NEXT: sb a0, 59(sp)
-; RV64I-NEXT: ld a0, 32(sp) # 8-byte Folded Reload
-; RV64I-NEXT: sb a0, 58(sp)
-; RV64I-NEXT: ld a0, 40(sp) # 8-byte Folded Reload
-; RV64I-NEXT: sb a0, 57(sp)
-; RV64I-NEXT: ld a0, 48(sp) # 8-byte Folded Reload
-; RV64I-NEXT: sb a0, 56(sp)
-; RV64I-NEXT: slli a0, t0, 56
-; RV64I-NEXT: srli a0, a0, 59
-; RV64I-NEXT: addi a3, sp, 56
-; RV64I-NEXT: add a3, a3, a0
-; RV64I-NEXT: lbu a0, 9(a3)
-; RV64I-NEXT: lbu a1, 8(a3)
-; RV64I-NEXT: lbu a4, 10(a3)
-; RV64I-NEXT: lbu a5, 11(a3)
-; RV64I-NEXT: slli a0, a0, 8
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: slli a4, a4, 16
-; RV64I-NEXT: slli a5, a5, 24
-; RV64I-NEXT: or a4, a5, a4
-; RV64I-NEXT: or a0, a4, a0
-; RV64I-NEXT: lbu a1, 13(a3)
-; RV64I-NEXT: lbu a4, 12(a3)
-; RV64I-NEXT: lbu a5, 14(a3)
-; RV64I-NEXT: lbu a6, 15(a3)
-; RV64I-NEXT: slli a1, a1, 8
-; RV64I-NEXT: or a1, a1, a4
+; RV64I-NEXT: lbu a4, 0(a0)
+; RV64I-NEXT: lbu a5, 2(a0)
+; RV64I-NEXT: lbu a6, 3(a0)
+; RV64I-NEXT: slli a3, a3, 8
+; RV64I-NEXT: or a3, a3, a4
; RV64I-NEXT: slli a5, a5, 16
; RV64I-NEXT: slli a6, a6, 24
; RV64I-NEXT: or a4, a6, a5
-; RV64I-NEXT: or a1, a4, a1
-; RV64I-NEXT: slli a1, a1, 32
-; RV64I-NEXT: or a4, a1, a0
-; RV64I-NEXT: andi a1, t0, 7
-; RV64I-NEXT: lbu a0, 17(a3)
-; RV64I-NEXT: lbu a5, 16(a3)
-; RV64I-NEXT: lbu a6, 18(a3)
-; RV64I-NEXT: lbu a7, 19(a3)
-; RV64I-NEXT: slli a0, a0, 8
-; RV64I-NEXT: or a0, a0, a5
+; RV64I-NEXT: or a3, a4, a3
+; RV64I-NEXT: lbu a4, 5(a0)
+; RV64I-NEXT: lbu a5, 4(a0)
+; RV64I-NEXT: lbu a6, 6(a0)
+; RV64I-NEXT: lbu a7, 7(a0)
+; RV64I-NEXT: slli a4, a4, 8
+; RV64I-NEXT: or a4, a4, a5
; RV64I-NEXT: slli a6, a6, 16
; RV64I-NEXT: slli a7, a7, 24
; RV64I-NEXT: or a5, a7, a6
-; RV64I-NEXT: or a0, a5, a0
-; RV64I-NEXT: lbu a5, 21(a3)
-; RV64I-NEXT: lbu a6, 20(a3)
-; RV64I-NEXT: lbu a7, 22(a3)
-; RV64I-NEXT: lbu t0, 23(a3)
+; RV64I-NEXT: or a4, a5, a4
+; RV64I-NEXT: slli a4, a4, 32
+; RV64I-NEXT: or a3, a4, a3
+; RV64I-NEXT: lbu a4, 9(a0)
+; RV64I-NEXT: lbu a5, 8(a0)
+; RV64I-NEXT: lbu a6, 10(a0)
+; RV64I-NEXT: lbu a7, 11(a0)
+; RV64I-NEXT: slli a4, a4, 8
+; RV64I-NEXT: or a4, a4, a5
+; RV64I-NEXT: slli a6, a6, 16
+; RV64I-NEXT: slli a7, a7, 24
+; RV64I-NEXT: or a5, a7, a6
+; RV64I-NEXT: or a4, a5, a4
+; RV64I-NEXT: lbu a5, 13(a0)
+; RV64I-NEXT: lbu a6, 12(a0)
+; RV64I-NEXT: lbu a7, 14(a0)
+; RV64I-NEXT: lbu t0, 15(a0)
; RV64I-NEXT: slli a5, a5, 8
; RV64I-NEXT: or a5, a5, a6
; RV64I-NEXT: slli a7, a7, 16
@@ -1638,92 +1343,138 @@ define void @lshr_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; RV64I-NEXT: or a6, t0, a7
; RV64I-NEXT: or a5, a6, a5
; RV64I-NEXT: slli a5, a5, 32
-; RV64I-NEXT: or a5, a5, a0
-; RV64I-NEXT: slli a0, a5, 1
-; RV64I-NEXT: not a6, a1
-; RV64I-NEXT: sll a0, a0, a6
-; RV64I-NEXT: lbu a6, 1(a3)
-; RV64I-NEXT: lbu a7, 0(a3)
-; RV64I-NEXT: lbu t0, 2(a3)
-; RV64I-NEXT: lbu t1, 3(a3)
+; RV64I-NEXT: or a4, a5, a4
+; RV64I-NEXT: lbu a5, 17(a0)
+; RV64I-NEXT: lbu a6, 16(a0)
+; RV64I-NEXT: lbu a7, 18(a0)
+; RV64I-NEXT: lbu t0, 19(a0)
+; RV64I-NEXT: slli a5, a5, 8
+; RV64I-NEXT: or a5, a5, a6
+; RV64I-NEXT: slli a7, a7, 16
+; RV64I-NEXT: slli t0, t0, 24
+; RV64I-NEXT: or a6, t0, a7
+; RV64I-NEXT: or a5, a6, a5
+; RV64I-NEXT: lbu a6, 21(a0)
+; RV64I-NEXT: lbu a7, 20(a0)
+; RV64I-NEXT: lbu t0, 22(a0)
+; RV64I-NEXT: lbu t1, 23(a0)
+; RV64I-NEXT: slli a6, a6, 8
+; RV64I-NEXT: or a6, a6, a7
+; RV64I-NEXT: slli t0, t0, 16
+; RV64I-NEXT: slli t1, t1, 24
+; RV64I-NEXT: or a7, t1, t0
+; RV64I-NEXT: or a6, a7, a6
+; RV64I-NEXT: slli a6, a6, 32
+; RV64I-NEXT: or a5, a6, a5
+; RV64I-NEXT: lbu a6, 25(a0)
+; RV64I-NEXT: lbu a7, 24(a0)
+; RV64I-NEXT: lbu t0, 26(a0)
+; RV64I-NEXT: lbu t1, 27(a0)
; RV64I-NEXT: slli a6, a6, 8
; RV64I-NEXT: or a6, a6, a7
; RV64I-NEXT: slli t0, t0, 16
; RV64I-NEXT: slli t1, t1, 24
; RV64I-NEXT: or a7, t1, t0
; RV64I-NEXT: or a6, a7, a6
-; RV64I-NEXT: lbu a7, 5(a3)
-; RV64I-NEXT: lbu t0, 4(a3)
-; RV64I-NEXT: lbu t1, 6(a3)
-; RV64I-NEXT: lbu t2, 7(a3)
+; RV64I-NEXT: lbu a7, 29(a0)
+; RV64I-NEXT: lbu t0, 28(a0)
+; RV64I-NEXT: lbu t1, 30(a0)
+; RV64I-NEXT: lbu a0, 31(a0)
; RV64I-NEXT: slli a7, a7, 8
; RV64I-NEXT: or a7, a7, t0
; RV64I-NEXT: slli t1, t1, 16
-; RV64I-NEXT: slli t2, t2, 24
-; RV64I-NEXT: or t0, t2, t1
-; RV64I-NEXT: or a7, t0, a7
-; RV64I-NEXT: slli a7, a7, 32
+; RV64I-NEXT: slli a0, a0, 24
+; RV64I-NEXT: or a0, a0, t1
+; RV64I-NEXT: or a0, a0, a7
+; RV64I-NEXT: slli a0, a0, 32
+; RV64I-NEXT: or a0, a0, a6
+; RV64I-NEXT: lbu a6, 1(a1)
+; RV64I-NEXT: lbu a7, 0(a1)
+; RV64I-NEXT: lbu t0, 2(a1)
+; RV64I-NEXT: lbu t1, 3(a1)
+; RV64I-NEXT: slli a6, a6, 8
+; RV64I-NEXT: or a6, a6, a7
+; RV64I-NEXT: slli t0, t0, 16
+; RV64I-NEXT: slli t1, t1, 24
+; RV64I-NEXT: or a7, t1, t0
; RV64I-NEXT: or a6, a7, a6
-; RV64I-NEXT: lbu a7, 25(a3)
-; RV64I-NEXT: lbu t0, 24(a3)
-; RV64I-NEXT: lbu t1, 26(a3)
-; RV64I-NEXT: lbu t2, 27(a3)
+; RV64I-NEXT: lbu a7, 5(a1)
+; RV64I-NEXT: lbu t0, 4(a1)
+; RV64I-NEXT: lbu t1, 6(a1)
+; RV64I-NEXT: lbu a1, 7(a1)
; RV64I-NEXT: slli a7, a7, 8
; RV64I-NEXT: or a7, a7, t0
; RV64I-NEXT: slli t1, t1, 16
-; RV64I-NEXT: slli t2, t2, 24
-; RV64I-NEXT: or t0, t2, t1
-; RV64I-NEXT: or a7, t0, a7
-; RV64I-NEXT: lbu t0, 29(a3)
-; RV64I-NEXT: lbu t1, 28(a3)
-; RV64I-NEXT: lbu t2, 30(a3)
-; RV64I-NEXT: lbu a3, 31(a3)
-; RV64I-NEXT: slli t0, t0, 8
-; RV64I-NEXT: or t0, t0, t1
-; RV64I-NEXT: slli t2, t2, 16
-; RV64I-NEXT: slli a3, a3, 24
-; RV64I-NEXT: or a3, a3, t2
-; RV64I-NEXT: slli t1, a4, 1
-; RV64I-NEXT: or a3, a3, t0
-; RV64I-NEXT: xori t0, a1, 63
-; RV64I-NEXT: sll t1, t1, t0
-; RV64I-NEXT: slli a3, a3, 32
-; RV64I-NEXT: or a7, a3, a7
-; RV64I-NEXT: slli a3, a7, 1
-; RV64I-NEXT: sll t0, a3, t0
-; RV64I-NEXT: srl a3, a4, a1
-; RV64I-NEXT: srl a4, a6, a1
+; RV64I-NEXT: slli a1, a1, 24
+; RV64I-NEXT: or a1, a1, t1
+; RV64I-NEXT: or a1, a1, a7
+; RV64I-NEXT: slli a1, a1, 32
+; RV64I-NEXT: or a1, a1, a6
+; RV64I-NEXT: sd zero, 56(sp)
+; RV64I-NEXT: sd zero, 48(sp)
+; RV64I-NEXT: sd zero, 40(sp)
+; RV64I-NEXT: sd zero, 32(sp)
+; RV64I-NEXT: sd a0, 24(sp)
+; RV64I-NEXT: sd a5, 16(sp)
+; RV64I-NEXT: sd a4, 8(sp)
+; RV64I-NEXT: sd a3, 0(sp)
+; RV64I-NEXT: srli a0, a1, 3
+; RV64I-NEXT: andi a0, a0, 24
+; RV64I-NEXT: mv a3, sp
+; RV64I-NEXT: add a3, a3, a0
+; RV64I-NEXT: ld a4, 8(a3)
+; RV64I-NEXT: srl a0, a4, a1
+; RV64I-NEXT: ld a5, 16(a3)
+; RV64I-NEXT: andi a6, a1, 63
+; RV64I-NEXT: xori a6, a6, 63
+; RV64I-NEXT: ld a7, 0(a3)
+; RV64I-NEXT: slli t0, a5, 1
+; RV64I-NEXT: sll t0, t0, a6
+; RV64I-NEXT: or a0, a0, t0
+; RV64I-NEXT: srl a7, a7, a1
+; RV64I-NEXT: slli a4, a4, 1
+; RV64I-NEXT: ld a3, 24(a3)
+; RV64I-NEXT: sll a4, a4, a6
+; RV64I-NEXT: or a4, a7, a4
; RV64I-NEXT: srl a5, a5, a1
-; RV64I-NEXT: srl a1, a7, a1
-; RV64I-NEXT: srli a6, a5, 48
-; RV64I-NEXT: sb a6, 22(a2)
-; RV64I-NEXT: srli a6, a5, 40
-; RV64I-NEXT: sb a6, 21(a2)
-; RV64I-NEXT: srli a6, a5, 32
-; RV64I-NEXT: sb a6, 20(a2)
-; RV64I-NEXT: srli a6, a5, 24
-; RV64I-NEXT: sb a6, 19(a2)
-; RV64I-NEXT: srli a6, a5, 16
-; RV64I-NEXT: sb a6, 18(a2)
-; RV64I-NEXT: or a6, a5, t0
-; RV64I-NEXT: sb a5, 16(a2)
-; RV64I-NEXT: srli a5, a5, 8
-; RV64I-NEXT: sb a5, 17(a2)
-; RV64I-NEXT: srli a5, a1, 56
-; RV64I-NEXT: sb a5, 31(a2)
-; RV64I-NEXT: srli a5, a1, 48
-; RV64I-NEXT: sb a5, 30(a2)
-; RV64I-NEXT: srli a5, a1, 40
-; RV64I-NEXT: sb a5, 29(a2)
-; RV64I-NEXT: srli a5, a1, 32
-; RV64I-NEXT: sb a5, 28(a2)
-; RV64I-NEXT: srli a5, a1, 24
-; RV64I-NEXT: sb a5, 27(a2)
-; RV64I-NEXT: srli a5, a1, 16
-; RV64I-NEXT: sb a5, 26(a2)
+; RV64I-NEXT: slli a7, a3, 1
+; RV64I-NEXT: sll a6, a7, a6
+; RV64I-NEXT: or a5, a5, a6
+; RV64I-NEXT: srl a1, a3, a1
; RV64I-NEXT: sb a1, 24(a2)
+; RV64I-NEXT: srli a3, a1, 56
+; RV64I-NEXT: sb a3, 31(a2)
+; RV64I-NEXT: srli a3, a1, 48
+; RV64I-NEXT: sb a3, 30(a2)
+; RV64I-NEXT: srli a3, a1, 40
+; RV64I-NEXT: sb a3, 29(a2)
+; RV64I-NEXT: srli a3, a1, 32
+; RV64I-NEXT: sb a3, 28(a2)
+; RV64I-NEXT: srli a3, a1, 24
+; RV64I-NEXT: sb a3, 27(a2)
+; RV64I-NEXT: srli a3, a1, 16
+; RV64I-NEXT: sb a3, 26(a2)
; RV64I-NEXT: srli a1, a1, 8
; RV64I-NEXT: sb a1, 25(a2)
+; RV64I-NEXT: sb a5, 16(a2)
+; RV64I-NEXT: sb a4, 0(a2)
+; RV64I-NEXT: sb a0, 8(a2)
+; RV64I-NEXT: srli a1, a5, 56
+; RV64I-NEXT: sb a1, 23(a2)
+; RV64I-NEXT: srli a1, a5, 48
+; RV64I-NEXT: sb a1, 22(a2)
+; RV64I-NEXT: srli a1, a5, 40
+; RV64I-NEXT: sb a1, 21(a2)
+; RV64I-NEXT: srli a1, a5, 32
+; RV64I-NEXT: sb a1, 20(a2)
+; RV64I-NEXT: srli a1, a5, 24
+; RV64I-NEXT: sb a1, 19(a2)
+; RV64I-NEXT: srli a1, a5, 16
+; RV64I-NEXT: sb a1, 18(a2)
+; RV64I-NEXT: srli a5, a5, 8
+; RV64I-NEXT: sb a5, 17(a2)
+; RV64I-NEXT: srli a1, a4, 56
+; RV64I-NEXT: sb a1, 7(a2)
; RV64I-NEXT: srli a1, a4, 48
; RV64I-NEXT: sb a1, 6(a2)
; RV64I-NEXT: srli a1, a4, 40
@@ -1734,366 +1485,234 @@ define void @lshr_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; RV64I-NEXT: sb a1, 3(a2)
; RV64I-NEXT: srli a1, a4, 16
; RV64I-NEXT: sb a1, 2(a2)
-; RV64I-NEXT: or a1, a4, t1
-; RV64I-NEXT: sb a4, 0(a2)
; RV64I-NEXT: srli a4, a4, 8
; RV64I-NEXT: sb a4, 1(a2)
-; RV64I-NEXT: srli a4, a3, 48
-; RV64I-NEXT: sb a4, 14(a2)
-; RV64I-NEXT: srli a4, a3, 40
-; RV64I-NEXT: sb a4, 13(a2)
-; RV64I-NEXT: srli a4, a3, 32
-; RV64I-NEXT: sb a4, 12(a2)
-; RV64I-NEXT: srli a4, a3, 24
-; RV64I-NEXT: sb a4, 11(a2)
-; RV64I-NEXT: srli a4, a3, 16
-; RV64I-NEXT: sb a4, 10(a2)
-; RV64I-NEXT: or a0, a3, a0
-; RV64I-NEXT: sb a3, 8(a2)
-; RV64I-NEXT: srli a3, a3, 8
-; RV64I-NEXT: sb a3, 9(a2)
-; RV64I-NEXT: srli a3, a6, 56
-; RV64I-NEXT: sb a3, 23(a2)
-; RV64I-NEXT: srli a1, a1, 56
-; RV64I-NEXT: sb a1, 7(a2)
-; RV64I-NEXT: srli a0, a0, 56
-; RV64I-NEXT: sb a0, 15(a2)
-; RV64I-NEXT: ld ra, 216(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s0, 208(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s1, 200(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s2, 192(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s3, 184(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s4, 176(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s5, 168(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s6, 160(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s7, 152(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s8, 144(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s9, 136(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s10, 128(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s11, 120(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 224
+; RV64I-NEXT: srli a1, a0, 56
+; RV64I-NEXT: sb a1, 15(a2)
+; RV64I-NEXT: srli a1, a0, 48
+; RV64I-NEXT: sb a1, 14(a2)
+; RV64I-NEXT: srli a1, a0, 40
+; RV64I-NEXT: sb a1, 13(a2)
+; RV64I-NEXT: srli a1, a0, 32
+; RV64I-NEXT: sb a1, 12(a2)
+; RV64I-NEXT: srli a1, a0, 24
+; RV64I-NEXT: sb a1, 11(a2)
+; RV64I-NEXT: srli a1, a0, 16
+; RV64I-NEXT: sb a1, 10(a2)
+; RV64I-NEXT: srli a0, a0, 8
+; RV64I-NEXT: sb a0, 9(a2)
+; RV64I-NEXT: addi sp, sp, 64
; RV64I-NEXT: ret
;
; RV32I-LABEL: lshr_32bytes:
; RV32I: # %bb.0:
-; RV32I-NEXT: addi sp, sp, -144
-; RV32I-NEXT: sw ra, 140(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s0, 136(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s1, 132(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s2, 128(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s3, 124(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s4, 120(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s5, 116(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s6, 112(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s7, 108(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s8, 104(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s9, 100(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s10, 96(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s11, 92(sp) # 4-byte Folded Spill
-; RV32I-NEXT: lbu a3, 0(a0)
-; RV32I-NEXT: sw a3, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT: addi sp, sp, -64
; RV32I-NEXT: lbu a3, 1(a0)
-; RV32I-NEXT: sw a3, 20(sp) # 4-byte Folded Spill
-; RV32I-NEXT: lbu a3, 2(a0)
-; RV32I-NEXT: sw a3, 16(sp) # 4-byte Folded Spill
-; RV32I-NEXT: lbu a3, 3(a0)
-; RV32I-NEXT: sw a3, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: lbu a3, 4(a0)
-; RV32I-NEXT: sw a3, 8(sp) # 4-byte Folded Spill
-; RV32I-NEXT: lbu a3, 5(a0)
-; RV32I-NEXT: sw a3, 4(sp) # 4-byte Folded Spill
-; RV32I-NEXT: lbu t1, 6(a0)
-; RV32I-NEXT: lbu t2, 7(a0)
-; RV32I-NEXT: lbu t3, 8(a0)
-; RV32I-NEXT: lbu t4, 9(a0)
-; RV32I-NEXT: lbu t5, 10(a0)
-; RV32I-NEXT: lbu t6, 11(a0)
-; RV32I-NEXT: lbu s0, 12(a0)
-; RV32I-NEXT: lbu s1, 13(a0)
-; RV32I-NEXT: lbu s2, 14(a0)
-; RV32I-NEXT: lbu s3, 15(a0)
-; RV32I-NEXT: lbu s4, 16(a0)
-; RV32I-NEXT: lbu s5, 17(a0)
-; RV32I-NEXT: lbu s6, 18(a0)
-; RV32I-NEXT: lbu s7, 19(a0)
-; RV32I-NEXT: lbu s10, 1(a1)
-; RV32I-NEXT: lbu s8, 20(a0)
-; RV32I-NEXT: lbu s9, 21(a0)
-; RV32I-NEXT: lbu s11, 0(a1)
-; RV32I-NEXT: slli s10, s10, 8
-; RV32I-NEXT: lbu ra, 2(a1)
-; RV32I-NEXT: lbu a1, 3(a1)
-; RV32I-NEXT: or s10, s10, s11
-; RV32I-NEXT: lbu s11, 22(a0)
-; RV32I-NEXT: slli ra, ra, 16
-; RV32I-NEXT: slli a1, a1, 24
-; RV32I-NEXT: or a1, a1, ra
-; RV32I-NEXT: lbu ra, 23(a0)
-; RV32I-NEXT: or t0, a1, s10
-; RV32I-NEXT: lbu s10, 24(a0)
-; RV32I-NEXT: lbu a7, 25(a0)
-; RV32I-NEXT: lbu a6, 26(a0)
-; RV32I-NEXT: lbu a5, 27(a0)
-; RV32I-NEXT: lbu a1, 31(a0)
-; RV32I-NEXT: lbu a3, 30(a0)
-; RV32I-NEXT: lbu a4, 29(a0)
-; RV32I-NEXT: lbu a0, 28(a0)
-; RV32I-NEXT: sb a1, 59(sp)
-; RV32I-NEXT: sb a3, 58(sp)
-; RV32I-NEXT: sb a4, 57(sp)
-; RV32I-NEXT: sb a0, 56(sp)
-; RV32I-NEXT: sb a5, 55(sp)
-; RV32I-NEXT: sb a6, 54(sp)
-; RV32I-NEXT: sb a7, 53(sp)
-; RV32I-NEXT: sb s10, 52(sp)
-; RV32I-NEXT: sb ra, 51(sp)
-; RV32I-NEXT: sb s11, 50(sp)
-; RV32I-NEXT: sb s9, 49(sp)
-; RV32I-NEXT: sb s8, 48(sp)
-; RV32I-NEXT: sb s7, 47(sp)
-; RV32I-NEXT: sb s6, 46(sp)
-; RV32I-NEXT: sb s5, 45(sp)
-; RV32I-NEXT: sb s4, 44(sp)
-; RV32I-NEXT: sb zero, 91(sp)
-; RV32I-NEXT: sb zero, 90(sp)
-; RV32I-NEXT: sb zero, 89(sp)
-; RV32I-NEXT: sb zero, 88(sp)
-; RV32I-NEXT: sb zero, 87(sp)
-; RV32I-NEXT: sb zero, 86(sp)
-; RV32I-NEXT: sb zero, 85(sp)
-; RV32I-NEXT: sb zero, 84(sp)
-; RV32I-NEXT: sb zero, 83(sp)
-; RV32I-NEXT: sb zero, 82(sp)
-; RV32I-NEXT: sb zero, 81(sp)
-; RV32I-NEXT: sb zero, 80(sp)
-; RV32I-NEXT: sb zero, 79(sp)
-; RV32I-NEXT: sb zero, 78(sp)
-; RV32I-NEXT: sb zero, 77(sp)
-; RV32I-NEXT: sb zero, 76(sp)
-; RV32I-NEXT: sb zero, 75(sp)
-; RV32I-NEXT: sb zero, 74(sp)
-; RV32I-NEXT: sb zero, 73(sp)
-; RV32I-NEXT: sb zero, 72(sp)
-; RV32I-NEXT: sb zero, 71(sp)
-; RV32I-NEXT: sb zero, 70(sp)
-; RV32I-NEXT: sb zero, 69(sp)
-; RV32I-NEXT: sb zero, 68(sp)
-; RV32I-NEXT: sb zero, 67(sp)
-; RV32I-NEXT: sb zero, 66(sp)
-; RV32I-NEXT: sb zero, 65(sp)
-; RV32I-NEXT: sb zero, 64(sp)
-; RV32I-NEXT: sb zero, 63(sp)
-; RV32I-NEXT: sb zero, 62(sp)
-; RV32I-NEXT: sb zero, 61(sp)
-; RV32I-NEXT: sb zero, 60(sp)
-; RV32I-NEXT: sb s3, 43(sp)
-; RV32I-NEXT: sb s2, 42(sp)
-; RV32I-NEXT: sb s1, 41(sp)
-; RV32I-NEXT: sb s0, 40(sp)
-; RV32I-NEXT: sb t6, 39(sp)
-; RV32I-NEXT: sb t5, 38(sp)
-; RV32I-NEXT: sb t4, 37(sp)
-; RV32I-NEXT: sb t3, 36(sp)
-; RV32I-NEXT: sb t2, 35(sp)
-; RV32I-NEXT: sb t1, 34(sp)
-; RV32I-NEXT: lw a0, 4(sp) # 4-byte Folded Reload
-; RV32I-NEXT: sb a0, 33(sp)
-; RV32I-NEXT: lw a0, 8(sp) # 4-byte Folded Reload
-; RV32I-NEXT: sb a0, 32(sp)
-; RV32I-NEXT: lw a0, 12(sp) # 4-byte Folded Reload
-; RV32I-NEXT: sb a0, 31(sp)
-; RV32I-NEXT: lw a0, 16(sp) # 4-byte Folded Reload
-; RV32I-NEXT: sb a0, 30(sp)
-; RV32I-NEXT: lw a0, 20(sp) # 4-byte Folded Reload
-; RV32I-NEXT: sb a0, 29(sp)
-; RV32I-NEXT: lw a0, 24(sp) # 4-byte Folded Reload
-; RV32I-NEXT: sb a0, 28(sp)
-; RV32I-NEXT: slli a0, t0, 24
-; RV32I-NEXT: srli a0, a0, 27
-; RV32I-NEXT: addi a4, sp, 28
-; RV32I-NEXT: add a4, a4, a0
-; RV32I-NEXT: lbu a0, 5(a4)
-; RV32I-NEXT: lbu a1, 4(a4)
-; RV32I-NEXT: lbu a3, 6(a4)
-; RV32I-NEXT: lbu a5, 7(a4)
-; RV32I-NEXT: slli a0, a0, 8
-; RV32I-NEXT: or a0, a0, a1
-; RV32I-NEXT: slli a3, a3, 16
-; RV32I-NEXT: slli a5, a5, 24
-; RV32I-NEXT: or a3, a5, a3
-; RV32I-NEXT: or t5, a3, a0
-; RV32I-NEXT: andi a3, t0, 7
-; RV32I-NEXT: lbu a0, 9(a4)
-; RV32I-NEXT: lbu a1, 8(a4)
-; RV32I-NEXT: lbu a5, 10(a4)
-; RV32I-NEXT: lbu a6, 11(a4)
-; RV32I-NEXT: slli a0, a0, 8
-; RV32I-NEXT: or a0, a0, a1
+; RV32I-NEXT: lbu a4, 0(a0)
+; RV32I-NEXT: lbu a5, 2(a0)
+; RV32I-NEXT: lbu a6, 3(a0)
+; RV32I-NEXT: slli a3, a3, 8
+; RV32I-NEXT: or a3, a3, a4
; RV32I-NEXT: slli a5, a5, 16
; RV32I-NEXT: slli a6, a6, 24
-; RV32I-NEXT: or a1, a6, a5
-; RV32I-NEXT: or a6, a1, a0
-; RV32I-NEXT: slli a0, a6, 1
-; RV32I-NEXT: not t1, a3
-; RV32I-NEXT: sll a0, a0, t1
-; RV32I-NEXT: lbu a1, 1(a4)
-; RV32I-NEXT: lbu a5, 0(a4)
-; RV32I-NEXT: lbu a7, 2(a4)
-; RV32I-NEXT: lbu t0, 3(a4)
-; RV32I-NEXT: slli a1, a1, 8
-; RV32I-NEXT: or a1, a1, a5
+; RV32I-NEXT: or a4, a6, a5
+; RV32I-NEXT: or a3, a4, a3
+; RV32I-NEXT: lbu a4, 5(a0)
+; RV32I-NEXT: lbu a5, 4(a0)
+; RV32I-NEXT: lbu a6, 6(a0)
+; RV32I-NEXT: lbu a7, 7(a0)
+; RV32I-NEXT: slli a4, a4, 8
+; RV32I-NEXT: or a4, a4, a5
+; RV32I-NEXT: slli a6, a6, 16
+; RV32I-NEXT: slli a7, a7, 24
+; RV32I-NEXT: or a5, a7, a6
+; RV32I-NEXT: or a4, a5, a4
+; RV32I-NEXT: lbu a5, 9(a0)
+; RV32I-NEXT: lbu a6, 8(a0)
+; RV32I-NEXT: lbu a7, 10(a0)
+; RV32I-NEXT: lbu t0, 11(a0)
+; RV32I-NEXT: slli a5, a5, 8
+; RV32I-NEXT: or a5, a5, a6
; RV32I-NEXT: slli a7, a7, 16
; RV32I-NEXT: slli t0, t0, 24
-; RV32I-NEXT: or a5, t0, a7
-; RV32I-NEXT: or t0, a5, a1
-; RV32I-NEXT: slli a1, t5, 1
-; RV32I-NEXT: xori t2, a3, 31
-; RV32I-NEXT: sll a1, a1, t2
-; RV32I-NEXT: lbu a5, 13(a4)
-; RV32I-NEXT: lbu a7, 12(a4)
-; RV32I-NEXT: lbu t3, 14(a4)
-; RV32I-NEXT: lbu t4, 15(a4)
-; RV32I-NEXT: slli a5, a5, 8
-; RV32I-NEXT: or a5, a5, a7
+; RV32I-NEXT: or a6, t0, a7
+; RV32I-NEXT: or a5, a6, a5
+; RV32I-NEXT: lbu a6, 13(a0)
+; RV32I-NEXT: lbu a7, 12(a0)
+; RV32I-NEXT: lbu t0, 14(a0)
+; RV32I-NEXT: lbu t1, 15(a0)
+; RV32I-NEXT: slli a6, a6, 8
+; RV32I-NEXT: or a6, a6, a7
+; RV32I-NEXT: slli t0, t0, 16
+; RV32I-NEXT: slli t1, t1, 24
+; RV32I-NEXT: or a7, t1, t0
+; RV32I-NEXT: or a6, a7, a6
+; RV32I-NEXT: lbu a7, 17(a0)
+; RV32I-NEXT: lbu t0, 16(a0)
+; RV32I-NEXT: lbu t1, 18(a0)
+; RV32I-NEXT: lbu t2, 19(a0)
+; RV32I-NEXT: slli a7, a7, 8
+; RV32I-NEXT: or a7, a7, t0
+; RV32I-NEXT: slli t1, t1, 16
+; RV32I-NEXT: slli t2, t2, 24
+; RV32I-NEXT: or t0, t2, t1
+; RV32I-NEXT: or t0, t0, a7
+; RV32I-NEXT: lbu a7, 21(a0)
+; RV32I-NEXT: lbu t1, 20(a0)
+; RV32I-NEXT: lbu t2, 22(a0)
+; RV32I-NEXT: lbu t3, 23(a0)
+; RV32I-NEXT: slli a7, a7, 8
+; RV32I-NEXT: or a7, a7, t1
+; RV32I-NEXT: slli t2, t2, 16
+; RV32I-NEXT: slli t3, t3, 24
+; RV32I-NEXT: or t1, t3, t2
+; RV32I-NEXT: or t1, t1, a7
+; RV32I-NEXT: lbu a7, 25(a0)
+; RV32I-NEXT: lbu t2, 24(a0)
+; RV32I-NEXT: lbu t3, 26(a0)
+; RV32I-NEXT: lbu t4, 27(a0)
+; RV32I-NEXT: slli a7, a7, 8
+; RV32I-NEXT: or a7, a7, t2
; RV32I-NEXT: slli t3, t3, 16
; RV32I-NEXT: slli t4, t4, 24
-; RV32I-NEXT: or a7, t4, t3
-; RV32I-NEXT: or t3, a7, a5
-; RV32I-NEXT: lbu a5, 17(a4)
-; RV32I-NEXT: lbu a7, 16(a4)
-; RV32I-NEXT: lbu t4, 18(a4)
-; RV32I-NEXT: lbu t6, 19(a4)
-; RV32I-NEXT: slli a5, a5, 8
-; RV32I-NEXT: or a5, a5, a7
+; RV32I-NEXT: or t2, t4, t3
+; RV32I-NEXT: or t2, t2, a7
+; RV32I-NEXT: lbu a7, 29(a0)
+; RV32I-NEXT: lbu t3, 28(a0)
+; RV32I-NEXT: lbu t4, 30(a0)
+; RV32I-NEXT: lbu a0, 31(a0)
+; RV32I-NEXT: slli a7, a7, 8
+; RV32I-NEXT: or a7, a7, t3
; RV32I-NEXT: slli t4, t4, 16
-; RV32I-NEXT: slli t6, t6, 24
-; RV32I-NEXT: or a7, t6, t4
-; RV32I-NEXT: or t4, a7, a5
-; RV32I-NEXT: slli a5, t4, 1
-; RV32I-NEXT: sll a7, a5, t1
-; RV32I-NEXT: lbu a5, 21(a4)
-; RV32I-NEXT: lbu t6, 20(a4)
-; RV32I-NEXT: lbu s0, 22(a4)
-; RV32I-NEXT: lbu s1, 23(a4)
-; RV32I-NEXT: slli a5, a5, 8
-; RV32I-NEXT: or a5, a5, t6
-; RV32I-NEXT: slli s0, s0, 16
-; RV32I-NEXT: slli s1, s1, 24
-; RV32I-NEXT: or s0, s1, s0
-; RV32I-NEXT: or s0, s0, a5
-; RV32I-NEXT: lbu a5, 25(a4)
-; RV32I-NEXT: lbu t6, 24(a4)
-; RV32I-NEXT: lbu s1, 26(a4)
-; RV32I-NEXT: lbu s2, 27(a4)
-; RV32I-NEXT: slli a5, a5, 8
-; RV32I-NEXT: or a5, a5, t6
-; RV32I-NEXT: slli s1, s1, 16
-; RV32I-NEXT: slli s2, s2, 24
-; RV32I-NEXT: or t6, s2, s1
-; RV32I-NEXT: or t6, t6, a5
-; RV32I-NEXT: lbu a5, 29(a4)
-; RV32I-NEXT: lbu s1, 28(a4)
-; RV32I-NEXT: slli s2, t6, 1
-; RV32I-NEXT: sll t1, s2, t1
-; RV32I-NEXT: slli a5, a5, 8
-; RV32I-NEXT: or a5, a5, s1
-; RV32I-NEXT: lbu s1, 30(a4)
-; RV32I-NEXT: lbu a4, 31(a4)
-; RV32I-NEXT: slli s2, t3, 1
-; RV32I-NEXT: sll s2, s2, t2
-; RV32I-NEXT: slli s1, s1, 16
-; RV32I-NEXT: slli a4, a4, 24
-; RV32I-NEXT: or a4, a4, s1
-; RV32I-NEXT: slli s1, s0, 1
-; RV32I-NEXT: sll s1, s1, t2
-; RV32I-NEXT: or s3, a4, a5
-; RV32I-NEXT: slli a4, s3, 1
-; RV32I-NEXT: sll t2, a4, t2
-; RV32I-NEXT: srl a4, t5, a3
-; RV32I-NEXT: srl a5, t0, a3
-; RV32I-NEXT: srl t0, t3, a3
-; RV32I-NEXT: srl a6, a6, a3
-; RV32I-NEXT: srl t3, s0, a3
-; RV32I-NEXT: srl t4, t4, a3
-; RV32I-NEXT: srl t5, t6, a3
-; RV32I-NEXT: srl a3, s3, a3
-; RV32I-NEXT: srli t6, t5, 16
-; RV32I-NEXT: sb t6, 26(a2)
-; RV32I-NEXT: or t2, t5, t2
-; RV32I-NEXT: sb t5, 24(a2)
-; RV32I-NEXT: srli t5, t5, 8
-; RV32I-NEXT: sb t5, 25(a2)
-; RV32I-NEXT: srli t5, a3, 24
-; RV32I-NEXT: sb t5, 31(a2)
-; RV32I-NEXT: srli t5, a3, 16
-; RV32I-NEXT: sb t5, 30(a2)
-; RV32I-NEXT: sb a3, 28(a2)
-; RV32I-NEXT: srli a3, a3, 8
-; RV32I-NEXT: sb a3, 29(a2)
-; RV32I-NEXT: srli a3, t4, 16
-; RV32I-NEXT: sb a3, 18(a2)
-; RV32I-NEXT: or a3, t4, s1
-; RV32I-NEXT: sb t4, 16(a2)
-; RV32I-NEXT: srli t4, t4, 8
-; RV32I-NEXT: sb t4, 17(a2)
-; RV32I-NEXT: srli t4, t3, 16
-; RV32I-NEXT: sb t4, 22(a2)
-; RV32I-NEXT: or t1, t3, t1
-; RV32I-NEXT: sb t3, 20(a2)
-; RV32I-NEXT: srli t3, t3, 8
-; RV32I-NEXT: sb t3, 21(a2)
-; RV32I-NEXT: srli t3, a6, 16
-; RV32I-NEXT: sb t3, 10(a2)
-; RV32I-NEXT: or t3, a6, s2
-; RV32I-NEXT: sb a6, 8(a2)
-; RV32I-NEXT: srli a6, a6, 8
-; RV32I-NEXT: sb a6, 9(a2)
-; RV32I-NEXT: srli a6, t0, 16
-; RV32I-NEXT: sb a6, 14(a2)
-; RV32I-NEXT: or a6, t0, a7
-; RV32I-NEXT: sb t0, 12(a2)
-; RV32I-NEXT: srli a7, t0, 8
-; RV32I-NEXT: sb a7, 13(a2)
-; RV32I-NEXT: srli a7, a5, 16
-; RV32I-NEXT: sb a7, 2(a2)
-; RV32I-NEXT: or a1, a5, a1
-; RV32I-NEXT: sb a5, 0(a2)
-; RV32I-NEXT: srli a5, a5, 8
-; RV32I-NEXT: sb a5, 1(a2)
-; RV32I-NEXT: srli a5, a4, 16
-; RV32I-NEXT: sb a5, 6(a2)
-; RV32I-NEXT: or a0, a4, a0
-; RV32I-NEXT: sb a4, 4(a2)
+; RV32I-NEXT: slli a0, a0, 24
+; RV32I-NEXT: or a0, a0, t4
+; RV32I-NEXT: or a0, a0, a7
+; RV32I-NEXT: lbu a7, 1(a1)
+; RV32I-NEXT: lbu t3, 0(a1)
+; RV32I-NEXT: lbu t4, 2(a1)
+; RV32I-NEXT: lbu a1, 3(a1)
+; RV32I-NEXT: slli a7, a7, 8
+; RV32I-NEXT: or a7, a7, t3
+; RV32I-NEXT: slli t4, t4, 16
+; RV32I-NEXT: slli a1, a1, 24
+; RV32I-NEXT: or a1, a1, t4
+; RV32I-NEXT: or a7, a1, a7
+; RV32I-NEXT: sw zero, 60(sp)
+; RV32I-NEXT: sw zero, 56(sp)
+; RV32I-NEXT: sw zero, 52(sp)
+; RV32I-NEXT: sw zero, 48(sp)
+; RV32I-NEXT: sw zero, 44(sp)
+; RV32I-NEXT: sw zero, 40(sp)
+; RV32I-NEXT: sw zero, 36(sp)
+; RV32I-NEXT: sw zero, 32(sp)
+; RV32I-NEXT: sw a0, 28(sp)
+; RV32I-NEXT: sw t2, 24(sp)
+; RV32I-NEXT: sw t1, 20(sp)
+; RV32I-NEXT: sw t0, 16(sp)
+; RV32I-NEXT: sw a6, 12(sp)
+; RV32I-NEXT: sw a5, 8(sp)
+; RV32I-NEXT: sw a4, 4(sp)
+; RV32I-NEXT: sw a3, 0(sp)
+; RV32I-NEXT: srli a0, a7, 3
+; RV32I-NEXT: andi a0, a0, 28
+; RV32I-NEXT: mv a1, sp
+; RV32I-NEXT: add a4, a1, a0
+; RV32I-NEXT: lw a1, 4(a4)
+; RV32I-NEXT: srl a0, a1, a7
+; RV32I-NEXT: lw a5, 8(a4)
+; RV32I-NEXT: andi a3, a7, 31
+; RV32I-NEXT: xori a6, a3, 31
+; RV32I-NEXT: lw a3, 0(a4)
+; RV32I-NEXT: slli t0, a5, 1
+; RV32I-NEXT: sll t0, t0, a6
+; RV32I-NEXT: or a0, a0, t0
+; RV32I-NEXT: srl a3, a3, a7
+; RV32I-NEXT: slli a1, a1, 1
+; RV32I-NEXT: lw t0, 12(a4)
+; RV32I-NEXT: lw t1, 16(a4)
+; RV32I-NEXT: sll a1, a1, a6
+; RV32I-NEXT: or a1, a3, a1
+; RV32I-NEXT: srl a3, t0, a7
+; RV32I-NEXT: slli t2, t1, 1
+; RV32I-NEXT: sll t2, t2, a6
+; RV32I-NEXT: or a3, a3, t2
+; RV32I-NEXT: srl a5, a5, a7
+; RV32I-NEXT: slli t0, t0, 1
+; RV32I-NEXT: lw t2, 20(a4)
+; RV32I-NEXT: lw t3, 24(a4)
+; RV32I-NEXT: sll t0, t0, a6
+; RV32I-NEXT: or a5, a5, t0
+; RV32I-NEXT: srl t0, t2, a7
+; RV32I-NEXT: slli t4, t3, 1
+; RV32I-NEXT: sll t4, t4, a6
+; RV32I-NEXT: or t0, t0, t4
+; RV32I-NEXT: srl t1, t1, a7
+; RV32I-NEXT: slli t2, t2, 1
+; RV32I-NEXT: lw a4, 28(a4)
+; RV32I-NEXT: sll t2, t2, a6
+; RV32I-NEXT: or t1, t1, t2
+; RV32I-NEXT: srl t2, t3, a7
+; RV32I-NEXT: slli t3, a4, 1
+; RV32I-NEXT: sll a6, t3, a6
+; RV32I-NEXT: or a6, t2, a6
+; RV32I-NEXT: srl a4, a4, a7
+; RV32I-NEXT: sb a4, 28(a2)
+; RV32I-NEXT: srli a7, a4, 24
+; RV32I-NEXT: sb a7, 31(a2)
+; RV32I-NEXT: srli a7, a4, 16
+; RV32I-NEXT: sb a7, 30(a2)
; RV32I-NEXT: srli a4, a4, 8
-; RV32I-NEXT: sb a4, 5(a2)
-; RV32I-NEXT: srli a4, t2, 24
+; RV32I-NEXT: sb a4, 29(a2)
+; RV32I-NEXT: sb a6, 24(a2)
+; RV32I-NEXT: sb t1, 16(a2)
+; RV32I-NEXT: sb t0, 20(a2)
+; RV32I-NEXT: sb a5, 8(a2)
+; RV32I-NEXT: sb a3, 12(a2)
+; RV32I-NEXT: sb a1, 0(a2)
+; RV32I-NEXT: sb a0, 4(a2)
+; RV32I-NEXT: srli a4, a6, 24
; RV32I-NEXT: sb a4, 27(a2)
-; RV32I-NEXT: srli a3, a3, 24
-; RV32I-NEXT: sb a3, 19(a2)
-; RV32I-NEXT: srli a3, t1, 24
-; RV32I-NEXT: sb a3, 23(a2)
-; RV32I-NEXT: srli a3, t3, 24
-; RV32I-NEXT: sb a3, 11(a2)
-; RV32I-NEXT: srli a3, a6, 24
-; RV32I-NEXT: sb a3, 15(a2)
-; RV32I-NEXT: srli a1, a1, 24
-; RV32I-NEXT: sb a1, 3(a2)
-; RV32I-NEXT: srli a0, a0, 24
-; RV32I-NEXT: sb a0, 7(a2)
-; RV32I-NEXT: lw ra, 140(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s0, 136(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s1, 132(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s2, 128(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s3, 124(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s4, 120(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s5, 116(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s6, 112(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s7, 108(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s8, 104(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s9, 100(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s10, 96(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s11, 92(sp) # 4-byte Folded Reload
-; RV32I-NEXT: addi sp, sp, 144
+; RV32I-NEXT: srli a4, a6, 16
+; RV32I-NEXT: sb a4, 26(a2)
+; RV32I-NEXT: srli a4, a6, 8
+; RV32I-NEXT: sb a4, 25(a2)
+; RV32I-NEXT: srli a4, t1, 24
+; RV32I-NEXT: sb a4, 19(a2)
+; RV32I-NEXT: srli a4, t1, 16
+; RV32I-NEXT: sb a4, 18(a2)
+; RV32I-NEXT: srli a4, t1, 8
+; RV32I-NEXT: sb a4, 17(a2)
+; RV32I-NEXT: srli a4, t0, 24
+; RV32I-NEXT: sb a4, 23(a2)
+; RV32I-NEXT: srli a4, t0, 16
+; RV32I-NEXT: sb a4, 22(a2)
+; RV32I-NEXT: srli a4, t0, 8
+; RV32I-NEXT: sb a4, 21(a2)
+; RV32I-NEXT: srli a4, a5, 24
+; RV32I-NEXT: sb a4, 11(a2)
+; RV32I-NEXT: srli a4, a5, 16
+; RV32I-NEXT: sb a4, 10(a2)
+; RV32I-NEXT: srli a5, a5, 8
+; RV32I-NEXT: sb a5, 9(a2)
+; RV32I-NEXT: srli a4, a3, 24
+; RV32I-NEXT: sb a4, 15(a2)
+; RV32I-NEXT: srli a4, a3, 16
+; RV32I-NEXT: sb a4, 14(a2)
+; RV32I-NEXT: srli a3, a3, 8
+; RV32I-NEXT: sb a3, 13(a2)
+; RV32I-NEXT: srli a3, a1, 24
+; RV32I-NEXT: sb a3, 3(a2)
+; RV32I-NEXT: srli a3, a1, 16
+; RV32I-NEXT: sb a3, 2(a2)
+; RV32I-NEXT: srli a1, a1, 8
+; RV32I-NEXT: sb a1, 1(a2)
+; RV32I-NEXT: srli a1, a0, 24
+; RV32I-NEXT: sb a1, 7(a2)
+; RV32I-NEXT: srli a1, a0, 16
+; RV32I-NEXT: sb a1, 6(a2)
+; RV32I-NEXT: srli a0, a0, 8
+; RV32I-NEXT: sb a0, 5(a2)
+; RV32I-NEXT: addi sp, sp, 64
; RV32I-NEXT: ret
%src = load i256, ptr %src.ptr, align 1
%bitOff = load i256, ptr %bitOff.ptr, align 1
@@ -2104,191 +1723,43 @@ define void @lshr_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
define void @shl_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; RV64I-LABEL: shl_32bytes:
; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -224
-; RV64I-NEXT: sd ra, 216(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s0, 208(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s1, 200(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s2, 192(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s3, 184(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s4, 176(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s5, 168(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s6, 160(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s7, 152(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s8, 144(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s9, 136(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s10, 128(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s11, 120(sp) # 8-byte Folded Spill
-; RV64I-NEXT: lbu a3, 0(a0)
-; RV64I-NEXT: sd a3, 48(sp) # 8-byte Folded Spill
+; RV64I-NEXT: addi sp, sp, -64
; RV64I-NEXT: lbu a3, 1(a0)
-; RV64I-NEXT: sd a3, 40(sp) # 8-byte Folded Spill
-; RV64I-NEXT: lbu a3, 2(a0)
-; RV64I-NEXT: sd a3, 32(sp) # 8-byte Folded Spill
-; RV64I-NEXT: lbu a3, 3(a0)
-; RV64I-NEXT: sd a3, 24(sp) # 8-byte Folded Spill
-; RV64I-NEXT: lbu a3, 4(a0)
-; RV64I-NEXT: sd a3, 16(sp) # 8-byte Folded Spill
-; RV64I-NEXT: lbu a3, 5(a0)
-; RV64I-NEXT: sd a3, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: lbu t1, 6(a0)
-; RV64I-NEXT: lbu t2, 7(a0)
-; RV64I-NEXT: lbu t3, 8(a0)
-; RV64I-NEXT: lbu t4, 9(a0)
-; RV64I-NEXT: lbu t5, 10(a0)
-; RV64I-NEXT: lbu t6, 11(a0)
-; RV64I-NEXT: lbu s0, 12(a0)
-; RV64I-NEXT: lbu s1, 13(a0)
-; RV64I-NEXT: lbu s2, 14(a0)
-; RV64I-NEXT: lbu s3, 15(a0)
-; RV64I-NEXT: lbu s4, 16(a0)
-; RV64I-NEXT: lbu s5, 17(a0)
-; RV64I-NEXT: lbu s6, 18(a0)
-; RV64I-NEXT: lbu s7, 19(a0)
-; RV64I-NEXT: lbu s8, 20(a0)
-; RV64I-NEXT: lbu s9, 1(a1)
-; RV64I-NEXT: lbu s10, 0(a1)
-; RV64I-NEXT: lbu s11, 2(a1)
-; RV64I-NEXT: lbu ra, 3(a1)
-; RV64I-NEXT: slli s9, s9, 8
-; RV64I-NEXT: or s9, s9, s10
-; RV64I-NEXT: slli s11, s11, 16
-; RV64I-NEXT: slli ra, ra, 24
-; RV64I-NEXT: lbu s10, 5(a1)
-; RV64I-NEXT: or s11, ra, s11
-; RV64I-NEXT: or s11, s11, s9
-; RV64I-NEXT: lbu s9, 4(a1)
-; RV64I-NEXT: slli s10, s10, 8
-; RV64I-NEXT: lbu ra, 6(a1)
-; RV64I-NEXT: lbu a1, 7(a1)
-; RV64I-NEXT: or s10, s10, s9
-; RV64I-NEXT: lbu s9, 21(a0)
-; RV64I-NEXT: slli ra, ra, 16
-; RV64I-NEXT: slli a1, a1, 24
-; RV64I-NEXT: or a1, a1, ra
-; RV64I-NEXT: lbu ra, 22(a0)
-; RV64I-NEXT: or a1, a1, s10
-; RV64I-NEXT: lbu s10, 23(a0)
-; RV64I-NEXT: slli a1, a1, 32
-; RV64I-NEXT: or t0, a1, s11
-; RV64I-NEXT: lbu s11, 24(a0)
-; RV64I-NEXT: lbu a7, 25(a0)
-; RV64I-NEXT: lbu a6, 26(a0)
-; RV64I-NEXT: lbu a5, 27(a0)
-; RV64I-NEXT: lbu a1, 31(a0)
-; RV64I-NEXT: lbu a3, 30(a0)
-; RV64I-NEXT: lbu a4, 29(a0)
-; RV64I-NEXT: lbu a0, 28(a0)
-; RV64I-NEXT: sb a1, 119(sp)
-; RV64I-NEXT: sb a3, 118(sp)
-; RV64I-NEXT: sb a4, 117(sp)
-; RV64I-NEXT: sb a0, 116(sp)
-; RV64I-NEXT: sb a5, 115(sp)
-; RV64I-NEXT: sb a6, 114(sp)
-; RV64I-NEXT: sb a7, 113(sp)
-; RV64I-NEXT: sb s11, 112(sp)
-; RV64I-NEXT: sb s10, 111(sp)
-; RV64I-NEXT: sb ra, 110(sp)
-; RV64I-NEXT: sb s9, 109(sp)
-; RV64I-NEXT: sb s8, 108(sp)
-; RV64I-NEXT: sb s7, 107(sp)
-; RV64I-NEXT: sb s6, 106(sp)
-; RV64I-NEXT: sb s5, 105(sp)
-; RV64I-NEXT: sb s4, 104(sp)
-; RV64I-NEXT: sb s3, 103(sp)
-; RV64I-NEXT: sb s2, 102(sp)
-; RV64I-NEXT: sb s1, 101(sp)
-; RV64I-NEXT: sb s0, 100(sp)
-; RV64I-NEXT: sb t6, 99(sp)
-; RV64I-NEXT: sb t5, 98(sp)
-; RV64I-NEXT: sb t4, 97(sp)
-; RV64I-NEXT: sb t3, 96(sp)
-; RV64I-NEXT: sb zero, 87(sp)
-; RV64I-NEXT: sb zero, 86(sp)
-; RV64I-NEXT: sb zero, 85(sp)
-; RV64I-NEXT: sb zero, 84(sp)
-; RV64I-NEXT: sb zero, 83(sp)
-; RV64I-NEXT: sb zero, 82(sp)
-; RV64I-NEXT: sb zero, 81(sp)
-; RV64I-NEXT: sb zero, 80(sp)
-; RV64I-NEXT: sb zero, 79(sp)
-; RV64I-NEXT: sb zero, 78(sp)
-; RV64I-NEXT: sb zero, 77(sp)
-; RV64I-NEXT: sb zero, 76(sp)
-; RV64I-NEXT: sb zero, 75(sp)
-; RV64I-NEXT: sb zero, 74(sp)
-; RV64I-NEXT: sb zero, 73(sp)
-; RV64I-NEXT: sb zero, 72(sp)
-; RV64I-NEXT: sb zero, 71(sp)
-; RV64I-NEXT: sb zero, 70(sp)
-; RV64I-NEXT: sb zero, 69(sp)
-; RV64I-NEXT: sb zero, 68(sp)
-; RV64I-NEXT: sb zero, 67(sp)
-; RV64I-NEXT: sb zero, 66(sp)
-; RV64I-NEXT: sb zero, 65(sp)
-; RV64I-NEXT: sb zero, 64(sp)
-; RV64I-NEXT: sb zero, 63(sp)
-; RV64I-NEXT: sb zero, 62(sp)
-; RV64I-NEXT: sb zero, 61(sp)
-; RV64I-NEXT: sb zero, 60(sp)
-; RV64I-NEXT: sb zero, 59(sp)
-; RV64I-NEXT: sb zero, 58(sp)
-; RV64I-NEXT: sb zero, 57(sp)
-; RV64I-NEXT: sb zero, 56(sp)
-; RV64I-NEXT: sb t2, 95(sp)
-; RV64I-NEXT: sb t1, 94(sp)
-; RV64I-NEXT: ld a0, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: sb a0, 93(sp)
-; RV64I-NEXT: ld a0, 16(sp) # 8-byte Folded Reload
-; RV64I-NEXT: sb a0, 92(sp)
-; RV64I-NEXT: ld a0, 24(sp) # 8-byte Folded Reload
-; RV64I-NEXT: sb a0, 91(sp)
-; RV64I-NEXT: ld a0, 32(sp) # 8-byte Folded Reload
-; RV64I-NEXT: sb a0, 90(sp)
-; RV64I-NEXT: ld a0, 40(sp) # 8-byte Folded Reload
-; RV64I-NEXT: sb a0, 89(sp)
-; RV64I-NEXT: ld a0, 48(sp) # 8-byte Folded Reload
-; RV64I-NEXT: sb a0, 88(sp)
-; RV64I-NEXT: slli a0, t0, 56
-; RV64I-NEXT: srli a0, a0, 59
-; RV64I-NEXT: addi a1, sp, 88
-; RV64I-NEXT: sub a0, a1, a0
-; RV64I-NEXT: lbu a1, 9(a0)
-; RV64I-NEXT: lbu a3, 8(a0)
-; RV64I-NEXT: lbu a4, 10(a0)
-; RV64I-NEXT: lbu a5, 11(a0)
-; RV64I-NEXT: slli a1, a1, 8
-; RV64I-NEXT: or a1, a1, a3
-; RV64I-NEXT: slli a4, a4, 16
-; RV64I-NEXT: slli a5, a5, 24
-; RV64I-NEXT: or a4, a5, a4
-; RV64I-NEXT: or a1, a4, a1
-; RV64I-NEXT: lbu a3, 13(a0)
-; RV64I-NEXT: lbu a4, 12(a0)
-; RV64I-NEXT: lbu a5, 14(a0)
-; RV64I-NEXT: lbu a6, 15(a0)
+; RV64I-NEXT: lbu a4, 0(a0)
+; RV64I-NEXT: lbu a5, 2(a0)
+; RV64I-NEXT: lbu a6, 3(a0)
; RV64I-NEXT: slli a3, a3, 8
; RV64I-NEXT: or a3, a3, a4
; RV64I-NEXT: slli a5, a5, 16
; RV64I-NEXT: slli a6, a6, 24
; RV64I-NEXT: or a4, a6, a5
; RV64I-NEXT: or a3, a4, a3
-; RV64I-NEXT: slli a3, a3, 32
-; RV64I-NEXT: or a3, a3, a1
-; RV64I-NEXT: andi a1, t0, 7
-; RV64I-NEXT: lbu a4, 1(a0)
-; RV64I-NEXT: lbu a5, 0(a0)
-; RV64I-NEXT: lbu a6, 2(a0)
-; RV64I-NEXT: lbu a7, 3(a0)
+; RV64I-NEXT: lbu a4, 5(a0)
+; RV64I-NEXT: lbu a5, 4(a0)
+; RV64I-NEXT: lbu a6, 6(a0)
+; RV64I-NEXT: lbu a7, 7(a0)
+; RV64I-NEXT: slli a4, a4, 8
+; RV64I-NEXT: or a4, a4, a5
+; RV64I-NEXT: slli a6, a6, 16
+; RV64I-NEXT: slli a7, a7, 24
+; RV64I-NEXT: or a5, a7, a6
+; RV64I-NEXT: or a4, a5, a4
+; RV64I-NEXT: slli a4, a4, 32
+; RV64I-NEXT: or a3, a4, a3
+; RV64I-NEXT: lbu a4, 9(a0)
+; RV64I-NEXT: lbu a5, 8(a0)
+; RV64I-NEXT: lbu a6, 10(a0)
+; RV64I-NEXT: lbu a7, 11(a0)
; RV64I-NEXT: slli a4, a4, 8
; RV64I-NEXT: or a4, a4, a5
; RV64I-NEXT: slli a6, a6, 16
; RV64I-NEXT: slli a7, a7, 24
; RV64I-NEXT: or a5, a7, a6
; RV64I-NEXT: or a4, a5, a4
-; RV64I-NEXT: lbu a5, 5(a0)
-; RV64I-NEXT: lbu a6, 4(a0)
-; RV64I-NEXT: lbu a7, 6(a0)
-; RV64I-NEXT: lbu t0, 7(a0)
+; RV64I-NEXT: lbu a5, 13(a0)
+; RV64I-NEXT: lbu a6, 12(a0)
+; RV64I-NEXT: lbu a7, 14(a0)
+; RV64I-NEXT: lbu t0, 15(a0)
; RV64I-NEXT: slli a5, a5, 8
; RV64I-NEXT: or a5, a5, a6
; RV64I-NEXT: slli a7, a7, 16
@@ -2297,20 +1768,20 @@ define void @shl_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; RV64I-NEXT: or a5, a6, a5
; RV64I-NEXT: slli a5, a5, 32
; RV64I-NEXT: or a4, a5, a4
-; RV64I-NEXT: lbu a5, 25(a0)
-; RV64I-NEXT: lbu a6, 24(a0)
-; RV64I-NEXT: lbu a7, 26(a0)
-; RV64I-NEXT: lbu t0, 27(a0)
+; RV64I-NEXT: lbu a5, 17(a0)
+; RV64I-NEXT: lbu a6, 16(a0)
+; RV64I-NEXT: lbu a7, 18(a0)
+; RV64I-NEXT: lbu t0, 19(a0)
; RV64I-NEXT: slli a5, a5, 8
; RV64I-NEXT: or a5, a5, a6
; RV64I-NEXT: slli a7, a7, 16
; RV64I-NEXT: slli t0, t0, 24
; RV64I-NEXT: or a6, t0, a7
; RV64I-NEXT: or a5, a6, a5
-; RV64I-NEXT: lbu a6, 29(a0)
-; RV64I-NEXT: lbu a7, 28(a0)
-; RV64I-NEXT: lbu t0, 30(a0)
-; RV64I-NEXT: lbu t1, 31(a0)
+; RV64I-NEXT: lbu a6, 21(a0)
+; RV64I-NEXT: lbu a7, 20(a0)
+; RV64I-NEXT: lbu t0, 22(a0)
+; RV64I-NEXT: lbu t1, 23(a0)
; RV64I-NEXT: slli a6, a6, 8
; RV64I-NEXT: or a6, a6, a7
; RV64I-NEXT: slli t0, t0, 16
@@ -2319,439 +1790,353 @@ define void @shl_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; RV64I-NEXT: or a6, a7, a6
; RV64I-NEXT: slli a6, a6, 32
; RV64I-NEXT: or a5, a6, a5
-; RV64I-NEXT: lbu a6, 17(a0)
-; RV64I-NEXT: lbu a7, 16(a0)
-; RV64I-NEXT: lbu t0, 18(a0)
-; RV64I-NEXT: lbu t1, 19(a0)
+; RV64I-NEXT: lbu a6, 25(a0)
+; RV64I-NEXT: lbu a7, 24(a0)
+; RV64I-NEXT: lbu t0, 26(a0)
+; RV64I-NEXT: lbu t1, 27(a0)
; RV64I-NEXT: slli a6, a6, 8
; RV64I-NEXT: or a6, a6, a7
; RV64I-NEXT: slli t0, t0, 16
; RV64I-NEXT: slli t1, t1, 24
-; RV64I-NEXT: lbu a7, 21(a0)
-; RV64I-NEXT: or t0, t1, t0
-; RV64I-NEXT: or a6, t0, a6
-; RV64I-NEXT: lbu t0, 20(a0)
+; RV64I-NEXT: or a7, t1, t0
+; RV64I-NEXT: or a6, a7, a6
+; RV64I-NEXT: lbu a7, 29(a0)
+; RV64I-NEXT: lbu t0, 28(a0)
+; RV64I-NEXT: lbu t1, 30(a0)
+; RV64I-NEXT: lbu a0, 31(a0)
; RV64I-NEXT: slli a7, a7, 8
-; RV64I-NEXT: lbu t1, 22(a0)
-; RV64I-NEXT: lbu a0, 23(a0)
; RV64I-NEXT: or a7, a7, t0
-; RV64I-NEXT: srli t0, a4, 1
; RV64I-NEXT: slli t1, t1, 16
; RV64I-NEXT: slli a0, a0, 24
-; RV64I-NEXT: or t1, a0, t1
-; RV64I-NEXT: xori t2, a1, 63
-; RV64I-NEXT: srl a0, t0, t2
-; RV64I-NEXT: or a7, t1, a7
-; RV64I-NEXT: slli a7, a7, 32
+; RV64I-NEXT: or a0, a0, t1
+; RV64I-NEXT: or a0, a0, a7
+; RV64I-NEXT: slli a0, a0, 32
+; RV64I-NEXT: or a0, a0, a6
+; RV64I-NEXT: lbu a6, 1(a1)
+; RV64I-NEXT: lbu a7, 0(a1)
+; RV64I-NEXT: lbu t0, 2(a1)
+; RV64I-NEXT: lbu t1, 3(a1)
+; RV64I-NEXT: slli a6, a6, 8
+; RV64I-NEXT: or a6, a6, a7
+; RV64I-NEXT: slli t0, t0, 16
+; RV64I-NEXT: slli t1, t1, 24
+; RV64I-NEXT: or a7, t1, t0
; RV64I-NEXT: or a6, a7, a6
-; RV64I-NEXT: srli a7, a6, 1
-; RV64I-NEXT: srl a7, a7, t2
+; RV64I-NEXT: lbu a7, 5(a1)
+; RV64I-NEXT: lbu t0, 4(a1)
+; RV64I-NEXT: lbu t1, 6(a1)
+; RV64I-NEXT: lbu a1, 7(a1)
+; RV64I-NEXT: slli a7, a7, 8
+; RV64I-NEXT: or a7, a7, t0
+; RV64I-NEXT: slli t1, t1, 16
+; RV64I-NEXT: slli a1, a1, 24
+; RV64I-NEXT: or a1, a1, t1
+; RV64I-NEXT: or a1, a1, a7
+; RV64I-NEXT: slli a1, a1, 32
+; RV64I-NEXT: or a1, a1, a6
+; RV64I-NEXT: sd zero, 24(sp)
+; RV64I-NEXT: sd zero, 16(sp)
+; RV64I-NEXT: sd zero, 8(sp)
+; RV64I-NEXT: sd zero, 0(sp)
+; RV64I-NEXT: sd a0, 56(sp)
+; RV64I-NEXT: sd a5, 48(sp)
+; RV64I-NEXT: sd a4, 40(sp)
+; RV64I-NEXT: sd a3, 32(sp)
+; RV64I-NEXT: srli a0, a1, 3
+; RV64I-NEXT: andi a0, a0, 24
+; RV64I-NEXT: addi a3, sp, 32
+; RV64I-NEXT: sub a3, a3, a0
+; RV64I-NEXT: ld a4, 8(a3)
+; RV64I-NEXT: ld a5, 0(a3)
+; RV64I-NEXT: sll a0, a4, a1
+; RV64I-NEXT: andi a6, a1, 63
+; RV64I-NEXT: xori a6, a6, 63
+; RV64I-NEXT: srli a7, a5, 1
+; RV64I-NEXT: ld t0, 24(a3)
+; RV64I-NEXT: ld a3, 16(a3)
+; RV64I-NEXT: srl a7, a7, a6
+; RV64I-NEXT: or a0, a0, a7
+; RV64I-NEXT: sll a7, t0, a1
; RV64I-NEXT: srli t0, a3, 1
-; RV64I-NEXT: not t1, a1
-; RV64I-NEXT: srl t0, t0, t1
+; RV64I-NEXT: srl t0, t0, a6
+; RV64I-NEXT: or a7, a7, t0
; RV64I-NEXT: sll a3, a3, a1
-; RV64I-NEXT: sll a5, a5, a1
-; RV64I-NEXT: sll a6, a6, a1
-; RV64I-NEXT: sll a1, a4, a1
-; RV64I-NEXT: srli a4, a6, 56
-; RV64I-NEXT: sb a4, 23(a2)
-; RV64I-NEXT: srli a4, a6, 48
-; RV64I-NEXT: sb a4, 22(a2)
-; RV64I-NEXT: srli a4, a6, 40
-; RV64I-NEXT: sb a4, 21(a2)
-; RV64I-NEXT: srli a4, a6, 32
-; RV64I-NEXT: sb a4, 20(a2)
-; RV64I-NEXT: srli a4, a6, 24
-; RV64I-NEXT: sb a4, 19(a2)
-; RV64I-NEXT: srli a4, a6, 16
-; RV64I-NEXT: sb a4, 18(a2)
-; RV64I-NEXT: or a4, a6, t0
-; RV64I-NEXT: srli a6, a6, 8
-; RV64I-NEXT: sb a6, 17(a2)
-; RV64I-NEXT: srli a6, a5, 56
-; RV64I-NEXT: sb a6, 31(a2)
-; RV64I-NEXT: srli a6, a5, 48
-; RV64I-NEXT: sb a6, 30(a2)
-; RV64I-NEXT: srli a6, a5, 40
-; RV64I-NEXT: sb a6, 29(a2)
-; RV64I-NEXT: srli a6, a5, 32
-; RV64I-NEXT: sb a6, 28(a2)
-; RV64I-NEXT: srli a6, a5, 24
-; RV64I-NEXT: sb a6, 27(a2)
-; RV64I-NEXT: srli a6, a5, 16
-; RV64I-NEXT: sb a6, 26(a2)
-; RV64I-NEXT: or a6, a5, a7
-; RV64I-NEXT: srli a5, a5, 8
-; RV64I-NEXT: sb a5, 25(a2)
-; RV64I-NEXT: srli a5, a1, 56
-; RV64I-NEXT: sb a5, 7(a2)
-; RV64I-NEXT: srli a5, a1, 48
-; RV64I-NEXT: sb a5, 6(a2)
-; RV64I-NEXT: srli a5, a1, 40
-; RV64I-NEXT: sb a5, 5(a2)
-; RV64I-NEXT: srli a5, a1, 32
-; RV64I-NEXT: sb a5, 4(a2)
-; RV64I-NEXT: srli a5, a1, 24
-; RV64I-NEXT: sb a5, 3(a2)
-; RV64I-NEXT: srli a5, a1, 16
-; RV64I-NEXT: sb a5, 2(a2)
+; RV64I-NEXT: srli a4, a4, 1
+; RV64I-NEXT: srl a4, a4, a6
+; RV64I-NEXT: or a3, a3, a4
+; RV64I-NEXT: sll a1, a5, a1
; RV64I-NEXT: sb a1, 0(a2)
+; RV64I-NEXT: srli a4, a1, 56
+; RV64I-NEXT: sb a4, 7(a2)
+; RV64I-NEXT: srli a4, a1, 48
+; RV64I-NEXT: sb a4, 6(a2)
+; RV64I-NEXT: srli a4, a1, 40
+; RV64I-NEXT: sb a4, 5(a2)
+; RV64I-NEXT: srli a4, a1, 32
+; RV64I-NEXT: sb a4, 4(a2)
+; RV64I-NEXT: srli a4, a1, 24
+; RV64I-NEXT: sb a4, 3(a2)
+; RV64I-NEXT: srli a4, a1, 16
+; RV64I-NEXT: sb a4, 2(a2)
; RV64I-NEXT: srli a1, a1, 8
; RV64I-NEXT: sb a1, 1(a2)
+; RV64I-NEXT: sb a3, 16(a2)
+; RV64I-NEXT: sb a7, 24(a2)
+; RV64I-NEXT: sb a0, 8(a2)
; RV64I-NEXT: srli a1, a3, 56
-; RV64I-NEXT: sb a1, 15(a2)
+; RV64I-NEXT: sb a1, 23(a2)
; RV64I-NEXT: srli a1, a3, 48
-; RV64I-NEXT: sb a1, 14(a2)
+; RV64I-NEXT: sb a1, 22(a2)
; RV64I-NEXT: srli a1, a3, 40
-; RV64I-NEXT: sb a1, 13(a2)
+; RV64I-NEXT: sb a1, 21(a2)
; RV64I-NEXT: srli a1, a3, 32
-; RV64I-NEXT: sb a1, 12(a2)
+; RV64I-NEXT: sb a1, 20(a2)
; RV64I-NEXT: srli a1, a3, 24
-; RV64I-NEXT: sb a1, 11(a2)
+; RV64I-NEXT: sb a1, 19(a2)
; RV64I-NEXT: srli a1, a3, 16
-; RV64I-NEXT: sb a1, 10(a2)
-; RV64I-NEXT: or a0, a3, a0
+; RV64I-NEXT: sb a1, 18(a2)
; RV64I-NEXT: srli a3, a3, 8
-; RV64I-NEXT: sb a3, 9(a2)
-; RV64I-NEXT: sb a4, 16(a2)
-; RV64I-NEXT: sb a6, 24(a2)
-; RV64I-NEXT: sb a0, 8(a2)
-; RV64I-NEXT: ld ra, 216(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s0, 208(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s1, 200(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s2, 192(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s3, 184(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s4, 176(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s5, 168(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s6, 160(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s7, 152(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s8, 144(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s9, 136(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s10, 128(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s11, 120(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 224
+; RV64I-NEXT: sb a3, 17(a2)
+; RV64I-NEXT: srli a1, a7, 56
+; RV64I-NEXT: sb a1, 31(a2)
+; RV64I-NEXT: srli a1, a7, 48
+; RV64I-NEXT: sb a1, 30(a2)
+; RV64I-NEXT: srli a1, a7, 40
+; RV64I-NEXT: sb a1, 29(a2)
+; RV64I-NEXT: srli a1, a7, 32
+; RV64I-NEXT: sb a1, 28(a2)
+; RV64I-NEXT: srli a1, a7, 24
+; RV64I-NEXT: sb a1, 27(a2)
+; RV64I-NEXT: srli a1, a7, 16
+; RV64I-NEXT: sb a1, 26(a2)
+; RV64I-NEXT: srli a1, a7, 8
+; RV64I-NEXT: sb a1, 25(a2)
+; RV64I-NEXT: srli a1, a0, 56
+; RV64I-NEXT: sb a1, 15(a2)
+; RV64I-NEXT: srli a1, a0, 48
+; RV64I-NEXT: sb a1, 14(a2)
+; RV64I-NEXT: srli a1, a0, 40
+; RV64I-NEXT: sb a1, 13(a2)
+; RV64I-NEXT: srli a1, a0, 32
+; RV64I-NEXT: sb a1, 12(a2)
+; RV64I-NEXT: srli a1, a0, 24
+; RV64I-NEXT: sb a1, 11(a2)
+; RV64I-NEXT: srli a1, a0, 16
+; RV64I-NEXT: sb a1, 10(a2)
+; RV64I-NEXT: srli a0, a0, 8
+; RV64I-NEXT: sb a0, 9(a2)
+; RV64I-NEXT: addi sp, sp, 64
; RV64I-NEXT: ret
;
; RV32I-LABEL: shl_32bytes:
; RV32I: # %bb.0:
-; RV32I-NEXT: addi sp, sp, -144
-; RV32I-NEXT: sw ra, 140(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s0, 136(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s1, 132(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s2, 128(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s3, 124(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s4, 120(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s5, 116(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s6, 112(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s7, 108(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s8, 104(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s9, 100(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s10, 96(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s11, 92(sp) # 4-byte Folded Spill
-; RV32I-NEXT: lbu a3, 0(a0)
-; RV32I-NEXT: sw a3, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT: addi sp, sp, -64
; RV32I-NEXT: lbu a3, 1(a0)
-; RV32I-NEXT: sw a3, 20(sp) # 4-byte Folded Spill
-; RV32I-NEXT: lbu a3, 2(a0)
-; RV32I-NEXT: sw a3, 16(sp) # 4-byte Folded Spill
-; RV32I-NEXT: lbu a3, 3(a0)
-; RV32I-NEXT: sw a3, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: lbu a3, 4(a0)
-; RV32I-NEXT: sw a3, 8(sp) # 4-byte Folded Spill
-; RV32I-NEXT: lbu a3, 5(a0)
-; RV32I-NEXT: sw a3, 4(sp) # 4-byte Folded Spill
-; RV32I-NEXT: lbu t1, 6(a0)
-; RV32I-NEXT: lbu t2, 7(a0)
-; RV32I-NEXT: lbu t3, 8(a0)
-; RV32I-NEXT: lbu t4, 9(a0)
-; RV32I-NEXT: lbu t5, 10(a0)
-; RV32I-NEXT: lbu t6, 11(a0)
-; RV32I-NEXT: lbu s0, 12(a0)
-; RV32I-NEXT: lbu s1, 13(a0)
-; RV32I-NEXT: lbu s2, 14(a0)
-; RV32I-NEXT: lbu s3, 15(a0)
-; RV32I-NEXT: lbu s4, 16(a0)
-; RV32I-NEXT: lbu s5, 17(a0)
-; RV32I-NEXT: lbu s6, 18(a0)
-; RV32I-NEXT: lbu s7, 19(a0)
-; RV32I-NEXT: lbu s10, 1(a1)
-; RV32I-NEXT: lbu s8, 20(a0)
-; RV32I-NEXT: lbu s9, 21(a0)
-; RV32I-NEXT: lbu s11, 0(a1)
-; RV32I-NEXT: slli s10, s10, 8
-; RV32I-NEXT: lbu ra, 2(a1)
-; RV32I-NEXT: lbu a1, 3(a1)
-; RV32I-NEXT: or s10, s10, s11
-; RV32I-NEXT: lbu s11, 22(a0)
-; RV32I-NEXT: slli ra, ra, 16
-; RV32I-NEXT: slli a1, a1, 24
-; RV32I-NEXT: or a1, a1, ra
-; RV32I-NEXT: lbu ra, 23(a0)
-; RV32I-NEXT: or t0, a1, s10
-; RV32I-NEXT: lbu s10, 24(a0)
-; RV32I-NEXT: lbu a7, 25(a0)
-; RV32I-NEXT: lbu a6, 26(a0)
-; RV32I-NEXT: lbu a5, 27(a0)
-; RV32I-NEXT: lbu a1, 31(a0)
-; RV32I-NEXT: lbu a3, 30(a0)
-; RV32I-NEXT: lbu a4, 29(a0)
-; RV32I-NEXT: lbu a0, 28(a0)
-; RV32I-NEXT: sb a1, 91(sp)
-; RV32I-NEXT: sb a3, 90(sp)
-; RV32I-NEXT: sb a4, 89(sp)
-; RV32I-NEXT: sb a0, 88(sp)
-; RV32I-NEXT: sb a5, 87(sp)
-; RV32I-NEXT: sb a6, 86(sp)
-; RV32I-NEXT: sb a7, 85(sp)
-; RV32I-NEXT: sb s10, 84(sp)
-; RV32I-NEXT: sb ra, 83(sp)
-; RV32I-NEXT: sb s11, 82(sp)
-; RV32I-NEXT: sb s9, 81(sp)
-; RV32I-NEXT: sb s8, 80(sp)
-; RV32I-NEXT: sb s7, 79(sp)
-; RV32I-NEXT: sb s6, 78(sp)
-; RV32I-NEXT: sb s5, 77(sp)
-; RV32I-NEXT: sb s4, 76(sp)
-; RV32I-NEXT: sb zero, 59(sp)
-; RV32I-NEXT: sb zero, 58(sp)
-; RV32I-NEXT: sb zero, 57(sp)
-; RV32I-NEXT: sb zero, 56(sp)
-; RV32I-NEXT: sb zero, 55(sp)
-; RV32I-NEXT: sb zero, 54(sp)
-; RV32I-NEXT: sb zero, 53(sp)
-; RV32I-NEXT: sb zero, 52(sp)
-; RV32I-NEXT: sb zero, 51(sp)
-; RV32I-NEXT: sb zero, 50(sp)
-; RV32I-NEXT: sb zero, 49(sp)
-; RV32I-NEXT: sb zero, 48(sp)
-; RV32I-NEXT: sb zero, 47(sp)
-; RV32I-NEXT: sb zero, 46(sp)
-; RV32I-NEXT: sb zero, 45(sp)
-; RV32I-NEXT: sb zero, 44(sp)
-; RV32I-NEXT: sb zero, 43(sp)
-; RV32I-NEXT: sb zero, 42(sp)
-; RV32I-NEXT: sb zero, 41(sp)
-; RV32I-NEXT: sb zero, 40(sp)
-; RV32I-NEXT: sb zero, 39(sp)
-; RV32I-NEXT: sb zero, 38(sp)
-; RV32I-NEXT: sb zero, 37(sp)
-; RV32I-NEXT: sb zero, 36(sp)
-; RV32I-NEXT: sb zero, 35(sp)
-; RV32I-NEXT: sb zero, 34(sp)
-; RV32I-NEXT: sb zero, 33(sp)
-; RV32I-NEXT: sb zero, 32(sp)
-; RV32I-NEXT: sb zero, 31(sp)
-; RV32I-NEXT: sb zero, 30(sp)
-; RV32I-NEXT: sb zero, 29(sp)
-; RV32I-NEXT: sb zero, 28(sp)
-; RV32I-NEXT: sb s3, 75(sp)
-; RV32I-NEXT: sb s2, 74(sp)
-; RV32I-NEXT: sb s1, 73(sp)
-; RV32I-NEXT: sb s0, 72(sp)
-; RV32I-NEXT: sb t6, 71(sp)
-; RV32I-NEXT: sb t5, 70(sp)
-; RV32I-NEXT: sb t4, 69(sp)
-; RV32I-NEXT: sb t3, 68(sp)
-; RV32I-NEXT: sb t2, 67(sp)
-; RV32I-NEXT: sb t1, 66(sp)
-; RV32I-NEXT: lw a0, 4(sp) # 4-byte Folded Reload
-; RV32I-NEXT: sb a0, 65(sp)
-; RV32I-NEXT: lw a0, 8(sp) # 4-byte Folded Reload
-; RV32I-NEXT: sb a0, 64(sp)
-; RV32I-NEXT: lw a0, 12(sp) # 4-byte Folded Reload
-; RV32I-NEXT: sb a0, 63(sp)
-; RV32I-NEXT: lw a0, 16(sp) # 4-byte Folded Reload
-; RV32I-NEXT: sb a0, 62(sp)
-; RV32I-NEXT: lw a0, 20(sp) # 4-byte Folded Reload
-; RV32I-NEXT: sb a0, 61(sp)
-; RV32I-NEXT: lw a0, 24(sp) # 4-byte Folded Reload
-; RV32I-NEXT: sb a0, 60(sp)
-; RV32I-NEXT: slli a0, t0, 24
-; RV32I-NEXT: srli a0, a0, 27
-; RV32I-NEXT: addi a4, sp, 60
-; RV32I-NEXT: sub a4, a4, a0
-; RV32I-NEXT: lbu a0, 5(a4)
-; RV32I-NEXT: lbu a1, 4(a4)
-; RV32I-NEXT: lbu a3, 6(a4)
-; RV32I-NEXT: lbu a5, 7(a4)
-; RV32I-NEXT: slli a0, a0, 8
-; RV32I-NEXT: or a0, a0, a1
-; RV32I-NEXT: slli a3, a3, 16
-; RV32I-NEXT: slli a5, a5, 24
-; RV32I-NEXT: or a3, a5, a3
-; RV32I-NEXT: or t5, a3, a0
-; RV32I-NEXT: andi a1, t0, 7
-; RV32I-NEXT: lbu a0, 1(a4)
-; RV32I-NEXT: lbu a3, 0(a4)
-; RV32I-NEXT: lbu a5, 2(a4)
-; RV32I-NEXT: lbu a6, 3(a4)
-; RV32I-NEXT: slli a0, a0, 8
-; RV32I-NEXT: or a0, a0, a3
+; RV32I-NEXT: lbu a4, 0(a0)
+; RV32I-NEXT: lbu a5, 2(a0)
+; RV32I-NEXT: lbu a6, 3(a0)
+; RV32I-NEXT: slli a3, a3, 8
+; RV32I-NEXT: or a3, a3, a4
; RV32I-NEXT: slli a5, a5, 16
; RV32I-NEXT: slli a6, a6, 24
-; RV32I-NEXT: or a3, a6, a5
-; RV32I-NEXT: or a6, a3, a0
-; RV32I-NEXT: srli a0, a6, 1
-; RV32I-NEXT: xori a7, a1, 31
-; RV32I-NEXT: srl a0, a0, a7
-; RV32I-NEXT: lbu a3, 13(a4)
-; RV32I-NEXT: lbu a5, 12(a4)
-; RV32I-NEXT: lbu t0, 14(a4)
-; RV32I-NEXT: lbu t1, 15(a4)
-; RV32I-NEXT: slli a3, a3, 8
-; RV32I-NEXT: or a3, a3, a5
+; RV32I-NEXT: or a4, a6, a5
+; RV32I-NEXT: or a3, a4, a3
+; RV32I-NEXT: lbu a4, 5(a0)
+; RV32I-NEXT: lbu a5, 4(a0)
+; RV32I-NEXT: lbu a6, 6(a0)
+; RV32I-NEXT: lbu a7, 7(a0)
+; RV32I-NEXT: slli a4, a4, 8
+; RV32I-NEXT: or a4, a4, a5
+; RV32I-NEXT: slli a6, a6, 16
+; RV32I-NEXT: slli a7, a7, 24
+; RV32I-NEXT: or a5, a7, a6
+; RV32I-NEXT: or a4, a5, a4
+; RV32I-NEXT: lbu a5, 9(a0)
+; RV32I-NEXT: lbu a6, 8(a0)
+; RV32I-NEXT: lbu a7, 10(a0)
+; RV32I-NEXT: lbu t0, 11(a0)
+; RV32I-NEXT: slli a5, a5, 8
+; RV32I-NEXT: or a5, a5, a6
+; RV32I-NEXT: slli a7, a7, 16
+; RV32I-NEXT: slli t0, t0, 24
+; RV32I-NEXT: or a6, t0, a7
+; RV32I-NEXT: or a5, a6, a5
+; RV32I-NEXT: lbu a6, 13(a0)
+; RV32I-NEXT: lbu a7, 12(a0)
+; RV32I-NEXT: lbu t0, 14(a0)
+; RV32I-NEXT: lbu t1, 15(a0)
+; RV32I-NEXT: slli a6, a6, 8
+; RV32I-NEXT: or a6, a6, a7
; RV32I-NEXT: slli t0, t0, 16
; RV32I-NEXT: slli t1, t1, 24
-; RV32I-NEXT: or a5, t1, t0
-; RV32I-NEXT: or t0, a5, a3
-; RV32I-NEXT: lbu a3, 9(a4)
-; RV32I-NEXT: lbu a5, 8(a4)
-; RV32I-NEXT: lbu t1, 10(a4)
-; RV32I-NEXT: lbu t2, 11(a4)
-; RV32I-NEXT: slli a3, a3, 8
-; RV32I-NEXT: or a3, a3, a5
+; RV32I-NEXT: or a7, t1, t0
+; RV32I-NEXT: or a6, a7, a6
+; RV32I-NEXT: lbu a7, 17(a0)
+; RV32I-NEXT: lbu t0, 16(a0)
+; RV32I-NEXT: lbu t1, 18(a0)
+; RV32I-NEXT: lbu t2, 19(a0)
+; RV32I-NEXT: slli a7, a7, 8
+; RV32I-NEXT: or a7, a7, t0
; RV32I-NEXT: slli t1, t1, 16
; RV32I-NEXT: slli t2, t2, 24
-; RV32I-NEXT: or a5, t2, t1
-; RV32I-NEXT: or t1, a5, a3
-; RV32I-NEXT: srli a3, t1, 1
-; RV32I-NEXT: srl a5, a3, a7
-; RV32I-NEXT: srli t4, t5, 1
-; RV32I-NEXT: not t2, a1
-; RV32I-NEXT: lbu a3, 21(a4)
-; RV32I-NEXT: lbu t3, 20(a4)
-; RV32I-NEXT: lbu t6, 22(a4)
-; RV32I-NEXT: lbu s0, 23(a4)
-; RV32I-NEXT: slli a3, a3, 8
-; RV32I-NEXT: or a3, a3, t3
-; RV32I-NEXT: slli t6, t6, 16
-; RV32I-NEXT: slli s0, s0, 24
-; RV32I-NEXT: or t3, s0, t6
-; RV32I-NEXT: or t3, t3, a3
-; RV32I-NEXT: lbu a3, 17(a4)
-; RV32I-NEXT: lbu t6, 16(a4)
-; RV32I-NEXT: lbu s0, 18(a4)
-; RV32I-NEXT: lbu s1, 19(a4)
-; RV32I-NEXT: slli a3, a3, 8
-; RV32I-NEXT: or a3, a3, t6
-; RV32I-NEXT: slli s0, s0, 16
-; RV32I-NEXT: slli s1, s1, 24
-; RV32I-NEXT: or s0, s1, s0
-; RV32I-NEXT: or s0, s0, a3
-; RV32I-NEXT: lbu a3, 29(a4)
-; RV32I-NEXT: lbu t6, 28(a4)
-; RV32I-NEXT: lbu s1, 30(a4)
-; RV32I-NEXT: lbu s2, 31(a4)
-; RV32I-NEXT: slli a3, a3, 8
-; RV32I-NEXT: or a3, a3, t6
-; RV32I-NEXT: slli s1, s1, 16
-; RV32I-NEXT: slli s2, s2, 24
-; RV32I-NEXT: or t6, s2, s1
-; RV32I-NEXT: lbu s1, 25(a4)
-; RV32I-NEXT: lbu s2, 24(a4)
-; RV32I-NEXT: srl t4, t4, t2
-; RV32I-NEXT: or t6, t6, a3
-; RV32I-NEXT: slli s1, s1, 8
-; RV32I-NEXT: or a3, s1, s2
-; RV32I-NEXT: lbu s1, 26(a4)
-; RV32I-NEXT: lbu a4, 27(a4)
-; RV32I-NEXT: srli s2, s0, 1
-; RV32I-NEXT: srl s2, s2, a7
-; RV32I-NEXT: slli s1, s1, 16
-; RV32I-NEXT: slli a4, a4, 24
-; RV32I-NEXT: or a4, a4, s1
-; RV32I-NEXT: srli s1, t0, 1
-; RV32I-NEXT: srl s1, s1, t2
-; RV32I-NEXT: or a4, a4, a3
-; RV32I-NEXT: srli a3, a4, 1
-; RV32I-NEXT: srl a7, a3, a7
-; RV32I-NEXT: srli a3, t3, 1
-; RV32I-NEXT: srl t2, a3, t2
-; RV32I-NEXT: sll a3, t5, a1
-; RV32I-NEXT: sll t0, t0, a1
-; RV32I-NEXT: sll t1, t1, a1
-; RV32I-NEXT: sll t3, t3, a1
-; RV32I-NEXT: sll t5, s0, a1
-; RV32I-NEXT: sll t6, t6, a1
-; RV32I-NEXT: sll a4, a4, a1
-; RV32I-NEXT: sll a1, a6, a1
-; RV32I-NEXT: srli a6, a4, 24
-; RV32I-NEXT: sb a6, 27(a2)
-; RV32I-NEXT: srli a6, a4, 16
-; RV32I-NEXT: sb a6, 26(a2)
-; RV32I-NEXT: or a6, a4, t2
+; RV32I-NEXT: or t0, t2, t1
+; RV32I-NEXT: or t0, t0, a7
+; RV32I-NEXT: lbu a7, 21(a0)
+; RV32I-NEXT: lbu t1, 20(a0)
+; RV32I-NEXT: lbu t2, 22(a0)
+; RV32I-NEXT: lbu t3, 23(a0)
+; RV32I-NEXT: slli a7, a7, 8
+; RV32I-NEXT: or a7, a7, t1
+; RV32I-NEXT: slli t2, t2, 16
+; RV32I-NEXT: slli t3, t3, 24
+; RV32I-NEXT: or t1, t3, t2
+; RV32I-NEXT: or t1, t1, a7
+; RV32I-NEXT: lbu a7, 25(a0)
+; RV32I-NEXT: lbu t2, 24(a0)
+; RV32I-NEXT: lbu t3, 26(a0)
+; RV32I-NEXT: lbu t4, 27(a0)
+; RV32I-NEXT: slli a7, a7, 8
+; RV32I-NEXT: or a7, a7, t2
+; RV32I-NEXT: slli t3, t3, 16
+; RV32I-NEXT: slli t4, t4, 24
+; RV32I-NEXT: or t2, t4, t3
+; RV32I-NEXT: or t2, t2, a7
+; RV32I-NEXT: lbu a7, 29(a0)
+; RV32I-NEXT: lbu t3, 28(a0)
+; RV32I-NEXT: lbu t4, 30(a0)
+; RV32I-NEXT: lbu a0, 31(a0)
+; RV32I-NEXT: slli a7, a7, 8
+; RV32I-NEXT: or a7, a7, t3
+; RV32I-NEXT: slli t4, t4, 16
+; RV32I-NEXT: slli a0, a0, 24
+; RV32I-NEXT: or a0, a0, t4
+; RV32I-NEXT: or a0, a0, a7
+; RV32I-NEXT: lbu a7, 1(a1)
+; RV32I-NEXT: lbu t3, 0(a1)
+; RV32I-NEXT: lbu t4, 2(a1)
+; RV32I-NEXT: lbu a1, 3(a1)
+; RV32I-NEXT: slli a7, a7, 8
+; RV32I-NEXT: or a7, a7, t3
+; RV32I-NEXT: slli t4, t4, 16
+; RV32I-NEXT: slli a1, a1, 24
+; RV32I-NEXT: or a1, a1, t4
+; RV32I-NEXT: or a7, a1, a7
+; RV32I-NEXT: sw zero, 28(sp)
+; RV32I-NEXT: sw zero, 24(sp)
+; RV32I-NEXT: sw zero, 20(sp)
+; RV32I-NEXT: sw zero, 16(sp)
+; RV32I-NEXT: sw zero, 12(sp)
+; RV32I-NEXT: sw zero, 8(sp)
+; RV32I-NEXT: sw zero, 4(sp)
+; RV32I-NEXT: sw zero, 0(sp)
+; RV32I-NEXT: sw a0, 60(sp)
+; RV32I-NEXT: sw t2, 56(sp)
+; RV32I-NEXT: sw t1, 52(sp)
+; RV32I-NEXT: sw t0, 48(sp)
+; RV32I-NEXT: sw a6, 44(sp)
+; RV32I-NEXT: sw a5, 40(sp)
+; RV32I-NEXT: sw a4, 36(sp)
+; RV32I-NEXT: sw a3, 32(sp)
+; RV32I-NEXT: srli a0, a7, 3
+; RV32I-NEXT: andi a0, a0, 28
+; RV32I-NEXT: addi a1, sp, 32
+; RV32I-NEXT: sub a4, a1, a0
+; RV32I-NEXT: lw a3, 4(a4)
+; RV32I-NEXT: lw a5, 0(a4)
+; RV32I-NEXT: sll a0, a3, a7
+; RV32I-NEXT: andi a1, a7, 31
+; RV32I-NEXT: xori a6, a1, 31
+; RV32I-NEXT: srli a1, a5, 1
+; RV32I-NEXT: lw t0, 12(a4)
+; RV32I-NEXT: lw t1, 8(a4)
+; RV32I-NEXT: srl a1, a1, a6
+; RV32I-NEXT: or a0, a0, a1
+; RV32I-NEXT: sll a1, t0, a7
+; RV32I-NEXT: srli t2, t1, 1
+; RV32I-NEXT: srl t2, t2, a6
+; RV32I-NEXT: or a1, a1, t2
+; RV32I-NEXT: sll t1, t1, a7
+; RV32I-NEXT: srli a3, a3, 1
+; RV32I-NEXT: lw t2, 20(a4)
+; RV32I-NEXT: lw t3, 16(a4)
+; RV32I-NEXT: srl a3, a3, a6
+; RV32I-NEXT: or a3, t1, a3
+; RV32I-NEXT: sll t1, t2, a7
+; RV32I-NEXT: srli t4, t3, 1
+; RV32I-NEXT: srl t4, t4, a6
+; RV32I-NEXT: or t1, t1, t4
+; RV32I-NEXT: sll t3, t3, a7
+; RV32I-NEXT: srli t0, t0, 1
+; RV32I-NEXT: lw t4, 28(a4)
+; RV32I-NEXT: lw a4, 24(a4)
+; RV32I-NEXT: srl t0, t0, a6
+; RV32I-NEXT: or t0, t3, t0
+; RV32I-NEXT: sll t3, t4, a7
+; RV32I-NEXT: srli t4, a4, 1
+; RV32I-NEXT: srl t4, t4, a6
+; RV32I-NEXT: or t3, t3, t4
+; RV32I-NEXT: sll a4, a4, a7
+; RV32I-NEXT: srli t2, t2, 1
+; RV32I-NEXT: srl a6, t2, a6
+; RV32I-NEXT: or a4, a4, a6
+; RV32I-NEXT: sll a5, a5, a7
+; RV32I-NEXT: sb a5, 0(a2)
+; RV32I-NEXT: srli a6, a5, 24
+; RV32I-NEXT: sb a6, 3(a2)
+; RV32I-NEXT: srli a6, a5, 16
+; RV32I-NEXT: sb a6, 2(a2)
+; RV32I-NEXT: srli a5, a5, 8
+; RV32I-NEXT: sb a5, 1(a2)
+; RV32I-NEXT: sb a4, 24(a2)
+; RV32I-NEXT: sb t3, 28(a2)
+; RV32I-NEXT: sb t0, 16(a2)
+; RV32I-NEXT: sb t1, 20(a2)
+; RV32I-NEXT: sb a3, 8(a2)
+; RV32I-NEXT: sb a1, 12(a2)
+; RV32I-NEXT: sb a0, 4(a2)
+; RV32I-NEXT: srli a5, a4, 24
+; RV32I-NEXT: sb a5, 27(a2)
+; RV32I-NEXT: srli a5, a4, 16
+; RV32I-NEXT: sb a5, 26(a2)
; RV32I-NEXT: srli a4, a4, 8
; RV32I-NEXT: sb a4, 25(a2)
-; RV32I-NEXT: srli a4, t6, 24
+; RV32I-NEXT: srli a4, t3, 24
; RV32I-NEXT: sb a4, 31(a2)
-; RV32I-NEXT: srli a4, t6, 16
+; RV32I-NEXT: srli a4, t3, 16
; RV32I-NEXT: sb a4, 30(a2)
-; RV32I-NEXT: or a4, t6, a7
-; RV32I-NEXT: srli a7, t6, 8
-; RV32I-NEXT: sb a7, 29(a2)
-; RV32I-NEXT: srli a7, t5, 24
-; RV32I-NEXT: sb a7, 19(a2)
-; RV32I-NEXT: srli a7, t5, 16
-; RV32I-NEXT: sb a7, 18(a2)
-; RV32I-NEXT: or a7, t5, s1
-; RV32I-NEXT: srli t2, t5, 8
-; RV32I-NEXT: sb t2, 17(a2)
-; RV32I-NEXT: srli t2, t3, 24
-; RV32I-NEXT: sb t2, 23(a2)
-; RV32I-NEXT: srli t2, t3, 16
-; RV32I-NEXT: sb t2, 22(a2)
-; RV32I-NEXT: or t2, t3, s2
-; RV32I-NEXT: srli t3, t3, 8
-; RV32I-NEXT: sb t3, 21(a2)
-; RV32I-NEXT: srli t3, t1, 24
-; RV32I-NEXT: sb t3, 11(a2)
-; RV32I-NEXT: srli t3, t1, 16
-; RV32I-NEXT: sb t3, 10(a2)
-; RV32I-NEXT: or t3, t1, t4
-; RV32I-NEXT: srli t1, t1, 8
-; RV32I-NEXT: sb t1, 9(a2)
-; RV32I-NEXT: srli t1, t0, 24
-; RV32I-NEXT: sb t1, 15(a2)
-; RV32I-NEXT: srli t1, t0, 16
-; RV32I-NEXT: sb t1, 14(a2)
-; RV32I-NEXT: or a5, t0, a5
-; RV32I-NEXT: srli t0, t0, 8
-; RV32I-NEXT: sb t0, 13(a2)
-; RV32I-NEXT: srli t0, a1, 24
-; RV32I-NEXT: sb t0, 3(a2)
-; RV32I-NEXT: srli t0, a1, 16
-; RV32I-NEXT: sb t0, 2(a2)
-; RV32I-NEXT: sb a1, 0(a2)
+; RV32I-NEXT: srli a4, t3, 8
+; RV32I-NEXT: sb a4, 29(a2)
+; RV32I-NEXT: srli a4, t0, 24
+; RV32I-NEXT: sb a4, 19(a2)
+; RV32I-NEXT: srli a4, t0, 16
+; RV32I-NEXT: sb a4, 18(a2)
+; RV32I-NEXT: srli a4, t0, 8
+; RV32I-NEXT: sb a4, 17(a2)
+; RV32I-NEXT: srli a4, t1, 24
+; RV32I-NEXT: sb a4, 23(a2)
+; RV32I-NEXT: srli a4, t1, 16
+; RV32I-NEXT: sb a4, 22(a2)
+; RV32I-NEXT: srli a4, t1, 8
+; RV32I-NEXT: sb a4, 21(a2)
+; RV32I-NEXT: srli a4, a3, 24
+; RV32I-NEXT: sb a4, 11(a2)
+; RV32I-NEXT: srli a4, a3, 16
+; RV32I-NEXT: sb a4, 10(a2)
+; RV32I-NEXT: srli a3, a3, 8
+; RV32I-NEXT: sb a3, 9(a2)
+; RV32I-NEXT: srli a3, a1, 24
+; RV32I-NEXT: sb a3, 15(a2)
+; RV32I-NEXT: srli a3, a1, 16
+; RV32I-NEXT: sb a3, 14(a2)
; RV32I-NEXT: srli a1, a1, 8
-; RV32I-NEXT: sb a1, 1(a2)
-; RV32I-NEXT: srli a1, a3, 24
+; RV32I-NEXT: sb a1, 13(a2)
+; RV32I-NEXT: srli a1, a0, 24
; RV32I-NEXT: sb a1, 7(a2)
-; RV32I-NEXT: srli a1, a3, 16
+; RV32I-NEXT: srli a1, a0, 16
; RV32I-NEXT: sb a1, 6(a2)
-; RV32I-NEXT: or a0, a3, a0
-; RV32I-NEXT: srli a3, a3, 8
-; RV32I-NEXT: sb a3, 5(a2)
-; RV32I-NEXT: sb a6, 24(a2)
-; RV32I-NEXT: sb a4, 28(a2)
-; RV32I-NEXT: sb a7, 16(a2)
-; RV32I-NEXT: sb t2, 20(a2)
-; RV32I-NEXT: sb t3, 8(a2)
-; RV32I-NEXT: sb a5, 12(a2)
-; RV32I-NEXT: sb a0, 4(a2)
-; RV32I-NEXT: lw ra, 140(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s0, 136(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s1, 132(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s2, 128(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s3, 124(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s4, 120(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s5, 116(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s6, 112(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s7, 108(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s8, 104(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s9, 100(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s10, 96(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s11, 92(sp) # 4-byte Folded Reload
-; RV32I-NEXT: addi sp, sp, 144
+; RV32I-NEXT: srli a0, a0, 8
+; RV32I-NEXT: sb a0, 5(a2)
+; RV32I-NEXT: addi sp, sp, 64
; RV32I-NEXT: ret
%src = load i256, ptr %src.ptr, align 1
%bitOff = load i256, ptr %bitOff.ptr, align 1
@@ -2762,200 +2147,43 @@ define void @shl_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
define void @ashr_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; RV64I-LABEL: ashr_32bytes:
; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -224
-; RV64I-NEXT: sd ra, 216(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s0, 208(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s1, 200(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s2, 192(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s3, 184(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s4, 176(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s5, 168(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s6, 160(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s7, 152(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s8, 144(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s9, 136(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s10, 128(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s11, 120(sp) # 8-byte Folded Spill
-; RV64I-NEXT: lbu t1, 31(a0)
-; RV64I-NEXT: lbu a3, 0(a0)
-; RV64I-NEXT: sd a3, 48(sp) # 8-byte Folded Spill
+; RV64I-NEXT: addi sp, sp, -64
; RV64I-NEXT: lbu a3, 1(a0)
-; RV64I-NEXT: sd a3, 40(sp) # 8-byte Folded Spill
-; RV64I-NEXT: lbu a3, 2(a0)
-; RV64I-NEXT: sd a3, 32(sp) # 8-byte Folded Spill
-; RV64I-NEXT: lbu a3, 3(a0)
-; RV64I-NEXT: sd a3, 24(sp) # 8-byte Folded Spill
-; RV64I-NEXT: lbu a3, 4(a0)
-; RV64I-NEXT: sd a3, 16(sp) # 8-byte Folded Spill
-; RV64I-NEXT: lbu a3, 5(a0)
-; RV64I-NEXT: sd a3, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: lbu t3, 6(a0)
-; RV64I-NEXT: lbu t4, 7(a0)
-; RV64I-NEXT: lbu t5, 8(a0)
-; RV64I-NEXT: lbu t6, 9(a0)
-; RV64I-NEXT: lbu s0, 10(a0)
-; RV64I-NEXT: lbu s1, 11(a0)
-; RV64I-NEXT: lbu s2, 12(a0)
-; RV64I-NEXT: lbu s3, 13(a0)
-; RV64I-NEXT: lbu s4, 14(a0)
-; RV64I-NEXT: lbu s5, 15(a0)
-; RV64I-NEXT: lbu s6, 16(a0)
-; RV64I-NEXT: lbu s7, 17(a0)
-; RV64I-NEXT: lbu s8, 18(a0)
-; RV64I-NEXT: lbu s9, 19(a0)
-; RV64I-NEXT: lbu a3, 1(a1)
-; RV64I-NEXT: lbu s10, 0(a1)
-; RV64I-NEXT: lbu s11, 2(a1)
-; RV64I-NEXT: lbu ra, 3(a1)
-; RV64I-NEXT: slli a3, a3, 8
-; RV64I-NEXT: or a3, a3, s10
-; RV64I-NEXT: slli s11, s11, 16
-; RV64I-NEXT: slli ra, ra, 24
-; RV64I-NEXT: lbu s10, 5(a1)
-; RV64I-NEXT: or s11, ra, s11
-; RV64I-NEXT: or a3, s11, a3
-; RV64I-NEXT: lbu s11, 4(a1)
-; RV64I-NEXT: slli s10, s10, 8
-; RV64I-NEXT: lbu ra, 6(a1)
-; RV64I-NEXT: lbu a1, 7(a1)
-; RV64I-NEXT: or s10, s10, s11
-; RV64I-NEXT: lbu s11, 20(a0)
-; RV64I-NEXT: slli ra, ra, 16
-; RV64I-NEXT: slli a1, a1, 24
-; RV64I-NEXT: or a1, a1, ra
-; RV64I-NEXT: lbu ra, 21(a0)
-; RV64I-NEXT: or a1, a1, s10
-; RV64I-NEXT: lbu s10, 22(a0)
-; RV64I-NEXT: slli a1, a1, 32
-; RV64I-NEXT: or t2, a1, a3
-; RV64I-NEXT: lbu t0, 23(a0)
-; RV64I-NEXT: lbu a7, 24(a0)
-; RV64I-NEXT: lbu a6, 25(a0)
-; RV64I-NEXT: lbu a5, 26(a0)
-; RV64I-NEXT: lbu a1, 30(a0)
-; RV64I-NEXT: lbu a3, 29(a0)
-; RV64I-NEXT: lbu a4, 28(a0)
-; RV64I-NEXT: lbu a0, 27(a0)
-; RV64I-NEXT: sb a1, 86(sp)
-; RV64I-NEXT: sb a3, 85(sp)
-; RV64I-NEXT: sb a4, 84(sp)
-; RV64I-NEXT: sb a0, 83(sp)
-; RV64I-NEXT: sb a5, 82(sp)
-; RV64I-NEXT: sb a6, 81(sp)
-; RV64I-NEXT: sb a7, 80(sp)
-; RV64I-NEXT: sb t0, 79(sp)
-; RV64I-NEXT: sb s10, 78(sp)
-; RV64I-NEXT: sb ra, 77(sp)
-; RV64I-NEXT: sb s11, 76(sp)
-; RV64I-NEXT: sb s9, 75(sp)
-; RV64I-NEXT: sb s8, 74(sp)
-; RV64I-NEXT: sb s7, 73(sp)
-; RV64I-NEXT: sb s6, 72(sp)
-; RV64I-NEXT: sb s5, 71(sp)
-; RV64I-NEXT: sb s4, 70(sp)
-; RV64I-NEXT: sb s3, 69(sp)
-; RV64I-NEXT: sb s2, 68(sp)
-; RV64I-NEXT: sb s1, 67(sp)
-; RV64I-NEXT: sb s0, 66(sp)
-; RV64I-NEXT: sb t6, 65(sp)
-; RV64I-NEXT: sb t5, 64(sp)
-; RV64I-NEXT: sb t1, 87(sp)
-; RV64I-NEXT: slli t1, t1, 56
-; RV64I-NEXT: sb t4, 63(sp)
-; RV64I-NEXT: sb t3, 62(sp)
-; RV64I-NEXT: ld a0, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: sb a0, 61(sp)
-; RV64I-NEXT: ld a0, 16(sp) # 8-byte Folded Reload
-; RV64I-NEXT: sb a0, 60(sp)
-; RV64I-NEXT: ld a0, 24(sp) # 8-byte Folded Reload
-; RV64I-NEXT: sb a0, 59(sp)
-; RV64I-NEXT: ld a0, 32(sp) # 8-byte Folded Reload
-; RV64I-NEXT: sb a0, 58(sp)
-; RV64I-NEXT: ld a0, 40(sp) # 8-byte Folded Reload
-; RV64I-NEXT: sb a0, 57(sp)
-; RV64I-NEXT: ld a0, 48(sp) # 8-byte Folded Reload
-; RV64I-NEXT: sb a0, 56(sp)
-; RV64I-NEXT: srai a0, t1, 63
-; RV64I-NEXT: sb a0, 112(sp)
-; RV64I-NEXT: sb a0, 104(sp)
-; RV64I-NEXT: sb a0, 96(sp)
-; RV64I-NEXT: sb a0, 88(sp)
-; RV64I-NEXT: srli a1, a0, 56
-; RV64I-NEXT: sb a1, 119(sp)
-; RV64I-NEXT: srli a3, a0, 48
-; RV64I-NEXT: sb a3, 118(sp)
-; RV64I-NEXT: srli a4, a0, 40
-; RV64I-NEXT: sb a4, 117(sp)
-; RV64I-NEXT: srli a5, a0, 32
-; RV64I-NEXT: sb a5, 116(sp)
-; RV64I-NEXT: srli a6, a0, 24
-; RV64I-NEXT: sb a6, 115(sp)
-; RV64I-NEXT: srli a7, a0, 16
-; RV64I-NEXT: sb a7, 114(sp)
-; RV64I-NEXT: srli a0, a0, 8
-; RV64I-NEXT: sb a0, 113(sp)
-; RV64I-NEXT: sb a1, 111(sp)
-; RV64I-NEXT: sb a3, 110(sp)
-; RV64I-NEXT: sb a4, 109(sp)
-; RV64I-NEXT: sb a5, 108(sp)
-; RV64I-NEXT: sb a6, 107(sp)
-; RV64I-NEXT: sb a7, 106(sp)
-; RV64I-NEXT: sb a0, 105(sp)
-; RV64I-NEXT: sb a1, 103(sp)
-; RV64I-NEXT: sb a3, 102(sp)
-; RV64I-NEXT: sb a4, 101(sp)
-; RV64I-NEXT: sb a5, 100(sp)
-; RV64I-NEXT: sb a6, 99(sp)
-; RV64I-NEXT: sb a7, 98(sp)
-; RV64I-NEXT: sb a0, 97(sp)
-; RV64I-NEXT: sb a1, 95(sp)
-; RV64I-NEXT: sb a3, 94(sp)
-; RV64I-NEXT: sb a4, 93(sp)
-; RV64I-NEXT: sb a5, 92(sp)
-; RV64I-NEXT: sb a6, 91(sp)
-; RV64I-NEXT: sb a7, 90(sp)
-; RV64I-NEXT: sb a0, 89(sp)
-; RV64I-NEXT: slli a0, t2, 56
-; RV64I-NEXT: srli a0, a0, 59
-; RV64I-NEXT: addi a1, sp, 56
-; RV64I-NEXT: add a1, a1, a0
-; RV64I-NEXT: lbu a0, 9(a1)
-; RV64I-NEXT: lbu a3, 8(a1)
-; RV64I-NEXT: lbu a4, 10(a1)
-; RV64I-NEXT: lbu a5, 11(a1)
-; RV64I-NEXT: slli a0, a0, 8
-; RV64I-NEXT: or a0, a0, a3
-; RV64I-NEXT: slli a4, a4, 16
-; RV64I-NEXT: slli a5, a5, 24
-; RV64I-NEXT: or a4, a5, a4
-; RV64I-NEXT: or a0, a4, a0
-; RV64I-NEXT: lbu a3, 13(a1)
-; RV64I-NEXT: lbu a4, 12(a1)
-; RV64I-NEXT: lbu a5, 14(a1)
-; RV64I-NEXT: lbu a6, 15(a1)
+; RV64I-NEXT: lbu a4, 0(a0)
+; RV64I-NEXT: lbu a5, 2(a0)
+; RV64I-NEXT: lbu a6, 3(a0)
; RV64I-NEXT: slli a3, a3, 8
; RV64I-NEXT: or a3, a3, a4
; RV64I-NEXT: slli a5, a5, 16
; RV64I-NEXT: slli a6, a6, 24
; RV64I-NEXT: or a4, a6, a5
; RV64I-NEXT: or a3, a4, a3
-; RV64I-NEXT: slli a3, a3, 32
-; RV64I-NEXT: or a4, a3, a0
-; RV64I-NEXT: andi a3, t2, 7
-; RV64I-NEXT: lbu a0, 17(a1)
-; RV64I-NEXT: lbu a5, 16(a1)
-; RV64I-NEXT: lbu a6, 18(a1)
-; RV64I-NEXT: lbu a7, 19(a1)
-; RV64I-NEXT: slli a0, a0, 8
-; RV64I-NEXT: or a0, a0, a5
+; RV64I-NEXT: lbu a4, 5(a0)
+; RV64I-NEXT: lbu a5, 4(a0)
+; RV64I-NEXT: lbu a6, 6(a0)
+; RV64I-NEXT: lbu a7, 7(a0)
+; RV64I-NEXT: slli a4, a4, 8
+; RV64I-NEXT: or a4, a4, a5
; RV64I-NEXT: slli a6, a6, 16
; RV64I-NEXT: slli a7, a7, 24
; RV64I-NEXT: or a5, a7, a6
-; RV64I-NEXT: or a0, a5, a0
-; RV64I-NEXT: lbu a5, 21(a1)
-; RV64I-NEXT: lbu a6, 20(a1)
-; RV64I-NEXT: lbu a7, 22(a1)
-; RV64I-NEXT: lbu t0, 23(a1)
+; RV64I-NEXT: or a4, a5, a4
+; RV64I-NEXT: slli a4, a4, 32
+; RV64I-NEXT: or a3, a4, a3
+; RV64I-NEXT: lbu a4, 9(a0)
+; RV64I-NEXT: lbu a5, 8(a0)
+; RV64I-NEXT: lbu a6, 10(a0)
+; RV64I-NEXT: lbu a7, 11(a0)
+; RV64I-NEXT: slli a4, a4, 8
+; RV64I-NEXT: or a4, a4, a5
+; RV64I-NEXT: slli a6, a6, 16
+; RV64I-NEXT: slli a7, a7, 24
+; RV64I-NEXT: or a5, a7, a6
+; RV64I-NEXT: or a4, a5, a4
+; RV64I-NEXT: lbu a5, 13(a0)
+; RV64I-NEXT: lbu a6, 12(a0)
+; RV64I-NEXT: lbu a7, 14(a0)
+; RV64I-NEXT: lbu t0, 15(a0)
; RV64I-NEXT: slli a5, a5, 8
; RV64I-NEXT: or a5, a5, a6
; RV64I-NEXT: slli a7, a7, 16
@@ -2963,467 +2191,378 @@ define void @ashr_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; RV64I-NEXT: or a6, t0, a7
; RV64I-NEXT: or a5, a6, a5
; RV64I-NEXT: slli a5, a5, 32
-; RV64I-NEXT: or a5, a5, a0
-; RV64I-NEXT: slli a0, a5, 1
-; RV64I-NEXT: not a6, a3
-; RV64I-NEXT: sll a0, a0, a6
-; RV64I-NEXT: lbu a6, 1(a1)
-; RV64I-NEXT: lbu a7, 0(a1)
-; RV64I-NEXT: lbu t0, 2(a1)
-; RV64I-NEXT: lbu t1, 3(a1)
+; RV64I-NEXT: or a4, a5, a4
+; RV64I-NEXT: lbu a5, 17(a0)
+; RV64I-NEXT: lbu a6, 16(a0)
+; RV64I-NEXT: lbu a7, 18(a0)
+; RV64I-NEXT: lbu t0, 19(a0)
+; RV64I-NEXT: slli a5, a5, 8
+; RV64I-NEXT: or a5, a5, a6
+; RV64I-NEXT: slli a7, a7, 16
+; RV64I-NEXT: slli t0, t0, 24
+; RV64I-NEXT: or a6, t0, a7
+; RV64I-NEXT: or a5, a6, a5
+; RV64I-NEXT: lbu a6, 21(a0)
+; RV64I-NEXT: lbu a7, 20(a0)
+; RV64I-NEXT: lbu t0, 22(a0)
+; RV64I-NEXT: lbu t1, 23(a0)
; RV64I-NEXT: slli a6, a6, 8
; RV64I-NEXT: or a6, a6, a7
; RV64I-NEXT: slli t0, t0, 16
; RV64I-NEXT: slli t1, t1, 24
; RV64I-NEXT: or a7, t1, t0
; RV64I-NEXT: or a6, a7, a6
-; RV64I-NEXT: lbu a7, 5(a1)
-; RV64I-NEXT: lbu t0, 4(a1)
-; RV64I-NEXT: lbu t1, 6(a1)
-; RV64I-NEXT: lbu t2, 7(a1)
+; RV64I-NEXT: slli a6, a6, 32
+; RV64I-NEXT: or a5, a6, a5
+; RV64I-NEXT: lbu a6, 25(a0)
+; RV64I-NEXT: lbu a7, 24(a0)
+; RV64I-NEXT: lbu t0, 26(a0)
+; RV64I-NEXT: lbu t1, 27(a0)
+; RV64I-NEXT: slli a6, a6, 8
+; RV64I-NEXT: or a6, a6, a7
+; RV64I-NEXT: slli t0, t0, 16
+; RV64I-NEXT: slli t1, t1, 24
+; RV64I-NEXT: or a7, t1, t0
+; RV64I-NEXT: or a6, a7, a6
+; RV64I-NEXT: lbu a7, 29(a0)
+; RV64I-NEXT: lbu t0, 28(a0)
+; RV64I-NEXT: lbu t1, 30(a0)
+; RV64I-NEXT: lbu a0, 31(a0)
; RV64I-NEXT: slli a7, a7, 8
; RV64I-NEXT: or a7, a7, t0
; RV64I-NEXT: slli t1, t1, 16
-; RV64I-NEXT: slli t2, t2, 24
-; RV64I-NEXT: or t0, t2, t1
-; RV64I-NEXT: or a7, t0, a7
-; RV64I-NEXT: slli a7, a7, 32
+; RV64I-NEXT: slli a0, a0, 24
+; RV64I-NEXT: or a0, a0, t1
+; RV64I-NEXT: or a0, a0, a7
+; RV64I-NEXT: slli a7, a0, 32
; RV64I-NEXT: or a6, a7, a6
-; RV64I-NEXT: lbu a7, 25(a1)
-; RV64I-NEXT: lbu t0, 24(a1)
-; RV64I-NEXT: lbu t1, 26(a1)
-; RV64I-NEXT: lbu t2, 27(a1)
+; RV64I-NEXT: lbu a7, 1(a1)
+; RV64I-NEXT: lbu t0, 0(a1)
+; RV64I-NEXT: lbu t1, 2(a1)
+; RV64I-NEXT: lbu t2, 3(a1)
; RV64I-NEXT: slli a7, a7, 8
; RV64I-NEXT: or a7, a7, t0
; RV64I-NEXT: slli t1, t1, 16
; RV64I-NEXT: slli t2, t2, 24
; RV64I-NEXT: or t0, t2, t1
; RV64I-NEXT: or a7, t0, a7
-; RV64I-NEXT: lbu t0, 29(a1)
-; RV64I-NEXT: lbu t1, 28(a1)
-; RV64I-NEXT: lbu t2, 30(a1)
-; RV64I-NEXT: lbu a1, 31(a1)
+; RV64I-NEXT: lbu t0, 5(a1)
+; RV64I-NEXT: lbu t1, 4(a1)
+; RV64I-NEXT: lbu t2, 6(a1)
+; RV64I-NEXT: lbu a1, 7(a1)
; RV64I-NEXT: slli t0, t0, 8
; RV64I-NEXT: or t0, t0, t1
; RV64I-NEXT: slli t2, t2, 16
; RV64I-NEXT: slli a1, a1, 24
; RV64I-NEXT: or a1, a1, t2
-; RV64I-NEXT: slli t1, a4, 1
; RV64I-NEXT: or a1, a1, t0
-; RV64I-NEXT: xori t0, a3, 63
-; RV64I-NEXT: sll t1, t1, t0
; RV64I-NEXT: slli a1, a1, 32
-; RV64I-NEXT: or a7, a1, a7
-; RV64I-NEXT: slli a1, a7, 1
-; RV64I-NEXT: sll t0, a1, t0
-; RV64I-NEXT: srl a1, a4, a3
-; RV64I-NEXT: srl a4, a6, a3
-; RV64I-NEXT: srl a5, a5, a3
-; RV64I-NEXT: sra a3, a7, a3
-; RV64I-NEXT: srli a6, a5, 48
-; RV64I-NEXT: sb a6, 22(a2)
-; RV64I-NEXT: srli a6, a5, 40
-; RV64I-NEXT: sb a6, 21(a2)
-; RV64I-NEXT: srli a6, a5, 32
-; RV64I-NEXT: sb a6, 20(a2)
-; RV64I-NEXT: srli a6, a5, 24
-; RV64I-NEXT: sb a6, 19(a2)
-; RV64I-NEXT: srli a6, a5, 16
-; RV64I-NEXT: sb a6, 18(a2)
-; RV64I-NEXT: or a6, a5, t0
+; RV64I-NEXT: or a1, a1, a7
+; RV64I-NEXT: sraiw a0, a0, 31
+; RV64I-NEXT: sd a0, 56(sp)
+; RV64I-NEXT: sd a0, 48(sp)
+; RV64I-NEXT: sd a0, 40(sp)
+; RV64I-NEXT: sd a0, 32(sp)
+; RV64I-NEXT: sd a6, 24(sp)
+; RV64I-NEXT: sd a5, 16(sp)
+; RV64I-NEXT: sd a4, 8(sp)
+; RV64I-NEXT: sd a3, 0(sp)
+; RV64I-NEXT: srli a0, a1, 3
+; RV64I-NEXT: andi a0, a0, 24
+; RV64I-NEXT: mv a3, sp
+; RV64I-NEXT: add a3, a3, a0
+; RV64I-NEXT: ld a4, 8(a3)
+; RV64I-NEXT: srl a0, a4, a1
+; RV64I-NEXT: ld a5, 16(a3)
+; RV64I-NEXT: andi a6, a1, 63
+; RV64I-NEXT: xori a6, a6, 63
+; RV64I-NEXT: ld a7, 0(a3)
+; RV64I-NEXT: slli t0, a5, 1
+; RV64I-NEXT: sll t0, t0, a6
+; RV64I-NEXT: or a0, a0, t0
+; RV64I-NEXT: srl a7, a7, a1
+; RV64I-NEXT: slli a4, a4, 1
+; RV64I-NEXT: ld a3, 24(a3)
+; RV64I-NEXT: sll a4, a4, a6
+; RV64I-NEXT: or a4, a7, a4
+; RV64I-NEXT: srl a5, a5, a1
+; RV64I-NEXT: slli a7, a3, 1
+; RV64I-NEXT: sll a6, a7, a6
+; RV64I-NEXT: or a5, a5, a6
+; RV64I-NEXT: sra a1, a3, a1
+; RV64I-NEXT: sb a1, 24(a2)
+; RV64I-NEXT: srli a3, a1, 56
+; RV64I-NEXT: sb a3, 31(a2)
+; RV64I-NEXT: srli a3, a1, 48
+; RV64I-NEXT: sb a3, 30(a2)
+; RV64I-NEXT: srli a3, a1, 40
+; RV64I-NEXT: sb a3, 29(a2)
+; RV64I-NEXT: srli a3, a1, 32
+; RV64I-NEXT: sb a3, 28(a2)
+; RV64I-NEXT: srli a3, a1, 24
+; RV64I-NEXT: sb a3, 27(a2)
+; RV64I-NEXT: srli a3, a1, 16
+; RV64I-NEXT: sb a3, 26(a2)
+; RV64I-NEXT: srli a1, a1, 8
+; RV64I-NEXT: sb a1, 25(a2)
; RV64I-NEXT: sb a5, 16(a2)
+; RV64I-NEXT: sb a4, 0(a2)
+; RV64I-NEXT: sb a0, 8(a2)
+; RV64I-NEXT: srli a1, a5, 56
+; RV64I-NEXT: sb a1, 23(a2)
+; RV64I-NEXT: srli a1, a5, 48
+; RV64I-NEXT: sb a1, 22(a2)
+; RV64I-NEXT: srli a1, a5, 40
+; RV64I-NEXT: sb a1, 21(a2)
+; RV64I-NEXT: srli a1, a5, 32
+; RV64I-NEXT: sb a1, 20(a2)
+; RV64I-NEXT: srli a1, a5, 24
+; RV64I-NEXT: sb a1, 19(a2)
+; RV64I-NEXT: srli a1, a5, 16
+; RV64I-NEXT: sb a1, 18(a2)
; RV64I-NEXT: srli a5, a5, 8
; RV64I-NEXT: sb a5, 17(a2)
-; RV64I-NEXT: srli a5, a3, 56
-; RV64I-NEXT: sb a5, 31(a2)
-; RV64I-NEXT: srli a5, a3, 48
-; RV64I-NEXT: sb a5, 30(a2)
-; RV64I-NEXT: srli a5, a3, 40
-; RV64I-NEXT: sb a5, 29(a2)
-; RV64I-NEXT: srli a5, a3, 32
-; RV64I-NEXT: sb a5, 28(a2)
-; RV64I-NEXT: srli a5, a3, 24
-; RV64I-NEXT: sb a5, 27(a2)
-; RV64I-NEXT: srli a5, a3, 16
-; RV64I-NEXT: sb a5, 26(a2)
-; RV64I-NEXT: sb a3, 24(a2)
-; RV64I-NEXT: srli a3, a3, 8
-; RV64I-NEXT: sb a3, 25(a2)
-; RV64I-NEXT: srli a3, a4, 48
-; RV64I-NEXT: sb a3, 6(a2)
-; RV64I-NEXT: srli a3, a4, 40
-; RV64I-NEXT: sb a3, 5(a2)
-; RV64I-NEXT: srli a3, a4, 32
-; RV64I-NEXT: sb a3, 4(a2)
-; RV64I-NEXT: srli a3, a4, 24
-; RV64I-NEXT: sb a3, 3(a2)
-; RV64I-NEXT: srli a3, a4, 16
-; RV64I-NEXT: sb a3, 2(a2)
-; RV64I-NEXT: or a3, a4, t1
-; RV64I-NEXT: sb a4, 0(a2)
+; RV64I-NEXT: srli a1, a4, 56
+; RV64I-NEXT: sb a1, 7(a2)
+; RV64I-NEXT: srli a1, a4, 48
+; RV64I-NEXT: sb a1, 6(a2)
+; RV64I-NEXT: srli a1, a4, 40
+; RV64I-NEXT: sb a1, 5(a2)
+; RV64I-NEXT: srli a1, a4, 32
+; RV64I-NEXT: sb a1, 4(a2)
+; RV64I-NEXT: srli a1, a4, 24
+; RV64I-NEXT: sb a1, 3(a2)
+; RV64I-NEXT: srli a1, a4, 16
+; RV64I-NEXT: sb a1, 2(a2)
; RV64I-NEXT: srli a4, a4, 8
; RV64I-NEXT: sb a4, 1(a2)
-; RV64I-NEXT: srli a4, a1, 48
-; RV64I-NEXT: sb a4, 14(a2)
-; RV64I-NEXT: srli a4, a1, 40
-; RV64I-NEXT: sb a4, 13(a2)
-; RV64I-NEXT: srli a4, a1, 32
-; RV64I-NEXT: sb a4, 12(a2)
-; RV64I-NEXT: srli a4, a1, 24
-; RV64I-NEXT: sb a4, 11(a2)
-; RV64I-NEXT: srli a4, a1, 16
-; RV64I-NEXT: sb a4, 10(a2)
-; RV64I-NEXT: or a0, a1, a0
-; RV64I-NEXT: sb a1, 8(a2)
-; RV64I-NEXT: srli a1, a1, 8
-; RV64I-NEXT: sb a1, 9(a2)
-; RV64I-NEXT: srli a1, a6, 56
-; RV64I-NEXT: sb a1, 23(a2)
-; RV64I-NEXT: srli a3, a3, 56
-; RV64I-NEXT: sb a3, 7(a2)
-; RV64I-NEXT: srli a0, a0, 56
-; RV64I-NEXT: sb a0, 15(a2)
-; RV64I-NEXT: ld ra, 216(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s0, 208(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s1, 200(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s2, 192(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s3, 184(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s4, 176(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s5, 168(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s6, 160(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s7, 152(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s8, 144(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s9, 136(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s10, 128(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s11, 120(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 224
+; RV64I-NEXT: srli a1, a0, 56
+; RV64I-NEXT: sb a1, 15(a2)
+; RV64I-NEXT: srli a1, a0, 48
+; RV64I-NEXT: sb a1, 14(a2)
+; RV64I-NEXT: srli a1, a0, 40
+; RV64I-NEXT: sb a1, 13(a2)
+; RV64I-NEXT: srli a1, a0, 32
+; RV64I-NEXT: sb a1, 12(a2)
+; RV64I-NEXT: srli a1, a0, 24
+; RV64I-NEXT: sb a1, 11(a2)
+; RV64I-NEXT: srli a1, a0, 16
+; RV64I-NEXT: sb a1, 10(a2)
+; RV64I-NEXT: srli a0, a0, 8
+; RV64I-NEXT: sb a0, 9(a2)
+; RV64I-NEXT: addi sp, sp, 64
; RV64I-NEXT: ret
;
; RV32I-LABEL: ashr_32bytes:
; RV32I: # %bb.0:
-; RV32I-NEXT: addi sp, sp, -144
-; RV32I-NEXT: sw ra, 140(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s0, 136(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s1, 132(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s2, 128(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s3, 124(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s4, 120(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s5, 116(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s6, 112(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s7, 108(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s8, 104(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s9, 100(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s10, 96(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s11, 92(sp) # 4-byte Folded Spill
-; RV32I-NEXT: lbu t3, 31(a0)
-; RV32I-NEXT: lbu a3, 0(a0)
-; RV32I-NEXT: sw a3, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT: addi sp, sp, -64
; RV32I-NEXT: lbu a3, 1(a0)
-; RV32I-NEXT: sw a3, 20(sp) # 4-byte Folded Spill
-; RV32I-NEXT: lbu a3, 2(a0)
-; RV32I-NEXT: sw a3, 16(sp) # 4-byte Folded Spill
-; RV32I-NEXT: lbu a3, 3(a0)
-; RV32I-NEXT: sw a3, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: lbu a3, 4(a0)
-; RV32I-NEXT: sw a3, 8(sp) # 4-byte Folded Spill
-; RV32I-NEXT: lbu a3, 5(a0)
-; RV32I-NEXT: sw a3, 4(sp) # 4-byte Folded Spill
-; RV32I-NEXT: lbu t2, 6(a0)
-; RV32I-NEXT: lbu t4, 7(a0)
-; RV32I-NEXT: lbu t5, 8(a0)
-; RV32I-NEXT: lbu t6, 9(a0)
-; RV32I-NEXT: lbu s0, 10(a0)
-; RV32I-NEXT: lbu s1, 11(a0)
-; RV32I-NEXT: lbu s2, 12(a0)
-; RV32I-NEXT: lbu s3, 13(a0)
-; RV32I-NEXT: lbu s4, 14(a0)
-; RV32I-NEXT: lbu s5, 15(a0)
-; RV32I-NEXT: lbu s6, 16(a0)
-; RV32I-NEXT: lbu s7, 17(a0)
-; RV32I-NEXT: lbu s8, 18(a0)
-; RV32I-NEXT: lbu a3, 1(a1)
-; RV32I-NEXT: lbu s9, 19(a0)
-; RV32I-NEXT: lbu s10, 20(a0)
-; RV32I-NEXT: lbu s11, 0(a1)
+; RV32I-NEXT: lbu a4, 0(a0)
+; RV32I-NEXT: lbu a5, 2(a0)
+; RV32I-NEXT: lbu a6, 3(a0)
; RV32I-NEXT: slli a3, a3, 8
-; RV32I-NEXT: lbu ra, 2(a1)
-; RV32I-NEXT: lbu a1, 3(a1)
-; RV32I-NEXT: or a3, a3, s11
-; RV32I-NEXT: lbu s11, 21(a0)
-; RV32I-NEXT: slli ra, ra, 16
-; RV32I-NEXT: slli a1, a1, 24
-; RV32I-NEXT: or a1, a1, ra
-; RV32I-NEXT: lbu ra, 22(a0)
-; RV32I-NEXT: or t1, a1, a3
-; RV32I-NEXT: lbu t0, 23(a0)
-; RV32I-NEXT: lbu a7, 24(a0)
-; RV32I-NEXT: lbu a6, 25(a0)
-; RV32I-NEXT: lbu a5, 26(a0)
-; RV32I-NEXT: lbu a1, 30(a0)
-; RV32I-NEXT: lbu a3, 29(a0)
-; RV32I-NEXT: lbu a4, 28(a0)
-; RV32I-NEXT: lbu a0, 27(a0)
-; RV32I-NEXT: sb a1, 58(sp)
-; RV32I-NEXT: sb a3, 57(sp)
-; RV32I-NEXT: sb a4, 56(sp)
-; RV32I-NEXT: sb a0, 55(sp)
-; RV32I-NEXT: sb a5, 54(sp)
-; RV32I-NEXT: sb a6, 53(sp)
-; RV32I-NEXT: sb a7, 52(sp)
-; RV32I-NEXT: sb t0, 51(sp)
-; RV32I-NEXT: sb ra, 50(sp)
-; RV32I-NEXT: sb s11, 49(sp)
-; RV32I-NEXT: sb s10, 48(sp)
-; RV32I-NEXT: sb s9, 47(sp)
-; RV32I-NEXT: sb s8, 46(sp)
-; RV32I-NEXT: sb s7, 45(sp)
-; RV32I-NEXT: sb s6, 44(sp)
-; RV32I-NEXT: sb s5, 43(sp)
-; RV32I-NEXT: sb t3, 59(sp)
-; RV32I-NEXT: slli t3, t3, 24
-; RV32I-NEXT: sb s4, 42(sp)
-; RV32I-NEXT: sb s3, 41(sp)
-; RV32I-NEXT: sb s2, 40(sp)
-; RV32I-NEXT: sb s1, 39(sp)
-; RV32I-NEXT: sb s0, 38(sp)
-; RV32I-NEXT: sb t6, 37(sp)
-; RV32I-NEXT: sb t5, 36(sp)
-; RV32I-NEXT: sb t4, 35(sp)
-; RV32I-NEXT: sb t2, 34(sp)
-; RV32I-NEXT: lw a0, 4(sp) # 4-byte Folded Reload
-; RV32I-NEXT: sb a0, 33(sp)
-; RV32I-NEXT: lw a0, 8(sp) # 4-byte Folded Reload
-; RV32I-NEXT: sb a0, 32(sp)
-; RV32I-NEXT: lw a0, 12(sp) # 4-byte Folded Reload
-; RV32I-NEXT: sb a0, 31(sp)
-; RV32I-NEXT: lw a0, 16(sp) # 4-byte Folded Reload
-; RV32I-NEXT: sb a0, 30(sp)
-; RV32I-NEXT: lw a0, 20(sp) # 4-byte Folded Reload
-; RV32I-NEXT: sb a0, 29(sp)
-; RV32I-NEXT: lw a0, 24(sp) # 4-byte Folded Reload
-; RV32I-NEXT: sb a0, 28(sp)
-; RV32I-NEXT: srai a0, t3, 31
-; RV32I-NEXT: sb a0, 88(sp)
-; RV32I-NEXT: sb a0, 84(sp)
-; RV32I-NEXT: sb a0, 80(sp)
-; RV32I-NEXT: sb a0, 76(sp)
-; RV32I-NEXT: sb a0, 72(sp)
-; RV32I-NEXT: sb a0, 68(sp)
-; RV32I-NEXT: sb a0, 64(sp)
-; RV32I-NEXT: sb a0, 60(sp)
-; RV32I-NEXT: srli a1, a0, 24
-; RV32I-NEXT: sb a1, 91(sp)
-; RV32I-NEXT: srli a3, a0, 16
-; RV32I-NEXT: sb a3, 90(sp)
-; RV32I-NEXT: srli a0, a0, 8
-; RV32I-NEXT: sb a0, 89(sp)
-; RV32I-NEXT: sb a1, 87(sp)
-; RV32I-NEXT: sb a3, 86(sp)
-; RV32I-NEXT: sb a0, 85(sp)
-; RV32I-NEXT: sb a1, 83(sp)
-; RV32I-NEXT: sb a3, 82(sp)
-; RV32I-NEXT: sb a0, 81(sp)
-; RV32I-NEXT: sb a1, 79(sp)
-; RV32I-NEXT: sb a3, 78(sp)
-; RV32I-NEXT: sb a0, 77(sp)
-; RV32I-NEXT: sb a1, 75(sp)
-; RV32I-NEXT: sb a3, 74(sp)
-; RV32I-NEXT: sb a0, 73(sp)
-; RV32I-NEXT: sb a1, 71(sp)
-; RV32I-NEXT: sb a3, 70(sp)
-; RV32I-NEXT: sb a0, 69(sp)
-; RV32I-NEXT: sb a1, 67(sp)
-; RV32I-NEXT: sb a3, 66(sp)
-; RV32I-NEXT: sb a0, 65(sp)
-; RV32I-NEXT: sb a1, 63(sp)
-; RV32I-NEXT: sb a3, 62(sp)
-; RV32I-NEXT: sb a0, 61(sp)
-; RV32I-NEXT: slli a0, t1, 24
-; RV32I-NEXT: srli a0, a0, 27
-; RV32I-NEXT: addi a4, sp, 28
-; RV32I-NEXT: add a4, a4, a0
-; RV32I-NEXT: lbu a0, 5(a4)
-; RV32I-NEXT: lbu a1, 4(a4)
-; RV32I-NEXT: lbu a3, 6(a4)
-; RV32I-NEXT: lbu a5, 7(a4)
-; RV32I-NEXT: slli a0, a0, 8
-; RV32I-NEXT: or a0, a0, a1
-; RV32I-NEXT: slli a3, a3, 16
-; RV32I-NEXT: slli a5, a5, 24
-; RV32I-NEXT: or a3, a5, a3
-; RV32I-NEXT: or t5, a3, a0
-; RV32I-NEXT: andi a3, t1, 7
-; RV32I-NEXT: lbu a0, 9(a4)
-; RV32I-NEXT: lbu a1, 8(a4)
-; RV32I-NEXT: lbu a5, 10(a4)
-; RV32I-NEXT: lbu a6, 11(a4)
-; RV32I-NEXT: slli a0, a0, 8
-; RV32I-NEXT: or a0, a0, a1
+; RV32I-NEXT: or a3, a3, a4
; RV32I-NEXT: slli a5, a5, 16
; RV32I-NEXT: slli a6, a6, 24
-; RV32I-NEXT: or a1, a6, a5
-; RV32I-NEXT: or a6, a1, a0
-; RV32I-NEXT: slli a0, a6, 1
-; RV32I-NEXT: not t1, a3
-; RV32I-NEXT: sll a0, a0, t1
-; RV32I-NEXT: lbu a1, 1(a4)
-; RV32I-NEXT: lbu a5, 0(a4)
-; RV32I-NEXT: lbu a7, 2(a4)
-; RV32I-NEXT: lbu t0, 3(a4)
-; RV32I-NEXT: slli a1, a1, 8
-; RV32I-NEXT: or a1, a1, a5
+; RV32I-NEXT: or a4, a6, a5
+; RV32I-NEXT: or a3, a4, a3
+; RV32I-NEXT: lbu a4, 5(a0)
+; RV32I-NEXT: lbu a5, 4(a0)
+; RV32I-NEXT: lbu a6, 6(a0)
+; RV32I-NEXT: lbu a7, 7(a0)
+; RV32I-NEXT: slli a4, a4, 8
+; RV32I-NEXT: or a4, a4, a5
+; RV32I-NEXT: slli a6, a6, 16
+; RV32I-NEXT: slli a7, a7, 24
+; RV32I-NEXT: or a5, a7, a6
+; RV32I-NEXT: or a4, a5, a4
+; RV32I-NEXT: lbu a5, 9(a0)
+; RV32I-NEXT: lbu a6, 8(a0)
+; RV32I-NEXT: lbu a7, 10(a0)
+; RV32I-NEXT: lbu t0, 11(a0)
+; RV32I-NEXT: slli a5, a5, 8
+; RV32I-NEXT: or a5, a5, a6
; RV32I-NEXT: slli a7, a7, 16
; RV32I-NEXT: slli t0, t0, 24
-; RV32I-NEXT: or a5, t0, a7
-; RV32I-NEXT: or t0, a5, a1
-; RV32I-NEXT: slli a1, t5, 1
-; RV32I-NEXT: xori t2, a3, 31
-; RV32I-NEXT: sll a1, a1, t2
-; RV32I-NEXT: lbu a5, 13(a4)
-; RV32I-NEXT: lbu a7, 12(a4)
-; RV32I-NEXT: lbu t3, 14(a4)
-; RV32I-NEXT: lbu t4, 15(a4)
-; RV32I-NEXT: slli a5, a5, 8
-; RV32I-NEXT: or a5, a5, a7
+; RV32I-NEXT: or a6, t0, a7
+; RV32I-NEXT: or a5, a6, a5
+; RV32I-NEXT: lbu a6, 13(a0)
+; RV32I-NEXT: lbu a7, 12(a0)
+; RV32I-NEXT: lbu t0, 14(a0)
+; RV32I-NEXT: lbu t1, 15(a0)
+; RV32I-NEXT: slli a6, a6, 8
+; RV32I-NEXT: or a6, a6, a7
+; RV32I-NEXT: slli t0, t0, 16
+; RV32I-NEXT: slli t1, t1, 24
+; RV32I-NEXT: or a7, t1, t0
+; RV32I-NEXT: or a6, a7, a6
+; RV32I-NEXT: lbu a7, 17(a0)
+; RV32I-NEXT: lbu t0, 16(a0)
+; RV32I-NEXT: lbu t1, 18(a0)
+; RV32I-NEXT: lbu t2, 19(a0)
+; RV32I-NEXT: slli a7, a7, 8
+; RV32I-NEXT: or a7, a7, t0
+; RV32I-NEXT: slli t1, t1, 16
+; RV32I-NEXT: slli t2, t2, 24
+; RV32I-NEXT: or t0, t2, t1
+; RV32I-NEXT: or t0, t0, a7
+; RV32I-NEXT: lbu a7, 21(a0)
+; RV32I-NEXT: lbu t1, 20(a0)
+; RV32I-NEXT: lbu t2, 22(a0)
+; RV32I-NEXT: lbu t3, 23(a0)
+; RV32I-NEXT: slli a7, a7, 8
+; RV32I-NEXT: or a7, a7, t1
+; RV32I-NEXT: slli t2, t2, 16
+; RV32I-NEXT: slli t3, t3, 24
+; RV32I-NEXT: or t1, t3, t2
+; RV32I-NEXT: or t1, t1, a7
+; RV32I-NEXT: lbu a7, 25(a0)
+; RV32I-NEXT: lbu t2, 24(a0)
+; RV32I-NEXT: lbu t3, 26(a0)
+; RV32I-NEXT: lbu t4, 27(a0)
+; RV32I-NEXT: slli a7, a7, 8
+; RV32I-NEXT: or a7, a7, t2
; RV32I-NEXT: slli t3, t3, 16
; RV32I-NEXT: slli t4, t4, 24
-; RV32I-NEXT: or a7, t4, t3
-; RV32I-NEXT: or t3, a7, a5
-; RV32I-NEXT: lbu a5, 17(a4)
-; RV32I-NEXT: lbu a7, 16(a4)
-; RV32I-NEXT: lbu t4, 18(a4)
-; RV32I-NEXT: lbu t6, 19(a4)
-; RV32I-NEXT: slli a5, a5, 8
-; RV32I-NEXT: or a5, a5, a7
+; RV32I-NEXT: or t2, t4, t3
+; RV32I-NEXT: or t2, t2, a7
+; RV32I-NEXT: lbu a7, 29(a0)
+; RV32I-NEXT: lbu t3, 28(a0)
+; RV32I-NEXT: lbu t4, 30(a0)
+; RV32I-NEXT: lbu a0, 31(a0)
+; RV32I-NEXT: slli a7, a7, 8
+; RV32I-NEXT: or a7, a7, t3
; RV32I-NEXT: slli t4, t4, 16
-; RV32I-NEXT: slli t6, t6, 24
-; RV32I-NEXT: or a7, t6, t4
-; RV32I-NEXT: or t4, a7, a5
-; RV32I-NEXT: slli a5, t4, 1
-; RV32I-NEXT: sll a7, a5, t1
-; RV32I-NEXT: lbu a5, 21(a4)
-; RV32I-NEXT: lbu t6, 20(a4)
-; RV32I-NEXT: lbu s0, 22(a4)
-; RV32I-NEXT: lbu s1, 23(a4)
-; RV32I-NEXT: slli a5, a5, 8
-; RV32I-NEXT: or a5, a5, t6
-; RV32I-NEXT: slli s0, s0, 16
-; RV32I-NEXT: slli s1, s1, 24
-; RV32I-NEXT: or s0, s1, s0
-; RV32I-NEXT: or s0, s0, a5
-; RV32I-NEXT: lbu a5, 25(a4)
-; RV32I-NEXT: lbu t6, 24(a4)
-; RV32I-NEXT: lbu s1, 26(a4)
-; RV32I-NEXT: lbu s2, 27(a4)
-; RV32I-NEXT: slli a5, a5, 8
-; RV32I-NEXT: or a5, a5, t6
-; RV32I-NEXT: slli s1, s1, 16
-; RV32I-NEXT: slli s2, s2, 24
-; RV32I-NEXT: or t6, s2, s1
-; RV32I-NEXT: or t6, t6, a5
-; RV32I-NEXT: lbu a5, 29(a4)
-; RV32I-NEXT: lbu s1, 28(a4)
-; RV32I-NEXT: slli s2, t6, 1
-; RV32I-NEXT: sll t1, s2, t1
-; RV32I-NEXT: slli a5, a5, 8
-; RV32I-NEXT: or a5, a5, s1
-; RV32I-NEXT: lbu s1, 30(a4)
-; RV32I-NEXT: lbu a4, 31(a4)
-; RV32I-NEXT: slli s2, t3, 1
-; RV32I-NEXT: sll s2, s2, t2
-; RV32I-NEXT: slli s1, s1, 16
-; RV32I-NEXT: slli a4, a4, 24
-; RV32I-NEXT: or a4, a4, s1
-; RV32I-NEXT: slli s1, s0, 1
-; RV32I-NEXT: sll s1, s1, t2
-; RV32I-NEXT: or s3, a4, a5
-; RV32I-NEXT: slli a4, s3, 1
-; RV32I-NEXT: sll t2, a4, t2
-; RV32I-NEXT: srl a4, t5, a3
-; RV32I-NEXT: srl a5, t0, a3
-; RV32I-NEXT: srl t0, t3, a3
-; RV32I-NEXT: srl a6, a6, a3
-; RV32I-NEXT: srl t3, s0, a3
-; RV32I-NEXT: srl t4, t4, a3
-; RV32I-NEXT: srl t5, t6, a3
-; RV32I-NEXT: sra a3, s3, a3
-; RV32I-NEXT: srli t6, t5, 16
-; RV32I-NEXT: sb t6, 26(a2)
-; RV32I-NEXT: or t2, t5, t2
-; RV32I-NEXT: sb t5, 24(a2)
-; RV32I-NEXT: srli t5, t5, 8
-; RV32I-NEXT: sb t5, 25(a2)
-; RV32I-NEXT: srli t5, a3, 24
-; RV32I-NEXT: sb t5, 31(a2)
-; RV32I-NEXT: srli t5, a3, 16
-; RV32I-NEXT: sb t5, 30(a2)
-; RV32I-NEXT: sb a3, 28(a2)
-; RV32I-NEXT: srli a3, a3, 8
-; RV32I-NEXT: sb a3, 29(a2)
-; RV32I-NEXT: srli a3, t4, 16
-; RV32I-NEXT: sb a3, 18(a2)
-; RV32I-NEXT: or a3, t4, s1
-; RV32I-NEXT: sb t4, 16(a2)
-; RV32I-NEXT: srli t4, t4, 8
-; RV32I-NEXT: sb t4, 17(a2)
-; RV32I-NEXT: srli t4, t3, 16
-; RV32I-NEXT: sb t4, 22(a2)
-; RV32I-NEXT: or t1, t3, t1
-; RV32I-NEXT: sb t3, 20(a2)
-; RV32I-NEXT: srli t3, t3, 8
-; RV32I-NEXT: sb t3, 21(a2)
-; RV32I-NEXT: srli t3, a6, 16
-; RV32I-NEXT: sb t3, 10(a2)
-; RV32I-NEXT: or t3, a6, s2
-; RV32I-NEXT: sb a6, 8(a2)
-; RV32I-NEXT: srli a6, a6, 8
-; RV32I-NEXT: sb a6, 9(a2)
-; RV32I-NEXT: srli a6, t0, 16
-; RV32I-NEXT: sb a6, 14(a2)
-; RV32I-NEXT: or a6, t0, a7
-; RV32I-NEXT: sb t0, 12(a2)
-; RV32I-NEXT: srli a7, t0, 8
-; RV32I-NEXT: sb a7, 13(a2)
-; RV32I-NEXT: srli a7, a5, 16
-; RV32I-NEXT: sb a7, 2(a2)
-; RV32I-NEXT: or a1, a5, a1
-; RV32I-NEXT: sb a5, 0(a2)
-; RV32I-NEXT: srli a5, a5, 8
-; RV32I-NEXT: sb a5, 1(a2)
-; RV32I-NEXT: srli a5, a4, 16
-; RV32I-NEXT: sb a5, 6(a2)
-; RV32I-NEXT: or a0, a4, a0
-; RV32I-NEXT: sb a4, 4(a2)
+; RV32I-NEXT: slli a0, a0, 24
+; RV32I-NEXT: or t3, a0, t4
+; RV32I-NEXT: or t3, t3, a7
+; RV32I-NEXT: lbu a7, 1(a1)
+; RV32I-NEXT: lbu t4, 0(a1)
+; RV32I-NEXT: lbu t5, 2(a1)
+; RV32I-NEXT: lbu a1, 3(a1)
+; RV32I-NEXT: slli a7, a7, 8
+; RV32I-NEXT: or a7, a7, t4
+; RV32I-NEXT: slli t5, t5, 16
+; RV32I-NEXT: slli a1, a1, 24
+; RV32I-NEXT: or a1, a1, t5
+; RV32I-NEXT: or a7, a1, a7
+; RV32I-NEXT: srai a0, a0, 31
+; RV32I-NEXT: sw a0, 60(sp)
+; RV32I-NEXT: sw a0, 56(sp)
+; RV32I-NEXT: sw a0, 52(sp)
+; RV32I-NEXT: sw a0, 48(sp)
+; RV32I-NEXT: sw a0, 44(sp)
+; RV32I-NEXT: sw a0, 40(sp)
+; RV32I-NEXT: sw a0, 36(sp)
+; RV32I-NEXT: sw a0, 32(sp)
+; RV32I-NEXT: sw t3, 28(sp)
+; RV32I-NEXT: sw t2, 24(sp)
+; RV32I-NEXT: sw t1, 20(sp)
+; RV32I-NEXT: sw t0, 16(sp)
+; RV32I-NEXT: sw a6, 12(sp)
+; RV32I-NEXT: sw a5, 8(sp)
+; RV32I-NEXT: sw a4, 4(sp)
+; RV32I-NEXT: sw a3, 0(sp)
+; RV32I-NEXT: srli a0, a7, 3
+; RV32I-NEXT: andi a0, a0, 28
+; RV32I-NEXT: mv a1, sp
+; RV32I-NEXT: add a4, a1, a0
+; RV32I-NEXT: lw a1, 4(a4)
+; RV32I-NEXT: srl a0, a1, a7
+; RV32I-NEXT: lw a5, 8(a4)
+; RV32I-NEXT: andi a3, a7, 31
+; RV32I-NEXT: xori a6, a3, 31
+; RV32I-NEXT: lw a3, 0(a4)
+; RV32I-NEXT: slli t0, a5, 1
+; RV32I-NEXT: sll t0, t0, a6
+; RV32I-NEXT: or a0, a0, t0
+; RV32I-NEXT: srl a3, a3, a7
+; RV32I-NEXT: slli a1, a1, 1
+; RV32I-NEXT: lw t0, 12(a4)
+; RV32I-NEXT: lw t1, 16(a4)
+; RV32I-NEXT: sll a1, a1, a6
+; RV32I-NEXT: or a1, a3, a1
+; RV32I-NEXT: srl a3, t0, a7
+; RV32I-NEXT: slli t2, t1, 1
+; RV32I-NEXT: sll t2, t2, a6
+; RV32I-NEXT: or a3, a3, t2
+; RV32I-NEXT: srl a5, a5, a7
+; RV32I-NEXT: slli t0, t0, 1
+; RV32I-NEXT: lw t2, 20(a4)
+; RV32I-NEXT: lw t3, 24(a4)
+; RV32I-NEXT: sll t0, t0, a6
+; RV32I-NEXT: or a5, a5, t0
+; RV32I-NEXT: srl t0, t2, a7
+; RV32I-NEXT: slli t4, t3, 1
+; RV32I-NEXT: sll t4, t4, a6
+; RV32I-NEXT: or t0, t0, t4
+; RV32I-NEXT: srl t1, t1, a7
+; RV32I-NEXT: slli t2, t2, 1
+; RV32I-NEXT: lw a4, 28(a4)
+; RV32I-NEXT: sll t2, t2, a6
+; RV32I-NEXT: or t1, t1, t2
+; RV32I-NEXT: srl t2, t3, a7
+; RV32I-NEXT: slli t3, a4, 1
+; RV32I-NEXT: sll a6, t3, a6
+; RV32I-NEXT: or a6, t2, a6
+; RV32I-NEXT: sra a4, a4, a7
+; RV32I-NEXT: sb a4, 28(a2)
+; RV32I-NEXT: srli a7, a4, 24
+; RV32I-NEXT: sb a7, 31(a2)
+; RV32I-NEXT: srli a7, a4, 16
+; RV32I-NEXT: sb a7, 30(a2)
; RV32I-NEXT: srli a4, a4, 8
-; RV32I-NEXT: sb a4, 5(a2)
-; RV32I-NEXT: srli a4, t2, 24
+; RV32I-NEXT: sb a4, 29(a2)
+; RV32I-NEXT: sb a6, 24(a2)
+; RV32I-NEXT: sb t1, 16(a2)
+; RV32I-NEXT: sb t0, 20(a2)
+; RV32I-NEXT: sb a5, 8(a2)
+; RV32I-NEXT: sb a3, 12(a2)
+; RV32I-NEXT: sb a1, 0(a2)
+; RV32I-NEXT: sb a0, 4(a2)
+; RV32I-NEXT: srli a4, a6, 24
; RV32I-NEXT: sb a4, 27(a2)
-; RV32I-NEXT: srli a3, a3, 24
-; RV32I-NEXT: sb a3, 19(a2)
-; RV32I-NEXT: srli a3, t1, 24
-; RV32I-NEXT: sb a3, 23(a2)
-; RV32I-NEXT: srli a3, t3, 24
-; RV32I-NEXT: sb a3, 11(a2)
-; RV32I-NEXT: srli a3, a6, 24
-; RV32I-NEXT: sb a3, 15(a2)
-; RV32I-NEXT: srli a1, a1, 24
-; RV32I-NEXT: sb a1, 3(a2)
-; RV32I-NEXT: srli a0, a0, 24
-; RV32I-NEXT: sb a0, 7(a2)
-; RV32I-NEXT: lw ra, 140(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s0, 136(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s1, 132(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s2, 128(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s3, 124(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s4, 120(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s5, 116(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s6, 112(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s7, 108(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s8, 104(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s9, 100(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s10, 96(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s11, 92(sp) # 4-byte Folded Reload
-; RV32I-NEXT: addi sp, sp, 144
+; RV32I-NEXT: srli a4, a6, 16
+; RV32I-NEXT: sb a4, 26(a2)
+; RV32I-NEXT: srli a4, a6, 8
+; RV32I-NEXT: sb a4, 25(a2)
+; RV32I-NEXT: srli a4, t1, 24
+; RV32I-NEXT: sb a4, 19(a2)
+; RV32I-NEXT: srli a4, t1, 16
+; RV32I-NEXT: sb a4, 18(a2)
+; RV32I-NEXT: srli a4, t1, 8
+; RV32I-NEXT: sb a4, 17(a2)
+; RV32I-NEXT: srli a4, t0, 24
+; RV32I-NEXT: sb a4, 23(a2)
+; RV32I-NEXT: srli a4, t0, 16
+; RV32I-NEXT: sb a4, 22(a2)
+; RV32I-NEXT: srli a4, t0, 8
+; RV32I-NEXT: sb a4, 21(a2)
+; RV32I-NEXT: srli a4, a5, 24
+; RV32I-NEXT: sb a4, 11(a2)
+; RV32I-NEXT: srli a4, a5, 16
+; RV32I-NEXT: sb a4, 10(a2)
+; RV32I-NEXT: srli a5, a5, 8
+; RV32I-NEXT: sb a5, 9(a2)
+; RV32I-NEXT: srli a4, a3, 24
+; RV32I-NEXT: sb a4, 15(a2)
+; RV32I-NEXT: srli a4, a3, 16
+; RV32I-NEXT: sb a4, 14(a2)
+; RV32I-NEXT: srli a3, a3, 8
+; RV32I-NEXT: sb a3, 13(a2)
+; RV32I-NEXT: srli a3, a1, 24
+; RV32I-NEXT: sb a3, 3(a2)
+; RV32I-NEXT: srli a3, a1, 16
+; RV32I-NEXT: sb a3, 2(a2)
+; RV32I-NEXT: srli a1, a1, 8
+; RV32I-NEXT: sb a1, 1(a2)
+; RV32I-NEXT: srli a1, a0, 24
+; RV32I-NEXT: sb a1, 7(a2)
+; RV32I-NEXT: srli a1, a0, 16
+; RV32I-NEXT: sb a1, 6(a2)
+; RV32I-NEXT: srli a0, a0, 8
+; RV32I-NEXT: sb a0, 5(a2)
+; RV32I-NEXT: addi sp, sp, 64
; RV32I-NEXT: ret
%src = load i256, ptr %src.ptr, align 1
%bitOff = load i256, ptr %bitOff.ptr, align 1
diff --git a/llvm/test/CodeGen/SPARC/salvage-debug-isel.ll b/llvm/test/CodeGen/SPARC/salvage-debug-isel.ll
new file mode 100644
index 0000000..ce44d3a
--- /dev/null
+++ b/llvm/test/CodeGen/SPARC/salvage-debug-isel.ll
@@ -0,0 +1,69 @@
+; RUN: llc -march=sparc -O1 %s -o - -stop-after=finalize-isel | FileCheck %s
+
+; Debug info salvaging in isel means we should see a location for this variable.
+
+; CHECK-LABEL: name: a
+; CHECK: DBG_VALUE %stack.0.b, $noreg, ![[#]], !DIExpression(DW_OP_plus_uconst, 3, DW_OP_stack_value)
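+;
+; A note on what is being checked (interpretation of the IR below): the GEP
+; feeding the #dbg_value (%arrayidx1 = %b + 3) is folded away during
+; instruction selection, so the location of "d" is salvaged as the frame
+; index of %b plus a DW_OP_plus_uconst 3 offset, as the line above checks.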
+
+define dso_local zeroext i16 @a() local_unnamed_addr #0 !dbg !7 {
+entry:
+ %b = alloca [6 x i8], align 1
+ %arrayidx = getelementptr inbounds [6 x i8], ptr %b, i32 0, i32 undef, !dbg !27
+ store i8 4, ptr %arrayidx, align 1, !dbg !28
+ %arrayidx1 = getelementptr inbounds i8, ptr %b, i32 3, !dbg !32
+ #dbg_value(ptr %arrayidx1, !22, !DIExpression(), !25)
+ %0 = load i8, ptr %arrayidx1, align 1, !dbg !33
+ %tobool.not = icmp eq i8 %0, 0, !dbg !35
+ br i1 %tobool.not, label %if.end, label %for.cond, !dbg !36
+
+for.cond: ; preds = %entry, %for.cond
+ br label %for.cond, !dbg !37, !llvm.loop !40
+
+if.end: ; preds = %entry
+ ret i16 undef, !dbg !44
+}
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!2, !3, !4, !5}
+!llvm.ident = !{!6}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C11, file: !1, producer: "clang version 20.0.0git.prerel", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, splitDebugInlining: false, nameTableKind: None)
+!1 = !DIFile(filename: "file.c", directory: "/path", checksumkind: CSK_MD5, checksum: "aa7b5139660a2329a6409414c44cc1f6")
+!2 = !{i32 7, !"Dwarf Version", i32 5}
+!3 = !{i32 2, !"Debug Info Version", i32 3}
+!4 = !{i32 1, !"wchar_size", i32 4}
+!5 = !{i32 7, !"debug-info-assignment-tracking", i1 true}
+!6 = !{!"clang version 20.0.0git.prerel"}
+!7 = distinct !DISubprogram(name: "a", scope: !1, file: !1, line: 2, type: !8, scopeLine: 2, flags: DIFlagAllCallsDescribed, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0, retainedNodes: !13)
+!8 = !DISubroutineType(types: !9)
+!9 = !{!10}
+!10 = !DIDerivedType(tag: DW_TAG_typedef, name: "uint16_t", file: !11, line: 277, baseType: !12)
+!11 = !DIFile(filename: "stdint.h", directory: "", checksumkind: CSK_MD5, checksum: "d9e8f73f3756bbd642f1729623e09484")
+!12 = !DIBasicType(name: "unsigned short", size: 16, encoding: DW_ATE_unsigned)
+!13 = !{!14, !20, !22}
+!14 = !DILocalVariable(name: "b", scope: !7, file: !1, line: 3, type: !15)
+!15 = !DICompositeType(tag: DW_TAG_array_type, baseType: !16, size: 48, elements: !18)
+!16 = !DIDerivedType(tag: DW_TAG_typedef, name: "int8_t", file: !11, line: 298, baseType: !17)
+!17 = !DIBasicType(name: "signed char", size: 8, encoding: DW_ATE_signed_char)
+!18 = !{!19}
+!19 = !DISubrange(count: 6)
+!20 = !DILocalVariable(name: "c", scope: !7, file: !1, line: 4, type: !21)
+!21 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
+!22 = !DILocalVariable(name: "d", scope: !7, file: !1, line: 6, type: !23)
+!23 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !16, size: 32)
+!25 = !DILocation(line: 0, scope: !7)
+!27 = !DILocation(line: 5, column: 3, scope: !7)
+!28 = !DILocation(line: 5, column: 8, scope: !7)
+!32 = !DILocation(line: 6, column: 16, scope: !7)
+!33 = !DILocation(line: 7, column: 33, scope: !34)
+!34 = distinct !DILexicalBlock(scope: !7, file: !1, line: 7, column: 7)
+!35 = !DILocation(line: 7, column: 7, scope: !34)
+!36 = !DILocation(line: 7, column: 7, scope: !7)
+!37 = !DILocation(line: 8, column: 5, scope: !38)
+!38 = distinct !DILexicalBlock(scope: !39, file: !1, line: 8, column: 5)
+!39 = distinct !DILexicalBlock(scope: !34, file: !1, line: 8, column: 5)
+!40 = distinct !{!40, !41, !42, !43}
+!41 = !DILocation(line: 8, column: 5, scope: !39)
+!42 = !DILocation(line: 9, column: 7, scope: !39)
+!43 = !{!"llvm.loop.unroll.disable"}
+!44 = !DILocation(line: 10, column: 1, scope: !7)
diff --git a/llvm/test/CodeGen/SPIRV/AtomicCompareExchange.ll b/llvm/test/CodeGen/SPIRV/AtomicCompareExchange.ll
index 323afec..f8207c56 100644
--- a/llvm/test/CodeGen/SPIRV/AtomicCompareExchange.ll
+++ b/llvm/test/CodeGen/SPIRV/AtomicCompareExchange.ll
@@ -1,7 +1,7 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
; CHECK-SPIRV: %[[#Int:]] = OpTypeInt 32 0
-; CHECK-SPIRV-DAG: %[[#MemScope_Device:]] = OpConstant %[[#Int]] 1
+; CHECK-SPIRV-DAG: %[[#MemScope_CrossDevice:]] = OpConstant %[[#Int]] 0
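+; Atomics without an explicit syncscope now lower to the CrossDevice
+; (AllSvmDevices) scope, which the SPIR-V spec encodes as 0.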
; CHECK-SPIRV-DAG: %[[#MemSemEqual_SeqCst:]] = OpConstant %[[#Int]] 16
; CHECK-SPIRV-DAG: %[[#MemSemUnequal_Acquire:]] = OpConstant %[[#Int]] 2
; CHECK-SPIRV-DAG: %[[#Constant_456:]] = OpConstant %[[#Int]] 456
@@ -11,7 +11,7 @@
; CHECK-SPIRV-DAG: %[[#UndefStruct:]] = OpUndef %[[#Struct]]
; CHECK-SPIRV: %[[#Value:]] = OpLoad %[[#Int]] %[[#Value_ptr:]]
-; CHECK-SPIRV: %[[#Res:]] = OpAtomicCompareExchange %[[#Int]] %[[#Pointer:]] %[[#MemScope_Device]]
+; CHECK-SPIRV: %[[#Res:]] = OpAtomicCompareExchange %[[#Int]] %[[#Pointer:]] %[[#MemScope_CrossDevice]]
; CHECK-SPIRV-SAME: %[[#MemSemEqual_SeqCst]] %[[#MemSemUnequal_Acquire]] %[[#Value]] %[[#Comparator:]]
; CHECK-SPIRV: %[[#Success:]] = OpIEqual %[[#]] %[[#Res]] %[[#Comparator]]
; CHECK-SPIRV: %[[#Composite_0:]] = OpCompositeInsert %[[#Struct]] %[[#Res]] %[[#UndefStruct]] 0
@@ -34,7 +34,7 @@ cmpxchg.continue: ; preds = %cmpxchg.store_expec
ret void
}
-; CHECK-SPIRV: %[[#Res_1:]] = OpAtomicCompareExchange %[[#Int]] %[[#Ptr:]] %[[#MemScope_Device]]
+; CHECK-SPIRV: %[[#Res_1:]] = OpAtomicCompareExchange %[[#Int]] %[[#Ptr:]] %[[#MemScope_CrossDevice]]
; CHECK-SPIRV-SAME: %[[#MemSemEqual_SeqCst]] %[[#MemSemUnequal_Acquire]] %[[#Constant_456]] %[[#Constant_128]]
; CHECK-SPIRV: %[[#Success_1:]] = OpIEqual %[[#]] %[[#Res_1]] %[[#Constant_128]]
; CHECK-SPIRV: %[[#Composite:]] = OpCompositeInsert %[[#Struct]] %[[#Res_1]] %[[#UndefStruct]] 0
diff --git a/llvm/test/CodeGen/SPIRV/atomicrmw.ll b/llvm/test/CodeGen/SPIRV/atomicrmw.ll
index 5f95a97..0757605 100644
--- a/llvm/test/CodeGen/SPIRV/atomicrmw.ll
+++ b/llvm/test/CodeGen/SPIRV/atomicrmw.ll
@@ -5,8 +5,7 @@
; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; CHECK: %[[#Int:]] = OpTypeInt 32 0
-; CHECK-DAG: %[[#Scope_Device:]] = OpConstant %[[#Int]] 1{{$}}
-; CHECK-DAG: %[[#MemSem_Relaxed:]] = OpConstant %[[#Int]] 0
+; CHECK-DAG: %[[#Scope_CrossDevice:]] = OpConstant %[[#Int]] 0{{$}}
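+; Relaxed ("monotonic") semantics is also encoded as 0 and may share this
+; constant, so the monotonic checks below match that operand generically.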
; CHECK-DAG: %[[#MemSem_Acquire:]] = OpConstant %[[#Int]] 2
; CHECK-DAG: %[[#MemSem_Release:]] = OpConstant %[[#Int]] 4{{$}}
; CHECK-DAG: %[[#MemSem_AcquireRelease:]] = OpConstant %[[#Int]] 8
@@ -25,37 +24,37 @@
define dso_local spir_func void @test_atomicrmw() local_unnamed_addr {
entry:
%0 = atomicrmw xchg i32 addrspace(1)* @ui, i32 42 acq_rel
-; CHECK: %[[#]] = OpAtomicExchange %[[#Int]] %[[#Pointer]] %[[#Scope_Device]] %[[#MemSem_AcquireRelease]] %[[#Value]]
+; CHECK: %[[#]] = OpAtomicExchange %[[#Int]] %[[#Pointer]] %[[#Scope_CrossDevice]] %[[#MemSem_AcquireRelease]] %[[#Value]]
%1 = atomicrmw xchg float addrspace(1)* @f, float 42.000000e+00 seq_cst
-; CHECK: %[[#]] = OpAtomicExchange %[[#Float]] %[[#FPPointer]] %[[#Scope_Device]] %[[#MemSem_SequentiallyConsistent]] %[[#FPValue]]
+; CHECK: %[[#]] = OpAtomicExchange %[[#Float]] %[[#FPPointer]] %[[#Scope_CrossDevice]] %[[#MemSem_SequentiallyConsistent]] %[[#FPValue]]
%2 = atomicrmw add i32 addrspace(1)* @ui, i32 42 monotonic
-; CHECK: %[[#]] = OpAtomicIAdd %[[#Int]] %[[#Pointer]] %[[#Scope_Device]] %[[#MemSem_Relaxed]] %[[#Value]]
+; CHECK: %[[#]] = OpAtomicIAdd %[[#Int]] %[[#Pointer]] %[[#Scope_CrossDevice]] %{{.+}} %[[#Value]]
%3 = atomicrmw sub i32 addrspace(1)* @ui, i32 42 acquire
-; CHECK: %[[#]] = OpAtomicISub %[[#Int]] %[[#Pointer]] %[[#Scope_Device]] %[[#MemSem_Acquire]] %[[#Value]]
+; CHECK: %[[#]] = OpAtomicISub %[[#Int]] %[[#Pointer]] %[[#Scope_CrossDevice]] %[[#MemSem_Acquire]] %[[#Value]]
%4 = atomicrmw or i32 addrspace(1)* @ui, i32 42 release
-; CHECK: %[[#]] = OpAtomicOr %[[#Int]] %[[#Pointer]] %[[#Scope_Device]] %[[#MemSem_Release]] %[[#Value]]
+; CHECK: %[[#]] = OpAtomicOr %[[#Int]] %[[#Pointer]] %[[#Scope_CrossDevice]] %[[#MemSem_Release]] %[[#Value]]
%5 = atomicrmw xor i32 addrspace(1)* @ui, i32 42 acq_rel
-; CHECK: %[[#]] = OpAtomicXor %[[#Int]] %[[#Pointer]] %[[#Scope_Device]] %[[#MemSem_AcquireRelease]] %[[#Value]]
+; CHECK: %[[#]] = OpAtomicXor %[[#Int]] %[[#Pointer]] %[[#Scope_CrossDevice]] %[[#MemSem_AcquireRelease]] %[[#Value]]
%6 = atomicrmw and i32 addrspace(1)* @ui, i32 42 seq_cst
-; CHECK: %[[#]] = OpAtomicAnd %[[#Int]] %[[#Pointer]] %[[#Scope_Device]] %[[#MemSem_SequentiallyConsistent]] %[[#Value]]
+; CHECK: %[[#]] = OpAtomicAnd %[[#Int]] %[[#Pointer]] %[[#Scope_CrossDevice]] %[[#MemSem_SequentiallyConsistent]] %[[#Value]]
%7 = atomicrmw max i32 addrspace(1)* @ui, i32 42 monotonic
-; CHECK: %[[#]] = OpAtomicSMax %[[#Int]] %[[#Pointer]] %[[#Scope_Device]] %[[#MemSem_Relaxed]] %[[#Value]]
+; CHECK: %[[#]] = OpAtomicSMax %[[#Int]] %[[#Pointer]] %[[#Scope_CrossDevice]] %{{.*}} %[[#Value]]
%8 = atomicrmw min i32 addrspace(1)* @ui, i32 42 acquire
-; CHECK: %[[#]] = OpAtomicSMin %[[#Int]] %[[#Pointer]] %[[#Scope_Device]] %[[#MemSem_Acquire]] %[[#Value]]
+; CHECK: %[[#]] = OpAtomicSMin %[[#Int]] %[[#Pointer]] %[[#Scope_CrossDevice]] %[[#MemSem_Acquire]] %[[#Value]]
%9 = atomicrmw umax i32 addrspace(1)* @ui, i32 42 release
-; CHECK: %[[#]] = OpAtomicUMax %[[#Int]] %[[#Pointer]] %[[#Scope_Device]] %[[#MemSem_Release]] %[[#Value]]
+; CHECK: %[[#]] = OpAtomicUMax %[[#Int]] %[[#Pointer]] %[[#Scope_CrossDevice]] %[[#MemSem_Release]] %[[#Value]]
%10 = atomicrmw umin i32 addrspace(1)* @ui, i32 42 acq_rel
-; CHECK: %[[#]] = OpAtomicUMin %[[#Int]] %[[#Pointer]] %[[#Scope_Device]] %[[#MemSem_AcquireRelease]] %[[#Value]]
+; CHECK: %[[#]] = OpAtomicUMin %[[#Int]] %[[#Pointer]] %[[#Scope_CrossDevice]] %[[#MemSem_AcquireRelease]] %[[#Value]]
ret void
}
diff --git a/llvm/test/CodeGen/SPIRV/debug-info/debug-compilation-unit.ll b/llvm/test/CodeGen/SPIRV/debug-info/debug-compilation-unit.ll
index bff4660..794dcd6 100644
--- a/llvm/test/CodeGen/SPIRV/debug-info/debug-compilation-unit.ll
+++ b/llvm/test/CodeGen/SPIRV/debug-info/debug-compilation-unit.ll
@@ -29,11 +29,13 @@ define spir_func void @foo() {
entry:
ret void
}
+; CHECK-SPIRV-NOT: Lfunc_end0:
define spir_func void @bar() {
entry:
ret void
}
+; CHECK-SPIRV-NOT: Lfunc_end1:
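+; The NOT checks above assert that the SPIR-V asm printer emits no
+; per-function end labels between function bodies.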
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!2, !3, !4, !5}
diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_add/atomicrmw_faddfsub_double.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_add/atomicrmw_faddfsub_double.ll
index 14035a6..c2ed2f8 100644
--- a/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_add/atomicrmw_faddfsub_double.ll
+++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_add/atomicrmw_faddfsub_double.ll
@@ -10,13 +10,14 @@
; CHECK-DAG: %[[TyInt32:[0-9]+]] = OpTypeInt 32 0
; CHECK-DAG: %[[Const0:[0-9]+]] = OpConstant %[[TyFP64]] 0
; CHECK-DAG: %[[Const42:[0-9]+]] = OpConstant %[[TyFP64]] 42
-; CHECK-DAG: %[[ScopeDevice:[0-9]+]] = OpConstant %[[TyInt32]] 1
+; CHECK-DAG: %[[ScopeAllSvmDevices:[0-9]+]] = OpConstantNull %[[TyInt32]]
; CHECK-DAG: %[[MemSeqCst:[0-9]+]] = OpConstant %[[TyInt32]] 16
+; CHECK-DAG: %[[ScopeDevice:[0-9]+]] = OpConstant %[[TyInt32]] 1
; CHECK-DAG: %[[TyFP64Ptr:[0-9]+]] = OpTypePointer {{[a-zA-Z]+}} %[[TyFP64]]
; CHECK-DAG: %[[DblPtr:[0-9]+]] = OpVariable %[[TyFP64Ptr]] {{[a-zA-Z]+}} %[[Const0]]
-; CHECK: OpAtomicFAddEXT %[[TyFP64]] %[[DblPtr]] %[[ScopeDevice]] %[[MemSeqCst]] %[[Const42]]
+; CHECK: OpAtomicFAddEXT %[[TyFP64]] %[[DblPtr]] %[[ScopeAllSvmDevices]] %[[MemSeqCst]] %[[Const42]]
; CHECK: %[[Const42Neg:[0-9]+]] = OpFNegate %[[TyFP64]] %[[Const42]]
-; CHECK: OpAtomicFAddEXT %[[TyFP64]] %[[DblPtr]] %[[ScopeDevice]] %[[MemSeqCst]] %[[Const42Neg]]
+; CHECK: OpAtomicFAddEXT %[[TyFP64]] %[[DblPtr]] %[[ScopeAllSvmDevices]] %[[MemSeqCst]] %[[Const42Neg]]
; CHECK: OpAtomicFAddEXT %[[TyFP64]] %[[DblPtr]] %[[ScopeDevice]] %[[MemSeqCst]] %[[Const42]]
target datalayout = "e-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024"
diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_add/atomicrmw_faddfsub_float.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_add/atomicrmw_faddfsub_float.ll
index d348114..075e63e 100644
--- a/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_add/atomicrmw_faddfsub_float.ll
+++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_add/atomicrmw_faddfsub_float.ll
@@ -10,15 +10,16 @@
; CHECK-DAG: %[[TyInt32:[0-9]+]] = OpTypeInt 32 0
; CHECK-DAG: %[[Const0:[0-9]+]] = OpConstant %[[TyFP32]] 0
; CHECK-DAG: %[[Const42:[0-9]+]] = OpConstant %[[TyFP32]] 42
+; CHECK-DAG: %[[ScopeAllSvmDevices:[0-9]+]] = OpConstantNull %[[TyInt32]]
+; CHECK-DAG: %[[MemSeqCst:[0-9]+]] = OpConstant %[[TyInt32]] 16
; CHECK-DAG: %[[ScopeDevice:[0-9]+]] = OpConstant %[[TyInt32]] 1
; CHECK-DAG: %[[ScopeWorkgroup:[0-9]+]] = OpConstant %[[TyInt32]] 2
-; CHECK-DAG: %[[MemSeqCst:[0-9]+]] = OpConstant %[[TyInt32]] 16
; CHECK-DAG: %[[WorkgroupMemory:[0-9]+]] = OpConstant %[[TyInt32]] 512
; CHECK-DAG: %[[TyFP32Ptr:[0-9]+]] = OpTypePointer {{[a-zA-Z]+}} %[[TyFP32]]
; CHECK-DAG: %[[DblPtr:[0-9]+]] = OpVariable %[[TyFP32Ptr]] {{[a-zA-Z]+}} %[[Const0]]
-; CHECK: OpAtomicFAddEXT %[[TyFP32]] %[[DblPtr]] %[[ScopeDevice]] %[[MemSeqCst]] %[[Const42]]
+; CHECK: OpAtomicFAddEXT %[[TyFP32]] %[[DblPtr]] %[[ScopeAllSvmDevices]] %[[MemSeqCst]] %[[Const42]]
; CHECK: %[[Const42Neg:[0-9]+]] = OpFNegate %[[TyFP32]] %[[Const42]]
-; CHECK: OpAtomicFAddEXT %[[TyFP32]] %[[DblPtr]] %[[ScopeDevice]] %[[MemSeqCst]] %[[Const42Neg]]
+; CHECK: OpAtomicFAddEXT %[[TyFP32]] %[[DblPtr]] %[[ScopeAllSvmDevices]] %[[MemSeqCst]] %[[Const42Neg]]
; CHECK: OpAtomicFAddEXT %[[TyFP32]] %[[DblPtr]] %[[ScopeDevice]] %[[MemSeqCst]] %[[Const42]]
; CHECK: OpAtomicFAddEXT %[[TyFP32]] %[[DblPtr]] %[[ScopeWorkgroup]] %[[WorkgroupMemory]] %[[Const42]]
; CHECK: %[[Neg42:[0-9]+]] = OpFNegate %[[TyFP32]] %[[Const42]]
diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_add/atomicrmw_faddfsub_half.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_add/atomicrmw_faddfsub_half.ll
index 7da9941..2c93840 100644
--- a/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_add/atomicrmw_faddfsub_half.ll
+++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_add/atomicrmw_faddfsub_half.ll
@@ -13,13 +13,14 @@
; CHECK-DAG: %[[TyInt32:[0-9]+]] = OpTypeInt 32 0
; CHECK-DAG: %[[Const0:[0-9]+]] = OpConstant %[[TyFP16]] 0
; CHECK-DAG: %[[Const42:[0-9]+]] = OpConstant %[[TyFP16]] 20800
-; CHECK-DAG: %[[ScopeDevice:[0-9]+]] = OpConstant %[[TyInt32]] 1
+; CHECK-DAG: %[[ScopeAllSvmDevices:[0-9]+]] = OpConstantNull %[[TyInt32]]
; CHECK-DAG: %[[MemSeqCst:[0-9]+]] = OpConstant %[[TyInt32]] 16
+; CHECK-DAG: %[[ScopeDevice:[0-9]+]] = OpConstant %[[TyInt32]] 1
; CHECK-DAG: %[[TyFP16Ptr:[0-9]+]] = OpTypePointer {{[a-zA-Z]+}} %[[TyFP16]]
; CHECK-DAG: %[[DblPtr:[0-9]+]] = OpVariable %[[TyFP16Ptr]] {{[a-zA-Z]+}} %[[Const0]]
-; CHECK: OpAtomicFAddEXT %[[TyFP16]] %[[DblPtr]] %[[ScopeDevice]] %[[MemSeqCst]] %[[Const42]]
+; CHECK: OpAtomicFAddEXT %[[TyFP16]] %[[DblPtr]] %[[ScopeAllSvmDevices]] %[[MemSeqCst]] %[[Const42]]
; CHECK: %[[Const42Neg:[0-9]+]] = OpFNegate %[[TyFP16]] %[[Const42]]
-; CHECK: OpAtomicFAddEXT %[[TyFP16]] %[[DblPtr]] %[[ScopeDevice]] %[[MemSeqCst]] %[[Const42Neg]]
+; CHECK: OpAtomicFAddEXT %[[TyFP16]] %[[DblPtr]] %[[ScopeAllSvmDevices]] %[[MemSeqCst]] %[[Const42Neg]]
; CHECK: OpAtomicFAddEXT %[[TyFP16]] %[[DblPtr]] %[[ScopeDevice]] %[[MemSeqCst]] %[[Const42]]
target datalayout = "e-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024"
diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_min_max/atomicrmw_fminfmax_double.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_min_max/atomicrmw_fminfmax_double.ll
index a2d0a594..fdc05f4 100644
--- a/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_min_max/atomicrmw_fminfmax_double.ll
+++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_min_max/atomicrmw_fminfmax_double.ll
@@ -10,12 +10,13 @@
; CHECK-DAG: %[[TyInt32:[0-9]+]] = OpTypeInt 32 0
; CHECK-DAG: %[[Const0:[0-9]+]] = OpConstant %[[TyFP64]] 0
; CHECK-DAG: %[[Const42:[0-9]+]] = OpConstant %[[TyFP64]] 42
-; CHECK-DAG: %[[ScopeDevice:[0-9]+]] = OpConstant %[[TyInt32]] 1
+; CHECK-DAG: %[[ScopeAllSvmDevices:[0-9]+]] = OpConstantNull %[[TyInt32]]
; CHECK-DAG: %[[MemSeqCst:[0-9]+]] = OpConstant %[[TyInt32]] 16
+; CHECK-DAG: %[[ScopeDevice:[0-9]+]] = OpConstant %[[TyInt32]] 1
; CHECK-DAG: %[[TyFP64Ptr:[0-9]+]] = OpTypePointer {{[a-zA-Z]+}} %[[TyFP64]]
; CHECK-DAG: %[[DblPtr:[0-9]+]] = OpVariable %[[TyFP64Ptr]] {{[a-zA-Z]+}} %[[Const0]]
-; CHECK: OpAtomicFMinEXT %[[TyFP64]] %[[DblPtr]] %[[ScopeDevice]] %[[MemSeqCst]] %[[Const42]]
-; CHECK: OpAtomicFMaxEXT %[[TyFP64]] %[[DblPtr]] %[[ScopeDevice]] %[[MemSeqCst]] %[[Const42]]
+; CHECK: OpAtomicFMinEXT %[[TyFP64]] %[[DblPtr]] %[[ScopeAllSvmDevices]] %[[MemSeqCst]] %[[Const42]]
+; CHECK: OpAtomicFMaxEXT %[[TyFP64]] %[[DblPtr]] %[[ScopeAllSvmDevices]] %[[MemSeqCst]] %[[Const42]]
; CHECK: OpAtomicFMinEXT %[[TyFP64]] %[[DblPtr]] %[[ScopeDevice]] %[[MemSeqCst]] %[[Const42]]
; CHECK: OpAtomicFMaxEXT %[[TyFP64]] %[[DblPtr]] %[[ScopeDevice]] %[[MemSeqCst]] %[[Const42]]
diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_min_max/atomicrmw_fminfmax_float.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_min_max/atomicrmw_fminfmax_float.ll
index 896b7ac..a7ff448 100644
--- a/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_min_max/atomicrmw_fminfmax_float.ll
+++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_min_max/atomicrmw_fminfmax_float.ll
@@ -10,12 +10,13 @@
; CHECK-DAG: %[[TyInt32:[0-9]+]] = OpTypeInt 32 0
; CHECK-DAG: %[[Const0:[0-9]+]] = OpConstant %[[TyFP32]] 0
; CHECK-DAG: %[[Const42:[0-9]+]] = OpConstant %[[TyFP32]] 42
-; CHECK-DAG: %[[ScopeDevice:[0-9]+]] = OpConstant %[[TyInt32]] 1
+; CHECK-DAG: %[[ScopeAllSvmDevices:[0-9]+]] = OpConstantNull %[[TyInt32]]
; CHECK-DAG: %[[MemSeqCst:[0-9]+]] = OpConstant %[[TyInt32]] 16
+; CHECK-DAG: %[[ScopeDevice:[0-9]+]] = OpConstant %[[TyInt32]] 1
; CHECK-DAG: %[[TyFP32Ptr:[0-9]+]] = OpTypePointer {{[a-zA-Z]+}} %[[TyFP32]]
; CHECK-DAG: %[[DblPtr:[0-9]+]] = OpVariable %[[TyFP32Ptr]] {{[a-zA-Z]+}} %[[Const0]]
-; CHECK: OpAtomicFMinEXT %[[TyFP32]] %[[DblPtr]] %[[ScopeDevice]] %[[MemSeqCst]] %[[Const42]]
-; CHECK: OpAtomicFMaxEXT %[[TyFP32]] %[[DblPtr]] %[[ScopeDevice]] %[[MemSeqCst]] %[[Const42]]
+; CHECK: OpAtomicFMinEXT %[[TyFP32]] %[[DblPtr]] %[[ScopeAllSvmDevices]] %[[MemSeqCst]] %[[Const42]]
+; CHECK: OpAtomicFMaxEXT %[[TyFP32]] %[[DblPtr]] %[[ScopeAllSvmDevices]] %[[MemSeqCst]] %[[Const42]]
; CHECK: OpAtomicFMinEXT %[[TyFP32]] %[[DblPtr]] %[[ScopeDevice]] %[[MemSeqCst]] %[[Const42]]
; CHECK: OpAtomicFMaxEXT %[[TyFP32]] %[[DblPtr]] %[[ScopeDevice]] %[[MemSeqCst]] %[[Const42]]
diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_min_max/atomicrmw_fminfmax_half.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_min_max/atomicrmw_fminfmax_half.ll
index b3f4871..d5576d1 100644
--- a/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_min_max/atomicrmw_fminfmax_half.ll
+++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_min_max/atomicrmw_fminfmax_half.ll
@@ -10,12 +10,13 @@
; CHECK-DAG: %[[TyInt32:[0-9]+]] = OpTypeInt 32 0
; CHECK-DAG: %[[Const0:[0-9]+]] = OpConstant %[[TyFP16]] 0
; CHECK-DAG: %[[ConstHalf:[0-9]+]] = OpConstant %[[TyFP16]] 20800
-; CHECK-DAG: %[[ScopeDevice:[0-9]+]] = OpConstant %[[TyInt32]] 1
+; CHECK-DAG: %[[ScopeAllSvmDevices:[0-9]+]] = OpConstantNull %[[TyInt32]]
; CHECK-DAG: %[[MemSeqCst:[0-9]+]] = OpConstant %[[TyInt32]] 16
+; CHECK-DAG: %[[ScopeDevice:[0-9]+]] = OpConstant %[[TyInt32]] 1
; CHECK-DAG: %[[TyFP16Ptr:[0-9]+]] = OpTypePointer {{[a-zA-Z]+}} %[[TyFP16]]
; CHECK-DAG: %[[DblPtr:[0-9]+]] = OpVariable %[[TyFP16Ptr]] {{[a-zA-Z]+}} %[[Const0]]
-; CHECK: OpAtomicFMinEXT %[[TyFP16]] %[[DblPtr]] %[[ScopeDevice]] %[[MemSeqCst]] %[[ConstHalf]]
-; CHECK: OpAtomicFMaxEXT %[[TyFP16]] %[[DblPtr]] %[[ScopeDevice]] %[[MemSeqCst]] %[[ConstHalf]]
+; CHECK: OpAtomicFMinEXT %[[TyFP16]] %[[DblPtr]] %[[ScopeAllSvmDevices]] %[[MemSeqCst]] %[[ConstHalf]]
+; CHECK: OpAtomicFMaxEXT %[[TyFP16]] %[[DblPtr]] %[[ScopeAllSvmDevices]] %[[MemSeqCst]] %[[ConstHalf]]
; CHECK: OpAtomicFMinEXT %[[TyFP16]] %[[DblPtr]] %[[ScopeDevice]] %[[MemSeqCst]] %[[ConstHalf]]
; CHECK: OpAtomicFMaxEXT %[[TyFP16]] %[[DblPtr]] %[[ScopeDevice]] %[[MemSeqCst]] %[[ConstHalf]]
diff --git a/llvm/test/CodeGen/SPIRV/fence.ll b/llvm/test/CodeGen/SPIRV/fence.ll
index 5da5866..c7496c1 100644
--- a/llvm/test/CodeGen/SPIRV/fence.ll
+++ b/llvm/test/CodeGen/SPIRV/fence.ll
@@ -3,16 +3,16 @@
; CHECK-DAG: OpName %[[#GetScope:]] "_Z8getScopev"
; CHECK-DAG: %[[#Long:]] = OpTypeInt 32 0
-; CHECK-DAG: %[[#ScopeDevice:]] = OpConstant %[[#Long]] 1
; CHECK-DAG: %[[#WrkGrpConst2:]] = OpConstant %[[#Long]] 2
-; CHECK-DAG: %[[#Const3:]] = OpConstant %[[#Long]] 3
+; CHECK-DAG: %[[#ScopeAllSvmDevices:]] = OpConstantNull %[[#Long]]
; CHECK-DAG: %[[#InvocationConst4:]] = OpConstant %[[#Long]] 4
; CHECK-DAG: %[[#Const8:]] = OpConstant %[[#Long]] 8
; CHECK-DAG: %[[#Const16:]] = OpConstant %[[#Long]] 16
+; CHECK-DAG: %[[#Const3:]] = OpConstant %[[#Long]] 3
; CHECK-DAG: %[[#Const912:]] = OpConstant %[[#Long]] 912
-; CHECK: OpMemoryBarrier %[[#ScopeDevice]] %[[#WrkGrpConst2]]
-; CHECK: OpMemoryBarrier %[[#ScopeDevice]] %[[#InvocationConst4]]
-; CHECK: OpMemoryBarrier %[[#ScopeDevice]] %[[#Const8]]
+; CHECK: OpMemoryBarrier %[[#ScopeAllSvmDevices]] %[[#WrkGrpConst2]]
+; CHECK: OpMemoryBarrier %[[#ScopeAllSvmDevices]] %[[#InvocationConst4]]
+; CHECK: OpMemoryBarrier %[[#ScopeAllSvmDevices]] %[[#Const8]]
; CHECK: OpMemoryBarrier %[[#InvocationConst4]] %[[#Const16]]
; CHECK: OpMemoryBarrier %[[#WrkGrpConst2]] %[[#InvocationConst4]]
; CHECK: OpFunctionEnd
diff --git a/llvm/test/CodeGen/SPIRV/instructions/atomic-ptr.ll b/llvm/test/CodeGen/SPIRV/instructions/atomic-ptr.ll
index 9469d24..54d0843 100644
--- a/llvm/test/CodeGen/SPIRV/instructions/atomic-ptr.ll
+++ b/llvm/test/CodeGen/SPIRV/instructions/atomic-ptr.ll
@@ -9,7 +9,7 @@
; CHECK-DAG: %[[#LongTy:]] = OpTypeInt 64 0
; CHECK-DAG: %[[#PtrLongTy:]] = OpTypePointer CrossWorkgroup %[[#LongTy]]
; CHECK-DAG: %[[#IntTy:]] = OpTypeInt 32 0
-; CHECK-DAG: %[[#Scope:]] = OpConstant %[[#IntTy]] 1
+; CHECK-DAG: %[[#Scope:]] = OpConstantNull %[[#IntTy]]
; CHECK-DAG: %[[#MemSem:]] = OpConstant %[[#IntTy]] 8
; CHECK-DAG: %[[#PtrPtrLongTy:]] = OpTypePointer CrossWorkgroup %[[#PtrLongTy]]
diff --git a/llvm/test/CodeGen/SPIRV/instructions/atomic.ll b/llvm/test/CodeGen/SPIRV/instructions/atomic.ll
index 8c5c036..f4e7b12 100644
--- a/llvm/test/CodeGen/SPIRV/instructions/atomic.ll
+++ b/llvm/test/CodeGen/SPIRV/instructions/atomic.ll
@@ -18,16 +18,15 @@
; CHECK-DAG: [[PtrI32Ty:%.*]] = OpTypePointer Function [[I32Ty]]
; CHECK-DAG: [[I64Ty:%.*]] = OpTypeInt 64 0
; CHECK-DAG: [[PtrI64Ty:%.*]] = OpTypePointer Generic [[I64Ty]]
-;; Device scope is encoded with constant 1
-; CHECK-DAG: [[SCOPE:%.*]] = OpConstant [[I32Ty]] 1
+;; Cross-device scope is encoded with constant 0 (OpConstantNull); device scope with constant 1
+; CHECK-DAG: [[CROSSDEVICESCOPE:%.*]] = OpConstantNull [[I32Ty]]
+; CHECK-DAG: [[DEVICESCOPE:%.*]] = OpConstant [[I32Ty]] 1
-;; "monotonic" maps to the relaxed memory semantics, encoded with constant 0
-; CHECK-DAG: [[RELAXED:%.*]] = OpConstantNull [[I32Ty]]
+;; "monotonic" (relaxed, constant 0) now shares the OpConstantNull id with the cross-device scope
; CHECK: [[ADD]] = OpFunction [[I32Ty]]
; CHECK-NEXT: [[A:%.*]] = OpFunctionParameter [[PtrI32Ty]]
; CHECK-NEXT: [[B:%.*]] = OpFunctionParameter [[I32Ty]]
; CHECK-NEXT: OpLabel
-; CHECK-NEXT: [[R:%.*]] = OpAtomicIAdd [[I32Ty]] [[A]] [[SCOPE]] [[RELAXED]] [[B]]
+; CHECK-NEXT: [[R:%.*]] = OpAtomicIAdd [[I32Ty]] [[A]] [[CROSSDEVICESCOPE]] {{.+}} [[B]]
; CHECK-NEXT: OpReturnValue [[R]]
; CHECK-NEXT: OpFunctionEnd
define i32 @test_add(i32* %ptr, i32 %val) {
@@ -39,7 +38,7 @@ define i32 @test_add(i32* %ptr, i32 %val) {
; CHECK-NEXT: [[A:%.*]] = OpFunctionParameter [[PtrI32Ty]]
; CHECK-NEXT: [[B:%.*]] = OpFunctionParameter [[I32Ty]]
; CHECK-NEXT: OpLabel
-; CHECK-NEXT: [[R:%.*]] = OpAtomicISub [[I32Ty]] [[A]] [[SCOPE]] [[RELAXED]] [[B]]
+; CHECK-NEXT: [[R:%.*]] = OpAtomicISub [[I32Ty]] [[A]] [[CROSSDEVICESCOPE]] {{.+}} [[B]]
; CHECK-NEXT: OpReturnValue [[R]]
; CHECK-NEXT: OpFunctionEnd
define i32 @test_sub(i32* %ptr, i32 %val) {
@@ -51,7 +50,7 @@ define i32 @test_sub(i32* %ptr, i32 %val) {
; CHECK-NEXT: [[A:%.*]] = OpFunctionParameter [[PtrI32Ty]]
; CHECK-NEXT: [[B:%.*]] = OpFunctionParameter [[I32Ty]]
; CHECK-NEXT: OpLabel
-; CHECK-NEXT: [[R:%.*]] = OpAtomicSMin [[I32Ty]] [[A]] [[SCOPE]] [[RELAXED]] [[B]]
+; CHECK-NEXT: [[R:%.*]] = OpAtomicSMin [[I32Ty]] [[A]] [[CROSSDEVICESCOPE]] {{.+}} [[B]]
; CHECK-NEXT: OpReturnValue [[R]]
; CHECK-NEXT: OpFunctionEnd
define i32 @test_min(i32* %ptr, i32 %val) {
@@ -63,7 +62,7 @@ define i32 @test_min(i32* %ptr, i32 %val) {
; CHECK-NEXT: [[A:%.*]] = OpFunctionParameter [[PtrI32Ty]]
; CHECK-NEXT: [[B:%.*]] = OpFunctionParameter [[I32Ty]]
; CHECK-NEXT: OpLabel
-; CHECK-NEXT: [[R:%.*]] = OpAtomicSMax [[I32Ty]] [[A]] [[SCOPE]] [[RELAXED]] [[B]]
+; CHECK-NEXT: [[R:%.*]] = OpAtomicSMax [[I32Ty]] [[A]] [[CROSSDEVICESCOPE]] {{.+}} [[B]]
; CHECK-NEXT: OpReturnValue [[R]]
; CHECK-NEXT: OpFunctionEnd
define i32 @test_max(i32* %ptr, i32 %val) {
@@ -75,7 +74,7 @@ define i32 @test_max(i32* %ptr, i32 %val) {
; CHECK-NEXT: [[A:%.*]] = OpFunctionParameter [[PtrI32Ty]]
; CHECK-NEXT: [[B:%.*]] = OpFunctionParameter [[I32Ty]]
; CHECK-NEXT: OpLabel
-; CHECK-NEXT: [[R:%.*]] = OpAtomicUMin [[I32Ty]] [[A]] [[SCOPE]] [[RELAXED]] [[B]]
+; CHECK-NEXT: [[R:%.*]] = OpAtomicUMin [[I32Ty]] [[A]] [[CROSSDEVICESCOPE]] {{.+}} [[B]]
; CHECK-NEXT: OpReturnValue [[R]]
; CHECK-NEXT: OpFunctionEnd
define i32 @test_umin(i32* %ptr, i32 %val) {
@@ -87,7 +86,7 @@ define i32 @test_umin(i32* %ptr, i32 %val) {
; CHECK-NEXT: [[A:%.*]] = OpFunctionParameter [[PtrI32Ty]]
; CHECK-NEXT: [[B:%.*]] = OpFunctionParameter [[I32Ty]]
; CHECK-NEXT: OpLabel
-; CHECK-NEXT: [[R:%.*]] = OpAtomicUMax [[I32Ty]] [[A]] [[SCOPE]] [[RELAXED]] [[B]]
+; CHECK-NEXT: [[R:%.*]] = OpAtomicUMax [[I32Ty]] [[A]] [[CROSSDEVICESCOPE]] {{.+}} [[B]]
; CHECK-NEXT: OpReturnValue [[R]]
; CHECK-NEXT: OpFunctionEnd
define i32 @test_umax(i32* %ptr, i32 %val) {
@@ -99,7 +98,7 @@ define i32 @test_umax(i32* %ptr, i32 %val) {
; CHECK-NEXT: [[A:%.*]] = OpFunctionParameter [[PtrI32Ty]]
; CHECK-NEXT: [[B:%.*]] = OpFunctionParameter [[I32Ty]]
; CHECK-NEXT: OpLabel
-; CHECK-NEXT: [[R:%.*]] = OpAtomicAnd [[I32Ty]] [[A]] [[SCOPE]] [[RELAXED]] [[B]]
+; CHECK-NEXT: [[R:%.*]] = OpAtomicAnd [[I32Ty]] [[A]] [[CROSSDEVICESCOPE]] {{.+}} [[B]]
; CHECK-NEXT: OpReturnValue [[R]]
; CHECK-NEXT: OpFunctionEnd
define i32 @test_and(i32* %ptr, i32 %val) {
@@ -111,7 +110,7 @@ define i32 @test_and(i32* %ptr, i32 %val) {
; CHECK-NEXT: [[A:%.*]] = OpFunctionParameter [[PtrI32Ty]]
; CHECK-NEXT: [[B:%.*]] = OpFunctionParameter [[I32Ty]]
; CHECK-NEXT: OpLabel
-; CHECK-NEXT: [[R:%.*]] = OpAtomicOr [[I32Ty]] [[A]] [[SCOPE]] [[RELAXED]] [[B]]
+; CHECK-NEXT: [[R:%.*]] = OpAtomicOr [[I32Ty]] [[A]] [[CROSSDEVICESCOPE]] {{.+}} [[B]]
; CHECK-NEXT: OpReturnValue [[R]]
; CHECK-NEXT: OpFunctionEnd
define i32 @test_or(i32* %ptr, i32 %val) {
@@ -123,7 +122,7 @@ define i32 @test_or(i32* %ptr, i32 %val) {
; CHECK-NEXT: [[A:%.*]] = OpFunctionParameter [[PtrI32Ty]]
; CHECK-NEXT: [[B:%.*]] = OpFunctionParameter [[I32Ty]]
; CHECK-NEXT: OpLabel
-; CHECK-NEXT: [[R:%.*]] = OpAtomicXor [[I32Ty]] [[A]] [[SCOPE]] [[RELAXED]] [[B]]
+; CHECK-NEXT: [[R:%.*]] = OpAtomicXor [[I32Ty]] [[A]] [[CROSSDEVICESCOPE]] {{.+}} [[B]]
; CHECK-NEXT: OpReturnValue [[R]]
; CHECK-NEXT: OpFunctionEnd
define i32 @test_xor(i32* %ptr, i32 %val) {
@@ -135,10 +134,10 @@ define i32 @test_xor(i32* %ptr, i32 %val) {
; CHECK-NEXT: [[Arg1:%.*]] = OpFunctionParameter [[PtrI64Ty]]
; CHECK-NEXT: [[Arg2:%.*]] = OpFunctionParameter [[I64Ty]]
; CHECK-NEXT: OpLabel
-; CHECK-NEXT: OpAtomicSMin [[I64Ty]] [[Arg1]] [[SCOPE]] [[RELAXED]] [[Arg2]]
-; CHECK-NEXT: OpAtomicSMax [[I64Ty]] [[Arg1]] [[SCOPE]] [[RELAXED]] [[Arg2]]
-; CHECK-NEXT: OpAtomicUMin [[I64Ty]] [[Arg1]] [[SCOPE]] [[RELAXED]] [[Arg2]]
-; CHECK-NEXT: OpAtomicUMax [[I64Ty]] [[Arg1]] [[SCOPE]] [[RELAXED]] [[Arg2]]
+; CHECK-NEXT: OpAtomicSMin [[I64Ty]] [[Arg1]] [[DEVICESCOPE]] {{.+}} [[Arg2]]
+; CHECK-NEXT: OpAtomicSMax [[I64Ty]] [[Arg1]] [[DEVICESCOPE]] {{.+}} [[Arg2]]
+; CHECK-NEXT: OpAtomicUMin [[I64Ty]] [[Arg1]] [[DEVICESCOPE]] {{.+}} [[Arg2]]
+; CHECK-NEXT: OpAtomicUMax [[I64Ty]] [[Arg1]] [[DEVICESCOPE]] {{.+}} [[Arg2]]
; CHECK-NEXT: OpReturn
; CHECK-NEXT: OpFunctionEnd
define dso_local spir_kernel void @test_wrappers(ptr addrspace(4) %arg, i64 %val) {
diff --git a/llvm/test/CodeGen/SPIRV/instructions/atomic_acqrel.ll b/llvm/test/CodeGen/SPIRV/instructions/atomic_acqrel.ll
index 07d1a5c..4d5aca6 100644
--- a/llvm/test/CodeGen/SPIRV/instructions/atomic_acqrel.ll
+++ b/llvm/test/CodeGen/SPIRV/instructions/atomic_acqrel.ll
@@ -13,8 +13,8 @@
; CHECK-DAG: [[I32Ty:%.*]] = OpTypeInt 32 0
; CHECK-DAG: [[PtrI32Ty:%.*]] = OpTypePointer Function [[I32Ty]]
-;; Device scope is encoded with constant 1
-; CHECK-DAG: [[SCOPE:%.*]] = OpConstant [[I32Ty]] 1
+;; AllSvmDevices scope is encoded with constant 0
+; CHECK-DAG: [[SCOPE:%.*]] = OpConstantNull [[I32Ty]]
;; "acq_rel" maps to the constant 8
; CHECK-DAG: [[ACQREL:%.*]] = OpConstant [[I32Ty]] 8
diff --git a/llvm/test/CodeGen/SPIRV/instructions/atomic_seq.ll b/llvm/test/CodeGen/SPIRV/instructions/atomic_seq.ll
index 4078ffe..9fd3d8e 100644
--- a/llvm/test/CodeGen/SPIRV/instructions/atomic_seq.ll
+++ b/llvm/test/CodeGen/SPIRV/instructions/atomic_seq.ll
@@ -13,8 +13,8 @@
; CHECK-DAG: [[I32Ty:%.*]] = OpTypeInt 32 0
; CHECK-DAG: [[PtrI32Ty:%.*]] = OpTypePointer Function [[I32Ty]]
-;; Device scope is encoded with constant 1
-; CHECK-DAG: [[SCOPE:%.*]] = OpConstant [[I32Ty]] 1
+;; AllSvmDevices scope is encoded with constant 0
+; CHECK-DAG: [[SCOPE:%.*]] = OpConstantNull [[I32Ty]]
;; "sequentially consistent" maps to constant 16
; CHECK-DAG: [[SEQ:%.*]] = OpConstant [[I32Ty]] 16
diff --git a/llvm/test/CodeGen/SPIRV/scoped_atomicrmw.ll b/llvm/test/CodeGen/SPIRV/scoped_atomicrmw.ll
new file mode 100644
index 0000000..130db18
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/scoped_atomicrmw.ll
@@ -0,0 +1,163 @@
+; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; CHECK: %[[#Int:]] = OpTypeInt 32 0
+; CHECK-DAG: %[[#Float:]] = OpTypeFloat 32
+; CHECK-DAG: %[[#Scope_CrossDevice:]] = OpConstant %[[#Int]] 0
+; CHECK-DAG: %[[#Value:]] = OpConstant %[[#Int]] 42
+; CHECK-DAG: %[[#FPValue:]] = OpConstant %[[#Float]] 42
+; CHECK-DAG: %[[#Scope_Invocation:]] = OpConstant %[[#Int]] 4
+; CHECK-DAG: %[[#MemSem_SeqCst:]] = OpConstant %[[#Int]] 16
+; CHECK-DAG: %[[#Scope_Subgroup:]] = OpConstant %[[#Int]] 3
+; CHECK-DAG: %[[#Scope_Workgroup:]] = OpConstant %[[#Int]] 2
+; CHECK-DAG: %[[#Scope_Device:]] = OpConstant %[[#Int]] 1
+; CHECK-DAG: %[[#PointerType:]] = OpTypePointer CrossWorkgroup %[[#Int]]
+; CHECK-DAG: %[[#FPPointerType:]] = OpTypePointer CrossWorkgroup %[[#Float]]
+; CHECK-DAG: %[[#Pointer:]] = OpVariable %[[#PointerType]] CrossWorkgroup
+; CHECK-DAG: %[[#FPPointer:]] = OpVariable %[[#FPPointerType]] CrossWorkgroup
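+;
+; Scope values per the SPIR-V spec, as exercised by the functions below:
+; "singlethread" -> Invocation (4), "subgroup" -> Subgroup (3),
+; "workgroup" -> Workgroup (2), "device" -> Device (1), and the default
+; (no syncscope) -> CrossDevice (0).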
+
+@ui = common dso_local addrspace(1) global i32 0, align 4
+@f = common dso_local local_unnamed_addr addrspace(1) global float 0.000000e+00, align 4
+
+define dso_local spir_func void @test_singlethread_atomicrmw() local_unnamed_addr {
+entry:
+ %0 = atomicrmw xchg i32 addrspace(1)* @ui, i32 42 syncscope("singlethread") seq_cst
+ ; CHECK: %[[#]] = OpAtomicExchange %[[#Int]] %[[#Pointer:]] %[[#Scope_Invocation:]] %[[#MemSem_SeqCst:]] %[[#Value:]]
+ %1 = atomicrmw xchg float addrspace(1)* @f, float 42.000000e+00 syncscope("singlethread") seq_cst
+ ; CHECK: %[[#]] = OpAtomicExchange %[[#Float:]] %[[#FPPointer:]] %[[#Scope_Invocation:]] %[[#MemSem_SeqCst:]] %[[#FPValue:]]
+ %2 = atomicrmw add i32 addrspace(1)* @ui, i32 42 syncscope("singlethread") seq_cst
+ ; CHECK: %[[#]] = OpAtomicIAdd %[[#Int]] %[[#Pointer:]] %[[#Scope_Invocation:]] %[[#MemSem_SeqCst:]] %[[#Value:]]
+ %3 = atomicrmw sub i32 addrspace(1)* @ui, i32 42 syncscope("singlethread") seq_cst
+ ; CHECK: %[[#]] = OpAtomicISub %[[#Int]] %[[#Pointer:]] %[[#Scope_Invocation:]] %[[#MemSem_SeqCst:]] %[[#Value:]]
+ %4 = atomicrmw or i32 addrspace(1)* @ui, i32 42 syncscope("singlethread") seq_cst
+ ; CHECK: %[[#]] = OpAtomicOr %[[#Int]] %[[#Pointer:]] %[[#Scope_Invocation:]] %[[#MemSem_SeqCst:]] %[[#Value:]]
+ %5 = atomicrmw xor i32 addrspace(1)* @ui, i32 42 syncscope("singlethread") seq_cst
+ ; CHECK: %[[#]] = OpAtomicXor %[[#Int]] %[[#Pointer:]] %[[#Scope_Invocation:]] %[[#MemSem_SeqCst:]] %[[#Value:]]
+ %6 = atomicrmw and i32 addrspace(1)* @ui, i32 42 syncscope("singlethread") seq_cst
+ ; CHECK: %[[#]] = OpAtomicAnd %[[#Int]] %[[#Pointer:]] %[[#Scope_Invocation:]] %[[#MemSem_SeqCst:]] %[[#Value:]]
+ %7 = atomicrmw max i32 addrspace(1)* @ui, i32 42 syncscope("singlethread") seq_cst
+ ; CHECK: %[[#]] = OpAtomicSMax %[[#Int]] %[[#Pointer:]] %[[#Scope_Invocation:]] %[[#MemSem_SeqCst:]] %[[#Value:]]
+ %8 = atomicrmw min i32 addrspace(1)* @ui, i32 42 syncscope("singlethread") seq_cst
+ ; CHECK: %[[#]] = OpAtomicSMin %[[#Int]] %[[#Pointer:]] %[[#Scope_Invocation:]] %[[#MemSem_SeqCst:]] %[[#Value:]]
+ %9 = atomicrmw umax i32 addrspace(1)* @ui, i32 42 syncscope("singlethread") seq_cst
+ ; CHECK: %[[#]] = OpAtomicUMax %[[#Int]] %[[#Pointer:]] %[[#Scope_Invocation:]] %[[#MemSem_SeqCst:]] %[[#Value:]]
+ %10 = atomicrmw umin i32 addrspace(1)* @ui, i32 42 syncscope("singlethread") seq_cst
+ ; CHECK: %[[#]] = OpAtomicUMin %[[#Int]] %[[#Pointer:]] %[[#Scope_Invocation:]] %[[#MemSem_SeqCst:]] %[[#Value:]]
+
+ ret void
+}
+
+define dso_local spir_func void @test_subgroup_atomicrmw() local_unnamed_addr {
+entry:
+ %0 = atomicrmw xchg i32 addrspace(1)* @ui, i32 42 syncscope("subgroup") seq_cst
+ ; CHECK: %[[#]] = OpAtomicExchange %[[#Int]] %[[#Pointer:]] %[[#Scope_Subgroup:]] %[[#MemSem_SeqCst:]] %[[#Value:]]
+ %1 = atomicrmw xchg float addrspace(1)* @f, float 42.000000e+00 syncscope("subgroup") seq_cst
+ ; CHECK: %[[#]] = OpAtomicExchange %[[#Float:]] %[[#FPPointer:]] %[[#Scope_Subgroup:]] %[[#MemSem_SeqCst:]] %[[#FPValue:]]
+ %2 = atomicrmw add i32 addrspace(1)* @ui, i32 42 syncscope("subgroup") seq_cst
+ ; CHECK: %[[#]] = OpAtomicIAdd %[[#Int]] %[[#Pointer:]] %[[#Scope_Subgroup:]] %[[#MemSem_SeqCst:]] %[[#Value:]]
+ %3 = atomicrmw sub i32 addrspace(1)* @ui, i32 42 syncscope("subgroup") seq_cst
+ ; CHECK: %[[#]] = OpAtomicISub %[[#Int]] %[[#Pointer:]] %[[#Scope_Subgroup:]] %[[#MemSem_SeqCst:]] %[[#Value:]]
+ %4 = atomicrmw or i32 addrspace(1)* @ui, i32 42 syncscope("subgroup") seq_cst
+ ; CHECK: %[[#]] = OpAtomicOr %[[#Int]] %[[#Pointer:]] %[[#Scope_Subgroup:]] %[[#MemSem_SeqCst:]] %[[#Value:]]
+ %5 = atomicrmw xor i32 addrspace(1)* @ui, i32 42 syncscope("subgroup") seq_cst
+ ; CHECK: %[[#]] = OpAtomicXor %[[#Int]] %[[#Pointer:]] %[[#Scope_Subgroup:]] %[[#MemSem_SeqCst:]] %[[#Value:]]
+ %6 = atomicrmw and i32 addrspace(1)* @ui, i32 42 syncscope("subgroup") seq_cst
+ ; CHECK: %[[#]] = OpAtomicAnd %[[#Int]] %[[#Pointer:]] %[[#Scope_Subgroup:]] %[[#MemSem_SeqCst:]] %[[#Value:]]
+ %7 = atomicrmw max i32 addrspace(1)* @ui, i32 42 syncscope("subgroup") seq_cst
+ ; CHECK: %[[#]] = OpAtomicSMax %[[#Int]] %[[#Pointer:]] %[[#Scope_Subgroup:]] %[[#MemSem_SeqCst:]] %[[#Value:]]
+ %8 = atomicrmw min i32 addrspace(1)* @ui, i32 42 syncscope("subgroup") seq_cst
+ ; CHECK: %[[#]] = OpAtomicSMin %[[#Int]] %[[#Pointer:]] %[[#Scope_Subgroup:]] %[[#MemSem_SeqCst:]] %[[#Value:]]
+ %9 = atomicrmw umax i32 addrspace(1)* @ui, i32 42 syncscope("subgroup") seq_cst
+ ; CHECK: %[[#]] = OpAtomicUMax %[[#Int]] %[[#Pointer:]] %[[#Scope_Subgroup:]] %[[#MemSem_SeqCst:]] %[[#Value:]]
+ %10 = atomicrmw umin i32 addrspace(1)* @ui, i32 42 syncscope("subgroup") seq_cst
+ ; CHECK: %[[#]] = OpAtomicUMin %[[#Int]] %[[#Pointer:]] %[[#Scope_Subgroup:]] %[[#MemSem_SeqCst:]] %[[#Value:]]
+
+ ret void
+}
+
+define dso_local spir_func void @test_workgroup_atomicrmw() local_unnamed_addr {
+entry:
+ %0 = atomicrmw xchg i32 addrspace(1)* @ui, i32 42 syncscope("workgroup") seq_cst
+ ; CHECK: %[[#]] = OpAtomicExchange %[[#Int]] %[[#Pointer:]] %[[#Scope_Workgroup:]] %[[#MemSem_SeqCst:]] %[[#Value:]]
+ %1 = atomicrmw xchg float addrspace(1)* @f, float 42.000000e+00 syncscope("workgroup") seq_cst
+ ; CHECK: %[[#]] = OpAtomicExchange %[[#Float:]] %[[#FPPointer:]] %[[#Scope_Workgroup:]] %[[#MemSem_SeqCst:]] %[[#FPValue:]]
+ %2 = atomicrmw add i32 addrspace(1)* @ui, i32 42 syncscope("workgroup") seq_cst
+ ; CHECK: %[[#]] = OpAtomicIAdd %[[#Int]] %[[#Pointer:]] %[[#Scope_Workgroup:]] %[[#MemSem_SeqCst:]] %[[#Value:]]
+ %3 = atomicrmw sub i32 addrspace(1)* @ui, i32 42 syncscope("workgroup") seq_cst
+ ; CHECK: %[[#]] = OpAtomicISub %[[#Int]] %[[#Pointer:]] %[[#Scope_Workgroup:]] %[[#MemSem_SeqCst:]] %[[#Value:]]
+ %4 = atomicrmw or i32 addrspace(1)* @ui, i32 42 syncscope("workgroup") seq_cst
+ ; CHECK: %[[#]] = OpAtomicOr %[[#Int]] %[[#Pointer:]] %[[#Scope_Workgroup:]] %[[#MemSem_SeqCst:]] %[[#Value:]]
+ %5 = atomicrmw xor i32 addrspace(1)* @ui, i32 42 syncscope("workgroup") seq_cst
+ ; CHECK: %[[#]] = OpAtomicXor %[[#Int]] %[[#Pointer:]] %[[#Scope_Workgroup:]] %[[#MemSem_SeqCst:]] %[[#Value:]]
+ %6 = atomicrmw and i32 addrspace(1)* @ui, i32 42 syncscope("workgroup") seq_cst
+ ; CHECK: %[[#]] = OpAtomicAnd %[[#Int]] %[[#Pointer:]] %[[#Scope_Workgroup:]] %[[#MemSem_SeqCst:]] %[[#Value:]]
+ %7 = atomicrmw max i32 addrspace(1)* @ui, i32 42 syncscope("workgroup") seq_cst
+ ; CHECK: %[[#]] = OpAtomicSMax %[[#Int]] %[[#Pointer:]] %[[#Scope_Workgroup:]] %[[#MemSem_SeqCst:]] %[[#Value:]]
+ %8 = atomicrmw min i32 addrspace(1)* @ui, i32 42 syncscope("workgroup") seq_cst
+ ; CHECK: %[[#]] = OpAtomicSMin %[[#Int]] %[[#Pointer:]] %[[#Scope_Workgroup:]] %[[#MemSem_SeqCst:]] %[[#Value:]]
+ %9 = atomicrmw umax i32 addrspace(1)* @ui, i32 42 syncscope("workgroup") seq_cst
+ ; CHECK: %[[#]] = OpAtomicUMax %[[#Int]] %[[#Pointer:]] %[[#Scope_Workgroup:]] %[[#MemSem_SeqCst:]] %[[#Value:]]
+ %10 = atomicrmw umin i32 addrspace(1)* @ui, i32 42 syncscope("workgroup") seq_cst
+ ; CHECK: %[[#]] = OpAtomicUMin %[[#Int]] %[[#Pointer:]] %[[#Scope_Workgroup:]] %[[#MemSem_SeqCst:]] %[[#Value:]]
+
+ ret void
+}
+
+define dso_local spir_func void @test_device_atomicrmw() local_unnamed_addr {
+entry:
+ %0 = atomicrmw xchg i32 addrspace(1)* @ui, i32 42 syncscope("device") seq_cst
+ ; CHECK: %[[#]] = OpAtomicExchange %[[#Int]] %[[#Pointer:]] %[[#Scope_Device:]] %[[#MemSem_SeqCst:]] %[[#Value:]]
+ %1 = atomicrmw xchg float addrspace(1)* @f, float 42.000000e+00 syncscope("device") seq_cst
+ ; CHECK: %[[#]] = OpAtomicExchange %[[#Float:]] %[[#FPPointer:]] %[[#Scope_Device:]] %[[#MemSem_SeqCst:]] %[[#FPValue:]]
+ %2 = atomicrmw add i32 addrspace(1)* @ui, i32 42 syncscope("device") seq_cst
+ ; CHECK: %[[#]] = OpAtomicIAdd %[[#Int]] %[[#Pointer:]] %[[#Scope_Device:]] %[[#MemSem_SeqCst:]] %[[#Value:]]
+ %3 = atomicrmw sub i32 addrspace(1)* @ui, i32 42 syncscope("device") seq_cst
+ ; CHECK: %[[#]] = OpAtomicISub %[[#Int]] %[[#Pointer:]] %[[#Scope_Device:]] %[[#MemSem_SeqCst:]] %[[#Value:]]
+ %4 = atomicrmw or i32 addrspace(1)* @ui, i32 42 syncscope("device") seq_cst
+ ; CHECK: %[[#]] = OpAtomicOr %[[#Int]] %[[#Pointer:]] %[[#Scope_Device:]] %[[#MemSem_SeqCst:]] %[[#Value:]]
+ %5 = atomicrmw xor i32 addrspace(1)* @ui, i32 42 syncscope("device") seq_cst
+ ; CHECK: %[[#]] = OpAtomicXor %[[#Int]] %[[#Pointer:]] %[[#Scope_Device:]] %[[#MemSem_SeqCst:]] %[[#Value:]]
+ %6 = atomicrmw and i32 addrspace(1)* @ui, i32 42 syncscope("device") seq_cst
+ ; CHECK: %[[#]] = OpAtomicAnd %[[#Int]] %[[#Pointer:]] %[[#Scope_Device:]] %[[#MemSem_SeqCst:]] %[[#Value:]]
+ %7 = atomicrmw max i32 addrspace(1)* @ui, i32 42 syncscope("device") seq_cst
+ ; CHECK: %[[#]] = OpAtomicSMax %[[#Int]] %[[#Pointer:]] %[[#Scope_Device:]] %[[#MemSem_SeqCst:]] %[[#Value:]]
+ %8 = atomicrmw min i32 addrspace(1)* @ui, i32 42 syncscope("device") seq_cst
+ ; CHECK: %[[#]] = OpAtomicSMin %[[#Int]] %[[#Pointer:]] %[[#Scope_Device:]] %[[#MemSem_SeqCst:]] %[[#Value:]]
+ %9 = atomicrmw umax i32 addrspace(1)* @ui, i32 42 syncscope("device") seq_cst
+ ; CHECK: %[[#]] = OpAtomicUMax %[[#Int]] %[[#Pointer:]] %[[#Scope_Device:]] %[[#MemSem_SeqCst:]] %[[#Value:]]
+ %10 = atomicrmw umin i32 addrspace(1)* @ui, i32 42 syncscope("device") seq_cst
+ ; CHECK: %[[#]] = OpAtomicUMin %[[#Int]] %[[#Pointer:]] %[[#Scope_Device:]] %[[#MemSem_SeqCst:]] %[[#Value:]]
+
+ ret void
+}
+
+define dso_local spir_func void @test_all_svm_devices_atomicrmw() local_unnamed_addr {
+entry:
+ %0 = atomicrmw xchg i32 addrspace(1)* @ui, i32 42 seq_cst
+ ; CHECK: %[[#]] = OpAtomicExchange %[[#Int]] %[[#Pointer:]] %[[#Scope_CrossDevice:]] %[[#MemSem_SeqCst:]] %[[#Value:]]
+ %1 = atomicrmw xchg float addrspace(1)* @f, float 42.000000e+00 seq_cst
+ ; CHECK: %[[#]] = OpAtomicExchange %[[#Float:]] %[[#FPPointer:]] %[[#Scope_CrossDevice:]] %[[#MemSem_SeqCst:]] %[[#FPValue:]]
+ %2 = atomicrmw add i32 addrspace(1)* @ui, i32 42 seq_cst
+ ; CHECK: %[[#]] = OpAtomicIAdd %[[#Int]] %[[#Pointer:]] %[[#Scope_CrossDevice:]] %[[#MemSem_SeqCst:]] %[[#Value:]]
+ %3 = atomicrmw sub i32 addrspace(1)* @ui, i32 42 seq_cst
+ ; CHECK: %[[#]] = OpAtomicISub %[[#Int]] %[[#Pointer:]] %[[#Scope_CrossDevice:]] %[[#MemSem_SeqCst:]] %[[#Value:]]
+ %4 = atomicrmw or i32 addrspace(1)* @ui, i32 42 seq_cst
+ ; CHECK: %[[#]] = OpAtomicOr %[[#Int]] %[[#Pointer:]] %[[#Scope_CrossDevice:]] %[[#MemSem_SeqCst:]] %[[#Value:]]
+ %5 = atomicrmw xor i32 addrspace(1)* @ui, i32 42 seq_cst
+ ; CHECK: %[[#]] = OpAtomicXor %[[#Int]] %[[#Pointer:]] %[[#Scope_CrossDevice:]] %[[#MemSem_SeqCst:]] %[[#Value:]]
+ %6 = atomicrmw and i32 addrspace(1)* @ui, i32 42 seq_cst
+ ; CHECK: %[[#]] = OpAtomicAnd %[[#Int]] %[[#Pointer:]] %[[#Scope_CrossDevice:]] %[[#MemSem_SeqCst:]] %[[#Value:]]
+ %7 = atomicrmw max i32 addrspace(1)* @ui, i32 42 seq_cst
+ ; CHECK: %[[#]] = OpAtomicSMax %[[#Int]] %[[#Pointer:]] %[[#Scope_CrossDevice:]] %[[#MemSem_SeqCst:]] %[[#Value:]]
+ %8 = atomicrmw min i32 addrspace(1)* @ui, i32 42 seq_cst
+ ; CHECK: %[[#]] = OpAtomicSMin %[[#Int]] %[[#Pointer:]] %[[#Scope_CrossDevice:]] %[[#MemSem_SeqCst:]] %[[#Value:]]
+ %9 = atomicrmw umax i32 addrspace(1)* @ui, i32 42 seq_cst
+ ; CHECK: %[[#]] = OpAtomicUMax %[[#Int]] %[[#Pointer:]] %[[#Scope_CrossDevice:]] %[[#MemSem_SeqCst:]] %[[#Value:]]
+ %10 = atomicrmw umin i32 addrspace(1)* @ui, i32 42 seq_cst
+ ; CHECK: %[[#]] = OpAtomicUMin %[[#Int]] %[[#Pointer:]] %[[#Scope_CrossDevice:]] %[[#MemSem_SeqCst:]] %[[#Value:]]
+
+ ret void
+}
diff --git a/llvm/test/CodeGen/Thumb2/mve-soft-float-abi.ll b/llvm/test/CodeGen/Thumb2/mve-soft-float-abi.ll
index 4b76906..41d2c02 100644
--- a/llvm/test/CodeGen/Thumb2/mve-soft-float-abi.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-soft-float-abi.ll
@@ -543,3 +543,69 @@ define <4 x i32> @insertextract(i32 %x, i32 %y) {
%4 = insertelement <4 x i32> %3, i32 %y, i32 3
ret <4 x i32> %4
}
+
+declare void @print_uint32x4_t(<4 x i32> %val)
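+; The calls below pass a leading i32 ahead of the <4 x i32> so that the
+; vector argument is split between core registers and the stack.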
+define i32 @main(i64 %x, i64 %y) {
+; CHECK-LE-LABEL: main:
+; CHECK-LE: @ %bb.0: @ %entry
+; CHECK-LE-NEXT: .save {r4, lr}
+; CHECK-LE-NEXT: push {r4, lr}
+; CHECK-LE-NEXT: .vsave {d8, d9}
+; CHECK-LE-NEXT: vpush {d8, d9}
+; CHECK-LE-NEXT: .pad #8
+; CHECK-LE-NEXT: sub sp, #8
+; CHECK-LE-NEXT: vmov.32 q4[2], r2
+; CHECK-LE-NEXT: mov r4, r1
+; CHECK-LE-NEXT: mov r1, r0
+; CHECK-LE-NEXT: vmov.32 q4[3], r3
+; CHECK-LE-NEXT: movs r0, #0
+; CHECK-LE-NEXT: mov r2, r1
+; CHECK-LE-NEXT: mov r3, r4
+; CHECK-LE-NEXT: vstr d9, [sp]
+; CHECK-LE-NEXT: bl print_uint32x4_t
+; CHECK-LE-NEXT: movs r0, #0
+; CHECK-LE-NEXT: movs r2, #1
+; CHECK-LE-NEXT: mov r3, r4
+; CHECK-LE-NEXT: vstr d9, [sp]
+; CHECK-LE-NEXT: bl print_uint32x4_t
+; CHECK-LE-NEXT: movs r0, #0
+; CHECK-LE-NEXT: add sp, #8
+; CHECK-LE-NEXT: vpop {d8, d9}
+; CHECK-LE-NEXT: pop {r4, pc}
+;
+; CHECK-BE-LABEL: main:
+; CHECK-BE: @ %bb.0: @ %entry
+; CHECK-BE-NEXT: .save {r4, lr}
+; CHECK-BE-NEXT: push {r4, lr}
+; CHECK-BE-NEXT: .vsave {d8, d9}
+; CHECK-BE-NEXT: vpush {d8, d9}
+; CHECK-BE-NEXT: .pad #8
+; CHECK-BE-NEXT: sub sp, #8
+; CHECK-BE-NEXT: vmov.32 q0[2], r2
+; CHECK-BE-NEXT: mov r4, r1
+; CHECK-BE-NEXT: mov r1, r0
+; CHECK-BE-NEXT: vmov.32 q0[3], r3
+; CHECK-BE-NEXT: vrev64.32 q4, q0
+; CHECK-BE-NEXT: movs r0, #0
+; CHECK-BE-NEXT: mov r2, r1
+; CHECK-BE-NEXT: mov r3, r4
+; CHECK-BE-NEXT: vstr d9, [sp]
+; CHECK-BE-NEXT: bl print_uint32x4_t
+; CHECK-BE-NEXT: movs r0, #0
+; CHECK-BE-NEXT: movs r2, #1
+; CHECK-BE-NEXT: mov r3, r4
+; CHECK-BE-NEXT: vstr d9, [sp]
+; CHECK-BE-NEXT: bl print_uint32x4_t
+; CHECK-BE-NEXT: movs r0, #0
+; CHECK-BE-NEXT: add sp, #8
+; CHECK-BE-NEXT: vpop {d8, d9}
+; CHECK-BE-NEXT: pop {r4, pc}
+entry:
+ %a = insertelement <2 x i64> poison, i64 %x, i64 0
+ %b = insertelement <2 x i64> %a, i64 %y, i64 1
+ %c = bitcast <2 x i64> %b to <4 x i32>
+ %i = insertelement <4 x i32> %c, i32 1, i64 0
+ tail call void @print_uint32x4_t(i32 0, <4 x i32> %c)
+ tail call void @print_uint32x4_t(i32 0, <4 x i32> %i)
+ ret i32 0
+}
diff --git a/llvm/test/CodeGen/X86/canonicalize-vars-f16-type.ll b/llvm/test/CodeGen/X86/canonicalize-vars-f16-type.ll
new file mode 100644
index 0000000..fdf0bf3
--- /dev/null
+++ b/llvm/test/CodeGen/X86/canonicalize-vars-f16-type.ll
@@ -0,0 +1,270 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --default-march x86_64-unknown-linux-gnu --version 5
+; RUN: llc -mattr=+sse2 -mtriple=x86_64 < %s | FileCheck %s -check-prefixes=SSE
+; RUN: llc -mattr=+avx -mtriple=x86_64 < %s | FileCheck %s -check-prefixes=AVX,AVX1
+; RUN: llc -mattr=+avx2 -mtriple=x86_64 < %s | FileCheck %s -check-prefixes=AVX,AVX2
+; RUN: llc -mattr=+avx512f -mtriple=x86_64 < %s | FileCheck %s -check-prefixes=AVX512,AVX512F
+; RUN: llc -mattr=+avx512bw -mtriple=x86_64 < %s | FileCheck %s -check-prefixes=AVX512,AVX512BW
+
+define void @v_test_canonicalize__half(half addrspace(1)* %out) nounwind {
+; SSE-LABEL: v_test_canonicalize__half:
+; SSE: # %bb.0: # %entry
+; SSE-NEXT: pushq %rbx
+; SSE-NEXT: subq $16, %rsp
+; SSE-NEXT: movq %rdi, %rbx
+; SSE-NEXT: pinsrw $0, (%rdi), %xmm0
+; SSE-NEXT: callq __extendhfsf2@PLT
+; SSE-NEXT: movd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
+; SSE-NEXT: pinsrw $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE-NEXT: callq __extendhfsf2@PLT
+; SSE-NEXT: mulss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
+; SSE-NEXT: callq __truncsfhf2@PLT
+; SSE-NEXT: pextrw $0, %xmm0, %eax
+; SSE-NEXT: movw %ax, (%rbx)
+; SSE-NEXT: addq $16, %rsp
+; SSE-NEXT: popq %rbx
+; SSE-NEXT: retq
+;
+; AVX-LABEL: v_test_canonicalize__half:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: pushq %rbx
+; AVX-NEXT: subq $16, %rsp
+; AVX-NEXT: movq %rdi, %rbx
+; AVX-NEXT: vpinsrw $0, (%rdi), %xmm0, %xmm0
+; AVX-NEXT: callq __extendhfsf2@PLT
+; AVX-NEXT: vmovd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
+; AVX-NEXT: vpinsrw $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX-NEXT: callq __extendhfsf2@PLT
+; AVX-NEXT: vmulss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 4-byte Folded Reload
+; AVX-NEXT: callq __truncsfhf2@PLT
+; AVX-NEXT: vpextrw $0, %xmm0, (%rbx)
+; AVX-NEXT: addq $16, %rsp
+; AVX-NEXT: popq %rbx
+; AVX-NEXT: retq
+;
+; AVX512-LABEL: v_test_canonicalize__half:
+; AVX512: # %bb.0: # %entry
+; AVX512-NEXT: movzwl (%rdi), %eax
+; AVX512-NEXT: movzwl {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ecx
+; AVX512-NEXT: vmovd %ecx, %xmm0
+; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512-NEXT: vmovd %eax, %xmm1
+; AVX512-NEXT: vcvtph2ps %xmm1, %xmm1
+; AVX512-NEXT: vmulss %xmm0, %xmm1, %xmm0
+; AVX512-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; AVX512-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
+; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512-NEXT: vmovd %xmm0, %eax
+; AVX512-NEXT: movw %ax, (%rdi)
+; AVX512-NEXT: retq
+entry:
+ %val = load half, half addrspace(1)* %out
+ %canonicalized = call half @llvm.canonicalize.f16(half %val)
+ store half %canonicalized, half addrspace(1)* %out
+ ret void
+}
+
+define half @complex_canonicalize_fmul_half(half %a, half %b) nounwind {
+; SSE-LABEL: complex_canonicalize_fmul_half:
+; SSE: # %bb.0: # %entry
+; SSE-NEXT: pushq %rax
+; SSE-NEXT: movss %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; SSE-NEXT: callq __extendhfsf2@PLT
+; SSE-NEXT: movss %xmm0, (%rsp) # 4-byte Spill
+; SSE-NEXT: movss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Reload
+; SSE-NEXT: # xmm0 = mem[0],zero,zero,zero
+; SSE-NEXT: callq __extendhfsf2@PLT
+; SSE-NEXT: movss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; SSE-NEXT: movss (%rsp), %xmm1 # 4-byte Reload
+; SSE-NEXT: # xmm1 = mem[0],zero,zero,zero
+; SSE-NEXT: subss %xmm0, %xmm1
+; SSE-NEXT: movaps %xmm1, %xmm0
+; SSE-NEXT: callq __truncsfhf2@PLT
+; SSE-NEXT: callq __extendhfsf2@PLT
+; SSE-NEXT: movss %xmm0, (%rsp) # 4-byte Spill
+; SSE-NEXT: addss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
+; SSE-NEXT: callq __truncsfhf2@PLT
+; SSE-NEXT: callq __extendhfsf2@PLT
+; SSE-NEXT: subss (%rsp), %xmm0 # 4-byte Folded Reload
+; SSE-NEXT: callq __truncsfhf2@PLT
+; SSE-NEXT: callq __extendhfsf2@PLT
+; SSE-NEXT: movss %xmm0, (%rsp) # 4-byte Spill
+; SSE-NEXT: pinsrw $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE-NEXT: callq __extendhfsf2@PLT
+; SSE-NEXT: mulss (%rsp), %xmm0 # 4-byte Folded Reload
+; SSE-NEXT: callq __truncsfhf2@PLT
+; SSE-NEXT: callq __extendhfsf2@PLT
+; SSE-NEXT: subss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
+; SSE-NEXT: callq __truncsfhf2@PLT
+; SSE-NEXT: popq %rax
+; SSE-NEXT: retq
+;
+; AVX-LABEL: complex_canonicalize_fmul_half:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: pushq %rax
+; AVX-NEXT: vmovss %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; AVX-NEXT: callq __extendhfsf2@PLT
+; AVX-NEXT: vmovss %xmm0, (%rsp) # 4-byte Spill
+; AVX-NEXT: vmovss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Reload
+; AVX-NEXT: # xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: callq __extendhfsf2@PLT
+; AVX-NEXT: vmovss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; AVX-NEXT: vmovss (%rsp), %xmm1 # 4-byte Reload
+; AVX-NEXT: # xmm1 = mem[0],zero,zero,zero
+; AVX-NEXT: vsubss %xmm0, %xmm1, %xmm0
+; AVX-NEXT: callq __truncsfhf2@PLT
+; AVX-NEXT: callq __extendhfsf2@PLT
+; AVX-NEXT: vmovss %xmm0, (%rsp) # 4-byte Spill
+; AVX-NEXT: vaddss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 4-byte Folded Reload
+; AVX-NEXT: callq __truncsfhf2@PLT
+; AVX-NEXT: callq __extendhfsf2@PLT
+; AVX-NEXT: vsubss (%rsp), %xmm0, %xmm0 # 4-byte Folded Reload
+; AVX-NEXT: callq __truncsfhf2@PLT
+; AVX-NEXT: callq __extendhfsf2@PLT
+; AVX-NEXT: vmovss %xmm0, (%rsp) # 4-byte Spill
+; AVX-NEXT: vpinsrw $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX-NEXT: callq __extendhfsf2@PLT
+; AVX-NEXT: vmulss (%rsp), %xmm0, %xmm0 # 4-byte Folded Reload
+; AVX-NEXT: callq __truncsfhf2@PLT
+; AVX-NEXT: callq __extendhfsf2@PLT
+; AVX-NEXT: vsubss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 4-byte Folded Reload
+; AVX-NEXT: callq __truncsfhf2@PLT
+; AVX-NEXT: popq %rax
+; AVX-NEXT: retq
+;
+; AVX512-LABEL: complex_canonicalize_fmul_half:
+; AVX512: # %bb.0: # %entry
+; AVX512-NEXT: vpextrw $0, %xmm1, %eax
+; AVX512-NEXT: vpextrw $0, %xmm0, %ecx
+; AVX512-NEXT: vmovd %ecx, %xmm0
+; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512-NEXT: vmovd %eax, %xmm1
+; AVX512-NEXT: vcvtph2ps %xmm1, %xmm1
+; AVX512-NEXT: vsubss %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512-NEXT: vaddss %xmm1, %xmm0, %xmm2
+; AVX512-NEXT: vcvtps2ph $4, %xmm2, %xmm2
+; AVX512-NEXT: vcvtph2ps %xmm2, %xmm2
+; AVX512-NEXT: vsubss %xmm0, %xmm2, %xmm0
+; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
+; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512-NEXT: movzwl {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %eax
+; AVX512-NEXT: vmovd %eax, %xmm2
+; AVX512-NEXT: vcvtph2ps %xmm2, %xmm2
+; AVX512-NEXT: vmulss %xmm2, %xmm0, %xmm0
+; AVX512-NEXT: vxorps %xmm2, %xmm2, %xmm2
+; AVX512-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm2[1,2,3]
+; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512-NEXT: vsubss %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512-NEXT: vmovd %xmm0, %eax
+; AVX512-NEXT: vpinsrw $0, %eax, %xmm0, %xmm0
+; AVX512-NEXT: retq
+entry:
+
+ %mul1 = fsub half %a, %b
+ %add = fadd half %mul1, %b
+ %mul2 = fsub half %add, %mul1
+ %canonicalized = call half @llvm.canonicalize.f16(half %mul2)
+ %result = fsub half %canonicalized, %b
+ ret half %result
+}
+
+define void @v_test_canonicalize_v2half(<2 x half> addrspace(1)* %out) nounwind {
+; SSE-LABEL: v_test_canonicalize_v2half:
+; SSE: # %bb.0: # %entry
+; SSE-NEXT: pushq %rbx
+; SSE-NEXT: subq $48, %rsp
+; SSE-NEXT: movq %rdi, %rbx
+; SSE-NEXT: pinsrw $0, 2(%rdi), %xmm0
+; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pinsrw $0, (%rdi), %xmm0
+; SSE-NEXT: callq __extendhfsf2@PLT
+; SSE-NEXT: movd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
+; SSE-NEXT: pinsrw $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE-NEXT: callq __extendhfsf2@PLT
+; SSE-NEXT: movd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
+; SSE-NEXT: movss {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 4-byte Reload
+; SSE-NEXT: # xmm1 = mem[0],zero,zero,zero
+; SSE-NEXT: mulss %xmm0, %xmm1
+; SSE-NEXT: movaps %xmm1, %xmm0
+; SSE-NEXT: callq __truncsfhf2@PLT
+; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; SSE-NEXT: callq __extendhfsf2@PLT
+; SSE-NEXT: mulss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
+; SSE-NEXT: callq __truncsfhf2@PLT
+; SSE-NEXT: pextrw $0, %xmm0, %eax
+; SSE-NEXT: movw %ax, 2(%rbx)
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; SSE-NEXT: pextrw $0, %xmm0, %eax
+; SSE-NEXT: movw %ax, (%rbx)
+; SSE-NEXT: addq $48, %rsp
+; SSE-NEXT: popq %rbx
+; SSE-NEXT: retq
+;
+; AVX-LABEL: v_test_canonicalize_v2half:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: pushq %rbx
+; AVX-NEXT: subq $48, %rsp
+; AVX-NEXT: movq %rdi, %rbx
+; AVX-NEXT: vpinsrw $0, 2(%rdi), %xmm0, %xmm0
+; AVX-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT: vpinsrw $0, (%rdi), %xmm0, %xmm0
+; AVX-NEXT: callq __extendhfsf2@PLT
+; AVX-NEXT: vmovd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
+; AVX-NEXT: vpinsrw $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX-NEXT: callq __extendhfsf2@PLT
+; AVX-NEXT: vmovd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
+; AVX-NEXT: vmulss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 4-byte Folded Reload
+; AVX-NEXT: callq __truncsfhf2@PLT
+; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX-NEXT: callq __extendhfsf2@PLT
+; AVX-NEXT: vmulss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 4-byte Folded Reload
+; AVX-NEXT: callq __truncsfhf2@PLT
+; AVX-NEXT: vpextrw $0, %xmm0, 2(%rbx)
+; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX-NEXT: vpextrw $0, %xmm0, (%rbx)
+; AVX-NEXT: addq $48, %rsp
+; AVX-NEXT: popq %rbx
+; AVX-NEXT: retq
+;
+; AVX512-LABEL: v_test_canonicalize_v2half:
+; AVX512: # %bb.0: # %entry
+; AVX512-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX512-NEXT: movzwl {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %eax
+; AVX512-NEXT: vmovd %eax, %xmm1
+; AVX512-NEXT: vcvtph2ps %xmm1, %xmm1
+; AVX512-NEXT: vpshufb {{.*#+}} xmm2 = xmm0[2,3],zero,zero,zero,zero,zero,zero,xmm0[u,u,u,u,u,u,u,u]
+; AVX512-NEXT: vcvtph2ps %xmm2, %xmm2
+; AVX512-NEXT: vmulss %xmm1, %xmm2, %xmm2
+; AVX512-NEXT: vxorps %xmm3, %xmm3, %xmm3
+; AVX512-NEXT: vblendps {{.*#+}} xmm2 = xmm2[0],xmm3[1,2,3]
+; AVX512-NEXT: vcvtps2ph $4, %xmm2, %xmm2
+; AVX512-NEXT: vmovd %xmm2, %eax
+; AVX512-NEXT: vpinsrw $0, %eax, %xmm0, %xmm2
+; AVX512-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
+; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512-NEXT: vmulss %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm3[1,2,3]
+; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512-NEXT: vmovd %xmm0, %eax
+; AVX512-NEXT: vpinsrw $0, %eax, %xmm0, %xmm0
+; AVX512-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; AVX512-NEXT: vmovd %xmm0, (%rdi)
+; AVX512-NEXT: retq
+entry:
+ %val = load <2 x half>, <2 x half> addrspace(1)* %out
+ %canonicalized = call <2 x half> @llvm.canonicalize.v2f16(<2 x half> %val)
+ store <2 x half> %canonicalized, <2 x half> addrspace(1)* %out
+ ret void
+}
+
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; AVX1: {{.*}}
+; AVX2: {{.*}}
+; AVX512BW: {{.*}}
+; AVX512F: {{.*}}
diff --git a/llvm/test/CodeGen/X86/canonicalize-vars.ll b/llvm/test/CodeGen/X86/canonicalize-vars.ll
new file mode 100644
index 0000000..951ea1b
--- /dev/null
+++ b/llvm/test/CodeGen/X86/canonicalize-vars.ll
@@ -0,0 +1,672 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --default-march x86_64-unknown-linux-gnu --version 5
+; RUN: llc -mtriple=i686-- < %s | FileCheck %s -check-prefixes=X87
+; RUN: llc -mattr=+sse2 -mtriple=x86_64 < %s | FileCheck %s -check-prefixes=SSE,SSE2
+; RUN: llc -mattr=+avx -mtriple=x86_64 < %s | FileCheck %s -check-prefixes=AVX,AVX1
+; RUN: llc -mattr=+avx2 -mtriple=x86_64 < %s | FileCheck %s -check-prefixes=AVX,AVX2
+; RUN: llc -mattr=+avx512f -mtriple=x86_64 < %s | FileCheck %s -check-prefixes=AVX,AVX512F
+
+define float @canon_fp32_varargsf32(float %a) {
+; X87-LABEL: canon_fp32_varargsf32:
+; X87: # %bb.0:
+; X87-NEXT: fld1
+; X87-NEXT: fmuls {{[0-9]+}}(%esp)
+; X87-NEXT: retl
+;
+; SSE-LABEL: canon_fp32_varargsf32:
+; SSE: # %bb.0:
+; SSE-NEXT: mulss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: canon_fp32_varargsf32:
+; AVX: # %bb.0:
+; AVX-NEXT: vmulss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX-NEXT: retq
+ %canonicalized = call float @llvm.canonicalize.f32(float %a)
+ ret float %canonicalized
+}
+
+define x86_fp80 @canon_fp80_varargsf80(x86_fp80 %a) {
+; X87-LABEL: canon_fp80_varargsf80:
+; X87: # %bb.0:
+; X87-NEXT: fldt {{[0-9]+}}(%esp)
+; X87-NEXT: fld1
+; X87-NEXT: fmulp %st, %st(1)
+; X87-NEXT: retl
+;
+; SSE-LABEL: canon_fp80_varargsf80:
+; SSE: # %bb.0:
+; SSE-NEXT: fldt {{[0-9]+}}(%rsp)
+; SSE-NEXT: fld1
+; SSE-NEXT: fmulp %st, %st(1)
+; SSE-NEXT: retq
+;
+; AVX-LABEL: canon_fp80_varargsf80:
+; AVX: # %bb.0:
+; AVX-NEXT: fldt {{[0-9]+}}(%rsp)
+; AVX-NEXT: fld1
+; AVX-NEXT: fmulp %st, %st(1)
+; AVX-NEXT: retq
+ %canonicalized = call x86_fp80 @llvm.canonicalize.f80(x86_fp80 %a)
+ ret x86_fp80 %canonicalized
+}
+
+define x86_fp80 @complex_canonicalize_fmul_x86_fp80(x86_fp80 %a, x86_fp80 %b) {
+; X87-LABEL: complex_canonicalize_fmul_x86_fp80:
+; X87: # %bb.0: # %entry
+; X87-NEXT: fldt {{[0-9]+}}(%esp)
+; X87-NEXT: fldt {{[0-9]+}}(%esp)
+; X87-NEXT: fsub %st(1), %st
+; X87-NEXT: fld %st(0)
+; X87-NEXT: fadd %st(2), %st
+; X87-NEXT: fsubp %st, %st(1)
+; X87-NEXT: fld1
+; X87-NEXT: fmulp %st, %st(1)
+; X87-NEXT: fsubp %st, %st(1)
+; X87-NEXT: retl
+;
+; SSE-LABEL: complex_canonicalize_fmul_x86_fp80:
+; SSE: # %bb.0: # %entry
+; SSE-NEXT: fldt {{[0-9]+}}(%rsp)
+; SSE-NEXT: fldt {{[0-9]+}}(%rsp)
+; SSE-NEXT: fsub %st(1), %st
+; SSE-NEXT: fld %st(0)
+; SSE-NEXT: fadd %st(2), %st
+; SSE-NEXT: fsubp %st, %st(1)
+; SSE-NEXT: fld1
+; SSE-NEXT: fmulp %st, %st(1)
+; SSE-NEXT: fsubp %st, %st(1)
+; SSE-NEXT: retq
+;
+; AVX-LABEL: complex_canonicalize_fmul_x86_fp80:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: fldt {{[0-9]+}}(%rsp)
+; AVX-NEXT: fldt {{[0-9]+}}(%rsp)
+; AVX-NEXT: fsub %st(1), %st
+; AVX-NEXT: fld %st(0)
+; AVX-NEXT: fadd %st(2), %st
+; AVX-NEXT: fsubp %st, %st(1)
+; AVX-NEXT: fld1
+; AVX-NEXT: fmulp %st, %st(1)
+; AVX-NEXT: fsubp %st, %st(1)
+; AVX-NEXT: retq
+entry:
+ %mul1 = fsub x86_fp80 %a, %b
+ %add = fadd x86_fp80 %mul1, %b
+ %mul2 = fsub x86_fp80 %add, %mul1
+ %canonicalized = call x86_fp80 @llvm.canonicalize.f80(x86_fp80 %mul2)
+ %result = fsub x86_fp80 %canonicalized, %b
+ ret x86_fp80 %result
+}
+
+define double @canonicalize_fp64(double %a, double %b) unnamed_addr #0 {
+; X87-LABEL: canonicalize_fp64:
+; X87: # %bb.0: # %start
+; X87-NEXT: fldl {{[0-9]+}}(%esp)
+; X87-NEXT: fldl {{[0-9]+}}(%esp)
+; X87-NEXT: fucom %st(1)
+; X87-NEXT: fnstsw %ax
+; X87-NEXT: # kill: def $ah killed $ah killed $ax
+; X87-NEXT: sahf
+; X87-NEXT: fxch %st(1)
+; X87-NEXT: fucom %st(0)
+; X87-NEXT: fnstsw %ax
+; X87-NEXT: fld %st(1)
+; X87-NEXT: ja .LBB3_2
+; X87-NEXT: # %bb.1: # %start
+; X87-NEXT: fstp %st(0)
+; X87-NEXT: fldz
+; X87-NEXT: fxch %st(1)
+; X87-NEXT: .LBB3_2: # %start
+; X87-NEXT: fstp %st(1)
+; X87-NEXT: # kill: def $ah killed $ah killed $ax
+; X87-NEXT: sahf
+; X87-NEXT: jp .LBB3_4
+; X87-NEXT: # %bb.3: # %start
+; X87-NEXT: fstp %st(1)
+; X87-NEXT: fldz
+; X87-NEXT: .LBB3_4: # %start
+; X87-NEXT: fstp %st(0)
+; X87-NEXT: fld1
+; X87-NEXT: fmulp %st, %st(1)
+; X87-NEXT: retl
+;
+; SSE-LABEL: canonicalize_fp64:
+; SSE: # %bb.0: # %start
+; SSE-NEXT: movapd %xmm0, %xmm2
+; SSE-NEXT: cmpunordsd %xmm0, %xmm2
+; SSE-NEXT: movapd %xmm2, %xmm3
+; SSE-NEXT: andpd %xmm1, %xmm3
+; SSE-NEXT: maxsd %xmm0, %xmm1
+; SSE-NEXT: andnpd %xmm1, %xmm2
+; SSE-NEXT: orpd %xmm3, %xmm2
+; SSE-NEXT: mulsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
+; SSE-NEXT: movapd %xmm2, %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: canonicalize_fp64:
+; AVX1: # %bb.0: # %start
+; AVX1-NEXT: vmaxsd %xmm0, %xmm1, %xmm2
+; AVX1-NEXT: vcmpunordsd %xmm0, %xmm0, %xmm0
+; AVX1-NEXT: vblendvpd %xmm0, %xmm1, %xmm2, %xmm0
+; AVX1-NEXT: vmulsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: canonicalize_fp64:
+; AVX2: # %bb.0: # %start
+; AVX2-NEXT: vmaxsd %xmm0, %xmm1, %xmm2
+; AVX2-NEXT: vcmpunordsd %xmm0, %xmm0, %xmm0
+; AVX2-NEXT: vblendvpd %xmm0, %xmm1, %xmm2, %xmm0
+; AVX2-NEXT: vmulsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT: retq
+;
+; AVX512F-LABEL: canonicalize_fp64:
+; AVX512F: # %bb.0: # %start
+; AVX512F-NEXT: vmaxsd %xmm0, %xmm1, %xmm2
+; AVX512F-NEXT: vcmpunordsd %xmm0, %xmm0, %k1
+; AVX512F-NEXT: vmovsd %xmm1, %xmm2, %xmm2 {%k1}
+; AVX512F-NEXT: vmulsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm0
+; AVX512F-NEXT: retq
+start:
+ %c = fcmp olt double %a, %b
+ %d = fcmp uno double %a, 0.000000e+00
+ %or.cond.i.i = or i1 %d, %c
+ %e = select i1 %or.cond.i.i, double %b, double %a
+ %f = tail call double @llvm.canonicalize.f64(double %e) #2
+ ret double %f
+}
+
+define float @canonicalize_fp32(float %aa, float %bb) unnamed_addr #0 {
+; X87-LABEL: canonicalize_fp32:
+; X87: # %bb.0: # %start
+; X87-NEXT: flds {{[0-9]+}}(%esp)
+; X87-NEXT: flds {{[0-9]+}}(%esp)
+; X87-NEXT: fucom %st(1)
+; X87-NEXT: fnstsw %ax
+; X87-NEXT: # kill: def $ah killed $ah killed $ax
+; X87-NEXT: sahf
+; X87-NEXT: fxch %st(1)
+; X87-NEXT: fucom %st(0)
+; X87-NEXT: fnstsw %ax
+; X87-NEXT: fld %st(1)
+; X87-NEXT: ja .LBB4_2
+; X87-NEXT: # %bb.1: # %start
+; X87-NEXT: fstp %st(0)
+; X87-NEXT: fldz
+; X87-NEXT: fxch %st(1)
+; X87-NEXT: .LBB4_2: # %start
+; X87-NEXT: fstp %st(1)
+; X87-NEXT: # kill: def $ah killed $ah killed $ax
+; X87-NEXT: sahf
+; X87-NEXT: jp .LBB4_4
+; X87-NEXT: # %bb.3: # %start
+; X87-NEXT: fstp %st(1)
+; X87-NEXT: fldz
+; X87-NEXT: .LBB4_4: # %start
+; X87-NEXT: fstp %st(0)
+; X87-NEXT: fld1
+; X87-NEXT: fmulp %st, %st(1)
+; X87-NEXT: retl
+;
+; SSE-LABEL: canonicalize_fp32:
+; SSE: # %bb.0: # %start
+; SSE-NEXT: movaps %xmm0, %xmm2
+; SSE-NEXT: cmpunordss %xmm0, %xmm2
+; SSE-NEXT: movaps %xmm2, %xmm3
+; SSE-NEXT: andps %xmm1, %xmm3
+; SSE-NEXT: maxss %xmm0, %xmm1
+; SSE-NEXT: andnps %xmm1, %xmm2
+; SSE-NEXT: orps %xmm3, %xmm2
+; SSE-NEXT: mulss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
+; SSE-NEXT: movaps %xmm2, %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: canonicalize_fp32:
+; AVX1: # %bb.0: # %start
+; AVX1-NEXT: vmaxss %xmm0, %xmm1, %xmm2
+; AVX1-NEXT: vcmpunordss %xmm0, %xmm0, %xmm0
+; AVX1-NEXT: vblendvps %xmm0, %xmm1, %xmm2, %xmm0
+; AVX1-NEXT: vmulss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: canonicalize_fp32:
+; AVX2: # %bb.0: # %start
+; AVX2-NEXT: vmaxss %xmm0, %xmm1, %xmm2
+; AVX2-NEXT: vcmpunordss %xmm0, %xmm0, %xmm0
+; AVX2-NEXT: vblendvps %xmm0, %xmm1, %xmm2, %xmm0
+; AVX2-NEXT: vmulss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT: retq
+;
+; AVX512F-LABEL: canonicalize_fp32:
+; AVX512F: # %bb.0: # %start
+; AVX512F-NEXT: vmaxss %xmm0, %xmm1, %xmm2
+; AVX512F-NEXT: vcmpunordss %xmm0, %xmm0, %k1
+; AVX512F-NEXT: vmovss %xmm1, %xmm2, %xmm2 {%k1}
+; AVX512F-NEXT: vmulss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm0
+; AVX512F-NEXT: retq
+start:
+ %cc = fcmp olt float %aa, %bb
+ %dd = fcmp uno float %aa, 0.000000e+00
+ %or.cond.i.i.x = or i1 %dd, %cc
+ %ee = select i1 %or.cond.i.i.x, float %bb, float %aa
+ %ff = tail call float @llvm.canonicalize.f32(float %ee) #2
+ ret float %ff
+}
+
+define void @v_test_canonicalize_var_f32(float addrspace(1)* %out) #1 {
+; X87-LABEL: v_test_canonicalize_var_f32:
+; X87: # %bb.0:
+; X87-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X87-NEXT: fld1
+; X87-NEXT: fmuls (%eax)
+; X87-NEXT: fstps (%eax)
+; X87-NEXT: retl
+;
+; SSE-LABEL: v_test_canonicalize_var_f32:
+; SSE: # %bb.0:
+; SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE-NEXT: mulss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE-NEXT: movss %xmm0, (%rdi)
+; SSE-NEXT: retq
+;
+; AVX-LABEL: v_test_canonicalize_var_f32:
+; AVX: # %bb.0:
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vmulss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX-NEXT: vmovss %xmm0, (%rdi)
+; AVX-NEXT: retq
+ %val = load float, float addrspace(1)* %out
+ %canonicalized = call float @llvm.canonicalize.f32(float %val)
+ store float %canonicalized, float addrspace(1)* %out
+ ret void
+}
+
+define void @v_test_canonicalize_x86_fp80(x86_fp80 addrspace(1)* %out) #1 {
+; X87-LABEL: v_test_canonicalize_x86_fp80:
+; X87: # %bb.0:
+; X87-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X87-NEXT: fldt (%eax)
+; X87-NEXT: fld1
+; X87-NEXT: fmulp %st, %st(1)
+; X87-NEXT: fstpt (%eax)
+; X87-NEXT: retl
+;
+; SSE-LABEL: v_test_canonicalize_x86_fp80:
+; SSE: # %bb.0:
+; SSE-NEXT: fldt (%rdi)
+; SSE-NEXT: fld1
+; SSE-NEXT: fmulp %st, %st(1)
+; SSE-NEXT: fstpt (%rdi)
+; SSE-NEXT: retq
+;
+; AVX-LABEL: v_test_canonicalize_x86_fp80:
+; AVX: # %bb.0:
+; AVX-NEXT: fldt (%rdi)
+; AVX-NEXT: fld1
+; AVX-NEXT: fmulp %st, %st(1)
+; AVX-NEXT: fstpt (%rdi)
+; AVX-NEXT: retq
+ %val = load x86_fp80, x86_fp80 addrspace(1)* %out
+ %canonicalized = call x86_fp80 @llvm.canonicalize.f80(x86_fp80 %val)
+ store x86_fp80 %canonicalized, x86_fp80 addrspace(1)* %out
+ ret void
+}
+
+define void @v_test_canonicalize_var_f64(double addrspace(1)* %out) #1 {
+; X87-LABEL: v_test_canonicalize_var_f64:
+; X87: # %bb.0:
+; X87-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X87-NEXT: fld1
+; X87-NEXT: fmull (%eax)
+; X87-NEXT: fstpl (%eax)
+; X87-NEXT: retl
+;
+; SSE-LABEL: v_test_canonicalize_var_f64:
+; SSE: # %bb.0:
+; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; SSE-NEXT: mulsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE-NEXT: movsd %xmm0, (%rdi)
+; SSE-NEXT: retq
+;
+; AVX-LABEL: v_test_canonicalize_var_f64:
+; AVX: # %bb.0:
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmulsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX-NEXT: vmovsd %xmm0, (%rdi)
+; AVX-NEXT: retq
+ %val = load double, double addrspace(1)* %out
+ %canonicalized = call double @llvm.canonicalize.f64(double %val)
+ store double %canonicalized, double addrspace(1)* %out
+ ret void
+}
+
+define void @canonicalize_undef(double addrspace(1)* %out) {
+; X87-LABEL: canonicalize_undef:
+; X87: # %bb.0:
+; X87-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X87-NEXT: movl $2146959360, 4(%eax) # imm = 0x7FF80000
+; X87-NEXT: movl $0, (%eax)
+; X87-NEXT: retl
+;
+; SSE-LABEL: canonicalize_undef:
+; SSE: # %bb.0:
+; SSE-NEXT: movabsq $9221120237041090560, %rax # imm = 0x7FF8000000000000
+; SSE-NEXT: movq %rax, (%rdi)
+; SSE-NEXT: retq
+;
+; AVX-LABEL: canonicalize_undef:
+; AVX: # %bb.0:
+; AVX-NEXT: movabsq $9221120237041090560, %rax # imm = 0x7FF8000000000000
+; AVX-NEXT: movq %rax, (%rdi)
+; AVX-NEXT: retq
+ %canonicalized = call double @llvm.canonicalize.f64(double undef)
+ store double %canonicalized, double addrspace(1)* %out
+ ret void
+}
+
+define <4 x float> @canon_fp32_varargsv4f32(<4 x float> %a) {
+; X87-LABEL: canon_fp32_varargsv4f32:
+; X87: # %bb.0:
+; X87-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X87-NEXT: fld1
+; X87-NEXT: fld %st(0)
+; X87-NEXT: fmuls {{[0-9]+}}(%esp)
+; X87-NEXT: fld %st(1)
+; X87-NEXT: fmuls {{[0-9]+}}(%esp)
+; X87-NEXT: fld %st(2)
+; X87-NEXT: fmuls {{[0-9]+}}(%esp)
+; X87-NEXT: fxch %st(3)
+; X87-NEXT: fmuls {{[0-9]+}}(%esp)
+; X87-NEXT: fstps 12(%eax)
+; X87-NEXT: fxch %st(2)
+; X87-NEXT: fstps 8(%eax)
+; X87-NEXT: fxch %st(1)
+; X87-NEXT: fstps 4(%eax)
+; X87-NEXT: fstps (%eax)
+; X87-NEXT: retl $4
+;
+; SSE-LABEL: canon_fp32_varargsv4f32:
+; SSE: # %bb.0:
+; SSE-NEXT: mulps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: canon_fp32_varargsv4f32:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vmulps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: canon_fp32_varargsv4f32:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vbroadcastss {{.*#+}} xmm1 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0]
+; AVX2-NEXT: vmulps %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: retq
+;
+; AVX512F-LABEL: canon_fp32_varargsv4f32:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: vbroadcastss {{.*#+}} xmm1 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0]
+; AVX512F-NEXT: vmulps %xmm1, %xmm0, %xmm0
+; AVX512F-NEXT: retq
+ %canonicalized = call <4 x float> @llvm.canonicalize.v4f32(<4 x float> %a)
+ ret <4 x float> %canonicalized
+}
+
+define <4 x double> @canon_fp64_varargsv4f64(<4 x double> %a) {
+; X87-LABEL: canon_fp64_varargsv4f64:
+; X87: # %bb.0:
+; X87-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X87-NEXT: fld1
+; X87-NEXT: fld %st(0)
+; X87-NEXT: fmull {{[0-9]+}}(%esp)
+; X87-NEXT: fld %st(1)
+; X87-NEXT: fmull {{[0-9]+}}(%esp)
+; X87-NEXT: fld %st(2)
+; X87-NEXT: fmull {{[0-9]+}}(%esp)
+; X87-NEXT: fxch %st(3)
+; X87-NEXT: fmull {{[0-9]+}}(%esp)
+; X87-NEXT: fstpl 24(%eax)
+; X87-NEXT: fxch %st(2)
+; X87-NEXT: fstpl 16(%eax)
+; X87-NEXT: fxch %st(1)
+; X87-NEXT: fstpl 8(%eax)
+; X87-NEXT: fstpl (%eax)
+; X87-NEXT: retl $4
+;
+; SSE-LABEL: canon_fp64_varargsv4f64:
+; SSE: # %bb.0:
+; SSE-NEXT: movapd {{.*#+}} xmm2 = [1.0E+0,1.0E+0]
+; SSE-NEXT: mulpd %xmm2, %xmm0
+; SSE-NEXT: mulpd %xmm2, %xmm1
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: canon_fp64_varargsv4f64:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vmulpd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: canon_fp64_varargsv4f64:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vbroadcastsd {{.*#+}} ymm1 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0]
+; AVX2-NEXT: vmulpd %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512F-LABEL: canon_fp64_varargsv4f64:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: vbroadcastsd {{.*#+}} ymm1 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0]
+; AVX512F-NEXT: vmulpd %ymm1, %ymm0, %ymm0
+; AVX512F-NEXT: retq
+ %canonicalized = call <4 x double> @llvm.canonicalize.v4f64(<4 x double> %a)
+ ret <4 x double> %canonicalized
+}
+
+define <2 x x86_fp80> @canon_fp80_varargsv2fp80(<2 x x86_fp80> %a) {
+; X87-LABEL: canon_fp80_varargsv2fp80:
+; X87: # %bb.0:
+; X87-NEXT: fldt {{[0-9]+}}(%esp)
+; X87-NEXT: fldt {{[0-9]+}}(%esp)
+; X87-NEXT: fld1
+; X87-NEXT: fmul %st, %st(1)
+; X87-NEXT: fmulp %st, %st(2)
+; X87-NEXT: fxch %st(1)
+; X87-NEXT: retl
+;
+; SSE-LABEL: canon_fp80_varargsv2fp80:
+; SSE: # %bb.0:
+; SSE-NEXT: fldt {{[0-9]+}}(%rsp)
+; SSE-NEXT: fldt {{[0-9]+}}(%rsp)
+; SSE-NEXT: fld1
+; SSE-NEXT: fmul %st, %st(1)
+; SSE-NEXT: fmulp %st, %st(2)
+; SSE-NEXT: fxch %st(1)
+; SSE-NEXT: retq
+;
+; AVX-LABEL: canon_fp80_varargsv2fp80:
+; AVX: # %bb.0:
+; AVX-NEXT: fldt {{[0-9]+}}(%rsp)
+; AVX-NEXT: fldt {{[0-9]+}}(%rsp)
+; AVX-NEXT: fld1
+; AVX-NEXT: fmul %st, %st(1)
+; AVX-NEXT: fmulp %st, %st(2)
+; AVX-NEXT: fxch %st(1)
+; AVX-NEXT: retq
+ %canonicalized = call <2 x x86_fp80> @llvm.canonicalize.v2f80(<2 x x86_fp80> %a)
+ ret <2 x x86_fp80> %canonicalized
+}
+
+define void @vec_canonicalize_var_v4f32(<4 x float> addrspace(1)* %out) #1 {
+; X87-LABEL: vec_canonicalize_var_v4f32:
+; X87: # %bb.0:
+; X87-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X87-NEXT: fld1
+; X87-NEXT: fld %st(0)
+; X87-NEXT: fmuls (%eax)
+; X87-NEXT: fld %st(1)
+; X87-NEXT: fmuls 4(%eax)
+; X87-NEXT: fld %st(2)
+; X87-NEXT: fmuls 8(%eax)
+; X87-NEXT: fxch %st(3)
+; X87-NEXT: fmuls 12(%eax)
+; X87-NEXT: fstps 12(%eax)
+; X87-NEXT: fxch %st(2)
+; X87-NEXT: fstps 8(%eax)
+; X87-NEXT: fxch %st(1)
+; X87-NEXT: fstps 4(%eax)
+; X87-NEXT: fstps (%eax)
+; X87-NEXT: retl
+;
+; SSE-LABEL: vec_canonicalize_var_v4f32:
+; SSE: # %bb.0:
+; SSE-NEXT: movaps (%rdi), %xmm0
+; SSE-NEXT: mulps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE-NEXT: movaps %xmm0, (%rdi)
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: vec_canonicalize_var_v4f32:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vmovaps (%rdi), %xmm0
+; AVX1-NEXT: vmulps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vmovaps %xmm0, (%rdi)
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: vec_canonicalize_var_v4f32:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vbroadcastss {{.*#+}} xmm0 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0]
+; AVX2-NEXT: vmulps (%rdi), %xmm0, %xmm0
+; AVX2-NEXT: vmovaps %xmm0, (%rdi)
+; AVX2-NEXT: retq
+;
+; AVX512F-LABEL: vec_canonicalize_var_v4f32:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: vbroadcastss {{.*#+}} xmm0 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0]
+; AVX512F-NEXT: vmulps (%rdi), %xmm0, %xmm0
+; AVX512F-NEXT: vmovaps %xmm0, (%rdi)
+; AVX512F-NEXT: retq
+ %val = load <4 x float>, <4 x float> addrspace(1)* %out
+ %canonicalized = call <4 x float> @llvm.canonicalize.v4f32(<4 x float> %val)
+ store <4 x float> %canonicalized, <4 x float> addrspace(1)* %out
+ ret void
+}
+
+define void @vec_canonicalize_var_v4f64(<4 x double> addrspace(1)* %out) #1 {
+; X87-LABEL: vec_canonicalize_var_v4f64:
+; X87: # %bb.0:
+; X87-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X87-NEXT: fld1
+; X87-NEXT: fld %st(0)
+; X87-NEXT: fmull (%eax)
+; X87-NEXT: fld %st(1)
+; X87-NEXT: fmull 8(%eax)
+; X87-NEXT: fld %st(2)
+; X87-NEXT: fmull 16(%eax)
+; X87-NEXT: fxch %st(3)
+; X87-NEXT: fmull 24(%eax)
+; X87-NEXT: fstpl 24(%eax)
+; X87-NEXT: fxch %st(2)
+; X87-NEXT: fstpl 16(%eax)
+; X87-NEXT: fxch %st(1)
+; X87-NEXT: fstpl 8(%eax)
+; X87-NEXT: fstpl (%eax)
+; X87-NEXT: retl
+;
+; SSE-LABEL: vec_canonicalize_var_v4f64:
+; SSE: # %bb.0:
+; SSE-NEXT: movapd {{.*#+}} xmm0 = [1.0E+0,1.0E+0]
+; SSE-NEXT: movapd 16(%rdi), %xmm1
+; SSE-NEXT: mulpd %xmm0, %xmm1
+; SSE-NEXT: mulpd (%rdi), %xmm0
+; SSE-NEXT: movapd %xmm0, (%rdi)
+; SSE-NEXT: movapd %xmm1, 16(%rdi)
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: vec_canonicalize_var_v4f64:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vmovapd (%rdi), %ymm0
+; AVX1-NEXT: vmulpd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; AVX1-NEXT: vmovapd %ymm0, (%rdi)
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: vec_canonicalize_var_v4f64:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vbroadcastsd {{.*#+}} ymm0 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0]
+; AVX2-NEXT: vmulpd (%rdi), %ymm0, %ymm0
+; AVX2-NEXT: vmovapd %ymm0, (%rdi)
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512F-LABEL: vec_canonicalize_var_v4f64:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: vbroadcastsd {{.*#+}} ymm0 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0]
+; AVX512F-NEXT: vmulpd (%rdi), %ymm0, %ymm0
+; AVX512F-NEXT: vmovapd %ymm0, (%rdi)
+; AVX512F-NEXT: vzeroupper
+; AVX512F-NEXT: retq
+ %val = load <4 x double>, <4 x double> addrspace(1)* %out
+ %canonicalized = call <4 x double> @llvm.canonicalize.v4f64(<4 x double> %val)
+ store <4 x double> %canonicalized, <4 x double> addrspace(1)* %out
+ ret void
+}
+
+define void @vec_canonicalize_x86_fp80(<4 x x86_fp80> addrspace(1)* %out) #1 {
+; X87-LABEL: vec_canonicalize_x86_fp80:
+; X87: # %bb.0:
+; X87-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X87-NEXT: fldt 30(%eax)
+; X87-NEXT: fldt 20(%eax)
+; X87-NEXT: fldt 10(%eax)
+; X87-NEXT: fldt (%eax)
+; X87-NEXT: fld1
+; X87-NEXT: fmul %st, %st(1)
+; X87-NEXT: fmul %st, %st(2)
+; X87-NEXT: fmul %st, %st(3)
+; X87-NEXT: fmulp %st, %st(4)
+; X87-NEXT: fxch %st(3)
+; X87-NEXT: fstpt 30(%eax)
+; X87-NEXT: fxch %st(1)
+; X87-NEXT: fstpt 20(%eax)
+; X87-NEXT: fstpt 10(%eax)
+; X87-NEXT: fstpt (%eax)
+; X87-NEXT: retl
+;
+; SSE-LABEL: vec_canonicalize_x86_fp80:
+; SSE: # %bb.0:
+; SSE-NEXT: fldt 30(%rdi)
+; SSE-NEXT: fldt 20(%rdi)
+; SSE-NEXT: fldt 10(%rdi)
+; SSE-NEXT: fldt (%rdi)
+; SSE-NEXT: fld1
+; SSE-NEXT: fmul %st, %st(1)
+; SSE-NEXT: fmul %st, %st(2)
+; SSE-NEXT: fmul %st, %st(3)
+; SSE-NEXT: fmulp %st, %st(4)
+; SSE-NEXT: fxch %st(3)
+; SSE-NEXT: fstpt 30(%rdi)
+; SSE-NEXT: fxch %st(1)
+; SSE-NEXT: fstpt 20(%rdi)
+; SSE-NEXT: fstpt 10(%rdi)
+; SSE-NEXT: fstpt (%rdi)
+; SSE-NEXT: retq
+;
+; AVX-LABEL: vec_canonicalize_x86_fp80:
+; AVX: # %bb.0:
+; AVX-NEXT: fldt 30(%rdi)
+; AVX-NEXT: fldt 20(%rdi)
+; AVX-NEXT: fldt 10(%rdi)
+; AVX-NEXT: fldt (%rdi)
+; AVX-NEXT: fld1
+; AVX-NEXT: fmul %st, %st(1)
+; AVX-NEXT: fmul %st, %st(2)
+; AVX-NEXT: fmul %st, %st(3)
+; AVX-NEXT: fmulp %st, %st(4)
+; AVX-NEXT: fxch %st(3)
+; AVX-NEXT: fstpt 30(%rdi)
+; AVX-NEXT: fxch %st(1)
+; AVX-NEXT: fstpt 20(%rdi)
+; AVX-NEXT: fstpt 10(%rdi)
+; AVX-NEXT: fstpt (%rdi)
+; AVX-NEXT: retq
+ %val = load <4 x x86_fp80>, <4 x x86_fp80> addrspace(1)* %out
+ %canonicalized = call <4 x x86_fp80> @llvm.canonicalize.v4f80(<4 x x86_fp80> %val)
+ store <4 x x86_fp80> %canonicalized, <4 x x86_fp80> addrspace(1)* %out
+ ret void
+}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; SSE2: {{.*}}
diff --git a/llvm/test/CodeGen/X86/div-rem-pair-recomposition-signed.ll b/llvm/test/CodeGen/X86/div-rem-pair-recomposition-signed.ll
index 1d3b015..c350ed6 100644
--- a/llvm/test/CodeGen/X86/div-rem-pair-recomposition-signed.ll
+++ b/llvm/test/CodeGen/X86/div-rem-pair-recomposition-signed.ll
@@ -174,22 +174,23 @@ define i128 @scalar_i128(i128 %x, i128 %y, ptr %divdst) nounwind {
; X86-LABEL: scalar_i128:
; X86: # %bb.0: # %_udiv-special-cases
; X86-NEXT: pushl %ebp
+; X86-NEXT: movl %esp, %ebp
; X86-NEXT: pushl %ebx
; X86-NEXT: pushl %edi
; X86-NEXT: pushl %esi
-; X86-NEXT: subl $156, %esp
-; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X86-NEXT: movl %esi, %eax
+; X86-NEXT: andl $-16, %esp
+; X86-NEXT: subl $176, %esp
+; X86-NEXT: movl 20(%ebp), %edx
+; X86-NEXT: movl 24(%ebp), %ecx
+; X86-NEXT: movl %ecx, %eax
; X86-NEXT: sarl $31, %eax
-; X86-NEXT: xorl %eax, %esi
-; X86-NEXT: movl %esi, %edi
+; X86-NEXT: xorl %eax, %ecx
+; X86-NEXT: movl %ecx, %edi
; X86-NEXT: xorl %eax, %edx
; X86-NEXT: movl %edx, %esi
-; X86-NEXT: movl %ecx, %edx
+; X86-NEXT: movl 16(%ebp), %edx
; X86-NEXT: xorl %eax, %edx
-; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl 12(%ebp), %ecx
; X86-NEXT: xorl %eax, %ecx
; X86-NEXT: subl %eax, %ecx
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
@@ -198,32 +199,33 @@ define i128 @scalar_i128(i128 %x, i128 %y, ptr %divdst) nounwind {
; X86-NEXT: sbbl %eax, %esi
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: sbbl %eax, %edi
-; X86-NEXT: movl %edi, (%esp) # 4-byte Spill
-; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl 40(%ebp), %ecx
; X86-NEXT: movl %ecx, %edx
; X86-NEXT: sarl $31, %edx
; X86-NEXT: movl %ecx, %esi
; X86-NEXT: xorl %edx, %esi
-; X86-NEXT: movl {{[0-9]+}}(%esp), %ebx
+; X86-NEXT: movl 36(%ebp), %ecx
+; X86-NEXT: xorl %edx, %ecx
+; X86-NEXT: movl 32(%ebp), %ebx
; X86-NEXT: xorl %edx, %ebx
-; X86-NEXT: movl {{[0-9]+}}(%esp), %ebp
-; X86-NEXT: xorl %edx, %ebp
-; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT: movl 28(%ebp), %edi
; X86-NEXT: xorl %edx, %edi
; X86-NEXT: subl %edx, %edi
-; X86-NEXT: sbbl %edx, %ebp
; X86-NEXT: sbbl %edx, %ebx
+; X86-NEXT: sbbl %edx, %ecx
+; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: sbbl %edx, %esi
; X86-NEXT: xorl %eax, %edx
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl %ebp, %eax
+; X86-NEXT: movl %ebx, %eax
; X86-NEXT: orl %esi, %eax
; X86-NEXT: movl %edi, %ecx
-; X86-NEXT: orl %ebx, %ecx
+; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
; X86-NEXT: orl %eax, %ecx
; X86-NEXT: sete %cl
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: orl (%esp), %eax # 4-byte Folded Reload
+; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
; X86-NEXT: orl %eax, %edx
@@ -232,359 +234,357 @@ define i128 @scalar_i128(i128 %x, i128 %y, ptr %divdst) nounwind {
; X86-NEXT: movb %al, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
; X86-NEXT: bsrl %esi, %edx
; X86-NEXT: xorl $31, %edx
-; X86-NEXT: bsrl %ebx, %ecx
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-NEXT: bsrl %eax, %ecx
; X86-NEXT: xorl $31, %ecx
; X86-NEXT: orl $32, %ecx
; X86-NEXT: testl %esi, %esi
; X86-NEXT: cmovnel %edx, %ecx
-; X86-NEXT: bsrl %ebp, %edx
+; X86-NEXT: bsrl %ebx, %edx
; X86-NEXT: xorl $31, %edx
; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: bsrl %edi, %edi
; X86-NEXT: xorl $31, %edi
; X86-NEXT: orl $32, %edi
-; X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: testl %ebp, %ebp
+; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: testl %ebx, %ebx
; X86-NEXT: cmovnel %edx, %edi
; X86-NEXT: orl $64, %edi
-; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl %ebx, %edx
+; X86-NEXT: movl %eax, %edx
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: orl %esi, %edx
; X86-NEXT: cmovnel %ecx, %edi
-; X86-NEXT: movl (%esp), %ebx # 4-byte Reload
-; X86-NEXT: bsrl %ebx, %edx
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-NEXT: bsrl %eax, %edx
; X86-NEXT: xorl $31, %edx
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
-; X86-NEXT: bsrl %ebp, %ecx
+; X86-NEXT: bsrl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
; X86-NEXT: xorl $31, %ecx
; X86-NEXT: orl $32, %ecx
-; X86-NEXT: testl %ebx, %ebx
+; X86-NEXT: testl %eax, %eax
; X86-NEXT: cmovnel %edx, %ecx
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: bsrl %eax, %esi
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X86-NEXT: bsrl %ebx, %esi
; X86-NEXT: xorl $31, %esi
; X86-NEXT: bsrl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
; X86-NEXT: xorl $31, %edx
; X86-NEXT: orl $32, %edx
-; X86-NEXT: testl %eax, %eax
+; X86-NEXT: testl %ebx, %ebx
; X86-NEXT: cmovnel %esi, %edx
; X86-NEXT: orl $64, %edx
-; X86-NEXT: movl %ebp, %esi
-; X86-NEXT: orl %ebx, %esi
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X86-NEXT: orl %eax, %esi
; X86-NEXT: cmovnel %ecx, %edx
; X86-NEXT: xorl %ebx, %ebx
; X86-NEXT: subl %edx, %edi
; X86-NEXT: movl $0, %edx
; X86-NEXT: sbbl %edx, %edx
-; X86-NEXT: movl $0, %eax
-; X86-NEXT: sbbl %eax, %eax
; X86-NEXT: movl $0, %esi
; X86-NEXT: sbbl %esi, %esi
+; X86-NEXT: movl $0, %eax
+; X86-NEXT: sbbl %eax, %eax
; X86-NEXT: movl $127, %ecx
; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: cmpl %edi, %ecx
; X86-NEXT: movl $0, %ecx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: sbbl %edx, %ecx
; X86-NEXT: movl $0, %ecx
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: sbbl %eax, %ecx
-; X86-NEXT: movl $0, %ecx
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: sbbl %esi, %ecx
+; X86-NEXT: movl $0, %ecx
+; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: sbbl %eax, %ecx
; X86-NEXT: setb %cl
; X86-NEXT: orb {{[-0-9]+}}(%e{{[sb]}}p), %cl # 1-byte Folded Reload
-; X86-NEXT: movl (%esp), %edx # 4-byte Reload
-; X86-NEXT: cmovnel %ebx, %edx
-; X86-NEXT: cmovnel %ebx, %ebp
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X86-NEXT: cmovnel %ebx, %esi
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-NEXT: cmovnel %ebx, %eax
+; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT: cmovnel %ebx, %eax
; X86-NEXT: cmovel {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
-; X86-NEXT: movl %ebx, %esi
-; X86-NEXT: jne .LBB4_8
-; X86-NEXT: # %bb.1: # %_udiv-special-cases
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; X86-NEXT: xorl $127, %edi
-; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
-; X86-NEXT: movl %ebx, %ecx
+; X86-NEXT: jne .LBB4_1
+; X86-NEXT: # %bb.8: # %_udiv-special-cases
+; X86-NEXT: movl %edx, %edi
+; X86-NEXT: movl %eax, %edx
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-NEXT: xorl $127, %eax
+; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; X86-NEXT: movl %edi, %ecx
; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
-; X86-NEXT: orl %edi, %ecx
-; X86-NEXT: je .LBB4_8
-; X86-NEXT: # %bb.2: # %udiv-bb1
+; X86-NEXT: orl %eax, %ecx
+; X86-NEXT: movl %edx, %eax
+; X86-NEXT: movl %ebx, %edx
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X86-NEXT: je .LBB4_9
+; X86-NEXT: # %bb.5: # %udiv-bb1
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-NEXT: xorps %xmm0, %xmm0
+; X86-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl (%esp), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: # kill: def $al killed $al killed $eax
-; X86-NEXT: xorb $127, %al
-; X86-NEXT: movb %al, %ch
-; X86-NEXT: andb $7, %ch
+; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X86-NEXT: movl %ebx, %ecx
+; X86-NEXT: xorb $127, %cl
+; X86-NEXT: movl %ecx, %eax
; X86-NEXT: shrb $3, %al
-; X86-NEXT: andb $15, %al
+; X86-NEXT: andb $12, %al
; X86-NEXT: negb %al
-; X86-NEXT: movsbl %al, %edi
-; X86-NEXT: movl 148(%esp,%edi), %edx
-; X86-NEXT: movl 152(%esp,%edi), %esi
-; X86-NEXT: movb %ch, %cl
-; X86-NEXT: shldl %cl, %edx, %esi
-; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: shll %cl, %edx
-; X86-NEXT: notb %cl
-; X86-NEXT: movl 144(%esp,%edi), %eax
-; X86-NEXT: movl %eax, %ebp
-; X86-NEXT: shrl %ebp
-; X86-NEXT: shrl %cl, %ebp
-; X86-NEXT: orl %edx, %ebp
-; X86-NEXT: movl 140(%esp,%edi), %edx
-; X86-NEXT: movb %ch, %cl
+; X86-NEXT: movsbl %al, %eax
+; X86-NEXT: movl 152(%esp,%eax), %esi
+; X86-NEXT: movl 156(%esp,%eax), %edx
+; X86-NEXT: shldl %cl, %esi, %edx
+; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl 144(%esp,%eax), %edx
+; X86-NEXT: movl 148(%esp,%eax), %eax
+; X86-NEXT: shldl %cl, %eax, %esi
; X86-NEXT: shldl %cl, %edx, %eax
; X86-NEXT: shll %cl, %edx
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: addl $1, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: adcl $0, %ebx
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X86-NEXT: addl $1, %ebx
+; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: adcl $0, %edi
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT: adcl $0, %ecx
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
; X86-NEXT: adcl $0, %edx
-; X86-NEXT: jae .LBB4_3
+; X86-NEXT: jae .LBB4_2
; X86-NEXT: # %bb.6:
-; X86-NEXT: xorl %edi, %edi
; X86-NEXT: xorl %ecx, %ecx
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NEXT: jmp .LBB4_7
-; X86-NEXT: .LBB4_3: # %udiv-preheader
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; X86-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; X86-NEXT: xorl %edx, %edx
+; X86-NEXT: movl %esi, %ebx
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; X86-NEXT: movl %esi, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; X86-NEXT: movl %esi, {{[0-9]+}}(%esp)
-; X86-NEXT: movl (%esp), %esi # 4-byte Reload
-; X86-NEXT: movl %esi, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X86-NEXT: jmp .LBB4_7
+; X86-NEXT: .LBB4_1:
+; X86-NEXT: movl %ebx, %edx
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X86-NEXT: jmp .LBB4_9
+; X86-NEXT: .LBB4_2: # %udiv-preheader
+; X86-NEXT: movl %edi, %ebx
+; X86-NEXT: movl %edx, %edi
+; X86-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X86-NEXT: movl %edx, {{[0-9]+}}(%esp)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X86-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X86-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X86-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT: movl %ecx, %eax
+; X86-NEXT: shrb $3, %al
+; X86-NEXT: andb $12, %al
+; X86-NEXT: movzbl %al, %eax
+; X86-NEXT: movl 108(%esp,%eax), %edx
+; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movb %dl, %ch
-; X86-NEXT: andb $7, %ch
-; X86-NEXT: movb %dl, %cl
-; X86-NEXT: shrb $3, %cl
-; X86-NEXT: andb $15, %cl
-; X86-NEXT: movzbl %cl, %edx
-; X86-NEXT: movl 104(%esp,%edx), %ebx
-; X86-NEXT: movl 100(%esp,%edx), %edi
-; X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl %edi, %ebp
-; X86-NEXT: movb %ch, %cl
-; X86-NEXT: shrdl %cl, %ebx, %ebp
-; X86-NEXT: movl 92(%esp,%edx), %esi
+; X86-NEXT: movl 104(%esp,%eax), %ebx
+; X86-NEXT: movl %ebx, %esi
+; X86-NEXT: shrdl %cl, %edx, %esi
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 96(%esp,%edx), %esi
-; X86-NEXT: movl %esi, %edx
-; X86-NEXT: shrl %cl, %edx
-; X86-NEXT: notb %cl
-; X86-NEXT: addl %edi, %edi
-; X86-NEXT: shll %cl, %edi
-; X86-NEXT: orl %edx, %edi
+; X86-NEXT: movl 96(%esp,%eax), %esi
+; X86-NEXT: movl 100(%esp,%eax), %eax
; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movb %ch, %cl
-; X86-NEXT: shrl %cl, %ebx
-; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: shrdl %cl, %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: addl $-1, %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: adcl $-1, %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: adcl $-1, %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl %eax, %edi
+; X86-NEXT: shrdl %cl, %ebx, %edi
+; X86-NEXT: movl %edi, %ebx
+; X86-NEXT: shrl %cl, %edx
+; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-NEXT: shrdl %cl, %eax, %esi
+; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-NEXT: addl $-1, %eax
+; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-NEXT: adcl $-1, %eax
+; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-NEXT: adcl $-1, %eax
+; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: adcl $-1, %ecx
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: xorl %edx, %edx
; X86-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: .p2align 4, 0x90
-; X86-NEXT: .LBB4_4: # %udiv-do-while
+; X86-NEXT: .LBB4_3: # %udiv-do-while
; X86-NEXT: # =>This Inner Loop Header: Depth=1
-; X86-NEXT: movl %ebp, (%esp) # 4-byte Spill
+; X86-NEXT: movl %edx, %esi
+; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-NEXT: shldl $1, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X86-NEXT: movl %ebx, %edx
+; X86-NEXT: shldl $1, %ebx, %eax
+; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
-; X86-NEXT: shldl $1, %ebp, %ebx
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
-; X86-NEXT: shldl $1, %ebp, (%esp) # 4-byte Folded Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NEXT: shldl $1, %edx, %ebp
+; X86-NEXT: shldl $1, %ebx, %edx
+; X86-NEXT: shldl $1, %ecx, %ebx
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; X86-NEXT: shldl $1, %edi, %edx
+; X86-NEXT: shldl $1, %edi, %ecx
+; X86-NEXT: orl %esi, %ecx
+; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: shldl $1, %ecx, %edi
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X86-NEXT: orl %esi, %edi
; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: shldl $1, %eax, %ecx
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X86-NEXT: shldl $1, %edi, %ecx
; X86-NEXT: orl %esi, %ecx
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: addl %edi, %edi
+; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: cmpl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: shldl $1, %ecx, %eax
-; X86-NEXT: orl %esi, %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: addl %ecx, %ecx
-; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: cmpl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: sbbl %ebp, %ecx
+; X86-NEXT: sbbl %edx, %ecx
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: sbbl (%esp), %ecx # 4-byte Folded Reload
+; X86-NEXT: sbbl %eax, %ecx
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: sbbl %ebx, %ecx
+; X86-NEXT: sbbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
; X86-NEXT: sarl $31, %ecx
-; X86-NEXT: movl %ecx, %eax
-; X86-NEXT: andl $1, %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl %ecx, %esi
-; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X86-NEXT: andl $1, %esi
+; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl %ecx, %edi
; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X86-NEXT: movl %ecx, %esi
+; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
; X86-NEXT: movl %ecx, %eax
; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
-; X86-NEXT: subl %ecx, %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: sbbl %eax, %ebp
-; X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl (%esp), %ebp # 4-byte Reload
-; X86-NEXT: sbbl %edi, %ebp
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: sbbl %esi, %ebx
+; X86-NEXT: subl %ecx, %ebx
; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: sbbl %eax, %edx
+; X86-NEXT: movl %edx, %ebx
+; X86-NEXT: sbbl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-NEXT: sbbl %edi, %eax
+; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: addl $-1, %ecx
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-NEXT: adcl $-1, %eax
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
; X86-NEXT: adcl $-1, %edi
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
-; X86-NEXT: adcl $-1, %ebx
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X86-NEXT: adcl $-1, %esi
-; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: orl %esi, %edi
+; X86-NEXT: orl %esi, %eax
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: orl %ebx, %ecx
+; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: orl %edi, %ecx
-; X86-NEXT: jne .LBB4_4
-; X86-NEXT: # %bb.5:
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X86-NEXT: orl %eax, %ecx
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT: jne .LBB4_3
+; X86-NEXT: # %bb.4:
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X86-NEXT: movl %ecx, %esi
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: .LBB4_7: # %udiv-loop-exit
-; X86-NEXT: shldl $1, %ebp, %edx
+; X86-NEXT: shldl $1, %ebx, %esi
+; X86-NEXT: orl %edx, %esi
+; X86-NEXT: shldl $1, %eax, %ebx
+; X86-NEXT: orl %edx, %ebx
+; X86-NEXT: shldl $1, %edi, %eax
+; X86-NEXT: orl %edx, %eax
+; X86-NEXT: movl %edi, %edx
+; X86-NEXT: addl %edi, %edx
; X86-NEXT: orl %ecx, %edx
-; X86-NEXT: shldl $1, %eax, %ebp
-; X86-NEXT: orl %ecx, %ebp
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; X86-NEXT: shldl $1, %esi, %eax
-; X86-NEXT: orl %ecx, %eax
-; X86-NEXT: addl %esi, %esi
-; X86-NEXT: orl %edi, %esi
-; X86-NEXT: .LBB4_8: # %udiv-end
+; X86-NEXT: .LBB4_9: # %udiv-end
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: xorl %ecx, %edx
-; X86-NEXT: xorl %ecx, %ebp
-; X86-NEXT: xorl %ecx, %eax
; X86-NEXT: xorl %ecx, %esi
-; X86-NEXT: subl %ecx, %esi
-; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: xorl %ecx, %ebx
+; X86-NEXT: xorl %ecx, %eax
+; X86-NEXT: xorl %ecx, %edx
+; X86-NEXT: subl %ecx, %edx
; X86-NEXT: sbbl %ecx, %eax
+; X86-NEXT: sbbl %ecx, %ebx
+; X86-NEXT: sbbl %ecx, %esi
+; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl 44(%ebp), %ecx
+; X86-NEXT: movl %edx, (%ecx)
+; X86-NEXT: movl %eax, 4(%ecx)
+; X86-NEXT: movl %ebx, 8(%ecx)
+; X86-NEXT: movl %esi, 12(%ecx)
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: sbbl %ecx, %ebp
-; X86-NEXT: sbbl %ecx, %edx
+; X86-NEXT: movl 28(%ebp), %ecx
+; X86-NEXT: movl %ebx, %edi
+; X86-NEXT: movl %edx, %esi
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: movl %esi, (%ecx)
-; X86-NEXT: movl %eax, 4(%ecx)
-; X86-NEXT: movl %ebp, 8(%ecx)
-; X86-NEXT: movl %edx, 12(%ecx)
-; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: movl %ebp, %edi
; X86-NEXT: mull %ecx
; X86-NEXT: movl %edx, %ebx
-; X86-NEXT: movl %eax, %ebp
+; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl %esi, %eax
; X86-NEXT: mull %ecx
-; X86-NEXT: movl %eax, (%esp) # 4-byte Spill
+; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl %edx, %ecx
-; X86-NEXT: addl %ebp, %ecx
+; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
; X86-NEXT: adcl $0, %ebx
; X86-NEXT: movl %esi, %eax
-; X86-NEXT: movl {{[0-9]+}}(%esp), %ebp
-; X86-NEXT: mull %ebp
+; X86-NEXT: movl 32(%ebp), %esi
+; X86-NEXT: mull %esi
; X86-NEXT: addl %ecx, %eax
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: adcl %ebx, %edx
-; X86-NEXT: movl %edx, %ebx
-; X86-NEXT: setb %cl
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; X86-NEXT: movl %esi, %eax
-; X86-NEXT: mull %ebp
-; X86-NEXT: addl %ebx, %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movzbl %cl, %eax
+; X86-NEXT: movl %edx, %ecx
+; X86-NEXT: setb %bl
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-NEXT: mull %esi
+; X86-NEXT: addl %ecx, %eax
+; X86-NEXT: movl %eax, %ecx
+; X86-NEXT: movzbl %bl, %eax
; X86-NEXT: adcl %eax, %edx
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: imull %eax, %ecx
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X86-NEXT: movl 28(%ebp), %eax
+; X86-NEXT: imull %eax, %ebx
; X86-NEXT: mull %edi
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: imull %ebp, %edi
+; X86-NEXT: imull %esi, %edi
; X86-NEXT: addl %edx, %edi
-; X86-NEXT: addl %ecx, %edi
-; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: movl %eax, %ecx
-; X86-NEXT: imull %esi, %ecx
-; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: addl %ebx, %edi
+; X86-NEXT: movl 36(%ebp), %eax
+; X86-NEXT: movl %eax, %esi
+; X86-NEXT: imull {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X86-NEXT: movl 40(%ebp), %ebx
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NEXT: imull %edx, %esi
+; X86-NEXT: imull %edx, %ebx
; X86-NEXT: mull %edx
-; X86-NEXT: addl %edx, %esi
-; X86-NEXT: addl %ecx, %esi
+; X86-NEXT: addl %edx, %ebx
+; X86-NEXT: addl %esi, %ebx
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
-; X86-NEXT: adcl %edi, %esi
-; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
-; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
-; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: subl (%esp), %ecx # 4-byte Folded Reload
-; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X86-NEXT: sbbl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
-; X86-NEXT: movl {{[0-9]+}}(%esp), %ebx
-; X86-NEXT: sbbl %eax, %ebx
-; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X86-NEXT: sbbl %esi, %edi
-; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: movl %ecx, (%eax)
-; X86-NEXT: movl %edx, 4(%eax)
-; X86-NEXT: movl %ebx, 8(%eax)
-; X86-NEXT: movl %edi, 12(%eax)
-; X86-NEXT: addl $156, %esp
+; X86-NEXT: adcl %edi, %ebx
+; X86-NEXT: addl %ecx, %eax
+; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X86-NEXT: movl 12(%ebp), %edx
+; X86-NEXT: subl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X86-NEXT: movl 16(%ebp), %ecx
+; X86-NEXT: sbbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X86-NEXT: movl 20(%ebp), %edi
+; X86-NEXT: sbbl %eax, %edi
+; X86-NEXT: movl 24(%ebp), %esi
+; X86-NEXT: sbbl %ebx, %esi
+; X86-NEXT: movl 8(%ebp), %eax
+; X86-NEXT: movl %edx, (%eax)
+; X86-NEXT: movl %ecx, 4(%eax)
+; X86-NEXT: movl %edi, 8(%eax)
+; X86-NEXT: movl %esi, 12(%eax)
+; X86-NEXT: leal -12(%ebp), %esp
; X86-NEXT: popl %esi
; X86-NEXT: popl %edi
; X86-NEXT: popl %ebx
diff --git a/llvm/test/CodeGen/X86/div-rem-pair-recomposition-unsigned.ll b/llvm/test/CodeGen/X86/div-rem-pair-recomposition-unsigned.ll
index 58ea70e..16dc1d6 100644
--- a/llvm/test/CodeGen/X86/div-rem-pair-recomposition-unsigned.ll
+++ b/llvm/test/CodeGen/X86/div-rem-pair-recomposition-unsigned.ll
@@ -174,379 +174,370 @@ define i128 @scalar_i128(i128 %x, i128 %y, ptr %divdst) nounwind {
; X86-LABEL: scalar_i128:
; X86: # %bb.0: # %_udiv-special-cases
; X86-NEXT: pushl %ebp
+; X86-NEXT: movl %esp, %ebp
; X86-NEXT: pushl %ebx
; X86-NEXT: pushl %edi
; X86-NEXT: pushl %esi
-; X86-NEXT: subl $136, %esp
-; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X86-NEXT: movl {{[0-9]+}}(%esp), %ebp
-; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: orl %edi, %eax
-; X86-NEXT: movl %ebp, %ecx
-; X86-NEXT: orl %esi, %ecx
+; X86-NEXT: andl $-16, %esp
+; X86-NEXT: subl $160, %esp
+; X86-NEXT: movl 28(%ebp), %ebx
+; X86-NEXT: movl 40(%ebp), %esi
+; X86-NEXT: movl 32(%ebp), %edi
+; X86-NEXT: movl %edi, %eax
+; X86-NEXT: orl %esi, %eax
+; X86-NEXT: movl %ebx, %ecx
+; X86-NEXT: orl 36(%ebp), %ecx
; X86-NEXT: orl %eax, %ecx
-; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: sete %bl
-; X86-NEXT: orl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X86-NEXT: orl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: sete %cl
+; X86-NEXT: movl 16(%ebp), %eax
+; X86-NEXT: orl 24(%ebp), %eax
+; X86-NEXT: movl 12(%ebp), %edx
+; X86-NEXT: orl 20(%ebp), %edx
; X86-NEXT: orl %eax, %edx
; X86-NEXT: sete %al
-; X86-NEXT: orb %bl, %al
-; X86-NEXT: movb %al, (%esp) # 1-byte Spill
-; X86-NEXT: bsrl %edi, %edx
+; X86-NEXT: orb %cl, %al
+; X86-NEXT: movb %al, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
+; X86-NEXT: bsrl %esi, %edx
; X86-NEXT: xorl $31, %edx
-; X86-NEXT: bsrl %esi, %ecx
+; X86-NEXT: bsrl 36(%ebp), %ecx
; X86-NEXT: xorl $31, %ecx
; X86-NEXT: orl $32, %ecx
-; X86-NEXT: testl %edi, %edi
-; X86-NEXT: movl %edi, %ebx
+; X86-NEXT: testl %esi, %esi
; X86-NEXT: cmovnel %edx, %ecx
-; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: bsrl %eax, %edx
+; X86-NEXT: bsrl %edi, %edx
; X86-NEXT: xorl $31, %edx
-; X86-NEXT: bsrl %ebp, %ebp
-; X86-NEXT: movl %esi, %edi
-; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X86-NEXT: xorl $31, %ebp
-; X86-NEXT: orl $32, %ebp
-; X86-NEXT: testl %eax, %eax
-; X86-NEXT: cmovnel %edx, %ebp
-; X86-NEXT: orl $64, %ebp
-; X86-NEXT: movl %edi, %edx
-; X86-NEXT: orl %ebx, %edx
-; X86-NEXT: cmovnel %ecx, %ebp
-; X86-NEXT: bsrl %esi, %edx
-; X86-NEXT: movl %esi, %ebx
+; X86-NEXT: bsrl %ebx, %eax
+; X86-NEXT: xorl $31, %eax
+; X86-NEXT: orl $32, %eax
+; X86-NEXT: testl %edi, %edi
+; X86-NEXT: cmovnel %edx, %eax
+; X86-NEXT: orl $64, %eax
+; X86-NEXT: movl 36(%ebp), %edx
+; X86-NEXT: orl %esi, %edx
+; X86-NEXT: cmovnel %ecx, %eax
+; X86-NEXT: movl 24(%ebp), %ebx
+; X86-NEXT: bsrl %ebx, %edx
; X86-NEXT: xorl $31, %edx
-; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: bsrl %eax, %ecx
+; X86-NEXT: movl 20(%ebp), %ecx
+; X86-NEXT: bsrl %ecx, %ecx
; X86-NEXT: xorl $31, %ecx
; X86-NEXT: orl $32, %ecx
-; X86-NEXT: testl %esi, %esi
+; X86-NEXT: testl %ebx, %ebx
; X86-NEXT: cmovnel %edx, %ecx
-; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT: movl 16(%ebp), %edi
; X86-NEXT: bsrl %edi, %esi
; X86-NEXT: xorl $31, %esi
-; X86-NEXT: bsrl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: bsrl 12(%ebp), %edx
; X86-NEXT: xorl $31, %edx
; X86-NEXT: orl $32, %edx
; X86-NEXT: testl %edi, %edi
; X86-NEXT: cmovnel %esi, %edx
; X86-NEXT: orl $64, %edx
-; X86-NEXT: orl %ebx, %eax
+; X86-NEXT: movl 20(%ebp), %edi
+; X86-NEXT: movl %edi, %esi
+; X86-NEXT: orl %ebx, %esi
; X86-NEXT: cmovnel %ecx, %edx
-; X86-NEXT: subl %edx, %ebp
+; X86-NEXT: subl %edx, %eax
; X86-NEXT: movl $0, %edx
; X86-NEXT: sbbl %edx, %edx
+; X86-NEXT: movl $0, %ebx
+; X86-NEXT: sbbl %ebx, %ebx
; X86-NEXT: movl $0, %esi
; X86-NEXT: sbbl %esi, %esi
-; X86-NEXT: movl $0, %edi
-; X86-NEXT: sbbl %edi, %edi
; X86-NEXT: movl $127, %ecx
-; X86-NEXT: cmpl %ebp, %ecx
+; X86-NEXT: cmpl %eax, %ecx
; X86-NEXT: movl $0, %ecx
; X86-NEXT: sbbl %edx, %ecx
; X86-NEXT: movl $0, %ecx
-; X86-NEXT: sbbl %esi, %ecx
+; X86-NEXT: sbbl %ebx, %ecx
; X86-NEXT: movl $0, %ecx
-; X86-NEXT: sbbl %edi, %ecx
+; X86-NEXT: sbbl %esi, %ecx
; X86-NEXT: setb %cl
-; X86-NEXT: orb (%esp), %cl # 1-byte Folded Reload
-; X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl %ebp, %eax
+; X86-NEXT: orb {{[-0-9]+}}(%e{{[sb]}}p), %cl # 1-byte Folded Reload
+; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: xorl $127, %eax
-; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: orl %esi, %eax
+; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: orl %ebx, %eax
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: orl %edi, %edx
+; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: orl %esi, %edx
; X86-NEXT: orl %eax, %edx
; X86-NEXT: sete %al
; X86-NEXT: testb %cl, %cl
-; X86-NEXT: movl %ebx, %edx
-; X86-NEXT: movl $0, %edi
-; X86-NEXT: cmovnel %edi, %edx
-; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X86-NEXT: cmovnel %edi, %esi
-; X86-NEXT: movl {{[0-9]+}}(%esp), %ebp
-; X86-NEXT: cmovnel %edi, %ebp
-; X86-NEXT: movl {{[0-9]+}}(%esp), %ebx
-; X86-NEXT: cmovnel %edi, %ebx
-; X86-NEXT: orb %cl, %al
-; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movb %cl, %ah
+; X86-NEXT: movl 24(%ebp), %ebx
+; X86-NEXT: movl $0, %esi
+; X86-NEXT: cmovnel %esi, %ebx
+; X86-NEXT: movl %edi, %ecx
+; X86-NEXT: cmovnel %esi, %ecx
+; X86-NEXT: movl $0, %edx
+; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl 16(%ebp), %esi
+; X86-NEXT: cmovnel %edx, %esi
+; X86-NEXT: movl 12(%ebp), %edi
+; X86-NEXT: movl %edi, %ecx
+; X86-NEXT: cmovnel %edx, %ecx
+; X86-NEXT: orb %ah, %al
+; X86-NEXT: movl 44(%ebp), %eax
; X86-NEXT: jne .LBB4_7
; X86-NEXT: # %bb.1: # %udiv-bb1
-; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; X86-NEXT: xorps %xmm0, %xmm0
+; X86-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; X86-NEXT: movl 16(%ebp), %eax
; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl 20(%ebp), %edx
+; X86-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-NEXT: movl 24(%ebp), %eax
; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
-; X86-NEXT: movl %ebx, %eax
-; X86-NEXT: xorb $127, %al
-; X86-NEXT: movb %al, %ch
-; X86-NEXT: andb $7, %ch
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X86-NEXT: movl %edx, %ecx
+; X86-NEXT: xorb $127, %cl
+; X86-NEXT: movl %ecx, %eax
; X86-NEXT: shrb $3, %al
-; X86-NEXT: andb $15, %al
+; X86-NEXT: andb $12, %al
; X86-NEXT: negb %al
; X86-NEXT: movsbl %al, %eax
-; X86-NEXT: movl 128(%esp,%eax), %edx
-; X86-NEXT: movl 132(%esp,%eax), %esi
-; X86-NEXT: movb %ch, %cl
-; X86-NEXT: shldl %cl, %edx, %esi
+; X86-NEXT: movl 136(%esp,%eax), %edi
+; X86-NEXT: movl 140(%esp,%eax), %esi
+; X86-NEXT: shldl %cl, %edi, %esi
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: shll %cl, %edx
-; X86-NEXT: notb %cl
-; X86-NEXT: movl 124(%esp,%eax), %ebp
-; X86-NEXT: movl %ebp, %esi
-; X86-NEXT: shrl %esi
-; X86-NEXT: shrl %cl, %esi
-; X86-NEXT: orl %edx, %esi
-; X86-NEXT: movl 120(%esp,%eax), %eax
-; X86-NEXT: movb %ch, %cl
-; X86-NEXT: shldl %cl, %eax, %ebp
-; X86-NEXT: shll %cl, %eax
-; X86-NEXT: movl %eax, (%esp) # 4-byte Spill
-; X86-NEXT: addl $1, %ebx
-; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: adcl $0, %ecx
+; X86-NEXT: movl 128(%esp,%eax), %ebx
+; X86-NEXT: movl 132(%esp,%eax), %eax
+; X86-NEXT: shldl %cl, %eax, %edi
+; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl %eax, %edi
+; X86-NEXT: shldl %cl, %ebx, %edi
+; X86-NEXT: shll %cl, %ebx
+; X86-NEXT: movl %ebx, %ecx
+; X86-NEXT: addl $1, %edx
+; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT: adcl $0, %eax
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
-; X86-NEXT: adcl $0, %ebx
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X86-NEXT: adcl $0, %esi
+; X86-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X86-NEXT: movl 20(%ebp), %ebx
; X86-NEXT: jae .LBB4_2
; X86-NEXT: # %bb.5:
+; X86-NEXT: xorl %edx, %edx
; X86-NEXT: xorl %eax, %eax
-; X86-NEXT: xorl %ecx, %ecx
+; X86-NEXT: movl %edi, %esi
; X86-NEXT: jmp .LBB4_6
; X86-NEXT: .LBB4_2: # %udiv-preheader
-; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X86-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl 12(%ebp), %edx
; X86-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl 16(%ebp), %edx
; X86-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-NEXT: movl %ebx, {{[0-9]+}}(%esp)
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl 24(%ebp), %eax
; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movb %al, %ch
-; X86-NEXT: andb $7, %ch
; X86-NEXT: # kill: def $al killed $al killed $eax
; X86-NEXT: shrb $3, %al
-; X86-NEXT: andb $15, %al
+; X86-NEXT: andb $12, %al
; X86-NEXT: movzbl %al, %eax
-; X86-NEXT: movl 84(%esp,%eax), %ebx
+; X86-NEXT: movl 92(%esp,%eax), %esi
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 80(%esp,%eax), %esi
-; X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl %esi, %edx
-; X86-NEXT: movb %ch, %cl
-; X86-NEXT: shrdl %cl, %ebx, %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 72(%esp,%eax), %ebp
-; X86-NEXT: movl 76(%esp,%eax), %edx
-; X86-NEXT: movl %edx, %eax
-; X86-NEXT: shrl %cl, %eax
-; X86-NEXT: notb %cl
-; X86-NEXT: addl %esi, %esi
-; X86-NEXT: shll %cl, %esi
-; X86-NEXT: orl %eax, %esi
+; X86-NEXT: movl 88(%esp,%eax), %edx
+; X86-NEXT: movl %edx, %ebx
+; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-NEXT: shrdl %cl, %esi, %ebx
+; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl 80(%esp,%eax), %edi
+; X86-NEXT: movl 84(%esp,%eax), %eax
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X86-NEXT: movl %eax, %esi
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT: shrdl %cl, %edx, %esi
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movb %ch, %cl
-; X86-NEXT: shrl %cl, %ebx
-; X86-NEXT: movl %ebx, %edi
-; X86-NEXT: shrdl %cl, %edx, %ebp
-; X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X86-NEXT: shrl %cl, %edx
+; X86-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-NEXT: shrdl %cl, %eax, %edi
+; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl 28(%ebp), %eax
; X86-NEXT: addl $-1, %eax
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl 32(%ebp), %eax
; X86-NEXT: adcl $-1, %eax
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: adcl $-1, %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X86-NEXT: adcl $-1, %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: xorl %ecx, %ecx
+; X86-NEXT: movl 36(%ebp), %esi
+; X86-NEXT: adcl $-1, %esi
+; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl 40(%ebp), %eax
+; X86-NEXT: adcl $-1, %eax
+; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: xorl %eax, %eax
; X86-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: movl (%esp), %edx # 4-byte Reload
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X86-NEXT: .p2align 4, 0x90
; X86-NEXT: .LBB4_3: # %udiv-do-while
; X86-NEXT: # =>This Inner Loop Header: Depth=1
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
-; X86-NEXT: shldl $1, %ebp, %edi
-; X86-NEXT: movl %edi, (%esp) # 4-byte Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
-; X86-NEXT: shldl $1, %ebx, %ebp
+; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; X86-NEXT: shldl $1, %esi, %ebx
+; X86-NEXT: shldl $1, %esi, %edx
+; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
; X86-NEXT: shldl $1, %edi, %esi
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: shldl $1, %eax, %edi
-; X86-NEXT: orl %ecx, %edi
-; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; X86-NEXT: shldl $1, %edi, %eax
-; X86-NEXT: orl %ecx, %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: shldl $1, %edx, %edi
-; X86-NEXT: orl %ecx, %edi
-; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: addl %edx, %edx
-; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
-; X86-NEXT: cmpl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X86-NEXT: shldl $1, %ebx, %edi
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X86-NEXT: shldl $1, %edx, %ebx
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: sbbl %ebx, %ecx
+; X86-NEXT: shldl $1, %ecx, %edx
+; X86-NEXT: orl %eax, %edx
+; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X86-NEXT: shldl $1, %edx, %ecx
+; X86-NEXT: orl %eax, %ecx
+; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT: shldl $1, %ecx, %edx
+; X86-NEXT: orl %eax, %edx
+; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: addl %ecx, %ecx
+; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: cmpl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT: sbbl %edi, %ecx
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: sbbl %ebp, %ecx
+; X86-NEXT: sbbl %esi, %ecx
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: sbbl (%esp), %ecx # 4-byte Folded Reload
+; X86-NEXT: sbbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
; X86-NEXT: sarl $31, %ecx
; X86-NEXT: movl %ecx, %eax
; X86-NEXT: andl $1, %eax
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl %ecx, %eax
-; X86-NEXT: andl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: andl 40(%ebp), %eax
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl %ecx, %edi
-; X86-NEXT: andl {{[0-9]+}}(%esp), %edi
; X86-NEXT: movl %ecx, %eax
-; X86-NEXT: andl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: andl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: subl %ecx, %esi
-; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: sbbl %eax, %ebx
+; X86-NEXT: andl 36(%ebp), %eax
+; X86-NEXT: movl %ecx, %edx
+; X86-NEXT: andl 32(%ebp), %edx
+; X86-NEXT: andl 28(%ebp), %ecx
+; X86-NEXT: subl %ecx, %ebx
; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: sbbl %edi, %ebp
-; X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: sbbl %eax, (%esp) # 4-byte Folded Spill
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X86-NEXT: sbbl %edx, %edi
+; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: sbbl %eax, %esi
+; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X86-NEXT: sbbl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: addl $-1, %ecx
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT: adcl $-1, %eax
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
; X86-NEXT: adcl $-1, %ebx
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; X86-NEXT: adcl $-1, %edi
+; X86-NEXT: adcl $-1, %esi
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: orl %edi, %eax
+; X86-NEXT: orl %esi, %eax
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: orl %ebx, %ecx
-; X86-NEXT: movl (%esp), %edi # 4-byte Reload
; X86-NEXT: orl %eax, %ecx
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT: jne .LBB4_3
; X86-NEXT: # %bb.4:
-; X86-NEXT: movl %edx, (%esp) # 4-byte Spill
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: .LBB4_6: # %udiv-loop-exit
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NEXT: shldl $1, %esi, %edx
-; X86-NEXT: orl %ecx, %edx
-; X86-NEXT: shldl $1, %ebp, %esi
-; X86-NEXT: orl %ecx, %esi
-; X86-NEXT: movl (%esp), %ebx # 4-byte Reload
-; X86-NEXT: shldl $1, %ebx, %ebp
-; X86-NEXT: orl %ecx, %ebp
-; X86-NEXT: addl %ebx, %ebx
+; X86-NEXT: .LBB4_6: # %udiv-loop-exit
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X86-NEXT: shldl $1, %edi, %ebx
; X86-NEXT: orl %eax, %ebx
-; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: shldl $1, %esi, %edi
+; X86-NEXT: orl %eax, %edi
+; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: shldl $1, %ecx, %esi
+; X86-NEXT: orl %eax, %esi
+; X86-NEXT: addl %ecx, %ecx
+; X86-NEXT: orl %edx, %ecx
+; X86-NEXT: movl 44(%ebp), %eax
; X86-NEXT: .LBB4_7: # %udiv-end
-; X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl %ebx, (%eax)
-; X86-NEXT: movl %ebp, 4(%eax)
-; X86-NEXT: movl %esi, 8(%eax)
-; X86-NEXT: movl %edx, 12(%eax)
-; X86-NEXT: movl %ebx, %ecx
-; X86-NEXT: movl %ebx, (%esp) # 4-byte Spill
-; X86-NEXT: movl %esi, %ebx
-; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl %ecx, (%eax)
+; X86-NEXT: movl %esi, 4(%eax)
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X86-NEXT: movl %edx, 8(%eax)
+; X86-NEXT: movl %ebx, 12(%eax)
+; X86-NEXT: movl %esi, %edx
+; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl 36(%ebp), %eax
; X86-NEXT: movl %eax, %esi
-; X86-NEXT: imull %ebp, %esi
-; X86-NEXT: movl %edx, %edi
+; X86-NEXT: imull %edx, %esi
; X86-NEXT: mull %ecx
+; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: addl %esi, %edx
-; X86-NEXT: movl {{[0-9]+}}(%esp), %ebp
-; X86-NEXT: imull %ecx, %ebp
-; X86-NEXT: addl %edx, %ebp
-; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X86-NEXT: movl %esi, %eax
-; X86-NEXT: mull %ebx
-; X86-NEXT: movl %eax, %ecx
-; X86-NEXT: imull %esi, %edi
+; X86-NEXT: movl 40(%ebp), %edi
+; X86-NEXT: imull %ecx, %edi
; X86-NEXT: addl %edx, %edi
-; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: imull %eax, %ebx
-; X86-NEXT: addl %edi, %ebx
+; X86-NEXT: movl 28(%ebp), %eax
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X86-NEXT: mull %esi
+; X86-NEXT: movl %eax, %ecx
+; X86-NEXT: imull 28(%ebp), %ebx
+; X86-NEXT: addl %edx, %ebx
+; X86-NEXT: movl 32(%ebp), %edx
+; X86-NEXT: imull %edx, %esi
+; X86-NEXT: addl %ebx, %esi
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: adcl %ebp, %ebx
-; X86-NEXT: movl (%esp), %ebp # 4-byte Reload
-; X86-NEXT: movl %ebp, %eax
-; X86-NEXT: mull %esi
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: adcl %edi, %esi
+; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X86-NEXT: movl %edi, %eax
+; X86-NEXT: movl 28(%ebp), %ecx
+; X86-NEXT: mull %ecx
+; X86-NEXT: movl %edx, %esi
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: mull %esi
-; X86-NEXT: movl %edx, %edi
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X86-NEXT: movl %ebx, %eax
+; X86-NEXT: mull %ecx
; X86-NEXT: movl %eax, %ecx
-; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
-; X86-NEXT: adcl $0, %edi
-; X86-NEXT: movl %ebp, %eax
-; X86-NEXT: mull {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X86-NEXT: movl %edx, %ebp
+; X86-NEXT: addl %esi, %ecx
+; X86-NEXT: adcl $0, %edx
+; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl %edi, %eax
+; X86-NEXT: mull 32(%ebp)
+; X86-NEXT: movl 16(%ebp), %esi
+; X86-NEXT: movl %edx, %edi
; X86-NEXT: addl %ecx, %eax
-; X86-NEXT: movl %eax, (%esp) # 4-byte Spill
-; X86-NEXT: adcl %edi, %ebp
+; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
; X86-NEXT: setb %cl
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: mull {{[0-9]+}}(%esp)
-; X86-NEXT: addl %ebp, %eax
+; X86-NEXT: movl %ebx, %eax
+; X86-NEXT: mull 32(%ebp)
+; X86-NEXT: addl %edi, %eax
; X86-NEXT: movzbl %cl, %ecx
; X86-NEXT: adcl %ecx, %edx
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
-; X86-NEXT: adcl %ebx, %edx
-; X86-NEXT: subl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
-; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X86-NEXT: sbbl (%esp), %edi # 4-byte Folded Reload
-; X86-NEXT: movl {{[0-9]+}}(%esp), %ebx
-; X86-NEXT: sbbl %eax, %ebx
-; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X86-NEXT: movl 12(%ebp), %ebx
+; X86-NEXT: subl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X86-NEXT: sbbl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X86-NEXT: movl 20(%ebp), %edi
+; X86-NEXT: sbbl %eax, %edi
+; X86-NEXT: movl 24(%ebp), %ecx
; X86-NEXT: sbbl %edx, %ecx
-; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: movl %esi, (%eax)
-; X86-NEXT: movl %edi, 4(%eax)
-; X86-NEXT: movl %ebx, 8(%eax)
+; X86-NEXT: movl 8(%ebp), %eax
+; X86-NEXT: movl %ebx, (%eax)
+; X86-NEXT: movl %esi, 4(%eax)
+; X86-NEXT: movl %edi, 8(%eax)
; X86-NEXT: movl %ecx, 12(%eax)
-; X86-NEXT: addl $136, %esp
+; X86-NEXT: leal -12(%ebp), %esp
; X86-NEXT: popl %esi
; X86-NEXT: popl %edi
; X86-NEXT: popl %ebx
diff --git a/llvm/test/CodeGen/X86/extractelement-fp.ll b/llvm/test/CodeGen/X86/extractelement-fp.ll
index 38162f6..944f6bb 100644
--- a/llvm/test/CodeGen/X86/extractelement-fp.ll
+++ b/llvm/test/CodeGen/X86/extractelement-fp.ll
@@ -1310,15 +1310,14 @@ define float @rcp_v4f32(<4 x float> %x) nounwind {
define float @rcp_v8f32(<8 x float> %x) nounwind {
; X64-LABEL: rcp_v8f32:
; X64: # %bb.0:
-; X64-NEXT: vrcpps %ymm0, %ymm0
-; X64-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; X64-NEXT: vrcpss %xmm0, %xmm0, %xmm0
; X64-NEXT: vzeroupper
; X64-NEXT: retq
;
; X86-LABEL: rcp_v8f32:
; X86: # %bb.0:
; X86-NEXT: pushl %eax
-; X86-NEXT: vrcpps %ymm0, %ymm0
+; X86-NEXT: vrcpss %xmm0, %xmm0, %xmm0
; X86-NEXT: vmovss %xmm0, (%esp)
; X86-NEXT: flds (%esp)
; X86-NEXT: popl %eax
@@ -1351,15 +1350,14 @@ define float @rsqrt_v4f32(<4 x float> %x) nounwind {
define float @rsqrt_v8f32(<8 x float> %x) nounwind {
; X64-LABEL: rsqrt_v8f32:
; X64: # %bb.0:
-; X64-NEXT: vrsqrtps %ymm0, %ymm0
-; X64-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; X64-NEXT: vrsqrtss %xmm0, %xmm0, %xmm0
; X64-NEXT: vzeroupper
; X64-NEXT: retq
;
; X86-LABEL: rsqrt_v8f32:
; X86: # %bb.0:
; X86-NEXT: pushl %eax
-; X86-NEXT: vrsqrtps %ymm0, %ymm0
+; X86-NEXT: vrsqrtss %xmm0, %xmm0, %xmm0
; X86-NEXT: vmovss %xmm0, (%esp)
; X86-NEXT: flds (%esp)
; X86-NEXT: popl %eax
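The two hunks above shrink a packed `vrcpps`/`vrsqrtps` to the scalar `vrcpss`/`vrsqrtss` once only element 0 of the result is extracted. A minimal C sketch of the before/after shapes, assuming AVX is available; the function names are illustrative, not from the patch:

```c
#include <immintrin.h>

/* Sketch of the extractelement-fp.ll change: when only lane 0 of a v8f32
 * rcp (or rsqrt) result is used, the scalar instruction suffices. Both
 * forms should agree, since the packed and scalar estimates come from the
 * same hardware approximation. Compile with -mavx. */
float rcp_lane0_before(__m256 x) {
    return _mm256_cvtss_f32(_mm256_rcp_ps(x));  /* old: full-width vrcpps */
}

float rcp_lane0_after(__m256 x) {
    __m128 lo = _mm256_castps256_ps128(x);      /* lane 0 lives in xmm0   */
    return _mm_cvtss_f32(_mm_rcp_ss(lo));       /* new: scalar vrcpss     */
}
```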
diff --git a/llvm/test/CodeGen/X86/pmulh.ll b/llvm/test/CodeGen/X86/pmulh.ll
index c2a009f..502249a 100644
--- a/llvm/test/CodeGen/X86/pmulh.ll
+++ b/llvm/test/CodeGen/X86/pmulh.ll
@@ -937,6 +937,56 @@ define <16 x i32> @zext_mulhuw_v16i16_lshr(<16 x i16> %a, <16 x i16> %b) {
ret <16 x i32> %d
}
 
+; PR109790
+define <16 x i16> @zext_mulhuw_v16i16_negative_constant(<16 x i16> %a) {
+; SSE-LABEL: zext_mulhuw_v16i16_negative_constant:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa {{.*#+}} xmm2 = [32767,32767,32767,32767,32767,32767,32767,32767]
+; SSE-NEXT: pand %xmm2, %xmm1
+; SSE-NEXT: pand %xmm2, %xmm0
+; SSE-NEXT: movdqa {{.*#+}} xmm2 = [64536,64536,64536,64536,64536,64536,64536,64536]
+; SSE-NEXT: pmulhw %xmm2, %xmm0
+; SSE-NEXT: pmulhw %xmm2, %xmm1
+; SSE-NEXT: retq
+;
+; AVX-LABEL: zext_mulhuw_v16i16_negative_constant:
+; AVX: # %bb.0:
+; AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; AVX-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 # [64536,64536,64536,64536,64536,64536,64536,64536,64536,64536,64536,64536,64536,64536,64536,64536]
+; AVX-NEXT: retq
+ %k = and <16 x i16> %a, <i16 32767, i16 32767, i16 32767, i16 32767, i16 32767, i16 32767, i16 32767, i16 32767, i16 32767, i16 32767, i16 32767, i16 32767, i16 32767, i16 32767, i16 32767, i16 32767>
+ %x = zext nneg <16 x i16> %k to <16 x i32>
+ %m = mul nsw <16 x i32> %x, <i32 -1000, i32 -1000, i32 -1000, i32 -1000, i32 -1000, i32 -1000, i32 -1000, i32 -1000, i32 -1000, i32 -1000, i32 -1000, i32 -1000, i32 -1000, i32 -1000, i32 -1000, i32 -1000>
+ %s = lshr <16 x i32> %m, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
+ %t = trunc nuw <16 x i32> %s to <16 x i16>
+ ret <16 x i16> %t
+}
+
+; PR109790
+define <16 x i16> @zext_mulhuw_v16i16_positive_constant(<16 x i16> %a) {
+; SSE-LABEL: zext_mulhuw_v16i16_positive_constant:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa {{.*#+}} xmm2 = [32767,32767,32767,32767,32767,32767,32767,32767]
+; SSE-NEXT: pand %xmm2, %xmm1
+; SSE-NEXT: pand %xmm2, %xmm0
+; SSE-NEXT: movdqa {{.*#+}} xmm2 = [1000,1000,1000,1000,1000,1000,1000,1000]
+; SSE-NEXT: pmulhw %xmm2, %xmm0
+; SSE-NEXT: pmulhw %xmm2, %xmm1
+; SSE-NEXT: retq
+;
+; AVX-LABEL: zext_mulhuw_v16i16_positive_constant:
+; AVX: # %bb.0:
+; AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; AVX-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 # [1000,1000,1000,1000,1000,1000,1000,1000,1000,1000,1000,1000,1000,1000,1000,1000]
+; AVX-NEXT: retq
+ %k = and <16 x i16> %a, <i16 32767, i16 32767, i16 32767, i16 32767, i16 32767, i16 32767, i16 32767, i16 32767, i16 32767, i16 32767, i16 32767, i16 32767, i16 32767, i16 32767, i16 32767, i16 32767>
+ %x = zext nneg <16 x i16> %k to <16 x i32>
+ %m = mul nuw nsw <16 x i32> %x, <i32 1000, i32 1000, i32 1000, i32 1000, i32 1000, i32 1000, i32 1000, i32 1000, i32 1000, i32 1000, i32 1000, i32 1000, i32 1000, i32 1000, i32 1000, i32 1000>
+ %s = lshr <16 x i32> %m, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
+ %t = trunc nuw nsw <16 x i32> %s to <16 x i16>
+ ret <16 x i16> %t
+}
+
define <16 x i32> @mulhsw_v16i16_lshr(<16 x i16> %a, <16 x i16> %b) {
; SSE2-LABEL: mulhsw_v16i16_lshr:
; SSE2: # %bb.0:
@@ -2056,3 +2106,4 @@ define <8 x i16> @sse2_pmulhu_w_const(<8 x i16> %a0, <8 x i16> %a1) {
ret <8 x i16> %res
}
declare <8 x i16> @llvm.x86.sse2.pmulhu.w(<8 x i16>, <8 x i16>)
+
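The two tests added above exercise the PR109790 fold: after the `and` with 32767, both multiplicands fit in signed i16, so the i32 product equals the signed 16x16 product and its high half is exactly what `pmulhw` computes per lane (the 64536 splat in the CHECK lines is -1000 viewed as an unsigned 16-bit immediate). A small exhaustive check of that identity, with an assumed `mulhi16` helper modeling one `pmulhw` lane:

```c
#include <assert.h>
#include <stdint.h>

/* mulhi16 models one PMULHW lane: the high 16 bits of the signed
 * 16x16 -> 32 product. */
static uint16_t mulhi16(int16_t a, int16_t b) {
    uint32_t p = (uint32_t)((int32_t)a * (int32_t)b);
    return (uint16_t)(p >> 16);
}

int main(void) {
    for (int32_t k = 0; k <= 0x7FFF; ++k) {  /* %k = and %a, 32767 */
        uint32_t m = (uint32_t)(k * -1000);  /* mul nsw in i32     */
        uint16_t s = (uint16_t)(m >> 16);    /* lshr 16 + trunc    */
        assert(s == mulhi16((int16_t)k, -1000));
    }
    return 0;
}
```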
diff --git a/llvm/test/CodeGen/X86/pr38539.ll b/llvm/test/CodeGen/X86/pr38539.ll
index 6fcebdb..fb169a3 100644
--- a/llvm/test/CodeGen/X86/pr38539.ll
+++ b/llvm/test/CodeGen/X86/pr38539.ll
@@ -22,7 +22,7 @@ define void @f() nounwind {
; X86-NEXT: pushl %edi
; X86-NEXT: pushl %esi
; X86-NEXT: andl $-16, %esp
-; X86-NEXT: subl $176, %esp
+; X86-NEXT: subl $160, %esp
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %edi
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
@@ -96,18 +96,16 @@ define void @f() nounwind {
; X86-NEXT: addl $1, %ecx
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: adcl $0, %ebx
-; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: adcl $0, %edx
; X86-NEXT: andl $3, %edx
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movb $65, %cl
; X86-NEXT: subb %al, %cl
-; X86-NEXT: movb %cl, %ch
-; X86-NEXT: andb $7, %ch
-; X86-NEXT: shrb $3, %cl
-; X86-NEXT: andb $15, %cl
-; X86-NEXT: negb %cl
-; X86-NEXT: movsbl %cl, %esi
+; X86-NEXT: movl %ecx, %eax
+; X86-NEXT: shrb $3, %al
+; X86-NEXT: andb $12, %al
+; X86-NEXT: negb %al
+; X86-NEXT: movsbl %al, %esi
; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
@@ -117,29 +115,24 @@ define void @f() nounwind {
; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 136(%esp,%esi), %edi
-; X86-NEXT: movb %ch, %cl
-; X86-NEXT: shll %cl, %edi
-; X86-NEXT: notb %cl
-; X86-NEXT: movl 128(%esp,%esi), %ebx
-; X86-NEXT: movl 132(%esp,%esi), %eax
-; X86-NEXT: movl %eax, %esi
-; X86-NEXT: shrl %esi
-; X86-NEXT: shrl %cl, %esi
-; X86-NEXT: movb %ch, %cl
-; X86-NEXT: shldl %cl, %ebx, %eax
+; X86-NEXT: movl 112(%esp,%esi), %edi
+; X86-NEXT: movl 116(%esp,%esi), %eax
+; X86-NEXT: movl 120(%esp,%esi), %esi
+; X86-NEXT: shldl %cl, %eax, %esi
+; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: shldl %cl, %edi, %eax
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: shll %cl, %ebx
+; X86-NEXT: shll %cl, %edi
+; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-NEXT: orl %edx, %eax
; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: orl %edx, %ecx
-; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X86-NEXT: orl %ebx, %eax
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
; X86-NEXT: je .LBB0_13
; X86-NEXT: # %bb.11: # %udiv-preheader
-; X86-NEXT: andl $3, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: orl %esi, %edi
; X86-NEXT: andl $3, %edi
-; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: andl $3, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
@@ -148,26 +141,20 @@ define void @f() nounwind {
; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movb %al, %ch
-; X86-NEXT: andb $7, %ch
-; X86-NEXT: # kill: def $al killed $al killed $eax
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT: movl %ecx, %eax
; X86-NEXT: shrb $3, %al
-; X86-NEXT: andb $15, %al
-; X86-NEXT: movzbl %al, %edx
-; X86-NEXT: movl 80(%esp,%edx), %edi
-; X86-NEXT: movl 84(%esp,%edx), %eax
-; X86-NEXT: movl %eax, %esi
-; X86-NEXT: movb %ch, %cl
-; X86-NEXT: shrl %cl, %esi
-; X86-NEXT: notb %cl
-; X86-NEXT: movl 88(%esp,%edx), %ebx
-; X86-NEXT: addl %ebx, %ebx
-; X86-NEXT: shll %cl, %ebx
-; X86-NEXT: orl %esi, %ebx
-; X86-NEXT: movb %ch, %cl
-; X86-NEXT: shrdl %cl, %eax, %edi
-; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: andb $12, %al
+; X86-NEXT: movzbl %al, %eax
+; X86-NEXT: movl 72(%esp,%eax), %ebx
+; X86-NEXT: movl 64(%esp,%eax), %esi
+; X86-NEXT: movl 68(%esp,%eax), %edx
+; X86-NEXT: movl %edx, %eax
+; X86-NEXT: shrdl %cl, %ebx, %eax
+; X86-NEXT: movl %eax, %ebx
+; X86-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-NEXT: shrdl %cl, %edx, %esi
+; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT: addl $-1, %eax
@@ -175,70 +162,69 @@ define void @f() nounwind {
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT: adcl $-1, %eax
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: adcl $3, %eax
-; X86-NEXT: andl $3, %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: adcl $3, %edi
+; X86-NEXT: andl $3, %edi
+; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; X86-NEXT: xorl %ecx, %ecx
; X86-NEXT: .p2align 4, 0x90
; X86-NEXT: .LBB0_12: # %udiv-do-while
; X86-NEXT: # =>This Inner Loop Header: Depth=1
+; X86-NEXT: movl %ebx, %esi
; X86-NEXT: shldl $1, %ebx, %ecx
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X86-NEXT: shldl $1, %ebx, %esi
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; X86-NEXT: shldl $1, %edi, %ebx
-; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; X86-NEXT: movl %esi, %eax
-; X86-NEXT: andl $2, %eax
-; X86-NEXT: shrl %eax
-; X86-NEXT: leal (%eax,%edi,2), %edi
+; X86-NEXT: movl %edi, %edx
+; X86-NEXT: andl $2, %edx
+; X86-NEXT: shrl %edx
+; X86-NEXT: leal (%edx,%ebx,2), %ebx
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NEXT: shldl $1, %edx, %esi
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
-; X86-NEXT: orl %ebx, %esi
+; X86-NEXT: shldl $1, %edx, %edi
+; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT: shldl $1, %eax, %edx
-; X86-NEXT: orl %ebx, %edx
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: addl %eax, %eax
; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: andl $3, %esi
-; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: cmpl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: sbbl %ebx, %eax
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; X86-NEXT: sbbl %ecx, %esi
-; X86-NEXT: shll $30, %esi
-; X86-NEXT: movl %esi, %eax
-; X86-NEXT: sarl $30, %eax
-; X86-NEXT: sarl $31, %esi
-; X86-NEXT: shrdl $1, %esi, %eax
-; X86-NEXT: movl %eax, %edx
-; X86-NEXT: andl $1, %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: andl $3, %edi
+; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: cmpl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X86-NEXT: sbbl %esi, %edx
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X86-NEXT: sbbl %ecx, %edx
+; X86-NEXT: shll $30, %edx
+; X86-NEXT: movl %edx, %edi
+; X86-NEXT: sarl $30, %edi
+; X86-NEXT: sarl $31, %edx
+; X86-NEXT: shrdl $1, %edx, %edi
+; X86-NEXT: movl %edi, %eax
+; X86-NEXT: andl $1, %eax
+; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X86-NEXT: movl %edx, %eax
; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
-; X86-NEXT: movl %esi, %edx
; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
-; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
-; X86-NEXT: subl %eax, %edi
-; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: sbbl %esi, %ebx
-; X86-NEXT: sbbl %edx, %ecx
+; X86-NEXT: subl %edi, %ebx
+; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: sbbl %edx, %esi
+; X86-NEXT: movl %esi, %ebx
+; X86-NEXT: sbbl %eax, %ecx
; X86-NEXT: andl $3, %ecx
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT: addl $-1, %eax
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
; X86-NEXT: adcl $-1, %edx
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; X86-NEXT: adcl $3, %esi
-; X86-NEXT: andl $3, %esi
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X86-NEXT: adcl $3, %edi
+; X86-NEXT: andl $3, %edi
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: orl %esi, %eax
+; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: orl %edi, %eax
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: orl %edx, %eax
; X86-NEXT: jne .LBB0_12
diff --git a/llvm/test/CodeGen/X86/pr57673.ll b/llvm/test/CodeGen/X86/pr57673.ll
index cf7717f..4ca8ae9 100644
--- a/llvm/test/CodeGen/X86/pr57673.ll
+++ b/llvm/test/CodeGen/X86/pr57673.ll
@@ -37,7 +37,7 @@ define void @foo() {
; NORMAL-NEXT: {{ $}}
; NORMAL-NEXT: [[MOVUPSrm:%[0-9]+]]:vr128 = MOVUPSrm %stack.1.i, 1, $noreg, 40, $noreg :: (load (s128) from %ir.i4, align 8)
; NORMAL-NEXT: MOVUPSmr $noreg, 1, $noreg, 0, $noreg, killed [[MOVUPSrm]] :: (store (s128) into `ptr null`, align 8)
- ; NORMAL-NEXT: DBG_VALUE $noreg, $noreg, !3, !DIExpression(), debug-location !8
+ ; NORMAL-NEXT: DBG_VALUE_LIST !3, !DIExpression(DW_OP_LLVM_arg, 0, DW_OP_plus_uconst, 40, DW_OP_stack_value), %stack.1.i, %stack.1.i, debug-location !8
; NORMAL-NEXT: [[MOVUPSrm1:%[0-9]+]]:vr128 = MOVUPSrm %stack.1.i, 1, $noreg, 40, $noreg :: (load (s128) from %ir.i6, align 8)
; NORMAL-NEXT: MOVUPSmr $noreg, 1, $noreg, 0, $noreg, killed [[MOVUPSrm1]] :: (store (s128) into `ptr null`, align 8)
; NORMAL-NEXT: {{ $}}
@@ -76,7 +76,7 @@ define void @foo() {
; INSTRREF-NEXT: {{ $}}
; INSTRREF-NEXT: [[MOVUPSrm:%[0-9]+]]:vr128 = MOVUPSrm %stack.1.i, 1, $noreg, 40, $noreg :: (load (s128) from %ir.i4, align 8)
; INSTRREF-NEXT: MOVUPSmr $noreg, 1, $noreg, 0, $noreg, killed [[MOVUPSrm]] :: (store (s128) into `ptr null`, align 8)
- ; INSTRREF-NEXT: DBG_VALUE $noreg, $noreg, !3, !DIExpression(), debug-location !8
+ ; INSTRREF-NEXT: DBG_VALUE_LIST !3, !DIExpression(DW_OP_LLVM_arg, 0, DW_OP_plus_uconst, 40, DW_OP_stack_value), %stack.1.i, %stack.1.i, debug-location !8
; INSTRREF-NEXT: [[MOVUPSrm1:%[0-9]+]]:vr128 = MOVUPSrm %stack.1.i, 1, $noreg, 40, $noreg :: (load (s128) from %ir.i6, align 8)
; INSTRREF-NEXT: MOVUPSmr $noreg, 1, $noreg, 0, $noreg, killed [[MOVUPSrm1]] :: (store (s128) into `ptr null`, align 8)
; INSTRREF-NEXT: {{ $}}
diff --git a/llvm/test/CodeGen/X86/scheduler-backtracking.ll b/llvm/test/CodeGen/X86/scheduler-backtracking.ll
index df3c25a..6be79ed 100644
--- a/llvm/test/CodeGen/X86/scheduler-backtracking.ll
+++ b/llvm/test/CodeGen/X86/scheduler-backtracking.ll
@@ -13,26 +13,24 @@ define i256 @test1(i256 %a) nounwind {
; ILP-LABEL: test1:
; ILP: # %bb.0:
; ILP-NEXT: movq %rdi, %rax
+; ILP-NEXT: xorps %xmm0, %xmm0
+; ILP-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; ILP-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; ILP-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; ILP-NEXT: leal (%rsi,%rsi), %ecx
-; ILP-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; ILP-NEXT: movq $0, -{{[0-9]+}}(%rsp)
+; ILP-NEXT: addb $3, %cl
; ILP-NEXT: movq $0, -{{[0-9]+}}(%rsp)
; ILP-NEXT: movq $1, -{{[0-9]+}}(%rsp)
-; ILP-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; ILP-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; ILP-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; ILP-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; ILP-NEXT: addb $3, %cl
; ILP-NEXT: movl %ecx, %edx
; ILP-NEXT: shrb $3, %dl
-; ILP-NEXT: andb $7, %cl
+; ILP-NEXT: andb $24, %dl
; ILP-NEXT: negb %dl
; ILP-NEXT: movsbq %dl, %rdx
-; ILP-NEXT: movq -16(%rsp,%rdx), %rsi
-; ILP-NEXT: movq -8(%rsp,%rdx), %rdi
+; ILP-NEXT: movq -24(%rsp,%rdx), %rsi
+; ILP-NEXT: movq -16(%rsp,%rdx), %rdi
; ILP-NEXT: shldq %cl, %rsi, %rdi
-; ILP-NEXT: movq -32(%rsp,%rdx), %r8
-; ILP-NEXT: movq -24(%rsp,%rdx), %rdx
+; ILP-NEXT: movq -40(%rsp,%rdx), %r8
+; ILP-NEXT: movq -32(%rsp,%rdx), %rdx
; ILP-NEXT: movq %r8, %r9
; ILP-NEXT: shlq %cl, %r9
; ILP-NEXT: movq %rdx, %r10
@@ -52,27 +50,25 @@ define i256 @test1(i256 %a) nounwind {
; HYBRID-LABEL: test1:
; HYBRID: # %bb.0:
; HYBRID-NEXT: movq %rdi, %rax
-; HYBRID-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; HYBRID-NEXT: movq $0, -{{[0-9]+}}(%rsp)
+; HYBRID-NEXT: xorps %xmm0, %xmm0
+; HYBRID-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; HYBRID-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; HYBRID-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; HYBRID-NEXT: movq $0, -{{[0-9]+}}(%rsp)
; HYBRID-NEXT: movq $1, -{{[0-9]+}}(%rsp)
-; HYBRID-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; HYBRID-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; HYBRID-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; HYBRID-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; HYBRID-NEXT: addl %esi, %esi
-; HYBRID-NEXT: addb $3, %sil
-; HYBRID-NEXT: movl %esi, %ecx
-; HYBRID-NEXT: andb $7, %cl
-; HYBRID-NEXT: shrb $3, %sil
-; HYBRID-NEXT: negb %sil
-; HYBRID-NEXT: movsbq %sil, %rdx
-; HYBRID-NEXT: movq -16(%rsp,%rdx), %rsi
-; HYBRID-NEXT: movq -8(%rsp,%rdx), %rdi
+; HYBRID-NEXT: leal (%rsi,%rsi), %ecx
+; HYBRID-NEXT: addb $3, %cl
+; HYBRID-NEXT: movl %ecx, %edx
+; HYBRID-NEXT: shrb $3, %dl
+; HYBRID-NEXT: andb $24, %dl
+; HYBRID-NEXT: negb %dl
+; HYBRID-NEXT: movsbq %dl, %rdx
+; HYBRID-NEXT: movq -24(%rsp,%rdx), %rsi
+; HYBRID-NEXT: movq -16(%rsp,%rdx), %rdi
; HYBRID-NEXT: shldq %cl, %rsi, %rdi
; HYBRID-NEXT: movq %rdi, 24(%rax)
-; HYBRID-NEXT: movq -32(%rsp,%rdx), %rdi
-; HYBRID-NEXT: movq -24(%rsp,%rdx), %rdx
+; HYBRID-NEXT: movq -40(%rsp,%rdx), %rdi
+; HYBRID-NEXT: movq -32(%rsp,%rdx), %rdx
; HYBRID-NEXT: movq %rdx, %r8
; HYBRID-NEXT: shldq %cl, %rdi, %r8
; HYBRID-NEXT: movq %r8, 8(%rax)
@@ -81,6 +77,7 @@ define i256 @test1(i256 %a) nounwind {
; HYBRID-NEXT: shlq %cl, %rsi
; HYBRID-NEXT: notb %cl
; HYBRID-NEXT: shrq %rdx
+; HYBRID-NEXT: # kill: def $cl killed $cl killed $ecx
; HYBRID-NEXT: shrq %cl, %rdx
; HYBRID-NEXT: orq %rsi, %rdx
; HYBRID-NEXT: movq %rdx, 16(%rax)
@@ -89,27 +86,25 @@ define i256 @test1(i256 %a) nounwind {
; BURR-LABEL: test1:
; BURR: # %bb.0:
; BURR-NEXT: movq %rdi, %rax
-; BURR-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; BURR-NEXT: movq $0, -{{[0-9]+}}(%rsp)
+; BURR-NEXT: xorps %xmm0, %xmm0
+; BURR-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; BURR-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; BURR-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; BURR-NEXT: movq $0, -{{[0-9]+}}(%rsp)
; BURR-NEXT: movq $1, -{{[0-9]+}}(%rsp)
-; BURR-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; BURR-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; BURR-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; BURR-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; BURR-NEXT: addl %esi, %esi
-; BURR-NEXT: addb $3, %sil
-; BURR-NEXT: movl %esi, %ecx
-; BURR-NEXT: andb $7, %cl
-; BURR-NEXT: shrb $3, %sil
-; BURR-NEXT: negb %sil
-; BURR-NEXT: movsbq %sil, %rdx
-; BURR-NEXT: movq -16(%rsp,%rdx), %rsi
-; BURR-NEXT: movq -8(%rsp,%rdx), %rdi
+; BURR-NEXT: leal (%rsi,%rsi), %ecx
+; BURR-NEXT: addb $3, %cl
+; BURR-NEXT: movl %ecx, %edx
+; BURR-NEXT: shrb $3, %dl
+; BURR-NEXT: andb $24, %dl
+; BURR-NEXT: negb %dl
+; BURR-NEXT: movsbq %dl, %rdx
+; BURR-NEXT: movq -24(%rsp,%rdx), %rsi
+; BURR-NEXT: movq -16(%rsp,%rdx), %rdi
; BURR-NEXT: shldq %cl, %rsi, %rdi
; BURR-NEXT: movq %rdi, 24(%rax)
-; BURR-NEXT: movq -32(%rsp,%rdx), %rdi
-; BURR-NEXT: movq -24(%rsp,%rdx), %rdx
+; BURR-NEXT: movq -40(%rsp,%rdx), %rdi
+; BURR-NEXT: movq -32(%rsp,%rdx), %rdx
; BURR-NEXT: movq %rdx, %r8
; BURR-NEXT: shldq %cl, %rdi, %r8
; BURR-NEXT: movq %r8, 8(%rax)
@@ -118,6 +113,7 @@ define i256 @test1(i256 %a) nounwind {
; BURR-NEXT: shlq %cl, %rsi
; BURR-NEXT: notb %cl
; BURR-NEXT: shrq %rdx
+; BURR-NEXT: # kill: def $cl killed $cl killed $ecx
; BURR-NEXT: shrq %cl, %rdx
; BURR-NEXT: orq %rsi, %rdx
; BURR-NEXT: movq %rdx, 16(%rax)
@@ -126,33 +122,31 @@ define i256 @test1(i256 %a) nounwind {
; SRC-LABEL: test1:
; SRC: # %bb.0:
; SRC-NEXT: movq %rdi, %rax
-; SRC-NEXT: addl %esi, %esi
-; SRC-NEXT: addb $3, %sil
-; SRC-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; SRC-NEXT: movq $0, -{{[0-9]+}}(%rsp)
+; SRC-NEXT: leal (%rsi,%rsi), %edx
+; SRC-NEXT: addb $3, %dl
+; SRC-NEXT: xorps %xmm0, %xmm0
+; SRC-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; SRC-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; SRC-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SRC-NEXT: movq $0, -{{[0-9]+}}(%rsp)
; SRC-NEXT: movq $1, -{{[0-9]+}}(%rsp)
-; SRC-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; SRC-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; SRC-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; SRC-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; SRC-NEXT: movl %esi, %edx
-; SRC-NEXT: andb $7, %dl
-; SRC-NEXT: shrb $3, %sil
-; SRC-NEXT: negb %sil
-; SRC-NEXT: movsbq %sil, %rsi
-; SRC-NEXT: movq -16(%rsp,%rsi), %rdi
+; SRC-NEXT: movl %edx, %ecx
+; SRC-NEXT: shrb $3, %cl
+; SRC-NEXT: andb $24, %cl
+; SRC-NEXT: negb %cl
+; SRC-NEXT: movsbq %cl, %rsi
+; SRC-NEXT: movq -24(%rsp,%rsi), %rdi
; SRC-NEXT: movq %rdi, %r8
; SRC-NEXT: movl %edx, %ecx
; SRC-NEXT: shlq %cl, %r8
; SRC-NEXT: notb %cl
-; SRC-NEXT: movq -32(%rsp,%rsi), %r9
-; SRC-NEXT: movq -24(%rsp,%rsi), %r10
+; SRC-NEXT: movq -40(%rsp,%rsi), %r9
+; SRC-NEXT: movq -32(%rsp,%rsi), %r10
; SRC-NEXT: movq %r10, %r11
; SRC-NEXT: shrq %r11
; SRC-NEXT: shrq %cl, %r11
; SRC-NEXT: orq %r8, %r11
-; SRC-NEXT: movq -8(%rsp,%rsi), %rsi
+; SRC-NEXT: movq -16(%rsp,%rsi), %rsi
; SRC-NEXT: movl %edx, %ecx
; SRC-NEXT: shldq %cl, %rdi, %rsi
; SRC-NEXT: movq %r9, %rdi
@@ -171,27 +165,25 @@ define i256 @test1(i256 %a) nounwind {
; LIN-NEXT: addb $3, %dl
; LIN-NEXT: movl %edx, %ecx
; LIN-NEXT: shrb $3, %cl
+; LIN-NEXT: andb $24, %cl
; LIN-NEXT: negb %cl
; LIN-NEXT: movsbq %cl, %rsi
-; LIN-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; LIN-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; LIN-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; LIN-NEXT: movq $0, -{{[0-9]+}}(%rsp)
+; LIN-NEXT: xorps %xmm0, %xmm0
+; LIN-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; LIN-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; LIN-NEXT: movq $1, -{{[0-9]+}}(%rsp)
; LIN-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; LIN-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; LIN-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; LIN-NEXT: movq -32(%rsp,%rsi), %rdi
-; LIN-NEXT: andb $7, %dl
+; LIN-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; LIN-NEXT: movq -40(%rsp,%rsi), %rdi
; LIN-NEXT: movq %rdi, %r8
; LIN-NEXT: movl %edx, %ecx
; LIN-NEXT: shlq %cl, %r8
; LIN-NEXT: movq %r8, (%rax)
-; LIN-NEXT: movq -24(%rsp,%rsi), %r8
+; LIN-NEXT: movq -32(%rsp,%rsi), %r8
; LIN-NEXT: movq %r8, %r9
; LIN-NEXT: shldq %cl, %rdi, %r9
; LIN-NEXT: movq %r9, 8(%rax)
-; LIN-NEXT: movq -16(%rsp,%rsi), %rdi
+; LIN-NEXT: movq -24(%rsp,%rsi), %rdi
; LIN-NEXT: movq %rdi, %r9
; LIN-NEXT: shlq %cl, %r9
; LIN-NEXT: shrq %r8
@@ -199,7 +191,7 @@ define i256 @test1(i256 %a) nounwind {
; LIN-NEXT: shrq %cl, %r8
; LIN-NEXT: orq %r9, %r8
; LIN-NEXT: movq %r8, 16(%rax)
-; LIN-NEXT: movq -8(%rsp,%rsi), %rsi
+; LIN-NEXT: movq -16(%rsp,%rsi), %rsi
; LIN-NEXT: movl %edx, %ecx
; LIN-NEXT: shldq %cl, %rdi, %rsi
; LIN-NEXT: movq %rsi, 24(%rax)
diff --git a/llvm/test/CodeGen/X86/section-stats.ll b/llvm/test/CodeGen/X86/section-stats.ll
index 94d0a96..2cab7d1 100644
--- a/llvm/test/CodeGen/X86/section-stats.ll
+++ b/llvm/test/CodeGen/X86/section-stats.ll
@@ -3,6 +3,8 @@
; CHECK-DAG: 1 elf-object-writer - Total size of SHF_ALLOC text sections
; CHECK-DAG: 1 elf-object-writer - Total size of SHF_ALLOC read-write sections
+; CHECK-DAG: 512 elf-object-writer - Total size of section headers table
+; CHECK-DAG: 64 elf-object-writer - Total size of ELF headers
 
target triple = "x86_64-unknown-linux-gnu"
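The two added counters are easy to cross-check: in ELF64 both the file header and each section header occupy 64 bytes, so the expected 512 corresponds to an eight-entry section header table. A sketch assuming a Linux `<elf.h>`:

```c
#include <elf.h>
#include <stdio.h>

/* Cross-check for the new section-stats.ll counters: an ELF64 file header
 * is 64 bytes and each section header is 64 bytes, so "512" above is
 * consistent with an 8-entry section header table. */
int main(void) {
    printf("Elf64_Ehdr       = %zu bytes\n", sizeof(Elf64_Ehdr));     /* 64  */
    printf("Elf64_Shdr       = %zu bytes\n", sizeof(Elf64_Shdr));     /* 64  */
    printf("8 * Elf64_Shdr   = %zu bytes\n", 8 * sizeof(Elf64_Shdr)); /* 512 */
    return 0;
}
```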
diff --git a/llvm/test/CodeGen/X86/shift-i128.ll b/llvm/test/CodeGen/X86/shift-i128.ll
index 4fbe05c..767bd77 100644
--- a/llvm/test/CodeGen/X86/shift-i128.ll
+++ b/llvm/test/CodeGen/X86/shift-i128.ll
@@ -10,49 +10,45 @@ define void @test_lshr_i128(i128 %x, i128 %a, ptr nocapture %r) nounwind {
; i686-LABEL: test_lshr_i128:
; i686: # %bb.0: # %entry
; i686-NEXT: pushl %ebp
+; i686-NEXT: movl %esp, %ebp
; i686-NEXT: pushl %ebx
; i686-NEXT: pushl %edi
; i686-NEXT: pushl %esi
-; i686-NEXT: subl $32, %esp
-; i686-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; i686-NEXT: movl {{[0-9]+}}(%esp), %eax
-; i686-NEXT: movl {{[0-9]+}}(%esp), %esi
-; i686-NEXT: movl {{[0-9]+}}(%esp), %edi
-; i686-NEXT: movl {{[0-9]+}}(%esp), %ebx
-; i686-NEXT: movl %ebx, {{[0-9]+}}(%esp)
+; i686-NEXT: andl $-16, %esp
+; i686-NEXT: subl $48, %esp
+; i686-NEXT: movl 24(%ebp), %ecx
+; i686-NEXT: movl 8(%ebp), %eax
+; i686-NEXT: movl 12(%ebp), %edx
+; i686-NEXT: movl 16(%ebp), %esi
+; i686-NEXT: movl 20(%ebp), %edi
; i686-NEXT: movl %edi, {{[0-9]+}}(%esp)
; i686-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; i686-NEXT: movl %edx, {{[0-9]+}}(%esp)
; i686-NEXT: movl %eax, (%esp)
; i686-NEXT: movl $0, {{[0-9]+}}(%esp)
; i686-NEXT: movl $0, {{[0-9]+}}(%esp)
; i686-NEXT: movl $0, {{[0-9]+}}(%esp)
; i686-NEXT: movl $0, {{[0-9]+}}(%esp)
; i686-NEXT: movl %ecx, %eax
-; i686-NEXT: andb $7, %al
-; i686-NEXT: shrb $3, %cl
-; i686-NEXT: andb $15, %cl
-; i686-NEXT: movzbl %cl, %ebp
-; i686-NEXT: movl 4(%esp,%ebp), %edx
-; i686-NEXT: movl %edx, %esi
-; i686-NEXT: movl %eax, %ecx
-; i686-NEXT: shrl %cl, %esi
-; i686-NEXT: notb %cl
-; i686-NEXT: movl 8(%esp,%ebp), %ebx
-; i686-NEXT: leal (%ebx,%ebx), %edi
-; i686-NEXT: shll %cl, %edi
-; i686-NEXT: orl %esi, %edi
-; i686-NEXT: movl (%esp,%ebp), %esi
-; i686-NEXT: movl 12(%esp,%ebp), %ebp
-; i686-NEXT: movl %eax, %ecx
-; i686-NEXT: shrdl %cl, %ebp, %ebx
-; i686-NEXT: shrdl %cl, %edx, %esi
-; i686-NEXT: shrl %cl, %ebp
-; i686-NEXT: movl {{[0-9]+}}(%esp), %eax
-; i686-NEXT: movl %ebp, 12(%eax)
-; i686-NEXT: movl %ebx, 8(%eax)
-; i686-NEXT: movl %esi, (%eax)
-; i686-NEXT: movl %edi, 4(%eax)
-; i686-NEXT: addl $32, %esp
+; i686-NEXT: shrb $3, %al
+; i686-NEXT: andb $12, %al
+; i686-NEXT: movzbl %al, %edi
+; i686-NEXT: movl 8(%esp,%edi), %eax
+; i686-NEXT: movl 4(%esp,%edi), %ebx
+; i686-NEXT: movl %ebx, %edx
+; i686-NEXT: shrdl %cl, %eax, %edx
+; i686-NEXT: movl (%esp,%edi), %esi
+; i686-NEXT: movl 12(%esp,%edi), %edi
+; i686-NEXT: shrdl %cl, %edi, %eax
+; i686-NEXT: shrdl %cl, %ebx, %esi
+; i686-NEXT: movl 40(%ebp), %ebx
+; i686-NEXT: # kill: def $cl killed $cl killed $ecx
+; i686-NEXT: shrl %cl, %edi
+; i686-NEXT: movl %edi, 12(%ebx)
+; i686-NEXT: movl %eax, 8(%ebx)
+; i686-NEXT: movl %edx, 4(%ebx)
+; i686-NEXT: movl %esi, (%ebx)
+; i686-NEXT: leal -12(%ebp), %esp
; i686-NEXT: popl %esi
; i686-NEXT: popl %edi
; i686-NEXT: popl %ebx
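The rewritten lshr expansion above spills the value into a zero-padded buffer, rounds the byte offset `amt >> 3` down to a multiple of 4 (the `shrb $3; andb $12` pair), and feeds the unmasked count straight to `shrdl`, which only reads `cl mod 32`. A C sketch of that scheme, assuming `amt < 128`; the helper is illustrative, not LLVM's own code:

```c
#include <stdint.h>
#include <string.h>

/* Variable lshr of an i128 via dword indexing plus 32-bit funnel shifts. */
static void lshr_i128(uint32_t r[4], const uint32_t x[4], unsigned amt) {
    uint32_t buf[8] = {0};            /* value in the low half, zeros above  */
    memcpy(buf, x, 16);
    unsigned word = amt >> 5;         /* dword index, = ((amt>>3) & 12) / 4  */
    unsigned bits = amt & 31;         /* residual count, what shrdl sees     */
    for (unsigned i = 0; i < 4; ++i) {
        uint64_t pair = ((uint64_t)buf[word + i + 1] << 32) | buf[word + i];
        r[i] = (uint32_t)(pair >> bits);  /* one shrdl per result dword      */
    }
}
```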
@@ -81,50 +77,46 @@ define void @test_ashr_i128(i128 %x, i128 %a, ptr nocapture %r) nounwind {
; i686-LABEL: test_ashr_i128:
; i686: # %bb.0: # %entry
; i686-NEXT: pushl %ebp
+; i686-NEXT: movl %esp, %ebp
; i686-NEXT: pushl %ebx
; i686-NEXT: pushl %edi
; i686-NEXT: pushl %esi
-; i686-NEXT: subl $32, %esp
-; i686-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; i686-NEXT: movl {{[0-9]+}}(%esp), %eax
-; i686-NEXT: movl {{[0-9]+}}(%esp), %esi
-; i686-NEXT: movl {{[0-9]+}}(%esp), %edi
-; i686-NEXT: movl {{[0-9]+}}(%esp), %ebx
-; i686-NEXT: movl %ebx, {{[0-9]+}}(%esp)
+; i686-NEXT: andl $-16, %esp
+; i686-NEXT: subl $48, %esp
+; i686-NEXT: movl 24(%ebp), %ecx
+; i686-NEXT: movl 8(%ebp), %eax
+; i686-NEXT: movl 12(%ebp), %edx
+; i686-NEXT: movl 16(%ebp), %esi
+; i686-NEXT: movl 20(%ebp), %edi
; i686-NEXT: movl %edi, {{[0-9]+}}(%esp)
; i686-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; i686-NEXT: movl %edx, {{[0-9]+}}(%esp)
; i686-NEXT: movl %eax, (%esp)
-; i686-NEXT: sarl $31, %ebx
-; i686-NEXT: movl %ebx, {{[0-9]+}}(%esp)
-; i686-NEXT: movl %ebx, {{[0-9]+}}(%esp)
-; i686-NEXT: movl %ebx, {{[0-9]+}}(%esp)
-; i686-NEXT: movl %ebx, {{[0-9]+}}(%esp)
+; i686-NEXT: sarl $31, %edi
+; i686-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; i686-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; i686-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; i686-NEXT: movl %edi, {{[0-9]+}}(%esp)
; i686-NEXT: movl %ecx, %eax
-; i686-NEXT: andb $7, %al
-; i686-NEXT: shrb $3, %cl
-; i686-NEXT: andb $15, %cl
-; i686-NEXT: movzbl %cl, %ebp
-; i686-NEXT: movl 4(%esp,%ebp), %edx
-; i686-NEXT: movl %edx, %esi
-; i686-NEXT: movl %eax, %ecx
-; i686-NEXT: shrl %cl, %esi
-; i686-NEXT: notb %cl
-; i686-NEXT: movl 8(%esp,%ebp), %ebx
-; i686-NEXT: leal (%ebx,%ebx), %edi
-; i686-NEXT: shll %cl, %edi
-; i686-NEXT: orl %esi, %edi
-; i686-NEXT: movl (%esp,%ebp), %esi
-; i686-NEXT: movl 12(%esp,%ebp), %ebp
-; i686-NEXT: movl %eax, %ecx
-; i686-NEXT: shrdl %cl, %ebp, %ebx
-; i686-NEXT: shrdl %cl, %edx, %esi
-; i686-NEXT: sarl %cl, %ebp
-; i686-NEXT: movl {{[0-9]+}}(%esp), %eax
-; i686-NEXT: movl %ebp, 12(%eax)
-; i686-NEXT: movl %ebx, 8(%eax)
-; i686-NEXT: movl %esi, (%eax)
-; i686-NEXT: movl %edi, 4(%eax)
-; i686-NEXT: addl $32, %esp
+; i686-NEXT: shrb $3, %al
+; i686-NEXT: andb $12, %al
+; i686-NEXT: movzbl %al, %edi
+; i686-NEXT: movl 8(%esp,%edi), %eax
+; i686-NEXT: movl 4(%esp,%edi), %ebx
+; i686-NEXT: movl %ebx, %edx
+; i686-NEXT: shrdl %cl, %eax, %edx
+; i686-NEXT: movl (%esp,%edi), %esi
+; i686-NEXT: movl 12(%esp,%edi), %edi
+; i686-NEXT: shrdl %cl, %edi, %eax
+; i686-NEXT: shrdl %cl, %ebx, %esi
+; i686-NEXT: movl 40(%ebp), %ebx
+; i686-NEXT: # kill: def $cl killed $cl killed $ecx
+; i686-NEXT: sarl %cl, %edi
+; i686-NEXT: movl %edi, 12(%ebx)
+; i686-NEXT: movl %eax, 8(%ebx)
+; i686-NEXT: movl %edx, 4(%ebx)
+; i686-NEXT: movl %esi, (%ebx)
+; i686-NEXT: leal -12(%ebp), %esp
; i686-NEXT: popl %esi
; i686-NEXT: popl %edi
; i686-NEXT: popl %ebx
@@ -154,15 +146,17 @@ define void @test_shl_i128(i128 %x, i128 %a, ptr nocapture %r) nounwind {
; i686-LABEL: test_shl_i128:
; i686: # %bb.0: # %entry
; i686-NEXT: pushl %ebp
+; i686-NEXT: movl %esp, %ebp
; i686-NEXT: pushl %ebx
; i686-NEXT: pushl %edi
; i686-NEXT: pushl %esi
-; i686-NEXT: subl $32, %esp
-; i686-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; i686-NEXT: movl {{[0-9]+}}(%esp), %eax
-; i686-NEXT: movl {{[0-9]+}}(%esp), %edx
-; i686-NEXT: movl {{[0-9]+}}(%esp), %esi
-; i686-NEXT: movl {{[0-9]+}}(%esp), %edi
+; i686-NEXT: andl $-16, %esp
+; i686-NEXT: subl $48, %esp
+; i686-NEXT: movl 24(%ebp), %ecx
+; i686-NEXT: movl 8(%ebp), %eax
+; i686-NEXT: movl 12(%ebp), %edx
+; i686-NEXT: movl 16(%ebp), %esi
+; i686-NEXT: movl 20(%ebp), %edi
; i686-NEXT: movl %edi, {{[0-9]+}}(%esp)
; i686-NEXT: movl %esi, {{[0-9]+}}(%esp)
; i686-NEXT: movl %edx, {{[0-9]+}}(%esp)
@@ -172,36 +166,27 @@ define void @test_shl_i128(i128 %x, i128 %a, ptr nocapture %r) nounwind {
; i686-NEXT: movl $0, {{[0-9]+}}(%esp)
; i686-NEXT: movl $0, (%esp)
; i686-NEXT: movl %ecx, %eax
-; i686-NEXT: andb $7, %al
-; i686-NEXT: shrb $3, %cl
-; i686-NEXT: andb $15, %cl
-; i686-NEXT: negb %cl
-; i686-NEXT: movsbl %cl, %ebp
-; i686-NEXT: movl 24(%esp,%ebp), %ebx
-; i686-NEXT: movl %ebx, %edx
-; i686-NEXT: movl %eax, %ecx
-; i686-NEXT: shll %cl, %edx
-; i686-NEXT: notb %cl
-; i686-NEXT: movl 20(%esp,%ebp), %edi
-; i686-NEXT: movl %edi, %esi
-; i686-NEXT: shrl %esi
-; i686-NEXT: shrl %cl, %esi
-; i686-NEXT: orl %edx, %esi
-; i686-NEXT: movl 16(%esp,%ebp), %edx
-; i686-NEXT: movl 28(%esp,%ebp), %ebp
-; i686-NEXT: movl %eax, %ecx
-; i686-NEXT: shldl %cl, %ebx, %ebp
-; i686-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; i686-NEXT: movl %ebp, 12(%ecx)
-; i686-NEXT: movl %edx, %ebx
-; i686-NEXT: movl %eax, %ecx
-; i686-NEXT: shll %cl, %ebx
-; i686-NEXT: shldl %cl, %edx, %edi
-; i686-NEXT: movl {{[0-9]+}}(%esp), %eax
-; i686-NEXT: movl %edi, 4(%eax)
-; i686-NEXT: movl %ebx, (%eax)
-; i686-NEXT: movl %esi, 8(%eax)
-; i686-NEXT: addl $32, %esp
+; i686-NEXT: shrb $3, %al
+; i686-NEXT: andb $12, %al
+; i686-NEXT: negb %al
+; i686-NEXT: movsbl %al, %edi
+; i686-NEXT: movl 20(%esp,%edi), %eax
+; i686-NEXT: movl 24(%esp,%edi), %ebx
+; i686-NEXT: movl %ebx, %esi
+; i686-NEXT: shldl %cl, %eax, %esi
+; i686-NEXT: movl 16(%esp,%edi), %edx
+; i686-NEXT: movl 28(%esp,%edi), %edi
+; i686-NEXT: shldl %cl, %ebx, %edi
+; i686-NEXT: movl 40(%ebp), %ebx
+; i686-NEXT: movl %edi, 12(%ebx)
+; i686-NEXT: movl %esi, 8(%ebx)
+; i686-NEXT: movl %edx, %esi
+; i686-NEXT: shll %cl, %esi
+; i686-NEXT: # kill: def $cl killed $cl killed $ecx
+; i686-NEXT: shldl %cl, %edx, %eax
+; i686-NEXT: movl %eax, 4(%ebx)
+; i686-NEXT: movl %esi, (%ebx)
+; i686-NEXT: leal -12(%ebp), %esp
; i686-NEXT: popl %esi
; i686-NEXT: popl %edi
; i686-NEXT: popl %ebx
@@ -264,104 +249,93 @@ define void @test_lshr_v2i128(<2 x i128> %x, <2 x i128> %a, ptr nocapture %r) no
; i686-LABEL: test_lshr_v2i128:
; i686: # %bb.0: # %entry
; i686-NEXT: pushl %ebp
+; i686-NEXT: movl %esp, %ebp
; i686-NEXT: pushl %ebx
; i686-NEXT: pushl %edi
; i686-NEXT: pushl %esi
-; i686-NEXT: subl $100, %esp
-; i686-NEXT: movl {{[0-9]+}}(%esp), %ebx
-; i686-NEXT: movl {{[0-9]+}}(%esp), %esi
-; i686-NEXT: movl {{[0-9]+}}(%esp), %eax
-; i686-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; i686-NEXT: movl {{[0-9]+}}(%esp), %edx
-; i686-NEXT: movl {{[0-9]+}}(%esp), %edi
-; i686-NEXT: movl {{[0-9]+}}(%esp), %ebp
-; i686-NEXT: movl %ebp, {{[0-9]+}}(%esp)
-; i686-NEXT: movl {{[0-9]+}}(%esp), %ebp
-; i686-NEXT: movl %ebp, {{[0-9]+}}(%esp)
-; i686-NEXT: movl {{[0-9]+}}(%esp), %ebp
-; i686-NEXT: movl %ebp, {{[0-9]+}}(%esp)
-; i686-NEXT: movl {{[0-9]+}}(%esp), %ebp
-; i686-NEXT: movl %ebp, {{[0-9]+}}(%esp)
+; i686-NEXT: andl $-16, %esp
+; i686-NEXT: subl $112, %esp
+; i686-NEXT: movl 40(%ebp), %edx
+; i686-NEXT: movl 24(%ebp), %eax
+; i686-NEXT: movl 28(%ebp), %ecx
+; i686-NEXT: movl 32(%ebp), %esi
+; i686-NEXT: movl 20(%ebp), %edi
; i686-NEXT: movl %edi, {{[0-9]+}}(%esp)
-; i686-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; i686-NEXT: movl 16(%ebp), %edi
+; i686-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; i686-NEXT: movl 12(%ebp), %edi
+; i686-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; i686-NEXT: movl 8(%ebp), %edi
+; i686-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; i686-NEXT: movl 36(%ebp), %edi
+; i686-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; i686-NEXT: movl %esi, {{[0-9]+}}(%esp)
; i686-NEXT: movl %ecx, {{[0-9]+}}(%esp)
; i686-NEXT: movl %eax, {{[0-9]+}}(%esp)
; i686-NEXT: movl $0, {{[0-9]+}}(%esp)
; i686-NEXT: movl $0, {{[0-9]+}}(%esp)
; i686-NEXT: movl $0, {{[0-9]+}}(%esp)
; i686-NEXT: movl $0, {{[0-9]+}}(%esp)
-; i686-NEXT: movl %esi, %ecx
-; i686-NEXT: andl $7, %ecx
+; i686-NEXT: movl %edx, %ebx
+; i686-NEXT: andl $31, %ebx
+; i686-NEXT: shrl $3, %edx
+; i686-NEXT: andl $12, %edx
+; i686-NEXT: movl 40(%esp,%edx), %eax
+; i686-NEXT: movl 36(%esp,%edx), %esi
+; i686-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; i686-NEXT: movl %ebx, %ecx
+; i686-NEXT: shrdl %cl, %eax, %esi
+; i686-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; i686-NEXT: movl 32(%esp,%edx), %ecx
; i686-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; i686-NEXT: shrl $3, %esi
-; i686-NEXT: andl $15, %esi
-; i686-NEXT: movl 40(%esp,%esi), %eax
-; i686-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; i686-NEXT: shrl %cl, %eax
-; i686-NEXT: notl %ecx
-; i686-NEXT: movl 44(%esp,%esi), %edx
-; i686-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; i686-NEXT: addl %edx, %edx
-; i686-NEXT: # kill: def $cl killed $cl killed $ecx
-; i686-NEXT: shll %cl, %edx
-; i686-NEXT: orl %eax, %edx
+; i686-NEXT: movl 44(%esp,%edx), %edx
; i686-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; i686-NEXT: movl 36(%esp,%esi), %eax
+; i686-NEXT: movl %ebx, %ecx
+; i686-NEXT: movl %ebx, %esi
+; i686-NEXT: shrdl %cl, %edx, %eax
; i686-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; i686-NEXT: movl $0, {{[0-9]+}}(%esp)
; i686-NEXT: movl $0, {{[0-9]+}}(%esp)
; i686-NEXT: movl $0, {{[0-9]+}}(%esp)
; i686-NEXT: movl $0, {{[0-9]+}}(%esp)
-; i686-NEXT: movl %ebx, %edx
-; i686-NEXT: andl $7, %edx
-; i686-NEXT: shrl $3, %ebx
-; i686-NEXT: andl $15, %ebx
-; i686-NEXT: movl 72(%esp,%ebx), %ebp
-; i686-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; i686-NEXT: movl %edx, %ecx
-; i686-NEXT: shrl %cl, %ebp
-; i686-NEXT: movl %edx, %ecx
-; i686-NEXT: notl %ecx
-; i686-NEXT: movl 76(%esp,%ebx), %eax
-; i686-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; i686-NEXT: leal (%eax,%eax), %edi
-; i686-NEXT: # kill: def $cl killed $cl killed $ecx
-; i686-NEXT: shll %cl, %edi
-; i686-NEXT: orl %ebp, %edi
-; i686-NEXT: movl 48(%esp,%esi), %esi
-; i686-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; i686-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; i686-NEXT: movl 56(%ebp), %edx
+; i686-NEXT: movl %edx, %eax
+; i686-NEXT: andl $31, %eax
+; i686-NEXT: shrl $3, %edx
+; i686-NEXT: andl $12, %edx
+; i686-NEXT: movl 72(%esp,%edx), %ebx
+; i686-NEXT: movl 68(%esp,%edx), %edi
+; i686-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; i686-NEXT: movl %eax, %ecx
+; i686-NEXT: shrdl %cl, %ebx, %edi
+; i686-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; i686-NEXT: movl 64(%esp,%edx), %edi
+; i686-NEXT: movl 76(%esp,%edx), %edx
+; i686-NEXT: shrdl %cl, %edx, %ebx
+; i686-NEXT: movl %esi, %ecx
+; i686-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; i686-NEXT: shrdl %cl, %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; i686-NEXT: movl 68(%esp,%ebx), %ecx
-; i686-NEXT: movl %ecx, (%esp) # 4-byte Spill
-; i686-NEXT: movl 80(%esp,%ebx), %esi
-; i686-NEXT: movl %edx, %ecx
-; i686-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
-; i686-NEXT: shrdl %cl, %esi, %ebx
+; i686-NEXT: # kill: def $cl killed $cl killed $ecx
+; i686-NEXT: shrl %cl, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; i686-NEXT: movl %eax, %ecx
-; i686-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
-; i686-NEXT: shrdl %cl, %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; i686-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
-; i686-NEXT: shrl %cl, %ebp
-; i686-NEXT: movl %edx, %ecx
-; i686-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; i686-NEXT: shrdl %cl, %eax, (%esp) # 4-byte Folded Spill
-; i686-NEXT: shrl %cl, %esi
-; i686-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; i686-NEXT: movl %esi, 28(%ecx)
-; i686-NEXT: movl %ebx, 24(%ecx)
-; i686-NEXT: movl (%esp), %eax # 4-byte Reload
-; i686-NEXT: movl %eax, 16(%ecx)
-; i686-NEXT: movl %ebp, 12(%ecx)
-; i686-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; i686-NEXT: movl %edx, 8(%ecx)
-; i686-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; i686-NEXT: movl %edx, (%ecx)
-; i686-NEXT: movl %edi, 20(%ecx)
-; i686-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; i686-NEXT: movl %eax, 4(%ecx)
-; i686-NEXT: addl $100, %esp
+; i686-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; i686-NEXT: shrdl %cl, %esi, %edi
+; i686-NEXT: shrl %cl, %edx
+; i686-NEXT: movl 72(%ebp), %eax
+; i686-NEXT: movl %edx, 28(%eax)
+; i686-NEXT: movl %ebx, 24(%eax)
+; i686-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; i686-NEXT: movl %ecx, 20(%eax)
+; i686-NEXT: movl %edi, 16(%eax)
+; i686-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; i686-NEXT: movl %ecx, 12(%eax)
+; i686-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; i686-NEXT: movl %ecx, 8(%eax)
+; i686-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; i686-NEXT: movl %ecx, 4(%eax)
+; i686-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; i686-NEXT: movl %ecx, (%eax)
+; i686-NEXT: leal -12(%ebp), %esp
; i686-NEXT: popl %esi
; i686-NEXT: popl %edi
; i686-NEXT: popl %ebx
@@ -402,107 +376,96 @@ define void @test_ashr_v2i128(<2 x i128> %x, <2 x i128> %a, ptr nocapture %r) no
; i686-LABEL: test_ashr_v2i128:
; i686: # %bb.0: # %entry
; i686-NEXT: pushl %ebp
+; i686-NEXT: movl %esp, %ebp
; i686-NEXT: pushl %ebx
; i686-NEXT: pushl %edi
; i686-NEXT: pushl %esi
-; i686-NEXT: subl $92, %esp
-; i686-NEXT: movl {{[0-9]+}}(%esp), %ebp
-; i686-NEXT: movl {{[0-9]+}}(%esp), %edi
-; i686-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; i686-NEXT: movl {{[0-9]+}}(%esp), %edx
-; i686-NEXT: movl {{[0-9]+}}(%esp), %esi
-; i686-NEXT: movl {{[0-9]+}}(%esp), %eax
-; i686-NEXT: movl {{[0-9]+}}(%esp), %ebx
-; i686-NEXT: movl %ebx, {{[0-9]+}}(%esp)
-; i686-NEXT: movl {{[0-9]+}}(%esp), %ebx
-; i686-NEXT: movl %ebx, {{[0-9]+}}(%esp)
-; i686-NEXT: movl {{[0-9]+}}(%esp), %ebx
-; i686-NEXT: movl %ebx, {{[0-9]+}}(%esp)
-; i686-NEXT: movl {{[0-9]+}}(%esp), %ebx
-; i686-NEXT: movl %ebx, {{[0-9]+}}(%esp)
-; i686-NEXT: sarl $31, %ebx
-; i686-NEXT: movl %ebx, {{[0-9]+}}(%esp)
-; i686-NEXT: movl %ebx, {{[0-9]+}}(%esp)
-; i686-NEXT: movl %ebx, {{[0-9]+}}(%esp)
-; i686-NEXT: movl %ebx, {{[0-9]+}}(%esp)
-; i686-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; i686-NEXT: andl $-16, %esp
+; i686-NEXT: subl $112, %esp
+; i686-NEXT: movl 40(%ebp), %edx
+; i686-NEXT: movl 24(%ebp), %eax
+; i686-NEXT: movl 28(%ebp), %ecx
+; i686-NEXT: movl 32(%ebp), %esi
+; i686-NEXT: movl 16(%ebp), %edi
+; i686-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; i686-NEXT: movl 12(%ebp), %edi
+; i686-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; i686-NEXT: movl 8(%ebp), %edi
+; i686-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; i686-NEXT: movl 20(%ebp), %edi
+; i686-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; i686-NEXT: sarl $31, %edi
+; i686-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; i686-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; i686-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; i686-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; i686-NEXT: movl 36(%ebp), %edi
+; i686-NEXT: movl %edi, {{[0-9]+}}(%esp)
; i686-NEXT: movl %esi, {{[0-9]+}}(%esp)
-; i686-NEXT: movl %edx, {{[0-9]+}}(%esp)
; i686-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; i686-NEXT: sarl $31, %eax
-; i686-NEXT: movl %eax, {{[0-9]+}}(%esp)
; i686-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; i686-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; i686-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; i686-NEXT: movl %edi, %ebx
-; i686-NEXT: andl $7, %ebx
-; i686-NEXT: shrl $3, %edi
-; i686-NEXT: andl $15, %edi
-; i686-NEXT: movl 32(%esp,%edi), %eax
-; i686-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; i686-NEXT: movl %ebx, %ecx
-; i686-NEXT: shrl %cl, %eax
+; i686-NEXT: sarl $31, %edi
+; i686-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; i686-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; i686-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; i686-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; i686-NEXT: movl %edx, %eax
+; i686-NEXT: andl $31, %eax
+; i686-NEXT: shrl $3, %edx
+; i686-NEXT: andl $12, %edx
+; i686-NEXT: movl 40(%esp,%edx), %esi
+; i686-NEXT: movl 36(%esp,%edx), %edi
+; i686-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; i686-NEXT: movl %eax, %ecx
+; i686-NEXT: shrdl %cl, %esi, %edi
+; i686-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; i686-NEXT: movl 32(%esp,%edx), %ecx
+; i686-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; i686-NEXT: movl 44(%esp,%edx), %edx
+; i686-NEXT: movl %edx, (%esp) # 4-byte Spill
+; i686-NEXT: movl %eax, %ecx
+; i686-NEXT: shrdl %cl, %edx, %esi
+; i686-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; i686-NEXT: movl 56(%ebp), %edx
+; i686-NEXT: movl %edx, %ebx
+; i686-NEXT: andl $31, %ebx
+; i686-NEXT: shrl $3, %edx
+; i686-NEXT: andl $12, %edx
+; i686-NEXT: movl 72(%esp,%edx), %esi
+; i686-NEXT: movl 68(%esp,%edx), %edi
+; i686-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; i686-NEXT: movl %ebx, %ecx
-; i686-NEXT: notl %ecx
-; i686-NEXT: movl 36(%esp,%edi), %edx
-; i686-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; i686-NEXT: addl %edx, %edx
-; i686-NEXT: # kill: def $cl killed $cl killed $ecx
-; i686-NEXT: shll %cl, %edx
-; i686-NEXT: orl %eax, %edx
-; i686-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; i686-NEXT: movl %ebp, %eax
-; i686-NEXT: movl %ebp, %edx
-; i686-NEXT: andl $7, %edx
-; i686-NEXT: shrl $3, %eax
-; i686-NEXT: andl $15, %eax
-; i686-NEXT: movl 64(%esp,%eax), %ebp
-; i686-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; i686-NEXT: movl %eax, (%esp) # 4-byte Spill
-; i686-NEXT: movl %edx, %ecx
-; i686-NEXT: shrl %cl, %ebp
-; i686-NEXT: movl %edx, %ecx
-; i686-NEXT: notl %ecx
-; i686-NEXT: movl 68(%esp,%eax), %esi
-; i686-NEXT: leal (%esi,%esi), %eax
-; i686-NEXT: # kill: def $cl killed $cl killed $ecx
-; i686-NEXT: shll %cl, %eax
-; i686-NEXT: orl %ebp, %eax
-; i686-NEXT: movl 28(%esp,%edi), %ecx
+; i686-NEXT: shrdl %cl, %esi, %edi
+; i686-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; i686-NEXT: movl 64(%esp,%edx), %ecx
; i686-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; i686-NEXT: movl 40(%esp,%edi), %edi
+; i686-NEXT: movl 76(%esp,%edx), %edx
; i686-NEXT: movl %ebx, %ecx
+; i686-NEXT: shrdl %cl, %edx, %esi
+; i686-NEXT: movl %eax, %ecx
+; i686-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
; i686-NEXT: shrdl %cl, %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; i686-NEXT: movl (%esp), %ecx # 4-byte Reload
-; i686-NEXT: movl 60(%esp,%ecx), %ebp
-; i686-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; i686-NEXT: movl 72(%esp,%ecx), %ebp
-; i686-NEXT: movl %edx, %ecx
-; i686-NEXT: shrdl %cl, %ebp, %esi
-; i686-NEXT: movl %esi, (%esp) # 4-byte Spill
+; i686-NEXT: sarl %cl, (%esp) # 4-byte Folded Spill
; i686-NEXT: movl %ebx, %ecx
-; i686-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; i686-NEXT: shrdl %cl, %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; i686-NEXT: sarl %cl, %edi
-; i686-NEXT: movl %edx, %ecx
-; i686-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; i686-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
-; i686-NEXT: shrdl %cl, %esi, %ebx
-; i686-NEXT: sarl %cl, %ebp
-; i686-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; i686-NEXT: movl %ebp, 28(%ecx)
-; i686-NEXT: movl (%esp), %edx # 4-byte Reload
-; i686-NEXT: movl %edx, 24(%ecx)
-; i686-NEXT: movl %ebx, 16(%ecx)
-; i686-NEXT: movl %edi, 12(%ecx)
-; i686-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; i686-NEXT: movl %edx, 8(%ecx)
-; i686-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; i686-NEXT: movl %edx, (%ecx)
-; i686-NEXT: movl %eax, 20(%ecx)
+; i686-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
; i686-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; i686-NEXT: movl %eax, 4(%ecx)
-; i686-NEXT: addl $92, %esp
+; i686-NEXT: shrdl %cl, %eax, %edi
+; i686-NEXT: sarl %cl, %edx
+; i686-NEXT: movl 72(%ebp), %eax
+; i686-NEXT: movl %edx, 28(%eax)
+; i686-NEXT: movl %esi, 24(%eax)
+; i686-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; i686-NEXT: movl %ecx, 20(%eax)
+; i686-NEXT: movl %edi, 16(%eax)
+; i686-NEXT: movl (%esp), %ecx # 4-byte Reload
+; i686-NEXT: movl %ecx, 12(%eax)
+; i686-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; i686-NEXT: movl %ecx, 8(%eax)
+; i686-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; i686-NEXT: movl %ecx, 4(%eax)
+; i686-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; i686-NEXT: movl %ecx, (%eax)
+; i686-NEXT: leal -12(%ebp), %esp
; i686-NEXT: popl %esi
; i686-NEXT: popl %edi
; i686-NEXT: popl %ebx
@@ -546,112 +509,106 @@ define void @test_shl_v2i128(<2 x i128> %x, <2 x i128> %a, ptr nocapture %r) nou
; i686-LABEL: test_shl_v2i128:
; i686: # %bb.0: # %entry
; i686-NEXT: pushl %ebp
+; i686-NEXT: movl %esp, %ebp
; i686-NEXT: pushl %ebx
; i686-NEXT: pushl %edi
; i686-NEXT: pushl %esi
-; i686-NEXT: subl $100, %esp
-; i686-NEXT: movl {{[0-9]+}}(%esp), %ebp
-; i686-NEXT: movl {{[0-9]+}}(%esp), %eax
-; i686-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; i686-NEXT: movl {{[0-9]+}}(%esp), %edx
-; i686-NEXT: movl {{[0-9]+}}(%esp), %esi
-; i686-NEXT: movl {{[0-9]+}}(%esp), %edi
-; i686-NEXT: movl {{[0-9]+}}(%esp), %ebx
-; i686-NEXT: movl %ebx, {{[0-9]+}}(%esp)
-; i686-NEXT: movl {{[0-9]+}}(%esp), %ebx
-; i686-NEXT: movl %ebx, {{[0-9]+}}(%esp)
-; i686-NEXT: movl {{[0-9]+}}(%esp), %ebx
-; i686-NEXT: movl %ebx, {{[0-9]+}}(%esp)
-; i686-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; i686-NEXT: andl $-16, %esp
+; i686-NEXT: subl $128, %esp
+; i686-NEXT: movl 40(%ebp), %edi
+; i686-NEXT: movl 24(%ebp), %eax
+; i686-NEXT: movl 28(%ebp), %ecx
+; i686-NEXT: movl 32(%ebp), %edx
+; i686-NEXT: movl 20(%ebp), %esi
+; i686-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; i686-NEXT: movl 16(%ebp), %esi
+; i686-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; i686-NEXT: movl 12(%ebp), %esi
+; i686-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; i686-NEXT: movl 8(%ebp), %esi
+; i686-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; i686-NEXT: movl 36(%ebp), %esi
; i686-NEXT: movl %esi, {{[0-9]+}}(%esp)
; i686-NEXT: movl %edx, {{[0-9]+}}(%esp)
; i686-NEXT: movl %ecx, {{[0-9]+}}(%esp)
; i686-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; i686-NEXT: movl %ebp, %ecx
-; i686-NEXT: shrl $3, %ebp
-; i686-NEXT: andl $15, %ebp
+; i686-NEXT: movl %edi, %ebx
+; i686-NEXT: shrl $3, %ebx
+; i686-NEXT: andl $12, %ebx
; i686-NEXT: leal {{[0-9]+}}(%esp), %eax
-; i686-NEXT: subl %ebp, %eax
+; i686-NEXT: subl %ebx, %eax
; i686-NEXT: movl $0, {{[0-9]+}}(%esp)
; i686-NEXT: movl $0, {{[0-9]+}}(%esp)
; i686-NEXT: movl $0, {{[0-9]+}}(%esp)
; i686-NEXT: movl $0, {{[0-9]+}}(%esp)
-; i686-NEXT: movl 8(%eax), %edx
-; i686-NEXT: movl %edx, (%esp) # 4-byte Spill
-; i686-NEXT: andl $7, %ecx
+; i686-NEXT: movl (%eax), %esi
+; i686-NEXT: movl 4(%eax), %edx
+; i686-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; i686-NEXT: movl 8(%eax), %eax
+; i686-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; i686-NEXT: movl %edi, %ecx
+; i686-NEXT: andl $31, %ecx
; i686-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; i686-NEXT: shll %cl, %edx
-; i686-NEXT: movl 4(%eax), %esi
-; i686-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; i686-NEXT: shrl %esi
-; i686-NEXT: notl %ecx
; i686-NEXT: # kill: def $cl killed $cl killed $ecx
-; i686-NEXT: shrl %cl, %esi
-; i686-NEXT: orl %edx, %esi
-; i686-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; i686-NEXT: movl {{[0-9]+}}(%esp), %ebx
-; i686-NEXT: movl (%eax), %eax
+; i686-NEXT: shldl %cl, %edx, %eax
; i686-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; i686-NEXT: movl %ebx, %edx
+; i686-NEXT: movl 56(%ebp), %eax
+; i686-NEXT: movl %eax, %edx
; i686-NEXT: shrl $3, %edx
-; i686-NEXT: andl $15, %edx
-; i686-NEXT: leal {{[0-9]+}}(%esp), %esi
-; i686-NEXT: subl %edx, %esi
+; i686-NEXT: andl $12, %edx
+; i686-NEXT: leal {{[0-9]+}}(%esp), %ecx
+; i686-NEXT: subl %edx, %ecx
; i686-NEXT: movl $0, {{[0-9]+}}(%esp)
; i686-NEXT: movl $0, {{[0-9]+}}(%esp)
; i686-NEXT: movl $0, {{[0-9]+}}(%esp)
; i686-NEXT: movl $0, {{[0-9]+}}(%esp)
-; i686-NEXT: andl $7, %ebx
-; i686-NEXT: movl 8(%esi), %edi
+; i686-NEXT: movl (%ecx), %edi
; i686-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; i686-NEXT: movl %ebx, %ecx
-; i686-NEXT: shll %cl, %edi
-; i686-NEXT: movl 4(%esi), %eax
+; i686-NEXT: movl 4(%ecx), %edi
+; i686-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; i686-NEXT: movl 8(%ecx), %ecx
+; i686-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; i686-NEXT: andl $31, %eax
; i686-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; i686-NEXT: shrl %eax
-; i686-NEXT: movl %ebx, %ecx
-; i686-NEXT: notl %ecx
+; i686-NEXT: movl %ecx, %eax
+; i686-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; i686-NEXT: # kill: def $cl killed $cl killed $ecx
-; i686-NEXT: shrl %cl, %eax
-; i686-NEXT: orl %edi, %eax
-; i686-NEXT: movl (%esi), %ecx
-; i686-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; i686-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; i686-NEXT: movl %esi, %edi
+; i686-NEXT: shldl %cl, %edi, %eax
+; i686-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; i686-NEXT: movl %esi, %eax
; i686-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; i686-NEXT: shll %cl, %edi
-; i686-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; i686-NEXT: shll %cl, %eax
+; i686-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; i686-NEXT: shldl %cl, %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; i686-NEXT: negl %ebp
-; i686-NEXT: movl 64(%esp,%ebp), %esi
-; i686-NEXT: # kill: def $cl killed $cl killed $ecx
-; i686-NEXT: movl (%esp), %edi # 4-byte Reload
-; i686-NEXT: shldl %cl, %edi, %esi
-; i686-NEXT: movl %esi, (%esp) # 4-byte Spill
+; i686-NEXT: negl %ebx
+; i686-NEXT: movl 76(%esp,%ebx), %ebx
; i686-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; i686-NEXT: movl %esi, %edi
-; i686-NEXT: movl %ebx, %ecx
-; i686-NEXT: shll %cl, %edi
-; i686-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
-; i686-NEXT: shldl %cl, %esi, %ebp
+; i686-NEXT: shldl %cl, %esi, %ebx
+; i686-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; i686-NEXT: movl %edi, %esi
+; i686-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; i686-NEXT: movl %eax, %ecx
+; i686-NEXT: shll %cl, %esi
+; i686-NEXT: shldl %cl, %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; i686-NEXT: negl %edx
-; i686-NEXT: movl 96(%esp,%edx), %edx
-; i686-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
-; i686-NEXT: shldl %cl, %ebx, %edx
-; i686-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; i686-NEXT: movl %edx, 28(%ecx)
-; i686-NEXT: movl %ebp, 20(%ecx)
-; i686-NEXT: movl %edi, 16(%ecx)
-; i686-NEXT: movl (%esp), %edx # 4-byte Reload
-; i686-NEXT: movl %edx, 12(%ecx)
-; i686-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; i686-NEXT: movl %edx, 4(%ecx)
-; i686-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; i686-NEXT: movl %edx, (%ecx)
-; i686-NEXT: movl %eax, 24(%ecx)
+; i686-NEXT: movl 108(%esp,%edx), %edx
; i686-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; i686-NEXT: movl %eax, 8(%ecx)
-; i686-NEXT: addl $100, %esp
+; i686-NEXT: shldl %cl, %eax, %edx
+; i686-NEXT: movl 72(%ebp), %eax
+; i686-NEXT: movl %edx, 28(%eax)
+; i686-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; i686-NEXT: movl %ecx, 24(%eax)
+; i686-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; i686-NEXT: movl %ecx, 20(%eax)
+; i686-NEXT: movl %esi, 16(%eax)
+; i686-NEXT: movl %ebx, 12(%eax)
+; i686-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; i686-NEXT: movl %ecx, 8(%eax)
+; i686-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; i686-NEXT: movl %ecx, 4(%eax)
+; i686-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; i686-NEXT: movl %ecx, (%eax)
+; i686-NEXT: leal -12(%ebp), %esp
; i686-NEXT: popl %esi
; i686-NEXT: popl %edi
; i686-NEXT: popl %ebx
diff --git a/llvm/test/CodeGen/X86/shift-i256.ll b/llvm/test/CodeGen/X86/shift-i256.ll
index e1466ae..128e219 100644
--- a/llvm/test/CodeGen/X86/shift-i256.ll
+++ b/llvm/test/CodeGen/X86/shift-i256.ll
@@ -8,98 +8,78 @@ define void @shift1(i256 %x, i256 %a, ptr nocapture %r) nounwind readnone {
; CHECK-LABEL: shift1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushl %ebp
+; CHECK-NEXT: movl %esp, %ebp
; CHECK-NEXT: pushl %ebx
; CHECK-NEXT: pushl %edi
; CHECK-NEXT: pushl %esi
-; CHECK-NEXT: subl $92, %esp
-; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
-; CHECK-NEXT: movl {{[0-9]+}}(%esp), %edx
-; CHECK-NEXT: movl {{[0-9]+}}(%esp), %edi
-; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ebx
-; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ebp
-; CHECK-NEXT: movl {{[0-9]+}}(%esp), %esi
-; CHECK-NEXT: movl %esi, {{[0-9]+}}(%esp)
-; CHECK-NEXT: movl {{[0-9]+}}(%esp), %esi
-; CHECK-NEXT: movl %esi, {{[0-9]+}}(%esp)
-; CHECK-NEXT: movl {{[0-9]+}}(%esp), %esi
-; CHECK-NEXT: movl %esi, {{[0-9]+}}(%esp)
-; CHECK-NEXT: movl %ebp, {{[0-9]+}}(%esp)
-; CHECK-NEXT: movl %ebx, {{[0-9]+}}(%esp)
+; CHECK-NEXT: andl $-16, %esp
+; CHECK-NEXT: subl $112, %esp
+; CHECK-NEXT: movl 40(%ebp), %ecx
+; CHECK-NEXT: movl 8(%ebp), %eax
+; CHECK-NEXT: movl 12(%ebp), %edx
+; CHECK-NEXT: movl 16(%ebp), %esi
+; CHECK-NEXT: movl 32(%ebp), %edi
+; CHECK-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; CHECK-NEXT: movl 28(%ebp), %edi
+; CHECK-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; CHECK-NEXT: movl 24(%ebp), %edi
; CHECK-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; CHECK-NEXT: movl 20(%ebp), %edi
+; CHECK-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; CHECK-NEXT: movl 36(%ebp), %edi
+; CHECK-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; CHECK-NEXT: movl %esi, {{[0-9]+}}(%esp)
; CHECK-NEXT: movl %edx, {{[0-9]+}}(%esp)
; CHECK-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; CHECK-NEXT: sarl $31, %esi
-; CHECK-NEXT: movl %esi, {{[0-9]+}}(%esp)
-; CHECK-NEXT: movl %esi, {{[0-9]+}}(%esp)
-; CHECK-NEXT: movl %esi, {{[0-9]+}}(%esp)
-; CHECK-NEXT: movl %esi, {{[0-9]+}}(%esp)
-; CHECK-NEXT: movl %esi, {{[0-9]+}}(%esp)
-; CHECK-NEXT: movl %esi, {{[0-9]+}}(%esp)
-; CHECK-NEXT: movl %esi, {{[0-9]+}}(%esp)
-; CHECK-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; CHECK-NEXT: sarl $31, %edi
+; CHECK-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; CHECK-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; CHECK-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; CHECK-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; CHECK-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; CHECK-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; CHECK-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; CHECK-NEXT: movl %edi, {{[0-9]+}}(%esp)
; CHECK-NEXT: movl %ecx, %eax
-; CHECK-NEXT: andb $7, %al
-; CHECK-NEXT: shrb $3, %cl
-; CHECK-NEXT: movzbl %cl, %ebp
-; CHECK-NEXT: movl 32(%esp,%ebp), %esi
+; CHECK-NEXT: shrb $5, %al
+; CHECK-NEXT: movzbl %al, %eax
+; CHECK-NEXT: movl 40(%esp,%eax,4), %edx
+; CHECK-NEXT: movl 36(%esp,%eax,4), %esi
; CHECK-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; CHECK-NEXT: movl %eax, %ecx
-; CHECK-NEXT: shrl %cl, %esi
-; CHECK-NEXT: movl %eax, %edx
-; CHECK-NEXT: notb %dl
-; CHECK-NEXT: movl 36(%esp,%ebp), %ecx
-; CHECK-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; CHECK-NEXT: leal (%ecx,%ecx), %edi
-; CHECK-NEXT: movl %edx, %ecx
-; CHECK-NEXT: shll %cl, %edi
-; CHECK-NEXT: orl %esi, %edi
-; CHECK-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; CHECK-NEXT: movl 40(%esp,%ebp), %esi
+; CHECK-NEXT: shrdl %cl, %edx, %esi
; CHECK-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; CHECK-NEXT: movl %eax, %ecx
-; CHECK-NEXT: shrl %cl, %esi
-; CHECK-NEXT: movl 44(%esp,%ebp), %ecx
-; CHECK-NEXT: movl %ecx, (%esp) # 4-byte Spill
-; CHECK-NEXT: leal (%ecx,%ecx), %edi
-; CHECK-NEXT: movl %edx, %ecx
-; CHECK-NEXT: shll %cl, %edi
-; CHECK-NEXT: orl %esi, %edi
+; CHECK-NEXT: movl 44(%esp,%eax,4), %esi
+; CHECK-NEXT: shrdl %cl, %esi, %edx
+; CHECK-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; CHECK-NEXT: movl 48(%esp,%eax,4), %ebx
+; CHECK-NEXT: shrdl %cl, %ebx, %esi
+; CHECK-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; CHECK-NEXT: movl 52(%esp,%eax,4), %esi
+; CHECK-NEXT: shrdl %cl, %esi, %ebx
+; CHECK-NEXT: movl 56(%esp,%eax,4), %edx
+; CHECK-NEXT: shrdl %cl, %edx, %esi
+; CHECK-NEXT: movl 32(%esp,%eax,4), %edi
; CHECK-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; CHECK-NEXT: movl 48(%esp,%ebp), %ebx
-; CHECK-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; CHECK-NEXT: movl %eax, %ecx
-; CHECK-NEXT: shrl %cl, %ebx
-; CHECK-NEXT: movl 52(%esp,%ebp), %edi
-; CHECK-NEXT: leal (%edi,%edi), %esi
-; CHECK-NEXT: movl %edx, %ecx
-; CHECK-NEXT: shll %cl, %esi
-; CHECK-NEXT: orl %ebx, %esi
-; CHECK-NEXT: movl %eax, %ecx
-; CHECK-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; CHECK-NEXT: shrdl %cl, %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; CHECK-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; CHECK-NEXT: shrdl %cl, %edx, (%esp) # 4-byte Folded Spill
-; CHECK-NEXT: movl 28(%esp,%ebp), %edx
-; CHECK-NEXT: movl 56(%esp,%ebp), %ebx
-; CHECK-NEXT: shrdl %cl, %ebx, %edi
-; CHECK-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
-; CHECK-NEXT: shrdl %cl, %ebp, %edx
-; CHECK-NEXT: sarl %cl, %ebx
-; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
-; CHECK-NEXT: movl %ebx, 28(%eax)
-; CHECK-NEXT: movl %edi, 24(%eax)
-; CHECK-NEXT: movl (%esp), %ecx # 4-byte Reload
-; CHECK-NEXT: movl %ecx, 16(%eax)
-; CHECK-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; CHECK-NEXT: movl %ecx, 8(%eax)
-; CHECK-NEXT: movl %edx, (%eax)
-; CHECK-NEXT: movl %esi, 20(%eax)
-; CHECK-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; CHECK-NEXT: movl %ecx, 12(%eax)
-; CHECK-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; CHECK-NEXT: movl %ecx, 4(%eax)
-; CHECK-NEXT: addl $92, %esp
+; CHECK-NEXT: movl 60(%esp,%eax,4), %eax
+; CHECK-NEXT: shrdl %cl, %eax, %edx
+; CHECK-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; CHECK-NEXT: shrdl %cl, %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
+; CHECK-NEXT: sarl %cl, %eax
+; CHECK-NEXT: movl 72(%ebp), %ecx
+; CHECK-NEXT: movl %eax, 28(%ecx)
+; CHECK-NEXT: movl %edx, 24(%ecx)
+; CHECK-NEXT: movl %esi, 20(%ecx)
+; CHECK-NEXT: movl %ebx, 16(%ecx)
+; CHECK-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; CHECK-NEXT: movl %eax, 12(%ecx)
+; CHECK-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; CHECK-NEXT: movl %eax, 8(%ecx)
+; CHECK-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; CHECK-NEXT: movl %eax, 4(%ecx)
+; CHECK-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; CHECK-NEXT: movl %eax, (%ecx)
+; CHECK-NEXT: leal -12(%ebp), %esp
; CHECK-NEXT: popl %esi
; CHECK-NEXT: popl %edi
; CHECK-NEXT: popl %ebx
@@ -120,42 +100,35 @@ define void @shift1(i256 %x, i256 %a, ptr nocapture %r) nounwind readnone {
; CHECK-X64-O0-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
; CHECK-X64-O0-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
; CHECK-X64-O0-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
-; CHECK-X64-O0-NEXT: movb %r8b, %dl
-; CHECK-X64-O0-NEXT: movb %dl, %cl
-; CHECK-X64-O0-NEXT: andb $7, %cl
+; CHECK-X64-O0-NEXT: movb %r8b, %cl
; CHECK-X64-O0-NEXT: movb %cl, {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Spill
-; CHECK-X64-O0-NEXT: shrb $3, %dl
+; CHECK-X64-O0-NEXT: movb %cl, %dl
+; CHECK-X64-O0-NEXT: shrb $6, %dl
; CHECK-X64-O0-NEXT: movzbl %dl, %edx
; CHECK-X64-O0-NEXT: movl %edx, %edi
-; CHECK-X64-O0-NEXT: movq -64(%rsp,%rdi), %rdx
-; CHECK-X64-O0-NEXT: movq -56(%rsp,%rdi), %r8
-; CHECK-X64-O0-NEXT: movq %r8, %r9
-; CHECK-X64-O0-NEXT: shrq %cl, %r9
-; CHECK-X64-O0-NEXT: movb {{[-0-9]+}}(%r{{[sb]}}p), %cl # 1-byte Reload
-; CHECK-X64-O0-NEXT: notb %cl
-; CHECK-X64-O0-NEXT: movq -48(%rsp,%rdi), %rsi
-; CHECK-X64-O0-NEXT: movq %rsi, %r10
-; CHECK-X64-O0-NEXT: addq %r10, %r10
-; CHECK-X64-O0-NEXT: shlq %cl, %r10
+; CHECK-X64-O0-NEXT: movq -56(%rsp,%rdi,8), %rsi
+; CHECK-X64-O0-NEXT: movq -72(%rsp,%rdi,8), %r8
+; CHECK-X64-O0-NEXT: movq -64(%rsp,%rdi,8), %r9
+; CHECK-X64-O0-NEXT: movq %r9, %rdx
+; CHECK-X64-O0-NEXT: shrdq %cl, %rsi, %rdx
; CHECK-X64-O0-NEXT: movb {{[-0-9]+}}(%r{{[sb]}}p), %cl # 1-byte Reload
-; CHECK-X64-O0-NEXT: orq %r10, %r9
-; CHECK-X64-O0-NEXT: movq %r9, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; CHECK-X64-O0-NEXT: movq -40(%rsp,%rdi), %rdi
+; CHECK-X64-O0-NEXT: movq -48(%rsp,%rdi,8), %rdi
; CHECK-X64-O0-NEXT: shrdq %cl, %rdi, %rsi
; CHECK-X64-O0-NEXT: movb {{[-0-9]+}}(%r{{[sb]}}p), %cl # 1-byte Reload
-; CHECK-X64-O0-NEXT: shrdq %cl, %r8, %rdx
+; CHECK-X64-O0-NEXT: shrdq %cl, %r9, %r8
; CHECK-X64-O0-NEXT: movb {{[-0-9]+}}(%r{{[sb]}}p), %cl # 1-byte Reload
+; CHECK-X64-O0-NEXT: movq %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK-X64-O0-NEXT: sarq %cl, %rdi
; CHECK-X64-O0-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
; CHECK-X64-O0-NEXT: movq %rdi, 24(%rax)
; CHECK-X64-O0-NEXT: movq %rsi, 16(%rax)
-; CHECK-X64-O0-NEXT: movq %rdx, (%rax)
-; CHECK-X64-O0-NEXT: movq %rcx, 8(%rax)
+; CHECK-X64-O0-NEXT: movq %rdx, 8(%rax)
+; CHECK-X64-O0-NEXT: movq %rcx, (%rax)
; CHECK-X64-O0-NEXT: retq
;
; CHECK-X64-O2-LABEL: shift1:
; CHECK-X64-O2: # %bb.0: # %entry
-; CHECK-X64-O2-NEXT: movq {{[0-9]+}}(%rsp), %r9
+; CHECK-X64-O2-NEXT: movq {{[0-9]+}}(%rsp), %rax
; CHECK-X64-O2-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
; CHECK-X64-O2-NEXT: movq %rdx, -{{[0-9]+}}(%rsp)
; CHECK-X64-O2-NEXT: movq %rsi, -{{[0-9]+}}(%rsp)
@@ -165,29 +138,23 @@ define void @shift1(i256 %x, i256 %a, ptr nocapture %r) nounwind readnone {
; CHECK-X64-O2-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
; CHECK-X64-O2-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
; CHECK-X64-O2-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
-; CHECK-X64-O2-NEXT: movl %r8d, %eax
-; CHECK-X64-O2-NEXT: andb $7, %al
-; CHECK-X64-O2-NEXT: shrb $3, %r8b
-; CHECK-X64-O2-NEXT: movzbl %r8b, %edx
-; CHECK-X64-O2-NEXT: movq -64(%rsp,%rdx), %rsi
-; CHECK-X64-O2-NEXT: movq -56(%rsp,%rdx), %rdi
-; CHECK-X64-O2-NEXT: movq %rdi, %r8
-; CHECK-X64-O2-NEXT: movl %eax, %ecx
-; CHECK-X64-O2-NEXT: shrq %cl, %r8
-; CHECK-X64-O2-NEXT: notb %cl
-; CHECK-X64-O2-NEXT: movq -48(%rsp,%rdx), %r10
-; CHECK-X64-O2-NEXT: leaq (%r10,%r10), %r11
-; CHECK-X64-O2-NEXT: shlq %cl, %r11
-; CHECK-X64-O2-NEXT: orq %r8, %r11
-; CHECK-X64-O2-NEXT: movq -40(%rsp,%rdx), %rdx
-; CHECK-X64-O2-NEXT: movl %eax, %ecx
-; CHECK-X64-O2-NEXT: shrdq %cl, %rdx, %r10
-; CHECK-X64-O2-NEXT: shrdq %cl, %rdi, %rsi
+; CHECK-X64-O2-NEXT: movl %r8d, %ecx
+; CHECK-X64-O2-NEXT: shrb $6, %cl
+; CHECK-X64-O2-NEXT: movzbl %cl, %edx
+; CHECK-X64-O2-NEXT: movq -56(%rsp,%rdx,8), %rsi
+; CHECK-X64-O2-NEXT: movq -72(%rsp,%rdx,8), %rdi
+; CHECK-X64-O2-NEXT: movq -64(%rsp,%rdx,8), %r9
+; CHECK-X64-O2-NEXT: movq %r9, %r10
+; CHECK-X64-O2-NEXT: movl %r8d, %ecx
+; CHECK-X64-O2-NEXT: shrdq %cl, %rsi, %r10
+; CHECK-X64-O2-NEXT: movq -48(%rsp,%rdx,8), %rdx
+; CHECK-X64-O2-NEXT: shrdq %cl, %rdx, %rsi
+; CHECK-X64-O2-NEXT: shrdq %cl, %r9, %rdi
; CHECK-X64-O2-NEXT: sarq %cl, %rdx
-; CHECK-X64-O2-NEXT: movq %rdx, 24(%r9)
-; CHECK-X64-O2-NEXT: movq %r10, 16(%r9)
-; CHECK-X64-O2-NEXT: movq %rsi, (%r9)
-; CHECK-X64-O2-NEXT: movq %r11, 8(%r9)
+; CHECK-X64-O2-NEXT: movq %rdx, 24(%rax)
+; CHECK-X64-O2-NEXT: movq %rsi, 16(%rax)
+; CHECK-X64-O2-NEXT: movq %r10, 8(%rax)
+; CHECK-X64-O2-NEXT: movq %rdi, (%rax)
; CHECK-X64-O2-NEXT: retq
entry:
%0 = ashr i256 %x, %a
@@ -199,11 +166,13 @@ define i256 @shift2(i256 %c) nounwind
; CHECK-LABEL: shift2:
; CHECK: # %bb.0:
; CHECK-NEXT: pushl %ebp
+; CHECK-NEXT: movl %esp, %ebp
; CHECK-NEXT: pushl %ebx
; CHECK-NEXT: pushl %edi
; CHECK-NEXT: pushl %esi
-; CHECK-NEXT: subl $92, %esp
-; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT: andl $-16, %esp
+; CHECK-NEXT: subl $112, %esp
+; CHECK-NEXT: movl 12(%ebp), %ecx
; CHECK-NEXT: movl $0, {{[0-9]+}}(%esp)
; CHECK-NEXT: movl $0, {{[0-9]+}}(%esp)
; CHECK-NEXT: movl $0, {{[0-9]+}}(%esp)
@@ -220,68 +189,54 @@ define i256 @shift2(i256 %c) nounwind
; CHECK-NEXT: movl $0, {{[0-9]+}}(%esp)
; CHECK-NEXT: movl $0, {{[0-9]+}}(%esp)
; CHECK-NEXT: movl $0, {{[0-9]+}}(%esp)
-; CHECK-NEXT: movb %al, %ch
-; CHECK-NEXT: andb $7, %ch
+; CHECK-NEXT: movl %ecx, %eax
; CHECK-NEXT: shrb $3, %al
+; CHECK-NEXT: andb $28, %al
; CHECK-NEXT: negb %al
; CHECK-NEXT: movsbl %al, %eax
-; CHECK-NEXT: movl 68(%esp,%eax), %edx
-; CHECK-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; CHECK-NEXT: movb %ch, %cl
-; CHECK-NEXT: shll %cl, %edx
-; CHECK-NEXT: notb %cl
-; CHECK-NEXT: movb %cl, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
-; CHECK-NEXT: movl 64(%esp,%eax), %ebp
-; CHECK-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; CHECK-NEXT: shrl %ebp
-; CHECK-NEXT: shrl %cl, %ebp
-; CHECK-NEXT: orl %edx, %ebp
-; CHECK-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; CHECK-NEXT: movl 76(%esp,%eax), %edx
-; CHECK-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; CHECK-NEXT: movb %ch, %cl
-; CHECK-NEXT: shll %cl, %edx
-; CHECK-NEXT: movl 72(%esp,%eax), %ebx
-; CHECK-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; CHECK-NEXT: shrl %ebx
-; CHECK-NEXT: movb {{[-0-9]+}}(%e{{[sb]}}p), %cl # 1-byte Reload
-; CHECK-NEXT: shrl %cl, %ebx
-; CHECK-NEXT: orl %edx, %ebx
-; CHECK-NEXT: movl 84(%esp,%eax), %esi
+; CHECK-NEXT: movl 68(%esp,%eax), %esi
; CHECK-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; CHECK-NEXT: movb %ch, %cl
-; CHECK-NEXT: shll %cl, %esi
-; CHECK-NEXT: movl 80(%esp,%eax), %edi
-; CHECK-NEXT: movl %edi, %edx
-; CHECK-NEXT: shrl %edx
-; CHECK-NEXT: movb {{[-0-9]+}}(%e{{[sb]}}p), %cl # 1-byte Reload
-; CHECK-NEXT: shrl %cl, %edx
-; CHECK-NEXT: orl %esi, %edx
-; CHECK-NEXT: movb %ch, %cl
-; CHECK-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; CHECK-NEXT: shldl %cl, %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; CHECK-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; CHECK-NEXT: movl 72(%esp,%eax), %edx
+; CHECK-NEXT: movl %edx, %edi
; CHECK-NEXT: shldl %cl, %esi, %edi
-; CHECK-NEXT: movl 60(%esp,%eax), %ebp
-; CHECK-NEXT: movl 88(%esp,%eax), %esi
-; CHECK-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; CHECK-NEXT: shldl %cl, %eax, %esi
-; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; CHECK-NEXT: movl 76(%esp,%eax), %esi
+; CHECK-NEXT: movl %esi, %edi
+; CHECK-NEXT: shldl %cl, %edx, %edi
+; CHECK-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; CHECK-NEXT: movl 80(%esp,%eax), %edx
+; CHECK-NEXT: movl %edx, %edi
+; CHECK-NEXT: shldl %cl, %esi, %edi
+; CHECK-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; CHECK-NEXT: movl 84(%esp,%eax), %esi
+; CHECK-NEXT: movl %esi, %ebx
+; CHECK-NEXT: shldl %cl, %edx, %ebx
+; CHECK-NEXT: movl 88(%esp,%eax), %edi
+; CHECK-NEXT: movl %edi, %edx
+; CHECK-NEXT: shldl %cl, %esi, %edx
+; CHECK-NEXT: movl 64(%esp,%eax), %esi
+; CHECK-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; CHECK-NEXT: movl 92(%esp,%eax), %esi
+; CHECK-NEXT: shldl %cl, %edi, %esi
+; CHECK-NEXT: movl 8(%ebp), %eax
; CHECK-NEXT: movl %esi, 28(%eax)
-; CHECK-NEXT: movl %edi, 20(%eax)
-; CHECK-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; CHECK-NEXT: movl %esi, 12(%eax)
-; CHECK-NEXT: movl %ebp, %esi
-; CHECK-NEXT: shll %cl, %esi
-; CHECK-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; CHECK-NEXT: shldl %cl, %ebp, %edi
-; CHECK-NEXT: movl %edi, 4(%eax)
-; CHECK-NEXT: movl %esi, (%eax)
; CHECK-NEXT: movl %edx, 24(%eax)
-; CHECK-NEXT: movl %ebx, 16(%eax)
-; CHECK-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; CHECK-NEXT: movl %ecx, 8(%eax)
-; CHECK-NEXT: addl $92, %esp
+; CHECK-NEXT: movl %ebx, 20(%eax)
+; CHECK-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; CHECK-NEXT: movl %edx, 16(%eax)
+; CHECK-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; CHECK-NEXT: movl %edx, 12(%eax)
+; CHECK-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; CHECK-NEXT: movl %edx, 8(%eax)
+; CHECK-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; CHECK-NEXT: movl %edi, %edx
+; CHECK-NEXT: shll %cl, %edx
+; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
+; CHECK-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; CHECK-NEXT: shldl %cl, %edi, %esi
+; CHECK-NEXT: movl %esi, 4(%eax)
+; CHECK-NEXT: movl %edx, (%eax)
+; CHECK-NEXT: leal -12(%ebp), %esp
; CHECK-NEXT: popl %esi
; CHECK-NEXT: popl %edi
; CHECK-NEXT: popl %ebx
@@ -299,77 +254,64 @@ define i256 @shift2(i256 %c) nounwind
; CHECK-X64-O0-NEXT: movq $0, -{{[0-9]+}}(%rsp)
; CHECK-X64-O0-NEXT: movq $0, -{{[0-9]+}}(%rsp)
; CHECK-X64-O0-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; CHECK-X64-O0-NEXT: movb %sil, %dl
-; CHECK-X64-O0-NEXT: movb %dl, %cl
-; CHECK-X64-O0-NEXT: andb $7, %cl
+; CHECK-X64-O0-NEXT: movb %sil, %cl
; CHECK-X64-O0-NEXT: movb %cl, {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Spill
+; CHECK-X64-O0-NEXT: movb %cl, %dl
; CHECK-X64-O0-NEXT: shrb $3, %dl
+; CHECK-X64-O0-NEXT: andb $24, %dl
; CHECK-X64-O0-NEXT: negb %dl
-; CHECK-X64-O0-NEXT: movsbq %dl, %rdx
-; CHECK-X64-O0-NEXT: movq -16(%rsp,%rdx), %rsi
-; CHECK-X64-O0-NEXT: movq %rsi, %r10
-; CHECK-X64-O0-NEXT: shlq %cl, %r10
+; CHECK-X64-O0-NEXT: movsbq %dl, %r8
+; CHECK-X64-O0-NEXT: movq -40(%rsp,%r8), %r9
+; CHECK-X64-O0-NEXT: movq -32(%rsp,%r8), %rdx
+; CHECK-X64-O0-NEXT: movq -24(%rsp,%r8), %r10
+; CHECK-X64-O0-NEXT: movq %r10, %rsi
+; CHECK-X64-O0-NEXT: shldq %cl, %rdx, %rsi
; CHECK-X64-O0-NEXT: movb {{[-0-9]+}}(%r{{[sb]}}p), %cl # 1-byte Reload
-; CHECK-X64-O0-NEXT: notb %cl
-; CHECK-X64-O0-NEXT: movq -32(%rsp,%rdx), %r9
-; CHECK-X64-O0-NEXT: movq -24(%rsp,%rdx), %r8
-; CHECK-X64-O0-NEXT: movq %r8, %r11
-; CHECK-X64-O0-NEXT: shrq %r11
-; CHECK-X64-O0-NEXT: shrq %cl, %r11
-; CHECK-X64-O0-NEXT: movb {{[-0-9]+}}(%r{{[sb]}}p), %cl # 1-byte Reload
-; CHECK-X64-O0-NEXT: orq %r11, %r10
-; CHECK-X64-O0-NEXT: movq %r10, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; CHECK-X64-O0-NEXT: movq -8(%rsp,%rdx), %rdx
-; CHECK-X64-O0-NEXT: shldq %cl, %rsi, %rdx
+; CHECK-X64-O0-NEXT: movq -16(%rsp,%r8), %r8
+; CHECK-X64-O0-NEXT: shldq %cl, %r10, %r8
; CHECK-X64-O0-NEXT: movb {{[-0-9]+}}(%r{{[sb]}}p), %cl # 1-byte Reload
-; CHECK-X64-O0-NEXT: movq %r9, %rsi
-; CHECK-X64-O0-NEXT: shlq %cl, %rsi
+; CHECK-X64-O0-NEXT: movq %r9, %r10
+; CHECK-X64-O0-NEXT: shlq %cl, %r10
; CHECK-X64-O0-NEXT: movb {{[-0-9]+}}(%r{{[sb]}}p), %cl # 1-byte Reload
-; CHECK-X64-O0-NEXT: shldq %cl, %r9, %r8
+; CHECK-X64-O0-NEXT: movq %r10, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; CHECK-X64-O0-NEXT: shldq %cl, %r9, %rdx
; CHECK-X64-O0-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
-; CHECK-X64-O0-NEXT: movq %r8, 8(%rdi)
-; CHECK-X64-O0-NEXT: movq %rsi, (%rdi)
-; CHECK-X64-O0-NEXT: movq %rdx, 24(%rdi)
-; CHECK-X64-O0-NEXT: movq %rcx, 16(%rdi)
+; CHECK-X64-O0-NEXT: movq %r8, 24(%rdi)
+; CHECK-X64-O0-NEXT: movq %rsi, 16(%rdi)
+; CHECK-X64-O0-NEXT: movq %rdx, 8(%rdi)
+; CHECK-X64-O0-NEXT: movq %rcx, (%rdi)
; CHECK-X64-O0-NEXT: retq
;
; CHECK-X64-O2-LABEL: shift2:
; CHECK-X64-O2: # %bb.0:
+; CHECK-X64-O2-NEXT: movq %rsi, %rcx
; CHECK-X64-O2-NEXT: movq %rdi, %rax
-; CHECK-X64-O2-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; CHECK-X64-O2-NEXT: movq $0, -{{[0-9]+}}(%rsp)
+; CHECK-X64-O2-NEXT: xorps %xmm0, %xmm0
+; CHECK-X64-O2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; CHECK-X64-O2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; CHECK-X64-O2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; CHECK-X64-O2-NEXT: movq $0, -{{[0-9]+}}(%rsp)
; CHECK-X64-O2-NEXT: movq $1, -{{[0-9]+}}(%rsp)
-; CHECK-X64-O2-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; CHECK-X64-O2-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; CHECK-X64-O2-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; CHECK-X64-O2-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; CHECK-X64-O2-NEXT: movl %esi, %edx
-; CHECK-X64-O2-NEXT: andb $7, %dl
-; CHECK-X64-O2-NEXT: shrb $3, %sil
-; CHECK-X64-O2-NEXT: negb %sil
-; CHECK-X64-O2-NEXT: movsbq %sil, %rsi
-; CHECK-X64-O2-NEXT: movq -16(%rsp,%rsi), %rdi
-; CHECK-X64-O2-NEXT: movq %rdi, %r8
-; CHECK-X64-O2-NEXT: movl %edx, %ecx
+; CHECK-X64-O2-NEXT: movl %ecx, %edx
+; CHECK-X64-O2-NEXT: shrb $3, %dl
+; CHECK-X64-O2-NEXT: andb $24, %dl
+; CHECK-X64-O2-NEXT: negb %dl
+; CHECK-X64-O2-NEXT: movsbq %dl, %rdx
+; CHECK-X64-O2-NEXT: movq -40(%rsp,%rdx), %rsi
+; CHECK-X64-O2-NEXT: movq -32(%rsp,%rdx), %rdi
+; CHECK-X64-O2-NEXT: movq -24(%rsp,%rdx), %r8
+; CHECK-X64-O2-NEXT: movq %r8, %r9
+; CHECK-X64-O2-NEXT: shldq %cl, %rdi, %r9
+; CHECK-X64-O2-NEXT: movq -16(%rsp,%rdx), %rdx
+; CHECK-X64-O2-NEXT: shldq %cl, %r8, %rdx
+; CHECK-X64-O2-NEXT: movq %rsi, %r8
; CHECK-X64-O2-NEXT: shlq %cl, %r8
-; CHECK-X64-O2-NEXT: notb %cl
-; CHECK-X64-O2-NEXT: movq -32(%rsp,%rsi), %r9
-; CHECK-X64-O2-NEXT: movq -24(%rsp,%rsi), %r10
-; CHECK-X64-O2-NEXT: movq %r10, %r11
-; CHECK-X64-O2-NEXT: shrq %r11
-; CHECK-X64-O2-NEXT: shrq %cl, %r11
-; CHECK-X64-O2-NEXT: orq %r8, %r11
-; CHECK-X64-O2-NEXT: movq -8(%rsp,%rsi), %rsi
-; CHECK-X64-O2-NEXT: movl %edx, %ecx
-; CHECK-X64-O2-NEXT: shldq %cl, %rdi, %rsi
-; CHECK-X64-O2-NEXT: movq %r9, %rdi
-; CHECK-X64-O2-NEXT: shlq %cl, %rdi
-; CHECK-X64-O2-NEXT: shldq %cl, %r9, %r10
-; CHECK-X64-O2-NEXT: movq %rsi, 24(%rax)
-; CHECK-X64-O2-NEXT: movq %r10, 8(%rax)
-; CHECK-X64-O2-NEXT: movq %rdi, (%rax)
-; CHECK-X64-O2-NEXT: movq %r11, 16(%rax)
+; CHECK-X64-O2-NEXT: # kill: def $cl killed $cl killed $rcx
+; CHECK-X64-O2-NEXT: shldq %cl, %rsi, %rdi
+; CHECK-X64-O2-NEXT: movq %rdx, 24(%rax)
+; CHECK-X64-O2-NEXT: movq %r9, 16(%rax)
+; CHECK-X64-O2-NEXT: movq %rdi, 8(%rax)
+; CHECK-X64-O2-NEXT: movq %r8, (%rax)
; CHECK-X64-O2-NEXT: retq
{
%b = shl i256 1, %c ; %c must not be a constant
diff --git a/llvm/test/CodeGen/X86/sjlj-shadow-stack-liveness.mir b/llvm/test/CodeGen/X86/sjlj-shadow-stack-liveness.mir
index 3def36f..83bc8ec 100644
--- a/llvm/test/CodeGen/X86/sjlj-shadow-stack-liveness.mir
+++ b/llvm/test/CodeGen/X86/sjlj-shadow-stack-liveness.mir
@@ -14,6 +14,7 @@ name: bar
# CHECK-LABEL: name: bar
alignment: 16
tracksRegLiveness: true
+noPhis: false
body: |
bb.0:
%0:gr64 = IMPLICIT_DEF
@@ -29,8 +30,6 @@ body: |
; CHECK-NOT: MOV64rm killed %0
; CHECK-NEXT: MOV64rm killed %0
- ; FIXME: Dummy PHI to set the property NoPHIs to false. PR38439.
bb.2:
- %1:gr64 = PHI undef %1, %bb.2, undef %1, %bb.2
JMP_1 %bb.2
...
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-combining-avx512vbmi.ll b/llvm/test/CodeGen/X86/vector-shuffle-combining-avx512vbmi.ll
index 9b32005..61814b4 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-combining-avx512vbmi.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-combining-avx512vbmi.ll
@@ -146,3 +146,18 @@ define <64 x i8> @combine_permi2q_pshufb_as_permi2d_mask(<8 x i64> %a0, <8 x i64
%res2 = call <64 x i8> @llvm.x86.avx512.mask.pshuf.b.512(<64 x i8> %res1, <64 x i8> <i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 4, i8 5, i8 6, i8 7, i8 4, i8 5, i8 6, i8 7, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 8, i8 9, i8 10, i8 11, i8 8, i8 9, i8 10, i8 11, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 12, i8 13, i8 14, i8 15, i8 12, i8 13, i8 14, i8 15, i8 12, i8 13, i8 14, i8 15>, <64 x i8> zeroinitializer, i64 %m)
ret <64 x i8> %res2
}
+
+; PR109272
+define <64 x i8> @combine_vpermi2var_v64i8_with_mask(<64 x i8> %a0, <64 x i8> %a1, <64 x i8> %a2) {
+; CHECK-LABEL: combine_vpermi2var_v64i8_with_mask:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vpermt2b %zmm2, %zmm1, %zmm0
+; CHECK-NEXT: vpmovb2m %zmm1, %k0
+; CHECK-NEXT: vpmovm2b %k0, %zmm1
+; CHECK-NEXT: vpandnq %zmm0, %zmm1, %zmm0
+; CHECK-NEXT: ret{{[l|q]}}
+ %perm = tail call <64 x i8> @llvm.x86.avx512.vpermi2var.qi.512(<64 x i8> %a0, <64 x i8> %a1, <64 x i8> %a2)
+ %cmp = icmp slt <64 x i8> %a1, zeroinitializer
+ %sel = select <64 x i1> %cmp, <64 x i8> zeroinitializer, <64 x i8> %perm
+ ret <64 x i8> %sel
+}
diff --git a/llvm/test/CodeGen/X86/wide-scalar-shift-by-byte-multiple-legalization.ll b/llvm/test/CodeGen/X86/wide-scalar-shift-by-byte-multiple-legalization.ll
index e5affd8..2775257 100644
--- a/llvm/test/CodeGen/X86/wide-scalar-shift-by-byte-multiple-legalization.ll
+++ b/llvm/test/CodeGen/X86/wide-scalar-shift-by-byte-multiple-legalization.ll
@@ -646,7 +646,869 @@ define void @lshr_16bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; X64-HAVE-SHLD-HAVE-BMI2-NEXT: movq %rax, (%rdx)
; X64-HAVE-SHLD-HAVE-BMI2-NEXT: retq
;
-; X86-SSE2-LABEL: lshr_16bytes:
+; FALLBACK16-LABEL: lshr_16bytes:
+; FALLBACK16: # %bb.0:
+; FALLBACK16-NEXT: pushl %ebp
+; FALLBACK16-NEXT: pushl %ebx
+; FALLBACK16-NEXT: pushl %edi
+; FALLBACK16-NEXT: pushl %esi
+; FALLBACK16-NEXT: subl $60, %esp
+; FALLBACK16-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK16-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK16-NEXT: movl (%ecx), %edx
+; FALLBACK16-NEXT: movl 4(%ecx), %esi
+; FALLBACK16-NEXT: movl 8(%ecx), %edi
+; FALLBACK16-NEXT: movl 12(%ecx), %ecx
+; FALLBACK16-NEXT: movb (%eax), %ah
+; FALLBACK16-NEXT: movb %ah, %al
+; FALLBACK16-NEXT: shlb $3, %al
+; FALLBACK16-NEXT: xorps %xmm0, %xmm0
+; FALLBACK16-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: andb $12, %ah
+; FALLBACK16-NEXT: movzbl %ah, %ebp
+; FALLBACK16-NEXT: movl 20(%esp,%ebp), %esi
+; FALLBACK16-NEXT: movl %esi, %ebx
+; FALLBACK16-NEXT: movl %eax, %ecx
+; FALLBACK16-NEXT: shrl %cl, %ebx
+; FALLBACK16-NEXT: movl %eax, %edx
+; FALLBACK16-NEXT: notb %dl
+; FALLBACK16-NEXT: movl 24(%esp,%ebp), %ecx
+; FALLBACK16-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: leal (%ecx,%ecx), %edi
+; FALLBACK16-NEXT: movl %edx, %ecx
+; FALLBACK16-NEXT: shll %cl, %edi
+; FALLBACK16-NEXT: orl %ebx, %edi
+; FALLBACK16-NEXT: movl 16(%esp,%ebp), %ebx
+; FALLBACK16-NEXT: movl %eax, %ecx
+; FALLBACK16-NEXT: shrl %cl, %ebx
+; FALLBACK16-NEXT: addl %esi, %esi
+; FALLBACK16-NEXT: movl %edx, %ecx
+; FALLBACK16-NEXT: shll %cl, %esi
+; FALLBACK16-NEXT: orl %ebx, %esi
+; FALLBACK16-NEXT: movl %eax, %ecx
+; FALLBACK16-NEXT: shrl %cl, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; FALLBACK16-NEXT: movl 28(%esp,%ebp), %ebx
+; FALLBACK16-NEXT: leal (%ebx,%ebx), %ebp
+; FALLBACK16-NEXT: movl %edx, %ecx
+; FALLBACK16-NEXT: shll %cl, %ebp
+; FALLBACK16-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; FALLBACK16-NEXT: movl {{[0-9]+}}(%esp), %edx
+; FALLBACK16-NEXT: movl %eax, %ecx
+; FALLBACK16-NEXT: shrl %cl, %ebx
+; FALLBACK16-NEXT: movl %ebx, 12(%edx)
+; FALLBACK16-NEXT: movl %ebp, 8(%edx)
+; FALLBACK16-NEXT: movl %esi, (%edx)
+; FALLBACK16-NEXT: movl %edi, 4(%edx)
+; FALLBACK16-NEXT: addl $60, %esp
+; FALLBACK16-NEXT: popl %esi
+; FALLBACK16-NEXT: popl %edi
+; FALLBACK16-NEXT: popl %ebx
+; FALLBACK16-NEXT: popl %ebp
+; FALLBACK16-NEXT: retl
+;
+; FALLBACK17-LABEL: lshr_16bytes:
+; FALLBACK17: # %bb.0:
+; FALLBACK17-NEXT: pushl %ebp
+; FALLBACK17-NEXT: pushl %ebx
+; FALLBACK17-NEXT: pushl %edi
+; FALLBACK17-NEXT: pushl %esi
+; FALLBACK17-NEXT: subl $44, %esp
+; FALLBACK17-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK17-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK17-NEXT: movl {{[0-9]+}}(%esp), %edx
+; FALLBACK17-NEXT: movl (%edx), %esi
+; FALLBACK17-NEXT: movl 4(%edx), %edi
+; FALLBACK17-NEXT: movl 8(%edx), %ebx
+; FALLBACK17-NEXT: movl 12(%edx), %edx
+; FALLBACK17-NEXT: movb (%ecx), %ch
+; FALLBACK17-NEXT: movb %ch, %cl
+; FALLBACK17-NEXT: shlb $3, %cl
+; FALLBACK17-NEXT: xorps %xmm0, %xmm0
+; FALLBACK17-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movl %ebx, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movl %esi, (%esp)
+; FALLBACK17-NEXT: andb $12, %ch
+; FALLBACK17-NEXT: movzbl %ch, %ebx
+; FALLBACK17-NEXT: movl 8(%esp,%ebx), %esi
+; FALLBACK17-NEXT: movl (%esp,%ebx), %edx
+; FALLBACK17-NEXT: movl 4(%esp,%ebx), %ebp
+; FALLBACK17-NEXT: movl %ebp, %edi
+; FALLBACK17-NEXT: shrdl %cl, %esi, %edi
+; FALLBACK17-NEXT: movl 12(%esp,%ebx), %ebx
+; FALLBACK17-NEXT: shrdl %cl, %ebx, %esi
+; FALLBACK17-NEXT: shrdl %cl, %ebp, %edx
+; FALLBACK17-NEXT: shrl %cl, %ebx
+; FALLBACK17-NEXT: movl %esi, 8(%eax)
+; FALLBACK17-NEXT: movl %ebx, 12(%eax)
+; FALLBACK17-NEXT: movl %edx, (%eax)
+; FALLBACK17-NEXT: movl %edi, 4(%eax)
+; FALLBACK17-NEXT: addl $44, %esp
+; FALLBACK17-NEXT: popl %esi
+; FALLBACK17-NEXT: popl %edi
+; FALLBACK17-NEXT: popl %ebx
+; FALLBACK17-NEXT: popl %ebp
+; FALLBACK17-NEXT: retl
+;
+; FALLBACK18-LABEL: lshr_16bytes:
+; FALLBACK18: # %bb.0:
+; FALLBACK18-NEXT: pushl %ebp
+; FALLBACK18-NEXT: pushl %ebx
+; FALLBACK18-NEXT: pushl %edi
+; FALLBACK18-NEXT: pushl %esi
+; FALLBACK18-NEXT: subl $44, %esp
+; FALLBACK18-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK18-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK18-NEXT: movl (%ecx), %edx
+; FALLBACK18-NEXT: movl 4(%ecx), %esi
+; FALLBACK18-NEXT: movl 8(%ecx), %edi
+; FALLBACK18-NEXT: movl 12(%ecx), %ecx
+; FALLBACK18-NEXT: movzbl (%eax), %ebx
+; FALLBACK18-NEXT: movl %ebx, %eax
+; FALLBACK18-NEXT: shlb $3, %al
+; FALLBACK18-NEXT: xorps %xmm0, %xmm0
+; FALLBACK18-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl %edx, (%esp)
+; FALLBACK18-NEXT: andb $12, %bl
+; FALLBACK18-NEXT: movzbl %bl, %esi
+; FALLBACK18-NEXT: movl 4(%esp,%esi), %edi
+; FALLBACK18-NEXT: movl 8(%esp,%esi), %ebx
+; FALLBACK18-NEXT: shrxl %eax, %edi, %ebp
+; FALLBACK18-NEXT: movl %eax, %edx
+; FALLBACK18-NEXT: notb %dl
+; FALLBACK18-NEXT: leal (%ebx,%ebx), %ecx
+; FALLBACK18-NEXT: shlxl %edx, %ecx, %ecx
+; FALLBACK18-NEXT: orl %ebp, %ecx
+; FALLBACK18-NEXT: shrxl %eax, (%esp,%esi), %ebp
+; FALLBACK18-NEXT: addl %edi, %edi
+; FALLBACK18-NEXT: shlxl %edx, %edi, %edi
+; FALLBACK18-NEXT: orl %ebp, %edi
+; FALLBACK18-NEXT: shrxl %eax, %ebx, %ebx
+; FALLBACK18-NEXT: movl 12(%esp,%esi), %esi
+; FALLBACK18-NEXT: shrxl %eax, %esi, %eax
+; FALLBACK18-NEXT: addl %esi, %esi
+; FALLBACK18-NEXT: shlxl %edx, %esi, %edx
+; FALLBACK18-NEXT: orl %ebx, %edx
+; FALLBACK18-NEXT: movl {{[0-9]+}}(%esp), %esi
+; FALLBACK18-NEXT: movl %eax, 12(%esi)
+; FALLBACK18-NEXT: movl %edx, 8(%esi)
+; FALLBACK18-NEXT: movl %edi, (%esi)
+; FALLBACK18-NEXT: movl %ecx, 4(%esi)
+; FALLBACK18-NEXT: addl $44, %esp
+; FALLBACK18-NEXT: popl %esi
+; FALLBACK18-NEXT: popl %edi
+; FALLBACK18-NEXT: popl %ebx
+; FALLBACK18-NEXT: popl %ebp
+; FALLBACK18-NEXT: retl
+;
+; FALLBACK19-LABEL: lshr_16bytes:
+; FALLBACK19: # %bb.0:
+; FALLBACK19-NEXT: pushl %ebp
+; FALLBACK19-NEXT: pushl %ebx
+; FALLBACK19-NEXT: pushl %edi
+; FALLBACK19-NEXT: pushl %esi
+; FALLBACK19-NEXT: subl $44, %esp
+; FALLBACK19-NEXT: movl {{[0-9]+}}(%esp), %ebp
+; FALLBACK19-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK19-NEXT: movl {{[0-9]+}}(%esp), %edx
+; FALLBACK19-NEXT: movl (%edx), %esi
+; FALLBACK19-NEXT: movl 4(%edx), %edi
+; FALLBACK19-NEXT: movl 8(%edx), %ebx
+; FALLBACK19-NEXT: movl 12(%edx), %edx
+; FALLBACK19-NEXT: movzbl (%ecx), %eax
+; FALLBACK19-NEXT: movl %eax, %ecx
+; FALLBACK19-NEXT: shlb $3, %cl
+; FALLBACK19-NEXT: xorps %xmm0, %xmm0
+; FALLBACK19-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movl %ebx, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movl %esi, (%esp)
+; FALLBACK19-NEXT: andb $12, %al
+; FALLBACK19-NEXT: movzbl %al, %eax
+; FALLBACK19-NEXT: movl 8(%esp,%eax), %ebx
+; FALLBACK19-NEXT: movl (%esp,%eax), %edx
+; FALLBACK19-NEXT: movl 4(%esp,%eax), %esi
+; FALLBACK19-NEXT: movl %esi, %edi
+; FALLBACK19-NEXT: shrdl %cl, %ebx, %edi
+; FALLBACK19-NEXT: movl 12(%esp,%eax), %eax
+; FALLBACK19-NEXT: shrdl %cl, %eax, %ebx
+; FALLBACK19-NEXT: movl %ebx, 8(%ebp)
+; FALLBACK19-NEXT: shrxl %ecx, %eax, %eax
+; FALLBACK19-NEXT: movl %eax, 12(%ebp)
+; FALLBACK19-NEXT: # kill: def $cl killed $cl killed $ecx
+; FALLBACK19-NEXT: shrdl %cl, %esi, %edx
+; FALLBACK19-NEXT: movl %edx, (%ebp)
+; FALLBACK19-NEXT: movl %edi, 4(%ebp)
+; FALLBACK19-NEXT: addl $44, %esp
+; FALLBACK19-NEXT: popl %esi
+; FALLBACK19-NEXT: popl %edi
+; FALLBACK19-NEXT: popl %ebx
+; FALLBACK19-NEXT: popl %ebp
+; FALLBACK19-NEXT: retl
+;
+; FALLBACK20-LABEL: lshr_16bytes:
+; FALLBACK20: # %bb.0:
+; FALLBACK20-NEXT: pushl %ebp
+; FALLBACK20-NEXT: pushl %ebx
+; FALLBACK20-NEXT: pushl %edi
+; FALLBACK20-NEXT: pushl %esi
+; FALLBACK20-NEXT: subl $60, %esp
+; FALLBACK20-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK20-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK20-NEXT: movups (%ecx), %xmm0
+; FALLBACK20-NEXT: movzbl (%eax), %ecx
+; FALLBACK20-NEXT: movl %ecx, %eax
+; FALLBACK20-NEXT: shlb $3, %al
+; FALLBACK20-NEXT: xorps %xmm1, %xmm1
+; FALLBACK20-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; FALLBACK20-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; FALLBACK20-NEXT: andb $12, %cl
+; FALLBACK20-NEXT: movzbl %cl, %edi
+; FALLBACK20-NEXT: movl 16(%esp,%edi), %ebx
+; FALLBACK20-NEXT: movl 20(%esp,%edi), %esi
+; FALLBACK20-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK20-NEXT: movl %eax, %ecx
+; FALLBACK20-NEXT: shrl %cl, %ebx
+; FALLBACK20-NEXT: movl %eax, %edx
+; FALLBACK20-NEXT: notb %dl
+; FALLBACK20-NEXT: addl %esi, %esi
+; FALLBACK20-NEXT: movl %edx, %ecx
+; FALLBACK20-NEXT: shll %cl, %esi
+; FALLBACK20-NEXT: orl %ebx, %esi
+; FALLBACK20-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK20-NEXT: movl 24(%esp,%edi), %ebx
+; FALLBACK20-NEXT: movl %ebx, %esi
+; FALLBACK20-NEXT: movl %eax, %ecx
+; FALLBACK20-NEXT: shrl %cl, %esi
+; FALLBACK20-NEXT: movl 28(%esp,%edi), %edi
+; FALLBACK20-NEXT: leal (%edi,%edi), %ebp
+; FALLBACK20-NEXT: movl %edx, %ecx
+; FALLBACK20-NEXT: shll %cl, %ebp
+; FALLBACK20-NEXT: orl %esi, %ebp
+; FALLBACK20-NEXT: movl %eax, %ecx
+; FALLBACK20-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; FALLBACK20-NEXT: shrl %cl, %esi
+; FALLBACK20-NEXT: addl %ebx, %ebx
+; FALLBACK20-NEXT: movl %edx, %ecx
+; FALLBACK20-NEXT: shll %cl, %ebx
+; FALLBACK20-NEXT: orl %esi, %ebx
+; FALLBACK20-NEXT: movl {{[0-9]+}}(%esp), %edx
+; FALLBACK20-NEXT: movl %eax, %ecx
+; FALLBACK20-NEXT: shrl %cl, %edi
+; FALLBACK20-NEXT: movl %edi, 12(%edx)
+; FALLBACK20-NEXT: movl %ebx, 4(%edx)
+; FALLBACK20-NEXT: movl %ebp, 8(%edx)
+; FALLBACK20-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK20-NEXT: movl %eax, (%edx)
+; FALLBACK20-NEXT: addl $60, %esp
+; FALLBACK20-NEXT: popl %esi
+; FALLBACK20-NEXT: popl %edi
+; FALLBACK20-NEXT: popl %ebx
+; FALLBACK20-NEXT: popl %ebp
+; FALLBACK20-NEXT: retl
+;
+; FALLBACK21-LABEL: lshr_16bytes:
+; FALLBACK21: # %bb.0:
+; FALLBACK21-NEXT: pushl %ebp
+; FALLBACK21-NEXT: pushl %ebx
+; FALLBACK21-NEXT: pushl %edi
+; FALLBACK21-NEXT: pushl %esi
+; FALLBACK21-NEXT: subl $44, %esp
+; FALLBACK21-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK21-NEXT: movl {{[0-9]+}}(%esp), %edx
+; FALLBACK21-NEXT: movups (%edx), %xmm0
+; FALLBACK21-NEXT: movzbl (%ecx), %edx
+; FALLBACK21-NEXT: movl %edx, %ecx
+; FALLBACK21-NEXT: shlb $3, %cl
+; FALLBACK21-NEXT: xorps %xmm1, %xmm1
+; FALLBACK21-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; FALLBACK21-NEXT: movaps %xmm0, (%esp)
+; FALLBACK21-NEXT: andb $12, %dl
+; FALLBACK21-NEXT: movzbl %dl, %ebx
+; FALLBACK21-NEXT: movl 12(%esp,%ebx), %edx
+; FALLBACK21-NEXT: movl 8(%esp,%ebx), %ebp
+; FALLBACK21-NEXT: movl %ebp, %edi
+; FALLBACK21-NEXT: shrdl %cl, %edx, %edi
+; FALLBACK21-NEXT: movl (%esp,%ebx), %esi
+; FALLBACK21-NEXT: movl 4(%esp,%ebx), %eax
+; FALLBACK21-NEXT: movl %eax, %ebx
+; FALLBACK21-NEXT: shrdl %cl, %ebp, %ebx
+; FALLBACK21-NEXT: movl {{[0-9]+}}(%esp), %ebp
+; FALLBACK21-NEXT: movl %ebx, 4(%ebp)
+; FALLBACK21-NEXT: movl %edi, 8(%ebp)
+; FALLBACK21-NEXT: shrdl %cl, %eax, %esi
+; FALLBACK21-NEXT: shrl %cl, %edx
+; FALLBACK21-NEXT: movl %edx, 12(%ebp)
+; FALLBACK21-NEXT: movl %esi, (%ebp)
+; FALLBACK21-NEXT: addl $44, %esp
+; FALLBACK21-NEXT: popl %esi
+; FALLBACK21-NEXT: popl %edi
+; FALLBACK21-NEXT: popl %ebx
+; FALLBACK21-NEXT: popl %ebp
+; FALLBACK21-NEXT: retl
+;
+; FALLBACK22-LABEL: lshr_16bytes:
+; FALLBACK22: # %bb.0:
+; FALLBACK22-NEXT: pushl %ebp
+; FALLBACK22-NEXT: pushl %ebx
+; FALLBACK22-NEXT: pushl %edi
+; FALLBACK22-NEXT: pushl %esi
+; FALLBACK22-NEXT: subl $44, %esp
+; FALLBACK22-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK22-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK22-NEXT: movups (%ecx), %xmm0
+; FALLBACK22-NEXT: movzbl (%eax), %ecx
+; FALLBACK22-NEXT: movl %ecx, %eax
+; FALLBACK22-NEXT: shlb $3, %al
+; FALLBACK22-NEXT: xorps %xmm1, %xmm1
+; FALLBACK22-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; FALLBACK22-NEXT: movaps %xmm0, (%esp)
+; FALLBACK22-NEXT: andb $12, %cl
+; FALLBACK22-NEXT: movzbl %cl, %edi
+; FALLBACK22-NEXT: shrxl %eax, (%esp,%edi), %ebx
+; FALLBACK22-NEXT: movl %eax, %ecx
+; FALLBACK22-NEXT: notb %cl
+; FALLBACK22-NEXT: movl 4(%esp,%edi), %ebp
+; FALLBACK22-NEXT: movl 8(%esp,%edi), %esi
+; FALLBACK22-NEXT: leal (%ebp,%ebp), %edx
+; FALLBACK22-NEXT: shlxl %ecx, %edx, %edx
+; FALLBACK22-NEXT: orl %ebx, %edx
+; FALLBACK22-NEXT: shrxl %eax, %esi, %ebx
+; FALLBACK22-NEXT: shrxl %eax, %ebp, %ebp
+; FALLBACK22-NEXT: movl 12(%esp,%edi), %edi
+; FALLBACK22-NEXT: shrxl %eax, %edi, %eax
+; FALLBACK22-NEXT: addl %edi, %edi
+; FALLBACK22-NEXT: shlxl %ecx, %edi, %edi
+; FALLBACK22-NEXT: orl %ebx, %edi
+; FALLBACK22-NEXT: addl %esi, %esi
+; FALLBACK22-NEXT: shlxl %ecx, %esi, %ecx
+; FALLBACK22-NEXT: orl %ebp, %ecx
+; FALLBACK22-NEXT: movl {{[0-9]+}}(%esp), %esi
+; FALLBACK22-NEXT: movl %eax, 12(%esi)
+; FALLBACK22-NEXT: movl %ecx, 4(%esi)
+; FALLBACK22-NEXT: movl %edi, 8(%esi)
+; FALLBACK22-NEXT: movl %edx, (%esi)
+; FALLBACK22-NEXT: addl $44, %esp
+; FALLBACK22-NEXT: popl %esi
+; FALLBACK22-NEXT: popl %edi
+; FALLBACK22-NEXT: popl %ebx
+; FALLBACK22-NEXT: popl %ebp
+; FALLBACK22-NEXT: retl
+;
+; FALLBACK23-LABEL: lshr_16bytes:
+; FALLBACK23: # %bb.0:
+; FALLBACK23-NEXT: pushl %ebp
+; FALLBACK23-NEXT: pushl %ebx
+; FALLBACK23-NEXT: pushl %edi
+; FALLBACK23-NEXT: pushl %esi
+; FALLBACK23-NEXT: subl $44, %esp
+; FALLBACK23-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK23-NEXT: movl {{[0-9]+}}(%esp), %edx
+; FALLBACK23-NEXT: movups (%edx), %xmm0
+; FALLBACK23-NEXT: movzbl (%ecx), %edx
+; FALLBACK23-NEXT: movl %edx, %ecx
+; FALLBACK23-NEXT: shlb $3, %cl
+; FALLBACK23-NEXT: xorps %xmm1, %xmm1
+; FALLBACK23-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; FALLBACK23-NEXT: movaps %xmm0, (%esp)
+; FALLBACK23-NEXT: andb $12, %dl
+; FALLBACK23-NEXT: movzbl %dl, %ebx
+; FALLBACK23-NEXT: movl 12(%esp,%ebx), %edx
+; FALLBACK23-NEXT: movl 8(%esp,%ebx), %ebp
+; FALLBACK23-NEXT: movl %ebp, %edi
+; FALLBACK23-NEXT: shrdl %cl, %edx, %edi
+; FALLBACK23-NEXT: movl (%esp,%ebx), %esi
+; FALLBACK23-NEXT: movl 4(%esp,%ebx), %eax
+; FALLBACK23-NEXT: movl %eax, %ebx
+; FALLBACK23-NEXT: shrdl %cl, %ebp, %ebx
+; FALLBACK23-NEXT: movl {{[0-9]+}}(%esp), %ebp
+; FALLBACK23-NEXT: movl %ebx, 4(%ebp)
+; FALLBACK23-NEXT: movl %edi, 8(%ebp)
+; FALLBACK23-NEXT: shrxl %ecx, %edx, %edx
+; FALLBACK23-NEXT: movl %edx, 12(%ebp)
+; FALLBACK23-NEXT: # kill: def $cl killed $cl killed $ecx
+; FALLBACK23-NEXT: shrdl %cl, %eax, %esi
+; FALLBACK23-NEXT: movl %esi, (%ebp)
+; FALLBACK23-NEXT: addl $44, %esp
+; FALLBACK23-NEXT: popl %esi
+; FALLBACK23-NEXT: popl %edi
+; FALLBACK23-NEXT: popl %ebx
+; FALLBACK23-NEXT: popl %ebp
+; FALLBACK23-NEXT: retl
+;
+; FALLBACK24-LABEL: lshr_16bytes:
+; FALLBACK24: # %bb.0:
+; FALLBACK24-NEXT: pushl %ebp
+; FALLBACK24-NEXT: pushl %ebx
+; FALLBACK24-NEXT: pushl %edi
+; FALLBACK24-NEXT: pushl %esi
+; FALLBACK24-NEXT: subl $60, %esp
+; FALLBACK24-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK24-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK24-NEXT: vmovups (%ecx), %xmm0
+; FALLBACK24-NEXT: movzbl (%eax), %ecx
+; FALLBACK24-NEXT: movl %ecx, %eax
+; FALLBACK24-NEXT: shlb $3, %al
+; FALLBACK24-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; FALLBACK24-NEXT: vmovaps %xmm1, {{[0-9]+}}(%esp)
+; FALLBACK24-NEXT: vmovaps %xmm0, {{[0-9]+}}(%esp)
+; FALLBACK24-NEXT: andb $12, %cl
+; FALLBACK24-NEXT: movzbl %cl, %edi
+; FALLBACK24-NEXT: movl 16(%esp,%edi), %ebx
+; FALLBACK24-NEXT: movl 20(%esp,%edi), %esi
+; FALLBACK24-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK24-NEXT: movl %eax, %ecx
+; FALLBACK24-NEXT: shrl %cl, %ebx
+; FALLBACK24-NEXT: movl %eax, %edx
+; FALLBACK24-NEXT: notb %dl
+; FALLBACK24-NEXT: addl %esi, %esi
+; FALLBACK24-NEXT: movl %edx, %ecx
+; FALLBACK24-NEXT: shll %cl, %esi
+; FALLBACK24-NEXT: orl %ebx, %esi
+; FALLBACK24-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK24-NEXT: movl 24(%esp,%edi), %ebx
+; FALLBACK24-NEXT: movl %ebx, %esi
+; FALLBACK24-NEXT: movl %eax, %ecx
+; FALLBACK24-NEXT: shrl %cl, %esi
+; FALLBACK24-NEXT: movl 28(%esp,%edi), %edi
+; FALLBACK24-NEXT: leal (%edi,%edi), %ebp
+; FALLBACK24-NEXT: movl %edx, %ecx
+; FALLBACK24-NEXT: shll %cl, %ebp
+; FALLBACK24-NEXT: orl %esi, %ebp
+; FALLBACK24-NEXT: movl %eax, %ecx
+; FALLBACK24-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; FALLBACK24-NEXT: shrl %cl, %esi
+; FALLBACK24-NEXT: addl %ebx, %ebx
+; FALLBACK24-NEXT: movl %edx, %ecx
+; FALLBACK24-NEXT: shll %cl, %ebx
+; FALLBACK24-NEXT: orl %esi, %ebx
+; FALLBACK24-NEXT: movl {{[0-9]+}}(%esp), %edx
+; FALLBACK24-NEXT: movl %eax, %ecx
+; FALLBACK24-NEXT: shrl %cl, %edi
+; FALLBACK24-NEXT: movl %edi, 12(%edx)
+; FALLBACK24-NEXT: movl %ebx, 4(%edx)
+; FALLBACK24-NEXT: movl %ebp, 8(%edx)
+; FALLBACK24-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK24-NEXT: movl %eax, (%edx)
+; FALLBACK24-NEXT: addl $60, %esp
+; FALLBACK24-NEXT: popl %esi
+; FALLBACK24-NEXT: popl %edi
+; FALLBACK24-NEXT: popl %ebx
+; FALLBACK24-NEXT: popl %ebp
+; FALLBACK24-NEXT: retl
+;
+; FALLBACK25-LABEL: lshr_16bytes:
+; FALLBACK25: # %bb.0:
+; FALLBACK25-NEXT: pushl %ebp
+; FALLBACK25-NEXT: pushl %ebx
+; FALLBACK25-NEXT: pushl %edi
+; FALLBACK25-NEXT: pushl %esi
+; FALLBACK25-NEXT: subl $44, %esp
+; FALLBACK25-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK25-NEXT: movl {{[0-9]+}}(%esp), %edx
+; FALLBACK25-NEXT: vmovups (%edx), %xmm0
+; FALLBACK25-NEXT: movzbl (%ecx), %edx
+; FALLBACK25-NEXT: movl %edx, %ecx
+; FALLBACK25-NEXT: shlb $3, %cl
+; FALLBACK25-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; FALLBACK25-NEXT: vmovaps %xmm1, {{[0-9]+}}(%esp)
+; FALLBACK25-NEXT: vmovaps %xmm0, (%esp)
+; FALLBACK25-NEXT: andb $12, %dl
+; FALLBACK25-NEXT: movzbl %dl, %ebx
+; FALLBACK25-NEXT: movl 12(%esp,%ebx), %edx
+; FALLBACK25-NEXT: movl 8(%esp,%ebx), %ebp
+; FALLBACK25-NEXT: movl %ebp, %edi
+; FALLBACK25-NEXT: shrdl %cl, %edx, %edi
+; FALLBACK25-NEXT: movl (%esp,%ebx), %esi
+; FALLBACK25-NEXT: movl 4(%esp,%ebx), %eax
+; FALLBACK25-NEXT: movl %eax, %ebx
+; FALLBACK25-NEXT: shrdl %cl, %ebp, %ebx
+; FALLBACK25-NEXT: movl {{[0-9]+}}(%esp), %ebp
+; FALLBACK25-NEXT: movl %ebx, 4(%ebp)
+; FALLBACK25-NEXT: movl %edi, 8(%ebp)
+; FALLBACK25-NEXT: shrdl %cl, %eax, %esi
+; FALLBACK25-NEXT: shrl %cl, %edx
+; FALLBACK25-NEXT: movl %edx, 12(%ebp)
+; FALLBACK25-NEXT: movl %esi, (%ebp)
+; FALLBACK25-NEXT: addl $44, %esp
+; FALLBACK25-NEXT: popl %esi
+; FALLBACK25-NEXT: popl %edi
+; FALLBACK25-NEXT: popl %ebx
+; FALLBACK25-NEXT: popl %ebp
+; FALLBACK25-NEXT: retl
+;
+; FALLBACK26-LABEL: lshr_16bytes:
+; FALLBACK26: # %bb.0:
+; FALLBACK26-NEXT: pushl %ebp
+; FALLBACK26-NEXT: pushl %ebx
+; FALLBACK26-NEXT: pushl %edi
+; FALLBACK26-NEXT: pushl %esi
+; FALLBACK26-NEXT: subl $44, %esp
+; FALLBACK26-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK26-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK26-NEXT: vmovups (%ecx), %xmm0
+; FALLBACK26-NEXT: movzbl (%eax), %ecx
+; FALLBACK26-NEXT: movl %ecx, %eax
+; FALLBACK26-NEXT: shlb $3, %al
+; FALLBACK26-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; FALLBACK26-NEXT: vmovaps %xmm1, {{[0-9]+}}(%esp)
+; FALLBACK26-NEXT: vmovaps %xmm0, (%esp)
+; FALLBACK26-NEXT: andb $12, %cl
+; FALLBACK26-NEXT: movzbl %cl, %edi
+; FALLBACK26-NEXT: shrxl %eax, (%esp,%edi), %ebx
+; FALLBACK26-NEXT: movl %eax, %ecx
+; FALLBACK26-NEXT: notb %cl
+; FALLBACK26-NEXT: movl 4(%esp,%edi), %ebp
+; FALLBACK26-NEXT: movl 8(%esp,%edi), %esi
+; FALLBACK26-NEXT: leal (%ebp,%ebp), %edx
+; FALLBACK26-NEXT: shlxl %ecx, %edx, %edx
+; FALLBACK26-NEXT: orl %ebx, %edx
+; FALLBACK26-NEXT: shrxl %eax, %esi, %ebx
+; FALLBACK26-NEXT: shrxl %eax, %ebp, %ebp
+; FALLBACK26-NEXT: movl 12(%esp,%edi), %edi
+; FALLBACK26-NEXT: shrxl %eax, %edi, %eax
+; FALLBACK26-NEXT: addl %edi, %edi
+; FALLBACK26-NEXT: shlxl %ecx, %edi, %edi
+; FALLBACK26-NEXT: orl %ebx, %edi
+; FALLBACK26-NEXT: addl %esi, %esi
+; FALLBACK26-NEXT: shlxl %ecx, %esi, %ecx
+; FALLBACK26-NEXT: orl %ebp, %ecx
+; FALLBACK26-NEXT: movl {{[0-9]+}}(%esp), %esi
+; FALLBACK26-NEXT: movl %eax, 12(%esi)
+; FALLBACK26-NEXT: movl %ecx, 4(%esi)
+; FALLBACK26-NEXT: movl %edi, 8(%esi)
+; FALLBACK26-NEXT: movl %edx, (%esi)
+; FALLBACK26-NEXT: addl $44, %esp
+; FALLBACK26-NEXT: popl %esi
+; FALLBACK26-NEXT: popl %edi
+; FALLBACK26-NEXT: popl %ebx
+; FALLBACK26-NEXT: popl %ebp
+; FALLBACK26-NEXT: retl
+;
+; FALLBACK27-LABEL: lshr_16bytes:
+; FALLBACK27: # %bb.0:
+; FALLBACK27-NEXT: pushl %ebp
+; FALLBACK27-NEXT: pushl %ebx
+; FALLBACK27-NEXT: pushl %edi
+; FALLBACK27-NEXT: pushl %esi
+; FALLBACK27-NEXT: subl $44, %esp
+; FALLBACK27-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK27-NEXT: movl {{[0-9]+}}(%esp), %edx
+; FALLBACK27-NEXT: vmovups (%edx), %xmm0
+; FALLBACK27-NEXT: movzbl (%ecx), %edx
+; FALLBACK27-NEXT: movl %edx, %ecx
+; FALLBACK27-NEXT: shlb $3, %cl
+; FALLBACK27-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; FALLBACK27-NEXT: vmovaps %xmm1, {{[0-9]+}}(%esp)
+; FALLBACK27-NEXT: vmovaps %xmm0, (%esp)
+; FALLBACK27-NEXT: andb $12, %dl
+; FALLBACK27-NEXT: movzbl %dl, %ebx
+; FALLBACK27-NEXT: movl 12(%esp,%ebx), %edx
+; FALLBACK27-NEXT: movl 8(%esp,%ebx), %ebp
+; FALLBACK27-NEXT: movl %ebp, %edi
+; FALLBACK27-NEXT: shrdl %cl, %edx, %edi
+; FALLBACK27-NEXT: movl (%esp,%ebx), %esi
+; FALLBACK27-NEXT: movl 4(%esp,%ebx), %eax
+; FALLBACK27-NEXT: movl %eax, %ebx
+; FALLBACK27-NEXT: shrdl %cl, %ebp, %ebx
+; FALLBACK27-NEXT: movl {{[0-9]+}}(%esp), %ebp
+; FALLBACK27-NEXT: movl %ebx, 4(%ebp)
+; FALLBACK27-NEXT: movl %edi, 8(%ebp)
+; FALLBACK27-NEXT: shrxl %ecx, %edx, %edx
+; FALLBACK27-NEXT: movl %edx, 12(%ebp)
+; FALLBACK27-NEXT: # kill: def $cl killed $cl killed $ecx
+; FALLBACK27-NEXT: shrdl %cl, %eax, %esi
+; FALLBACK27-NEXT: movl %esi, (%ebp)
+; FALLBACK27-NEXT: addl $44, %esp
+; FALLBACK27-NEXT: popl %esi
+; FALLBACK27-NEXT: popl %edi
+; FALLBACK27-NEXT: popl %ebx
+; FALLBACK27-NEXT: popl %ebp
+; FALLBACK27-NEXT: retl
+;
+; FALLBACK28-LABEL: lshr_16bytes:
+; FALLBACK28: # %bb.0:
+; FALLBACK28-NEXT: pushl %ebp
+; FALLBACK28-NEXT: pushl %ebx
+; FALLBACK28-NEXT: pushl %edi
+; FALLBACK28-NEXT: pushl %esi
+; FALLBACK28-NEXT: subl $60, %esp
+; FALLBACK28-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK28-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK28-NEXT: vmovups (%ecx), %xmm0
+; FALLBACK28-NEXT: movzbl (%eax), %ecx
+; FALLBACK28-NEXT: movl %ecx, %eax
+; FALLBACK28-NEXT: shlb $3, %al
+; FALLBACK28-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; FALLBACK28-NEXT: vmovaps %xmm1, {{[0-9]+}}(%esp)
+; FALLBACK28-NEXT: vmovaps %xmm0, {{[0-9]+}}(%esp)
+; FALLBACK28-NEXT: andb $12, %cl
+; FALLBACK28-NEXT: movzbl %cl, %edi
+; FALLBACK28-NEXT: movl 16(%esp,%edi), %ebx
+; FALLBACK28-NEXT: movl 20(%esp,%edi), %esi
+; FALLBACK28-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK28-NEXT: movl %eax, %ecx
+; FALLBACK28-NEXT: shrl %cl, %ebx
+; FALLBACK28-NEXT: movl %eax, %edx
+; FALLBACK28-NEXT: notb %dl
+; FALLBACK28-NEXT: addl %esi, %esi
+; FALLBACK28-NEXT: movl %edx, %ecx
+; FALLBACK28-NEXT: shll %cl, %esi
+; FALLBACK28-NEXT: orl %ebx, %esi
+; FALLBACK28-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK28-NEXT: movl 24(%esp,%edi), %ebx
+; FALLBACK28-NEXT: movl %ebx, %esi
+; FALLBACK28-NEXT: movl %eax, %ecx
+; FALLBACK28-NEXT: shrl %cl, %esi
+; FALLBACK28-NEXT: movl 28(%esp,%edi), %edi
+; FALLBACK28-NEXT: leal (%edi,%edi), %ebp
+; FALLBACK28-NEXT: movl %edx, %ecx
+; FALLBACK28-NEXT: shll %cl, %ebp
+; FALLBACK28-NEXT: orl %esi, %ebp
+; FALLBACK28-NEXT: movl %eax, %ecx
+; FALLBACK28-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; FALLBACK28-NEXT: shrl %cl, %esi
+; FALLBACK28-NEXT: addl %ebx, %ebx
+; FALLBACK28-NEXT: movl %edx, %ecx
+; FALLBACK28-NEXT: shll %cl, %ebx
+; FALLBACK28-NEXT: orl %esi, %ebx
+; FALLBACK28-NEXT: movl {{[0-9]+}}(%esp), %edx
+; FALLBACK28-NEXT: movl %eax, %ecx
+; FALLBACK28-NEXT: shrl %cl, %edi
+; FALLBACK28-NEXT: movl %edi, 12(%edx)
+; FALLBACK28-NEXT: movl %ebx, 4(%edx)
+; FALLBACK28-NEXT: movl %ebp, 8(%edx)
+; FALLBACK28-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK28-NEXT: movl %eax, (%edx)
+; FALLBACK28-NEXT: addl $60, %esp
+; FALLBACK28-NEXT: popl %esi
+; FALLBACK28-NEXT: popl %edi
+; FALLBACK28-NEXT: popl %ebx
+; FALLBACK28-NEXT: popl %ebp
+; FALLBACK28-NEXT: retl
+;
+; FALLBACK29-LABEL: lshr_16bytes:
+; FALLBACK29: # %bb.0:
+; FALLBACK29-NEXT: pushl %ebp
+; FALLBACK29-NEXT: pushl %ebx
+; FALLBACK29-NEXT: pushl %edi
+; FALLBACK29-NEXT: pushl %esi
+; FALLBACK29-NEXT: subl $44, %esp
+; FALLBACK29-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK29-NEXT: movl {{[0-9]+}}(%esp), %edx
+; FALLBACK29-NEXT: vmovups (%edx), %xmm0
+; FALLBACK29-NEXT: movzbl (%ecx), %edx
+; FALLBACK29-NEXT: movl %edx, %ecx
+; FALLBACK29-NEXT: shlb $3, %cl
+; FALLBACK29-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; FALLBACK29-NEXT: vmovaps %xmm1, {{[0-9]+}}(%esp)
+; FALLBACK29-NEXT: vmovaps %xmm0, (%esp)
+; FALLBACK29-NEXT: andb $12, %dl
+; FALLBACK29-NEXT: movzbl %dl, %ebx
+; FALLBACK29-NEXT: movl 12(%esp,%ebx), %edx
+; FALLBACK29-NEXT: movl 8(%esp,%ebx), %ebp
+; FALLBACK29-NEXT: movl %ebp, %edi
+; FALLBACK29-NEXT: shrdl %cl, %edx, %edi
+; FALLBACK29-NEXT: movl (%esp,%ebx), %esi
+; FALLBACK29-NEXT: movl 4(%esp,%ebx), %eax
+; FALLBACK29-NEXT: movl %eax, %ebx
+; FALLBACK29-NEXT: shrdl %cl, %ebp, %ebx
+; FALLBACK29-NEXT: movl {{[0-9]+}}(%esp), %ebp
+; FALLBACK29-NEXT: movl %ebx, 4(%ebp)
+; FALLBACK29-NEXT: movl %edi, 8(%ebp)
+; FALLBACK29-NEXT: shrdl %cl, %eax, %esi
+; FALLBACK29-NEXT: shrl %cl, %edx
+; FALLBACK29-NEXT: movl %edx, 12(%ebp)
+; FALLBACK29-NEXT: movl %esi, (%ebp)
+; FALLBACK29-NEXT: addl $44, %esp
+; FALLBACK29-NEXT: popl %esi
+; FALLBACK29-NEXT: popl %edi
+; FALLBACK29-NEXT: popl %ebx
+; FALLBACK29-NEXT: popl %ebp
+; FALLBACK29-NEXT: retl
+;
+; FALLBACK30-LABEL: lshr_16bytes:
+; FALLBACK30: # %bb.0:
+; FALLBACK30-NEXT: pushl %ebp
+; FALLBACK30-NEXT: pushl %ebx
+; FALLBACK30-NEXT: pushl %edi
+; FALLBACK30-NEXT: pushl %esi
+; FALLBACK30-NEXT: subl $44, %esp
+; FALLBACK30-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK30-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK30-NEXT: vmovups (%ecx), %xmm0
+; FALLBACK30-NEXT: movzbl (%eax), %ecx
+; FALLBACK30-NEXT: movl %ecx, %eax
+; FALLBACK30-NEXT: shlb $3, %al
+; FALLBACK30-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; FALLBACK30-NEXT: vmovaps %xmm1, {{[0-9]+}}(%esp)
+; FALLBACK30-NEXT: vmovaps %xmm0, (%esp)
+; FALLBACK30-NEXT: andb $12, %cl
+; FALLBACK30-NEXT: movzbl %cl, %edi
+; FALLBACK30-NEXT: shrxl %eax, (%esp,%edi), %ebx
+; FALLBACK30-NEXT: movl %eax, %ecx
+; FALLBACK30-NEXT: notb %cl
+; FALLBACK30-NEXT: movl 4(%esp,%edi), %ebp
+; FALLBACK30-NEXT: movl 8(%esp,%edi), %esi
+; FALLBACK30-NEXT: leal (%ebp,%ebp), %edx
+; FALLBACK30-NEXT: shlxl %ecx, %edx, %edx
+; FALLBACK30-NEXT: orl %ebx, %edx
+; FALLBACK30-NEXT: shrxl %eax, %esi, %ebx
+; FALLBACK30-NEXT: shrxl %eax, %ebp, %ebp
+; FALLBACK30-NEXT: movl 12(%esp,%edi), %edi
+; FALLBACK30-NEXT: shrxl %eax, %edi, %eax
+; FALLBACK30-NEXT: addl %edi, %edi
+; FALLBACK30-NEXT: shlxl %ecx, %edi, %edi
+; FALLBACK30-NEXT: orl %ebx, %edi
+; FALLBACK30-NEXT: addl %esi, %esi
+; FALLBACK30-NEXT: shlxl %ecx, %esi, %ecx
+; FALLBACK30-NEXT: orl %ebp, %ecx
+; FALLBACK30-NEXT: movl {{[0-9]+}}(%esp), %esi
+; FALLBACK30-NEXT: movl %eax, 12(%esi)
+; FALLBACK30-NEXT: movl %ecx, 4(%esi)
+; FALLBACK30-NEXT: movl %edi, 8(%esi)
+; FALLBACK30-NEXT: movl %edx, (%esi)
+; FALLBACK30-NEXT: addl $44, %esp
+; FALLBACK30-NEXT: popl %esi
+; FALLBACK30-NEXT: popl %edi
+; FALLBACK30-NEXT: popl %ebx
+; FALLBACK30-NEXT: popl %ebp
+; FALLBACK30-NEXT: retl
+;
+; FALLBACK31-LABEL: lshr_16bytes:
+; FALLBACK31: # %bb.0:
+; FALLBACK31-NEXT: pushl %ebp
+; FALLBACK31-NEXT: pushl %ebx
+; FALLBACK31-NEXT: pushl %edi
+; FALLBACK31-NEXT: pushl %esi
+; FALLBACK31-NEXT: subl $44, %esp
+; FALLBACK31-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK31-NEXT: movl {{[0-9]+}}(%esp), %edx
+; FALLBACK31-NEXT: vmovups (%edx), %xmm0
+; FALLBACK31-NEXT: movzbl (%ecx), %edx
+; FALLBACK31-NEXT: movl %edx, %ecx
+; FALLBACK31-NEXT: shlb $3, %cl
+; FALLBACK31-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; FALLBACK31-NEXT: vmovaps %xmm1, {{[0-9]+}}(%esp)
+; FALLBACK31-NEXT: vmovaps %xmm0, (%esp)
+; FALLBACK31-NEXT: andb $12, %dl
+; FALLBACK31-NEXT: movzbl %dl, %ebx
+; FALLBACK31-NEXT: movl 12(%esp,%ebx), %edx
+; FALLBACK31-NEXT: movl 8(%esp,%ebx), %ebp
+; FALLBACK31-NEXT: movl %ebp, %edi
+; FALLBACK31-NEXT: shrdl %cl, %edx, %edi
+; FALLBACK31-NEXT: movl (%esp,%ebx), %esi
+; FALLBACK31-NEXT: movl 4(%esp,%ebx), %eax
+; FALLBACK31-NEXT: movl %eax, %ebx
+; FALLBACK31-NEXT: shrdl %cl, %ebp, %ebx
+; FALLBACK31-NEXT: movl {{[0-9]+}}(%esp), %ebp
+; FALLBACK31-NEXT: movl %ebx, 4(%ebp)
+; FALLBACK31-NEXT: movl %edi, 8(%ebp)
+; FALLBACK31-NEXT: shrxl %ecx, %edx, %edx
+; FALLBACK31-NEXT: movl %edx, 12(%ebp)
+; FALLBACK31-NEXT: # kill: def $cl killed $cl killed $ecx
+; FALLBACK31-NEXT: shrdl %cl, %eax, %esi
+; FALLBACK31-NEXT: movl %esi, (%ebp)
+; FALLBACK31-NEXT: addl $44, %esp
+; FALLBACK31-NEXT: popl %esi
+; FALLBACK31-NEXT: popl %edi
+; FALLBACK31-NEXT: popl %ebx
+; FALLBACK31-NEXT: popl %ebp
+; FALLBACK31-NEXT: retl
+ %src = load i128, ptr %src.ptr, align 1
+ %byteOff = load i128, ptr %byteOff.ptr, align 1
+ %bitOff = shl i128 %byteOff, 3
+ %res = lshr i128 %src, %bitOff
+ store i128 %res, ptr %dst, align 1
+ ret void
+}
+
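(Editorial note: every FALLBACK16-FALLBACK31 block above checks the same contract, defined by the IR tail of @lshr_16bytes just before this point: load a 16-byte value and a byte offset, both align 1, scale the offset to bits with `shl i128 %byteOff, 3`, then `lshr`. A minimal C sketch of that contract follows, assuming a compiler with unsigned __int128; this is a hypothetical model for orientation, not code from the test, and the IR remains authoritative. The IR leaves out-of-range offsets as poison, so the sketch masks the shift count to stay defined.)

    #include <string.h>

    /* Hypothetical C model of @lshr_16bytes: unaligned 16-byte loads,
       byte offset scaled by 8 to a bit offset, logical shift right. */
    void lshr_16bytes_model(const void *src_ptr, const void *byte_off_ptr, void *dst) {
        unsigned __int128 src, byte_off;
        memcpy(&src, src_ptr, 16);           /* load i128, align 1 */
        memcpy(&byte_off, byte_off_ptr, 16); /* load i128, align 1 */
        /* shl i128 %byteOff, 3 -- masked here so the C shift stays defined */
        unsigned bit_off = ((unsigned)byte_off << 3) & 127;
        unsigned __int128 res = src >> bit_off; /* lshr i128 %src, %bitOff */
        memcpy(dst, &res, 16);               /* store i128, align 1 */
    }

(The `andb $12` / `andl $15` instructions in the checked assembly are the lowered form of this masking: the shift is implemented by spilling to a zero-padded stack slot and reloading at the masked byte offset.)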
+define void @lshr_16bytes_dwordOff(ptr %src.ptr, ptr %dwordOff.ptr, ptr %dst) nounwind {
+; X64-NO-SHLD-NO-BMI2-LABEL: lshr_16bytes_dwordOff:
+; X64-NO-SHLD-NO-BMI2: # %bb.0:
+; X64-NO-SHLD-NO-BMI2-NEXT: movq (%rdi), %r8
+; X64-NO-SHLD-NO-BMI2-NEXT: movq 8(%rdi), %rdi
+; X64-NO-SHLD-NO-BMI2-NEXT: movzbl (%rsi), %eax
+; X64-NO-SHLD-NO-BMI2-NEXT: shlb $5, %al
+; X64-NO-SHLD-NO-BMI2-NEXT: movl %eax, %ecx
+; X64-NO-SHLD-NO-BMI2-NEXT: shrq %cl, %r8
+; X64-NO-SHLD-NO-BMI2-NEXT: leaq (%rdi,%rdi), %rsi
+; X64-NO-SHLD-NO-BMI2-NEXT: notb %cl
+; X64-NO-SHLD-NO-BMI2-NEXT: shlq %cl, %rsi
+; X64-NO-SHLD-NO-BMI2-NEXT: orq %r8, %rsi
+; X64-NO-SHLD-NO-BMI2-NEXT: movl %eax, %ecx
+; X64-NO-SHLD-NO-BMI2-NEXT: shrq %cl, %rdi
+; X64-NO-SHLD-NO-BMI2-NEXT: xorl %ecx, %ecx
+; X64-NO-SHLD-NO-BMI2-NEXT: testb $64, %al
+; X64-NO-SHLD-NO-BMI2-NEXT: cmovneq %rdi, %rsi
+; X64-NO-SHLD-NO-BMI2-NEXT: cmoveq %rdi, %rcx
+; X64-NO-SHLD-NO-BMI2-NEXT: movq %rcx, 8(%rdx)
+; X64-NO-SHLD-NO-BMI2-NEXT: movq %rsi, (%rdx)
+; X64-NO-SHLD-NO-BMI2-NEXT: retq
+;
+; X64-HAVE-SHLD-NO-BMI2-LABEL: lshr_16bytes_dwordOff:
+; X64-HAVE-SHLD-NO-BMI2: # %bb.0:
+; X64-HAVE-SHLD-NO-BMI2-NEXT: movq (%rdi), %rax
+; X64-HAVE-SHLD-NO-BMI2-NEXT: movq 8(%rdi), %rdi
+; X64-HAVE-SHLD-NO-BMI2-NEXT: movzbl (%rsi), %ecx
+; X64-HAVE-SHLD-NO-BMI2-NEXT: shlb $5, %cl
+; X64-HAVE-SHLD-NO-BMI2-NEXT: movq %rdi, %rsi
+; X64-HAVE-SHLD-NO-BMI2-NEXT: shrq %cl, %rsi
+; X64-HAVE-SHLD-NO-BMI2-NEXT: shrdq %cl, %rdi, %rax
+; X64-HAVE-SHLD-NO-BMI2-NEXT: xorl %edi, %edi
+; X64-HAVE-SHLD-NO-BMI2-NEXT: testb $64, %cl
+; X64-HAVE-SHLD-NO-BMI2-NEXT: cmovneq %rsi, %rax
+; X64-HAVE-SHLD-NO-BMI2-NEXT: cmoveq %rsi, %rdi
+; X64-HAVE-SHLD-NO-BMI2-NEXT: movq %rdi, 8(%rdx)
+; X64-HAVE-SHLD-NO-BMI2-NEXT: movq %rax, (%rdx)
+; X64-HAVE-SHLD-NO-BMI2-NEXT: retq
+;
+; X64-NO-SHLD-HAVE-BMI2-LABEL: lshr_16bytes_dwordOff:
+; X64-NO-SHLD-HAVE-BMI2: # %bb.0:
+; X64-NO-SHLD-HAVE-BMI2-NEXT: movq 8(%rdi), %rax
+; X64-NO-SHLD-HAVE-BMI2-NEXT: movzbl (%rsi), %ecx
+; X64-NO-SHLD-HAVE-BMI2-NEXT: shlb $5, %cl
+; X64-NO-SHLD-HAVE-BMI2-NEXT: shrxq %rcx, (%rdi), %rsi
+; X64-NO-SHLD-HAVE-BMI2-NEXT: movl %ecx, %edi
+; X64-NO-SHLD-HAVE-BMI2-NEXT: notb %dil
+; X64-NO-SHLD-HAVE-BMI2-NEXT: leaq (%rax,%rax), %r8
+; X64-NO-SHLD-HAVE-BMI2-NEXT: shlxq %rdi, %r8, %rdi
+; X64-NO-SHLD-HAVE-BMI2-NEXT: orq %rsi, %rdi
+; X64-NO-SHLD-HAVE-BMI2-NEXT: shrxq %rcx, %rax, %rax
+; X64-NO-SHLD-HAVE-BMI2-NEXT: xorl %esi, %esi
+; X64-NO-SHLD-HAVE-BMI2-NEXT: testb $64, %cl
+; X64-NO-SHLD-HAVE-BMI2-NEXT: cmovneq %rax, %rdi
+; X64-NO-SHLD-HAVE-BMI2-NEXT: cmoveq %rax, %rsi
+; X64-NO-SHLD-HAVE-BMI2-NEXT: movq %rsi, 8(%rdx)
+; X64-NO-SHLD-HAVE-BMI2-NEXT: movq %rdi, (%rdx)
+; X64-NO-SHLD-HAVE-BMI2-NEXT: retq
+;
+; X64-HAVE-SHLD-HAVE-BMI2-LABEL: lshr_16bytes_dwordOff:
+; X64-HAVE-SHLD-HAVE-BMI2: # %bb.0:
+; X64-HAVE-SHLD-HAVE-BMI2-NEXT: movq (%rdi), %rax
+; X64-HAVE-SHLD-HAVE-BMI2-NEXT: movq 8(%rdi), %rdi
+; X64-HAVE-SHLD-HAVE-BMI2-NEXT: movzbl (%rsi), %ecx
+; X64-HAVE-SHLD-HAVE-BMI2-NEXT: shlb $5, %cl
+; X64-HAVE-SHLD-HAVE-BMI2-NEXT: shrdq %cl, %rdi, %rax
+; X64-HAVE-SHLD-HAVE-BMI2-NEXT: shrxq %rcx, %rdi, %rsi
+; X64-HAVE-SHLD-HAVE-BMI2-NEXT: xorl %edi, %edi
+; X64-HAVE-SHLD-HAVE-BMI2-NEXT: testb $64, %cl
+; X64-HAVE-SHLD-HAVE-BMI2-NEXT: cmovneq %rsi, %rax
+; X64-HAVE-SHLD-HAVE-BMI2-NEXT: cmoveq %rsi, %rdi
+; X64-HAVE-SHLD-HAVE-BMI2-NEXT: movq %rdi, 8(%rdx)
+; X64-HAVE-SHLD-HAVE-BMI2-NEXT: movq %rax, (%rdx)
+; X64-HAVE-SHLD-HAVE-BMI2-NEXT: retq
+;
+; X86-SSE2-LABEL: lshr_16bytes_dwordOff:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pushl %ebx
; X86-SSE2-NEXT: pushl %edi
@@ -660,19 +1522,17 @@ define void @lshr_16bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; X86-SSE2-NEXT: movl 8(%edx), %ebx
; X86-SSE2-NEXT: movl 12(%edx), %edx
; X86-SSE2-NEXT: movzbl (%ecx), %ecx
+; X86-SSE2-NEXT: xorps %xmm0, %xmm0
+; X86-SSE2-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
; X86-SSE2-NEXT: movl %edx, {{[0-9]+}}(%esp)
; X86-SSE2-NEXT: movl %ebx, {{[0-9]+}}(%esp)
; X86-SSE2-NEXT: movl %edi, {{[0-9]+}}(%esp)
; X86-SSE2-NEXT: movl %esi, (%esp)
-; X86-SSE2-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-SSE2-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-SSE2-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-SSE2-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-SSE2-NEXT: andl $15, %ecx
-; X86-SSE2-NEXT: movl (%esp,%ecx), %edx
-; X86-SSE2-NEXT: movl 4(%esp,%ecx), %esi
-; X86-SSE2-NEXT: movl 12(%esp,%ecx), %edi
-; X86-SSE2-NEXT: movl 8(%esp,%ecx), %ecx
+; X86-SSE2-NEXT: andl $3, %ecx
+; X86-SSE2-NEXT: movl (%esp,%ecx,4), %edx
+; X86-SSE2-NEXT: movl 4(%esp,%ecx,4), %esi
+; X86-SSE2-NEXT: movl 12(%esp,%ecx,4), %edi
+; X86-SSE2-NEXT: movl 8(%esp,%ecx,4), %ecx
; X86-SSE2-NEXT: movl %ecx, 8(%eax)
; X86-SSE2-NEXT: movl %edi, 12(%eax)
; X86-SSE2-NEXT: movl %edx, (%eax)
@@ -683,46 +1543,47 @@ define void @lshr_16bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; X86-SSE2-NEXT: popl %ebx
; X86-SSE2-NEXT: retl
;
-; X86-SSE42-LABEL: lshr_16bytes:
+; X86-SSE42-LABEL: lshr_16bytes_dwordOff:
; X86-SSE42: # %bb.0:
-; X86-SSE42-NEXT: subl $32, %esp
+; X86-SSE42-NEXT: subl $44, %esp
; X86-SSE42-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE42-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-SSE42-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-SSE42-NEXT: movups (%edx), %xmm0
; X86-SSE42-NEXT: movzbl (%ecx), %ecx
; X86-SSE42-NEXT: xorps %xmm1, %xmm1
-; X86-SSE42-NEXT: movups %xmm1, {{[0-9]+}}(%esp)
-; X86-SSE42-NEXT: movups %xmm0, (%esp)
-; X86-SSE42-NEXT: andl $15, %ecx
-; X86-SSE42-NEXT: movups (%esp,%ecx), %xmm0
+; X86-SSE42-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-SSE42-NEXT: movaps %xmm0, (%esp)
+; X86-SSE42-NEXT: andl $3, %ecx
+; X86-SSE42-NEXT: movups (%esp,%ecx,4), %xmm0
; X86-SSE42-NEXT: movups %xmm0, (%eax)
-; X86-SSE42-NEXT: addl $32, %esp
+; X86-SSE42-NEXT: addl $44, %esp
; X86-SSE42-NEXT: retl
;
-; X86-AVX-LABEL: lshr_16bytes:
+; X86-AVX-LABEL: lshr_16bytes_dwordOff:
; X86-AVX: # %bb.0:
-; X86-AVX-NEXT: subl $32, %esp
+; X86-AVX-NEXT: subl $44, %esp
; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-AVX-NEXT: vmovups (%edx), %xmm0
; X86-AVX-NEXT: movzbl (%ecx), %ecx
; X86-AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
-; X86-AVX-NEXT: vmovups %xmm1, {{[0-9]+}}(%esp)
-; X86-AVX-NEXT: vmovups %xmm0, (%esp)
-; X86-AVX-NEXT: andl $15, %ecx
-; X86-AVX-NEXT: vmovups (%esp,%ecx), %xmm0
+; X86-AVX-NEXT: vmovaps %xmm1, {{[0-9]+}}(%esp)
+; X86-AVX-NEXT: vmovaps %xmm0, (%esp)
+; X86-AVX-NEXT: andl $3, %ecx
+; X86-AVX-NEXT: vmovups (%esp,%ecx,4), %xmm0
; X86-AVX-NEXT: vmovups %xmm0, (%eax)
-; X86-AVX-NEXT: addl $32, %esp
+; X86-AVX-NEXT: addl $44, %esp
; X86-AVX-NEXT: retl
%src = load i128, ptr %src.ptr, align 1
- %byteOff = load i128, ptr %byteOff.ptr, align 1
- %bitOff = shl i128 %byteOff, 3
+ %dwordOff = load i128, ptr %dwordOff.ptr, align 1
+ %bitOff = shl i128 %dwordOff, 5
%res = lshr i128 %src, %bitOff
store i128 %res, ptr %dst, align 1
ret void
}
+
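(Editorial note: the _dwordOff variant whose IR tail appears just above differs from @lshr_16bytes only in the offset scale: the offset counts 32-bit dwords, so the IR scales by 5 (`shl i128 %dwordOff, 5`) instead of 3, which is why the X86 checks mask with `andl $3` and index the spill slot as `(%esp,%ecx,4)`. The corresponding hypothetical C model, under the same assumptions and masking caveat as the sketch above:)

    #include <string.h>

    /* Hypothetical C model of @lshr_16bytes_dwordOff: as before, but the
       offset counts 4-byte units, so it is scaled by 32 (shl by 5). */
    void lshr_16bytes_dwordOff_model(const void *src_ptr, const void *dword_off_ptr, void *dst) {
        unsigned __int128 src, dword_off;
        memcpy(&src, src_ptr, 16);
        memcpy(&dword_off, dword_off_ptr, 16);
        /* shl i128 %dwordOff, 5 -- masked; the IR leaves overflow as poison */
        unsigned bit_off = ((unsigned)dword_off << 5) & 127;
        unsigned __int128 res = src >> bit_off;
        memcpy(dst, &res, 16);
    }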
define void @shl_16bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; X64-NO-SHLD-NO-BMI2-LABEL: shl_16bytes:
; X64-NO-SHLD-NO-BMI2: # %bb.0:
@@ -800,7 +1661,877 @@ define void @shl_16bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; X64-HAVE-SHLD-HAVE-BMI2-NEXT: movq %rsi, (%rdx)
; X64-HAVE-SHLD-HAVE-BMI2-NEXT: retq
;
-; X86-SSE2-LABEL: shl_16bytes:
+; FALLBACK16-LABEL: shl_16bytes:
+; FALLBACK16: # %bb.0:
+; FALLBACK16-NEXT: pushl %ebp
+; FALLBACK16-NEXT: pushl %ebx
+; FALLBACK16-NEXT: pushl %edi
+; FALLBACK16-NEXT: pushl %esi
+; FALLBACK16-NEXT: subl $60, %esp
+; FALLBACK16-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK16-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK16-NEXT: movl (%ecx), %ebx
+; FALLBACK16-NEXT: movl 4(%ecx), %esi
+; FALLBACK16-NEXT: movl 8(%ecx), %edi
+; FALLBACK16-NEXT: movl 12(%ecx), %ecx
+; FALLBACK16-NEXT: movb (%eax), %ah
+; FALLBACK16-NEXT: movb %ah, %dh
+; FALLBACK16-NEXT: shlb $3, %dh
+; FALLBACK16-NEXT: xorps %xmm0, %xmm0
+; FALLBACK16-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movl %ebx, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: andb $12, %ah
+; FALLBACK16-NEXT: negb %ah
+; FALLBACK16-NEXT: movsbl %ah, %ebp
+; FALLBACK16-NEXT: movl 32(%esp,%ebp), %ebx
+; FALLBACK16-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: movl 36(%esp,%ebp), %esi
+; FALLBACK16-NEXT: movl %esi, %edi
+; FALLBACK16-NEXT: movb %dh, %cl
+; FALLBACK16-NEXT: shll %cl, %edi
+; FALLBACK16-NEXT: movb %dh, %dl
+; FALLBACK16-NEXT: notb %dl
+; FALLBACK16-NEXT: shrl %ebx
+; FALLBACK16-NEXT: movl %edx, %ecx
+; FALLBACK16-NEXT: shrl %cl, %ebx
+; FALLBACK16-NEXT: orl %edi, %ebx
+; FALLBACK16-NEXT: movl 44(%esp,%ebp), %eax
+; FALLBACK16-NEXT: movb %dh, %cl
+; FALLBACK16-NEXT: shll %cl, %eax
+; FALLBACK16-NEXT: movl 40(%esp,%ebp), %edi
+; FALLBACK16-NEXT: movl %edi, %ebp
+; FALLBACK16-NEXT: shrl %ebp
+; FALLBACK16-NEXT: movl %edx, %ecx
+; FALLBACK16-NEXT: shrl %cl, %ebp
+; FALLBACK16-NEXT: orl %eax, %ebp
+; FALLBACK16-NEXT: movb %dh, %cl
+; FALLBACK16-NEXT: shll %cl, %edi
+; FALLBACK16-NEXT: shrl %esi
+; FALLBACK16-NEXT: movl %edx, %ecx
+; FALLBACK16-NEXT: shrl %cl, %esi
+; FALLBACK16-NEXT: orl %edi, %esi
+; FALLBACK16-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK16-NEXT: movb %dh, %cl
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK16-NEXT: shll %cl, %edx
+; FALLBACK16-NEXT: movl %edx, (%eax)
+; FALLBACK16-NEXT: movl %esi, 8(%eax)
+; FALLBACK16-NEXT: movl %ebp, 12(%eax)
+; FALLBACK16-NEXT: movl %ebx, 4(%eax)
+; FALLBACK16-NEXT: addl $60, %esp
+; FALLBACK16-NEXT: popl %esi
+; FALLBACK16-NEXT: popl %edi
+; FALLBACK16-NEXT: popl %ebx
+; FALLBACK16-NEXT: popl %ebp
+; FALLBACK16-NEXT: retl
+;
+; FALLBACK17-LABEL: shl_16bytes:
+; FALLBACK17: # %bb.0:
+; FALLBACK17-NEXT: pushl %ebx
+; FALLBACK17-NEXT: pushl %edi
+; FALLBACK17-NEXT: pushl %esi
+; FALLBACK17-NEXT: subl $32, %esp
+; FALLBACK17-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK17-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK17-NEXT: movl {{[0-9]+}}(%esp), %edx
+; FALLBACK17-NEXT: movl (%edx), %esi
+; FALLBACK17-NEXT: movl 4(%edx), %edi
+; FALLBACK17-NEXT: movl 8(%edx), %ebx
+; FALLBACK17-NEXT: movl 12(%edx), %edx
+; FALLBACK17-NEXT: movb (%ecx), %ch
+; FALLBACK17-NEXT: movb %ch, %cl
+; FALLBACK17-NEXT: shlb $3, %cl
+; FALLBACK17-NEXT: xorps %xmm0, %xmm0
+; FALLBACK17-NEXT: movaps %xmm0, (%esp)
+; FALLBACK17-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movl %ebx, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: andb $12, %ch
+; FALLBACK17-NEXT: negb %ch
+; FALLBACK17-NEXT: movsbl %ch, %edi
+; FALLBACK17-NEXT: movl 24(%esp,%edi), %esi
+; FALLBACK17-NEXT: movl 28(%esp,%edi), %edx
+; FALLBACK17-NEXT: shldl %cl, %esi, %edx
+; FALLBACK17-NEXT: movl 16(%esp,%edi), %ebx
+; FALLBACK17-NEXT: movl 20(%esp,%edi), %edi
+; FALLBACK17-NEXT: shldl %cl, %edi, %esi
+; FALLBACK17-NEXT: shldl %cl, %ebx, %edi
+; FALLBACK17-NEXT: shll %cl, %ebx
+; FALLBACK17-NEXT: movl %esi, 8(%eax)
+; FALLBACK17-NEXT: movl %edx, 12(%eax)
+; FALLBACK17-NEXT: movl %ebx, (%eax)
+; FALLBACK17-NEXT: movl %edi, 4(%eax)
+; FALLBACK17-NEXT: addl $32, %esp
+; FALLBACK17-NEXT: popl %esi
+; FALLBACK17-NEXT: popl %edi
+; FALLBACK17-NEXT: popl %ebx
+; FALLBACK17-NEXT: retl
+;
+; FALLBACK18-LABEL: shl_16bytes:
+; FALLBACK18: # %bb.0:
+; FALLBACK18-NEXT: pushl %ebp
+; FALLBACK18-NEXT: pushl %ebx
+; FALLBACK18-NEXT: pushl %edi
+; FALLBACK18-NEXT: pushl %esi
+; FALLBACK18-NEXT: subl $44, %esp
+; FALLBACK18-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK18-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK18-NEXT: movl (%ecx), %edx
+; FALLBACK18-NEXT: movl 4(%ecx), %esi
+; FALLBACK18-NEXT: movl 8(%ecx), %edi
+; FALLBACK18-NEXT: movl 12(%ecx), %ecx
+; FALLBACK18-NEXT: movzbl (%eax), %eax
+; FALLBACK18-NEXT: movl %eax, %ebx
+; FALLBACK18-NEXT: shlb $3, %bl
+; FALLBACK18-NEXT: xorps %xmm0, %xmm0
+; FALLBACK18-NEXT: movaps %xmm0, (%esp)
+; FALLBACK18-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: andb $12, %al
+; FALLBACK18-NEXT: negb %al
+; FALLBACK18-NEXT: movsbl %al, %edx
+; FALLBACK18-NEXT: movl 16(%esp,%edx), %edi
+; FALLBACK18-NEXT: movl 20(%esp,%edx), %ecx
+; FALLBACK18-NEXT: shlxl %ebx, %ecx, %esi
+; FALLBACK18-NEXT: shlxl %ebx, %edi, %ebp
+; FALLBACK18-NEXT: movl %ebx, %eax
+; FALLBACK18-NEXT: notb %al
+; FALLBACK18-NEXT: shrl %edi
+; FALLBACK18-NEXT: shrxl %eax, %edi, %edi
+; FALLBACK18-NEXT: orl %esi, %edi
+; FALLBACK18-NEXT: shlxl %ebx, 28(%esp,%edx), %esi
+; FALLBACK18-NEXT: movl 24(%esp,%edx), %edx
+; FALLBACK18-NEXT: shlxl %ebx, %edx, %ebx
+; FALLBACK18-NEXT: shrl %edx
+; FALLBACK18-NEXT: shrxl %eax, %edx, %edx
+; FALLBACK18-NEXT: orl %esi, %edx
+; FALLBACK18-NEXT: shrl %ecx
+; FALLBACK18-NEXT: shrxl %eax, %ecx, %eax
+; FALLBACK18-NEXT: orl %ebx, %eax
+; FALLBACK18-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK18-NEXT: movl %ebp, (%ecx)
+; FALLBACK18-NEXT: movl %eax, 8(%ecx)
+; FALLBACK18-NEXT: movl %edx, 12(%ecx)
+; FALLBACK18-NEXT: movl %edi, 4(%ecx)
+; FALLBACK18-NEXT: addl $44, %esp
+; FALLBACK18-NEXT: popl %esi
+; FALLBACK18-NEXT: popl %edi
+; FALLBACK18-NEXT: popl %ebx
+; FALLBACK18-NEXT: popl %ebp
+; FALLBACK18-NEXT: retl
+;
+; FALLBACK19-LABEL: shl_16bytes:
+; FALLBACK19: # %bb.0:
+; FALLBACK19-NEXT: pushl %ebp
+; FALLBACK19-NEXT: pushl %ebx
+; FALLBACK19-NEXT: pushl %edi
+; FALLBACK19-NEXT: pushl %esi
+; FALLBACK19-NEXT: subl $44, %esp
+; FALLBACK19-NEXT: movl {{[0-9]+}}(%esp), %ebp
+; FALLBACK19-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK19-NEXT: movl {{[0-9]+}}(%esp), %edx
+; FALLBACK19-NEXT: movl (%edx), %esi
+; FALLBACK19-NEXT: movl 4(%edx), %edi
+; FALLBACK19-NEXT: movl 8(%edx), %ebx
+; FALLBACK19-NEXT: movl 12(%edx), %edx
+; FALLBACK19-NEXT: movzbl (%ecx), %eax
+; FALLBACK19-NEXT: movl %eax, %ecx
+; FALLBACK19-NEXT: shlb $3, %cl
+; FALLBACK19-NEXT: xorps %xmm0, %xmm0
+; FALLBACK19-NEXT: movaps %xmm0, (%esp)
+; FALLBACK19-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movl %ebx, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: andb $12, %al
+; FALLBACK19-NEXT: negb %al
+; FALLBACK19-NEXT: movsbl %al, %eax
+; FALLBACK19-NEXT: movl 24(%esp,%eax), %esi
+; FALLBACK19-NEXT: movl 28(%esp,%eax), %edx
+; FALLBACK19-NEXT: shldl %cl, %esi, %edx
+; FALLBACK19-NEXT: movl 16(%esp,%eax), %edi
+; FALLBACK19-NEXT: movl 20(%esp,%eax), %eax
+; FALLBACK19-NEXT: shldl %cl, %eax, %esi
+; FALLBACK19-NEXT: shldl %cl, %edi, %eax
+; FALLBACK19-NEXT: shlxl %ecx, %edi, %ecx
+; FALLBACK19-NEXT: movl %esi, 8(%ebp)
+; FALLBACK19-NEXT: movl %edx, 12(%ebp)
+; FALLBACK19-NEXT: movl %ecx, (%ebp)
+; FALLBACK19-NEXT: movl %eax, 4(%ebp)
+; FALLBACK19-NEXT: addl $44, %esp
+; FALLBACK19-NEXT: popl %esi
+; FALLBACK19-NEXT: popl %edi
+; FALLBACK19-NEXT: popl %ebx
+; FALLBACK19-NEXT: popl %ebp
+; FALLBACK19-NEXT: retl
+;
+; FALLBACK20-LABEL: shl_16bytes:
+; FALLBACK20: # %bb.0:
+; FALLBACK20-NEXT: pushl %ebp
+; FALLBACK20-NEXT: pushl %ebx
+; FALLBACK20-NEXT: pushl %edi
+; FALLBACK20-NEXT: pushl %esi
+; FALLBACK20-NEXT: subl $60, %esp
+; FALLBACK20-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK20-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK20-NEXT: movups (%ecx), %xmm0
+; FALLBACK20-NEXT: movzbl (%eax), %ecx
+; FALLBACK20-NEXT: movl %ecx, %eax
+; FALLBACK20-NEXT: shlb $3, %al
+; FALLBACK20-NEXT: xorps %xmm1, %xmm1
+; FALLBACK20-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; FALLBACK20-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; FALLBACK20-NEXT: andb $12, %cl
+; FALLBACK20-NEXT: negb %cl
+; FALLBACK20-NEXT: movsbl %cl, %edi
+; FALLBACK20-NEXT: movl 44(%esp,%edi), %ebx
+; FALLBACK20-NEXT: movl %eax, %ecx
+; FALLBACK20-NEXT: shll %cl, %ebx
+; FALLBACK20-NEXT: movl %eax, %edx
+; FALLBACK20-NEXT: notb %dl
+; FALLBACK20-NEXT: movl 40(%esp,%edi), %ebp
+; FALLBACK20-NEXT: movl %ebp, %esi
+; FALLBACK20-NEXT: shrl %esi
+; FALLBACK20-NEXT: movl %edx, %ecx
+; FALLBACK20-NEXT: shrl %cl, %esi
+; FALLBACK20-NEXT: orl %ebx, %esi
+; FALLBACK20-NEXT: movl %eax, %ecx
+; FALLBACK20-NEXT: shll %cl, %ebp
+; FALLBACK20-NEXT: movl 32(%esp,%edi), %ecx
+; FALLBACK20-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK20-NEXT: movl 36(%esp,%edi), %ebx
+; FALLBACK20-NEXT: movl %ebx, %edi
+; FALLBACK20-NEXT: shrl %edi
+; FALLBACK20-NEXT: movl %edx, %ecx
+; FALLBACK20-NEXT: shrl %cl, %edi
+; FALLBACK20-NEXT: orl %ebp, %edi
+; FALLBACK20-NEXT: movl %eax, %ecx
+; FALLBACK20-NEXT: shll %cl, %ebx
+; FALLBACK20-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; FALLBACK20-NEXT: shrl %ebp
+; FALLBACK20-NEXT: movl %edx, %ecx
+; FALLBACK20-NEXT: shrl %cl, %ebp
+; FALLBACK20-NEXT: orl %ebx, %ebp
+; FALLBACK20-NEXT: movl {{[0-9]+}}(%esp), %edx
+; FALLBACK20-NEXT: movl %eax, %ecx
+; FALLBACK20-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK20-NEXT: shll %cl, %eax
+; FALLBACK20-NEXT: movl %eax, (%edx)
+; FALLBACK20-NEXT: movl %ebp, 4(%edx)
+; FALLBACK20-NEXT: movl %edi, 8(%edx)
+; FALLBACK20-NEXT: movl %esi, 12(%edx)
+; FALLBACK20-NEXT: addl $60, %esp
+; FALLBACK20-NEXT: popl %esi
+; FALLBACK20-NEXT: popl %edi
+; FALLBACK20-NEXT: popl %ebx
+; FALLBACK20-NEXT: popl %ebp
+; FALLBACK20-NEXT: retl
+;
+; FALLBACK21-LABEL: shl_16bytes:
+; FALLBACK21: # %bb.0:
+; FALLBACK21-NEXT: pushl %ebp
+; FALLBACK21-NEXT: pushl %ebx
+; FALLBACK21-NEXT: pushl %edi
+; FALLBACK21-NEXT: pushl %esi
+; FALLBACK21-NEXT: subl $44, %esp
+; FALLBACK21-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK21-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK21-NEXT: movl {{[0-9]+}}(%esp), %edx
+; FALLBACK21-NEXT: movups (%edx), %xmm0
+; FALLBACK21-NEXT: movzbl (%ecx), %edx
+; FALLBACK21-NEXT: movl %edx, %ecx
+; FALLBACK21-NEXT: shlb $3, %cl
+; FALLBACK21-NEXT: xorps %xmm1, %xmm1
+; FALLBACK21-NEXT: movaps %xmm1, (%esp)
+; FALLBACK21-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; FALLBACK21-NEXT: andb $12, %dl
+; FALLBACK21-NEXT: negb %dl
+; FALLBACK21-NEXT: movsbl %dl, %edi
+; FALLBACK21-NEXT: movl 24(%esp,%edi), %esi
+; FALLBACK21-NEXT: movl 28(%esp,%edi), %edx
+; FALLBACK21-NEXT: shldl %cl, %esi, %edx
+; FALLBACK21-NEXT: movl 16(%esp,%edi), %ebx
+; FALLBACK21-NEXT: movl 20(%esp,%edi), %edi
+; FALLBACK21-NEXT: shldl %cl, %edi, %esi
+; FALLBACK21-NEXT: movl %ebx, %ebp
+; FALLBACK21-NEXT: shll %cl, %ebp
+; FALLBACK21-NEXT: shldl %cl, %ebx, %edi
+; FALLBACK21-NEXT: movl %edi, 4(%eax)
+; FALLBACK21-NEXT: movl %esi, 8(%eax)
+; FALLBACK21-NEXT: movl %edx, 12(%eax)
+; FALLBACK21-NEXT: movl %ebp, (%eax)
+; FALLBACK21-NEXT: addl $44, %esp
+; FALLBACK21-NEXT: popl %esi
+; FALLBACK21-NEXT: popl %edi
+; FALLBACK21-NEXT: popl %ebx
+; FALLBACK21-NEXT: popl %ebp
+; FALLBACK21-NEXT: retl
+;
+; FALLBACK22-LABEL: shl_16bytes:
+; FALLBACK22: # %bb.0:
+; FALLBACK22-NEXT: pushl %ebp
+; FALLBACK22-NEXT: pushl %ebx
+; FALLBACK22-NEXT: pushl %edi
+; FALLBACK22-NEXT: pushl %esi
+; FALLBACK22-NEXT: subl $44, %esp
+; FALLBACK22-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK22-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK22-NEXT: movups (%ecx), %xmm0
+; FALLBACK22-NEXT: movzbl (%eax), %ecx
+; FALLBACK22-NEXT: movl %ecx, %eax
+; FALLBACK22-NEXT: shlb $3, %al
+; FALLBACK22-NEXT: xorps %xmm1, %xmm1
+; FALLBACK22-NEXT: movaps %xmm1, (%esp)
+; FALLBACK22-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; FALLBACK22-NEXT: andb $12, %cl
+; FALLBACK22-NEXT: negb %cl
+; FALLBACK22-NEXT: movsbl %cl, %ecx
+; FALLBACK22-NEXT: shlxl %eax, 28(%esp,%ecx), %esi
+; FALLBACK22-NEXT: movl 24(%esp,%ecx), %edx
+; FALLBACK22-NEXT: shlxl %eax, %edx, %edi
+; FALLBACK22-NEXT: movl %eax, %ebx
+; FALLBACK22-NEXT: notb %bl
+; FALLBACK22-NEXT: shrl %edx
+; FALLBACK22-NEXT: shrxl %ebx, %edx, %edx
+; FALLBACK22-NEXT: orl %esi, %edx
+; FALLBACK22-NEXT: movl 20(%esp,%ecx), %esi
+; FALLBACK22-NEXT: movl %esi, %ebp
+; FALLBACK22-NEXT: shrl %ebp
+; FALLBACK22-NEXT: shrxl %ebx, %ebp, %ebp
+; FALLBACK22-NEXT: orl %edi, %ebp
+; FALLBACK22-NEXT: shlxl %eax, %esi, %esi
+; FALLBACK22-NEXT: movl 16(%esp,%ecx), %ecx
+; FALLBACK22-NEXT: shlxl %eax, %ecx, %eax
+; FALLBACK22-NEXT: shrl %ecx
+; FALLBACK22-NEXT: shrxl %ebx, %ecx, %ecx
+; FALLBACK22-NEXT: orl %esi, %ecx
+; FALLBACK22-NEXT: movl {{[0-9]+}}(%esp), %esi
+; FALLBACK22-NEXT: movl %eax, (%esi)
+; FALLBACK22-NEXT: movl %ecx, 4(%esi)
+; FALLBACK22-NEXT: movl %ebp, 8(%esi)
+; FALLBACK22-NEXT: movl %edx, 12(%esi)
+; FALLBACK22-NEXT: addl $44, %esp
+; FALLBACK22-NEXT: popl %esi
+; FALLBACK22-NEXT: popl %edi
+; FALLBACK22-NEXT: popl %ebx
+; FALLBACK22-NEXT: popl %ebp
+; FALLBACK22-NEXT: retl
+;
+; FALLBACK23-LABEL: shl_16bytes:
+; FALLBACK23: # %bb.0:
+; FALLBACK23-NEXT: pushl %ebp
+; FALLBACK23-NEXT: pushl %ebx
+; FALLBACK23-NEXT: pushl %edi
+; FALLBACK23-NEXT: pushl %esi
+; FALLBACK23-NEXT: subl $44, %esp
+; FALLBACK23-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK23-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK23-NEXT: movl {{[0-9]+}}(%esp), %edx
+; FALLBACK23-NEXT: movups (%edx), %xmm0
+; FALLBACK23-NEXT: movzbl (%ecx), %edx
+; FALLBACK23-NEXT: movl %edx, %ecx
+; FALLBACK23-NEXT: shlb $3, %cl
+; FALLBACK23-NEXT: xorps %xmm1, %xmm1
+; FALLBACK23-NEXT: movaps %xmm1, (%esp)
+; FALLBACK23-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; FALLBACK23-NEXT: andb $12, %dl
+; FALLBACK23-NEXT: negb %dl
+; FALLBACK23-NEXT: movsbl %dl, %edi
+; FALLBACK23-NEXT: movl 24(%esp,%edi), %esi
+; FALLBACK23-NEXT: movl 28(%esp,%edi), %edx
+; FALLBACK23-NEXT: shldl %cl, %esi, %edx
+; FALLBACK23-NEXT: movl 16(%esp,%edi), %ebx
+; FALLBACK23-NEXT: movl 20(%esp,%edi), %edi
+; FALLBACK23-NEXT: shldl %cl, %edi, %esi
+; FALLBACK23-NEXT: shlxl %ecx, %ebx, %ebp
+; FALLBACK23-NEXT: # kill: def $cl killed $cl killed $ecx
+; FALLBACK23-NEXT: shldl %cl, %ebx, %edi
+; FALLBACK23-NEXT: movl %edi, 4(%eax)
+; FALLBACK23-NEXT: movl %esi, 8(%eax)
+; FALLBACK23-NEXT: movl %edx, 12(%eax)
+; FALLBACK23-NEXT: movl %ebp, (%eax)
+; FALLBACK23-NEXT: addl $44, %esp
+; FALLBACK23-NEXT: popl %esi
+; FALLBACK23-NEXT: popl %edi
+; FALLBACK23-NEXT: popl %ebx
+; FALLBACK23-NEXT: popl %ebp
+; FALLBACK23-NEXT: retl
+;
+; FALLBACK24-LABEL: shl_16bytes:
+; FALLBACK24: # %bb.0:
+; FALLBACK24-NEXT: pushl %ebp
+; FALLBACK24-NEXT: pushl %ebx
+; FALLBACK24-NEXT: pushl %edi
+; FALLBACK24-NEXT: pushl %esi
+; FALLBACK24-NEXT: subl $60, %esp
+; FALLBACK24-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK24-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK24-NEXT: vmovups (%ecx), %xmm0
+; FALLBACK24-NEXT: movzbl (%eax), %ecx
+; FALLBACK24-NEXT: movl %ecx, %eax
+; FALLBACK24-NEXT: shlb $3, %al
+; FALLBACK24-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; FALLBACK24-NEXT: vmovaps %xmm1, {{[0-9]+}}(%esp)
+; FALLBACK24-NEXT: vmovaps %xmm0, {{[0-9]+}}(%esp)
+; FALLBACK24-NEXT: andb $12, %cl
+; FALLBACK24-NEXT: negb %cl
+; FALLBACK24-NEXT: movsbl %cl, %edi
+; FALLBACK24-NEXT: movl 44(%esp,%edi), %ebx
+; FALLBACK24-NEXT: movl %eax, %ecx
+; FALLBACK24-NEXT: shll %cl, %ebx
+; FALLBACK24-NEXT: movl %eax, %edx
+; FALLBACK24-NEXT: notb %dl
+; FALLBACK24-NEXT: movl 40(%esp,%edi), %ebp
+; FALLBACK24-NEXT: movl %ebp, %esi
+; FALLBACK24-NEXT: shrl %esi
+; FALLBACK24-NEXT: movl %edx, %ecx
+; FALLBACK24-NEXT: shrl %cl, %esi
+; FALLBACK24-NEXT: orl %ebx, %esi
+; FALLBACK24-NEXT: movl %eax, %ecx
+; FALLBACK24-NEXT: shll %cl, %ebp
+; FALLBACK24-NEXT: movl 32(%esp,%edi), %ecx
+; FALLBACK24-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK24-NEXT: movl 36(%esp,%edi), %ebx
+; FALLBACK24-NEXT: movl %ebx, %edi
+; FALLBACK24-NEXT: shrl %edi
+; FALLBACK24-NEXT: movl %edx, %ecx
+; FALLBACK24-NEXT: shrl %cl, %edi
+; FALLBACK24-NEXT: orl %ebp, %edi
+; FALLBACK24-NEXT: movl %eax, %ecx
+; FALLBACK24-NEXT: shll %cl, %ebx
+; FALLBACK24-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; FALLBACK24-NEXT: shrl %ebp
+; FALLBACK24-NEXT: movl %edx, %ecx
+; FALLBACK24-NEXT: shrl %cl, %ebp
+; FALLBACK24-NEXT: orl %ebx, %ebp
+; FALLBACK24-NEXT: movl {{[0-9]+}}(%esp), %edx
+; FALLBACK24-NEXT: movl %eax, %ecx
+; FALLBACK24-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK24-NEXT: shll %cl, %eax
+; FALLBACK24-NEXT: movl %eax, (%edx)
+; FALLBACK24-NEXT: movl %ebp, 4(%edx)
+; FALLBACK24-NEXT: movl %edi, 8(%edx)
+; FALLBACK24-NEXT: movl %esi, 12(%edx)
+; FALLBACK24-NEXT: addl $60, %esp
+; FALLBACK24-NEXT: popl %esi
+; FALLBACK24-NEXT: popl %edi
+; FALLBACK24-NEXT: popl %ebx
+; FALLBACK24-NEXT: popl %ebp
+; FALLBACK24-NEXT: retl
+;
+; FALLBACK25-LABEL: shl_16bytes:
+; FALLBACK25: # %bb.0:
+; FALLBACK25-NEXT: pushl %ebp
+; FALLBACK25-NEXT: pushl %ebx
+; FALLBACK25-NEXT: pushl %edi
+; FALLBACK25-NEXT: pushl %esi
+; FALLBACK25-NEXT: subl $44, %esp
+; FALLBACK25-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK25-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK25-NEXT: movl {{[0-9]+}}(%esp), %edx
+; FALLBACK25-NEXT: vmovups (%edx), %xmm0
+; FALLBACK25-NEXT: movzbl (%ecx), %edx
+; FALLBACK25-NEXT: movl %edx, %ecx
+; FALLBACK25-NEXT: shlb $3, %cl
+; FALLBACK25-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; FALLBACK25-NEXT: vmovaps %xmm1, (%esp)
+; FALLBACK25-NEXT: vmovaps %xmm0, {{[0-9]+}}(%esp)
+; FALLBACK25-NEXT: andb $12, %dl
+; FALLBACK25-NEXT: negb %dl
+; FALLBACK25-NEXT: movsbl %dl, %edi
+; FALLBACK25-NEXT: movl 24(%esp,%edi), %esi
+; FALLBACK25-NEXT: movl 28(%esp,%edi), %edx
+; FALLBACK25-NEXT: shldl %cl, %esi, %edx
+; FALLBACK25-NEXT: movl 16(%esp,%edi), %ebx
+; FALLBACK25-NEXT: movl 20(%esp,%edi), %edi
+; FALLBACK25-NEXT: shldl %cl, %edi, %esi
+; FALLBACK25-NEXT: movl %ebx, %ebp
+; FALLBACK25-NEXT: shll %cl, %ebp
+; FALLBACK25-NEXT: shldl %cl, %ebx, %edi
+; FALLBACK25-NEXT: movl %edi, 4(%eax)
+; FALLBACK25-NEXT: movl %esi, 8(%eax)
+; FALLBACK25-NEXT: movl %edx, 12(%eax)
+; FALLBACK25-NEXT: movl %ebp, (%eax)
+; FALLBACK25-NEXT: addl $44, %esp
+; FALLBACK25-NEXT: popl %esi
+; FALLBACK25-NEXT: popl %edi
+; FALLBACK25-NEXT: popl %ebx
+; FALLBACK25-NEXT: popl %ebp
+; FALLBACK25-NEXT: retl
+;
+; FALLBACK26-LABEL: shl_16bytes:
+; FALLBACK26: # %bb.0:
+; FALLBACK26-NEXT: pushl %ebp
+; FALLBACK26-NEXT: pushl %ebx
+; FALLBACK26-NEXT: pushl %edi
+; FALLBACK26-NEXT: pushl %esi
+; FALLBACK26-NEXT: subl $44, %esp
+; FALLBACK26-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK26-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK26-NEXT: vmovups (%ecx), %xmm0
+; FALLBACK26-NEXT: movzbl (%eax), %ecx
+; FALLBACK26-NEXT: movl %ecx, %eax
+; FALLBACK26-NEXT: shlb $3, %al
+; FALLBACK26-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; FALLBACK26-NEXT: vmovaps %xmm1, (%esp)
+; FALLBACK26-NEXT: vmovaps %xmm0, {{[0-9]+}}(%esp)
+; FALLBACK26-NEXT: andb $12, %cl
+; FALLBACK26-NEXT: negb %cl
+; FALLBACK26-NEXT: movsbl %cl, %ecx
+; FALLBACK26-NEXT: shlxl %eax, 28(%esp,%ecx), %esi
+; FALLBACK26-NEXT: movl 24(%esp,%ecx), %edx
+; FALLBACK26-NEXT: shlxl %eax, %edx, %edi
+; FALLBACK26-NEXT: movl %eax, %ebx
+; FALLBACK26-NEXT: notb %bl
+; FALLBACK26-NEXT: shrl %edx
+; FALLBACK26-NEXT: shrxl %ebx, %edx, %edx
+; FALLBACK26-NEXT: orl %esi, %edx
+; FALLBACK26-NEXT: movl 20(%esp,%ecx), %esi
+; FALLBACK26-NEXT: movl %esi, %ebp
+; FALLBACK26-NEXT: shrl %ebp
+; FALLBACK26-NEXT: shrxl %ebx, %ebp, %ebp
+; FALLBACK26-NEXT: orl %edi, %ebp
+; FALLBACK26-NEXT: shlxl %eax, %esi, %esi
+; FALLBACK26-NEXT: movl 16(%esp,%ecx), %ecx
+; FALLBACK26-NEXT: shlxl %eax, %ecx, %eax
+; FALLBACK26-NEXT: shrl %ecx
+; FALLBACK26-NEXT: shrxl %ebx, %ecx, %ecx
+; FALLBACK26-NEXT: orl %esi, %ecx
+; FALLBACK26-NEXT: movl {{[0-9]+}}(%esp), %esi
+; FALLBACK26-NEXT: movl %eax, (%esi)
+; FALLBACK26-NEXT: movl %ecx, 4(%esi)
+; FALLBACK26-NEXT: movl %ebp, 8(%esi)
+; FALLBACK26-NEXT: movl %edx, 12(%esi)
+; FALLBACK26-NEXT: addl $44, %esp
+; FALLBACK26-NEXT: popl %esi
+; FALLBACK26-NEXT: popl %edi
+; FALLBACK26-NEXT: popl %ebx
+; FALLBACK26-NEXT: popl %ebp
+; FALLBACK26-NEXT: retl
+;
+; FALLBACK27-LABEL: shl_16bytes:
+; FALLBACK27: # %bb.0:
+; FALLBACK27-NEXT: pushl %ebp
+; FALLBACK27-NEXT: pushl %ebx
+; FALLBACK27-NEXT: pushl %edi
+; FALLBACK27-NEXT: pushl %esi
+; FALLBACK27-NEXT: subl $44, %esp
+; FALLBACK27-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK27-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK27-NEXT: movl {{[0-9]+}}(%esp), %edx
+; FALLBACK27-NEXT: vmovups (%edx), %xmm0
+; FALLBACK27-NEXT: movzbl (%ecx), %edx
+; FALLBACK27-NEXT: movl %edx, %ecx
+; FALLBACK27-NEXT: shlb $3, %cl
+; FALLBACK27-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; FALLBACK27-NEXT: vmovaps %xmm1, (%esp)
+; FALLBACK27-NEXT: vmovaps %xmm0, {{[0-9]+}}(%esp)
+; FALLBACK27-NEXT: andb $12, %dl
+; FALLBACK27-NEXT: negb %dl
+; FALLBACK27-NEXT: movsbl %dl, %edi
+; FALLBACK27-NEXT: movl 24(%esp,%edi), %esi
+; FALLBACK27-NEXT: movl 28(%esp,%edi), %edx
+; FALLBACK27-NEXT: shldl %cl, %esi, %edx
+; FALLBACK27-NEXT: movl 16(%esp,%edi), %ebx
+; FALLBACK27-NEXT: movl 20(%esp,%edi), %edi
+; FALLBACK27-NEXT: shldl %cl, %edi, %esi
+; FALLBACK27-NEXT: shlxl %ecx, %ebx, %ebp
+; FALLBACK27-NEXT: # kill: def $cl killed $cl killed $ecx
+; FALLBACK27-NEXT: shldl %cl, %ebx, %edi
+; FALLBACK27-NEXT: movl %edi, 4(%eax)
+; FALLBACK27-NEXT: movl %esi, 8(%eax)
+; FALLBACK27-NEXT: movl %edx, 12(%eax)
+; FALLBACK27-NEXT: movl %ebp, (%eax)
+; FALLBACK27-NEXT: addl $44, %esp
+; FALLBACK27-NEXT: popl %esi
+; FALLBACK27-NEXT: popl %edi
+; FALLBACK27-NEXT: popl %ebx
+; FALLBACK27-NEXT: popl %ebp
+; FALLBACK27-NEXT: retl
+;
+; FALLBACK28-LABEL: shl_16bytes:
+; FALLBACK28: # %bb.0:
+; FALLBACK28-NEXT: pushl %ebp
+; FALLBACK28-NEXT: pushl %ebx
+; FALLBACK28-NEXT: pushl %edi
+; FALLBACK28-NEXT: pushl %esi
+; FALLBACK28-NEXT: subl $60, %esp
+; FALLBACK28-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK28-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK28-NEXT: vmovups (%ecx), %xmm0
+; FALLBACK28-NEXT: movzbl (%eax), %ecx
+; FALLBACK28-NEXT: movl %ecx, %eax
+; FALLBACK28-NEXT: shlb $3, %al
+; FALLBACK28-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; FALLBACK28-NEXT: vmovaps %xmm1, {{[0-9]+}}(%esp)
+; FALLBACK28-NEXT: vmovaps %xmm0, {{[0-9]+}}(%esp)
+; FALLBACK28-NEXT: andb $12, %cl
+; FALLBACK28-NEXT: negb %cl
+; FALLBACK28-NEXT: movsbl %cl, %edi
+; FALLBACK28-NEXT: movl 44(%esp,%edi), %ebx
+; FALLBACK28-NEXT: movl %eax, %ecx
+; FALLBACK28-NEXT: shll %cl, %ebx
+; FALLBACK28-NEXT: movl %eax, %edx
+; FALLBACK28-NEXT: notb %dl
+; FALLBACK28-NEXT: movl 40(%esp,%edi), %ebp
+; FALLBACK28-NEXT: movl %ebp, %esi
+; FALLBACK28-NEXT: shrl %esi
+; FALLBACK28-NEXT: movl %edx, %ecx
+; FALLBACK28-NEXT: shrl %cl, %esi
+; FALLBACK28-NEXT: orl %ebx, %esi
+; FALLBACK28-NEXT: movl %eax, %ecx
+; FALLBACK28-NEXT: shll %cl, %ebp
+; FALLBACK28-NEXT: movl 32(%esp,%edi), %ecx
+; FALLBACK28-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK28-NEXT: movl 36(%esp,%edi), %ebx
+; FALLBACK28-NEXT: movl %ebx, %edi
+; FALLBACK28-NEXT: shrl %edi
+; FALLBACK28-NEXT: movl %edx, %ecx
+; FALLBACK28-NEXT: shrl %cl, %edi
+; FALLBACK28-NEXT: orl %ebp, %edi
+; FALLBACK28-NEXT: movl %eax, %ecx
+; FALLBACK28-NEXT: shll %cl, %ebx
+; FALLBACK28-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; FALLBACK28-NEXT: shrl %ebp
+; FALLBACK28-NEXT: movl %edx, %ecx
+; FALLBACK28-NEXT: shrl %cl, %ebp
+; FALLBACK28-NEXT: orl %ebx, %ebp
+; FALLBACK28-NEXT: movl {{[0-9]+}}(%esp), %edx
+; FALLBACK28-NEXT: movl %eax, %ecx
+; FALLBACK28-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK28-NEXT: shll %cl, %eax
+; FALLBACK28-NEXT: movl %eax, (%edx)
+; FALLBACK28-NEXT: movl %ebp, 4(%edx)
+; FALLBACK28-NEXT: movl %edi, 8(%edx)
+; FALLBACK28-NEXT: movl %esi, 12(%edx)
+; FALLBACK28-NEXT: addl $60, %esp
+; FALLBACK28-NEXT: popl %esi
+; FALLBACK28-NEXT: popl %edi
+; FALLBACK28-NEXT: popl %ebx
+; FALLBACK28-NEXT: popl %ebp
+; FALLBACK28-NEXT: retl
+;
+; FALLBACK29-LABEL: shl_16bytes:
+; FALLBACK29: # %bb.0:
+; FALLBACK29-NEXT: pushl %ebp
+; FALLBACK29-NEXT: pushl %ebx
+; FALLBACK29-NEXT: pushl %edi
+; FALLBACK29-NEXT: pushl %esi
+; FALLBACK29-NEXT: subl $44, %esp
+; FALLBACK29-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK29-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK29-NEXT: movl {{[0-9]+}}(%esp), %edx
+; FALLBACK29-NEXT: vmovups (%edx), %xmm0
+; FALLBACK29-NEXT: movzbl (%ecx), %edx
+; FALLBACK29-NEXT: movl %edx, %ecx
+; FALLBACK29-NEXT: shlb $3, %cl
+; FALLBACK29-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; FALLBACK29-NEXT: vmovaps %xmm1, (%esp)
+; FALLBACK29-NEXT: vmovaps %xmm0, {{[0-9]+}}(%esp)
+; FALLBACK29-NEXT: andb $12, %dl
+; FALLBACK29-NEXT: negb %dl
+; FALLBACK29-NEXT: movsbl %dl, %edi
+; FALLBACK29-NEXT: movl 24(%esp,%edi), %esi
+; FALLBACK29-NEXT: movl 28(%esp,%edi), %edx
+; FALLBACK29-NEXT: shldl %cl, %esi, %edx
+; FALLBACK29-NEXT: movl 16(%esp,%edi), %ebx
+; FALLBACK29-NEXT: movl 20(%esp,%edi), %edi
+; FALLBACK29-NEXT: shldl %cl, %edi, %esi
+; FALLBACK29-NEXT: movl %ebx, %ebp
+; FALLBACK29-NEXT: shll %cl, %ebp
+; FALLBACK29-NEXT: shldl %cl, %ebx, %edi
+; FALLBACK29-NEXT: movl %edi, 4(%eax)
+; FALLBACK29-NEXT: movl %esi, 8(%eax)
+; FALLBACK29-NEXT: movl %edx, 12(%eax)
+; FALLBACK29-NEXT: movl %ebp, (%eax)
+; FALLBACK29-NEXT: addl $44, %esp
+; FALLBACK29-NEXT: popl %esi
+; FALLBACK29-NEXT: popl %edi
+; FALLBACK29-NEXT: popl %ebx
+; FALLBACK29-NEXT: popl %ebp
+; FALLBACK29-NEXT: retl
+;
+; FALLBACK30-LABEL: shl_16bytes:
+; FALLBACK30: # %bb.0:
+; FALLBACK30-NEXT: pushl %ebp
+; FALLBACK30-NEXT: pushl %ebx
+; FALLBACK30-NEXT: pushl %edi
+; FALLBACK30-NEXT: pushl %esi
+; FALLBACK30-NEXT: subl $44, %esp
+; FALLBACK30-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK30-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK30-NEXT: vmovups (%ecx), %xmm0
+; FALLBACK30-NEXT: movzbl (%eax), %ecx
+; FALLBACK30-NEXT: movl %ecx, %eax
+; FALLBACK30-NEXT: shlb $3, %al
+; FALLBACK30-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; FALLBACK30-NEXT: vmovaps %xmm1, (%esp)
+; FALLBACK30-NEXT: vmovaps %xmm0, {{[0-9]+}}(%esp)
+; FALLBACK30-NEXT: andb $12, %cl
+; FALLBACK30-NEXT: negb %cl
+; FALLBACK30-NEXT: movsbl %cl, %ecx
+; FALLBACK30-NEXT: shlxl %eax, 28(%esp,%ecx), %esi
+; FALLBACK30-NEXT: movl 24(%esp,%ecx), %edx
+; FALLBACK30-NEXT: shlxl %eax, %edx, %edi
+; FALLBACK30-NEXT: movl %eax, %ebx
+; FALLBACK30-NEXT: notb %bl
+; FALLBACK30-NEXT: shrl %edx
+; FALLBACK30-NEXT: shrxl %ebx, %edx, %edx
+; FALLBACK30-NEXT: orl %esi, %edx
+; FALLBACK30-NEXT: movl 20(%esp,%ecx), %esi
+; FALLBACK30-NEXT: movl %esi, %ebp
+; FALLBACK30-NEXT: shrl %ebp
+; FALLBACK30-NEXT: shrxl %ebx, %ebp, %ebp
+; FALLBACK30-NEXT: orl %edi, %ebp
+; FALLBACK30-NEXT: shlxl %eax, %esi, %esi
+; FALLBACK30-NEXT: movl 16(%esp,%ecx), %ecx
+; FALLBACK30-NEXT: shlxl %eax, %ecx, %eax
+; FALLBACK30-NEXT: shrl %ecx
+; FALLBACK30-NEXT: shrxl %ebx, %ecx, %ecx
+; FALLBACK30-NEXT: orl %esi, %ecx
+; FALLBACK30-NEXT: movl {{[0-9]+}}(%esp), %esi
+; FALLBACK30-NEXT: movl %eax, (%esi)
+; FALLBACK30-NEXT: movl %ecx, 4(%esi)
+; FALLBACK30-NEXT: movl %ebp, 8(%esi)
+; FALLBACK30-NEXT: movl %edx, 12(%esi)
+; FALLBACK30-NEXT: addl $44, %esp
+; FALLBACK30-NEXT: popl %esi
+; FALLBACK30-NEXT: popl %edi
+; FALLBACK30-NEXT: popl %ebx
+; FALLBACK30-NEXT: popl %ebp
+; FALLBACK30-NEXT: retl
+;
+; FALLBACK31-LABEL: shl_16bytes:
+; FALLBACK31: # %bb.0:
+; FALLBACK31-NEXT: pushl %ebp
+; FALLBACK31-NEXT: pushl %ebx
+; FALLBACK31-NEXT: pushl %edi
+; FALLBACK31-NEXT: pushl %esi
+; FALLBACK31-NEXT: subl $44, %esp
+; FALLBACK31-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK31-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK31-NEXT: movl {{[0-9]+}}(%esp), %edx
+; FALLBACK31-NEXT: vmovups (%edx), %xmm0
+; FALLBACK31-NEXT: movzbl (%ecx), %edx
+; FALLBACK31-NEXT: movl %edx, %ecx
+; FALLBACK31-NEXT: shlb $3, %cl
+; FALLBACK31-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; FALLBACK31-NEXT: vmovaps %xmm1, (%esp)
+; FALLBACK31-NEXT: vmovaps %xmm0, {{[0-9]+}}(%esp)
+; FALLBACK31-NEXT: andb $12, %dl
+; FALLBACK31-NEXT: negb %dl
+; FALLBACK31-NEXT: movsbl %dl, %edi
+; FALLBACK31-NEXT: movl 24(%esp,%edi), %esi
+; FALLBACK31-NEXT: movl 28(%esp,%edi), %edx
+; FALLBACK31-NEXT: shldl %cl, %esi, %edx
+; FALLBACK31-NEXT: movl 16(%esp,%edi), %ebx
+; FALLBACK31-NEXT: movl 20(%esp,%edi), %edi
+; FALLBACK31-NEXT: shldl %cl, %edi, %esi
+; FALLBACK31-NEXT: shlxl %ecx, %ebx, %ebp
+; FALLBACK31-NEXT: # kill: def $cl killed $cl killed $ecx
+; FALLBACK31-NEXT: shldl %cl, %ebx, %edi
+; FALLBACK31-NEXT: movl %edi, 4(%eax)
+; FALLBACK31-NEXT: movl %esi, 8(%eax)
+; FALLBACK31-NEXT: movl %edx, 12(%eax)
+; FALLBACK31-NEXT: movl %ebp, (%eax)
+; FALLBACK31-NEXT: addl $44, %esp
+; FALLBACK31-NEXT: popl %esi
+; FALLBACK31-NEXT: popl %edi
+; FALLBACK31-NEXT: popl %ebx
+; FALLBACK31-NEXT: popl %ebp
+; FALLBACK31-NEXT: retl
+ %src = load i128, ptr %src.ptr, align 1
+ %byteOff = load i128, ptr %byteOff.ptr, align 1
+ %bitOff = shl i128 %byteOff, 3
+ %res = shl i128 %src, %bitOff
+ store i128 %res, ptr %dst, align 1
+ ret void
+}
+
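The FALLBACK28-31 checks above all lower the variable i128 shift the same way: spill 16 bytes of zeros followed by the 16-byte value to the stack, negate the dword-masked byte offset, and reload a 16-byte window starting below the value; the sub-dword remainder of the shift count is then applied per word, stitching neighbouring words together with shr/or pairs (shldl where available, shlxl/shrxl under BMI2). Below is a minimal C sketch of the window trick only, at byte granularity, assuming a little-endian layout and offsets already reduced modulo 16 (larger shifts are poison in the IR); the function name is illustrative, not anything in the test.

    #include <stdint.h>
    #include <string.h>

    /* Illustration of the sliding-window lowering, not the generated code. */
    void shl_16bytes_sketch(const uint8_t *src, unsigned byte_off, uint8_t *dst) {
        uint8_t buf[32] = {0};               /* zeros below the value */
        memcpy(buf + 16, src, 16);           /* value in the upper half */
        /* A left shift by N bytes is a 16-byte read starting N bytes
           below the value -- hence the negated offset ("negb %cl"). */
        memcpy(dst, buf + 16 - (byte_off & 15), 16);
    }

The vector paths (X86-SSE42, X86-AVX) stop at the window load, since the shift amount in this test is always a whole number of bytes; the GPR fallbacks only get dword granularity from the window ("andb $12") and make up the remaining 8, 16, or 24 bits with the per-word shifts.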
+define void @shl_16bytes_dwordOff(ptr %src.ptr, ptr %dwordOff.ptr, ptr %dst) nounwind {
+; X64-NO-SHLD-NO-BMI2-LABEL: shl_16bytes_dwordOff:
+; X64-NO-SHLD-NO-BMI2: # %bb.0:
+; X64-NO-SHLD-NO-BMI2-NEXT: movq (%rdi), %r8
+; X64-NO-SHLD-NO-BMI2-NEXT: movq 8(%rdi), %rdi
+; X64-NO-SHLD-NO-BMI2-NEXT: movzbl (%rsi), %eax
+; X64-NO-SHLD-NO-BMI2-NEXT: shlb $5, %al
+; X64-NO-SHLD-NO-BMI2-NEXT: movl %eax, %ecx
+; X64-NO-SHLD-NO-BMI2-NEXT: shlq %cl, %rdi
+; X64-NO-SHLD-NO-BMI2-NEXT: movq %r8, %rsi
+; X64-NO-SHLD-NO-BMI2-NEXT: shrq %rsi
+; X64-NO-SHLD-NO-BMI2-NEXT: notb %cl
+; X64-NO-SHLD-NO-BMI2-NEXT: shrq %cl, %rsi
+; X64-NO-SHLD-NO-BMI2-NEXT: orq %rdi, %rsi
+; X64-NO-SHLD-NO-BMI2-NEXT: movl %eax, %ecx
+; X64-NO-SHLD-NO-BMI2-NEXT: shlq %cl, %r8
+; X64-NO-SHLD-NO-BMI2-NEXT: xorl %ecx, %ecx
+; X64-NO-SHLD-NO-BMI2-NEXT: testb $64, %al
+; X64-NO-SHLD-NO-BMI2-NEXT: cmovneq %r8, %rsi
+; X64-NO-SHLD-NO-BMI2-NEXT: cmoveq %r8, %rcx
+; X64-NO-SHLD-NO-BMI2-NEXT: movq %rcx, (%rdx)
+; X64-NO-SHLD-NO-BMI2-NEXT: movq %rsi, 8(%rdx)
+; X64-NO-SHLD-NO-BMI2-NEXT: retq
+;
+; X64-HAVE-SHLD-NO-BMI2-LABEL: shl_16bytes_dwordOff:
+; X64-HAVE-SHLD-NO-BMI2: # %bb.0:
+; X64-HAVE-SHLD-NO-BMI2-NEXT: movq (%rdi), %rax
+; X64-HAVE-SHLD-NO-BMI2-NEXT: movq 8(%rdi), %rdi
+; X64-HAVE-SHLD-NO-BMI2-NEXT: movzbl (%rsi), %ecx
+; X64-HAVE-SHLD-NO-BMI2-NEXT: shlb $5, %cl
+; X64-HAVE-SHLD-NO-BMI2-NEXT: movq %rax, %rsi
+; X64-HAVE-SHLD-NO-BMI2-NEXT: shlq %cl, %rsi
+; X64-HAVE-SHLD-NO-BMI2-NEXT: shldq %cl, %rax, %rdi
+; X64-HAVE-SHLD-NO-BMI2-NEXT: xorl %eax, %eax
+; X64-HAVE-SHLD-NO-BMI2-NEXT: testb $64, %cl
+; X64-HAVE-SHLD-NO-BMI2-NEXT: cmovneq %rsi, %rdi
+; X64-HAVE-SHLD-NO-BMI2-NEXT: cmoveq %rsi, %rax
+; X64-HAVE-SHLD-NO-BMI2-NEXT: movq %rdi, 8(%rdx)
+; X64-HAVE-SHLD-NO-BMI2-NEXT: movq %rax, (%rdx)
+; X64-HAVE-SHLD-NO-BMI2-NEXT: retq
+;
+; X64-NO-SHLD-HAVE-BMI2-LABEL: shl_16bytes_dwordOff:
+; X64-NO-SHLD-HAVE-BMI2: # %bb.0:
+; X64-NO-SHLD-HAVE-BMI2-NEXT: movq (%rdi), %rax
+; X64-NO-SHLD-HAVE-BMI2-NEXT: movzbl (%rsi), %ecx
+; X64-NO-SHLD-HAVE-BMI2-NEXT: shlb $5, %cl
+; X64-NO-SHLD-HAVE-BMI2-NEXT: shlxq %rcx, 8(%rdi), %rsi
+; X64-NO-SHLD-HAVE-BMI2-NEXT: movl %ecx, %edi
+; X64-NO-SHLD-HAVE-BMI2-NEXT: notb %dil
+; X64-NO-SHLD-HAVE-BMI2-NEXT: shlxq %rcx, %rax, %r8
+; X64-NO-SHLD-HAVE-BMI2-NEXT: shrq %rax
+; X64-NO-SHLD-HAVE-BMI2-NEXT: shrxq %rdi, %rax, %rax
+; X64-NO-SHLD-HAVE-BMI2-NEXT: orq %rsi, %rax
+; X64-NO-SHLD-HAVE-BMI2-NEXT: xorl %esi, %esi
+; X64-NO-SHLD-HAVE-BMI2-NEXT: testb $64, %cl
+; X64-NO-SHLD-HAVE-BMI2-NEXT: cmovneq %r8, %rax
+; X64-NO-SHLD-HAVE-BMI2-NEXT: cmoveq %r8, %rsi
+; X64-NO-SHLD-HAVE-BMI2-NEXT: movq %rsi, (%rdx)
+; X64-NO-SHLD-HAVE-BMI2-NEXT: movq %rax, 8(%rdx)
+; X64-NO-SHLD-HAVE-BMI2-NEXT: retq
+;
+; X64-HAVE-SHLD-HAVE-BMI2-LABEL: shl_16bytes_dwordOff:
+; X64-HAVE-SHLD-HAVE-BMI2: # %bb.0:
+; X64-HAVE-SHLD-HAVE-BMI2-NEXT: movq (%rdi), %rax
+; X64-HAVE-SHLD-HAVE-BMI2-NEXT: movq 8(%rdi), %rdi
+; X64-HAVE-SHLD-HAVE-BMI2-NEXT: movzbl (%rsi), %ecx
+; X64-HAVE-SHLD-HAVE-BMI2-NEXT: shlb $5, %cl
+; X64-HAVE-SHLD-HAVE-BMI2-NEXT: shldq %cl, %rax, %rdi
+; X64-HAVE-SHLD-HAVE-BMI2-NEXT: shlxq %rcx, %rax, %rax
+; X64-HAVE-SHLD-HAVE-BMI2-NEXT: xorl %esi, %esi
+; X64-HAVE-SHLD-HAVE-BMI2-NEXT: testb $64, %cl
+; X64-HAVE-SHLD-HAVE-BMI2-NEXT: cmovneq %rax, %rdi
+; X64-HAVE-SHLD-HAVE-BMI2-NEXT: cmoveq %rax, %rsi
+; X64-HAVE-SHLD-HAVE-BMI2-NEXT: movq %rdi, 8(%rdx)
+; X64-HAVE-SHLD-HAVE-BMI2-NEXT: movq %rsi, (%rdx)
+; X64-HAVE-SHLD-HAVE-BMI2-NEXT: retq
+;
+; X86-SSE2-LABEL: shl_16bytes_dwordOff:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pushl %ebx
; X86-SSE2-NEXT: pushl %edi
@@ -814,15 +2545,14 @@ define void @shl_16bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; X86-SSE2-NEXT: movl 8(%edx), %ebx
; X86-SSE2-NEXT: movl 12(%edx), %edx
; X86-SSE2-NEXT: movzbl (%ecx), %ecx
+; X86-SSE2-NEXT: xorps %xmm0, %xmm0
+; X86-SSE2-NEXT: movaps %xmm0, (%esp)
; X86-SSE2-NEXT: movl %edx, {{[0-9]+}}(%esp)
; X86-SSE2-NEXT: movl %ebx, {{[0-9]+}}(%esp)
; X86-SSE2-NEXT: movl %edi, {{[0-9]+}}(%esp)
; X86-SSE2-NEXT: movl %esi, {{[0-9]+}}(%esp)
-; X86-SSE2-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-SSE2-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-SSE2-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-SSE2-NEXT: movl $0, (%esp)
-; X86-SSE2-NEXT: andb $15, %cl
+; X86-SSE2-NEXT: shlb $2, %cl
+; X86-SSE2-NEXT: andb $12, %cl
; X86-SSE2-NEXT: negb %cl
; X86-SSE2-NEXT: movsbl %cl, %ecx
; X86-SSE2-NEXT: movl 16(%esp,%ecx), %edx
@@ -839,50 +2569,53 @@ define void @shl_16bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; X86-SSE2-NEXT: popl %ebx
; X86-SSE2-NEXT: retl
;
-; X86-SSE42-LABEL: shl_16bytes:
+; X86-SSE42-LABEL: shl_16bytes_dwordOff:
; X86-SSE42: # %bb.0:
-; X86-SSE42-NEXT: subl $32, %esp
+; X86-SSE42-NEXT: subl $44, %esp
; X86-SSE42-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE42-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-SSE42-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-SSE42-NEXT: movups (%edx), %xmm0
; X86-SSE42-NEXT: movzbl (%ecx), %ecx
; X86-SSE42-NEXT: xorps %xmm1, %xmm1
-; X86-SSE42-NEXT: movups %xmm1, (%esp)
-; X86-SSE42-NEXT: movups %xmm0, {{[0-9]+}}(%esp)
-; X86-SSE42-NEXT: andb $15, %cl
+; X86-SSE42-NEXT: movaps %xmm1, (%esp)
+; X86-SSE42-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; X86-SSE42-NEXT: shlb $2, %cl
+; X86-SSE42-NEXT: andb $12, %cl
; X86-SSE42-NEXT: negb %cl
; X86-SSE42-NEXT: movsbl %cl, %ecx
; X86-SSE42-NEXT: movups 16(%esp,%ecx), %xmm0
; X86-SSE42-NEXT: movups %xmm0, (%eax)
-; X86-SSE42-NEXT: addl $32, %esp
+; X86-SSE42-NEXT: addl $44, %esp
; X86-SSE42-NEXT: retl
;
-; X86-AVX-LABEL: shl_16bytes:
+; X86-AVX-LABEL: shl_16bytes_dwordOff:
; X86-AVX: # %bb.0:
-; X86-AVX-NEXT: subl $32, %esp
+; X86-AVX-NEXT: subl $44, %esp
; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-AVX-NEXT: vmovups (%edx), %xmm0
; X86-AVX-NEXT: movzbl (%ecx), %ecx
; X86-AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
-; X86-AVX-NEXT: vmovups %xmm1, (%esp)
-; X86-AVX-NEXT: vmovups %xmm0, {{[0-9]+}}(%esp)
-; X86-AVX-NEXT: andb $15, %cl
+; X86-AVX-NEXT: vmovaps %xmm1, (%esp)
+; X86-AVX-NEXT: vmovaps %xmm0, {{[0-9]+}}(%esp)
+; X86-AVX-NEXT: shlb $2, %cl
+; X86-AVX-NEXT: andb $12, %cl
; X86-AVX-NEXT: negb %cl
; X86-AVX-NEXT: movsbl %cl, %ecx
; X86-AVX-NEXT: vmovups 16(%esp,%ecx), %xmm0
; X86-AVX-NEXT: vmovups %xmm0, (%eax)
-; X86-AVX-NEXT: addl $32, %esp
+; X86-AVX-NEXT: addl $44, %esp
; X86-AVX-NEXT: retl
%src = load i128, ptr %src.ptr, align 1
- %byteOff = load i128, ptr %byteOff.ptr, align 1
- %bitOff = shl i128 %byteOff, 3
+ %dwordOff = load i128, ptr %dwordOff.ptr, align 1
+ %bitOff = shl i128 %dwordOff, 5
%res = shl i128 %src, %bitOff
store i128 %res, ptr %dst, align 1
ret void
}
+
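The only change in the _dwordOff variants is the scale of the offset operand: the IR multiplies by 32 ("shl i128 %dwordOff, 5") instead of 8, so the x64 checks use "shlb $5", and the 32-bit paths either pre-scale the index ("shlb $2" then "andb $12") or keep it in words and fold the scaling into the addressing mode ("andl $3" with "(%esp,%ecx,4)"). On x64 the 16-byte value lives in two registers, so counts of 64 or more are handled branchlessly: "testb $64, %cl" plus cmovneq/cmoveq select between the shifted pair and the low-word-into-high-word result. The two 32-bit index computations are equivalent, as this small sketch (hypothetical helper name, not part of the test) checks:

    #include <assert.h>

    /* (off << 2) & 12 == (off & 3) * 4 for any unsigned off. */
    static unsigned dword_off_to_byte_index(unsigned dword_off) {
        unsigned scaled_early = (dword_off << 2) & 12; /* shlb $2; andb $12 */
        unsigned scaled_late  = (dword_off & 3) * 4;   /* andl $3; (,%ecx,4) */
        assert(scaled_early == scaled_late);
        return scaled_late;
    }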
define void @ashr_16bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; X64-NO-SHLD-NO-BMI2-LABEL: ashr_16bytes:
; X64-NO-SHLD-NO-BMI2: # %bb.0:
@@ -960,7 +2693,312 @@ define void @ashr_16bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; X64-HAVE-SHLD-HAVE-BMI2-NEXT: movq %rax, (%rdx)
; X64-HAVE-SHLD-HAVE-BMI2-NEXT: retq
;
-; X86-SSE2-LABEL: ashr_16bytes:
+; X86-NO-SHLD-NO-BMI2-LABEL: ashr_16bytes:
+; X86-NO-SHLD-NO-BMI2: # %bb.0:
+; X86-NO-SHLD-NO-BMI2-NEXT: pushl %ebp
+; X86-NO-SHLD-NO-BMI2-NEXT: pushl %ebx
+; X86-NO-SHLD-NO-BMI2-NEXT: pushl %edi
+; X86-NO-SHLD-NO-BMI2-NEXT: pushl %esi
+; X86-NO-SHLD-NO-BMI2-NEXT: subl $60, %esp
+; X86-NO-SHLD-NO-BMI2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NO-SHLD-NO-BMI2-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NO-SHLD-NO-BMI2-NEXT: movl (%ecx), %edx
+; X86-NO-SHLD-NO-BMI2-NEXT: movl 4(%ecx), %esi
+; X86-NO-SHLD-NO-BMI2-NEXT: movl 8(%ecx), %edi
+; X86-NO-SHLD-NO-BMI2-NEXT: movl 12(%ecx), %ecx
+; X86-NO-SHLD-NO-BMI2-NEXT: movb (%eax), %ah
+; X86-NO-SHLD-NO-BMI2-NEXT: movb %ah, %al
+; X86-NO-SHLD-NO-BMI2-NEXT: shlb $3, %al
+; X86-NO-SHLD-NO-BMI2-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-NO-SHLD-NO-BMI2-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; X86-NO-SHLD-NO-BMI2-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; X86-NO-SHLD-NO-BMI2-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-NO-SHLD-NO-BMI2-NEXT: sarl $31, %ecx
+; X86-NO-SHLD-NO-BMI2-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-NO-SHLD-NO-BMI2-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-NO-SHLD-NO-BMI2-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-NO-SHLD-NO-BMI2-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-NO-SHLD-NO-BMI2-NEXT: andb $12, %ah
+; X86-NO-SHLD-NO-BMI2-NEXT: movzbl %ah, %ebp
+; X86-NO-SHLD-NO-BMI2-NEXT: movl 20(%esp,%ebp), %esi
+; X86-NO-SHLD-NO-BMI2-NEXT: movl %esi, %ebx
+; X86-NO-SHLD-NO-BMI2-NEXT: movl %eax, %ecx
+; X86-NO-SHLD-NO-BMI2-NEXT: shrl %cl, %ebx
+; X86-NO-SHLD-NO-BMI2-NEXT: movl %eax, %edx
+; X86-NO-SHLD-NO-BMI2-NEXT: notb %dl
+; X86-NO-SHLD-NO-BMI2-NEXT: movl 24(%esp,%ebp), %ecx
+; X86-NO-SHLD-NO-BMI2-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-SHLD-NO-BMI2-NEXT: leal (%ecx,%ecx), %edi
+; X86-NO-SHLD-NO-BMI2-NEXT: movl %edx, %ecx
+; X86-NO-SHLD-NO-BMI2-NEXT: shll %cl, %edi
+; X86-NO-SHLD-NO-BMI2-NEXT: orl %ebx, %edi
+; X86-NO-SHLD-NO-BMI2-NEXT: movl 16(%esp,%ebp), %ebx
+; X86-NO-SHLD-NO-BMI2-NEXT: movl %eax, %ecx
+; X86-NO-SHLD-NO-BMI2-NEXT: shrl %cl, %ebx
+; X86-NO-SHLD-NO-BMI2-NEXT: addl %esi, %esi
+; X86-NO-SHLD-NO-BMI2-NEXT: movl %edx, %ecx
+; X86-NO-SHLD-NO-BMI2-NEXT: shll %cl, %esi
+; X86-NO-SHLD-NO-BMI2-NEXT: orl %ebx, %esi
+; X86-NO-SHLD-NO-BMI2-NEXT: movl %eax, %ecx
+; X86-NO-SHLD-NO-BMI2-NEXT: shrl %cl, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X86-NO-SHLD-NO-BMI2-NEXT: movl 28(%esp,%ebp), %ebx
+; X86-NO-SHLD-NO-BMI2-NEXT: leal (%ebx,%ebx), %ebp
+; X86-NO-SHLD-NO-BMI2-NEXT: movl %edx, %ecx
+; X86-NO-SHLD-NO-BMI2-NEXT: shll %cl, %ebp
+; X86-NO-SHLD-NO-BMI2-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; X86-NO-SHLD-NO-BMI2-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NO-SHLD-NO-BMI2-NEXT: movl %eax, %ecx
+; X86-NO-SHLD-NO-BMI2-NEXT: sarl %cl, %ebx
+; X86-NO-SHLD-NO-BMI2-NEXT: movl %ebx, 12(%edx)
+; X86-NO-SHLD-NO-BMI2-NEXT: movl %ebp, 8(%edx)
+; X86-NO-SHLD-NO-BMI2-NEXT: movl %esi, (%edx)
+; X86-NO-SHLD-NO-BMI2-NEXT: movl %edi, 4(%edx)
+; X86-NO-SHLD-NO-BMI2-NEXT: addl $60, %esp
+; X86-NO-SHLD-NO-BMI2-NEXT: popl %esi
+; X86-NO-SHLD-NO-BMI2-NEXT: popl %edi
+; X86-NO-SHLD-NO-BMI2-NEXT: popl %ebx
+; X86-NO-SHLD-NO-BMI2-NEXT: popl %ebp
+; X86-NO-SHLD-NO-BMI2-NEXT: retl
+;
+; X86-HAVE-SHLD-NO-BMI2-LABEL: ashr_16bytes:
+; X86-HAVE-SHLD-NO-BMI2: # %bb.0:
+; X86-HAVE-SHLD-NO-BMI2-NEXT: pushl %ebp
+; X86-HAVE-SHLD-NO-BMI2-NEXT: pushl %ebx
+; X86-HAVE-SHLD-NO-BMI2-NEXT: pushl %edi
+; X86-HAVE-SHLD-NO-BMI2-NEXT: pushl %esi
+; X86-HAVE-SHLD-NO-BMI2-NEXT: subl $44, %esp
+; X86-HAVE-SHLD-NO-BMI2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-HAVE-SHLD-NO-BMI2-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-HAVE-SHLD-NO-BMI2-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-HAVE-SHLD-NO-BMI2-NEXT: movl (%edx), %esi
+; X86-HAVE-SHLD-NO-BMI2-NEXT: movl 4(%edx), %edi
+; X86-HAVE-SHLD-NO-BMI2-NEXT: movl 8(%edx), %ebx
+; X86-HAVE-SHLD-NO-BMI2-NEXT: movl 12(%edx), %edx
+; X86-HAVE-SHLD-NO-BMI2-NEXT: movb (%ecx), %ch
+; X86-HAVE-SHLD-NO-BMI2-NEXT: movb %ch, %cl
+; X86-HAVE-SHLD-NO-BMI2-NEXT: shlb $3, %cl
+; X86-HAVE-SHLD-NO-BMI2-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-HAVE-SHLD-NO-BMI2-NEXT: movl %ebx, {{[0-9]+}}(%esp)
+; X86-HAVE-SHLD-NO-BMI2-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; X86-HAVE-SHLD-NO-BMI2-NEXT: movl %esi, (%esp)
+; X86-HAVE-SHLD-NO-BMI2-NEXT: sarl $31, %edx
+; X86-HAVE-SHLD-NO-BMI2-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-HAVE-SHLD-NO-BMI2-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-HAVE-SHLD-NO-BMI2-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-HAVE-SHLD-NO-BMI2-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-HAVE-SHLD-NO-BMI2-NEXT: andb $12, %ch
+; X86-HAVE-SHLD-NO-BMI2-NEXT: movzbl %ch, %ebx
+; X86-HAVE-SHLD-NO-BMI2-NEXT: movl 8(%esp,%ebx), %esi
+; X86-HAVE-SHLD-NO-BMI2-NEXT: movl (%esp,%ebx), %edx
+; X86-HAVE-SHLD-NO-BMI2-NEXT: movl 4(%esp,%ebx), %ebp
+; X86-HAVE-SHLD-NO-BMI2-NEXT: movl %ebp, %edi
+; X86-HAVE-SHLD-NO-BMI2-NEXT: shrdl %cl, %esi, %edi
+; X86-HAVE-SHLD-NO-BMI2-NEXT: movl 12(%esp,%ebx), %ebx
+; X86-HAVE-SHLD-NO-BMI2-NEXT: shrdl %cl, %ebx, %esi
+; X86-HAVE-SHLD-NO-BMI2-NEXT: shrdl %cl, %ebp, %edx
+; X86-HAVE-SHLD-NO-BMI2-NEXT: sarl %cl, %ebx
+; X86-HAVE-SHLD-NO-BMI2-NEXT: movl %esi, 8(%eax)
+; X86-HAVE-SHLD-NO-BMI2-NEXT: movl %ebx, 12(%eax)
+; X86-HAVE-SHLD-NO-BMI2-NEXT: movl %edx, (%eax)
+; X86-HAVE-SHLD-NO-BMI2-NEXT: movl %edi, 4(%eax)
+; X86-HAVE-SHLD-NO-BMI2-NEXT: addl $44, %esp
+; X86-HAVE-SHLD-NO-BMI2-NEXT: popl %esi
+; X86-HAVE-SHLD-NO-BMI2-NEXT: popl %edi
+; X86-HAVE-SHLD-NO-BMI2-NEXT: popl %ebx
+; X86-HAVE-SHLD-NO-BMI2-NEXT: popl %ebp
+; X86-HAVE-SHLD-NO-BMI2-NEXT: retl
+;
+; X86-NO-SHLD-HAVE-BMI2-LABEL: ashr_16bytes:
+; X86-NO-SHLD-HAVE-BMI2: # %bb.0:
+; X86-NO-SHLD-HAVE-BMI2-NEXT: pushl %ebp
+; X86-NO-SHLD-HAVE-BMI2-NEXT: pushl %ebx
+; X86-NO-SHLD-HAVE-BMI2-NEXT: pushl %edi
+; X86-NO-SHLD-HAVE-BMI2-NEXT: pushl %esi
+; X86-NO-SHLD-HAVE-BMI2-NEXT: subl $44, %esp
+; X86-NO-SHLD-HAVE-BMI2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NO-SHLD-HAVE-BMI2-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NO-SHLD-HAVE-BMI2-NEXT: movl (%ecx), %edx
+; X86-NO-SHLD-HAVE-BMI2-NEXT: movl 4(%ecx), %esi
+; X86-NO-SHLD-HAVE-BMI2-NEXT: movl 8(%ecx), %edi
+; X86-NO-SHLD-HAVE-BMI2-NEXT: movl 12(%ecx), %ecx
+; X86-NO-SHLD-HAVE-BMI2-NEXT: movzbl (%eax), %ebx
+; X86-NO-SHLD-HAVE-BMI2-NEXT: movl %ebx, %eax
+; X86-NO-SHLD-HAVE-BMI2-NEXT: shlb $3, %al
+; X86-NO-SHLD-HAVE-BMI2-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-NO-SHLD-HAVE-BMI2-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; X86-NO-SHLD-HAVE-BMI2-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; X86-NO-SHLD-HAVE-BMI2-NEXT: movl %edx, (%esp)
+; X86-NO-SHLD-HAVE-BMI2-NEXT: sarl $31, %ecx
+; X86-NO-SHLD-HAVE-BMI2-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-NO-SHLD-HAVE-BMI2-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-NO-SHLD-HAVE-BMI2-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-NO-SHLD-HAVE-BMI2-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-NO-SHLD-HAVE-BMI2-NEXT: andb $12, %bl
+; X86-NO-SHLD-HAVE-BMI2-NEXT: movzbl %bl, %esi
+; X86-NO-SHLD-HAVE-BMI2-NEXT: movl 4(%esp,%esi), %edi
+; X86-NO-SHLD-HAVE-BMI2-NEXT: movl 8(%esp,%esi), %ebx
+; X86-NO-SHLD-HAVE-BMI2-NEXT: shrxl %eax, %edi, %ebp
+; X86-NO-SHLD-HAVE-BMI2-NEXT: movl %eax, %edx
+; X86-NO-SHLD-HAVE-BMI2-NEXT: notb %dl
+; X86-NO-SHLD-HAVE-BMI2-NEXT: leal (%ebx,%ebx), %ecx
+; X86-NO-SHLD-HAVE-BMI2-NEXT: shlxl %edx, %ecx, %ecx
+; X86-NO-SHLD-HAVE-BMI2-NEXT: orl %ebp, %ecx
+; X86-NO-SHLD-HAVE-BMI2-NEXT: shrxl %eax, (%esp,%esi), %ebp
+; X86-NO-SHLD-HAVE-BMI2-NEXT: addl %edi, %edi
+; X86-NO-SHLD-HAVE-BMI2-NEXT: shlxl %edx, %edi, %edi
+; X86-NO-SHLD-HAVE-BMI2-NEXT: orl %ebp, %edi
+; X86-NO-SHLD-HAVE-BMI2-NEXT: shrxl %eax, %ebx, %ebx
+; X86-NO-SHLD-HAVE-BMI2-NEXT: movl 12(%esp,%esi), %esi
+; X86-NO-SHLD-HAVE-BMI2-NEXT: sarxl %eax, %esi, %eax
+; X86-NO-SHLD-HAVE-BMI2-NEXT: addl %esi, %esi
+; X86-NO-SHLD-HAVE-BMI2-NEXT: shlxl %edx, %esi, %edx
+; X86-NO-SHLD-HAVE-BMI2-NEXT: orl %ebx, %edx
+; X86-NO-SHLD-HAVE-BMI2-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NO-SHLD-HAVE-BMI2-NEXT: movl %eax, 12(%esi)
+; X86-NO-SHLD-HAVE-BMI2-NEXT: movl %edx, 8(%esi)
+; X86-NO-SHLD-HAVE-BMI2-NEXT: movl %edi, (%esi)
+; X86-NO-SHLD-HAVE-BMI2-NEXT: movl %ecx, 4(%esi)
+; X86-NO-SHLD-HAVE-BMI2-NEXT: addl $44, %esp
+; X86-NO-SHLD-HAVE-BMI2-NEXT: popl %esi
+; X86-NO-SHLD-HAVE-BMI2-NEXT: popl %edi
+; X86-NO-SHLD-HAVE-BMI2-NEXT: popl %ebx
+; X86-NO-SHLD-HAVE-BMI2-NEXT: popl %ebp
+; X86-NO-SHLD-HAVE-BMI2-NEXT: retl
+;
+; X86-HAVE-SHLD-HAVE-BMI2-LABEL: ashr_16bytes:
+; X86-HAVE-SHLD-HAVE-BMI2: # %bb.0:
+; X86-HAVE-SHLD-HAVE-BMI2-NEXT: pushl %ebp
+; X86-HAVE-SHLD-HAVE-BMI2-NEXT: pushl %ebx
+; X86-HAVE-SHLD-HAVE-BMI2-NEXT: pushl %edi
+; X86-HAVE-SHLD-HAVE-BMI2-NEXT: pushl %esi
+; X86-HAVE-SHLD-HAVE-BMI2-NEXT: subl $44, %esp
+; X86-HAVE-SHLD-HAVE-BMI2-NEXT: movl {{[0-9]+}}(%esp), %ebp
+; X86-HAVE-SHLD-HAVE-BMI2-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-HAVE-SHLD-HAVE-BMI2-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-HAVE-SHLD-HAVE-BMI2-NEXT: movl (%edx), %esi
+; X86-HAVE-SHLD-HAVE-BMI2-NEXT: movl 4(%edx), %edi
+; X86-HAVE-SHLD-HAVE-BMI2-NEXT: movl 8(%edx), %ebx
+; X86-HAVE-SHLD-HAVE-BMI2-NEXT: movl 12(%edx), %edx
+; X86-HAVE-SHLD-HAVE-BMI2-NEXT: movzbl (%ecx), %eax
+; X86-HAVE-SHLD-HAVE-BMI2-NEXT: movl %eax, %ecx
+; X86-HAVE-SHLD-HAVE-BMI2-NEXT: shlb $3, %cl
+; X86-HAVE-SHLD-HAVE-BMI2-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-HAVE-SHLD-HAVE-BMI2-NEXT: movl %ebx, {{[0-9]+}}(%esp)
+; X86-HAVE-SHLD-HAVE-BMI2-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; X86-HAVE-SHLD-HAVE-BMI2-NEXT: movl %esi, (%esp)
+; X86-HAVE-SHLD-HAVE-BMI2-NEXT: sarl $31, %edx
+; X86-HAVE-SHLD-HAVE-BMI2-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-HAVE-SHLD-HAVE-BMI2-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-HAVE-SHLD-HAVE-BMI2-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-HAVE-SHLD-HAVE-BMI2-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-HAVE-SHLD-HAVE-BMI2-NEXT: andb $12, %al
+; X86-HAVE-SHLD-HAVE-BMI2-NEXT: movzbl %al, %eax
+; X86-HAVE-SHLD-HAVE-BMI2-NEXT: movl 8(%esp,%eax), %ebx
+; X86-HAVE-SHLD-HAVE-BMI2-NEXT: movl (%esp,%eax), %edx
+; X86-HAVE-SHLD-HAVE-BMI2-NEXT: movl 4(%esp,%eax), %esi
+; X86-HAVE-SHLD-HAVE-BMI2-NEXT: movl %esi, %edi
+; X86-HAVE-SHLD-HAVE-BMI2-NEXT: shrdl %cl, %ebx, %edi
+; X86-HAVE-SHLD-HAVE-BMI2-NEXT: movl 12(%esp,%eax), %eax
+; X86-HAVE-SHLD-HAVE-BMI2-NEXT: shrdl %cl, %eax, %ebx
+; X86-HAVE-SHLD-HAVE-BMI2-NEXT: movl %ebx, 8(%ebp)
+; X86-HAVE-SHLD-HAVE-BMI2-NEXT: sarxl %ecx, %eax, %eax
+; X86-HAVE-SHLD-HAVE-BMI2-NEXT: movl %eax, 12(%ebp)
+; X86-HAVE-SHLD-HAVE-BMI2-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-HAVE-SHLD-HAVE-BMI2-NEXT: shrdl %cl, %esi, %edx
+; X86-HAVE-SHLD-HAVE-BMI2-NEXT: movl %edx, (%ebp)
+; X86-HAVE-SHLD-HAVE-BMI2-NEXT: movl %edi, 4(%ebp)
+; X86-HAVE-SHLD-HAVE-BMI2-NEXT: addl $44, %esp
+; X86-HAVE-SHLD-HAVE-BMI2-NEXT: popl %esi
+; X86-HAVE-SHLD-HAVE-BMI2-NEXT: popl %edi
+; X86-HAVE-SHLD-HAVE-BMI2-NEXT: popl %ebx
+; X86-HAVE-SHLD-HAVE-BMI2-NEXT: popl %ebp
+; X86-HAVE-SHLD-HAVE-BMI2-NEXT: retl
+ %src = load i128, ptr %src.ptr, align 1
+ %byteOff = load i128, ptr %byteOff.ptr, align 1
+ %bitOff = shl i128 %byteOff, 3
+ %res = ashr i128 %src, %bitOff
+ store i128 %res, ptr %dst, align 1
+ ret void
+}
+
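ashr reuses the right-shift window, but the 16 bytes spilled above the value are the sign word ("sarl $31" on the top dword, stored four times) rather than zeros, so whatever slides in from the top is already sign-extended. A byte-granularity C sketch under the same little-endian, modulo-16 assumptions as before (again an illustrative name, not the generated code):

    #include <stdint.h>
    #include <string.h>

    /* Illustration of the sign-filled window, not the generated code. */
    void ashr_16bytes_sketch(const uint8_t *src, unsigned byte_off, uint8_t *dst) {
        uint8_t buf[32];
        memcpy(buf, src, 16);                        /* value in the lower half */
        int32_t top;
        memcpy(&top, src + 12, 4);                   /* most significant dword */
        memset(buf + 16, top < 0 ? 0xFF : 0x00, 16); /* sign fill above it */
        /* A right shift by N bytes reads the window N bytes above the value. */
        memcpy(dst, buf + (byte_off & 15), 16);
    }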
+define void @ashr_16bytes_dwordOff(ptr %src.ptr, ptr %dwordOff.ptr, ptr %dst) nounwind {
+; X64-NO-SHLD-NO-BMI2-LABEL: ashr_16bytes_dwordOff:
+; X64-NO-SHLD-NO-BMI2: # %bb.0:
+; X64-NO-SHLD-NO-BMI2-NEXT: movq (%rdi), %r8
+; X64-NO-SHLD-NO-BMI2-NEXT: movq 8(%rdi), %rdi
+; X64-NO-SHLD-NO-BMI2-NEXT: movzbl (%rsi), %eax
+; X64-NO-SHLD-NO-BMI2-NEXT: shlb $5, %al
+; X64-NO-SHLD-NO-BMI2-NEXT: movl %eax, %ecx
+; X64-NO-SHLD-NO-BMI2-NEXT: shrq %cl, %r8
+; X64-NO-SHLD-NO-BMI2-NEXT: leaq (%rdi,%rdi), %rsi
+; X64-NO-SHLD-NO-BMI2-NEXT: notb %cl
+; X64-NO-SHLD-NO-BMI2-NEXT: shlq %cl, %rsi
+; X64-NO-SHLD-NO-BMI2-NEXT: orq %r8, %rsi
+; X64-NO-SHLD-NO-BMI2-NEXT: movq %rdi, %r8
+; X64-NO-SHLD-NO-BMI2-NEXT: movl %eax, %ecx
+; X64-NO-SHLD-NO-BMI2-NEXT: sarq %cl, %r8
+; X64-NO-SHLD-NO-BMI2-NEXT: sarq $63, %rdi
+; X64-NO-SHLD-NO-BMI2-NEXT: testb $64, %al
+; X64-NO-SHLD-NO-BMI2-NEXT: cmovneq %r8, %rsi
+; X64-NO-SHLD-NO-BMI2-NEXT: cmoveq %r8, %rdi
+; X64-NO-SHLD-NO-BMI2-NEXT: movq %rdi, 8(%rdx)
+; X64-NO-SHLD-NO-BMI2-NEXT: movq %rsi, (%rdx)
+; X64-NO-SHLD-NO-BMI2-NEXT: retq
+;
+; X64-HAVE-SHLD-NO-BMI2-LABEL: ashr_16bytes_dwordOff:
+; X64-HAVE-SHLD-NO-BMI2: # %bb.0:
+; X64-HAVE-SHLD-NO-BMI2-NEXT: movq (%rdi), %rax
+; X64-HAVE-SHLD-NO-BMI2-NEXT: movq 8(%rdi), %rdi
+; X64-HAVE-SHLD-NO-BMI2-NEXT: movzbl (%rsi), %ecx
+; X64-HAVE-SHLD-NO-BMI2-NEXT: shlb $5, %cl
+; X64-HAVE-SHLD-NO-BMI2-NEXT: movq %rdi, %rsi
+; X64-HAVE-SHLD-NO-BMI2-NEXT: sarq %cl, %rsi
+; X64-HAVE-SHLD-NO-BMI2-NEXT: shrdq %cl, %rdi, %rax
+; X64-HAVE-SHLD-NO-BMI2-NEXT: sarq $63, %rdi
+; X64-HAVE-SHLD-NO-BMI2-NEXT: testb $64, %cl
+; X64-HAVE-SHLD-NO-BMI2-NEXT: cmovneq %rsi, %rax
+; X64-HAVE-SHLD-NO-BMI2-NEXT: cmoveq %rsi, %rdi
+; X64-HAVE-SHLD-NO-BMI2-NEXT: movq %rdi, 8(%rdx)
+; X64-HAVE-SHLD-NO-BMI2-NEXT: movq %rax, (%rdx)
+; X64-HAVE-SHLD-NO-BMI2-NEXT: retq
+;
+; X64-NO-SHLD-HAVE-BMI2-LABEL: ashr_16bytes_dwordOff:
+; X64-NO-SHLD-HAVE-BMI2: # %bb.0:
+; X64-NO-SHLD-HAVE-BMI2-NEXT: movq 8(%rdi), %rax
+; X64-NO-SHLD-HAVE-BMI2-NEXT: movzbl (%rsi), %ecx
+; X64-NO-SHLD-HAVE-BMI2-NEXT: shlb $5, %cl
+; X64-NO-SHLD-HAVE-BMI2-NEXT: shrxq %rcx, (%rdi), %rsi
+; X64-NO-SHLD-HAVE-BMI2-NEXT: movl %ecx, %edi
+; X64-NO-SHLD-HAVE-BMI2-NEXT: notb %dil
+; X64-NO-SHLD-HAVE-BMI2-NEXT: leaq (%rax,%rax), %r8
+; X64-NO-SHLD-HAVE-BMI2-NEXT: shlxq %rdi, %r8, %rdi
+; X64-NO-SHLD-HAVE-BMI2-NEXT: orq %rsi, %rdi
+; X64-NO-SHLD-HAVE-BMI2-NEXT: sarxq %rcx, %rax, %rsi
+; X64-NO-SHLD-HAVE-BMI2-NEXT: sarq $63, %rax
+; X64-NO-SHLD-HAVE-BMI2-NEXT: testb $64, %cl
+; X64-NO-SHLD-HAVE-BMI2-NEXT: cmovneq %rsi, %rdi
+; X64-NO-SHLD-HAVE-BMI2-NEXT: cmoveq %rsi, %rax
+; X64-NO-SHLD-HAVE-BMI2-NEXT: movq %rax, 8(%rdx)
+; X64-NO-SHLD-HAVE-BMI2-NEXT: movq %rdi, (%rdx)
+; X64-NO-SHLD-HAVE-BMI2-NEXT: retq
+;
+; X64-HAVE-SHLD-HAVE-BMI2-LABEL: ashr_16bytes_dwordOff:
+; X64-HAVE-SHLD-HAVE-BMI2: # %bb.0:
+; X64-HAVE-SHLD-HAVE-BMI2-NEXT: movq (%rdi), %rax
+; X64-HAVE-SHLD-HAVE-BMI2-NEXT: movq 8(%rdi), %rdi
+; X64-HAVE-SHLD-HAVE-BMI2-NEXT: movzbl (%rsi), %ecx
+; X64-HAVE-SHLD-HAVE-BMI2-NEXT: shlb $5, %cl
+; X64-HAVE-SHLD-HAVE-BMI2-NEXT: shrdq %cl, %rdi, %rax
+; X64-HAVE-SHLD-HAVE-BMI2-NEXT: sarxq %rcx, %rdi, %rsi
+; X64-HAVE-SHLD-HAVE-BMI2-NEXT: sarq $63, %rdi
+; X64-HAVE-SHLD-HAVE-BMI2-NEXT: testb $64, %cl
+; X64-HAVE-SHLD-HAVE-BMI2-NEXT: cmovneq %rsi, %rax
+; X64-HAVE-SHLD-HAVE-BMI2-NEXT: cmoveq %rsi, %rdi
+; X64-HAVE-SHLD-HAVE-BMI2-NEXT: movq %rdi, 8(%rdx)
+; X64-HAVE-SHLD-HAVE-BMI2-NEXT: movq %rax, (%rdx)
+; X64-HAVE-SHLD-HAVE-BMI2-NEXT: retq
+;
+; X86-SSE2-LABEL: ashr_16bytes_dwordOff:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pushl %ebx
; X86-SSE2-NEXT: pushl %edi
@@ -983,11 +3021,11 @@ define void @ashr_16bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; X86-SSE2-NEXT: movl %edx, {{[0-9]+}}(%esp)
; X86-SSE2-NEXT: movl %edx, {{[0-9]+}}(%esp)
; X86-SSE2-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; X86-SSE2-NEXT: andl $15, %ecx
-; X86-SSE2-NEXT: movl (%esp,%ecx), %edx
-; X86-SSE2-NEXT: movl 4(%esp,%ecx), %esi
-; X86-SSE2-NEXT: movl 12(%esp,%ecx), %edi
-; X86-SSE2-NEXT: movl 8(%esp,%ecx), %ecx
+; X86-SSE2-NEXT: andl $3, %ecx
+; X86-SSE2-NEXT: movl (%esp,%ecx,4), %edx
+; X86-SSE2-NEXT: movl 4(%esp,%ecx,4), %esi
+; X86-SSE2-NEXT: movl 12(%esp,%ecx,4), %edi
+; X86-SSE2-NEXT: movl 8(%esp,%ecx,4), %ecx
; X86-SSE2-NEXT: movl %ecx, 8(%eax)
; X86-SSE2-NEXT: movl %edi, 12(%eax)
; X86-SSE2-NEXT: movl %edx, (%eax)
@@ -998,7 +3036,7 @@ define void @ashr_16bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; X86-SSE2-NEXT: popl %ebx
; X86-SSE2-NEXT: retl
;
-; X86-SSE42-LABEL: ashr_16bytes:
+; X86-SSE42-LABEL: ashr_16bytes_dwordOff:
; X86-SSE42: # %bb.0:
; X86-SSE42-NEXT: pushl %ebx
; X86-SSE42-NEXT: pushl %edi
@@ -1021,8 +3059,8 @@ define void @ashr_16bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; X86-SSE42-NEXT: movl %edx, {{[0-9]+}}(%esp)
; X86-SSE42-NEXT: movl %edx, {{[0-9]+}}(%esp)
; X86-SSE42-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; X86-SSE42-NEXT: andl $15, %ecx
-; X86-SSE42-NEXT: movups (%esp,%ecx), %xmm0
+; X86-SSE42-NEXT: andl $3, %ecx
+; X86-SSE42-NEXT: movups (%esp,%ecx,4), %xmm0
; X86-SSE42-NEXT: movups %xmm0, (%eax)
; X86-SSE42-NEXT: addl $32, %esp
; X86-SSE42-NEXT: popl %esi
@@ -1030,7 +3068,7 @@ define void @ashr_16bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; X86-SSE42-NEXT: popl %ebx
; X86-SSE42-NEXT: retl
;
-; X86-AVX-LABEL: ashr_16bytes:
+; X86-AVX-LABEL: ashr_16bytes_dwordOff:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: pushl %ebx
; X86-AVX-NEXT: pushl %edi
@@ -1053,8 +3091,8 @@ define void @ashr_16bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; X86-AVX-NEXT: movl %edx, {{[0-9]+}}(%esp)
; X86-AVX-NEXT: movl %edx, {{[0-9]+}}(%esp)
; X86-AVX-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; X86-AVX-NEXT: andl $15, %ecx
-; X86-AVX-NEXT: vmovups (%esp,%ecx), %xmm0
+; X86-AVX-NEXT: andl $3, %ecx
+; X86-AVX-NEXT: vmovups (%esp,%ecx,4), %xmm0
; X86-AVX-NEXT: vmovups %xmm0, (%eax)
; X86-AVX-NEXT: addl $32, %esp
; X86-AVX-NEXT: popl %esi
@@ -1062,84 +3100,2731 @@ define void @ashr_16bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; X86-AVX-NEXT: popl %ebx
; X86-AVX-NEXT: retl
%src = load i128, ptr %src.ptr, align 1
- %byteOff = load i128, ptr %byteOff.ptr, align 1
- %bitOff = shl i128 %byteOff, 3
+ %dwordOff = load i128, ptr %dwordOff.ptr, align 1
+ %bitOff = shl i128 %dwordOff, 5
%res = ashr i128 %src, %bitOff
store i128 %res, ptr %dst, align 1
ret void
}
define void @lshr_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
-; X64-SSE2-LABEL: lshr_32bytes:
+; FALLBACK0-LABEL: lshr_32bytes:
+; FALLBACK0: # %bb.0:
+; FALLBACK0-NEXT: pushq %rbx
+; FALLBACK0-NEXT: movq (%rdi), %rcx
+; FALLBACK0-NEXT: movq 8(%rdi), %r8
+; FALLBACK0-NEXT: movq 16(%rdi), %r9
+; FALLBACK0-NEXT: movq 24(%rdi), %rdi
+; FALLBACK0-NEXT: movzbl (%rsi), %esi
+; FALLBACK0-NEXT: leal (,%rsi,8), %eax
+; FALLBACK0-NEXT: xorps %xmm0, %xmm0
+; FALLBACK0-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK0-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK0-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK0-NEXT: movq %r9, -{{[0-9]+}}(%rsp)
+; FALLBACK0-NEXT: movq %r8, -{{[0-9]+}}(%rsp)
+; FALLBACK0-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
+; FALLBACK0-NEXT: andb $24, %sil
+; FALLBACK0-NEXT: movzbl %sil, %r9d
+; FALLBACK0-NEXT: movq -64(%rsp,%r9), %r10
+; FALLBACK0-NEXT: movq -56(%rsp,%r9), %rdi
+; FALLBACK0-NEXT: movq %rdi, %r11
+; FALLBACK0-NEXT: movl %eax, %ecx
+; FALLBACK0-NEXT: shrq %cl, %r11
+; FALLBACK0-NEXT: movl %eax, %esi
+; FALLBACK0-NEXT: notb %sil
+; FALLBACK0-NEXT: movq -48(%rsp,%r9), %rbx
+; FALLBACK0-NEXT: leaq (%rbx,%rbx), %r8
+; FALLBACK0-NEXT: movl %esi, %ecx
+; FALLBACK0-NEXT: shlq %cl, %r8
+; FALLBACK0-NEXT: orq %r11, %r8
+; FALLBACK0-NEXT: movl %eax, %ecx
+; FALLBACK0-NEXT: shrq %cl, %r10
+; FALLBACK0-NEXT: addq %rdi, %rdi
+; FALLBACK0-NEXT: movl %esi, %ecx
+; FALLBACK0-NEXT: shlq %cl, %rdi
+; FALLBACK0-NEXT: orq %r10, %rdi
+; FALLBACK0-NEXT: movl %eax, %ecx
+; FALLBACK0-NEXT: shrq %cl, %rbx
+; FALLBACK0-NEXT: movq -40(%rsp,%r9), %r9
+; FALLBACK0-NEXT: leaq (%r9,%r9), %r10
+; FALLBACK0-NEXT: movl %esi, %ecx
+; FALLBACK0-NEXT: shlq %cl, %r10
+; FALLBACK0-NEXT: orq %rbx, %r10
+; FALLBACK0-NEXT: movl %eax, %ecx
+; FALLBACK0-NEXT: shrq %cl, %r9
+; FALLBACK0-NEXT: movq %r9, 24(%rdx)
+; FALLBACK0-NEXT: movq %r10, 16(%rdx)
+; FALLBACK0-NEXT: movq %rdi, (%rdx)
+; FALLBACK0-NEXT: movq %r8, 8(%rdx)
+; FALLBACK0-NEXT: popq %rbx
+; FALLBACK0-NEXT: retq
+;
+; FALLBACK1-LABEL: lshr_32bytes:
+; FALLBACK1: # %bb.0:
+; FALLBACK1-NEXT: movq (%rdi), %rax
+; FALLBACK1-NEXT: movq 8(%rdi), %r8
+; FALLBACK1-NEXT: movq 16(%rdi), %r9
+; FALLBACK1-NEXT: movq 24(%rdi), %rdi
+; FALLBACK1-NEXT: movzbl (%rsi), %esi
+; FALLBACK1-NEXT: leal (,%rsi,8), %ecx
+; FALLBACK1-NEXT: xorps %xmm0, %xmm0
+; FALLBACK1-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK1-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK1-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK1-NEXT: movq %r9, -{{[0-9]+}}(%rsp)
+; FALLBACK1-NEXT: movq %r8, -{{[0-9]+}}(%rsp)
+; FALLBACK1-NEXT: movq %rax, -{{[0-9]+}}(%rsp)
+; FALLBACK1-NEXT: andb $24, %sil
+; FALLBACK1-NEXT: movzbl %sil, %eax
+; FALLBACK1-NEXT: movq -56(%rsp,%rax), %rsi
+; FALLBACK1-NEXT: movq -72(%rsp,%rax), %rdi
+; FALLBACK1-NEXT: movq -64(%rsp,%rax), %r8
+; FALLBACK1-NEXT: movq %r8, %r9
+; FALLBACK1-NEXT: shrdq %cl, %rsi, %r9
+; FALLBACK1-NEXT: movq -48(%rsp,%rax), %rax
+; FALLBACK1-NEXT: shrdq %cl, %rax, %rsi
+; FALLBACK1-NEXT: shrdq %cl, %r8, %rdi
+; FALLBACK1-NEXT: # kill: def $cl killed $cl killed $ecx
+; FALLBACK1-NEXT: shrq %cl, %rax
+; FALLBACK1-NEXT: movq %rsi, 16(%rdx)
+; FALLBACK1-NEXT: movq %rax, 24(%rdx)
+; FALLBACK1-NEXT: movq %rdi, (%rdx)
+; FALLBACK1-NEXT: movq %r9, 8(%rdx)
+; FALLBACK1-NEXT: retq
+;
+; FALLBACK2-LABEL: lshr_32bytes:
+; FALLBACK2: # %bb.0:
+; FALLBACK2-NEXT: movq (%rdi), %rcx
+; FALLBACK2-NEXT: movq 8(%rdi), %r8
+; FALLBACK2-NEXT: movq 16(%rdi), %r9
+; FALLBACK2-NEXT: movq 24(%rdi), %rdi
+; FALLBACK2-NEXT: movzbl (%rsi), %esi
+; FALLBACK2-NEXT: leal (,%rsi,8), %eax
+; FALLBACK2-NEXT: xorps %xmm0, %xmm0
+; FALLBACK2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK2-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK2-NEXT: movq %r9, -{{[0-9]+}}(%rsp)
+; FALLBACK2-NEXT: movq %r8, -{{[0-9]+}}(%rsp)
+; FALLBACK2-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
+; FALLBACK2-NEXT: andb $24, %sil
+; FALLBACK2-NEXT: movzbl %sil, %ecx
+; FALLBACK2-NEXT: movq -64(%rsp,%rcx), %rsi
+; FALLBACK2-NEXT: movq -56(%rsp,%rcx), %rdi
+; FALLBACK2-NEXT: shrxq %rax, %rsi, %r8
+; FALLBACK2-NEXT: shrxq %rax, -72(%rsp,%rcx), %r9
+; FALLBACK2-NEXT: shrxq %rax, %rdi, %r10
+; FALLBACK2-NEXT: movq -48(%rsp,%rcx), %rcx
+; FALLBACK2-NEXT: shrxq %rax, %rcx, %r11
+; FALLBACK2-NEXT: # kill: def $al killed $al killed $rax def $rax
+; FALLBACK2-NEXT: notb %al
+; FALLBACK2-NEXT: addq %rdi, %rdi
+; FALLBACK2-NEXT: shlxq %rax, %rdi, %rdi
+; FALLBACK2-NEXT: orq %r8, %rdi
+; FALLBACK2-NEXT: addq %rsi, %rsi
+; FALLBACK2-NEXT: shlxq %rax, %rsi, %rsi
+; FALLBACK2-NEXT: orq %r9, %rsi
+; FALLBACK2-NEXT: addq %rcx, %rcx
+; FALLBACK2-NEXT: shlxq %rax, %rcx, %rax
+; FALLBACK2-NEXT: orq %r10, %rax
+; FALLBACK2-NEXT: movq %r11, 24(%rdx)
+; FALLBACK2-NEXT: movq %rax, 16(%rdx)
+; FALLBACK2-NEXT: movq %rsi, (%rdx)
+; FALLBACK2-NEXT: movq %rdi, 8(%rdx)
+; FALLBACK2-NEXT: retq
+;
+; FALLBACK3-LABEL: lshr_32bytes:
+; FALLBACK3: # %bb.0:
+; FALLBACK3-NEXT: movq (%rdi), %rax
+; FALLBACK3-NEXT: movq 8(%rdi), %r8
+; FALLBACK3-NEXT: movq 16(%rdi), %r9
+; FALLBACK3-NEXT: movq 24(%rdi), %rdi
+; FALLBACK3-NEXT: movzbl (%rsi), %esi
+; FALLBACK3-NEXT: leal (,%rsi,8), %ecx
+; FALLBACK3-NEXT: xorps %xmm0, %xmm0
+; FALLBACK3-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK3-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK3-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK3-NEXT: movq %r9, -{{[0-9]+}}(%rsp)
+; FALLBACK3-NEXT: movq %r8, -{{[0-9]+}}(%rsp)
+; FALLBACK3-NEXT: movq %rax, -{{[0-9]+}}(%rsp)
+; FALLBACK3-NEXT: andb $24, %sil
+; FALLBACK3-NEXT: movzbl %sil, %eax
+; FALLBACK3-NEXT: movq -56(%rsp,%rax), %rsi
+; FALLBACK3-NEXT: movq -72(%rsp,%rax), %rdi
+; FALLBACK3-NEXT: movq -64(%rsp,%rax), %r8
+; FALLBACK3-NEXT: movq %r8, %r9
+; FALLBACK3-NEXT: shrdq %cl, %rsi, %r9
+; FALLBACK3-NEXT: movq -48(%rsp,%rax), %rax
+; FALLBACK3-NEXT: shrdq %cl, %rax, %rsi
+; FALLBACK3-NEXT: shrdq %cl, %r8, %rdi
+; FALLBACK3-NEXT: shrxq %rcx, %rax, %rax
+; FALLBACK3-NEXT: movq %rsi, 16(%rdx)
+; FALLBACK3-NEXT: movq %rax, 24(%rdx)
+; FALLBACK3-NEXT: movq %rdi, (%rdx)
+; FALLBACK3-NEXT: movq %r9, 8(%rdx)
+; FALLBACK3-NEXT: retq
+;
+; FALLBACK4-LABEL: lshr_32bytes:
+; FALLBACK4: # %bb.0:
+; FALLBACK4-NEXT: pushq %rbx
+; FALLBACK4-NEXT: movups (%rdi), %xmm0
+; FALLBACK4-NEXT: movups 16(%rdi), %xmm1
+; FALLBACK4-NEXT: movzbl (%rsi), %ecx
+; FALLBACK4-NEXT: leal (,%rcx,8), %eax
+; FALLBACK4-NEXT: xorps %xmm2, %xmm2
+; FALLBACK4-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; FALLBACK4-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; FALLBACK4-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; FALLBACK4-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK4-NEXT: andb $24, %cl
+; FALLBACK4-NEXT: movzbl %cl, %r9d
+; FALLBACK4-NEXT: movq -64(%rsp,%r9), %r10
+; FALLBACK4-NEXT: movq -56(%rsp,%r9), %r8
+; FALLBACK4-NEXT: movl %eax, %ecx
+; FALLBACK4-NEXT: shrq %cl, %r10
+; FALLBACK4-NEXT: movl %eax, %esi
+; FALLBACK4-NEXT: notb %sil
+; FALLBACK4-NEXT: leaq (%r8,%r8), %rdi
+; FALLBACK4-NEXT: movl %esi, %ecx
+; FALLBACK4-NEXT: shlq %cl, %rdi
+; FALLBACK4-NEXT: orq %r10, %rdi
+; FALLBACK4-NEXT: movq -48(%rsp,%r9), %r10
+; FALLBACK4-NEXT: movq %r10, %r11
+; FALLBACK4-NEXT: movl %eax, %ecx
+; FALLBACK4-NEXT: shrq %cl, %r11
+; FALLBACK4-NEXT: movq -40(%rsp,%r9), %r9
+; FALLBACK4-NEXT: leaq (%r9,%r9), %rbx
+; FALLBACK4-NEXT: movl %esi, %ecx
+; FALLBACK4-NEXT: shlq %cl, %rbx
+; FALLBACK4-NEXT: orq %r11, %rbx
+; FALLBACK4-NEXT: movl %eax, %ecx
+; FALLBACK4-NEXT: shrq %cl, %r8
+; FALLBACK4-NEXT: addq %r10, %r10
+; FALLBACK4-NEXT: movl %esi, %ecx
+; FALLBACK4-NEXT: shlq %cl, %r10
+; FALLBACK4-NEXT: orq %r8, %r10
+; FALLBACK4-NEXT: movl %eax, %ecx
+; FALLBACK4-NEXT: shrq %cl, %r9
+; FALLBACK4-NEXT: movq %r9, 24(%rdx)
+; FALLBACK4-NEXT: movq %r10, 8(%rdx)
+; FALLBACK4-NEXT: movq %rbx, 16(%rdx)
+; FALLBACK4-NEXT: movq %rdi, (%rdx)
+; FALLBACK4-NEXT: popq %rbx
+; FALLBACK4-NEXT: retq
+;
+; FALLBACK5-LABEL: lshr_32bytes:
+; FALLBACK5: # %bb.0:
+; FALLBACK5-NEXT: movups (%rdi), %xmm0
+; FALLBACK5-NEXT: movups 16(%rdi), %xmm1
+; FALLBACK5-NEXT: movzbl (%rsi), %eax
+; FALLBACK5-NEXT: leal (,%rax,8), %ecx
+; FALLBACK5-NEXT: xorps %xmm2, %xmm2
+; FALLBACK5-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; FALLBACK5-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; FALLBACK5-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; FALLBACK5-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK5-NEXT: andb $24, %al
+; FALLBACK5-NEXT: movzbl %al, %eax
+; FALLBACK5-NEXT: movq -48(%rsp,%rax), %rsi
+; FALLBACK5-NEXT: movq -56(%rsp,%rax), %rdi
+; FALLBACK5-NEXT: movq %rdi, %r8
+; FALLBACK5-NEXT: shrdq %cl, %rsi, %r8
+; FALLBACK5-NEXT: movq -72(%rsp,%rax), %r9
+; FALLBACK5-NEXT: movq -64(%rsp,%rax), %rax
+; FALLBACK5-NEXT: movq %rax, %r10
+; FALLBACK5-NEXT: shrdq %cl, %rdi, %r10
+; FALLBACK5-NEXT: shrdq %cl, %rax, %r9
+; FALLBACK5-NEXT: # kill: def $cl killed $cl killed $ecx
+; FALLBACK5-NEXT: shrq %cl, %rsi
+; FALLBACK5-NEXT: movq %r10, 8(%rdx)
+; FALLBACK5-NEXT: movq %r8, 16(%rdx)
+; FALLBACK5-NEXT: movq %rsi, 24(%rdx)
+; FALLBACK5-NEXT: movq %r9, (%rdx)
+; FALLBACK5-NEXT: retq
+;
+; FALLBACK6-LABEL: lshr_32bytes:
+; FALLBACK6: # %bb.0:
+; FALLBACK6-NEXT: movups (%rdi), %xmm0
+; FALLBACK6-NEXT: movups 16(%rdi), %xmm1
+; FALLBACK6-NEXT: movzbl (%rsi), %ecx
+; FALLBACK6-NEXT: leal (,%rcx,8), %eax
+; FALLBACK6-NEXT: xorps %xmm2, %xmm2
+; FALLBACK6-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; FALLBACK6-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; FALLBACK6-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; FALLBACK6-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK6-NEXT: andb $24, %cl
+; FALLBACK6-NEXT: movzbl %cl, %ecx
+; FALLBACK6-NEXT: shrxq %rax, -72(%rsp,%rcx), %rsi
+; FALLBACK6-NEXT: movq -64(%rsp,%rcx), %rdi
+; FALLBACK6-NEXT: movq -56(%rsp,%rcx), %r8
+; FALLBACK6-NEXT: shrxq %rax, %r8, %r9
+; FALLBACK6-NEXT: movq -48(%rsp,%rcx), %rcx
+; FALLBACK6-NEXT: shrxq %rax, %rdi, %r10
+; FALLBACK6-NEXT: shrxq %rax, %rcx, %r11
+; FALLBACK6-NEXT: # kill: def $al killed $al killed $rax def $rax
+; FALLBACK6-NEXT: notb %al
+; FALLBACK6-NEXT: addq %rdi, %rdi
+; FALLBACK6-NEXT: shlxq %rax, %rdi, %rdi
+; FALLBACK6-NEXT: orq %rsi, %rdi
+; FALLBACK6-NEXT: addq %rcx, %rcx
+; FALLBACK6-NEXT: shlxq %rax, %rcx, %rcx
+; FALLBACK6-NEXT: orq %r9, %rcx
+; FALLBACK6-NEXT: addq %r8, %r8
+; FALLBACK6-NEXT: shlxq %rax, %r8, %rax
+; FALLBACK6-NEXT: orq %r10, %rax
+; FALLBACK6-NEXT: movq %r11, 24(%rdx)
+; FALLBACK6-NEXT: movq %rax, 8(%rdx)
+; FALLBACK6-NEXT: movq %rcx, 16(%rdx)
+; FALLBACK6-NEXT: movq %rdi, (%rdx)
+; FALLBACK6-NEXT: retq
+;
+; FALLBACK7-LABEL: lshr_32bytes:
+; FALLBACK7: # %bb.0:
+; FALLBACK7-NEXT: movups (%rdi), %xmm0
+; FALLBACK7-NEXT: movups 16(%rdi), %xmm1
+; FALLBACK7-NEXT: movzbl (%rsi), %eax
+; FALLBACK7-NEXT: leal (,%rax,8), %ecx
+; FALLBACK7-NEXT: xorps %xmm2, %xmm2
+; FALLBACK7-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; FALLBACK7-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; FALLBACK7-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; FALLBACK7-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK7-NEXT: andb $24, %al
+; FALLBACK7-NEXT: movzbl %al, %eax
+; FALLBACK7-NEXT: movq -48(%rsp,%rax), %rsi
+; FALLBACK7-NEXT: movq -56(%rsp,%rax), %rdi
+; FALLBACK7-NEXT: movq %rdi, %r8
+; FALLBACK7-NEXT: shrdq %cl, %rsi, %r8
+; FALLBACK7-NEXT: movq -72(%rsp,%rax), %r9
+; FALLBACK7-NEXT: movq -64(%rsp,%rax), %rax
+; FALLBACK7-NEXT: movq %rax, %r10
+; FALLBACK7-NEXT: shrdq %cl, %rdi, %r10
+; FALLBACK7-NEXT: shrdq %cl, %rax, %r9
+; FALLBACK7-NEXT: shrxq %rcx, %rsi, %rax
+; FALLBACK7-NEXT: movq %r10, 8(%rdx)
+; FALLBACK7-NEXT: movq %r8, 16(%rdx)
+; FALLBACK7-NEXT: movq %rax, 24(%rdx)
+; FALLBACK7-NEXT: movq %r9, (%rdx)
+; FALLBACK7-NEXT: retq
+;
+; FALLBACK8-LABEL: lshr_32bytes:
+; FALLBACK8: # %bb.0:
+; FALLBACK8-NEXT: pushq %rbx
+; FALLBACK8-NEXT: vmovups (%rdi), %ymm0
+; FALLBACK8-NEXT: movzbl (%rsi), %ecx
+; FALLBACK8-NEXT: leal (,%rcx,8), %eax
+; FALLBACK8-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; FALLBACK8-NEXT: vmovups %ymm1, -{{[0-9]+}}(%rsp)
+; FALLBACK8-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
+; FALLBACK8-NEXT: andb $24, %cl
+; FALLBACK8-NEXT: movzbl %cl, %r9d
+; FALLBACK8-NEXT: movq -64(%rsp,%r9), %r10
+; FALLBACK8-NEXT: movq -56(%rsp,%r9), %r8
+; FALLBACK8-NEXT: movl %eax, %ecx
+; FALLBACK8-NEXT: shrq %cl, %r10
+; FALLBACK8-NEXT: movl %eax, %esi
+; FALLBACK8-NEXT: notb %sil
+; FALLBACK8-NEXT: leaq (%r8,%r8), %rdi
+; FALLBACK8-NEXT: movl %esi, %ecx
+; FALLBACK8-NEXT: shlq %cl, %rdi
+; FALLBACK8-NEXT: orq %r10, %rdi
+; FALLBACK8-NEXT: movq -48(%rsp,%r9), %r10
+; FALLBACK8-NEXT: movq %r10, %r11
+; FALLBACK8-NEXT: movl %eax, %ecx
+; FALLBACK8-NEXT: shrq %cl, %r11
+; FALLBACK8-NEXT: movq -40(%rsp,%r9), %r9
+; FALLBACK8-NEXT: leaq (%r9,%r9), %rbx
+; FALLBACK8-NEXT: movl %esi, %ecx
+; FALLBACK8-NEXT: shlq %cl, %rbx
+; FALLBACK8-NEXT: orq %r11, %rbx
+; FALLBACK8-NEXT: movl %eax, %ecx
+; FALLBACK8-NEXT: shrq %cl, %r8
+; FALLBACK8-NEXT: addq %r10, %r10
+; FALLBACK8-NEXT: movl %esi, %ecx
+; FALLBACK8-NEXT: shlq %cl, %r10
+; FALLBACK8-NEXT: orq %r8, %r10
+; FALLBACK8-NEXT: movl %eax, %ecx
+; FALLBACK8-NEXT: shrq %cl, %r9
+; FALLBACK8-NEXT: movq %r9, 24(%rdx)
+; FALLBACK8-NEXT: movq %r10, 8(%rdx)
+; FALLBACK8-NEXT: movq %rbx, 16(%rdx)
+; FALLBACK8-NEXT: movq %rdi, (%rdx)
+; FALLBACK8-NEXT: popq %rbx
+; FALLBACK8-NEXT: vzeroupper
+; FALLBACK8-NEXT: retq
+;
+; FALLBACK9-LABEL: lshr_32bytes:
+; FALLBACK9: # %bb.0:
+; FALLBACK9-NEXT: vmovups (%rdi), %ymm0
+; FALLBACK9-NEXT: movzbl (%rsi), %eax
+; FALLBACK9-NEXT: leal (,%rax,8), %ecx
+; FALLBACK9-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; FALLBACK9-NEXT: vmovups %ymm1, -{{[0-9]+}}(%rsp)
+; FALLBACK9-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
+; FALLBACK9-NEXT: andb $24, %al
+; FALLBACK9-NEXT: movzbl %al, %eax
+; FALLBACK9-NEXT: movq -48(%rsp,%rax), %rsi
+; FALLBACK9-NEXT: movq -56(%rsp,%rax), %rdi
+; FALLBACK9-NEXT: movq %rdi, %r8
+; FALLBACK9-NEXT: shrdq %cl, %rsi, %r8
+; FALLBACK9-NEXT: movq -72(%rsp,%rax), %r9
+; FALLBACK9-NEXT: movq -64(%rsp,%rax), %rax
+; FALLBACK9-NEXT: movq %rax, %r10
+; FALLBACK9-NEXT: shrdq %cl, %rdi, %r10
+; FALLBACK9-NEXT: shrdq %cl, %rax, %r9
+; FALLBACK9-NEXT: # kill: def $cl killed $cl killed $ecx
+; FALLBACK9-NEXT: shrq %cl, %rsi
+; FALLBACK9-NEXT: movq %r10, 8(%rdx)
+; FALLBACK9-NEXT: movq %r8, 16(%rdx)
+; FALLBACK9-NEXT: movq %rsi, 24(%rdx)
+; FALLBACK9-NEXT: movq %r9, (%rdx)
+; FALLBACK9-NEXT: vzeroupper
+; FALLBACK9-NEXT: retq
+;
+; FALLBACK10-LABEL: lshr_32bytes:
+; FALLBACK10: # %bb.0:
+; FALLBACK10-NEXT: vmovups (%rdi), %ymm0
+; FALLBACK10-NEXT: movzbl (%rsi), %ecx
+; FALLBACK10-NEXT: leal (,%rcx,8), %eax
+; FALLBACK10-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; FALLBACK10-NEXT: vmovups %ymm1, -{{[0-9]+}}(%rsp)
+; FALLBACK10-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
+; FALLBACK10-NEXT: andb $24, %cl
+; FALLBACK10-NEXT: movzbl %cl, %ecx
+; FALLBACK10-NEXT: shrxq %rax, -72(%rsp,%rcx), %rsi
+; FALLBACK10-NEXT: movq -64(%rsp,%rcx), %rdi
+; FALLBACK10-NEXT: movq -56(%rsp,%rcx), %r8
+; FALLBACK10-NEXT: shrxq %rax, %r8, %r9
+; FALLBACK10-NEXT: movq -48(%rsp,%rcx), %rcx
+; FALLBACK10-NEXT: shrxq %rax, %rdi, %r10
+; FALLBACK10-NEXT: shrxq %rax, %rcx, %r11
+; FALLBACK10-NEXT: # kill: def $al killed $al killed $rax def $rax
+; FALLBACK10-NEXT: notb %al
+; FALLBACK10-NEXT: addq %rdi, %rdi
+; FALLBACK10-NEXT: shlxq %rax, %rdi, %rdi
+; FALLBACK10-NEXT: orq %rsi, %rdi
+; FALLBACK10-NEXT: addq %rcx, %rcx
+; FALLBACK10-NEXT: shlxq %rax, %rcx, %rcx
+; FALLBACK10-NEXT: orq %r9, %rcx
+; FALLBACK10-NEXT: addq %r8, %r8
+; FALLBACK10-NEXT: shlxq %rax, %r8, %rax
+; FALLBACK10-NEXT: orq %r10, %rax
+; FALLBACK10-NEXT: movq %r11, 24(%rdx)
+; FALLBACK10-NEXT: movq %rax, 8(%rdx)
+; FALLBACK10-NEXT: movq %rcx, 16(%rdx)
+; FALLBACK10-NEXT: movq %rdi, (%rdx)
+; FALLBACK10-NEXT: vzeroupper
+; FALLBACK10-NEXT: retq
+;
+; FALLBACK11-LABEL: lshr_32bytes:
+; FALLBACK11: # %bb.0:
+; FALLBACK11-NEXT: vmovups (%rdi), %ymm0
+; FALLBACK11-NEXT: movzbl (%rsi), %eax
+; FALLBACK11-NEXT: leal (,%rax,8), %ecx
+; FALLBACK11-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; FALLBACK11-NEXT: vmovups %ymm1, -{{[0-9]+}}(%rsp)
+; FALLBACK11-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
+; FALLBACK11-NEXT: andb $24, %al
+; FALLBACK11-NEXT: movzbl %al, %eax
+; FALLBACK11-NEXT: movq -48(%rsp,%rax), %rsi
+; FALLBACK11-NEXT: movq -56(%rsp,%rax), %rdi
+; FALLBACK11-NEXT: movq %rdi, %r8
+; FALLBACK11-NEXT: shrdq %cl, %rsi, %r8
+; FALLBACK11-NEXT: movq -72(%rsp,%rax), %r9
+; FALLBACK11-NEXT: movq -64(%rsp,%rax), %rax
+; FALLBACK11-NEXT: movq %rax, %r10
+; FALLBACK11-NEXT: shrdq %cl, %rdi, %r10
+; FALLBACK11-NEXT: shrdq %cl, %rax, %r9
+; FALLBACK11-NEXT: shrxq %rcx, %rsi, %rax
+; FALLBACK11-NEXT: movq %r10, 8(%rdx)
+; FALLBACK11-NEXT: movq %r8, 16(%rdx)
+; FALLBACK11-NEXT: movq %rax, 24(%rdx)
+; FALLBACK11-NEXT: movq %r9, (%rdx)
+; FALLBACK11-NEXT: vzeroupper
+; FALLBACK11-NEXT: retq
+;
+; FALLBACK12-LABEL: lshr_32bytes:
+; FALLBACK12: # %bb.0:
+; FALLBACK12-NEXT: pushq %rbx
+; FALLBACK12-NEXT: vmovups (%rdi), %ymm0
+; FALLBACK12-NEXT: movzbl (%rsi), %ecx
+; FALLBACK12-NEXT: leal (,%rcx,8), %eax
+; FALLBACK12-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; FALLBACK12-NEXT: vmovups %ymm1, -{{[0-9]+}}(%rsp)
+; FALLBACK12-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
+; FALLBACK12-NEXT: andb $24, %cl
+; FALLBACK12-NEXT: movzbl %cl, %r9d
+; FALLBACK12-NEXT: movq -64(%rsp,%r9), %r10
+; FALLBACK12-NEXT: movq -56(%rsp,%r9), %r8
+; FALLBACK12-NEXT: movl %eax, %ecx
+; FALLBACK12-NEXT: shrq %cl, %r10
+; FALLBACK12-NEXT: movl %eax, %esi
+; FALLBACK12-NEXT: notb %sil
+; FALLBACK12-NEXT: leaq (%r8,%r8), %rdi
+; FALLBACK12-NEXT: movl %esi, %ecx
+; FALLBACK12-NEXT: shlq %cl, %rdi
+; FALLBACK12-NEXT: orq %r10, %rdi
+; FALLBACK12-NEXT: movq -48(%rsp,%r9), %r10
+; FALLBACK12-NEXT: movq %r10, %r11
+; FALLBACK12-NEXT: movl %eax, %ecx
+; FALLBACK12-NEXT: shrq %cl, %r11
+; FALLBACK12-NEXT: movq -40(%rsp,%r9), %r9
+; FALLBACK12-NEXT: leaq (%r9,%r9), %rbx
+; FALLBACK12-NEXT: movl %esi, %ecx
+; FALLBACK12-NEXT: shlq %cl, %rbx
+; FALLBACK12-NEXT: orq %r11, %rbx
+; FALLBACK12-NEXT: movl %eax, %ecx
+; FALLBACK12-NEXT: shrq %cl, %r8
+; FALLBACK12-NEXT: addq %r10, %r10
+; FALLBACK12-NEXT: movl %esi, %ecx
+; FALLBACK12-NEXT: shlq %cl, %r10
+; FALLBACK12-NEXT: orq %r8, %r10
+; FALLBACK12-NEXT: movl %eax, %ecx
+; FALLBACK12-NEXT: shrq %cl, %r9
+; FALLBACK12-NEXT: movq %r9, 24(%rdx)
+; FALLBACK12-NEXT: movq %r10, 8(%rdx)
+; FALLBACK12-NEXT: movq %rbx, 16(%rdx)
+; FALLBACK12-NEXT: movq %rdi, (%rdx)
+; FALLBACK12-NEXT: popq %rbx
+; FALLBACK12-NEXT: vzeroupper
+; FALLBACK12-NEXT: retq
+;
+; FALLBACK13-LABEL: lshr_32bytes:
+; FALLBACK13: # %bb.0:
+; FALLBACK13-NEXT: vmovups (%rdi), %ymm0
+; FALLBACK13-NEXT: movzbl (%rsi), %eax
+; FALLBACK13-NEXT: leal (,%rax,8), %ecx
+; FALLBACK13-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; FALLBACK13-NEXT: vmovups %ymm1, -{{[0-9]+}}(%rsp)
+; FALLBACK13-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
+; FALLBACK13-NEXT: andb $24, %al
+; FALLBACK13-NEXT: movzbl %al, %eax
+; FALLBACK13-NEXT: movq -48(%rsp,%rax), %rsi
+; FALLBACK13-NEXT: movq -56(%rsp,%rax), %rdi
+; FALLBACK13-NEXT: movq %rdi, %r8
+; FALLBACK13-NEXT: shrdq %cl, %rsi, %r8
+; FALLBACK13-NEXT: movq -72(%rsp,%rax), %r9
+; FALLBACK13-NEXT: movq -64(%rsp,%rax), %rax
+; FALLBACK13-NEXT: movq %rax, %r10
+; FALLBACK13-NEXT: shrdq %cl, %rdi, %r10
+; FALLBACK13-NEXT: shrdq %cl, %rax, %r9
+; FALLBACK13-NEXT: # kill: def $cl killed $cl killed $ecx
+; FALLBACK13-NEXT: shrq %cl, %rsi
+; FALLBACK13-NEXT: movq %r10, 8(%rdx)
+; FALLBACK13-NEXT: movq %r8, 16(%rdx)
+; FALLBACK13-NEXT: movq %rsi, 24(%rdx)
+; FALLBACK13-NEXT: movq %r9, (%rdx)
+; FALLBACK13-NEXT: vzeroupper
+; FALLBACK13-NEXT: retq
+;
+; FALLBACK14-LABEL: lshr_32bytes:
+; FALLBACK14: # %bb.0:
+; FALLBACK14-NEXT: vmovups (%rdi), %ymm0
+; FALLBACK14-NEXT: movzbl (%rsi), %ecx
+; FALLBACK14-NEXT: leal (,%rcx,8), %eax
+; FALLBACK14-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; FALLBACK14-NEXT: vmovups %ymm1, -{{[0-9]+}}(%rsp)
+; FALLBACK14-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
+; FALLBACK14-NEXT: andb $24, %cl
+; FALLBACK14-NEXT: movzbl %cl, %ecx
+; FALLBACK14-NEXT: shrxq %rax, -72(%rsp,%rcx), %rsi
+; FALLBACK14-NEXT: movq -64(%rsp,%rcx), %rdi
+; FALLBACK14-NEXT: movq -56(%rsp,%rcx), %r8
+; FALLBACK14-NEXT: shrxq %rax, %r8, %r9
+; FALLBACK14-NEXT: movq -48(%rsp,%rcx), %rcx
+; FALLBACK14-NEXT: shrxq %rax, %rdi, %r10
+; FALLBACK14-NEXT: shrxq %rax, %rcx, %r11
+; FALLBACK14-NEXT: # kill: def $al killed $al killed $rax def $rax
+; FALLBACK14-NEXT: notb %al
+; FALLBACK14-NEXT: addq %rdi, %rdi
+; FALLBACK14-NEXT: shlxq %rax, %rdi, %rdi
+; FALLBACK14-NEXT: orq %rsi, %rdi
+; FALLBACK14-NEXT: addq %rcx, %rcx
+; FALLBACK14-NEXT: shlxq %rax, %rcx, %rcx
+; FALLBACK14-NEXT: orq %r9, %rcx
+; FALLBACK14-NEXT: addq %r8, %r8
+; FALLBACK14-NEXT: shlxq %rax, %r8, %rax
+; FALLBACK14-NEXT: orq %r10, %rax
+; FALLBACK14-NEXT: movq %r11, 24(%rdx)
+; FALLBACK14-NEXT: movq %rax, 8(%rdx)
+; FALLBACK14-NEXT: movq %rcx, 16(%rdx)
+; FALLBACK14-NEXT: movq %rdi, (%rdx)
+; FALLBACK14-NEXT: vzeroupper
+; FALLBACK14-NEXT: retq
+;
+; FALLBACK15-LABEL: lshr_32bytes:
+; FALLBACK15: # %bb.0:
+; FALLBACK15-NEXT: vmovups (%rdi), %ymm0
+; FALLBACK15-NEXT: movzbl (%rsi), %eax
+; FALLBACK15-NEXT: leal (,%rax,8), %ecx
+; FALLBACK15-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; FALLBACK15-NEXT: vmovups %ymm1, -{{[0-9]+}}(%rsp)
+; FALLBACK15-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
+; FALLBACK15-NEXT: andb $24, %al
+; FALLBACK15-NEXT: movzbl %al, %eax
+; FALLBACK15-NEXT: movq -48(%rsp,%rax), %rsi
+; FALLBACK15-NEXT: movq -56(%rsp,%rax), %rdi
+; FALLBACK15-NEXT: movq %rdi, %r8
+; FALLBACK15-NEXT: shrdq %cl, %rsi, %r8
+; FALLBACK15-NEXT: movq -72(%rsp,%rax), %r9
+; FALLBACK15-NEXT: movq -64(%rsp,%rax), %rax
+; FALLBACK15-NEXT: movq %rax, %r10
+; FALLBACK15-NEXT: shrdq %cl, %rdi, %r10
+; FALLBACK15-NEXT: shrdq %cl, %rax, %r9
+; FALLBACK15-NEXT: shrxq %rcx, %rsi, %rax
+; FALLBACK15-NEXT: movq %r10, 8(%rdx)
+; FALLBACK15-NEXT: movq %r8, 16(%rdx)
+; FALLBACK15-NEXT: movq %rax, 24(%rdx)
+; FALLBACK15-NEXT: movq %r9, (%rdx)
+; FALLBACK15-NEXT: vzeroupper
+; FALLBACK15-NEXT: retq
+;
+; FALLBACK16-LABEL: lshr_32bytes:
+; FALLBACK16: # %bb.0:
+; FALLBACK16-NEXT: pushl %ebp
+; FALLBACK16-NEXT: pushl %ebx
+; FALLBACK16-NEXT: pushl %edi
+; FALLBACK16-NEXT: pushl %esi
+; FALLBACK16-NEXT: subl $108, %esp
+; FALLBACK16-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK16-NEXT: movl {{[0-9]+}}(%esp), %ebp
+; FALLBACK16-NEXT: movl (%ebp), %ecx
+; FALLBACK16-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: movl 4(%ebp), %ecx
+; FALLBACK16-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: movl 8(%ebp), %ecx
+; FALLBACK16-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: movl 12(%ebp), %edi
+; FALLBACK16-NEXT: movl 16(%ebp), %ebx
+; FALLBACK16-NEXT: movb (%eax), %ah
+; FALLBACK16-NEXT: movl 20(%ebp), %esi
+; FALLBACK16-NEXT: movl 24(%ebp), %ecx
+; FALLBACK16-NEXT: movl 28(%ebp), %ebp
+; FALLBACK16-NEXT: xorps %xmm0, %xmm0
+; FALLBACK16-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movl %ebp, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movb %ah, %dh
+; FALLBACK16-NEXT: shlb $3, %dh
+; FALLBACK16-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movl %ebx, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK16-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK16-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK16-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: andb $28, %ah
+; FALLBACK16-NEXT: movzbl %ah, %edi
+; FALLBACK16-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: movl 32(%esp,%edi), %esi
+; FALLBACK16-NEXT: movl 36(%esp,%edi), %eax
+; FALLBACK16-NEXT: movl %eax, %ebx
+; FALLBACK16-NEXT: movb %dh, %cl
+; FALLBACK16-NEXT: shrl %cl, %ebx
+; FALLBACK16-NEXT: movb %dh, %dl
+; FALLBACK16-NEXT: notb %dl
+; FALLBACK16-NEXT: movl 40(%esp,%edi), %edi
+; FALLBACK16-NEXT: leal (%edi,%edi), %ebp
+; FALLBACK16-NEXT: movl %edx, %ecx
+; FALLBACK16-NEXT: shll %cl, %ebp
+; FALLBACK16-NEXT: orl %ebx, %ebp
+; FALLBACK16-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: movb %dh, %cl
+; FALLBACK16-NEXT: shrl %cl, %esi
+; FALLBACK16-NEXT: movl %eax, %ebx
+; FALLBACK16-NEXT: addl %eax, %ebx
+; FALLBACK16-NEXT: movl %edx, %ecx
+; FALLBACK16-NEXT: shll %cl, %ebx
+; FALLBACK16-NEXT: orl %esi, %ebx
+; FALLBACK16-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK16-NEXT: movl 44(%esp,%eax), %ebp
+; FALLBACK16-NEXT: movl %ebp, %esi
+; FALLBACK16-NEXT: movb %dh, %cl
+; FALLBACK16-NEXT: shrl %cl, %esi
+; FALLBACK16-NEXT: movl 48(%esp,%eax), %eax
+; FALLBACK16-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: leal (%eax,%eax), %ebx
+; FALLBACK16-NEXT: movl %edx, %ecx
+; FALLBACK16-NEXT: shll %cl, %ebx
+; FALLBACK16-NEXT: orl %esi, %ebx
+; FALLBACK16-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: movb %dh, %cl
+; FALLBACK16-NEXT: shrl %cl, %edi
+; FALLBACK16-NEXT: addl %ebp, %ebp
+; FALLBACK16-NEXT: movl %edx, %ecx
+; FALLBACK16-NEXT: shll %cl, %ebp
+; FALLBACK16-NEXT: orl %edi, %ebp
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK16-NEXT: movl 52(%esp,%eax), %edi
+; FALLBACK16-NEXT: movl %edi, %ebx
+; FALLBACK16-NEXT: movb %dh, %cl
+; FALLBACK16-NEXT: shrl %cl, %ebx
+; FALLBACK16-NEXT: movl 56(%esp,%eax), %esi
+; FALLBACK16-NEXT: leal (%esi,%esi), %eax
+; FALLBACK16-NEXT: movl %edx, %ecx
+; FALLBACK16-NEXT: shll %cl, %eax
+; FALLBACK16-NEXT: orl %ebx, %eax
+; FALLBACK16-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: movb %dh, %cl
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; FALLBACK16-NEXT: shrl %cl, %ebx
+; FALLBACK16-NEXT: addl %edi, %edi
+; FALLBACK16-NEXT: movl %edx, %ecx
+; FALLBACK16-NEXT: shll %cl, %edi
+; FALLBACK16-NEXT: orl %ebx, %edi
+; FALLBACK16-NEXT: movb %dh, %cl
+; FALLBACK16-NEXT: movl %esi, %eax
+; FALLBACK16-NEXT: shrl %cl, %eax
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK16-NEXT: movl 60(%esp,%ecx), %ebx
+; FALLBACK16-NEXT: leal (%ebx,%ebx), %esi
+; FALLBACK16-NEXT: movl %edx, %ecx
+; FALLBACK16-NEXT: shll %cl, %esi
+; FALLBACK16-NEXT: orl %eax, %esi
+; FALLBACK16-NEXT: movb %dh, %cl
+; FALLBACK16-NEXT: shrl %cl, %ebx
+; FALLBACK16-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK16-NEXT: movl %ebx, 28(%eax)
+; FALLBACK16-NEXT: movl %esi, 24(%eax)
+; FALLBACK16-NEXT: movl %edi, 16(%eax)
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK16-NEXT: movl %ecx, 20(%eax)
+; FALLBACK16-NEXT: movl %ebp, 8(%eax)
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK16-NEXT: movl %ecx, 12(%eax)
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK16-NEXT: movl %ecx, (%eax)
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK16-NEXT: movl %ecx, 4(%eax)
+; FALLBACK16-NEXT: addl $108, %esp
+; FALLBACK16-NEXT: popl %esi
+; FALLBACK16-NEXT: popl %edi
+; FALLBACK16-NEXT: popl %ebx
+; FALLBACK16-NEXT: popl %ebp
+; FALLBACK16-NEXT: retl
+;
+; FALLBACK17-LABEL: lshr_32bytes:
+; FALLBACK17: # %bb.0:
+; FALLBACK17-NEXT: pushl %ebp
+; FALLBACK17-NEXT: pushl %ebx
+; FALLBACK17-NEXT: pushl %edi
+; FALLBACK17-NEXT: pushl %esi
+; FALLBACK17-NEXT: subl $92, %esp
+; FALLBACK17-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK17-NEXT: movl {{[0-9]+}}(%esp), %ebp
+; FALLBACK17-NEXT: movl (%ebp), %eax
+; FALLBACK17-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK17-NEXT: movl 4(%ebp), %eax
+; FALLBACK17-NEXT: movl %eax, (%esp) # 4-byte Spill
+; FALLBACK17-NEXT: movl 8(%ebp), %esi
+; FALLBACK17-NEXT: movl 12(%ebp), %edi
+; FALLBACK17-NEXT: movl 16(%ebp), %ebx
+; FALLBACK17-NEXT: movb (%ecx), %ch
+; FALLBACK17-NEXT: movl 20(%ebp), %edx
+; FALLBACK17-NEXT: movl 24(%ebp), %eax
+; FALLBACK17-NEXT: movl 28(%ebp), %ebp
+; FALLBACK17-NEXT: xorps %xmm0, %xmm0
+; FALLBACK17-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movl %ebp, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movb %ch, %cl
+; FALLBACK17-NEXT: shlb $3, %cl
+; FALLBACK17-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movl %ebx, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movl (%esp), %eax # 4-byte Reload
+; FALLBACK17-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK17-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: andb $28, %ch
+; FALLBACK17-NEXT: movzbl %ch, %ebp
+; FALLBACK17-NEXT: movl 24(%esp,%ebp), %edx
+; FALLBACK17-NEXT: movl 20(%esp,%ebp), %eax
+; FALLBACK17-NEXT: movl %eax, (%esp) # 4-byte Spill
+; FALLBACK17-NEXT: shrdl %cl, %edx, %eax
+; FALLBACK17-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK17-NEXT: movl 32(%esp,%ebp), %ebx
+; FALLBACK17-NEXT: movl 28(%esp,%ebp), %eax
+; FALLBACK17-NEXT: movl %eax, %esi
+; FALLBACK17-NEXT: shrdl %cl, %ebx, %esi
+; FALLBACK17-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK17-NEXT: shrdl %cl, %eax, %edx
+; FALLBACK17-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK17-NEXT: movl 40(%esp,%ebp), %edx
+; FALLBACK17-NEXT: movl 36(%esp,%ebp), %eax
+; FALLBACK17-NEXT: movl %eax, %edi
+; FALLBACK17-NEXT: shrdl %cl, %edx, %edi
+; FALLBACK17-NEXT: shrdl %cl, %eax, %ebx
+; FALLBACK17-NEXT: movl 16(%esp,%ebp), %esi
+; FALLBACK17-NEXT: movl 44(%esp,%ebp), %eax
+; FALLBACK17-NEXT: shrdl %cl, %eax, %edx
+; FALLBACK17-NEXT: movl {{[0-9]+}}(%esp), %ebp
+; FALLBACK17-NEXT: movl %edx, 24(%ebp)
+; FALLBACK17-NEXT: movl (%esp), %edx # 4-byte Reload
+; FALLBACK17-NEXT: shrdl %cl, %edx, %esi
+; FALLBACK17-NEXT: shrl %cl, %eax
+; FALLBACK17-NEXT: movl %eax, 28(%ebp)
+; FALLBACK17-NEXT: movl %ebx, 16(%ebp)
+; FALLBACK17-NEXT: movl %edi, 20(%ebp)
+; FALLBACK17-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK17-NEXT: movl %eax, 8(%ebp)
+; FALLBACK17-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK17-NEXT: movl %eax, 12(%ebp)
+; FALLBACK17-NEXT: movl %esi, (%ebp)
+; FALLBACK17-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK17-NEXT: movl %eax, 4(%ebp)
+; FALLBACK17-NEXT: addl $92, %esp
+; FALLBACK17-NEXT: popl %esi
+; FALLBACK17-NEXT: popl %edi
+; FALLBACK17-NEXT: popl %ebx
+; FALLBACK17-NEXT: popl %ebp
+; FALLBACK17-NEXT: retl
+;
+; FALLBACK18-LABEL: lshr_32bytes:
+; FALLBACK18: # %bb.0:
+; FALLBACK18-NEXT: pushl %ebp
+; FALLBACK18-NEXT: pushl %ebx
+; FALLBACK18-NEXT: pushl %edi
+; FALLBACK18-NEXT: pushl %esi
+; FALLBACK18-NEXT: subl $108, %esp
+; FALLBACK18-NEXT: movl {{[0-9]+}}(%esp), %ebx
+; FALLBACK18-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK18-NEXT: movl (%eax), %ecx
+; FALLBACK18-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: movl 4(%eax), %ecx
+; FALLBACK18-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: movl 8(%eax), %esi
+; FALLBACK18-NEXT: movl 12(%eax), %edi
+; FALLBACK18-NEXT: movl 16(%eax), %ebp
+; FALLBACK18-NEXT: movzbl (%ebx), %ebx
+; FALLBACK18-NEXT: movl 20(%eax), %edx
+; FALLBACK18-NEXT: movl 24(%eax), %ecx
+; FALLBACK18-NEXT: movl 28(%eax), %eax
+; FALLBACK18-NEXT: xorps %xmm0, %xmm0
+; FALLBACK18-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl %ebx, %eax
+; FALLBACK18-NEXT: shlb $3, %al
+; FALLBACK18-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl %ebp, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK18-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK18-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: andb $28, %bl
+; FALLBACK18-NEXT: movzbl %bl, %edi
+; FALLBACK18-NEXT: movl 36(%esp,%edi), %esi
+; FALLBACK18-NEXT: movl 40(%esp,%edi), %ecx
+; FALLBACK18-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: shrxl %eax, %esi, %edx
+; FALLBACK18-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: movl %eax, %edx
+; FALLBACK18-NEXT: movl %eax, %ebx
+; FALLBACK18-NEXT: notb %dl
+; FALLBACK18-NEXT: leal (%ecx,%ecx), %ebp
+; FALLBACK18-NEXT: shlxl %edx, %ebp, %eax
+; FALLBACK18-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: movl %ebx, %ecx
+; FALLBACK18-NEXT: shrxl %ebx, 32(%esp,%edi), %ebx
+; FALLBACK18-NEXT: addl %esi, %esi
+; FALLBACK18-NEXT: shlxl %edx, %esi, %eax
+; FALLBACK18-NEXT: orl %ebx, %eax
+; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: movl 48(%esp,%edi), %eax
+; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: leal (%eax,%eax), %ebx
+; FALLBACK18-NEXT: shlxl %edx, %ebx, %esi
+; FALLBACK18-NEXT: movl 44(%esp,%edi), %ebp
+; FALLBACK18-NEXT: movl %ecx, %eax
+; FALLBACK18-NEXT: shrxl %ecx, %ebp, %ebx
+; FALLBACK18-NEXT: orl %ebx, %esi
+; FALLBACK18-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: shrxl %ecx, {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; FALLBACK18-NEXT: movl %eax, %ebx
+; FALLBACK18-NEXT: addl %ebp, %ebp
+; FALLBACK18-NEXT: shlxl %edx, %ebp, %eax
+; FALLBACK18-NEXT: orl %ecx, %eax
+; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: movl 56(%esp,%edi), %ebp
+; FALLBACK18-NEXT: leal (%ebp,%ebp), %ecx
+; FALLBACK18-NEXT: shlxl %edx, %ecx, %ecx
+; FALLBACK18-NEXT: movl 52(%esp,%edi), %eax
+; FALLBACK18-NEXT: shrxl %ebx, %eax, %esi
+; FALLBACK18-NEXT: orl %esi, %ecx
+; FALLBACK18-NEXT: shrxl %ebx, {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; FALLBACK18-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: addl %eax, %eax
+; FALLBACK18-NEXT: shlxl %edx, %eax, %esi
+; FALLBACK18-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; FALLBACK18-NEXT: shrxl %ebx, %ebp, %eax
+; FALLBACK18-NEXT: movl 60(%esp,%edi), %edi
+; FALLBACK18-NEXT: shrxl %ebx, %edi, %ebx
+; FALLBACK18-NEXT: addl %edi, %edi
+; FALLBACK18-NEXT: shlxl %edx, %edi, %edi
+; FALLBACK18-NEXT: orl %eax, %edi
+; FALLBACK18-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK18-NEXT: movl %ebx, 28(%eax)
+; FALLBACK18-NEXT: movl %edi, 24(%eax)
+; FALLBACK18-NEXT: movl %esi, 16(%eax)
+; FALLBACK18-NEXT: movl %ecx, 20(%eax)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK18-NEXT: movl %ecx, 8(%eax)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK18-NEXT: movl %ecx, 12(%eax)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK18-NEXT: movl %ecx, (%eax)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK18-NEXT: movl %ecx, 4(%eax)
+; FALLBACK18-NEXT: addl $108, %esp
+; FALLBACK18-NEXT: popl %esi
+; FALLBACK18-NEXT: popl %edi
+; FALLBACK18-NEXT: popl %ebx
+; FALLBACK18-NEXT: popl %ebp
+; FALLBACK18-NEXT: retl
+;
+; FALLBACK19-LABEL: lshr_32bytes:
+; FALLBACK19: # %bb.0:
+; FALLBACK19-NEXT: pushl %ebp
+; FALLBACK19-NEXT: pushl %ebx
+; FALLBACK19-NEXT: pushl %edi
+; FALLBACK19-NEXT: pushl %esi
+; FALLBACK19-NEXT: subl $92, %esp
+; FALLBACK19-NEXT: movl {{[0-9]+}}(%esp), %ebx
+; FALLBACK19-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK19-NEXT: movl (%ecx), %eax
+; FALLBACK19-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK19-NEXT: movl 4(%ecx), %eax
+; FALLBACK19-NEXT: movl %eax, (%esp) # 4-byte Spill
+; FALLBACK19-NEXT: movl 8(%ecx), %esi
+; FALLBACK19-NEXT: movl 12(%ecx), %edi
+; FALLBACK19-NEXT: movl 16(%ecx), %ebp
+; FALLBACK19-NEXT: movzbl (%ebx), %ebx
+; FALLBACK19-NEXT: movl 20(%ecx), %edx
+; FALLBACK19-NEXT: movl 24(%ecx), %eax
+; FALLBACK19-NEXT: movl 28(%ecx), %ecx
+; FALLBACK19-NEXT: xorps %xmm0, %xmm0
+; FALLBACK19-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movl %ebx, %ecx
+; FALLBACK19-NEXT: shlb $3, %cl
+; FALLBACK19-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movl %ebp, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movl (%esp), %eax # 4-byte Reload
+; FALLBACK19-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK19-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: andb $28, %bl
+; FALLBACK19-NEXT: movzbl %bl, %ebp
+; FALLBACK19-NEXT: movl 24(%esp,%ebp), %esi
+; FALLBACK19-NEXT: movl 20(%esp,%ebp), %eax
+; FALLBACK19-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK19-NEXT: shrdl %cl, %esi, %eax
+; FALLBACK19-NEXT: movl %eax, (%esp) # 4-byte Spill
+; FALLBACK19-NEXT: movl 32(%esp,%ebp), %ebx
+; FALLBACK19-NEXT: movl 28(%esp,%ebp), %eax
+; FALLBACK19-NEXT: movl %eax, %edx
+; FALLBACK19-NEXT: shrdl %cl, %ebx, %edx
+; FALLBACK19-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK19-NEXT: shrdl %cl, %eax, %esi
+; FALLBACK19-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK19-NEXT: movl 40(%esp,%ebp), %eax
+; FALLBACK19-NEXT: movl 36(%esp,%ebp), %edx
+; FALLBACK19-NEXT: movl %edx, %esi
+; FALLBACK19-NEXT: shrdl %cl, %eax, %esi
+; FALLBACK19-NEXT: shrdl %cl, %edx, %ebx
+; FALLBACK19-NEXT: movl 16(%esp,%ebp), %edx
+; FALLBACK19-NEXT: movl 44(%esp,%ebp), %edi
+; FALLBACK19-NEXT: shrdl %cl, %edi, %eax
+; FALLBACK19-NEXT: movl {{[0-9]+}}(%esp), %ebp
+; FALLBACK19-NEXT: movl %eax, 24(%ebp)
+; FALLBACK19-NEXT: shrxl %ecx, %edi, %eax
+; FALLBACK19-NEXT: movl %eax, 28(%ebp)
+; FALLBACK19-NEXT: movl %ebx, 16(%ebp)
+; FALLBACK19-NEXT: movl %esi, 20(%ebp)
+; FALLBACK19-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK19-NEXT: movl %eax, 8(%ebp)
+; FALLBACK19-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK19-NEXT: movl %eax, 12(%ebp)
+; FALLBACK19-NEXT: # kill: def $cl killed $cl killed $ecx
+; FALLBACK19-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK19-NEXT: shrdl %cl, %eax, %edx
+; FALLBACK19-NEXT: movl %edx, (%ebp)
+; FALLBACK19-NEXT: movl (%esp), %eax # 4-byte Reload
+; FALLBACK19-NEXT: movl %eax, 4(%ebp)
+; FALLBACK19-NEXT: addl $92, %esp
+; FALLBACK19-NEXT: popl %esi
+; FALLBACK19-NEXT: popl %edi
+; FALLBACK19-NEXT: popl %ebx
+; FALLBACK19-NEXT: popl %ebp
+; FALLBACK19-NEXT: retl
+;
+; FALLBACK20-LABEL: lshr_32bytes:
+; FALLBACK20: # %bb.0:
+; FALLBACK20-NEXT: pushl %ebp
+; FALLBACK20-NEXT: pushl %ebx
+; FALLBACK20-NEXT: pushl %edi
+; FALLBACK20-NEXT: pushl %esi
+; FALLBACK20-NEXT: subl $108, %esp
+; FALLBACK20-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK20-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK20-NEXT: movups (%ecx), %xmm0
+; FALLBACK20-NEXT: movups 16(%ecx), %xmm1
+; FALLBACK20-NEXT: movzbl (%eax), %ecx
+; FALLBACK20-NEXT: movl %ecx, %eax
+; FALLBACK20-NEXT: shlb $3, %al
+; FALLBACK20-NEXT: xorps %xmm2, %xmm2
+; FALLBACK20-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; FALLBACK20-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; FALLBACK20-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; FALLBACK20-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; FALLBACK20-NEXT: andb $28, %cl
+; FALLBACK20-NEXT: movzbl %cl, %ecx
+; FALLBACK20-NEXT: movl 32(%esp,%ecx), %esi
+; FALLBACK20-NEXT: movl 36(%esp,%ecx), %ebx
+; FALLBACK20-NEXT: movl %ecx, %edi
+; FALLBACK20-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK20-NEXT: movl %eax, %ecx
+; FALLBACK20-NEXT: shrl %cl, %esi
+; FALLBACK20-NEXT: movl %eax, %edx
+; FALLBACK20-NEXT: notb %dl
+; FALLBACK20-NEXT: addl %ebx, %ebx
+; FALLBACK20-NEXT: movl %edx, %ecx
+; FALLBACK20-NEXT: shll %cl, %ebx
+; FALLBACK20-NEXT: orl %esi, %ebx
+; FALLBACK20-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK20-NEXT: movl 44(%esp,%edi), %ebp
+; FALLBACK20-NEXT: movl %ebp, %esi
+; FALLBACK20-NEXT: movl %eax, %ecx
+; FALLBACK20-NEXT: shrl %cl, %esi
+; FALLBACK20-NEXT: movl 48(%esp,%edi), %ecx
+; FALLBACK20-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK20-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK20-NEXT: leal (%ecx,%ecx), %ebx
+; FALLBACK20-NEXT: movl %edx, %ecx
+; FALLBACK20-NEXT: shll %cl, %ebx
+; FALLBACK20-NEXT: orl %esi, %ebx
+; FALLBACK20-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK20-NEXT: movl 40(%esp,%edi), %esi
+; FALLBACK20-NEXT: movl %esi, %ebx
+; FALLBACK20-NEXT: movl %eax, %ecx
+; FALLBACK20-NEXT: shrl %cl, %ebx
+; FALLBACK20-NEXT: addl %ebp, %ebp
+; FALLBACK20-NEXT: movl %edx, %ecx
+; FALLBACK20-NEXT: shll %cl, %ebp
+; FALLBACK20-NEXT: orl %ebx, %ebp
+; FALLBACK20-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK20-NEXT: movl 52(%esp,%edi), %ebp
+; FALLBACK20-NEXT: movl %ebp, %ebx
+; FALLBACK20-NEXT: movl %eax, %ecx
+; FALLBACK20-NEXT: shrl %cl, %ebx
+; FALLBACK20-NEXT: movl 56(%esp,%edi), %ecx
+; FALLBACK20-NEXT: movl %ecx, (%esp) # 4-byte Spill
+; FALLBACK20-NEXT: leal (%ecx,%ecx), %edi
+; FALLBACK20-NEXT: movl %edx, %ecx
+; FALLBACK20-NEXT: shll %cl, %edi
+; FALLBACK20-NEXT: orl %ebx, %edi
+; FALLBACK20-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK20-NEXT: movl %eax, %ecx
+; FALLBACK20-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; FALLBACK20-NEXT: shrl %cl, %edi
+; FALLBACK20-NEXT: addl %ebp, %ebp
+; FALLBACK20-NEXT: movl %edx, %ecx
+; FALLBACK20-NEXT: shll %cl, %ebp
+; FALLBACK20-NEXT: orl %edi, %ebp
+; FALLBACK20-NEXT: movl %eax, %ecx
+; FALLBACK20-NEXT: shrl %cl, (%esp) # 4-byte Folded Spill
+; FALLBACK20-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK20-NEXT: movl 60(%esp,%ecx), %ebx
+; FALLBACK20-NEXT: leal (%ebx,%ebx), %edi
+; FALLBACK20-NEXT: movl %edx, %ecx
+; FALLBACK20-NEXT: shll %cl, %edi
+; FALLBACK20-NEXT: orl (%esp), %edi # 4-byte Folded Reload
+; FALLBACK20-NEXT: movl %eax, %ecx
+; FALLBACK20-NEXT: shrl %cl, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; FALLBACK20-NEXT: addl %esi, %esi
+; FALLBACK20-NEXT: movl %edx, %ecx
+; FALLBACK20-NEXT: shll %cl, %esi
+; FALLBACK20-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; FALLBACK20-NEXT: movl %eax, %ecx
+; FALLBACK20-NEXT: shrl %cl, %ebx
+; FALLBACK20-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK20-NEXT: movl %ebx, 28(%eax)
+; FALLBACK20-NEXT: movl %esi, 4(%eax)
+; FALLBACK20-NEXT: movl %edi, 24(%eax)
+; FALLBACK20-NEXT: movl %ebp, 16(%eax)
+; FALLBACK20-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK20-NEXT: movl %ecx, 20(%eax)
+; FALLBACK20-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK20-NEXT: movl %ecx, 8(%eax)
+; FALLBACK20-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK20-NEXT: movl %ecx, 12(%eax)
+; FALLBACK20-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK20-NEXT: movl %ecx, (%eax)
+; FALLBACK20-NEXT: addl $108, %esp
+; FALLBACK20-NEXT: popl %esi
+; FALLBACK20-NEXT: popl %edi
+; FALLBACK20-NEXT: popl %ebx
+; FALLBACK20-NEXT: popl %ebp
+; FALLBACK20-NEXT: retl
+;
+; FALLBACK21-LABEL: lshr_32bytes:
+; FALLBACK21: # %bb.0:
+; FALLBACK21-NEXT: pushl %ebp
+; FALLBACK21-NEXT: pushl %ebx
+; FALLBACK21-NEXT: pushl %edi
+; FALLBACK21-NEXT: pushl %esi
+; FALLBACK21-NEXT: subl $108, %esp
+; FALLBACK21-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK21-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK21-NEXT: movups (%ecx), %xmm0
+; FALLBACK21-NEXT: movups 16(%ecx), %xmm1
+; FALLBACK21-NEXT: movzbl (%eax), %eax
+; FALLBACK21-NEXT: movl %eax, %ecx
+; FALLBACK21-NEXT: shlb $3, %cl
+; FALLBACK21-NEXT: xorps %xmm2, %xmm2
+; FALLBACK21-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; FALLBACK21-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; FALLBACK21-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; FALLBACK21-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; FALLBACK21-NEXT: andb $28, %al
+; FALLBACK21-NEXT: movzbl %al, %ebp
+; FALLBACK21-NEXT: movl 48(%esp,%ebp), %esi
+; FALLBACK21-NEXT: movl 44(%esp,%ebp), %eax
+; FALLBACK21-NEXT: movl %eax, %edx
+; FALLBACK21-NEXT: shrdl %cl, %esi, %edx
+; FALLBACK21-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK21-NEXT: movl 40(%esp,%ebp), %edx
+; FALLBACK21-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK21-NEXT: shrdl %cl, %eax, %edx
+; FALLBACK21-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK21-NEXT: movl 56(%esp,%ebp), %ebx
+; FALLBACK21-NEXT: movl 52(%esp,%ebp), %eax
+; FALLBACK21-NEXT: movl %eax, %edx
+; FALLBACK21-NEXT: shrdl %cl, %ebx, %edx
+; FALLBACK21-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK21-NEXT: shrdl %cl, %eax, %esi
+; FALLBACK21-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK21-NEXT: movl 60(%esp,%ebp), %eax
+; FALLBACK21-NEXT: shrdl %cl, %eax, %ebx
+; FALLBACK21-NEXT: movl 32(%esp,%ebp), %edx
+; FALLBACK21-NEXT: movl 36(%esp,%ebp), %edi
+; FALLBACK21-NEXT: movl %edi, %esi
+; FALLBACK21-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; FALLBACK21-NEXT: shrdl %cl, %ebp, %esi
+; FALLBACK21-NEXT: movl {{[0-9]+}}(%esp), %ebp
+; FALLBACK21-NEXT: movl %esi, 4(%ebp)
+; FALLBACK21-NEXT: movl %ebx, 24(%ebp)
+; FALLBACK21-NEXT: shrdl %cl, %edi, %edx
+; FALLBACK21-NEXT: shrl %cl, %eax
+; FALLBACK21-NEXT: movl %eax, 28(%ebp)
+; FALLBACK21-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK21-NEXT: movl %eax, 16(%ebp)
+; FALLBACK21-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK21-NEXT: movl %eax, 20(%ebp)
+; FALLBACK21-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK21-NEXT: movl %eax, 8(%ebp)
+; FALLBACK21-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK21-NEXT: movl %eax, 12(%ebp)
+; FALLBACK21-NEXT: movl %edx, (%ebp)
+; FALLBACK21-NEXT: addl $108, %esp
+; FALLBACK21-NEXT: popl %esi
+; FALLBACK21-NEXT: popl %edi
+; FALLBACK21-NEXT: popl %ebx
+; FALLBACK21-NEXT: popl %ebp
+; FALLBACK21-NEXT: retl
+;
+; FALLBACK22-LABEL: lshr_32bytes:
+; FALLBACK22: # %bb.0:
+; FALLBACK22-NEXT: pushl %ebp
+; FALLBACK22-NEXT: pushl %ebx
+; FALLBACK22-NEXT: pushl %edi
+; FALLBACK22-NEXT: pushl %esi
+; FALLBACK22-NEXT: subl $108, %esp
+; FALLBACK22-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK22-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK22-NEXT: movups (%ecx), %xmm0
+; FALLBACK22-NEXT: movups 16(%ecx), %xmm1
+; FALLBACK22-NEXT: movzbl (%eax), %ecx
+; FALLBACK22-NEXT: movl %ecx, %edx
+; FALLBACK22-NEXT: shlb $3, %dl
+; FALLBACK22-NEXT: xorps %xmm2, %xmm2
+; FALLBACK22-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; FALLBACK22-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; FALLBACK22-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; FALLBACK22-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; FALLBACK22-NEXT: andb $28, %cl
+; FALLBACK22-NEXT: movzbl %cl, %edi
+; FALLBACK22-NEXT: shrxl %edx, 32(%esp,%edi), %ecx
+; FALLBACK22-NEXT: movl %edx, %eax
+; FALLBACK22-NEXT: notb %al
+; FALLBACK22-NEXT: movl 36(%esp,%edi), %esi
+; FALLBACK22-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: addl %esi, %esi
+; FALLBACK22-NEXT: shlxl %eax, %esi, %esi
+; FALLBACK22-NEXT: orl %ecx, %esi
+; FALLBACK22-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: movl 48(%esp,%edi), %ecx
+; FALLBACK22-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: addl %ecx, %ecx
+; FALLBACK22-NEXT: shlxl %eax, %ecx, %esi
+; FALLBACK22-NEXT: movl %eax, %ebp
+; FALLBACK22-NEXT: movl 44(%esp,%edi), %ecx
+; FALLBACK22-NEXT: shrxl %edx, %ecx, %ebx
+; FALLBACK22-NEXT: orl %ebx, %esi
+; FALLBACK22-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: addl %ecx, %ecx
+; FALLBACK22-NEXT: shlxl %eax, %ecx, %esi
+; FALLBACK22-NEXT: movl 40(%esp,%edi), %eax
+; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: shrxl %edx, %eax, %ebx
+; FALLBACK22-NEXT: orl %ebx, %esi
+; FALLBACK22-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: movl 56(%esp,%edi), %esi
+; FALLBACK22-NEXT: leal (%esi,%esi), %ebx
+; FALLBACK22-NEXT: shlxl %ebp, %ebx, %eax
+; FALLBACK22-NEXT: movl %ebp, %ecx
+; FALLBACK22-NEXT: movl 52(%esp,%edi), %ebx
+; FALLBACK22-NEXT: shrxl %edx, %ebx, %ebp
+; FALLBACK22-NEXT: orl %ebp, %eax
+; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; FALLBACK22-NEXT: addl %ebx, %ebx
+; FALLBACK22-NEXT: shlxl %ecx, %ebx, %ebx
+; FALLBACK22-NEXT: orl %ebp, %ebx
+; FALLBACK22-NEXT: shrxl %edx, %esi, %ebp
+; FALLBACK22-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; FALLBACK22-NEXT: movl 60(%esp,%edi), %edi
+; FALLBACK22-NEXT: shrxl %edx, %edi, %eax
+; FALLBACK22-NEXT: addl %edi, %edi
+; FALLBACK22-NEXT: movl %ecx, %edx
+; FALLBACK22-NEXT: shlxl %ecx, %edi, %edi
+; FALLBACK22-NEXT: orl %ebp, %edi
+; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK22-NEXT: addl %ecx, %ecx
+; FALLBACK22-NEXT: shlxl %edx, %ecx, %ecx
+; FALLBACK22-NEXT: orl %esi, %ecx
+; FALLBACK22-NEXT: movl {{[0-9]+}}(%esp), %edx
+; FALLBACK22-NEXT: movl %eax, 28(%edx)
+; FALLBACK22-NEXT: movl %ecx, 4(%edx)
+; FALLBACK22-NEXT: movl %edi, 24(%edx)
+; FALLBACK22-NEXT: movl %ebx, 16(%edx)
+; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK22-NEXT: movl %eax, 20(%edx)
+; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK22-NEXT: movl %eax, 8(%edx)
+; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK22-NEXT: movl %eax, 12(%edx)
+; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK22-NEXT: movl %eax, (%edx)
+; FALLBACK22-NEXT: addl $108, %esp
+; FALLBACK22-NEXT: popl %esi
+; FALLBACK22-NEXT: popl %edi
+; FALLBACK22-NEXT: popl %ebx
+; FALLBACK22-NEXT: popl %ebp
+; FALLBACK22-NEXT: retl
+;
+; FALLBACK23-LABEL: lshr_32bytes:
+; FALLBACK23: # %bb.0:
+; FALLBACK23-NEXT: pushl %ebp
+; FALLBACK23-NEXT: pushl %ebx
+; FALLBACK23-NEXT: pushl %edi
+; FALLBACK23-NEXT: pushl %esi
+; FALLBACK23-NEXT: subl $108, %esp
+; FALLBACK23-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK23-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK23-NEXT: movups (%ecx), %xmm0
+; FALLBACK23-NEXT: movups 16(%ecx), %xmm1
+; FALLBACK23-NEXT: movzbl (%eax), %eax
+; FALLBACK23-NEXT: movl %eax, %ecx
+; FALLBACK23-NEXT: shlb $3, %cl
+; FALLBACK23-NEXT: xorps %xmm2, %xmm2
+; FALLBACK23-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; FALLBACK23-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; FALLBACK23-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; FALLBACK23-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; FALLBACK23-NEXT: andb $28, %al
+; FALLBACK23-NEXT: movzbl %al, %ebx
+; FALLBACK23-NEXT: movl 48(%esp,%ebx), %esi
+; FALLBACK23-NEXT: movl 44(%esp,%ebx), %eax
+; FALLBACK23-NEXT: movl %eax, %edx
+; FALLBACK23-NEXT: shrdl %cl, %esi, %edx
+; FALLBACK23-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK23-NEXT: movl 40(%esp,%ebx), %edx
+; FALLBACK23-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK23-NEXT: shrdl %cl, %eax, %edx
+; FALLBACK23-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK23-NEXT: movl 56(%esp,%ebx), %ebp
+; FALLBACK23-NEXT: movl 52(%esp,%ebx), %eax
+; FALLBACK23-NEXT: movl %eax, %edi
+; FALLBACK23-NEXT: shrdl %cl, %ebp, %edi
+; FALLBACK23-NEXT: shrdl %cl, %eax, %esi
+; FALLBACK23-NEXT: movl 60(%esp,%ebx), %eax
+; FALLBACK23-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK23-NEXT: shrdl %cl, %eax, %ebp
+; FALLBACK23-NEXT: movl 32(%esp,%ebx), %edx
+; FALLBACK23-NEXT: movl 36(%esp,%ebx), %ebx
+; FALLBACK23-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK23-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK23-NEXT: shrdl %cl, %eax, %ebx
+; FALLBACK23-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK23-NEXT: movl %ebx, 4(%eax)
+; FALLBACK23-NEXT: movl %ebp, 24(%eax)
+; FALLBACK23-NEXT: shrxl %ecx, {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; FALLBACK23-NEXT: movl %ebx, 28(%eax)
+; FALLBACK23-NEXT: movl %esi, 16(%eax)
+; FALLBACK23-NEXT: movl %edi, 20(%eax)
+; FALLBACK23-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; FALLBACK23-NEXT: movl %esi, 8(%eax)
+; FALLBACK23-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; FALLBACK23-NEXT: movl %esi, 12(%eax)
+; FALLBACK23-NEXT: # kill: def $cl killed $cl killed $ecx
+; FALLBACK23-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; FALLBACK23-NEXT: shrdl %cl, %esi, %edx
+; FALLBACK23-NEXT: movl %edx, (%eax)
+; FALLBACK23-NEXT: addl $108, %esp
+; FALLBACK23-NEXT: popl %esi
+; FALLBACK23-NEXT: popl %edi
+; FALLBACK23-NEXT: popl %ebx
+; FALLBACK23-NEXT: popl %ebp
+; FALLBACK23-NEXT: retl
+;
+; FALLBACK24-LABEL: lshr_32bytes:
+; FALLBACK24: # %bb.0:
+; FALLBACK24-NEXT: pushl %ebp
+; FALLBACK24-NEXT: pushl %ebx
+; FALLBACK24-NEXT: pushl %edi
+; FALLBACK24-NEXT: pushl %esi
+; FALLBACK24-NEXT: subl $108, %esp
+; FALLBACK24-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK24-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK24-NEXT: vmovups (%ecx), %ymm0
+; FALLBACK24-NEXT: movzbl (%eax), %ecx
+; FALLBACK24-NEXT: movl %ecx, %eax
+; FALLBACK24-NEXT: shlb $3, %al
+; FALLBACK24-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; FALLBACK24-NEXT: vmovups %ymm1, {{[0-9]+}}(%esp)
+; FALLBACK24-NEXT: vmovups %ymm0, {{[0-9]+}}(%esp)
+; FALLBACK24-NEXT: andb $28, %cl
+; FALLBACK24-NEXT: movzbl %cl, %ecx
+; FALLBACK24-NEXT: movl 32(%esp,%ecx), %esi
+; FALLBACK24-NEXT: movl 36(%esp,%ecx), %ebx
+; FALLBACK24-NEXT: movl %ecx, %edi
+; FALLBACK24-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK24-NEXT: movl %eax, %ecx
+; FALLBACK24-NEXT: shrl %cl, %esi
+; FALLBACK24-NEXT: movl %eax, %edx
+; FALLBACK24-NEXT: notb %dl
+; FALLBACK24-NEXT: addl %ebx, %ebx
+; FALLBACK24-NEXT: movl %edx, %ecx
+; FALLBACK24-NEXT: shll %cl, %ebx
+; FALLBACK24-NEXT: orl %esi, %ebx
+; FALLBACK24-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK24-NEXT: movl 44(%esp,%edi), %ebp
+; FALLBACK24-NEXT: movl %ebp, %esi
+; FALLBACK24-NEXT: movl %eax, %ecx
+; FALLBACK24-NEXT: shrl %cl, %esi
+; FALLBACK24-NEXT: movl 48(%esp,%edi), %ecx
+; FALLBACK24-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK24-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK24-NEXT: leal (%ecx,%ecx), %ebx
+; FALLBACK24-NEXT: movl %edx, %ecx
+; FALLBACK24-NEXT: shll %cl, %ebx
+; FALLBACK24-NEXT: orl %esi, %ebx
+; FALLBACK24-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK24-NEXT: movl 40(%esp,%edi), %esi
+; FALLBACK24-NEXT: movl %esi, %ebx
+; FALLBACK24-NEXT: movl %eax, %ecx
+; FALLBACK24-NEXT: shrl %cl, %ebx
+; FALLBACK24-NEXT: addl %ebp, %ebp
+; FALLBACK24-NEXT: movl %edx, %ecx
+; FALLBACK24-NEXT: shll %cl, %ebp
+; FALLBACK24-NEXT: orl %ebx, %ebp
+; FALLBACK24-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK24-NEXT: movl 52(%esp,%edi), %ebp
+; FALLBACK24-NEXT: movl %ebp, %ebx
+; FALLBACK24-NEXT: movl %eax, %ecx
+; FALLBACK24-NEXT: shrl %cl, %ebx
+; FALLBACK24-NEXT: movl 56(%esp,%edi), %ecx
+; FALLBACK24-NEXT: movl %ecx, (%esp) # 4-byte Spill
+; FALLBACK24-NEXT: leal (%ecx,%ecx), %edi
+; FALLBACK24-NEXT: movl %edx, %ecx
+; FALLBACK24-NEXT: shll %cl, %edi
+; FALLBACK24-NEXT: orl %ebx, %edi
+; FALLBACK24-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK24-NEXT: movl %eax, %ecx
+; FALLBACK24-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; FALLBACK24-NEXT: shrl %cl, %edi
+; FALLBACK24-NEXT: addl %ebp, %ebp
+; FALLBACK24-NEXT: movl %edx, %ecx
+; FALLBACK24-NEXT: shll %cl, %ebp
+; FALLBACK24-NEXT: orl %edi, %ebp
+; FALLBACK24-NEXT: movl %eax, %ecx
+; FALLBACK24-NEXT: shrl %cl, (%esp) # 4-byte Folded Spill
+; FALLBACK24-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK24-NEXT: movl 60(%esp,%ecx), %ebx
+; FALLBACK24-NEXT: leal (%ebx,%ebx), %edi
+; FALLBACK24-NEXT: movl %edx, %ecx
+; FALLBACK24-NEXT: shll %cl, %edi
+; FALLBACK24-NEXT: orl (%esp), %edi # 4-byte Folded Reload
+; FALLBACK24-NEXT: movl %eax, %ecx
+; FALLBACK24-NEXT: shrl %cl, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; FALLBACK24-NEXT: addl %esi, %esi
+; FALLBACK24-NEXT: movl %edx, %ecx
+; FALLBACK24-NEXT: shll %cl, %esi
+; FALLBACK24-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; FALLBACK24-NEXT: movl %eax, %ecx
+; FALLBACK24-NEXT: shrl %cl, %ebx
+; FALLBACK24-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK24-NEXT: movl %ebx, 28(%eax)
+; FALLBACK24-NEXT: movl %esi, 4(%eax)
+; FALLBACK24-NEXT: movl %edi, 24(%eax)
+; FALLBACK24-NEXT: movl %ebp, 16(%eax)
+; FALLBACK24-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK24-NEXT: movl %ecx, 20(%eax)
+; FALLBACK24-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK24-NEXT: movl %ecx, 8(%eax)
+; FALLBACK24-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK24-NEXT: movl %ecx, 12(%eax)
+; FALLBACK24-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK24-NEXT: movl %ecx, (%eax)
+; FALLBACK24-NEXT: addl $108, %esp
+; FALLBACK24-NEXT: popl %esi
+; FALLBACK24-NEXT: popl %edi
+; FALLBACK24-NEXT: popl %ebx
+; FALLBACK24-NEXT: popl %ebp
+; FALLBACK24-NEXT: vzeroupper
+; FALLBACK24-NEXT: retl
+;
+; FALLBACK25-LABEL: lshr_32bytes:
+; FALLBACK25: # %bb.0:
+; FALLBACK25-NEXT: pushl %ebp
+; FALLBACK25-NEXT: pushl %ebx
+; FALLBACK25-NEXT: pushl %edi
+; FALLBACK25-NEXT: pushl %esi
+; FALLBACK25-NEXT: subl $108, %esp
+; FALLBACK25-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK25-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK25-NEXT: vmovups (%ecx), %ymm0
+; FALLBACK25-NEXT: movzbl (%eax), %eax
+; FALLBACK25-NEXT: movl %eax, %ecx
+; FALLBACK25-NEXT: shlb $3, %cl
+; FALLBACK25-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; FALLBACK25-NEXT: vmovups %ymm1, {{[0-9]+}}(%esp)
+; FALLBACK25-NEXT: vmovups %ymm0, {{[0-9]+}}(%esp)
+; FALLBACK25-NEXT: andb $28, %al
+; FALLBACK25-NEXT: movzbl %al, %ebp
+; FALLBACK25-NEXT: movl 48(%esp,%ebp), %esi
+; FALLBACK25-NEXT: movl 44(%esp,%ebp), %eax
+; FALLBACK25-NEXT: movl %eax, %edx
+; FALLBACK25-NEXT: shrdl %cl, %esi, %edx
+; FALLBACK25-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK25-NEXT: movl 40(%esp,%ebp), %edx
+; FALLBACK25-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK25-NEXT: shrdl %cl, %eax, %edx
+; FALLBACK25-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK25-NEXT: movl 56(%esp,%ebp), %ebx
+; FALLBACK25-NEXT: movl 52(%esp,%ebp), %eax
+; FALLBACK25-NEXT: movl %eax, %edx
+; FALLBACK25-NEXT: shrdl %cl, %ebx, %edx
+; FALLBACK25-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK25-NEXT: shrdl %cl, %eax, %esi
+; FALLBACK25-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK25-NEXT: movl 60(%esp,%ebp), %eax
+; FALLBACK25-NEXT: shrdl %cl, %eax, %ebx
+; FALLBACK25-NEXT: movl 32(%esp,%ebp), %edx
+; FALLBACK25-NEXT: movl 36(%esp,%ebp), %edi
+; FALLBACK25-NEXT: movl %edi, %esi
+; FALLBACK25-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; FALLBACK25-NEXT: shrdl %cl, %ebp, %esi
+; FALLBACK25-NEXT: movl {{[0-9]+}}(%esp), %ebp
+; FALLBACK25-NEXT: movl %esi, 4(%ebp)
+; FALLBACK25-NEXT: movl %ebx, 24(%ebp)
+; FALLBACK25-NEXT: shrdl %cl, %edi, %edx
+; FALLBACK25-NEXT: shrl %cl, %eax
+; FALLBACK25-NEXT: movl %eax, 28(%ebp)
+; FALLBACK25-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK25-NEXT: movl %eax, 16(%ebp)
+; FALLBACK25-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK25-NEXT: movl %eax, 20(%ebp)
+; FALLBACK25-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK25-NEXT: movl %eax, 8(%ebp)
+; FALLBACK25-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK25-NEXT: movl %eax, 12(%ebp)
+; FALLBACK25-NEXT: movl %edx, (%ebp)
+; FALLBACK25-NEXT: addl $108, %esp
+; FALLBACK25-NEXT: popl %esi
+; FALLBACK25-NEXT: popl %edi
+; FALLBACK25-NEXT: popl %ebx
+; FALLBACK25-NEXT: popl %ebp
+; FALLBACK25-NEXT: vzeroupper
+; FALLBACK25-NEXT: retl
+;
+; FALLBACK26-LABEL: lshr_32bytes:
+; FALLBACK26: # %bb.0:
+; FALLBACK26-NEXT: pushl %ebp
+; FALLBACK26-NEXT: pushl %ebx
+; FALLBACK26-NEXT: pushl %edi
+; FALLBACK26-NEXT: pushl %esi
+; FALLBACK26-NEXT: subl $108, %esp
+; FALLBACK26-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK26-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK26-NEXT: vmovups (%ecx), %ymm0
+; FALLBACK26-NEXT: movzbl (%eax), %ecx
+; FALLBACK26-NEXT: movl %ecx, %edx
+; FALLBACK26-NEXT: shlb $3, %dl
+; FALLBACK26-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; FALLBACK26-NEXT: vmovups %ymm1, {{[0-9]+}}(%esp)
+; FALLBACK26-NEXT: vmovups %ymm0, {{[0-9]+}}(%esp)
+; FALLBACK26-NEXT: andb $28, %cl
+; FALLBACK26-NEXT: movzbl %cl, %edi
+; FALLBACK26-NEXT: shrxl %edx, 32(%esp,%edi), %ecx
+; FALLBACK26-NEXT: movl %edx, %eax
+; FALLBACK26-NEXT: notb %al
+; FALLBACK26-NEXT: movl 36(%esp,%edi), %esi
+; FALLBACK26-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: addl %esi, %esi
+; FALLBACK26-NEXT: shlxl %eax, %esi, %esi
+; FALLBACK26-NEXT: orl %ecx, %esi
+; FALLBACK26-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: movl 48(%esp,%edi), %ecx
+; FALLBACK26-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: addl %ecx, %ecx
+; FALLBACK26-NEXT: shlxl %eax, %ecx, %esi
+; FALLBACK26-NEXT: movl %eax, %ebp
+; FALLBACK26-NEXT: movl 44(%esp,%edi), %ecx
+; FALLBACK26-NEXT: shrxl %edx, %ecx, %ebx
+; FALLBACK26-NEXT: orl %ebx, %esi
+; FALLBACK26-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: addl %ecx, %ecx
+; FALLBACK26-NEXT: shlxl %eax, %ecx, %esi
+; FALLBACK26-NEXT: movl 40(%esp,%edi), %eax
+; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: shrxl %edx, %eax, %ebx
+; FALLBACK26-NEXT: orl %ebx, %esi
+; FALLBACK26-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: movl 56(%esp,%edi), %esi
+; FALLBACK26-NEXT: leal (%esi,%esi), %ebx
+; FALLBACK26-NEXT: shlxl %ebp, %ebx, %eax
+; FALLBACK26-NEXT: movl %ebp, %ecx
+; FALLBACK26-NEXT: movl 52(%esp,%edi), %ebx
+; FALLBACK26-NEXT: shrxl %edx, %ebx, %ebp
+; FALLBACK26-NEXT: orl %ebp, %eax
+; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; FALLBACK26-NEXT: addl %ebx, %ebx
+; FALLBACK26-NEXT: shlxl %ecx, %ebx, %ebx
+; FALLBACK26-NEXT: orl %ebp, %ebx
+; FALLBACK26-NEXT: shrxl %edx, %esi, %ebp
+; FALLBACK26-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; FALLBACK26-NEXT: movl 60(%esp,%edi), %edi
+; FALLBACK26-NEXT: shrxl %edx, %edi, %eax
+; FALLBACK26-NEXT: addl %edi, %edi
+; FALLBACK26-NEXT: movl %ecx, %edx
+; FALLBACK26-NEXT: shlxl %ecx, %edi, %edi
+; FALLBACK26-NEXT: orl %ebp, %edi
+; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK26-NEXT: addl %ecx, %ecx
+; FALLBACK26-NEXT: shlxl %edx, %ecx, %ecx
+; FALLBACK26-NEXT: orl %esi, %ecx
+; FALLBACK26-NEXT: movl {{[0-9]+}}(%esp), %edx
+; FALLBACK26-NEXT: movl %eax, 28(%edx)
+; FALLBACK26-NEXT: movl %ecx, 4(%edx)
+; FALLBACK26-NEXT: movl %edi, 24(%edx)
+; FALLBACK26-NEXT: movl %ebx, 16(%edx)
+; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK26-NEXT: movl %eax, 20(%edx)
+; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK26-NEXT: movl %eax, 8(%edx)
+; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK26-NEXT: movl %eax, 12(%edx)
+; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK26-NEXT: movl %eax, (%edx)
+; FALLBACK26-NEXT: addl $108, %esp
+; FALLBACK26-NEXT: popl %esi
+; FALLBACK26-NEXT: popl %edi
+; FALLBACK26-NEXT: popl %ebx
+; FALLBACK26-NEXT: popl %ebp
+; FALLBACK26-NEXT: vzeroupper
+; FALLBACK26-NEXT: retl
+;
+; FALLBACK27-LABEL: lshr_32bytes:
+; FALLBACK27: # %bb.0:
+; FALLBACK27-NEXT: pushl %ebp
+; FALLBACK27-NEXT: pushl %ebx
+; FALLBACK27-NEXT: pushl %edi
+; FALLBACK27-NEXT: pushl %esi
+; FALLBACK27-NEXT: subl $108, %esp
+; FALLBACK27-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK27-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK27-NEXT: vmovups (%ecx), %ymm0
+; FALLBACK27-NEXT: movzbl (%eax), %eax
+; FALLBACK27-NEXT: movl %eax, %ecx
+; FALLBACK27-NEXT: shlb $3, %cl
+; FALLBACK27-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; FALLBACK27-NEXT: vmovups %ymm1, {{[0-9]+}}(%esp)
+; FALLBACK27-NEXT: vmovups %ymm0, {{[0-9]+}}(%esp)
+; FALLBACK27-NEXT: andb $28, %al
+; FALLBACK27-NEXT: movzbl %al, %ebx
+; FALLBACK27-NEXT: movl 48(%esp,%ebx), %esi
+; FALLBACK27-NEXT: movl 44(%esp,%ebx), %eax
+; FALLBACK27-NEXT: movl %eax, %edx
+; FALLBACK27-NEXT: shrdl %cl, %esi, %edx
+; FALLBACK27-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK27-NEXT: movl 40(%esp,%ebx), %edx
+; FALLBACK27-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK27-NEXT: shrdl %cl, %eax, %edx
+; FALLBACK27-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK27-NEXT: movl 56(%esp,%ebx), %ebp
+; FALLBACK27-NEXT: movl 52(%esp,%ebx), %eax
+; FALLBACK27-NEXT: movl %eax, %edi
+; FALLBACK27-NEXT: shrdl %cl, %ebp, %edi
+; FALLBACK27-NEXT: shrdl %cl, %eax, %esi
+; FALLBACK27-NEXT: movl 60(%esp,%ebx), %eax
+; FALLBACK27-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK27-NEXT: shrdl %cl, %eax, %ebp
+; FALLBACK27-NEXT: movl 32(%esp,%ebx), %edx
+; FALLBACK27-NEXT: movl 36(%esp,%ebx), %ebx
+; FALLBACK27-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK27-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK27-NEXT: shrdl %cl, %eax, %ebx
+; FALLBACK27-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK27-NEXT: movl %ebx, 4(%eax)
+; FALLBACK27-NEXT: movl %ebp, 24(%eax)
+; FALLBACK27-NEXT: shrxl %ecx, {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; FALLBACK27-NEXT: movl %ebx, 28(%eax)
+; FALLBACK27-NEXT: movl %esi, 16(%eax)
+; FALLBACK27-NEXT: movl %edi, 20(%eax)
+; FALLBACK27-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; FALLBACK27-NEXT: movl %esi, 8(%eax)
+; FALLBACK27-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; FALLBACK27-NEXT: movl %esi, 12(%eax)
+; FALLBACK27-NEXT: # kill: def $cl killed $cl killed $ecx
+; FALLBACK27-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; FALLBACK27-NEXT: shrdl %cl, %esi, %edx
+; FALLBACK27-NEXT: movl %edx, (%eax)
+; FALLBACK27-NEXT: addl $108, %esp
+; FALLBACK27-NEXT: popl %esi
+; FALLBACK27-NEXT: popl %edi
+; FALLBACK27-NEXT: popl %ebx
+; FALLBACK27-NEXT: popl %ebp
+; FALLBACK27-NEXT: vzeroupper
+; FALLBACK27-NEXT: retl
+;
+; FALLBACK28-LABEL: lshr_32bytes:
+; FALLBACK28: # %bb.0:
+; FALLBACK28-NEXT: pushl %ebp
+; FALLBACK28-NEXT: pushl %ebx
+; FALLBACK28-NEXT: pushl %edi
+; FALLBACK28-NEXT: pushl %esi
+; FALLBACK28-NEXT: subl $108, %esp
+; FALLBACK28-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK28-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK28-NEXT: vmovups (%ecx), %ymm0
+; FALLBACK28-NEXT: movzbl (%eax), %ecx
+; FALLBACK28-NEXT: movl %ecx, %eax
+; FALLBACK28-NEXT: shlb $3, %al
+; FALLBACK28-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; FALLBACK28-NEXT: vmovups %ymm1, {{[0-9]+}}(%esp)
+; FALLBACK28-NEXT: vmovups %ymm0, {{[0-9]+}}(%esp)
+; FALLBACK28-NEXT: andb $28, %cl
+; FALLBACK28-NEXT: movzbl %cl, %ecx
+; FALLBACK28-NEXT: movl 32(%esp,%ecx), %esi
+; FALLBACK28-NEXT: movl 36(%esp,%ecx), %ebx
+; FALLBACK28-NEXT: movl %ecx, %edi
+; FALLBACK28-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK28-NEXT: movl %eax, %ecx
+; FALLBACK28-NEXT: shrl %cl, %esi
+; FALLBACK28-NEXT: movl %eax, %edx
+; FALLBACK28-NEXT: notb %dl
+; FALLBACK28-NEXT: addl %ebx, %ebx
+; FALLBACK28-NEXT: movl %edx, %ecx
+; FALLBACK28-NEXT: shll %cl, %ebx
+; FALLBACK28-NEXT: orl %esi, %ebx
+; FALLBACK28-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK28-NEXT: movl 44(%esp,%edi), %ebp
+; FALLBACK28-NEXT: movl %ebp, %esi
+; FALLBACK28-NEXT: movl %eax, %ecx
+; FALLBACK28-NEXT: shrl %cl, %esi
+; FALLBACK28-NEXT: movl 48(%esp,%edi), %ecx
+; FALLBACK28-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK28-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK28-NEXT: leal (%ecx,%ecx), %ebx
+; FALLBACK28-NEXT: movl %edx, %ecx
+; FALLBACK28-NEXT: shll %cl, %ebx
+; FALLBACK28-NEXT: orl %esi, %ebx
+; FALLBACK28-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK28-NEXT: movl 40(%esp,%edi), %esi
+; FALLBACK28-NEXT: movl %esi, %ebx
+; FALLBACK28-NEXT: movl %eax, %ecx
+; FALLBACK28-NEXT: shrl %cl, %ebx
+; FALLBACK28-NEXT: addl %ebp, %ebp
+; FALLBACK28-NEXT: movl %edx, %ecx
+; FALLBACK28-NEXT: shll %cl, %ebp
+; FALLBACK28-NEXT: orl %ebx, %ebp
+; FALLBACK28-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK28-NEXT: movl 52(%esp,%edi), %ebp
+; FALLBACK28-NEXT: movl %ebp, %ebx
+; FALLBACK28-NEXT: movl %eax, %ecx
+; FALLBACK28-NEXT: shrl %cl, %ebx
+; FALLBACK28-NEXT: movl 56(%esp,%edi), %ecx
+; FALLBACK28-NEXT: movl %ecx, (%esp) # 4-byte Spill
+; FALLBACK28-NEXT: leal (%ecx,%ecx), %edi
+; FALLBACK28-NEXT: movl %edx, %ecx
+; FALLBACK28-NEXT: shll %cl, %edi
+; FALLBACK28-NEXT: orl %ebx, %edi
+; FALLBACK28-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK28-NEXT: movl %eax, %ecx
+; FALLBACK28-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; FALLBACK28-NEXT: shrl %cl, %edi
+; FALLBACK28-NEXT: addl %ebp, %ebp
+; FALLBACK28-NEXT: movl %edx, %ecx
+; FALLBACK28-NEXT: shll %cl, %ebp
+; FALLBACK28-NEXT: orl %edi, %ebp
+; FALLBACK28-NEXT: movl %eax, %ecx
+; FALLBACK28-NEXT: shrl %cl, (%esp) # 4-byte Folded Spill
+; FALLBACK28-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK28-NEXT: movl 60(%esp,%ecx), %ebx
+; FALLBACK28-NEXT: leal (%ebx,%ebx), %edi
+; FALLBACK28-NEXT: movl %edx, %ecx
+; FALLBACK28-NEXT: shll %cl, %edi
+; FALLBACK28-NEXT: orl (%esp), %edi # 4-byte Folded Reload
+; FALLBACK28-NEXT: movl %eax, %ecx
+; FALLBACK28-NEXT: shrl %cl, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; FALLBACK28-NEXT: addl %esi, %esi
+; FALLBACK28-NEXT: movl %edx, %ecx
+; FALLBACK28-NEXT: shll %cl, %esi
+; FALLBACK28-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; FALLBACK28-NEXT: movl %eax, %ecx
+; FALLBACK28-NEXT: shrl %cl, %ebx
+; FALLBACK28-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK28-NEXT: movl %ebx, 28(%eax)
+; FALLBACK28-NEXT: movl %esi, 4(%eax)
+; FALLBACK28-NEXT: movl %edi, 24(%eax)
+; FALLBACK28-NEXT: movl %ebp, 16(%eax)
+; FALLBACK28-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK28-NEXT: movl %ecx, 20(%eax)
+; FALLBACK28-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK28-NEXT: movl %ecx, 8(%eax)
+; FALLBACK28-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK28-NEXT: movl %ecx, 12(%eax)
+; FALLBACK28-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK28-NEXT: movl %ecx, (%eax)
+; FALLBACK28-NEXT: addl $108, %esp
+; FALLBACK28-NEXT: popl %esi
+; FALLBACK28-NEXT: popl %edi
+; FALLBACK28-NEXT: popl %ebx
+; FALLBACK28-NEXT: popl %ebp
+; FALLBACK28-NEXT: vzeroupper
+; FALLBACK28-NEXT: retl
+;
+; FALLBACK29-LABEL: lshr_32bytes:
+; FALLBACK29: # %bb.0:
+; FALLBACK29-NEXT: pushl %ebp
+; FALLBACK29-NEXT: pushl %ebx
+; FALLBACK29-NEXT: pushl %edi
+; FALLBACK29-NEXT: pushl %esi
+; FALLBACK29-NEXT: subl $108, %esp
+; FALLBACK29-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK29-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK29-NEXT: vmovups (%ecx), %ymm0
+; FALLBACK29-NEXT: movzbl (%eax), %eax
+; FALLBACK29-NEXT: movl %eax, %ecx
+; FALLBACK29-NEXT: shlb $3, %cl
+; FALLBACK29-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; FALLBACK29-NEXT: vmovups %ymm1, {{[0-9]+}}(%esp)
+; FALLBACK29-NEXT: vmovups %ymm0, {{[0-9]+}}(%esp)
+; FALLBACK29-NEXT: andb $28, %al
+; FALLBACK29-NEXT: movzbl %al, %ebp
+; FALLBACK29-NEXT: movl 48(%esp,%ebp), %esi
+; FALLBACK29-NEXT: movl 44(%esp,%ebp), %eax
+; FALLBACK29-NEXT: movl %eax, %edx
+; FALLBACK29-NEXT: shrdl %cl, %esi, %edx
+; FALLBACK29-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK29-NEXT: movl 40(%esp,%ebp), %edx
+; FALLBACK29-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK29-NEXT: shrdl %cl, %eax, %edx
+; FALLBACK29-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK29-NEXT: movl 56(%esp,%ebp), %ebx
+; FALLBACK29-NEXT: movl 52(%esp,%ebp), %eax
+; FALLBACK29-NEXT: movl %eax, %edx
+; FALLBACK29-NEXT: shrdl %cl, %ebx, %edx
+; FALLBACK29-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK29-NEXT: shrdl %cl, %eax, %esi
+; FALLBACK29-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK29-NEXT: movl 60(%esp,%ebp), %eax
+; FALLBACK29-NEXT: shrdl %cl, %eax, %ebx
+; FALLBACK29-NEXT: movl 32(%esp,%ebp), %edx
+; FALLBACK29-NEXT: movl 36(%esp,%ebp), %edi
+; FALLBACK29-NEXT: movl %edi, %esi
+; FALLBACK29-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; FALLBACK29-NEXT: shrdl %cl, %ebp, %esi
+; FALLBACK29-NEXT: movl {{[0-9]+}}(%esp), %ebp
+; FALLBACK29-NEXT: movl %esi, 4(%ebp)
+; FALLBACK29-NEXT: movl %ebx, 24(%ebp)
+; FALLBACK29-NEXT: shrdl %cl, %edi, %edx
+; FALLBACK29-NEXT: shrl %cl, %eax
+; FALLBACK29-NEXT: movl %eax, 28(%ebp)
+; FALLBACK29-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK29-NEXT: movl %eax, 16(%ebp)
+; FALLBACK29-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK29-NEXT: movl %eax, 20(%ebp)
+; FALLBACK29-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK29-NEXT: movl %eax, 8(%ebp)
+; FALLBACK29-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK29-NEXT: movl %eax, 12(%ebp)
+; FALLBACK29-NEXT: movl %edx, (%ebp)
+; FALLBACK29-NEXT: addl $108, %esp
+; FALLBACK29-NEXT: popl %esi
+; FALLBACK29-NEXT: popl %edi
+; FALLBACK29-NEXT: popl %ebx
+; FALLBACK29-NEXT: popl %ebp
+; FALLBACK29-NEXT: vzeroupper
+; FALLBACK29-NEXT: retl
+;
+; FALLBACK30-LABEL: lshr_32bytes:
+; FALLBACK30: # %bb.0:
+; FALLBACK30-NEXT: pushl %ebp
+; FALLBACK30-NEXT: pushl %ebx
+; FALLBACK30-NEXT: pushl %edi
+; FALLBACK30-NEXT: pushl %esi
+; FALLBACK30-NEXT: subl $108, %esp
+; FALLBACK30-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK30-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK30-NEXT: vmovups (%ecx), %ymm0
+; FALLBACK30-NEXT: movzbl (%eax), %ecx
+; FALLBACK30-NEXT: movl %ecx, %edx
+; FALLBACK30-NEXT: shlb $3, %dl
+; FALLBACK30-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; FALLBACK30-NEXT: vmovups %ymm1, {{[0-9]+}}(%esp)
+; FALLBACK30-NEXT: vmovups %ymm0, {{[0-9]+}}(%esp)
+; FALLBACK30-NEXT: andb $28, %cl
+; FALLBACK30-NEXT: movzbl %cl, %edi
+; FALLBACK30-NEXT: shrxl %edx, 32(%esp,%edi), %ecx
+; FALLBACK30-NEXT: movl %edx, %eax
+; FALLBACK30-NEXT: notb %al
+; FALLBACK30-NEXT: movl 36(%esp,%edi), %esi
+; FALLBACK30-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: addl %esi, %esi
+; FALLBACK30-NEXT: shlxl %eax, %esi, %esi
+; FALLBACK30-NEXT: orl %ecx, %esi
+; FALLBACK30-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: movl 48(%esp,%edi), %ecx
+; FALLBACK30-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: addl %ecx, %ecx
+; FALLBACK30-NEXT: shlxl %eax, %ecx, %esi
+; FALLBACK30-NEXT: movl %eax, %ebp
+; FALLBACK30-NEXT: movl 44(%esp,%edi), %ecx
+; FALLBACK30-NEXT: shrxl %edx, %ecx, %ebx
+; FALLBACK30-NEXT: orl %ebx, %esi
+; FALLBACK30-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: addl %ecx, %ecx
+; FALLBACK30-NEXT: shlxl %eax, %ecx, %esi
+; FALLBACK30-NEXT: movl 40(%esp,%edi), %eax
+; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: shrxl %edx, %eax, %ebx
+; FALLBACK30-NEXT: orl %ebx, %esi
+; FALLBACK30-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: movl 56(%esp,%edi), %esi
+; FALLBACK30-NEXT: leal (%esi,%esi), %ebx
+; FALLBACK30-NEXT: shlxl %ebp, %ebx, %eax
+; FALLBACK30-NEXT: movl %ebp, %ecx
+; FALLBACK30-NEXT: movl 52(%esp,%edi), %ebx
+; FALLBACK30-NEXT: shrxl %edx, %ebx, %ebp
+; FALLBACK30-NEXT: orl %ebp, %eax
+; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; FALLBACK30-NEXT: addl %ebx, %ebx
+; FALLBACK30-NEXT: shlxl %ecx, %ebx, %ebx
+; FALLBACK30-NEXT: orl %ebp, %ebx
+; FALLBACK30-NEXT: shrxl %edx, %esi, %ebp
+; FALLBACK30-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; FALLBACK30-NEXT: movl 60(%esp,%edi), %edi
+; FALLBACK30-NEXT: shrxl %edx, %edi, %eax
+; FALLBACK30-NEXT: addl %edi, %edi
+; FALLBACK30-NEXT: movl %ecx, %edx
+; FALLBACK30-NEXT: shlxl %ecx, %edi, %edi
+; FALLBACK30-NEXT: orl %ebp, %edi
+; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK30-NEXT: addl %ecx, %ecx
+; FALLBACK30-NEXT: shlxl %edx, %ecx, %ecx
+; FALLBACK30-NEXT: orl %esi, %ecx
+; FALLBACK30-NEXT: movl {{[0-9]+}}(%esp), %edx
+; FALLBACK30-NEXT: movl %eax, 28(%edx)
+; FALLBACK30-NEXT: movl %ecx, 4(%edx)
+; FALLBACK30-NEXT: movl %edi, 24(%edx)
+; FALLBACK30-NEXT: movl %ebx, 16(%edx)
+; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK30-NEXT: movl %eax, 20(%edx)
+; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK30-NEXT: movl %eax, 8(%edx)
+; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK30-NEXT: movl %eax, 12(%edx)
+; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK30-NEXT: movl %eax, (%edx)
+; FALLBACK30-NEXT: addl $108, %esp
+; FALLBACK30-NEXT: popl %esi
+; FALLBACK30-NEXT: popl %edi
+; FALLBACK30-NEXT: popl %ebx
+; FALLBACK30-NEXT: popl %ebp
+; FALLBACK30-NEXT: vzeroupper
+; FALLBACK30-NEXT: retl
+;
+; FALLBACK31-LABEL: lshr_32bytes:
+; FALLBACK31: # %bb.0:
+; FALLBACK31-NEXT: pushl %ebp
+; FALLBACK31-NEXT: pushl %ebx
+; FALLBACK31-NEXT: pushl %edi
+; FALLBACK31-NEXT: pushl %esi
+; FALLBACK31-NEXT: subl $108, %esp
+; FALLBACK31-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK31-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK31-NEXT: vmovups (%ecx), %ymm0
+; FALLBACK31-NEXT: movzbl (%eax), %eax
+; FALLBACK31-NEXT: movl %eax, %ecx
+; FALLBACK31-NEXT: shlb $3, %cl
+; FALLBACK31-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; FALLBACK31-NEXT: vmovups %ymm1, {{[0-9]+}}(%esp)
+; FALLBACK31-NEXT: vmovups %ymm0, {{[0-9]+}}(%esp)
+; FALLBACK31-NEXT: andb $28, %al
+; FALLBACK31-NEXT: movzbl %al, %ebx
+; FALLBACK31-NEXT: movl 48(%esp,%ebx), %esi
+; FALLBACK31-NEXT: movl 44(%esp,%ebx), %eax
+; FALLBACK31-NEXT: movl %eax, %edx
+; FALLBACK31-NEXT: shrdl %cl, %esi, %edx
+; FALLBACK31-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK31-NEXT: movl 40(%esp,%ebx), %edx
+; FALLBACK31-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK31-NEXT: shrdl %cl, %eax, %edx
+; FALLBACK31-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK31-NEXT: movl 56(%esp,%ebx), %ebp
+; FALLBACK31-NEXT: movl 52(%esp,%ebx), %eax
+; FALLBACK31-NEXT: movl %eax, %edi
+; FALLBACK31-NEXT: shrdl %cl, %ebp, %edi
+; FALLBACK31-NEXT: shrdl %cl, %eax, %esi
+; FALLBACK31-NEXT: movl 60(%esp,%ebx), %eax
+; FALLBACK31-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK31-NEXT: shrdl %cl, %eax, %ebp
+; FALLBACK31-NEXT: movl 32(%esp,%ebx), %edx
+; FALLBACK31-NEXT: movl 36(%esp,%ebx), %ebx
+; FALLBACK31-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK31-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK31-NEXT: shrdl %cl, %eax, %ebx
+; FALLBACK31-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK31-NEXT: movl %ebx, 4(%eax)
+; FALLBACK31-NEXT: movl %ebp, 24(%eax)
+; FALLBACK31-NEXT: shrxl %ecx, {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; FALLBACK31-NEXT: movl %ebx, 28(%eax)
+; FALLBACK31-NEXT: movl %esi, 16(%eax)
+; FALLBACK31-NEXT: movl %edi, 20(%eax)
+; FALLBACK31-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; FALLBACK31-NEXT: movl %esi, 8(%eax)
+; FALLBACK31-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; FALLBACK31-NEXT: movl %esi, 12(%eax)
+; FALLBACK31-NEXT: # kill: def $cl killed $cl killed $ecx
+; FALLBACK31-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; FALLBACK31-NEXT: shrdl %cl, %esi, %edx
+; FALLBACK31-NEXT: movl %edx, (%eax)
+; FALLBACK31-NEXT: addl $108, %esp
+; FALLBACK31-NEXT: popl %esi
+; FALLBACK31-NEXT: popl %edi
+; FALLBACK31-NEXT: popl %ebx
+; FALLBACK31-NEXT: popl %ebp
+; FALLBACK31-NEXT: vzeroupper
+; FALLBACK31-NEXT: retl
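+; The IR below is what every FALLBACK prefix above is checked against: a
+; 256-bit logical shift right, with the shift amount loaded as a byte offset
+; and scaled to bits (bitOff = byteOff * 8).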
+ %src = load i256, ptr %src.ptr, align 1
+ %byteOff = load i256, ptr %byteOff.ptr, align 1
+ %bitOff = shl i256 %byteOff, 3
+ %res = lshr i256 %src, %bitOff
+ store i256 %res, ptr %dst, align 1
+ ret void
+}
+
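+; Same operation as @lshr_32bytes above, but the shift amount is given in
+; dwords rather than bytes (bitOff = dwordOff * 32), which is why the asm
+; variants below mask the offset with `andb $6` and use a 4-byte scale in
+; the stack addressing.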
+define void @lshr_32bytes_dwordOff(ptr %src.ptr, ptr %dwordOff.ptr, ptr %dst) nounwind {
+; FALLBACK0-LABEL: lshr_32bytes_dwordOff:
+; FALLBACK0: # %bb.0:
+; FALLBACK0-NEXT: pushq %rbx
+; FALLBACK0-NEXT: movq (%rdi), %rcx
+; FALLBACK0-NEXT: movq 8(%rdi), %r8
+; FALLBACK0-NEXT: movq 16(%rdi), %r9
+; FALLBACK0-NEXT: movq 24(%rdi), %rdi
+; FALLBACK0-NEXT: movzbl (%rsi), %esi
+; FALLBACK0-NEXT: movl %esi, %eax
+; FALLBACK0-NEXT: shlb $5, %al
+; FALLBACK0-NEXT: xorps %xmm0, %xmm0
+; FALLBACK0-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK0-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK0-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK0-NEXT: movq %r9, -{{[0-9]+}}(%rsp)
+; FALLBACK0-NEXT: movq %r8, -{{[0-9]+}}(%rsp)
+; FALLBACK0-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
+; FALLBACK0-NEXT: andb $6, %sil
+; FALLBACK0-NEXT: movzbl %sil, %r9d
+; FALLBACK0-NEXT: movq -64(%rsp,%r9,4), %r10
+; FALLBACK0-NEXT: movq -56(%rsp,%r9,4), %rdi
+; FALLBACK0-NEXT: movq %rdi, %r11
+; FALLBACK0-NEXT: movl %eax, %ecx
+; FALLBACK0-NEXT: shrq %cl, %r11
+; FALLBACK0-NEXT: movl %eax, %esi
+; FALLBACK0-NEXT: notb %sil
+; FALLBACK0-NEXT: movq -48(%rsp,%r9,4), %rbx
+; FALLBACK0-NEXT: leaq (%rbx,%rbx), %r8
+; FALLBACK0-NEXT: movl %esi, %ecx
+; FALLBACK0-NEXT: shlq %cl, %r8
+; FALLBACK0-NEXT: orq %r11, %r8
+; FALLBACK0-NEXT: movl %eax, %ecx
+; FALLBACK0-NEXT: shrq %cl, %r10
+; FALLBACK0-NEXT: addq %rdi, %rdi
+; FALLBACK0-NEXT: movl %esi, %ecx
+; FALLBACK0-NEXT: shlq %cl, %rdi
+; FALLBACK0-NEXT: orq %r10, %rdi
+; FALLBACK0-NEXT: movl %eax, %ecx
+; FALLBACK0-NEXT: shrq %cl, %rbx
+; FALLBACK0-NEXT: movq -40(%rsp,%r9,4), %r9
+; FALLBACK0-NEXT: leaq (%r9,%r9), %r10
+; FALLBACK0-NEXT: movl %esi, %ecx
+; FALLBACK0-NEXT: shlq %cl, %r10
+; FALLBACK0-NEXT: orq %rbx, %r10
+; FALLBACK0-NEXT: movl %eax, %ecx
+; FALLBACK0-NEXT: shrq %cl, %r9
+; FALLBACK0-NEXT: movq %r9, 24(%rdx)
+; FALLBACK0-NEXT: movq %r10, 16(%rdx)
+; FALLBACK0-NEXT: movq %rdi, (%rdx)
+; FALLBACK0-NEXT: movq %r8, 8(%rdx)
+; FALLBACK0-NEXT: popq %rbx
+; FALLBACK0-NEXT: retq
+;
+; FALLBACK1-LABEL: lshr_32bytes_dwordOff:
+; FALLBACK1: # %bb.0:
+; FALLBACK1-NEXT: movq (%rdi), %rax
+; FALLBACK1-NEXT: movq 8(%rdi), %r8
+; FALLBACK1-NEXT: movq 16(%rdi), %r9
+; FALLBACK1-NEXT: movq 24(%rdi), %rdi
+; FALLBACK1-NEXT: movzbl (%rsi), %esi
+; FALLBACK1-NEXT: movl %esi, %ecx
+; FALLBACK1-NEXT: shlb $5, %cl
+; FALLBACK1-NEXT: xorps %xmm0, %xmm0
+; FALLBACK1-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK1-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK1-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK1-NEXT: movq %r9, -{{[0-9]+}}(%rsp)
+; FALLBACK1-NEXT: movq %r8, -{{[0-9]+}}(%rsp)
+; FALLBACK1-NEXT: movq %rax, -{{[0-9]+}}(%rsp)
+; FALLBACK1-NEXT: andb $6, %sil
+; FALLBACK1-NEXT: movzbl %sil, %eax
+; FALLBACK1-NEXT: movq -56(%rsp,%rax,4), %rsi
+; FALLBACK1-NEXT: movq -72(%rsp,%rax,4), %rdi
+; FALLBACK1-NEXT: movq -64(%rsp,%rax,4), %r8
+; FALLBACK1-NEXT: movq %r8, %r9
+; FALLBACK1-NEXT: shrdq %cl, %rsi, %r9
+; FALLBACK1-NEXT: movq -48(%rsp,%rax,4), %rax
+; FALLBACK1-NEXT: shrdq %cl, %rax, %rsi
+; FALLBACK1-NEXT: shrdq %cl, %r8, %rdi
+; FALLBACK1-NEXT: shrq %cl, %rax
+; FALLBACK1-NEXT: movq %rsi, 16(%rdx)
+; FALLBACK1-NEXT: movq %rax, 24(%rdx)
+; FALLBACK1-NEXT: movq %rdi, (%rdx)
+; FALLBACK1-NEXT: movq %r9, 8(%rdx)
+; FALLBACK1-NEXT: retq
+;
+; FALLBACK2-LABEL: lshr_32bytes_dwordOff:
+; FALLBACK2: # %bb.0:
+; FALLBACK2-NEXT: movq (%rdi), %rcx
+; FALLBACK2-NEXT: movq 8(%rdi), %r8
+; FALLBACK2-NEXT: movq 16(%rdi), %r9
+; FALLBACK2-NEXT: movq 24(%rdi), %rdi
+; FALLBACK2-NEXT: movzbl (%rsi), %esi
+; FALLBACK2-NEXT: movl %esi, %eax
+; FALLBACK2-NEXT: shlb $5, %al
+; FALLBACK2-NEXT: xorps %xmm0, %xmm0
+; FALLBACK2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK2-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK2-NEXT: movq %r9, -{{[0-9]+}}(%rsp)
+; FALLBACK2-NEXT: movq %r8, -{{[0-9]+}}(%rsp)
+; FALLBACK2-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
+; FALLBACK2-NEXT: andb $6, %sil
+; FALLBACK2-NEXT: movzbl %sil, %ecx
+; FALLBACK2-NEXT: movq -64(%rsp,%rcx,4), %rsi
+; FALLBACK2-NEXT: movq -56(%rsp,%rcx,4), %rdi
+; FALLBACK2-NEXT: shrxq %rax, %rsi, %r8
+; FALLBACK2-NEXT: shrxq %rax, -72(%rsp,%rcx,4), %r9
+; FALLBACK2-NEXT: shrxq %rax, %rdi, %r10
+; FALLBACK2-NEXT: movq -48(%rsp,%rcx,4), %rcx
+; FALLBACK2-NEXT: shrxq %rax, %rcx, %r11
+; FALLBACK2-NEXT: # kill: def $al killed $al killed $rax def $rax
+; FALLBACK2-NEXT: notb %al
+; FALLBACK2-NEXT: addq %rdi, %rdi
+; FALLBACK2-NEXT: shlxq %rax, %rdi, %rdi
+; FALLBACK2-NEXT: orq %r8, %rdi
+; FALLBACK2-NEXT: addq %rsi, %rsi
+; FALLBACK2-NEXT: shlxq %rax, %rsi, %rsi
+; FALLBACK2-NEXT: orq %r9, %rsi
+; FALLBACK2-NEXT: addq %rcx, %rcx
+; FALLBACK2-NEXT: shlxq %rax, %rcx, %rax
+; FALLBACK2-NEXT: orq %r10, %rax
+; FALLBACK2-NEXT: movq %r11, 24(%rdx)
+; FALLBACK2-NEXT: movq %rax, 16(%rdx)
+; FALLBACK2-NEXT: movq %rsi, (%rdx)
+; FALLBACK2-NEXT: movq %rdi, 8(%rdx)
+; FALLBACK2-NEXT: retq
+;
+; FALLBACK3-LABEL: lshr_32bytes_dwordOff:
+; FALLBACK3: # %bb.0:
+; FALLBACK3-NEXT: movq (%rdi), %rax
+; FALLBACK3-NEXT: movq 8(%rdi), %r8
+; FALLBACK3-NEXT: movq 16(%rdi), %r9
+; FALLBACK3-NEXT: movq 24(%rdi), %rdi
+; FALLBACK3-NEXT: movzbl (%rsi), %esi
+; FALLBACK3-NEXT: movl %esi, %ecx
+; FALLBACK3-NEXT: shlb $5, %cl
+; FALLBACK3-NEXT: xorps %xmm0, %xmm0
+; FALLBACK3-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK3-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK3-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK3-NEXT: movq %r9, -{{[0-9]+}}(%rsp)
+; FALLBACK3-NEXT: movq %r8, -{{[0-9]+}}(%rsp)
+; FALLBACK3-NEXT: movq %rax, -{{[0-9]+}}(%rsp)
+; FALLBACK3-NEXT: andb $6, %sil
+; FALLBACK3-NEXT: movzbl %sil, %eax
+; FALLBACK3-NEXT: movq -56(%rsp,%rax,4), %rsi
+; FALLBACK3-NEXT: movq -72(%rsp,%rax,4), %rdi
+; FALLBACK3-NEXT: movq -64(%rsp,%rax,4), %r8
+; FALLBACK3-NEXT: movq %r8, %r9
+; FALLBACK3-NEXT: shrdq %cl, %rsi, %r9
+; FALLBACK3-NEXT: movq -48(%rsp,%rax,4), %rax
+; FALLBACK3-NEXT: shrdq %cl, %rax, %rsi
+; FALLBACK3-NEXT: shrdq %cl, %r8, %rdi
+; FALLBACK3-NEXT: shrxq %rcx, %rax, %rax
+; FALLBACK3-NEXT: movq %rsi, 16(%rdx)
+; FALLBACK3-NEXT: movq %rax, 24(%rdx)
+; FALLBACK3-NEXT: movq %rdi, (%rdx)
+; FALLBACK3-NEXT: movq %r9, 8(%rdx)
+; FALLBACK3-NEXT: retq
+;
+; FALLBACK4-LABEL: lshr_32bytes_dwordOff:
+; FALLBACK4: # %bb.0:
+; FALLBACK4-NEXT: pushq %rbx
+; FALLBACK4-NEXT: movups (%rdi), %xmm0
+; FALLBACK4-NEXT: movups 16(%rdi), %xmm1
+; FALLBACK4-NEXT: movzbl (%rsi), %ecx
+; FALLBACK4-NEXT: movl %ecx, %eax
+; FALLBACK4-NEXT: shlb $5, %al
+; FALLBACK4-NEXT: xorps %xmm2, %xmm2
+; FALLBACK4-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; FALLBACK4-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; FALLBACK4-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; FALLBACK4-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK4-NEXT: andb $6, %cl
+; FALLBACK4-NEXT: movzbl %cl, %r9d
+; FALLBACK4-NEXT: movq -64(%rsp,%r9,4), %r10
+; FALLBACK4-NEXT: movq -56(%rsp,%r9,4), %r8
+; FALLBACK4-NEXT: movl %eax, %ecx
+; FALLBACK4-NEXT: shrq %cl, %r10
+; FALLBACK4-NEXT: movl %eax, %esi
+; FALLBACK4-NEXT: notb %sil
+; FALLBACK4-NEXT: leaq (%r8,%r8), %rdi
+; FALLBACK4-NEXT: movl %esi, %ecx
+; FALLBACK4-NEXT: shlq %cl, %rdi
+; FALLBACK4-NEXT: orq %r10, %rdi
+; FALLBACK4-NEXT: movq -48(%rsp,%r9,4), %r10
+; FALLBACK4-NEXT: movq %r10, %r11
+; FALLBACK4-NEXT: movl %eax, %ecx
+; FALLBACK4-NEXT: shrq %cl, %r11
+; FALLBACK4-NEXT: movq -40(%rsp,%r9,4), %r9
+; FALLBACK4-NEXT: leaq (%r9,%r9), %rbx
+; FALLBACK4-NEXT: movl %esi, %ecx
+; FALLBACK4-NEXT: shlq %cl, %rbx
+; FALLBACK4-NEXT: orq %r11, %rbx
+; FALLBACK4-NEXT: movl %eax, %ecx
+; FALLBACK4-NEXT: shrq %cl, %r8
+; FALLBACK4-NEXT: addq %r10, %r10
+; FALLBACK4-NEXT: movl %esi, %ecx
+; FALLBACK4-NEXT: shlq %cl, %r10
+; FALLBACK4-NEXT: orq %r8, %r10
+; FALLBACK4-NEXT: movl %eax, %ecx
+; FALLBACK4-NEXT: shrq %cl, %r9
+; FALLBACK4-NEXT: movq %r9, 24(%rdx)
+; FALLBACK4-NEXT: movq %r10, 8(%rdx)
+; FALLBACK4-NEXT: movq %rbx, 16(%rdx)
+; FALLBACK4-NEXT: movq %rdi, (%rdx)
+; FALLBACK4-NEXT: popq %rbx
+; FALLBACK4-NEXT: retq
+;
+; FALLBACK5-LABEL: lshr_32bytes_dwordOff:
+; FALLBACK5: # %bb.0:
+; FALLBACK5-NEXT: movups (%rdi), %xmm0
+; FALLBACK5-NEXT: movups 16(%rdi), %xmm1
+; FALLBACK5-NEXT: movzbl (%rsi), %eax
+; FALLBACK5-NEXT: movl %eax, %ecx
+; FALLBACK5-NEXT: shlb $5, %cl
+; FALLBACK5-NEXT: xorps %xmm2, %xmm2
+; FALLBACK5-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; FALLBACK5-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; FALLBACK5-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; FALLBACK5-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK5-NEXT: andb $6, %al
+; FALLBACK5-NEXT: movzbl %al, %eax
+; FALLBACK5-NEXT: movq -48(%rsp,%rax,4), %rsi
+; FALLBACK5-NEXT: movq -56(%rsp,%rax,4), %rdi
+; FALLBACK5-NEXT: movq %rdi, %r8
+; FALLBACK5-NEXT: shrdq %cl, %rsi, %r8
+; FALLBACK5-NEXT: movq -72(%rsp,%rax,4), %r9
+; FALLBACK5-NEXT: movq -64(%rsp,%rax,4), %rax
+; FALLBACK5-NEXT: movq %rax, %r10
+; FALLBACK5-NEXT: shrdq %cl, %rdi, %r10
+; FALLBACK5-NEXT: shrdq %cl, %rax, %r9
+; FALLBACK5-NEXT: shrq %cl, %rsi
+; FALLBACK5-NEXT: movq %r10, 8(%rdx)
+; FALLBACK5-NEXT: movq %r8, 16(%rdx)
+; FALLBACK5-NEXT: movq %rsi, 24(%rdx)
+; FALLBACK5-NEXT: movq %r9, (%rdx)
+; FALLBACK5-NEXT: retq
+;
+; FALLBACK6-LABEL: lshr_32bytes_dwordOff:
+; FALLBACK6: # %bb.0:
+; FALLBACK6-NEXT: movups (%rdi), %xmm0
+; FALLBACK6-NEXT: movups 16(%rdi), %xmm1
+; FALLBACK6-NEXT: movzbl (%rsi), %ecx
+; FALLBACK6-NEXT: movl %ecx, %eax
+; FALLBACK6-NEXT: shlb $5, %al
+; FALLBACK6-NEXT: xorps %xmm2, %xmm2
+; FALLBACK6-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; FALLBACK6-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; FALLBACK6-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; FALLBACK6-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK6-NEXT: andb $6, %cl
+; FALLBACK6-NEXT: movzbl %cl, %ecx
+; FALLBACK6-NEXT: shrxq %rax, -72(%rsp,%rcx,4), %rsi
+; FALLBACK6-NEXT: movq -64(%rsp,%rcx,4), %rdi
+; FALLBACK6-NEXT: movq -56(%rsp,%rcx,4), %r8
+; FALLBACK6-NEXT: shrxq %rax, %r8, %r9
+; FALLBACK6-NEXT: movq -48(%rsp,%rcx,4), %rcx
+; FALLBACK6-NEXT: shrxq %rax, %rdi, %r10
+; FALLBACK6-NEXT: shrxq %rax, %rcx, %r11
+; FALLBACK6-NEXT: # kill: def $al killed $al killed $rax def $rax
+; FALLBACK6-NEXT: notb %al
+; FALLBACK6-NEXT: addq %rdi, %rdi
+; FALLBACK6-NEXT: shlxq %rax, %rdi, %rdi
+; FALLBACK6-NEXT: orq %rsi, %rdi
+; FALLBACK6-NEXT: addq %rcx, %rcx
+; FALLBACK6-NEXT: shlxq %rax, %rcx, %rcx
+; FALLBACK6-NEXT: orq %r9, %rcx
+; FALLBACK6-NEXT: addq %r8, %r8
+; FALLBACK6-NEXT: shlxq %rax, %r8, %rax
+; FALLBACK6-NEXT: orq %r10, %rax
+; FALLBACK6-NEXT: movq %r11, 24(%rdx)
+; FALLBACK6-NEXT: movq %rax, 8(%rdx)
+; FALLBACK6-NEXT: movq %rcx, 16(%rdx)
+; FALLBACK6-NEXT: movq %rdi, (%rdx)
+; FALLBACK6-NEXT: retq
+;
+; FALLBACK7-LABEL: lshr_32bytes_dwordOff:
+; FALLBACK7: # %bb.0:
+; FALLBACK7-NEXT: movups (%rdi), %xmm0
+; FALLBACK7-NEXT: movups 16(%rdi), %xmm1
+; FALLBACK7-NEXT: movzbl (%rsi), %eax
+; FALLBACK7-NEXT: movl %eax, %ecx
+; FALLBACK7-NEXT: shlb $5, %cl
+; FALLBACK7-NEXT: xorps %xmm2, %xmm2
+; FALLBACK7-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; FALLBACK7-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; FALLBACK7-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; FALLBACK7-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK7-NEXT: andb $6, %al
+; FALLBACK7-NEXT: movzbl %al, %eax
+; FALLBACK7-NEXT: movq -48(%rsp,%rax,4), %rsi
+; FALLBACK7-NEXT: movq -56(%rsp,%rax,4), %rdi
+; FALLBACK7-NEXT: movq %rdi, %r8
+; FALLBACK7-NEXT: shrdq %cl, %rsi, %r8
+; FALLBACK7-NEXT: movq -72(%rsp,%rax,4), %r9
+; FALLBACK7-NEXT: movq -64(%rsp,%rax,4), %rax
+; FALLBACK7-NEXT: movq %rax, %r10
+; FALLBACK7-NEXT: shrdq %cl, %rdi, %r10
+; FALLBACK7-NEXT: shrdq %cl, %rax, %r9
+; FALLBACK7-NEXT: shrxq %rcx, %rsi, %rax
+; FALLBACK7-NEXT: movq %r10, 8(%rdx)
+; FALLBACK7-NEXT: movq %r8, 16(%rdx)
+; FALLBACK7-NEXT: movq %rax, 24(%rdx)
+; FALLBACK7-NEXT: movq %r9, (%rdx)
+; FALLBACK7-NEXT: retq
+;
+; FALLBACK8-LABEL: lshr_32bytes_dwordOff:
+; FALLBACK8: # %bb.0:
+; FALLBACK8-NEXT: pushq %rbx
+; FALLBACK8-NEXT: vmovups (%rdi), %ymm0
+; FALLBACK8-NEXT: movzbl (%rsi), %ecx
+; FALLBACK8-NEXT: movl %ecx, %eax
+; FALLBACK8-NEXT: shlb $5, %al
+; FALLBACK8-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; FALLBACK8-NEXT: vmovups %ymm1, -{{[0-9]+}}(%rsp)
+; FALLBACK8-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
+; FALLBACK8-NEXT: andb $6, %cl
+; FALLBACK8-NEXT: movzbl %cl, %r9d
+; FALLBACK8-NEXT: movq -64(%rsp,%r9,4), %r10
+; FALLBACK8-NEXT: movq -56(%rsp,%r9,4), %r8
+; FALLBACK8-NEXT: movl %eax, %ecx
+; FALLBACK8-NEXT: shrq %cl, %r10
+; FALLBACK8-NEXT: movl %eax, %esi
+; FALLBACK8-NEXT: notb %sil
+; FALLBACK8-NEXT: leaq (%r8,%r8), %rdi
+; FALLBACK8-NEXT: movl %esi, %ecx
+; FALLBACK8-NEXT: shlq %cl, %rdi
+; FALLBACK8-NEXT: orq %r10, %rdi
+; FALLBACK8-NEXT: movq -48(%rsp,%r9,4), %r10
+; FALLBACK8-NEXT: movq %r10, %r11
+; FALLBACK8-NEXT: movl %eax, %ecx
+; FALLBACK8-NEXT: shrq %cl, %r11
+; FALLBACK8-NEXT: movq -40(%rsp,%r9,4), %r9
+; FALLBACK8-NEXT: leaq (%r9,%r9), %rbx
+; FALLBACK8-NEXT: movl %esi, %ecx
+; FALLBACK8-NEXT: shlq %cl, %rbx
+; FALLBACK8-NEXT: orq %r11, %rbx
+; FALLBACK8-NEXT: movl %eax, %ecx
+; FALLBACK8-NEXT: shrq %cl, %r8
+; FALLBACK8-NEXT: addq %r10, %r10
+; FALLBACK8-NEXT: movl %esi, %ecx
+; FALLBACK8-NEXT: shlq %cl, %r10
+; FALLBACK8-NEXT: orq %r8, %r10
+; FALLBACK8-NEXT: movl %eax, %ecx
+; FALLBACK8-NEXT: shrq %cl, %r9
+; FALLBACK8-NEXT: movq %r9, 24(%rdx)
+; FALLBACK8-NEXT: movq %r10, 8(%rdx)
+; FALLBACK8-NEXT: movq %rbx, 16(%rdx)
+; FALLBACK8-NEXT: movq %rdi, (%rdx)
+; FALLBACK8-NEXT: popq %rbx
+; FALLBACK8-NEXT: vzeroupper
+; FALLBACK8-NEXT: retq
+;
+; FALLBACK9-LABEL: lshr_32bytes_dwordOff:
+; FALLBACK9: # %bb.0:
+; FALLBACK9-NEXT: vmovups (%rdi), %ymm0
+; FALLBACK9-NEXT: movzbl (%rsi), %eax
+; FALLBACK9-NEXT: movl %eax, %ecx
+; FALLBACK9-NEXT: shlb $5, %cl
+; FALLBACK9-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; FALLBACK9-NEXT: vmovups %ymm1, -{{[0-9]+}}(%rsp)
+; FALLBACK9-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
+; FALLBACK9-NEXT: andb $6, %al
+; FALLBACK9-NEXT: movzbl %al, %eax
+; FALLBACK9-NEXT: movq -48(%rsp,%rax,4), %rsi
+; FALLBACK9-NEXT: movq -56(%rsp,%rax,4), %rdi
+; FALLBACK9-NEXT: movq %rdi, %r8
+; FALLBACK9-NEXT: shrdq %cl, %rsi, %r8
+; FALLBACK9-NEXT: movq -72(%rsp,%rax,4), %r9
+; FALLBACK9-NEXT: movq -64(%rsp,%rax,4), %rax
+; FALLBACK9-NEXT: movq %rax, %r10
+; FALLBACK9-NEXT: shrdq %cl, %rdi, %r10
+; FALLBACK9-NEXT: shrdq %cl, %rax, %r9
+; FALLBACK9-NEXT: shrq %cl, %rsi
+; FALLBACK9-NEXT: movq %r10, 8(%rdx)
+; FALLBACK9-NEXT: movq %r8, 16(%rdx)
+; FALLBACK9-NEXT: movq %rsi, 24(%rdx)
+; FALLBACK9-NEXT: movq %r9, (%rdx)
+; FALLBACK9-NEXT: vzeroupper
+; FALLBACK9-NEXT: retq
+;
+; FALLBACK10-LABEL: lshr_32bytes_dwordOff:
+; FALLBACK10: # %bb.0:
+; FALLBACK10-NEXT: vmovups (%rdi), %ymm0
+; FALLBACK10-NEXT: movzbl (%rsi), %ecx
+; FALLBACK10-NEXT: movl %ecx, %eax
+; FALLBACK10-NEXT: shlb $5, %al
+; FALLBACK10-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; FALLBACK10-NEXT: vmovups %ymm1, -{{[0-9]+}}(%rsp)
+; FALLBACK10-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
+; FALLBACK10-NEXT: andb $6, %cl
+; FALLBACK10-NEXT: movzbl %cl, %ecx
+; FALLBACK10-NEXT: shrxq %rax, -72(%rsp,%rcx,4), %rsi
+; FALLBACK10-NEXT: movq -64(%rsp,%rcx,4), %rdi
+; FALLBACK10-NEXT: movq -56(%rsp,%rcx,4), %r8
+; FALLBACK10-NEXT: shrxq %rax, %r8, %r9
+; FALLBACK10-NEXT: movq -48(%rsp,%rcx,4), %rcx
+; FALLBACK10-NEXT: shrxq %rax, %rdi, %r10
+; FALLBACK10-NEXT: shrxq %rax, %rcx, %r11
+; FALLBACK10-NEXT: # kill: def $al killed $al killed $rax def $rax
+; FALLBACK10-NEXT: notb %al
+; FALLBACK10-NEXT: addq %rdi, %rdi
+; FALLBACK10-NEXT: shlxq %rax, %rdi, %rdi
+; FALLBACK10-NEXT: orq %rsi, %rdi
+; FALLBACK10-NEXT: addq %rcx, %rcx
+; FALLBACK10-NEXT: shlxq %rax, %rcx, %rcx
+; FALLBACK10-NEXT: orq %r9, %rcx
+; FALLBACK10-NEXT: addq %r8, %r8
+; FALLBACK10-NEXT: shlxq %rax, %r8, %rax
+; FALLBACK10-NEXT: orq %r10, %rax
+; FALLBACK10-NEXT: movq %r11, 24(%rdx)
+; FALLBACK10-NEXT: movq %rax, 8(%rdx)
+; FALLBACK10-NEXT: movq %rcx, 16(%rdx)
+; FALLBACK10-NEXT: movq %rdi, (%rdx)
+; FALLBACK10-NEXT: vzeroupper
+; FALLBACK10-NEXT: retq
+;
+; FALLBACK11-LABEL: lshr_32bytes_dwordOff:
+; FALLBACK11: # %bb.0:
+; FALLBACK11-NEXT: vmovups (%rdi), %ymm0
+; FALLBACK11-NEXT: movzbl (%rsi), %eax
+; FALLBACK11-NEXT: movl %eax, %ecx
+; FALLBACK11-NEXT: shlb $5, %cl
+; FALLBACK11-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; FALLBACK11-NEXT: vmovups %ymm1, -{{[0-9]+}}(%rsp)
+; FALLBACK11-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
+; FALLBACK11-NEXT: andb $6, %al
+; FALLBACK11-NEXT: movzbl %al, %eax
+; FALLBACK11-NEXT: movq -48(%rsp,%rax,4), %rsi
+; FALLBACK11-NEXT: movq -56(%rsp,%rax,4), %rdi
+; FALLBACK11-NEXT: movq %rdi, %r8
+; FALLBACK11-NEXT: shrdq %cl, %rsi, %r8
+; FALLBACK11-NEXT: movq -72(%rsp,%rax,4), %r9
+; FALLBACK11-NEXT: movq -64(%rsp,%rax,4), %rax
+; FALLBACK11-NEXT: movq %rax, %r10
+; FALLBACK11-NEXT: shrdq %cl, %rdi, %r10
+; FALLBACK11-NEXT: shrdq %cl, %rax, %r9
+; FALLBACK11-NEXT: shrxq %rcx, %rsi, %rax
+; FALLBACK11-NEXT: movq %r10, 8(%rdx)
+; FALLBACK11-NEXT: movq %r8, 16(%rdx)
+; FALLBACK11-NEXT: movq %rax, 24(%rdx)
+; FALLBACK11-NEXT: movq %r9, (%rdx)
+; FALLBACK11-NEXT: vzeroupper
+; FALLBACK11-NEXT: retq
+;
+; FALLBACK12-LABEL: lshr_32bytes_dwordOff:
+; FALLBACK12: # %bb.0:
+; FALLBACK12-NEXT: pushq %rbx
+; FALLBACK12-NEXT: vmovups (%rdi), %ymm0
+; FALLBACK12-NEXT: movzbl (%rsi), %ecx
+; FALLBACK12-NEXT: movl %ecx, %eax
+; FALLBACK12-NEXT: shlb $5, %al
+; FALLBACK12-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; FALLBACK12-NEXT: vmovups %ymm1, -{{[0-9]+}}(%rsp)
+; FALLBACK12-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
+; FALLBACK12-NEXT: andb $6, %cl
+; FALLBACK12-NEXT: movzbl %cl, %r9d
+; FALLBACK12-NEXT: movq -64(%rsp,%r9,4), %r10
+; FALLBACK12-NEXT: movq -56(%rsp,%r9,4), %r8
+; FALLBACK12-NEXT: movl %eax, %ecx
+; FALLBACK12-NEXT: shrq %cl, %r10
+; FALLBACK12-NEXT: movl %eax, %esi
+; FALLBACK12-NEXT: notb %sil
+; FALLBACK12-NEXT: leaq (%r8,%r8), %rdi
+; FALLBACK12-NEXT: movl %esi, %ecx
+; FALLBACK12-NEXT: shlq %cl, %rdi
+; FALLBACK12-NEXT: orq %r10, %rdi
+; FALLBACK12-NEXT: movq -48(%rsp,%r9,4), %r10
+; FALLBACK12-NEXT: movq %r10, %r11
+; FALLBACK12-NEXT: movl %eax, %ecx
+; FALLBACK12-NEXT: shrq %cl, %r11
+; FALLBACK12-NEXT: movq -40(%rsp,%r9,4), %r9
+; FALLBACK12-NEXT: leaq (%r9,%r9), %rbx
+; FALLBACK12-NEXT: movl %esi, %ecx
+; FALLBACK12-NEXT: shlq %cl, %rbx
+; FALLBACK12-NEXT: orq %r11, %rbx
+; FALLBACK12-NEXT: movl %eax, %ecx
+; FALLBACK12-NEXT: shrq %cl, %r8
+; FALLBACK12-NEXT: addq %r10, %r10
+; FALLBACK12-NEXT: movl %esi, %ecx
+; FALLBACK12-NEXT: shlq %cl, %r10
+; FALLBACK12-NEXT: orq %r8, %r10
+; FALLBACK12-NEXT: movl %eax, %ecx
+; FALLBACK12-NEXT: shrq %cl, %r9
+; FALLBACK12-NEXT: movq %r9, 24(%rdx)
+; FALLBACK12-NEXT: movq %r10, 8(%rdx)
+; FALLBACK12-NEXT: movq %rbx, 16(%rdx)
+; FALLBACK12-NEXT: movq %rdi, (%rdx)
+; FALLBACK12-NEXT: popq %rbx
+; FALLBACK12-NEXT: vzeroupper
+; FALLBACK12-NEXT: retq
+;
+; FALLBACK13-LABEL: lshr_32bytes_dwordOff:
+; FALLBACK13: # %bb.0:
+; FALLBACK13-NEXT: vmovups (%rdi), %ymm0
+; FALLBACK13-NEXT: movzbl (%rsi), %eax
+; FALLBACK13-NEXT: movl %eax, %ecx
+; FALLBACK13-NEXT: shlb $5, %cl
+; FALLBACK13-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; FALLBACK13-NEXT: vmovups %ymm1, -{{[0-9]+}}(%rsp)
+; FALLBACK13-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
+; FALLBACK13-NEXT: andb $6, %al
+; FALLBACK13-NEXT: movzbl %al, %eax
+; FALLBACK13-NEXT: movq -48(%rsp,%rax,4), %rsi
+; FALLBACK13-NEXT: movq -56(%rsp,%rax,4), %rdi
+; FALLBACK13-NEXT: movq %rdi, %r8
+; FALLBACK13-NEXT: shrdq %cl, %rsi, %r8
+; FALLBACK13-NEXT: movq -72(%rsp,%rax,4), %r9
+; FALLBACK13-NEXT: movq -64(%rsp,%rax,4), %rax
+; FALLBACK13-NEXT: movq %rax, %r10
+; FALLBACK13-NEXT: shrdq %cl, %rdi, %r10
+; FALLBACK13-NEXT: shrdq %cl, %rax, %r9
+; FALLBACK13-NEXT: shrq %cl, %rsi
+; FALLBACK13-NEXT: movq %r10, 8(%rdx)
+; FALLBACK13-NEXT: movq %r8, 16(%rdx)
+; FALLBACK13-NEXT: movq %rsi, 24(%rdx)
+; FALLBACK13-NEXT: movq %r9, (%rdx)
+; FALLBACK13-NEXT: vzeroupper
+; FALLBACK13-NEXT: retq
+;
+; FALLBACK14-LABEL: lshr_32bytes_dwordOff:
+; FALLBACK14: # %bb.0:
+; FALLBACK14-NEXT: vmovups (%rdi), %ymm0
+; FALLBACK14-NEXT: movzbl (%rsi), %ecx
+; FALLBACK14-NEXT: movl %ecx, %eax
+; FALLBACK14-NEXT: shlb $5, %al
+; FALLBACK14-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; FALLBACK14-NEXT: vmovups %ymm1, -{{[0-9]+}}(%rsp)
+; FALLBACK14-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
+; FALLBACK14-NEXT: andb $6, %cl
+; FALLBACK14-NEXT: movzbl %cl, %ecx
+; FALLBACK14-NEXT: shrxq %rax, -72(%rsp,%rcx,4), %rsi
+; FALLBACK14-NEXT: movq -64(%rsp,%rcx,4), %rdi
+; FALLBACK14-NEXT: movq -56(%rsp,%rcx,4), %r8
+; FALLBACK14-NEXT: shrxq %rax, %r8, %r9
+; FALLBACK14-NEXT: movq -48(%rsp,%rcx,4), %rcx
+; FALLBACK14-NEXT: shrxq %rax, %rdi, %r10
+; FALLBACK14-NEXT: shrxq %rax, %rcx, %r11
+; FALLBACK14-NEXT: # kill: def $al killed $al killed $rax def $rax
+; FALLBACK14-NEXT: notb %al
+; FALLBACK14-NEXT: addq %rdi, %rdi
+; FALLBACK14-NEXT: shlxq %rax, %rdi, %rdi
+; FALLBACK14-NEXT: orq %rsi, %rdi
+; FALLBACK14-NEXT: addq %rcx, %rcx
+; FALLBACK14-NEXT: shlxq %rax, %rcx, %rcx
+; FALLBACK14-NEXT: orq %r9, %rcx
+; FALLBACK14-NEXT: addq %r8, %r8
+; FALLBACK14-NEXT: shlxq %rax, %r8, %rax
+; FALLBACK14-NEXT: orq %r10, %rax
+; FALLBACK14-NEXT: movq %r11, 24(%rdx)
+; FALLBACK14-NEXT: movq %rax, 8(%rdx)
+; FALLBACK14-NEXT: movq %rcx, 16(%rdx)
+; FALLBACK14-NEXT: movq %rdi, (%rdx)
+; FALLBACK14-NEXT: vzeroupper
+; FALLBACK14-NEXT: retq
+;
+; FALLBACK15-LABEL: lshr_32bytes_dwordOff:
+; FALLBACK15: # %bb.0:
+; FALLBACK15-NEXT: vmovups (%rdi), %ymm0
+; FALLBACK15-NEXT: movzbl (%rsi), %eax
+; FALLBACK15-NEXT: movl %eax, %ecx
+; FALLBACK15-NEXT: shlb $5, %cl
+; FALLBACK15-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; FALLBACK15-NEXT: vmovups %ymm1, -{{[0-9]+}}(%rsp)
+; FALLBACK15-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
+; FALLBACK15-NEXT: andb $6, %al
+; FALLBACK15-NEXT: movzbl %al, %eax
+; FALLBACK15-NEXT: movq -48(%rsp,%rax,4), %rsi
+; FALLBACK15-NEXT: movq -56(%rsp,%rax,4), %rdi
+; FALLBACK15-NEXT: movq %rdi, %r8
+; FALLBACK15-NEXT: shrdq %cl, %rsi, %r8
+; FALLBACK15-NEXT: movq -72(%rsp,%rax,4), %r9
+; FALLBACK15-NEXT: movq -64(%rsp,%rax,4), %rax
+; FALLBACK15-NEXT: movq %rax, %r10
+; FALLBACK15-NEXT: shrdq %cl, %rdi, %r10
+; FALLBACK15-NEXT: shrdq %cl, %rax, %r9
+; FALLBACK15-NEXT: shrxq %rcx, %rsi, %rax
+; FALLBACK15-NEXT: movq %r10, 8(%rdx)
+; FALLBACK15-NEXT: movq %r8, 16(%rdx)
+; FALLBACK15-NEXT: movq %rax, 24(%rdx)
+; FALLBACK15-NEXT: movq %r9, (%rdx)
+; FALLBACK15-NEXT: vzeroupper
+; FALLBACK15-NEXT: retq
+;
+; X86-SSE2-LABEL: lshr_32bytes_dwordOff:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: pushl %ebp
+; X86-SSE2-NEXT: pushl %ebx
+; X86-SSE2-NEXT: pushl %edi
+; X86-SSE2-NEXT: pushl %esi
+; X86-SSE2-NEXT: subl $92, %esp
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: movl (%eax), %ecx
+; X86-SSE2-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-SSE2-NEXT: movl 4(%eax), %ecx
+; X86-SSE2-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-SSE2-NEXT: movl 8(%eax), %esi
+; X86-SSE2-NEXT: movl 12(%eax), %edi
+; X86-SSE2-NEXT: movl 16(%eax), %ebx
+; X86-SSE2-NEXT: movl 20(%eax), %ebp
+; X86-SSE2-NEXT: movl 24(%eax), %edx
+; X86-SSE2-NEXT: movl 28(%eax), %ecx
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: movzbl (%eax), %eax
+; X86-SSE2-NEXT: xorps %xmm0, %xmm0
+; X86-SSE2-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; X86-SSE2-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-SSE2-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-SSE2-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; X86-SSE2-NEXT: movl %ebp, {{[0-9]+}}(%esp)
+; X86-SSE2-NEXT: movl %ebx, {{[0-9]+}}(%esp)
+; X86-SSE2-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; X86-SSE2-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; X86-SSE2-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-SSE2-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-SSE2-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-SSE2-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-SSE2-NEXT: andl $7, %eax
+; X86-SSE2-NEXT: movl 16(%esp,%eax,4), %ecx
+; X86-SSE2-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-SSE2-NEXT: movl 20(%esp,%eax,4), %ecx
+; X86-SSE2-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-SSE2-NEXT: movl 28(%esp,%eax,4), %esi
+; X86-SSE2-NEXT: movl 24(%esp,%eax,4), %edi
+; X86-SSE2-NEXT: movl 36(%esp,%eax,4), %ebx
+; X86-SSE2-NEXT: movl 32(%esp,%eax,4), %ebp
+; X86-SSE2-NEXT: movl 44(%esp,%eax,4), %edx
+; X86-SSE2-NEXT: movl 40(%esp,%eax,4), %ecx
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: movl %ecx, 24(%eax)
+; X86-SSE2-NEXT: movl %edx, 28(%eax)
+; X86-SSE2-NEXT: movl %ebp, 16(%eax)
+; X86-SSE2-NEXT: movl %ebx, 20(%eax)
+; X86-SSE2-NEXT: movl %edi, 8(%eax)
+; X86-SSE2-NEXT: movl %esi, 12(%eax)
+; X86-SSE2-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-SSE2-NEXT: movl %ecx, (%eax)
+; X86-SSE2-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-SSE2-NEXT: movl %ecx, 4(%eax)
+; X86-SSE2-NEXT: addl $92, %esp
+; X86-SSE2-NEXT: popl %esi
+; X86-SSE2-NEXT: popl %edi
+; X86-SSE2-NEXT: popl %ebx
+; X86-SSE2-NEXT: popl %ebp
+; X86-SSE2-NEXT: retl
+;
+; X86-SSE42-LABEL: lshr_32bytes_dwordOff:
+; X86-SSE42: # %bb.0:
+; X86-SSE42-NEXT: subl $76, %esp
+; X86-SSE42-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE42-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE42-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-SSE42-NEXT: movups (%edx), %xmm0
+; X86-SSE42-NEXT: movups 16(%edx), %xmm1
+; X86-SSE42-NEXT: movzbl (%ecx), %ecx
+; X86-SSE42-NEXT: xorps %xmm2, %xmm2
+; X86-SSE42-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-SSE42-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-SSE42-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-SSE42-NEXT: movaps %xmm0, (%esp)
+; X86-SSE42-NEXT: andl $7, %ecx
+; X86-SSE42-NEXT: movups (%esp,%ecx,4), %xmm0
+; X86-SSE42-NEXT: movups 16(%esp,%ecx,4), %xmm1
+; X86-SSE42-NEXT: movups %xmm1, 16(%eax)
+; X86-SSE42-NEXT: movups %xmm0, (%eax)
+; X86-SSE42-NEXT: addl $76, %esp
+; X86-SSE42-NEXT: retl
+;
+; X86-AVX-LABEL: lshr_32bytes_dwordOff:
+; X86-AVX: # %bb.0:
+; X86-AVX-NEXT: subl $76, %esp
+; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-AVX-NEXT: vmovups (%edx), %ymm0
+; X86-AVX-NEXT: movzbl (%ecx), %ecx
+; X86-AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; X86-AVX-NEXT: vmovups %ymm1, {{[0-9]+}}(%esp)
+; X86-AVX-NEXT: vmovups %ymm0, (%esp)
+; X86-AVX-NEXT: andl $7, %ecx
+; X86-AVX-NEXT: vmovups (%esp,%ecx,4), %xmm0
+; X86-AVX-NEXT: vmovups 16(%esp,%ecx,4), %xmm1
+; X86-AVX-NEXT: vmovups %xmm1, 16(%eax)
+; X86-AVX-NEXT: vmovups %xmm0, (%eax)
+; X86-AVX-NEXT: addl $76, %esp
+; X86-AVX-NEXT: vzeroupper
+; X86-AVX-NEXT: retl
+ %src = load i256, ptr %src.ptr, align 1
+ %dwordOff = load i256, ptr %dwordOff.ptr, align 1
+ %bitOff = shl i256 %dwordOff, 5
+ %res = lshr i256 %src, %bitOff
+ store i256 %res, ptr %dst, align 1
+ ret void
+}
+
+define void @lshr_32bytes_qwordOff(ptr %src.ptr, ptr %qwordOff.ptr, ptr %dst) nounwind {
+; X64-SSE2-LABEL: lshr_32bytes_qwordOff:
; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: movq (%rdi), %rax
; X64-SSE2-NEXT: movq 8(%rdi), %rcx
; X64-SSE2-NEXT: movq 16(%rdi), %r8
; X64-SSE2-NEXT: movq 24(%rdi), %rdi
; X64-SSE2-NEXT: movzbl (%rsi), %esi
+; X64-SSE2-NEXT: xorps %xmm0, %xmm0
+; X64-SSE2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; X64-SSE2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; X64-SSE2-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
; X64-SSE2-NEXT: movq %r8, -{{[0-9]+}}(%rsp)
; X64-SSE2-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
; X64-SSE2-NEXT: movq %rax, -{{[0-9]+}}(%rsp)
-; X64-SSE2-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-SSE2-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-SSE2-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-SSE2-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-SSE2-NEXT: andl $31, %esi
-; X64-SSE2-NEXT: movq -64(%rsp,%rsi), %rax
-; X64-SSE2-NEXT: movq -56(%rsp,%rsi), %rcx
-; X64-SSE2-NEXT: movq -40(%rsp,%rsi), %rdi
-; X64-SSE2-NEXT: movq -48(%rsp,%rsi), %rsi
+; X64-SSE2-NEXT: andl $3, %esi
+; X64-SSE2-NEXT: movq -72(%rsp,%rsi,8), %rax
+; X64-SSE2-NEXT: movq -64(%rsp,%rsi,8), %rcx
+; X64-SSE2-NEXT: movq -48(%rsp,%rsi,8), %rdi
+; X64-SSE2-NEXT: movq -56(%rsp,%rsi,8), %rsi
; X64-SSE2-NEXT: movq %rsi, 16(%rdx)
; X64-SSE2-NEXT: movq %rdi, 24(%rdx)
; X64-SSE2-NEXT: movq %rax, (%rdx)
; X64-SSE2-NEXT: movq %rcx, 8(%rdx)
; X64-SSE2-NEXT: retq
;
-; X64-SSE42-LABEL: lshr_32bytes:
+; X64-SSE42-LABEL: lshr_32bytes_qwordOff:
; X64-SSE42: # %bb.0:
; X64-SSE42-NEXT: movups (%rdi), %xmm0
; X64-SSE42-NEXT: movups 16(%rdi), %xmm1
; X64-SSE42-NEXT: movzbl (%rsi), %eax
; X64-SSE42-NEXT: xorps %xmm2, %xmm2
-; X64-SSE42-NEXT: movups %xmm2, -{{[0-9]+}}(%rsp)
-; X64-SSE42-NEXT: movups %xmm2, -{{[0-9]+}}(%rsp)
-; X64-SSE42-NEXT: movups %xmm1, -{{[0-9]+}}(%rsp)
-; X64-SSE42-NEXT: movups %xmm0, -{{[0-9]+}}(%rsp)
-; X64-SSE42-NEXT: andl $31, %eax
-; X64-SSE42-NEXT: movups -64(%rsp,%rax), %xmm0
-; X64-SSE42-NEXT: movups -48(%rsp,%rax), %xmm1
+; X64-SSE42-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-SSE42-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-SSE42-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; X64-SSE42-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; X64-SSE42-NEXT: andl $3, %eax
+; X64-SSE42-NEXT: movups -72(%rsp,%rax,8), %xmm0
+; X64-SSE42-NEXT: movups -56(%rsp,%rax,8), %xmm1
; X64-SSE42-NEXT: movups %xmm1, 16(%rdx)
; X64-SSE42-NEXT: movups %xmm0, (%rdx)
; X64-SSE42-NEXT: retq
;
-; X64-AVX-LABEL: lshr_32bytes:
+; X64-AVX-LABEL: lshr_32bytes_qwordOff:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vmovups (%rdi), %ymm0
; X64-AVX-NEXT: movzbl (%rsi), %eax
; X64-AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X64-AVX-NEXT: vmovups %ymm1, -{{[0-9]+}}(%rsp)
; X64-AVX-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
-; X64-AVX-NEXT: andl $31, %eax
-; X64-AVX-NEXT: vmovups -64(%rsp,%rax), %xmm0
-; X64-AVX-NEXT: vmovups -48(%rsp,%rax), %xmm1
+; X64-AVX-NEXT: andl $3, %eax
+; X64-AVX-NEXT: vmovups -72(%rsp,%rax,8), %xmm0
+; X64-AVX-NEXT: vmovups -56(%rsp,%rax,8), %xmm1
; X64-AVX-NEXT: vmovups %xmm1, 16(%rdx)
; X64-AVX-NEXT: vmovups %xmm0, (%rdx)
; X64-AVX-NEXT: vzeroupper
; X64-AVX-NEXT: retq
;
-; X86-SSE2-LABEL: lshr_32bytes:
+; X86-SSE2-LABEL: lshr_32bytes_qwordOff:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pushl %ebp
; X86-SSE2-NEXT: pushl %ebx
; X86-SSE2-NEXT: pushl %edi
; X86-SSE2-NEXT: pushl %esi
-; X86-SSE2-NEXT: subl $72, %esp
+; X86-SSE2-NEXT: subl $92, %esp
; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE2-NEXT: movl (%eax), %ecx
; X86-SSE2-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-SSE2-NEXT: movl 4(%eax), %ecx
-; X86-SSE2-NEXT: movl %ecx, (%esp) # 4-byte Spill
+; X86-SSE2-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-SSE2-NEXT: movl 8(%eax), %esi
; X86-SSE2-NEXT: movl 12(%eax), %edi
; X86-SSE2-NEXT: movl 16(%eax), %ebx
@@ -1148,35 +5833,30 @@ define void @lshr_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; X86-SSE2-NEXT: movl 28(%eax), %ecx
; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE2-NEXT: movzbl (%eax), %eax
+; X86-SSE2-NEXT: xorps %xmm0, %xmm0
+; X86-SSE2-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
; X86-SSE2-NEXT: movl %ecx, {{[0-9]+}}(%esp)
; X86-SSE2-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-SSE2-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
; X86-SSE2-NEXT: movl %ebp, {{[0-9]+}}(%esp)
; X86-SSE2-NEXT: movl %ebx, {{[0-9]+}}(%esp)
; X86-SSE2-NEXT: movl %edi, {{[0-9]+}}(%esp)
; X86-SSE2-NEXT: movl %esi, {{[0-9]+}}(%esp)
-; X86-SSE2-NEXT: movl (%esp), %ecx # 4-byte Reload
+; X86-SSE2-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-SSE2-NEXT: movl %ecx, {{[0-9]+}}(%esp)
; X86-SSE2-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-SSE2-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; X86-SSE2-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-SSE2-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-SSE2-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-SSE2-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-SSE2-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-SSE2-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-SSE2-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-SSE2-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-SSE2-NEXT: andl $31, %eax
-; X86-SSE2-NEXT: movl 8(%esp,%eax), %ecx
+; X86-SSE2-NEXT: andl $3, %eax
+; X86-SSE2-NEXT: movl 16(%esp,%eax,8), %ecx
+; X86-SSE2-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-SSE2-NEXT: movl 20(%esp,%eax,8), %ecx
; X86-SSE2-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-SSE2-NEXT: movl 12(%esp,%eax), %ecx
-; X86-SSE2-NEXT: movl %ecx, (%esp) # 4-byte Spill
-; X86-SSE2-NEXT: movl 20(%esp,%eax), %esi
-; X86-SSE2-NEXT: movl 16(%esp,%eax), %edi
-; X86-SSE2-NEXT: movl 28(%esp,%eax), %ebx
-; X86-SSE2-NEXT: movl 24(%esp,%eax), %ebp
-; X86-SSE2-NEXT: movl 36(%esp,%eax), %edx
-; X86-SSE2-NEXT: movl 32(%esp,%eax), %ecx
+; X86-SSE2-NEXT: movl 28(%esp,%eax,8), %esi
+; X86-SSE2-NEXT: movl 24(%esp,%eax,8), %edi
+; X86-SSE2-NEXT: movl 36(%esp,%eax,8), %ebx
+; X86-SSE2-NEXT: movl 32(%esp,%eax,8), %ebp
+; X86-SSE2-NEXT: movl 44(%esp,%eax,8), %edx
+; X86-SSE2-NEXT: movl 40(%esp,%eax,8), %ecx
; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE2-NEXT: movl %ecx, 24(%eax)
; X86-SSE2-NEXT: movl %edx, 28(%eax)
@@ -1186,18 +5866,18 @@ define void @lshr_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; X86-SSE2-NEXT: movl %esi, 12(%eax)
; X86-SSE2-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-SSE2-NEXT: movl %ecx, (%eax)
-; X86-SSE2-NEXT: movl (%esp), %ecx # 4-byte Reload
+; X86-SSE2-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-SSE2-NEXT: movl %ecx, 4(%eax)
-; X86-SSE2-NEXT: addl $72, %esp
+; X86-SSE2-NEXT: addl $92, %esp
; X86-SSE2-NEXT: popl %esi
; X86-SSE2-NEXT: popl %edi
; X86-SSE2-NEXT: popl %ebx
; X86-SSE2-NEXT: popl %ebp
; X86-SSE2-NEXT: retl
;
-; X86-SSE42-LABEL: lshr_32bytes:
+; X86-SSE42-LABEL: lshr_32bytes_qwordOff:
; X86-SSE42: # %bb.0:
-; X86-SSE42-NEXT: subl $64, %esp
+; X86-SSE42-NEXT: subl $76, %esp
; X86-SSE42-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE42-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-SSE42-NEXT: movl {{[0-9]+}}(%esp), %edx
@@ -1205,21 +5885,21 @@ define void @lshr_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; X86-SSE42-NEXT: movups 16(%edx), %xmm1
; X86-SSE42-NEXT: movzbl (%ecx), %ecx
; X86-SSE42-NEXT: xorps %xmm2, %xmm2
-; X86-SSE42-NEXT: movups %xmm2, {{[0-9]+}}(%esp)
-; X86-SSE42-NEXT: movups %xmm2, {{[0-9]+}}(%esp)
-; X86-SSE42-NEXT: movups %xmm1, {{[0-9]+}}(%esp)
-; X86-SSE42-NEXT: movups %xmm0, (%esp)
-; X86-SSE42-NEXT: andl $31, %ecx
-; X86-SSE42-NEXT: movups (%esp,%ecx), %xmm0
-; X86-SSE42-NEXT: movups 16(%esp,%ecx), %xmm1
+; X86-SSE42-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-SSE42-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-SSE42-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-SSE42-NEXT: movaps %xmm0, (%esp)
+; X86-SSE42-NEXT: andl $3, %ecx
+; X86-SSE42-NEXT: movups (%esp,%ecx,8), %xmm0
+; X86-SSE42-NEXT: movups 16(%esp,%ecx,8), %xmm1
; X86-SSE42-NEXT: movups %xmm1, 16(%eax)
; X86-SSE42-NEXT: movups %xmm0, (%eax)
-; X86-SSE42-NEXT: addl $64, %esp
+; X86-SSE42-NEXT: addl $76, %esp
; X86-SSE42-NEXT: retl
;
-; X86-AVX-LABEL: lshr_32bytes:
+; X86-AVX-LABEL: lshr_32bytes_qwordOff:
; X86-AVX: # %bb.0:
-; X86-AVX-NEXT: subl $64, %esp
+; X86-AVX-NEXT: subl $76, %esp
; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %edx
@@ -1228,137 +5908,2830 @@ define void @lshr_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; X86-AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X86-AVX-NEXT: vmovups %ymm1, {{[0-9]+}}(%esp)
; X86-AVX-NEXT: vmovups %ymm0, (%esp)
-; X86-AVX-NEXT: andl $31, %ecx
-; X86-AVX-NEXT: vmovups (%esp,%ecx), %xmm0
-; X86-AVX-NEXT: vmovups 16(%esp,%ecx), %xmm1
+; X86-AVX-NEXT: andl $3, %ecx
+; X86-AVX-NEXT: vmovups (%esp,%ecx,8), %xmm0
+; X86-AVX-NEXT: vmovups 16(%esp,%ecx,8), %xmm1
; X86-AVX-NEXT: vmovups %xmm1, 16(%eax)
; X86-AVX-NEXT: vmovups %xmm0, (%eax)
-; X86-AVX-NEXT: addl $64, %esp
+; X86-AVX-NEXT: addl $76, %esp
; X86-AVX-NEXT: vzeroupper
; X86-AVX-NEXT: retl
%src = load i256, ptr %src.ptr, align 1
- %byteOff = load i256, ptr %byteOff.ptr, align 1
- %bitOff = shl i256 %byteOff, 3
+ %qwordOff = load i256, ptr %qwordOff.ptr, align 1
+ %bitOff = shl i256 %qwordOff, 6
%res = lshr i256 %src, %bitOff
store i256 %res, ptr %dst, align 1
ret void
}
+
define void @shl_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
-; X64-SSE2-LABEL: shl_32bytes:
+; FALLBACK0-LABEL: shl_32bytes:
+; FALLBACK0: # %bb.0:
+; FALLBACK0-NEXT: pushq %rbx
+; FALLBACK0-NEXT: movq (%rdi), %rcx
+; FALLBACK0-NEXT: movq 8(%rdi), %r8
+; FALLBACK0-NEXT: movq 16(%rdi), %r9
+; FALLBACK0-NEXT: movq 24(%rdi), %rdi
+; FALLBACK0-NEXT: movzbl (%rsi), %esi
+; FALLBACK0-NEXT: leal (,%rsi,8), %eax
+; FALLBACK0-NEXT: xorps %xmm0, %xmm0
+; FALLBACK0-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK0-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK0-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK0-NEXT: movq %r9, -{{[0-9]+}}(%rsp)
+; FALLBACK0-NEXT: movq %r8, -{{[0-9]+}}(%rsp)
+; FALLBACK0-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
+; FALLBACK0-NEXT: andb $24, %sil
+; FALLBACK0-NEXT: negb %sil
+; FALLBACK0-NEXT: movsbq %sil, %r10
+; FALLBACK0-NEXT: movq -32(%rsp,%r10), %r8
+; FALLBACK0-NEXT: movq -24(%rsp,%r10), %rdi
+; FALLBACK0-NEXT: movq %rdi, %r11
+; FALLBACK0-NEXT: movl %eax, %ecx
+; FALLBACK0-NEXT: shlq %cl, %r11
+; FALLBACK0-NEXT: movl %eax, %esi
+; FALLBACK0-NEXT: notb %sil
+; FALLBACK0-NEXT: movq %r8, %r9
+; FALLBACK0-NEXT: shrq %r9
+; FALLBACK0-NEXT: movl %esi, %ecx
+; FALLBACK0-NEXT: shrq %cl, %r9
+; FALLBACK0-NEXT: orq %r11, %r9
+; FALLBACK0-NEXT: movq -8(%rsp,%r10), %r11
+; FALLBACK0-NEXT: movl %eax, %ecx
+; FALLBACK0-NEXT: shlq %cl, %r11
+; FALLBACK0-NEXT: movq -16(%rsp,%r10), %r10
+; FALLBACK0-NEXT: movq %r10, %rbx
+; FALLBACK0-NEXT: shrq %rbx
+; FALLBACK0-NEXT: movl %esi, %ecx
+; FALLBACK0-NEXT: shrq %cl, %rbx
+; FALLBACK0-NEXT: orq %r11, %rbx
+; FALLBACK0-NEXT: movl %eax, %ecx
+; FALLBACK0-NEXT: shlq %cl, %r10
+; FALLBACK0-NEXT: shrq %rdi
+; FALLBACK0-NEXT: movl %esi, %ecx
+; FALLBACK0-NEXT: shrq %cl, %rdi
+; FALLBACK0-NEXT: orq %r10, %rdi
+; FALLBACK0-NEXT: movl %eax, %ecx
+; FALLBACK0-NEXT: shlq %cl, %r8
+; FALLBACK0-NEXT: movq %r8, (%rdx)
+; FALLBACK0-NEXT: movq %rdi, 16(%rdx)
+; FALLBACK0-NEXT: movq %rbx, 24(%rdx)
+; FALLBACK0-NEXT: movq %r9, 8(%rdx)
+; FALLBACK0-NEXT: popq %rbx
+; FALLBACK0-NEXT: retq
+;
+; FALLBACK1-LABEL: shl_32bytes:
+; FALLBACK1: # %bb.0:
+; FALLBACK1-NEXT: movq (%rdi), %rax
+; FALLBACK1-NEXT: movq 8(%rdi), %r8
+; FALLBACK1-NEXT: movq 16(%rdi), %r9
+; FALLBACK1-NEXT: movq 24(%rdi), %rdi
+; FALLBACK1-NEXT: movzbl (%rsi), %esi
+; FALLBACK1-NEXT: leal (,%rsi,8), %ecx
+; FALLBACK1-NEXT: xorps %xmm0, %xmm0
+; FALLBACK1-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK1-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK1-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK1-NEXT: movq %r9, -{{[0-9]+}}(%rsp)
+; FALLBACK1-NEXT: movq %r8, -{{[0-9]+}}(%rsp)
+; FALLBACK1-NEXT: movq %rax, -{{[0-9]+}}(%rsp)
+; FALLBACK1-NEXT: andb $24, %sil
+; FALLBACK1-NEXT: negb %sil
+; FALLBACK1-NEXT: movsbq %sil, %rax
+; FALLBACK1-NEXT: movq -24(%rsp,%rax), %rsi
+; FALLBACK1-NEXT: movq -16(%rsp,%rax), %rdi
+; FALLBACK1-NEXT: shldq %cl, %rsi, %rdi
+; FALLBACK1-NEXT: movq -40(%rsp,%rax), %r8
+; FALLBACK1-NEXT: movq -32(%rsp,%rax), %rax
+; FALLBACK1-NEXT: shldq %cl, %rax, %rsi
+; FALLBACK1-NEXT: shldq %cl, %r8, %rax
+; FALLBACK1-NEXT: # kill: def $cl killed $cl killed $ecx
+; FALLBACK1-NEXT: shlq %cl, %r8
+; FALLBACK1-NEXT: movq %rsi, 16(%rdx)
+; FALLBACK1-NEXT: movq %rdi, 24(%rdx)
+; FALLBACK1-NEXT: movq %r8, (%rdx)
+; FALLBACK1-NEXT: movq %rax, 8(%rdx)
+; FALLBACK1-NEXT: retq
+;
+; FALLBACK2-LABEL: shl_32bytes:
+; FALLBACK2: # %bb.0:
+; FALLBACK2-NEXT: movq (%rdi), %rcx
+; FALLBACK2-NEXT: movq 8(%rdi), %r8
+; FALLBACK2-NEXT: movq 16(%rdi), %r9
+; FALLBACK2-NEXT: movq 24(%rdi), %rdi
+; FALLBACK2-NEXT: movzbl (%rsi), %esi
+; FALLBACK2-NEXT: leal (,%rsi,8), %eax
+; FALLBACK2-NEXT: xorps %xmm0, %xmm0
+; FALLBACK2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK2-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK2-NEXT: movq %r9, -{{[0-9]+}}(%rsp)
+; FALLBACK2-NEXT: movq %r8, -{{[0-9]+}}(%rsp)
+; FALLBACK2-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
+; FALLBACK2-NEXT: andb $24, %sil
+; FALLBACK2-NEXT: negb %sil
+; FALLBACK2-NEXT: movsbq %sil, %rsi
+; FALLBACK2-NEXT: movq -40(%rsp,%rsi), %rdi
+; FALLBACK2-NEXT: movq -32(%rsp,%rsi), %rcx
+; FALLBACK2-NEXT: shlxq %rax, %rcx, %r8
+; FALLBACK2-NEXT: shlxq %rax, -16(%rsp,%rsi), %r9
+; FALLBACK2-NEXT: movq -24(%rsp,%rsi), %rsi
+; FALLBACK2-NEXT: shlxq %rax, %rsi, %r10
+; FALLBACK2-NEXT: shlxq %rax, %rdi, %r11
+; FALLBACK2-NEXT: # kill: def $al killed $al killed $rax def $rax
+; FALLBACK2-NEXT: notb %al
+; FALLBACK2-NEXT: shrq %rdi
+; FALLBACK2-NEXT: shrxq %rax, %rdi, %rdi
+; FALLBACK2-NEXT: orq %r8, %rdi
+; FALLBACK2-NEXT: shrq %rsi
+; FALLBACK2-NEXT: shrxq %rax, %rsi, %rsi
+; FALLBACK2-NEXT: orq %r9, %rsi
+; FALLBACK2-NEXT: shrq %rcx
+; FALLBACK2-NEXT: shrxq %rax, %rcx, %rax
+; FALLBACK2-NEXT: orq %r10, %rax
+; FALLBACK2-NEXT: movq %r11, (%rdx)
+; FALLBACK2-NEXT: movq %rax, 16(%rdx)
+; FALLBACK2-NEXT: movq %rsi, 24(%rdx)
+; FALLBACK2-NEXT: movq %rdi, 8(%rdx)
+; FALLBACK2-NEXT: retq
+;
+; FALLBACK3-LABEL: shl_32bytes:
+; FALLBACK3: # %bb.0:
+; FALLBACK3-NEXT: movq (%rdi), %rax
+; FALLBACK3-NEXT: movq 8(%rdi), %r8
+; FALLBACK3-NEXT: movq 16(%rdi), %r9
+; FALLBACK3-NEXT: movq 24(%rdi), %rdi
+; FALLBACK3-NEXT: movzbl (%rsi), %esi
+; FALLBACK3-NEXT: leal (,%rsi,8), %ecx
+; FALLBACK3-NEXT: xorps %xmm0, %xmm0
+; FALLBACK3-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK3-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK3-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK3-NEXT: movq %r9, -{{[0-9]+}}(%rsp)
+; FALLBACK3-NEXT: movq %r8, -{{[0-9]+}}(%rsp)
+; FALLBACK3-NEXT: movq %rax, -{{[0-9]+}}(%rsp)
+; FALLBACK3-NEXT: andb $24, %sil
+; FALLBACK3-NEXT: negb %sil
+; FALLBACK3-NEXT: movsbq %sil, %rax
+; FALLBACK3-NEXT: movq -24(%rsp,%rax), %rsi
+; FALLBACK3-NEXT: movq -16(%rsp,%rax), %rdi
+; FALLBACK3-NEXT: shldq %cl, %rsi, %rdi
+; FALLBACK3-NEXT: movq -40(%rsp,%rax), %r8
+; FALLBACK3-NEXT: movq -32(%rsp,%rax), %rax
+; FALLBACK3-NEXT: shldq %cl, %rax, %rsi
+; FALLBACK3-NEXT: shldq %cl, %r8, %rax
+; FALLBACK3-NEXT: shlxq %rcx, %r8, %rcx
+; FALLBACK3-NEXT: movq %rsi, 16(%rdx)
+; FALLBACK3-NEXT: movq %rdi, 24(%rdx)
+; FALLBACK3-NEXT: movq %rcx, (%rdx)
+; FALLBACK3-NEXT: movq %rax, 8(%rdx)
+; FALLBACK3-NEXT: retq
+;
+; FALLBACK4-LABEL: shl_32bytes:
+; FALLBACK4: # %bb.0:
+; FALLBACK4-NEXT: movups (%rdi), %xmm0
+; FALLBACK4-NEXT: movups 16(%rdi), %xmm1
+; FALLBACK4-NEXT: movzbl (%rsi), %ecx
+; FALLBACK4-NEXT: leal (,%rcx,8), %eax
+; FALLBACK4-NEXT: xorps %xmm2, %xmm2
+; FALLBACK4-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; FALLBACK4-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; FALLBACK4-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; FALLBACK4-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK4-NEXT: andb $24, %cl
+; FALLBACK4-NEXT: negb %cl
+; FALLBACK4-NEXT: movsbq %cl, %r8
+; FALLBACK4-NEXT: movq -16(%rsp,%r8), %r9
+; FALLBACK4-NEXT: movl %eax, %ecx
+; FALLBACK4-NEXT: shlq %cl, %r9
+; FALLBACK4-NEXT: movl %eax, %esi
+; FALLBACK4-NEXT: notb %sil
+; FALLBACK4-NEXT: movq -24(%rsp,%r8), %r10
+; FALLBACK4-NEXT: movq %r10, %rdi
+; FALLBACK4-NEXT: shrq %rdi
+; FALLBACK4-NEXT: movl %esi, %ecx
+; FALLBACK4-NEXT: shrq %cl, %rdi
+; FALLBACK4-NEXT: orq %r9, %rdi
+; FALLBACK4-NEXT: movl %eax, %ecx
+; FALLBACK4-NEXT: shlq %cl, %r10
+; FALLBACK4-NEXT: movq -40(%rsp,%r8), %r9
+; FALLBACK4-NEXT: movq -32(%rsp,%r8), %r8
+; FALLBACK4-NEXT: movq %r8, %r11
+; FALLBACK4-NEXT: shrq %r11
+; FALLBACK4-NEXT: movl %esi, %ecx
+; FALLBACK4-NEXT: shrq %cl, %r11
+; FALLBACK4-NEXT: orq %r10, %r11
+; FALLBACK4-NEXT: movl %eax, %ecx
+; FALLBACK4-NEXT: shlq %cl, %r8
+; FALLBACK4-NEXT: movq %r9, %r10
+; FALLBACK4-NEXT: shrq %r10
+; FALLBACK4-NEXT: movl %esi, %ecx
+; FALLBACK4-NEXT: shrq %cl, %r10
+; FALLBACK4-NEXT: orq %r8, %r10
+; FALLBACK4-NEXT: movl %eax, %ecx
+; FALLBACK4-NEXT: shlq %cl, %r9
+; FALLBACK4-NEXT: movq %r9, (%rdx)
+; FALLBACK4-NEXT: movq %r10, 8(%rdx)
+; FALLBACK4-NEXT: movq %r11, 16(%rdx)
+; FALLBACK4-NEXT: movq %rdi, 24(%rdx)
+; FALLBACK4-NEXT: retq
+;
+; FALLBACK5-LABEL: shl_32bytes:
+; FALLBACK5: # %bb.0:
+; FALLBACK5-NEXT: movups (%rdi), %xmm0
+; FALLBACK5-NEXT: movups 16(%rdi), %xmm1
+; FALLBACK5-NEXT: movzbl (%rsi), %eax
+; FALLBACK5-NEXT: leal (,%rax,8), %ecx
+; FALLBACK5-NEXT: xorps %xmm2, %xmm2
+; FALLBACK5-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; FALLBACK5-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; FALLBACK5-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; FALLBACK5-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK5-NEXT: andb $24, %al
+; FALLBACK5-NEXT: negb %al
+; FALLBACK5-NEXT: movsbq %al, %rax
+; FALLBACK5-NEXT: movq -24(%rsp,%rax), %rsi
+; FALLBACK5-NEXT: movq -16(%rsp,%rax), %rdi
+; FALLBACK5-NEXT: shldq %cl, %rsi, %rdi
+; FALLBACK5-NEXT: movq -40(%rsp,%rax), %r8
+; FALLBACK5-NEXT: movq -32(%rsp,%rax), %rax
+; FALLBACK5-NEXT: shldq %cl, %rax, %rsi
+; FALLBACK5-NEXT: movq %r8, %r9
+; FALLBACK5-NEXT: shlq %cl, %r9
+; FALLBACK5-NEXT: # kill: def $cl killed $cl killed $ecx
+; FALLBACK5-NEXT: shldq %cl, %r8, %rax
+; FALLBACK5-NEXT: movq %rax, 8(%rdx)
+; FALLBACK5-NEXT: movq %rsi, 16(%rdx)
+; FALLBACK5-NEXT: movq %rdi, 24(%rdx)
+; FALLBACK5-NEXT: movq %r9, (%rdx)
+; FALLBACK5-NEXT: retq
+;
+; FALLBACK6-LABEL: shl_32bytes:
+; FALLBACK6: # %bb.0:
+; FALLBACK6-NEXT: movups (%rdi), %xmm0
+; FALLBACK6-NEXT: movups 16(%rdi), %xmm1
+; FALLBACK6-NEXT: movzbl (%rsi), %ecx
+; FALLBACK6-NEXT: leal (,%rcx,8), %eax
+; FALLBACK6-NEXT: xorps %xmm2, %xmm2
+; FALLBACK6-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; FALLBACK6-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; FALLBACK6-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; FALLBACK6-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK6-NEXT: andb $24, %cl
+; FALLBACK6-NEXT: negb %cl
+; FALLBACK6-NEXT: movsbq %cl, %rcx
+; FALLBACK6-NEXT: shlxq %rax, -16(%rsp,%rcx), %rsi
+; FALLBACK6-NEXT: movq -24(%rsp,%rcx), %rdi
+; FALLBACK6-NEXT: shlxq %rax, %rdi, %r8
+; FALLBACK6-NEXT: movq -40(%rsp,%rcx), %r9
+; FALLBACK6-NEXT: movq -32(%rsp,%rcx), %rcx
+; FALLBACK6-NEXT: shlxq %rax, %rcx, %r10
+; FALLBACK6-NEXT: shlxq %rax, %r9, %r11
+; FALLBACK6-NEXT: # kill: def $al killed $al killed $rax def $rax
+; FALLBACK6-NEXT: notb %al
+; FALLBACK6-NEXT: shrq %rdi
+; FALLBACK6-NEXT: shrxq %rax, %rdi, %rdi
+; FALLBACK6-NEXT: orq %rsi, %rdi
+; FALLBACK6-NEXT: shrq %rcx
+; FALLBACK6-NEXT: shrxq %rax, %rcx, %rcx
+; FALLBACK6-NEXT: orq %r8, %rcx
+; FALLBACK6-NEXT: shrq %r9
+; FALLBACK6-NEXT: shrxq %rax, %r9, %rax
+; FALLBACK6-NEXT: orq %r10, %rax
+; FALLBACK6-NEXT: movq %r11, (%rdx)
+; FALLBACK6-NEXT: movq %rax, 8(%rdx)
+; FALLBACK6-NEXT: movq %rcx, 16(%rdx)
+; FALLBACK6-NEXT: movq %rdi, 24(%rdx)
+; FALLBACK6-NEXT: retq
+;
+; FALLBACK7-LABEL: shl_32bytes:
+; FALLBACK7: # %bb.0:
+; FALLBACK7-NEXT: movups (%rdi), %xmm0
+; FALLBACK7-NEXT: movups 16(%rdi), %xmm1
+; FALLBACK7-NEXT: movzbl (%rsi), %eax
+; FALLBACK7-NEXT: leal (,%rax,8), %ecx
+; FALLBACK7-NEXT: xorps %xmm2, %xmm2
+; FALLBACK7-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; FALLBACK7-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; FALLBACK7-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; FALLBACK7-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK7-NEXT: andb $24, %al
+; FALLBACK7-NEXT: negb %al
+; FALLBACK7-NEXT: movsbq %al, %rax
+; FALLBACK7-NEXT: movq -24(%rsp,%rax), %rsi
+; FALLBACK7-NEXT: movq -16(%rsp,%rax), %rdi
+; FALLBACK7-NEXT: shldq %cl, %rsi, %rdi
+; FALLBACK7-NEXT: movq -40(%rsp,%rax), %r8
+; FALLBACK7-NEXT: movq -32(%rsp,%rax), %rax
+; FALLBACK7-NEXT: shldq %cl, %rax, %rsi
+; FALLBACK7-NEXT: shlxq %rcx, %r8, %r9
+; FALLBACK7-NEXT: # kill: def $cl killed $cl killed $rcx
+; FALLBACK7-NEXT: shldq %cl, %r8, %rax
+; FALLBACK7-NEXT: movq %rax, 8(%rdx)
+; FALLBACK7-NEXT: movq %rsi, 16(%rdx)
+; FALLBACK7-NEXT: movq %rdi, 24(%rdx)
+; FALLBACK7-NEXT: movq %r9, (%rdx)
+; FALLBACK7-NEXT: retq
+;
+; FALLBACK8-LABEL: shl_32bytes:
+; FALLBACK8: # %bb.0:
+; FALLBACK8-NEXT: vmovups (%rdi), %ymm0
+; FALLBACK8-NEXT: movzbl (%rsi), %ecx
+; FALLBACK8-NEXT: leal (,%rcx,8), %eax
+; FALLBACK8-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; FALLBACK8-NEXT: vmovups %ymm1, -{{[0-9]+}}(%rsp)
+; FALLBACK8-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
+; FALLBACK8-NEXT: andb $24, %cl
+; FALLBACK8-NEXT: negb %cl
+; FALLBACK8-NEXT: movsbq %cl, %r8
+; FALLBACK8-NEXT: movq -16(%rsp,%r8), %r9
+; FALLBACK8-NEXT: movl %eax, %ecx
+; FALLBACK8-NEXT: shlq %cl, %r9
+; FALLBACK8-NEXT: movl %eax, %esi
+; FALLBACK8-NEXT: notb %sil
+; FALLBACK8-NEXT: movq -24(%rsp,%r8), %r10
+; FALLBACK8-NEXT: movq %r10, %rdi
+; FALLBACK8-NEXT: shrq %rdi
+; FALLBACK8-NEXT: movl %esi, %ecx
+; FALLBACK8-NEXT: shrq %cl, %rdi
+; FALLBACK8-NEXT: orq %r9, %rdi
+; FALLBACK8-NEXT: movl %eax, %ecx
+; FALLBACK8-NEXT: shlq %cl, %r10
+; FALLBACK8-NEXT: movq -40(%rsp,%r8), %r9
+; FALLBACK8-NEXT: movq -32(%rsp,%r8), %r8
+; FALLBACK8-NEXT: movq %r8, %r11
+; FALLBACK8-NEXT: shrq %r11
+; FALLBACK8-NEXT: movl %esi, %ecx
+; FALLBACK8-NEXT: shrq %cl, %r11
+; FALLBACK8-NEXT: orq %r10, %r11
+; FALLBACK8-NEXT: movl %eax, %ecx
+; FALLBACK8-NEXT: shlq %cl, %r8
+; FALLBACK8-NEXT: movq %r9, %r10
+; FALLBACK8-NEXT: shrq %r10
+; FALLBACK8-NEXT: movl %esi, %ecx
+; FALLBACK8-NEXT: shrq %cl, %r10
+; FALLBACK8-NEXT: orq %r8, %r10
+; FALLBACK8-NEXT: movl %eax, %ecx
+; FALLBACK8-NEXT: shlq %cl, %r9
+; FALLBACK8-NEXT: movq %r9, (%rdx)
+; FALLBACK8-NEXT: movq %r10, 8(%rdx)
+; FALLBACK8-NEXT: movq %r11, 16(%rdx)
+; FALLBACK8-NEXT: movq %rdi, 24(%rdx)
+; FALLBACK8-NEXT: vzeroupper
+; FALLBACK8-NEXT: retq
+;
+; FALLBACK9-LABEL: shl_32bytes:
+; FALLBACK9: # %bb.0:
+; FALLBACK9-NEXT: vmovups (%rdi), %ymm0
+; FALLBACK9-NEXT: movzbl (%rsi), %eax
+; FALLBACK9-NEXT: leal (,%rax,8), %ecx
+; FALLBACK9-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; FALLBACK9-NEXT: vmovups %ymm1, -{{[0-9]+}}(%rsp)
+; FALLBACK9-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
+; FALLBACK9-NEXT: andb $24, %al
+; FALLBACK9-NEXT: negb %al
+; FALLBACK9-NEXT: movsbq %al, %rax
+; FALLBACK9-NEXT: movq -24(%rsp,%rax), %rsi
+; FALLBACK9-NEXT: movq -16(%rsp,%rax), %rdi
+; FALLBACK9-NEXT: shldq %cl, %rsi, %rdi
+; FALLBACK9-NEXT: movq -40(%rsp,%rax), %r8
+; FALLBACK9-NEXT: movq -32(%rsp,%rax), %rax
+; FALLBACK9-NEXT: shldq %cl, %rax, %rsi
+; FALLBACK9-NEXT: movq %r8, %r9
+; FALLBACK9-NEXT: shlq %cl, %r9
+; FALLBACK9-NEXT: # kill: def $cl killed $cl killed $ecx
+; FALLBACK9-NEXT: shldq %cl, %r8, %rax
+; FALLBACK9-NEXT: movq %rax, 8(%rdx)
+; FALLBACK9-NEXT: movq %rsi, 16(%rdx)
+; FALLBACK9-NEXT: movq %rdi, 24(%rdx)
+; FALLBACK9-NEXT: movq %r9, (%rdx)
+; FALLBACK9-NEXT: vzeroupper
+; FALLBACK9-NEXT: retq
+;
+; FALLBACK10-LABEL: shl_32bytes:
+; FALLBACK10: # %bb.0:
+; FALLBACK10-NEXT: vmovups (%rdi), %ymm0
+; FALLBACK10-NEXT: movzbl (%rsi), %ecx
+; FALLBACK10-NEXT: leal (,%rcx,8), %eax
+; FALLBACK10-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; FALLBACK10-NEXT: vmovups %ymm1, -{{[0-9]+}}(%rsp)
+; FALLBACK10-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
+; FALLBACK10-NEXT: andb $24, %cl
+; FALLBACK10-NEXT: negb %cl
+; FALLBACK10-NEXT: movsbq %cl, %rcx
+; FALLBACK10-NEXT: shlxq %rax, -16(%rsp,%rcx), %rsi
+; FALLBACK10-NEXT: movq -24(%rsp,%rcx), %rdi
+; FALLBACK10-NEXT: shlxq %rax, %rdi, %r8
+; FALLBACK10-NEXT: movq -40(%rsp,%rcx), %r9
+; FALLBACK10-NEXT: movq -32(%rsp,%rcx), %rcx
+; FALLBACK10-NEXT: shlxq %rax, %rcx, %r10
+; FALLBACK10-NEXT: shlxq %rax, %r9, %r11
+; FALLBACK10-NEXT: # kill: def $al killed $al killed $rax def $rax
+; FALLBACK10-NEXT: notb %al
+; FALLBACK10-NEXT: shrq %rdi
+; FALLBACK10-NEXT: shrxq %rax, %rdi, %rdi
+; FALLBACK10-NEXT: orq %rsi, %rdi
+; FALLBACK10-NEXT: shrq %rcx
+; FALLBACK10-NEXT: shrxq %rax, %rcx, %rcx
+; FALLBACK10-NEXT: orq %r8, %rcx
+; FALLBACK10-NEXT: shrq %r9
+; FALLBACK10-NEXT: shrxq %rax, %r9, %rax
+; FALLBACK10-NEXT: orq %r10, %rax
+; FALLBACK10-NEXT: movq %r11, (%rdx)
+; FALLBACK10-NEXT: movq %rax, 8(%rdx)
+; FALLBACK10-NEXT: movq %rcx, 16(%rdx)
+; FALLBACK10-NEXT: movq %rdi, 24(%rdx)
+; FALLBACK10-NEXT: vzeroupper
+; FALLBACK10-NEXT: retq
+;
+; FALLBACK11-LABEL: shl_32bytes:
+; FALLBACK11: # %bb.0:
+; FALLBACK11-NEXT: vmovups (%rdi), %ymm0
+; FALLBACK11-NEXT: movzbl (%rsi), %eax
+; FALLBACK11-NEXT: leal (,%rax,8), %ecx
+; FALLBACK11-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; FALLBACK11-NEXT: vmovups %ymm1, -{{[0-9]+}}(%rsp)
+; FALLBACK11-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
+; FALLBACK11-NEXT: andb $24, %al
+; FALLBACK11-NEXT: negb %al
+; FALLBACK11-NEXT: movsbq %al, %rax
+; FALLBACK11-NEXT: movq -24(%rsp,%rax), %rsi
+; FALLBACK11-NEXT: movq -16(%rsp,%rax), %rdi
+; FALLBACK11-NEXT: shldq %cl, %rsi, %rdi
+; FALLBACK11-NEXT: movq -40(%rsp,%rax), %r8
+; FALLBACK11-NEXT: movq -32(%rsp,%rax), %rax
+; FALLBACK11-NEXT: shldq %cl, %rax, %rsi
+; FALLBACK11-NEXT: shlxq %rcx, %r8, %r9
+; FALLBACK11-NEXT: # kill: def $cl killed $cl killed $rcx
+; FALLBACK11-NEXT: shldq %cl, %r8, %rax
+; FALLBACK11-NEXT: movq %rax, 8(%rdx)
+; FALLBACK11-NEXT: movq %rsi, 16(%rdx)
+; FALLBACK11-NEXT: movq %rdi, 24(%rdx)
+; FALLBACK11-NEXT: movq %r9, (%rdx)
+; FALLBACK11-NEXT: vzeroupper
+; FALLBACK11-NEXT: retq
+;
+; FALLBACK12-LABEL: shl_32bytes:
+; FALLBACK12: # %bb.0:
+; FALLBACK12-NEXT: vmovups (%rdi), %ymm0
+; FALLBACK12-NEXT: movzbl (%rsi), %ecx
+; FALLBACK12-NEXT: leal (,%rcx,8), %eax
+; FALLBACK12-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; FALLBACK12-NEXT: vmovups %ymm1, -{{[0-9]+}}(%rsp)
+; FALLBACK12-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
+; FALLBACK12-NEXT: andb $24, %cl
+; FALLBACK12-NEXT: negb %cl
+; FALLBACK12-NEXT: movsbq %cl, %r8
+; FALLBACK12-NEXT: movq -16(%rsp,%r8), %r9
+; FALLBACK12-NEXT: movl %eax, %ecx
+; FALLBACK12-NEXT: shlq %cl, %r9
+; FALLBACK12-NEXT: movl %eax, %esi
+; FALLBACK12-NEXT: notb %sil
+; FALLBACK12-NEXT: movq -24(%rsp,%r8), %r10
+; FALLBACK12-NEXT: movq %r10, %rdi
+; FALLBACK12-NEXT: shrq %rdi
+; FALLBACK12-NEXT: movl %esi, %ecx
+; FALLBACK12-NEXT: shrq %cl, %rdi
+; FALLBACK12-NEXT: orq %r9, %rdi
+; FALLBACK12-NEXT: movl %eax, %ecx
+; FALLBACK12-NEXT: shlq %cl, %r10
+; FALLBACK12-NEXT: movq -40(%rsp,%r8), %r9
+; FALLBACK12-NEXT: movq -32(%rsp,%r8), %r8
+; FALLBACK12-NEXT: movq %r8, %r11
+; FALLBACK12-NEXT: shrq %r11
+; FALLBACK12-NEXT: movl %esi, %ecx
+; FALLBACK12-NEXT: shrq %cl, %r11
+; FALLBACK12-NEXT: orq %r10, %r11
+; FALLBACK12-NEXT: movl %eax, %ecx
+; FALLBACK12-NEXT: shlq %cl, %r8
+; FALLBACK12-NEXT: movq %r9, %r10
+; FALLBACK12-NEXT: shrq %r10
+; FALLBACK12-NEXT: movl %esi, %ecx
+; FALLBACK12-NEXT: shrq %cl, %r10
+; FALLBACK12-NEXT: orq %r8, %r10
+; FALLBACK12-NEXT: movl %eax, %ecx
+; FALLBACK12-NEXT: shlq %cl, %r9
+; FALLBACK12-NEXT: movq %r9, (%rdx)
+; FALLBACK12-NEXT: movq %r10, 8(%rdx)
+; FALLBACK12-NEXT: movq %r11, 16(%rdx)
+; FALLBACK12-NEXT: movq %rdi, 24(%rdx)
+; FALLBACK12-NEXT: vzeroupper
+; FALLBACK12-NEXT: retq
+;
+; FALLBACK13-LABEL: shl_32bytes:
+; FALLBACK13: # %bb.0:
+; FALLBACK13-NEXT: vmovups (%rdi), %ymm0
+; FALLBACK13-NEXT: movzbl (%rsi), %eax
+; FALLBACK13-NEXT: leal (,%rax,8), %ecx
+; FALLBACK13-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; FALLBACK13-NEXT: vmovups %ymm1, -{{[0-9]+}}(%rsp)
+; FALLBACK13-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
+; FALLBACK13-NEXT: andb $24, %al
+; FALLBACK13-NEXT: negb %al
+; FALLBACK13-NEXT: movsbq %al, %rax
+; FALLBACK13-NEXT: movq -24(%rsp,%rax), %rsi
+; FALLBACK13-NEXT: movq -16(%rsp,%rax), %rdi
+; FALLBACK13-NEXT: shldq %cl, %rsi, %rdi
+; FALLBACK13-NEXT: movq -40(%rsp,%rax), %r8
+; FALLBACK13-NEXT: movq -32(%rsp,%rax), %rax
+; FALLBACK13-NEXT: shldq %cl, %rax, %rsi
+; FALLBACK13-NEXT: movq %r8, %r9
+; FALLBACK13-NEXT: shlq %cl, %r9
+; FALLBACK13-NEXT: # kill: def $cl killed $cl killed $ecx
+; FALLBACK13-NEXT: shldq %cl, %r8, %rax
+; FALLBACK13-NEXT: movq %rax, 8(%rdx)
+; FALLBACK13-NEXT: movq %rsi, 16(%rdx)
+; FALLBACK13-NEXT: movq %rdi, 24(%rdx)
+; FALLBACK13-NEXT: movq %r9, (%rdx)
+; FALLBACK13-NEXT: vzeroupper
+; FALLBACK13-NEXT: retq
+;
+; FALLBACK14-LABEL: shl_32bytes:
+; FALLBACK14: # %bb.0:
+; FALLBACK14-NEXT: vmovups (%rdi), %ymm0
+; FALLBACK14-NEXT: movzbl (%rsi), %ecx
+; FALLBACK14-NEXT: leal (,%rcx,8), %eax
+; FALLBACK14-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; FALLBACK14-NEXT: vmovups %ymm1, -{{[0-9]+}}(%rsp)
+; FALLBACK14-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
+; FALLBACK14-NEXT: andb $24, %cl
+; FALLBACK14-NEXT: negb %cl
+; FALLBACK14-NEXT: movsbq %cl, %rcx
+; FALLBACK14-NEXT: shlxq %rax, -16(%rsp,%rcx), %rsi
+; FALLBACK14-NEXT: movq -24(%rsp,%rcx), %rdi
+; FALLBACK14-NEXT: shlxq %rax, %rdi, %r8
+; FALLBACK14-NEXT: movq -40(%rsp,%rcx), %r9
+; FALLBACK14-NEXT: movq -32(%rsp,%rcx), %rcx
+; FALLBACK14-NEXT: shlxq %rax, %rcx, %r10
+; FALLBACK14-NEXT: shlxq %rax, %r9, %r11
+; FALLBACK14-NEXT: # kill: def $al killed $al killed $rax def $rax
+; FALLBACK14-NEXT: notb %al
+; FALLBACK14-NEXT: shrq %rdi
+; FALLBACK14-NEXT: shrxq %rax, %rdi, %rdi
+; FALLBACK14-NEXT: orq %rsi, %rdi
+; FALLBACK14-NEXT: shrq %rcx
+; FALLBACK14-NEXT: shrxq %rax, %rcx, %rcx
+; FALLBACK14-NEXT: orq %r8, %rcx
+; FALLBACK14-NEXT: shrq %r9
+; FALLBACK14-NEXT: shrxq %rax, %r9, %rax
+; FALLBACK14-NEXT: orq %r10, %rax
+; FALLBACK14-NEXT: movq %r11, (%rdx)
+; FALLBACK14-NEXT: movq %rax, 8(%rdx)
+; FALLBACK14-NEXT: movq %rcx, 16(%rdx)
+; FALLBACK14-NEXT: movq %rdi, 24(%rdx)
+; FALLBACK14-NEXT: vzeroupper
+; FALLBACK14-NEXT: retq
+;
+; FALLBACK15-LABEL: shl_32bytes:
+; FALLBACK15: # %bb.0:
+; FALLBACK15-NEXT: vmovups (%rdi), %ymm0
+; FALLBACK15-NEXT: movzbl (%rsi), %eax
+; FALLBACK15-NEXT: leal (,%rax,8), %ecx
+; FALLBACK15-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; FALLBACK15-NEXT: vmovups %ymm1, -{{[0-9]+}}(%rsp)
+; FALLBACK15-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
+; FALLBACK15-NEXT: andb $24, %al
+; FALLBACK15-NEXT: negb %al
+; FALLBACK15-NEXT: movsbq %al, %rax
+; FALLBACK15-NEXT: movq -24(%rsp,%rax), %rsi
+; FALLBACK15-NEXT: movq -16(%rsp,%rax), %rdi
+; FALLBACK15-NEXT: shldq %cl, %rsi, %rdi
+; FALLBACK15-NEXT: movq -40(%rsp,%rax), %r8
+; FALLBACK15-NEXT: movq -32(%rsp,%rax), %rax
+; FALLBACK15-NEXT: shldq %cl, %rax, %rsi
+; FALLBACK15-NEXT: shlxq %rcx, %r8, %r9
+; FALLBACK15-NEXT: # kill: def $cl killed $cl killed $rcx
+; FALLBACK15-NEXT: shldq %cl, %r8, %rax
+; FALLBACK15-NEXT: movq %rax, 8(%rdx)
+; FALLBACK15-NEXT: movq %rsi, 16(%rdx)
+; FALLBACK15-NEXT: movq %rdi, 24(%rdx)
+; FALLBACK15-NEXT: movq %r9, (%rdx)
+; FALLBACK15-NEXT: vzeroupper
+; FALLBACK15-NEXT: retq
+;
+; FALLBACK16-LABEL: shl_32bytes:
+; FALLBACK16: # %bb.0:
+; FALLBACK16-NEXT: pushl %ebp
+; FALLBACK16-NEXT: pushl %ebx
+; FALLBACK16-NEXT: pushl %edi
+; FALLBACK16-NEXT: pushl %esi
+; FALLBACK16-NEXT: subl $108, %esp
+; FALLBACK16-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK16-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK16-NEXT: movl (%ecx), %edx
+; FALLBACK16-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: movl 4(%ecx), %edx
+; FALLBACK16-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: movl 8(%ecx), %esi
+; FALLBACK16-NEXT: movl 12(%ecx), %edi
+; FALLBACK16-NEXT: movl 16(%ecx), %ebx
+; FALLBACK16-NEXT: movb (%eax), %ah
+; FALLBACK16-NEXT: movl 20(%ecx), %ebp
+; FALLBACK16-NEXT: movl 24(%ecx), %edx
+; FALLBACK16-NEXT: movl 28(%ecx), %ecx
+; FALLBACK16-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movb %ah, %ch
+; FALLBACK16-NEXT: shlb $3, %ch
+; FALLBACK16-NEXT: xorps %xmm0, %xmm0
+; FALLBACK16-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movl %ebp, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movl %ebx, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK16-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK16-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: andb $28, %ah
+; FALLBACK16-NEXT: negb %ah
+; FALLBACK16-NEXT: movsbl %ah, %ebx
+; FALLBACK16-NEXT: movl 64(%esp,%ebx), %edi
+; FALLBACK16-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: movl 68(%esp,%ebx), %eax
+; FALLBACK16-NEXT: movl %eax, %esi
+; FALLBACK16-NEXT: movb %ch, %cl
+; FALLBACK16-NEXT: shll %cl, %esi
+; FALLBACK16-NEXT: movb %ch, %dl
+; FALLBACK16-NEXT: notb %dl
+; FALLBACK16-NEXT: shrl %edi
+; FALLBACK16-NEXT: movb %dl, %cl
+; FALLBACK16-NEXT: shrl %cl, %edi
+; FALLBACK16-NEXT: orl %esi, %edi
+; FALLBACK16-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: movl 76(%esp,%ebx), %edi
+; FALLBACK16-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: movb %ch, %cl
+; FALLBACK16-NEXT: shll %cl, %edi
+; FALLBACK16-NEXT: movl 72(%esp,%ebx), %esi
+; FALLBACK16-NEXT: movl %esi, %ebp
+; FALLBACK16-NEXT: shrl %ebp
+; FALLBACK16-NEXT: movb %dl, %cl
+; FALLBACK16-NEXT: shrl %cl, %ebp
+; FALLBACK16-NEXT: orl %edi, %ebp
+; FALLBACK16-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: movb %ch, %cl
+; FALLBACK16-NEXT: shll %cl, %esi
+; FALLBACK16-NEXT: shrl %eax
+; FALLBACK16-NEXT: movb %dl, %cl
+; FALLBACK16-NEXT: shrl %cl, %eax
+; FALLBACK16-NEXT: orl %esi, %eax
+; FALLBACK16-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: movl 84(%esp,%ebx), %esi
+; FALLBACK16-NEXT: movl %esi, %eax
+; FALLBACK16-NEXT: movb %ch, %cl
+; FALLBACK16-NEXT: shll %cl, %eax
+; FALLBACK16-NEXT: movl 80(%esp,%ebx), %edi
+; FALLBACK16-NEXT: movl %edi, %ebp
+; FALLBACK16-NEXT: shrl %ebp
+; FALLBACK16-NEXT: movb %dl, %cl
+; FALLBACK16-NEXT: shrl %cl, %ebp
+; FALLBACK16-NEXT: orl %eax, %ebp
+; FALLBACK16-NEXT: movb %ch, %cl
+; FALLBACK16-NEXT: shll %cl, %edi
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK16-NEXT: shrl %eax
+; FALLBACK16-NEXT: movb %dl, %cl
+; FALLBACK16-NEXT: shrl %cl, %eax
+; FALLBACK16-NEXT: orl %edi, %eax
+; FALLBACK16-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: movl 92(%esp,%ebx), %eax
+; FALLBACK16-NEXT: movb %ch, %cl
+; FALLBACK16-NEXT: shll %cl, %eax
+; FALLBACK16-NEXT: movl 88(%esp,%ebx), %edi
+; FALLBACK16-NEXT: movl %edi, %ebx
+; FALLBACK16-NEXT: shrl %ebx
+; FALLBACK16-NEXT: movb %dl, %cl
+; FALLBACK16-NEXT: shrl %cl, %ebx
+; FALLBACK16-NEXT: orl %eax, %ebx
+; FALLBACK16-NEXT: movb %ch, %cl
+; FALLBACK16-NEXT: shll %cl, %edi
+; FALLBACK16-NEXT: shrl %esi
+; FALLBACK16-NEXT: movb %dl, %cl
+; FALLBACK16-NEXT: shrl %cl, %esi
+; FALLBACK16-NEXT: orl %edi, %esi
+; FALLBACK16-NEXT: movb %ch, %cl
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK16-NEXT: shll %cl, %edx
+; FALLBACK16-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK16-NEXT: movl %edx, (%eax)
+; FALLBACK16-NEXT: movl %esi, 24(%eax)
+; FALLBACK16-NEXT: movl %ebx, 28(%eax)
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK16-NEXT: movl %ecx, 16(%eax)
+; FALLBACK16-NEXT: movl %ebp, 20(%eax)
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK16-NEXT: movl %ecx, 8(%eax)
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK16-NEXT: movl %ecx, 12(%eax)
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK16-NEXT: movl %ecx, 4(%eax)
+; FALLBACK16-NEXT: addl $108, %esp
+; FALLBACK16-NEXT: popl %esi
+; FALLBACK16-NEXT: popl %edi
+; FALLBACK16-NEXT: popl %ebx
+; FALLBACK16-NEXT: popl %ebp
+; FALLBACK16-NEXT: retl
+;
+; FALLBACK17-LABEL: shl_32bytes:
+; FALLBACK17: # %bb.0:
+; FALLBACK17-NEXT: pushl %ebp
+; FALLBACK17-NEXT: pushl %ebx
+; FALLBACK17-NEXT: pushl %edi
+; FALLBACK17-NEXT: pushl %esi
+; FALLBACK17-NEXT: subl $92, %esp
+; FALLBACK17-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK17-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK17-NEXT: movl (%eax), %edx
+; FALLBACK17-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK17-NEXT: movl 4(%eax), %edx
+; FALLBACK17-NEXT: movl %edx, (%esp) # 4-byte Spill
+; FALLBACK17-NEXT: movl 8(%eax), %esi
+; FALLBACK17-NEXT: movl 12(%eax), %edi
+; FALLBACK17-NEXT: movl 16(%eax), %ebx
+; FALLBACK17-NEXT: movb (%ecx), %ch
+; FALLBACK17-NEXT: movl 20(%eax), %ebp
+; FALLBACK17-NEXT: movl 24(%eax), %edx
+; FALLBACK17-NEXT: movl 28(%eax), %eax
+; FALLBACK17-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movb %ch, %cl
+; FALLBACK17-NEXT: shlb $3, %cl
+; FALLBACK17-NEXT: xorps %xmm0, %xmm0
+; FALLBACK17-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movl %ebp, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movl %ebx, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movl (%esp), %eax # 4-byte Reload
+; FALLBACK17-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK17-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: andb $28, %ch
+; FALLBACK17-NEXT: negb %ch
+; FALLBACK17-NEXT: movsbl %ch, %eax
+; FALLBACK17-NEXT: movl 56(%esp,%eax), %edx
+; FALLBACK17-NEXT: movl 60(%esp,%eax), %ebx
+; FALLBACK17-NEXT: movl %ebx, %esi
+; FALLBACK17-NEXT: shldl %cl, %edx, %esi
+; FALLBACK17-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK17-NEXT: movl 52(%esp,%eax), %esi
+; FALLBACK17-NEXT: movl %esi, (%esp) # 4-byte Spill
+; FALLBACK17-NEXT: shldl %cl, %esi, %edx
+; FALLBACK17-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK17-NEXT: movl 64(%esp,%eax), %edi
+; FALLBACK17-NEXT: movl 68(%esp,%eax), %ebp
+; FALLBACK17-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK17-NEXT: shldl %cl, %edi, %ebp
+; FALLBACK17-NEXT: shldl %cl, %ebx, %edi
+; FALLBACK17-NEXT: movl 48(%esp,%eax), %ebx
+; FALLBACK17-NEXT: movl 72(%esp,%eax), %edx
+; FALLBACK17-NEXT: movl 76(%esp,%eax), %esi
+; FALLBACK17-NEXT: shldl %cl, %edx, %esi
+; FALLBACK17-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK17-NEXT: shldl %cl, %eax, %edx
+; FALLBACK17-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK17-NEXT: movl %edx, 24(%eax)
+; FALLBACK17-NEXT: movl %esi, 28(%eax)
+; FALLBACK17-NEXT: movl %edi, 16(%eax)
+; FALLBACK17-NEXT: movl %ebp, 20(%eax)
+; FALLBACK17-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK17-NEXT: movl %edx, 8(%eax)
+; FALLBACK17-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK17-NEXT: movl %edx, 12(%eax)
+; FALLBACK17-NEXT: movl (%esp), %edx # 4-byte Reload
+; FALLBACK17-NEXT: shldl %cl, %ebx, %edx
+; FALLBACK17-NEXT: shll %cl, %ebx
+; FALLBACK17-NEXT: movl %ebx, (%eax)
+; FALLBACK17-NEXT: movl %edx, 4(%eax)
+; FALLBACK17-NEXT: addl $92, %esp
+; FALLBACK17-NEXT: popl %esi
+; FALLBACK17-NEXT: popl %edi
+; FALLBACK17-NEXT: popl %ebx
+; FALLBACK17-NEXT: popl %ebp
+; FALLBACK17-NEXT: retl
+;
+; FALLBACK18-LABEL: shl_32bytes:
+; FALLBACK18: # %bb.0:
+; FALLBACK18-NEXT: pushl %ebp
+; FALLBACK18-NEXT: pushl %ebx
+; FALLBACK18-NEXT: pushl %edi
+; FALLBACK18-NEXT: pushl %esi
+; FALLBACK18-NEXT: subl $108, %esp
+; FALLBACK18-NEXT: movl {{[0-9]+}}(%esp), %ebx
+; FALLBACK18-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK18-NEXT: movl (%eax), %ecx
+; FALLBACK18-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: movl 4(%eax), %ecx
+; FALLBACK18-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: movl 8(%eax), %esi
+; FALLBACK18-NEXT: movl 12(%eax), %edi
+; FALLBACK18-NEXT: movl 16(%eax), %ebp
+; FALLBACK18-NEXT: movzbl (%ebx), %ebx
+; FALLBACK18-NEXT: movl 20(%eax), %edx
+; FALLBACK18-NEXT: movl 24(%eax), %ecx
+; FALLBACK18-NEXT: movl 28(%eax), %eax
+; FALLBACK18-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl %ebx, %edx
+; FALLBACK18-NEXT: shlb $3, %dl
+; FALLBACK18-NEXT: xorps %xmm0, %xmm0
+; FALLBACK18-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl %ebp, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK18-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK18-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: andb $28, %bl
+; FALLBACK18-NEXT: negb %bl
+; FALLBACK18-NEXT: movsbl %bl, %esi
+; FALLBACK18-NEXT: movl 64(%esp,%esi), %ebx
+; FALLBACK18-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: movl 68(%esp,%esi), %eax
+; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: shlxl %edx, %eax, %edi
+; FALLBACK18-NEXT: movl %edx, %ecx
+; FALLBACK18-NEXT: notb %cl
+; FALLBACK18-NEXT: shrl %ebx
+; FALLBACK18-NEXT: shrxl %ecx, %ebx, %ebx
+; FALLBACK18-NEXT: orl %edi, %ebx
+; FALLBACK18-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: movl 72(%esp,%esi), %ebx
+; FALLBACK18-NEXT: movl %ebx, %edi
+; FALLBACK18-NEXT: shrl %edi
+; FALLBACK18-NEXT: shrxl %ecx, %edi, %eax
+; FALLBACK18-NEXT: movl 76(%esp,%esi), %edi
+; FALLBACK18-NEXT: shlxl %edx, %edi, %ebp
+; FALLBACK18-NEXT: orl %ebp, %eax
+; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: shlxl %edx, %ebx, %ebx
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK18-NEXT: shrl %eax
+; FALLBACK18-NEXT: shrxl %ecx, %eax, %eax
+; FALLBACK18-NEXT: orl %ebx, %eax
+; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: movl 80(%esp,%esi), %ebx
+; FALLBACK18-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: shrl %ebx
+; FALLBACK18-NEXT: shrxl %ecx, %ebx, %eax
+; FALLBACK18-NEXT: movl 84(%esp,%esi), %ebx
+; FALLBACK18-NEXT: shlxl %edx, %ebx, %ebp
+; FALLBACK18-NEXT: orl %ebp, %eax
+; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: shlxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK18-NEXT: shrl %edi
+; FALLBACK18-NEXT: shrxl %ecx, %edi, %edi
+; FALLBACK18-NEXT: orl %eax, %edi
+; FALLBACK18-NEXT: shlxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: shlxl %edx, 92(%esp,%esi), %ebp
+; FALLBACK18-NEXT: movl 88(%esp,%esi), %esi
+; FALLBACK18-NEXT: shlxl %edx, %esi, %eax
+; FALLBACK18-NEXT: shrl %esi
+; FALLBACK18-NEXT: shrxl %ecx, %esi, %esi
+; FALLBACK18-NEXT: orl %ebp, %esi
+; FALLBACK18-NEXT: shrl %ebx
+; FALLBACK18-NEXT: shrxl %ecx, %ebx, %edx
+; FALLBACK18-NEXT: orl %eax, %edx
+; FALLBACK18-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK18-NEXT: movl %ecx, (%eax)
+; FALLBACK18-NEXT: movl %edx, 24(%eax)
+; FALLBACK18-NEXT: movl %esi, 28(%eax)
+; FALLBACK18-NEXT: movl %edi, 16(%eax)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK18-NEXT: movl %ecx, 20(%eax)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK18-NEXT: movl %ecx, 8(%eax)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK18-NEXT: movl %ecx, 12(%eax)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK18-NEXT: movl %ecx, 4(%eax)
+; FALLBACK18-NEXT: addl $108, %esp
+; FALLBACK18-NEXT: popl %esi
+; FALLBACK18-NEXT: popl %edi
+; FALLBACK18-NEXT: popl %ebx
+; FALLBACK18-NEXT: popl %ebp
+; FALLBACK18-NEXT: retl
+;
+; FALLBACK19-LABEL: shl_32bytes:
+; FALLBACK19: # %bb.0:
+; FALLBACK19-NEXT: pushl %ebp
+; FALLBACK19-NEXT: pushl %ebx
+; FALLBACK19-NEXT: pushl %edi
+; FALLBACK19-NEXT: pushl %esi
+; FALLBACK19-NEXT: subl $92, %esp
+; FALLBACK19-NEXT: movl {{[0-9]+}}(%esp), %ebx
+; FALLBACK19-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK19-NEXT: movl (%ecx), %eax
+; FALLBACK19-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK19-NEXT: movl 4(%ecx), %eax
+; FALLBACK19-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK19-NEXT: movl 8(%ecx), %esi
+; FALLBACK19-NEXT: movl 12(%ecx), %edi
+; FALLBACK19-NEXT: movl 16(%ecx), %ebp
+; FALLBACK19-NEXT: movzbl (%ebx), %ebx
+; FALLBACK19-NEXT: movl 20(%ecx), %edx
+; FALLBACK19-NEXT: movl 24(%ecx), %eax
+; FALLBACK19-NEXT: movl 28(%ecx), %ecx
+; FALLBACK19-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movl %ebx, %ecx
+; FALLBACK19-NEXT: shlb $3, %cl
+; FALLBACK19-NEXT: xorps %xmm0, %xmm0
+; FALLBACK19-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movl %ebp, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK19-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK19-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: andb $28, %bl
+; FALLBACK19-NEXT: negb %bl
+; FALLBACK19-NEXT: movsbl %bl, %eax
+; FALLBACK19-NEXT: movl 56(%esp,%eax), %edx
+; FALLBACK19-NEXT: movl 60(%esp,%eax), %esi
+; FALLBACK19-NEXT: movl %esi, (%esp) # 4-byte Spill
+; FALLBACK19-NEXT: shldl %cl, %edx, %esi
+; FALLBACK19-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK19-NEXT: movl 52(%esp,%eax), %ebx
+; FALLBACK19-NEXT: shldl %cl, %ebx, %edx
+; FALLBACK19-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK19-NEXT: movl 64(%esp,%eax), %edi
+; FALLBACK19-NEXT: movl 68(%esp,%eax), %ebp
+; FALLBACK19-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK19-NEXT: shldl %cl, %edi, %ebp
+; FALLBACK19-NEXT: movl (%esp), %edx # 4-byte Reload
+; FALLBACK19-NEXT: shldl %cl, %edx, %edi
+; FALLBACK19-NEXT: movl 48(%esp,%eax), %edx
+; FALLBACK19-NEXT: movl %edx, (%esp) # 4-byte Spill
+; FALLBACK19-NEXT: movl 72(%esp,%eax), %edx
+; FALLBACK19-NEXT: movl 76(%esp,%eax), %esi
+; FALLBACK19-NEXT: shldl %cl, %edx, %esi
+; FALLBACK19-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK19-NEXT: shldl %cl, %eax, %edx
+; FALLBACK19-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK19-NEXT: movl %edx, 24(%eax)
+; FALLBACK19-NEXT: movl %esi, 28(%eax)
+; FALLBACK19-NEXT: movl %edi, 16(%eax)
+; FALLBACK19-NEXT: movl %ebp, 20(%eax)
+; FALLBACK19-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK19-NEXT: movl %edx, 8(%eax)
+; FALLBACK19-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK19-NEXT: movl %edx, 12(%eax)
+; FALLBACK19-NEXT: movl (%esp), %esi # 4-byte Reload
+; FALLBACK19-NEXT: shlxl %ecx, %esi, %edx
+; FALLBACK19-NEXT: movl %edx, (%eax)
+; FALLBACK19-NEXT: # kill: def $cl killed $cl killed $ecx
+; FALLBACK19-NEXT: shldl %cl, %esi, %ebx
+; FALLBACK19-NEXT: movl %ebx, 4(%eax)
+; FALLBACK19-NEXT: addl $92, %esp
+; FALLBACK19-NEXT: popl %esi
+; FALLBACK19-NEXT: popl %edi
+; FALLBACK19-NEXT: popl %ebx
+; FALLBACK19-NEXT: popl %ebp
+; FALLBACK19-NEXT: retl
+;
+; FALLBACK20-LABEL: shl_32bytes:
+; FALLBACK20: # %bb.0:
+; FALLBACK20-NEXT: pushl %ebp
+; FALLBACK20-NEXT: pushl %ebx
+; FALLBACK20-NEXT: pushl %edi
+; FALLBACK20-NEXT: pushl %esi
+; FALLBACK20-NEXT: subl $108, %esp
+; FALLBACK20-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK20-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK20-NEXT: movups (%ecx), %xmm0
+; FALLBACK20-NEXT: movups 16(%ecx), %xmm1
+; FALLBACK20-NEXT: movzbl (%eax), %ecx
+; FALLBACK20-NEXT: movb %cl, %dh
+; FALLBACK20-NEXT: shlb $3, %dh
+; FALLBACK20-NEXT: xorps %xmm2, %xmm2
+; FALLBACK20-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; FALLBACK20-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; FALLBACK20-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; FALLBACK20-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; FALLBACK20-NEXT: andb $28, %cl
+; FALLBACK20-NEXT: negb %cl
+; FALLBACK20-NEXT: movsbl %cl, %eax
+; FALLBACK20-NEXT: movl 84(%esp,%eax), %edi
+; FALLBACK20-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK20-NEXT: movb %dh, %cl
+; FALLBACK20-NEXT: shll %cl, %edi
+; FALLBACK20-NEXT: movb %dh, %dl
+; FALLBACK20-NEXT: notb %dl
+; FALLBACK20-NEXT: movl 80(%esp,%eax), %esi
+; FALLBACK20-NEXT: movl %eax, %ebx
+; FALLBACK20-NEXT: movl %esi, %eax
+; FALLBACK20-NEXT: shrl %eax
+; FALLBACK20-NEXT: movl %edx, %ecx
+; FALLBACK20-NEXT: shrl %cl, %eax
+; FALLBACK20-NEXT: orl %edi, %eax
+; FALLBACK20-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK20-NEXT: movb %dh, %cl
+; FALLBACK20-NEXT: shll %cl, %esi
+; FALLBACK20-NEXT: movl %ebx, %edi
+; FALLBACK20-NEXT: movl 76(%esp,%ebx), %ebp
+; FALLBACK20-NEXT: movl %ebp, %eax
+; FALLBACK20-NEXT: shrl %eax
+; FALLBACK20-NEXT: movl %edx, %ecx
+; FALLBACK20-NEXT: shrl %cl, %eax
+; FALLBACK20-NEXT: orl %esi, %eax
+; FALLBACK20-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK20-NEXT: movb %dh, %cl
+; FALLBACK20-NEXT: shll %cl, %ebp
+; FALLBACK20-NEXT: movl 72(%esp,%ebx), %ebx
+; FALLBACK20-NEXT: movl %ebx, %eax
+; FALLBACK20-NEXT: shrl %eax
+; FALLBACK20-NEXT: movl %edx, %ecx
+; FALLBACK20-NEXT: shrl %cl, %eax
+; FALLBACK20-NEXT: orl %ebp, %eax
+; FALLBACK20-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK20-NEXT: movb %dh, %cl
+; FALLBACK20-NEXT: shll %cl, %ebx
+; FALLBACK20-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK20-NEXT: movl 68(%esp,%edi), %ebp
+; FALLBACK20-NEXT: movl %ebp, %esi
+; FALLBACK20-NEXT: shrl %esi
+; FALLBACK20-NEXT: movl %edx, %ecx
+; FALLBACK20-NEXT: shrl %cl, %esi
+; FALLBACK20-NEXT: orl %ebx, %esi
+; FALLBACK20-NEXT: movb %dh, %cl
+; FALLBACK20-NEXT: shll %cl, %ebp
+; FALLBACK20-NEXT: movl 64(%esp,%edi), %ebx
+; FALLBACK20-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK20-NEXT: shrl %ebx
+; FALLBACK20-NEXT: movl %edx, %ecx
+; FALLBACK20-NEXT: shrl %cl, %ebx
+; FALLBACK20-NEXT: orl %ebp, %ebx
+; FALLBACK20-NEXT: movl 88(%esp,%edi), %ebp
+; FALLBACK20-NEXT: movl %ebp, %edi
+; FALLBACK20-NEXT: movb %dh, %cl
+; FALLBACK20-NEXT: shll %cl, %edi
+; FALLBACK20-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK20-NEXT: shrl %eax
+; FALLBACK20-NEXT: movl %edx, %ecx
+; FALLBACK20-NEXT: shrl %cl, %eax
+; FALLBACK20-NEXT: orl %edi, %eax
+; FALLBACK20-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK20-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK20-NEXT: movl 92(%esp,%eax), %edi
+; FALLBACK20-NEXT: movb %dh, %cl
+; FALLBACK20-NEXT: shll %cl, %edi
+; FALLBACK20-NEXT: shrl %ebp
+; FALLBACK20-NEXT: movl %edx, %ecx
+; FALLBACK20-NEXT: shrl %cl, %ebp
+; FALLBACK20-NEXT: orl %edi, %ebp
+; FALLBACK20-NEXT: movb %dh, %cl
+; FALLBACK20-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK20-NEXT: shll %cl, %edx
+; FALLBACK20-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK20-NEXT: movl %edx, (%eax)
+; FALLBACK20-NEXT: movl %ebp, 28(%eax)
+; FALLBACK20-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK20-NEXT: movl %ecx, 24(%eax)
+; FALLBACK20-NEXT: movl %ebx, 4(%eax)
+; FALLBACK20-NEXT: movl %esi, 8(%eax)
+; FALLBACK20-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK20-NEXT: movl %ecx, 12(%eax)
+; FALLBACK20-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK20-NEXT: movl %ecx, 16(%eax)
+; FALLBACK20-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK20-NEXT: movl %ecx, 20(%eax)
+; FALLBACK20-NEXT: addl $108, %esp
+; FALLBACK20-NEXT: popl %esi
+; FALLBACK20-NEXT: popl %edi
+; FALLBACK20-NEXT: popl %ebx
+; FALLBACK20-NEXT: popl %ebp
+; FALLBACK20-NEXT: retl
+;
+; FALLBACK21-LABEL: shl_32bytes:
+; FALLBACK21: # %bb.0:
+; FALLBACK21-NEXT: pushl %ebp
+; FALLBACK21-NEXT: pushl %ebx
+; FALLBACK21-NEXT: pushl %edi
+; FALLBACK21-NEXT: pushl %esi
+; FALLBACK21-NEXT: subl $92, %esp
+; FALLBACK21-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK21-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK21-NEXT: movups (%ecx), %xmm0
+; FALLBACK21-NEXT: movups 16(%ecx), %xmm1
+; FALLBACK21-NEXT: movzbl (%eax), %eax
+; FALLBACK21-NEXT: movl %eax, %ecx
+; FALLBACK21-NEXT: shlb $3, %cl
+; FALLBACK21-NEXT: xorps %xmm2, %xmm2
+; FALLBACK21-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; FALLBACK21-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; FALLBACK21-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; FALLBACK21-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; FALLBACK21-NEXT: andb $28, %al
+; FALLBACK21-NEXT: negb %al
+; FALLBACK21-NEXT: movsbl %al, %ebp
+; FALLBACK21-NEXT: movl 64(%esp,%ebp), %eax
+; FALLBACK21-NEXT: movl 68(%esp,%ebp), %edx
+; FALLBACK21-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK21-NEXT: shldl %cl, %eax, %edx
+; FALLBACK21-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK21-NEXT: movl 60(%esp,%ebp), %edx
+; FALLBACK21-NEXT: shldl %cl, %edx, %eax
+; FALLBACK21-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK21-NEXT: movl 56(%esp,%ebp), %edi
+; FALLBACK21-NEXT: shldl %cl, %edi, %edx
+; FALLBACK21-NEXT: movl %edx, (%esp) # 4-byte Spill
+; FALLBACK21-NEXT: movl 52(%esp,%ebp), %ebx
+; FALLBACK21-NEXT: shldl %cl, %ebx, %edi
+; FALLBACK21-NEXT: movl 72(%esp,%ebp), %edx
+; FALLBACK21-NEXT: movl %edx, %eax
+; FALLBACK21-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; FALLBACK21-NEXT: shldl %cl, %esi, %eax
+; FALLBACK21-NEXT: movl 48(%esp,%ebp), %esi
+; FALLBACK21-NEXT: movl 76(%esp,%ebp), %ebp
+; FALLBACK21-NEXT: shldl %cl, %edx, %ebp
+; FALLBACK21-NEXT: movl {{[0-9]+}}(%esp), %edx
+; FALLBACK21-NEXT: movl %ebp, 28(%edx)
+; FALLBACK21-NEXT: movl %eax, 24(%edx)
+; FALLBACK21-NEXT: movl %esi, %eax
+; FALLBACK21-NEXT: shll %cl, %eax
+; FALLBACK21-NEXT: shldl %cl, %esi, %ebx
+; FALLBACK21-NEXT: movl %ebx, 4(%edx)
+; FALLBACK21-NEXT: movl %edi, 8(%edx)
+; FALLBACK21-NEXT: movl (%esp), %ecx # 4-byte Reload
+; FALLBACK21-NEXT: movl %ecx, 12(%edx)
+; FALLBACK21-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK21-NEXT: movl %ecx, 16(%edx)
+; FALLBACK21-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK21-NEXT: movl %ecx, 20(%edx)
+; FALLBACK21-NEXT: movl %eax, (%edx)
+; FALLBACK21-NEXT: addl $92, %esp
+; FALLBACK21-NEXT: popl %esi
+; FALLBACK21-NEXT: popl %edi
+; FALLBACK21-NEXT: popl %ebx
+; FALLBACK21-NEXT: popl %ebp
+; FALLBACK21-NEXT: retl
+;
+; FALLBACK22-LABEL: shl_32bytes:
+; FALLBACK22: # %bb.0:
+; FALLBACK22-NEXT: pushl %ebp
+; FALLBACK22-NEXT: pushl %ebx
+; FALLBACK22-NEXT: pushl %edi
+; FALLBACK22-NEXT: pushl %esi
+; FALLBACK22-NEXT: subl $108, %esp
+; FALLBACK22-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK22-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK22-NEXT: movups (%ecx), %xmm0
+; FALLBACK22-NEXT: movups 16(%ecx), %xmm1
+; FALLBACK22-NEXT: movzbl (%eax), %ecx
+; FALLBACK22-NEXT: movl %ecx, %eax
+; FALLBACK22-NEXT: shlb $3, %al
+; FALLBACK22-NEXT: xorps %xmm2, %xmm2
+; FALLBACK22-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; FALLBACK22-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; FALLBACK22-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; FALLBACK22-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; FALLBACK22-NEXT: andb $28, %cl
+; FALLBACK22-NEXT: negb %cl
+; FALLBACK22-NEXT: movsbl %cl, %edx
+; FALLBACK22-NEXT: movl 84(%esp,%edx), %ecx
+; FALLBACK22-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: shlxl %eax, %ecx, %ecx
+; FALLBACK22-NEXT: movl 80(%esp,%edx), %esi
+; FALLBACK22-NEXT: shlxl %eax, %esi, %edi
+; FALLBACK22-NEXT: movl %eax, %ebx
+; FALLBACK22-NEXT: notb %bl
+; FALLBACK22-NEXT: shrl %esi
+; FALLBACK22-NEXT: shrxl %ebx, %esi, %esi
+; FALLBACK22-NEXT: orl %ecx, %esi
+; FALLBACK22-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: movl 76(%esp,%edx), %ecx
+; FALLBACK22-NEXT: movl %ecx, %esi
+; FALLBACK22-NEXT: shrl %esi
+; FALLBACK22-NEXT: shrxl %ebx, %esi, %esi
+; FALLBACK22-NEXT: orl %edi, %esi
+; FALLBACK22-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: shlxl %eax, %ecx, %ecx
+; FALLBACK22-NEXT: movl 72(%esp,%edx), %esi
+; FALLBACK22-NEXT: movl %esi, %edi
+; FALLBACK22-NEXT: shrl %edi
+; FALLBACK22-NEXT: shrxl %ebx, %edi, %edi
+; FALLBACK22-NEXT: orl %ecx, %edi
+; FALLBACK22-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: shlxl %eax, %esi, %ecx
+; FALLBACK22-NEXT: movl 68(%esp,%edx), %esi
+; FALLBACK22-NEXT: movl %esi, %edi
+; FALLBACK22-NEXT: shrl %edi
+; FALLBACK22-NEXT: shrxl %ebx, %edi, %ebp
+; FALLBACK22-NEXT: orl %ecx, %ebp
+; FALLBACK22-NEXT: shlxl %eax, %esi, %edi
+; FALLBACK22-NEXT: movl 64(%esp,%edx), %esi
+; FALLBACK22-NEXT: movl %esi, %ecx
+; FALLBACK22-NEXT: shrl %ecx
+; FALLBACK22-NEXT: shrxl %ebx, %ecx, %ecx
+; FALLBACK22-NEXT: orl %edi, %ecx
+; FALLBACK22-NEXT: shlxl %eax, %esi, %esi
+; FALLBACK22-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: shlxl %eax, 92(%esp,%edx), %edi
+; FALLBACK22-NEXT: movl 88(%esp,%edx), %edx
+; FALLBACK22-NEXT: shlxl %eax, %edx, %esi
+; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK22-NEXT: shrl %eax
+; FALLBACK22-NEXT: shrxl %ebx, %eax, %eax
+; FALLBACK22-NEXT: orl %esi, %eax
+; FALLBACK22-NEXT: shrl %edx
+; FALLBACK22-NEXT: shrxl %ebx, %edx, %edx
+; FALLBACK22-NEXT: orl %edi, %edx
+; FALLBACK22-NEXT: movl {{[0-9]+}}(%esp), %esi
+; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; FALLBACK22-NEXT: movl %edi, (%esi)
+; FALLBACK22-NEXT: movl %edx, 28(%esi)
+; FALLBACK22-NEXT: movl %eax, 24(%esi)
+; FALLBACK22-NEXT: movl %ecx, 4(%esi)
+; FALLBACK22-NEXT: movl %ebp, 8(%esi)
+; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK22-NEXT: movl %eax, 12(%esi)
+; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK22-NEXT: movl %eax, 16(%esi)
+; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK22-NEXT: movl %eax, 20(%esi)
+; FALLBACK22-NEXT: addl $108, %esp
+; FALLBACK22-NEXT: popl %esi
+; FALLBACK22-NEXT: popl %edi
+; FALLBACK22-NEXT: popl %ebx
+; FALLBACK22-NEXT: popl %ebp
+; FALLBACK22-NEXT: retl
+;
+; FALLBACK23-LABEL: shl_32bytes:
+; FALLBACK23: # %bb.0:
+; FALLBACK23-NEXT: pushl %ebp
+; FALLBACK23-NEXT: pushl %ebx
+; FALLBACK23-NEXT: pushl %edi
+; FALLBACK23-NEXT: pushl %esi
+; FALLBACK23-NEXT: subl $92, %esp
+; FALLBACK23-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK23-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK23-NEXT: movups (%ecx), %xmm0
+; FALLBACK23-NEXT: movups 16(%ecx), %xmm1
+; FALLBACK23-NEXT: movzbl (%eax), %eax
+; FALLBACK23-NEXT: movl %eax, %ecx
+; FALLBACK23-NEXT: shlb $3, %cl
+; FALLBACK23-NEXT: xorps %xmm2, %xmm2
+; FALLBACK23-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; FALLBACK23-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; FALLBACK23-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; FALLBACK23-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; FALLBACK23-NEXT: andb $28, %al
+; FALLBACK23-NEXT: negb %al
+; FALLBACK23-NEXT: movsbl %al, %ebx
+; FALLBACK23-NEXT: movl 64(%esp,%ebx), %eax
+; FALLBACK23-NEXT: movl 68(%esp,%ebx), %edx
+; FALLBACK23-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK23-NEXT: shldl %cl, %eax, %edx
+; FALLBACK23-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK23-NEXT: movl 60(%esp,%ebx), %edx
+; FALLBACK23-NEXT: shldl %cl, %edx, %eax
+; FALLBACK23-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK23-NEXT: movl 56(%esp,%ebx), %edi
+; FALLBACK23-NEXT: shldl %cl, %edi, %edx
+; FALLBACK23-NEXT: movl %edx, (%esp) # 4-byte Spill
+; FALLBACK23-NEXT: movl 52(%esp,%ebx), %ebp
+; FALLBACK23-NEXT: shldl %cl, %ebp, %edi
+; FALLBACK23-NEXT: movl 72(%esp,%ebx), %edx
+; FALLBACK23-NEXT: movl %edx, %eax
+; FALLBACK23-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; FALLBACK23-NEXT: shldl %cl, %esi, %eax
+; FALLBACK23-NEXT: movl 48(%esp,%ebx), %esi
+; FALLBACK23-NEXT: movl 76(%esp,%ebx), %ebx
+; FALLBACK23-NEXT: shldl %cl, %edx, %ebx
+; FALLBACK23-NEXT: movl {{[0-9]+}}(%esp), %edx
+; FALLBACK23-NEXT: movl %ebx, 28(%edx)
+; FALLBACK23-NEXT: movl %eax, 24(%edx)
+; FALLBACK23-NEXT: shlxl %ecx, %esi, %eax
+; FALLBACK23-NEXT: # kill: def $cl killed $cl killed $ecx
+; FALLBACK23-NEXT: shldl %cl, %esi, %ebp
+; FALLBACK23-NEXT: movl %ebp, 4(%edx)
+; FALLBACK23-NEXT: movl %edi, 8(%edx)
+; FALLBACK23-NEXT: movl (%esp), %ecx # 4-byte Reload
+; FALLBACK23-NEXT: movl %ecx, 12(%edx)
+; FALLBACK23-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK23-NEXT: movl %ecx, 16(%edx)
+; FALLBACK23-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK23-NEXT: movl %ecx, 20(%edx)
+; FALLBACK23-NEXT: movl %eax, (%edx)
+; FALLBACK23-NEXT: addl $92, %esp
+; FALLBACK23-NEXT: popl %esi
+; FALLBACK23-NEXT: popl %edi
+; FALLBACK23-NEXT: popl %ebx
+; FALLBACK23-NEXT: popl %ebp
+; FALLBACK23-NEXT: retl
+;
+; FALLBACK24-LABEL: shl_32bytes:
+; FALLBACK24: # %bb.0:
+; FALLBACK24-NEXT: pushl %ebp
+; FALLBACK24-NEXT: pushl %ebx
+; FALLBACK24-NEXT: pushl %edi
+; FALLBACK24-NEXT: pushl %esi
+; FALLBACK24-NEXT: subl $108, %esp
+; FALLBACK24-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK24-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK24-NEXT: vmovups (%ecx), %ymm0
+; FALLBACK24-NEXT: movzbl (%eax), %ecx
+; FALLBACK24-NEXT: movb %cl, %dh
+; FALLBACK24-NEXT: shlb $3, %dh
+; FALLBACK24-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; FALLBACK24-NEXT: vmovups %ymm1, {{[0-9]+}}(%esp)
+; FALLBACK24-NEXT: vmovups %ymm0, {{[0-9]+}}(%esp)
+; FALLBACK24-NEXT: andb $28, %cl
+; FALLBACK24-NEXT: negb %cl
+; FALLBACK24-NEXT: movsbl %cl, %eax
+; FALLBACK24-NEXT: movl 84(%esp,%eax), %edi
+; FALLBACK24-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK24-NEXT: movb %dh, %cl
+; FALLBACK24-NEXT: shll %cl, %edi
+; FALLBACK24-NEXT: movb %dh, %dl
+; FALLBACK24-NEXT: notb %dl
+; FALLBACK24-NEXT: movl 80(%esp,%eax), %esi
+; FALLBACK24-NEXT: movl %eax, %ebx
+; FALLBACK24-NEXT: movl %esi, %eax
+; FALLBACK24-NEXT: shrl %eax
+; FALLBACK24-NEXT: movl %edx, %ecx
+; FALLBACK24-NEXT: shrl %cl, %eax
+; FALLBACK24-NEXT: orl %edi, %eax
+; FALLBACK24-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK24-NEXT: movb %dh, %cl
+; FALLBACK24-NEXT: shll %cl, %esi
+; FALLBACK24-NEXT: movl %ebx, %edi
+; FALLBACK24-NEXT: movl 76(%esp,%ebx), %ebp
+; FALLBACK24-NEXT: movl %ebp, %eax
+; FALLBACK24-NEXT: shrl %eax
+; FALLBACK24-NEXT: movl %edx, %ecx
+; FALLBACK24-NEXT: shrl %cl, %eax
+; FALLBACK24-NEXT: orl %esi, %eax
+; FALLBACK24-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK24-NEXT: movb %dh, %cl
+; FALLBACK24-NEXT: shll %cl, %ebp
+; FALLBACK24-NEXT: movl 72(%esp,%ebx), %ebx
+; FALLBACK24-NEXT: movl %ebx, %eax
+; FALLBACK24-NEXT: shrl %eax
+; FALLBACK24-NEXT: movl %edx, %ecx
+; FALLBACK24-NEXT: shrl %cl, %eax
+; FALLBACK24-NEXT: orl %ebp, %eax
+; FALLBACK24-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK24-NEXT: movb %dh, %cl
+; FALLBACK24-NEXT: shll %cl, %ebx
+; FALLBACK24-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK24-NEXT: movl 68(%esp,%edi), %ebp
+; FALLBACK24-NEXT: movl %ebp, %esi
+; FALLBACK24-NEXT: shrl %esi
+; FALLBACK24-NEXT: movl %edx, %ecx
+; FALLBACK24-NEXT: shrl %cl, %esi
+; FALLBACK24-NEXT: orl %ebx, %esi
+; FALLBACK24-NEXT: movb %dh, %cl
+; FALLBACK24-NEXT: shll %cl, %ebp
+; FALLBACK24-NEXT: movl 64(%esp,%edi), %ebx
+; FALLBACK24-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK24-NEXT: shrl %ebx
+; FALLBACK24-NEXT: movl %edx, %ecx
+; FALLBACK24-NEXT: shrl %cl, %ebx
+; FALLBACK24-NEXT: orl %ebp, %ebx
+; FALLBACK24-NEXT: movl 88(%esp,%edi), %ebp
+; FALLBACK24-NEXT: movl %ebp, %edi
+; FALLBACK24-NEXT: movb %dh, %cl
+; FALLBACK24-NEXT: shll %cl, %edi
+; FALLBACK24-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK24-NEXT: shrl %eax
+; FALLBACK24-NEXT: movl %edx, %ecx
+; FALLBACK24-NEXT: shrl %cl, %eax
+; FALLBACK24-NEXT: orl %edi, %eax
+; FALLBACK24-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK24-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK24-NEXT: movl 92(%esp,%eax), %edi
+; FALLBACK24-NEXT: movb %dh, %cl
+; FALLBACK24-NEXT: shll %cl, %edi
+; FALLBACK24-NEXT: shrl %ebp
+; FALLBACK24-NEXT: movl %edx, %ecx
+; FALLBACK24-NEXT: shrl %cl, %ebp
+; FALLBACK24-NEXT: orl %edi, %ebp
+; FALLBACK24-NEXT: movb %dh, %cl
+; FALLBACK24-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK24-NEXT: shll %cl, %edx
+; FALLBACK24-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK24-NEXT: movl %edx, (%eax)
+; FALLBACK24-NEXT: movl %ebp, 28(%eax)
+; FALLBACK24-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK24-NEXT: movl %ecx, 24(%eax)
+; FALLBACK24-NEXT: movl %ebx, 4(%eax)
+; FALLBACK24-NEXT: movl %esi, 8(%eax)
+; FALLBACK24-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK24-NEXT: movl %ecx, 12(%eax)
+; FALLBACK24-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK24-NEXT: movl %ecx, 16(%eax)
+; FALLBACK24-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK24-NEXT: movl %ecx, 20(%eax)
+; FALLBACK24-NEXT: addl $108, %esp
+; FALLBACK24-NEXT: popl %esi
+; FALLBACK24-NEXT: popl %edi
+; FALLBACK24-NEXT: popl %ebx
+; FALLBACK24-NEXT: popl %ebp
+; FALLBACK24-NEXT: vzeroupper
+; FALLBACK24-NEXT: retl
+;
+; FALLBACK25-LABEL: shl_32bytes:
+; FALLBACK25: # %bb.0:
+; FALLBACK25-NEXT: pushl %ebp
+; FALLBACK25-NEXT: pushl %ebx
+; FALLBACK25-NEXT: pushl %edi
+; FALLBACK25-NEXT: pushl %esi
+; FALLBACK25-NEXT: subl $92, %esp
+; FALLBACK25-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK25-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK25-NEXT: vmovups (%ecx), %ymm0
+; FALLBACK25-NEXT: movzbl (%eax), %eax
+; FALLBACK25-NEXT: movl %eax, %ecx
+; FALLBACK25-NEXT: shlb $3, %cl
+; FALLBACK25-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; FALLBACK25-NEXT: vmovups %ymm1, {{[0-9]+}}(%esp)
+; FALLBACK25-NEXT: vmovups %ymm0, {{[0-9]+}}(%esp)
+; FALLBACK25-NEXT: andb $28, %al
+; FALLBACK25-NEXT: negb %al
+; FALLBACK25-NEXT: movsbl %al, %ebp
+; FALLBACK25-NEXT: movl 64(%esp,%ebp), %eax
+; FALLBACK25-NEXT: movl 68(%esp,%ebp), %edx
+; FALLBACK25-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK25-NEXT: shldl %cl, %eax, %edx
+; FALLBACK25-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK25-NEXT: movl 60(%esp,%ebp), %edx
+; FALLBACK25-NEXT: shldl %cl, %edx, %eax
+; FALLBACK25-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK25-NEXT: movl 56(%esp,%ebp), %edi
+; FALLBACK25-NEXT: shldl %cl, %edi, %edx
+; FALLBACK25-NEXT: movl %edx, (%esp) # 4-byte Spill
+; FALLBACK25-NEXT: movl 52(%esp,%ebp), %ebx
+; FALLBACK25-NEXT: shldl %cl, %ebx, %edi
+; FALLBACK25-NEXT: movl 72(%esp,%ebp), %edx
+; FALLBACK25-NEXT: movl %edx, %eax
+; FALLBACK25-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; FALLBACK25-NEXT: shldl %cl, %esi, %eax
+; FALLBACK25-NEXT: movl 48(%esp,%ebp), %esi
+; FALLBACK25-NEXT: movl 76(%esp,%ebp), %ebp
+; FALLBACK25-NEXT: shldl %cl, %edx, %ebp
+; FALLBACK25-NEXT: movl {{[0-9]+}}(%esp), %edx
+; FALLBACK25-NEXT: movl %ebp, 28(%edx)
+; FALLBACK25-NEXT: movl %eax, 24(%edx)
+; FALLBACK25-NEXT: movl %esi, %eax
+; FALLBACK25-NEXT: shll %cl, %eax
+; FALLBACK25-NEXT: shldl %cl, %esi, %ebx
+; FALLBACK25-NEXT: movl %ebx, 4(%edx)
+; FALLBACK25-NEXT: movl %edi, 8(%edx)
+; FALLBACK25-NEXT: movl (%esp), %ecx # 4-byte Reload
+; FALLBACK25-NEXT: movl %ecx, 12(%edx)
+; FALLBACK25-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK25-NEXT: movl %ecx, 16(%edx)
+; FALLBACK25-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK25-NEXT: movl %ecx, 20(%edx)
+; FALLBACK25-NEXT: movl %eax, (%edx)
+; FALLBACK25-NEXT: addl $92, %esp
+; FALLBACK25-NEXT: popl %esi
+; FALLBACK25-NEXT: popl %edi
+; FALLBACK25-NEXT: popl %ebx
+; FALLBACK25-NEXT: popl %ebp
+; FALLBACK25-NEXT: vzeroupper
+; FALLBACK25-NEXT: retl
+;
+; FALLBACK26-LABEL: shl_32bytes:
+; FALLBACK26: # %bb.0:
+; FALLBACK26-NEXT: pushl %ebp
+; FALLBACK26-NEXT: pushl %ebx
+; FALLBACK26-NEXT: pushl %edi
+; FALLBACK26-NEXT: pushl %esi
+; FALLBACK26-NEXT: subl $108, %esp
+; FALLBACK26-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK26-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK26-NEXT: vmovups (%ecx), %ymm0
+; FALLBACK26-NEXT: movzbl (%eax), %ecx
+; FALLBACK26-NEXT: movl %ecx, %eax
+; FALLBACK26-NEXT: shlb $3, %al
+; FALLBACK26-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; FALLBACK26-NEXT: vmovups %ymm1, {{[0-9]+}}(%esp)
+; FALLBACK26-NEXT: vmovups %ymm0, {{[0-9]+}}(%esp)
+; FALLBACK26-NEXT: andb $28, %cl
+; FALLBACK26-NEXT: negb %cl
+; FALLBACK26-NEXT: movsbl %cl, %edx
+; FALLBACK26-NEXT: movl 84(%esp,%edx), %ecx
+; FALLBACK26-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: shlxl %eax, %ecx, %ecx
+; FALLBACK26-NEXT: movl 80(%esp,%edx), %esi
+; FALLBACK26-NEXT: shlxl %eax, %esi, %edi
+; FALLBACK26-NEXT: movl %eax, %ebx
+; FALLBACK26-NEXT: notb %bl
+; FALLBACK26-NEXT: shrl %esi
+; FALLBACK26-NEXT: shrxl %ebx, %esi, %esi
+; FALLBACK26-NEXT: orl %ecx, %esi
+; FALLBACK26-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: movl 76(%esp,%edx), %ecx
+; FALLBACK26-NEXT: movl %ecx, %esi
+; FALLBACK26-NEXT: shrl %esi
+; FALLBACK26-NEXT: shrxl %ebx, %esi, %esi
+; FALLBACK26-NEXT: orl %edi, %esi
+; FALLBACK26-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: shlxl %eax, %ecx, %ecx
+; FALLBACK26-NEXT: movl 72(%esp,%edx), %esi
+; FALLBACK26-NEXT: movl %esi, %edi
+; FALLBACK26-NEXT: shrl %edi
+; FALLBACK26-NEXT: shrxl %ebx, %edi, %edi
+; FALLBACK26-NEXT: orl %ecx, %edi
+; FALLBACK26-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: shlxl %eax, %esi, %ecx
+; FALLBACK26-NEXT: movl 68(%esp,%edx), %esi
+; FALLBACK26-NEXT: movl %esi, %edi
+; FALLBACK26-NEXT: shrl %edi
+; FALLBACK26-NEXT: shrxl %ebx, %edi, %ebp
+; FALLBACK26-NEXT: orl %ecx, %ebp
+; FALLBACK26-NEXT: shlxl %eax, %esi, %edi
+; FALLBACK26-NEXT: movl 64(%esp,%edx), %esi
+; FALLBACK26-NEXT: movl %esi, %ecx
+; FALLBACK26-NEXT: shrl %ecx
+; FALLBACK26-NEXT: shrxl %ebx, %ecx, %ecx
+; FALLBACK26-NEXT: orl %edi, %ecx
+; FALLBACK26-NEXT: shlxl %eax, %esi, %esi
+; FALLBACK26-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: shlxl %eax, 92(%esp,%edx), %edi
+; FALLBACK26-NEXT: movl 88(%esp,%edx), %edx
+; FALLBACK26-NEXT: shlxl %eax, %edx, %esi
+; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK26-NEXT: shrl %eax
+; FALLBACK26-NEXT: shrxl %ebx, %eax, %eax
+; FALLBACK26-NEXT: orl %esi, %eax
+; FALLBACK26-NEXT: shrl %edx
+; FALLBACK26-NEXT: shrxl %ebx, %edx, %edx
+; FALLBACK26-NEXT: orl %edi, %edx
+; FALLBACK26-NEXT: movl {{[0-9]+}}(%esp), %esi
+; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; FALLBACK26-NEXT: movl %edi, (%esi)
+; FALLBACK26-NEXT: movl %edx, 28(%esi)
+; FALLBACK26-NEXT: movl %eax, 24(%esi)
+; FALLBACK26-NEXT: movl %ecx, 4(%esi)
+; FALLBACK26-NEXT: movl %ebp, 8(%esi)
+; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK26-NEXT: movl %eax, 12(%esi)
+; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK26-NEXT: movl %eax, 16(%esi)
+; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK26-NEXT: movl %eax, 20(%esi)
+; FALLBACK26-NEXT: addl $108, %esp
+; FALLBACK26-NEXT: popl %esi
+; FALLBACK26-NEXT: popl %edi
+; FALLBACK26-NEXT: popl %ebx
+; FALLBACK26-NEXT: popl %ebp
+; FALLBACK26-NEXT: vzeroupper
+; FALLBACK26-NEXT: retl
+;
+; FALLBACK27-LABEL: shl_32bytes:
+; FALLBACK27: # %bb.0:
+; FALLBACK27-NEXT: pushl %ebp
+; FALLBACK27-NEXT: pushl %ebx
+; FALLBACK27-NEXT: pushl %edi
+; FALLBACK27-NEXT: pushl %esi
+; FALLBACK27-NEXT: subl $92, %esp
+; FALLBACK27-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK27-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK27-NEXT: vmovups (%ecx), %ymm0
+; FALLBACK27-NEXT: movzbl (%eax), %eax
+; FALLBACK27-NEXT: movl %eax, %ecx
+; FALLBACK27-NEXT: shlb $3, %cl
+; FALLBACK27-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; FALLBACK27-NEXT: vmovups %ymm1, {{[0-9]+}}(%esp)
+; FALLBACK27-NEXT: vmovups %ymm0, {{[0-9]+}}(%esp)
+; FALLBACK27-NEXT: andb $28, %al
+; FALLBACK27-NEXT: negb %al
+; FALLBACK27-NEXT: movsbl %al, %ebx
+; FALLBACK27-NEXT: movl 64(%esp,%ebx), %eax
+; FALLBACK27-NEXT: movl 68(%esp,%ebx), %edx
+; FALLBACK27-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK27-NEXT: shldl %cl, %eax, %edx
+; FALLBACK27-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK27-NEXT: movl 60(%esp,%ebx), %edx
+; FALLBACK27-NEXT: shldl %cl, %edx, %eax
+; FALLBACK27-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK27-NEXT: movl 56(%esp,%ebx), %edi
+; FALLBACK27-NEXT: shldl %cl, %edi, %edx
+; FALLBACK27-NEXT: movl %edx, (%esp) # 4-byte Spill
+; FALLBACK27-NEXT: movl 52(%esp,%ebx), %ebp
+; FALLBACK27-NEXT: shldl %cl, %ebp, %edi
+; FALLBACK27-NEXT: movl 72(%esp,%ebx), %edx
+; FALLBACK27-NEXT: movl %edx, %eax
+; FALLBACK27-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; FALLBACK27-NEXT: shldl %cl, %esi, %eax
+; FALLBACK27-NEXT: movl 48(%esp,%ebx), %esi
+; FALLBACK27-NEXT: movl 76(%esp,%ebx), %ebx
+; FALLBACK27-NEXT: shldl %cl, %edx, %ebx
+; FALLBACK27-NEXT: movl {{[0-9]+}}(%esp), %edx
+; FALLBACK27-NEXT: movl %ebx, 28(%edx)
+; FALLBACK27-NEXT: movl %eax, 24(%edx)
+; FALLBACK27-NEXT: shlxl %ecx, %esi, %eax
+; FALLBACK27-NEXT: # kill: def $cl killed $cl killed $ecx
+; FALLBACK27-NEXT: shldl %cl, %esi, %ebp
+; FALLBACK27-NEXT: movl %ebp, 4(%edx)
+; FALLBACK27-NEXT: movl %edi, 8(%edx)
+; FALLBACK27-NEXT: movl (%esp), %ecx # 4-byte Reload
+; FALLBACK27-NEXT: movl %ecx, 12(%edx)
+; FALLBACK27-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK27-NEXT: movl %ecx, 16(%edx)
+; FALLBACK27-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK27-NEXT: movl %ecx, 20(%edx)
+; FALLBACK27-NEXT: movl %eax, (%edx)
+; FALLBACK27-NEXT: addl $92, %esp
+; FALLBACK27-NEXT: popl %esi
+; FALLBACK27-NEXT: popl %edi
+; FALLBACK27-NEXT: popl %ebx
+; FALLBACK27-NEXT: popl %ebp
+; FALLBACK27-NEXT: vzeroupper
+; FALLBACK27-NEXT: retl
+;
+; FALLBACK28-LABEL: shl_32bytes:
+; FALLBACK28: # %bb.0:
+; FALLBACK28-NEXT: pushl %ebp
+; FALLBACK28-NEXT: pushl %ebx
+; FALLBACK28-NEXT: pushl %edi
+; FALLBACK28-NEXT: pushl %esi
+; FALLBACK28-NEXT: subl $108, %esp
+; FALLBACK28-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK28-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK28-NEXT: vmovups (%ecx), %ymm0
+; FALLBACK28-NEXT: movzbl (%eax), %ecx
+; FALLBACK28-NEXT: movb %cl, %dh
+; FALLBACK28-NEXT: shlb $3, %dh
+; FALLBACK28-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; FALLBACK28-NEXT: vmovups %ymm1, {{[0-9]+}}(%esp)
+; FALLBACK28-NEXT: vmovups %ymm0, {{[0-9]+}}(%esp)
+; FALLBACK28-NEXT: andb $28, %cl
+; FALLBACK28-NEXT: negb %cl
+; FALLBACK28-NEXT: movsbl %cl, %eax
+; FALLBACK28-NEXT: movl 84(%esp,%eax), %edi
+; FALLBACK28-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK28-NEXT: movb %dh, %cl
+; FALLBACK28-NEXT: shll %cl, %edi
+; FALLBACK28-NEXT: movb %dh, %dl
+; FALLBACK28-NEXT: notb %dl
+; FALLBACK28-NEXT: movl 80(%esp,%eax), %esi
+; FALLBACK28-NEXT: movl %eax, %ebx
+; FALLBACK28-NEXT: movl %esi, %eax
+; FALLBACK28-NEXT: shrl %eax
+; FALLBACK28-NEXT: movl %edx, %ecx
+; FALLBACK28-NEXT: shrl %cl, %eax
+; FALLBACK28-NEXT: orl %edi, %eax
+; FALLBACK28-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK28-NEXT: movb %dh, %cl
+; FALLBACK28-NEXT: shll %cl, %esi
+; FALLBACK28-NEXT: movl %ebx, %edi
+; FALLBACK28-NEXT: movl 76(%esp,%ebx), %ebp
+; FALLBACK28-NEXT: movl %ebp, %eax
+; FALLBACK28-NEXT: shrl %eax
+; FALLBACK28-NEXT: movl %edx, %ecx
+; FALLBACK28-NEXT: shrl %cl, %eax
+; FALLBACK28-NEXT: orl %esi, %eax
+; FALLBACK28-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK28-NEXT: movb %dh, %cl
+; FALLBACK28-NEXT: shll %cl, %ebp
+; FALLBACK28-NEXT: movl 72(%esp,%ebx), %ebx
+; FALLBACK28-NEXT: movl %ebx, %eax
+; FALLBACK28-NEXT: shrl %eax
+; FALLBACK28-NEXT: movl %edx, %ecx
+; FALLBACK28-NEXT: shrl %cl, %eax
+; FALLBACK28-NEXT: orl %ebp, %eax
+; FALLBACK28-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK28-NEXT: movb %dh, %cl
+; FALLBACK28-NEXT: shll %cl, %ebx
+; FALLBACK28-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK28-NEXT: movl 68(%esp,%edi), %ebp
+; FALLBACK28-NEXT: movl %ebp, %esi
+; FALLBACK28-NEXT: shrl %esi
+; FALLBACK28-NEXT: movl %edx, %ecx
+; FALLBACK28-NEXT: shrl %cl, %esi
+; FALLBACK28-NEXT: orl %ebx, %esi
+; FALLBACK28-NEXT: movb %dh, %cl
+; FALLBACK28-NEXT: shll %cl, %ebp
+; FALLBACK28-NEXT: movl 64(%esp,%edi), %ebx
+; FALLBACK28-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK28-NEXT: shrl %ebx
+; FALLBACK28-NEXT: movl %edx, %ecx
+; FALLBACK28-NEXT: shrl %cl, %ebx
+; FALLBACK28-NEXT: orl %ebp, %ebx
+; FALLBACK28-NEXT: movl 88(%esp,%edi), %ebp
+; FALLBACK28-NEXT: movl %ebp, %edi
+; FALLBACK28-NEXT: movb %dh, %cl
+; FALLBACK28-NEXT: shll %cl, %edi
+; FALLBACK28-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK28-NEXT: shrl %eax
+; FALLBACK28-NEXT: movl %edx, %ecx
+; FALLBACK28-NEXT: shrl %cl, %eax
+; FALLBACK28-NEXT: orl %edi, %eax
+; FALLBACK28-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK28-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK28-NEXT: movl 92(%esp,%eax), %edi
+; FALLBACK28-NEXT: movb %dh, %cl
+; FALLBACK28-NEXT: shll %cl, %edi
+; FALLBACK28-NEXT: shrl %ebp
+; FALLBACK28-NEXT: movl %edx, %ecx
+; FALLBACK28-NEXT: shrl %cl, %ebp
+; FALLBACK28-NEXT: orl %edi, %ebp
+; FALLBACK28-NEXT: movb %dh, %cl
+; FALLBACK28-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK28-NEXT: shll %cl, %edx
+; FALLBACK28-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK28-NEXT: movl %edx, (%eax)
+; FALLBACK28-NEXT: movl %ebp, 28(%eax)
+; FALLBACK28-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK28-NEXT: movl %ecx, 24(%eax)
+; FALLBACK28-NEXT: movl %ebx, 4(%eax)
+; FALLBACK28-NEXT: movl %esi, 8(%eax)
+; FALLBACK28-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK28-NEXT: movl %ecx, 12(%eax)
+; FALLBACK28-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK28-NEXT: movl %ecx, 16(%eax)
+; FALLBACK28-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK28-NEXT: movl %ecx, 20(%eax)
+; FALLBACK28-NEXT: addl $108, %esp
+; FALLBACK28-NEXT: popl %esi
+; FALLBACK28-NEXT: popl %edi
+; FALLBACK28-NEXT: popl %ebx
+; FALLBACK28-NEXT: popl %ebp
+; FALLBACK28-NEXT: vzeroupper
+; FALLBACK28-NEXT: retl
+;
+; FALLBACK29-LABEL: shl_32bytes:
+; FALLBACK29: # %bb.0:
+; FALLBACK29-NEXT: pushl %ebp
+; FALLBACK29-NEXT: pushl %ebx
+; FALLBACK29-NEXT: pushl %edi
+; FALLBACK29-NEXT: pushl %esi
+; FALLBACK29-NEXT: subl $92, %esp
+; FALLBACK29-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK29-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK29-NEXT: vmovups (%ecx), %ymm0
+; FALLBACK29-NEXT: movzbl (%eax), %eax
+; FALLBACK29-NEXT: movl %eax, %ecx
+; FALLBACK29-NEXT: shlb $3, %cl
+; FALLBACK29-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; FALLBACK29-NEXT: vmovups %ymm1, {{[0-9]+}}(%esp)
+; FALLBACK29-NEXT: vmovups %ymm0, {{[0-9]+}}(%esp)
+; FALLBACK29-NEXT: andb $28, %al
+; FALLBACK29-NEXT: negb %al
+; FALLBACK29-NEXT: movsbl %al, %ebp
+; FALLBACK29-NEXT: movl 64(%esp,%ebp), %eax
+; FALLBACK29-NEXT: movl 68(%esp,%ebp), %edx
+; FALLBACK29-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK29-NEXT: shldl %cl, %eax, %edx
+; FALLBACK29-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK29-NEXT: movl 60(%esp,%ebp), %edx
+; FALLBACK29-NEXT: shldl %cl, %edx, %eax
+; FALLBACK29-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK29-NEXT: movl 56(%esp,%ebp), %edi
+; FALLBACK29-NEXT: shldl %cl, %edi, %edx
+; FALLBACK29-NEXT: movl %edx, (%esp) # 4-byte Spill
+; FALLBACK29-NEXT: movl 52(%esp,%ebp), %ebx
+; FALLBACK29-NEXT: shldl %cl, %ebx, %edi
+; FALLBACK29-NEXT: movl 72(%esp,%ebp), %edx
+; FALLBACK29-NEXT: movl %edx, %eax
+; FALLBACK29-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; FALLBACK29-NEXT: shldl %cl, %esi, %eax
+; FALLBACK29-NEXT: movl 48(%esp,%ebp), %esi
+; FALLBACK29-NEXT: movl 76(%esp,%ebp), %ebp
+; FALLBACK29-NEXT: shldl %cl, %edx, %ebp
+; FALLBACK29-NEXT: movl {{[0-9]+}}(%esp), %edx
+; FALLBACK29-NEXT: movl %ebp, 28(%edx)
+; FALLBACK29-NEXT: movl %eax, 24(%edx)
+; FALLBACK29-NEXT: movl %esi, %eax
+; FALLBACK29-NEXT: shll %cl, %eax
+; FALLBACK29-NEXT: shldl %cl, %esi, %ebx
+; FALLBACK29-NEXT: movl %ebx, 4(%edx)
+; FALLBACK29-NEXT: movl %edi, 8(%edx)
+; FALLBACK29-NEXT: movl (%esp), %ecx # 4-byte Reload
+; FALLBACK29-NEXT: movl %ecx, 12(%edx)
+; FALLBACK29-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK29-NEXT: movl %ecx, 16(%edx)
+; FALLBACK29-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK29-NEXT: movl %ecx, 20(%edx)
+; FALLBACK29-NEXT: movl %eax, (%edx)
+; FALLBACK29-NEXT: addl $92, %esp
+; FALLBACK29-NEXT: popl %esi
+; FALLBACK29-NEXT: popl %edi
+; FALLBACK29-NEXT: popl %ebx
+; FALLBACK29-NEXT: popl %ebp
+; FALLBACK29-NEXT: vzeroupper
+; FALLBACK29-NEXT: retl
+;
+; FALLBACK30-LABEL: shl_32bytes:
+; FALLBACK30: # %bb.0:
+; FALLBACK30-NEXT: pushl %ebp
+; FALLBACK30-NEXT: pushl %ebx
+; FALLBACK30-NEXT: pushl %edi
+; FALLBACK30-NEXT: pushl %esi
+; FALLBACK30-NEXT: subl $108, %esp
+; FALLBACK30-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK30-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK30-NEXT: vmovups (%ecx), %ymm0
+; FALLBACK30-NEXT: movzbl (%eax), %ecx
+; FALLBACK30-NEXT: movl %ecx, %eax
+; FALLBACK30-NEXT: shlb $3, %al
+; FALLBACK30-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; FALLBACK30-NEXT: vmovups %ymm1, {{[0-9]+}}(%esp)
+; FALLBACK30-NEXT: vmovups %ymm0, {{[0-9]+}}(%esp)
+; FALLBACK30-NEXT: andb $28, %cl
+; FALLBACK30-NEXT: negb %cl
+; FALLBACK30-NEXT: movsbl %cl, %edx
+; FALLBACK30-NEXT: movl 84(%esp,%edx), %ecx
+; FALLBACK30-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: shlxl %eax, %ecx, %ecx
+; FALLBACK30-NEXT: movl 80(%esp,%edx), %esi
+; FALLBACK30-NEXT: shlxl %eax, %esi, %edi
+; FALLBACK30-NEXT: movl %eax, %ebx
+; FALLBACK30-NEXT: notb %bl
+; FALLBACK30-NEXT: shrl %esi
+; FALLBACK30-NEXT: shrxl %ebx, %esi, %esi
+; FALLBACK30-NEXT: orl %ecx, %esi
+; FALLBACK30-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: movl 76(%esp,%edx), %ecx
+; FALLBACK30-NEXT: movl %ecx, %esi
+; FALLBACK30-NEXT: shrl %esi
+; FALLBACK30-NEXT: shrxl %ebx, %esi, %esi
+; FALLBACK30-NEXT: orl %edi, %esi
+; FALLBACK30-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: shlxl %eax, %ecx, %ecx
+; FALLBACK30-NEXT: movl 72(%esp,%edx), %esi
+; FALLBACK30-NEXT: movl %esi, %edi
+; FALLBACK30-NEXT: shrl %edi
+; FALLBACK30-NEXT: shrxl %ebx, %edi, %edi
+; FALLBACK30-NEXT: orl %ecx, %edi
+; FALLBACK30-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: shlxl %eax, %esi, %ecx
+; FALLBACK30-NEXT: movl 68(%esp,%edx), %esi
+; FALLBACK30-NEXT: movl %esi, %edi
+; FALLBACK30-NEXT: shrl %edi
+; FALLBACK30-NEXT: shrxl %ebx, %edi, %ebp
+; FALLBACK30-NEXT: orl %ecx, %ebp
+; FALLBACK30-NEXT: shlxl %eax, %esi, %edi
+; FALLBACK30-NEXT: movl 64(%esp,%edx), %esi
+; FALLBACK30-NEXT: movl %esi, %ecx
+; FALLBACK30-NEXT: shrl %ecx
+; FALLBACK30-NEXT: shrxl %ebx, %ecx, %ecx
+; FALLBACK30-NEXT: orl %edi, %ecx
+; FALLBACK30-NEXT: shlxl %eax, %esi, %esi
+; FALLBACK30-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: shlxl %eax, 92(%esp,%edx), %edi
+; FALLBACK30-NEXT: movl 88(%esp,%edx), %edx
+; FALLBACK30-NEXT: shlxl %eax, %edx, %esi
+; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK30-NEXT: shrl %eax
+; FALLBACK30-NEXT: shrxl %ebx, %eax, %eax
+; FALLBACK30-NEXT: orl %esi, %eax
+; FALLBACK30-NEXT: shrl %edx
+; FALLBACK30-NEXT: shrxl %ebx, %edx, %edx
+; FALLBACK30-NEXT: orl %edi, %edx
+; FALLBACK30-NEXT: movl {{[0-9]+}}(%esp), %esi
+; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; FALLBACK30-NEXT: movl %edi, (%esi)
+; FALLBACK30-NEXT: movl %edx, 28(%esi)
+; FALLBACK30-NEXT: movl %eax, 24(%esi)
+; FALLBACK30-NEXT: movl %ecx, 4(%esi)
+; FALLBACK30-NEXT: movl %ebp, 8(%esi)
+; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK30-NEXT: movl %eax, 12(%esi)
+; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK30-NEXT: movl %eax, 16(%esi)
+; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK30-NEXT: movl %eax, 20(%esi)
+; FALLBACK30-NEXT: addl $108, %esp
+; FALLBACK30-NEXT: popl %esi
+; FALLBACK30-NEXT: popl %edi
+; FALLBACK30-NEXT: popl %ebx
+; FALLBACK30-NEXT: popl %ebp
+; FALLBACK30-NEXT: vzeroupper
+; FALLBACK30-NEXT: retl
+;
+; FALLBACK31-LABEL: shl_32bytes:
+; FALLBACK31: # %bb.0:
+; FALLBACK31-NEXT: pushl %ebp
+; FALLBACK31-NEXT: pushl %ebx
+; FALLBACK31-NEXT: pushl %edi
+; FALLBACK31-NEXT: pushl %esi
+; FALLBACK31-NEXT: subl $92, %esp
+; FALLBACK31-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK31-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK31-NEXT: vmovups (%ecx), %ymm0
+; FALLBACK31-NEXT: movzbl (%eax), %eax
+; FALLBACK31-NEXT: movl %eax, %ecx
+; FALLBACK31-NEXT: shlb $3, %cl
+; FALLBACK31-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; FALLBACK31-NEXT: vmovups %ymm1, {{[0-9]+}}(%esp)
+; FALLBACK31-NEXT: vmovups %ymm0, {{[0-9]+}}(%esp)
+; FALLBACK31-NEXT: andb $28, %al
+; FALLBACK31-NEXT: negb %al
+; FALLBACK31-NEXT: movsbl %al, %ebx
+; FALLBACK31-NEXT: movl 64(%esp,%ebx), %eax
+; FALLBACK31-NEXT: movl 68(%esp,%ebx), %edx
+; FALLBACK31-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK31-NEXT: shldl %cl, %eax, %edx
+; FALLBACK31-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK31-NEXT: movl 60(%esp,%ebx), %edx
+; FALLBACK31-NEXT: shldl %cl, %edx, %eax
+; FALLBACK31-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK31-NEXT: movl 56(%esp,%ebx), %edi
+; FALLBACK31-NEXT: shldl %cl, %edi, %edx
+; FALLBACK31-NEXT: movl %edx, (%esp) # 4-byte Spill
+; FALLBACK31-NEXT: movl 52(%esp,%ebx), %ebp
+; FALLBACK31-NEXT: shldl %cl, %ebp, %edi
+; FALLBACK31-NEXT: movl 72(%esp,%ebx), %edx
+; FALLBACK31-NEXT: movl %edx, %eax
+; FALLBACK31-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; FALLBACK31-NEXT: shldl %cl, %esi, %eax
+; FALLBACK31-NEXT: movl 48(%esp,%ebx), %esi
+; FALLBACK31-NEXT: movl 76(%esp,%ebx), %ebx
+; FALLBACK31-NEXT: shldl %cl, %edx, %ebx
+; FALLBACK31-NEXT: movl {{[0-9]+}}(%esp), %edx
+; FALLBACK31-NEXT: movl %ebx, 28(%edx)
+; FALLBACK31-NEXT: movl %eax, 24(%edx)
+; FALLBACK31-NEXT: shlxl %ecx, %esi, %eax
+; FALLBACK31-NEXT: # kill: def $cl killed $cl killed $ecx
+; FALLBACK31-NEXT: shldl %cl, %esi, %ebp
+; FALLBACK31-NEXT: movl %ebp, 4(%edx)
+; FALLBACK31-NEXT: movl %edi, 8(%edx)
+; FALLBACK31-NEXT: movl (%esp), %ecx # 4-byte Reload
+; FALLBACK31-NEXT: movl %ecx, 12(%edx)
+; FALLBACK31-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK31-NEXT: movl %ecx, 16(%edx)
+; FALLBACK31-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK31-NEXT: movl %ecx, 20(%edx)
+; FALLBACK31-NEXT: movl %eax, (%edx)
+; FALLBACK31-NEXT: addl $92, %esp
+; FALLBACK31-NEXT: popl %esi
+; FALLBACK31-NEXT: popl %edi
+; FALLBACK31-NEXT: popl %ebx
+; FALLBACK31-NEXT: popl %ebp
+; FALLBACK31-NEXT: vzeroupper
+; FALLBACK31-NEXT: retl
+ %src = load i256, ptr %src.ptr, align 1
+ %byteOff = load i256, ptr %byteOff.ptr, align 1
+ %bitOff = shl i256 %byteOff, 3
+ %res = shl i256 %src, %bitOff
+ store i256 %res, ptr %dst, align 1
+ ret void
+}
+
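+; Same computation as @shl_32bytes above, except the shift amount is loaded as
+; a dword count rather than a byte count: in the 64-bit lowerings below the
+; offset is scaled to bits with shlb $5 and to bytes with shlb $2 before being
+; masked (andb $24) and negated to index the stack slot.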
+define void @shl_32bytes_dwordOff(ptr %src.ptr, ptr %dwordOff.ptr, ptr %dst) nounwind {
+; FALLBACK0-LABEL: shl_32bytes_dwordOff:
+; FALLBACK0: # %bb.0:
+; FALLBACK0-NEXT: pushq %rbx
+; FALLBACK0-NEXT: movq (%rdi), %rcx
+; FALLBACK0-NEXT: movq 8(%rdi), %r8
+; FALLBACK0-NEXT: movq 16(%rdi), %r9
+; FALLBACK0-NEXT: movq 24(%rdi), %rdi
+; FALLBACK0-NEXT: movzbl (%rsi), %esi
+; FALLBACK0-NEXT: movl %esi, %eax
+; FALLBACK0-NEXT: shlb $5, %al
+; FALLBACK0-NEXT: xorps %xmm0, %xmm0
+; FALLBACK0-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK0-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK0-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK0-NEXT: movq %r9, -{{[0-9]+}}(%rsp)
+; FALLBACK0-NEXT: movq %r8, -{{[0-9]+}}(%rsp)
+; FALLBACK0-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
+; FALLBACK0-NEXT: shlb $2, %sil
+; FALLBACK0-NEXT: andb $24, %sil
+; FALLBACK0-NEXT: negb %sil
+; FALLBACK0-NEXT: movsbq %sil, %r10
+; FALLBACK0-NEXT: movq -32(%rsp,%r10), %r8
+; FALLBACK0-NEXT: movq -24(%rsp,%r10), %rdi
+; FALLBACK0-NEXT: movq %rdi, %r11
+; FALLBACK0-NEXT: movl %eax, %ecx
+; FALLBACK0-NEXT: shlq %cl, %r11
+; FALLBACK0-NEXT: movl %eax, %esi
+; FALLBACK0-NEXT: notb %sil
+; FALLBACK0-NEXT: movq %r8, %r9
+; FALLBACK0-NEXT: shrq %r9
+; FALLBACK0-NEXT: movl %esi, %ecx
+; FALLBACK0-NEXT: shrq %cl, %r9
+; FALLBACK0-NEXT: orq %r11, %r9
+; FALLBACK0-NEXT: movq -8(%rsp,%r10), %r11
+; FALLBACK0-NEXT: movl %eax, %ecx
+; FALLBACK0-NEXT: shlq %cl, %r11
+; FALLBACK0-NEXT: movq -16(%rsp,%r10), %r10
+; FALLBACK0-NEXT: movq %r10, %rbx
+; FALLBACK0-NEXT: shrq %rbx
+; FALLBACK0-NEXT: movl %esi, %ecx
+; FALLBACK0-NEXT: shrq %cl, %rbx
+; FALLBACK0-NEXT: orq %r11, %rbx
+; FALLBACK0-NEXT: movl %eax, %ecx
+; FALLBACK0-NEXT: shlq %cl, %r10
+; FALLBACK0-NEXT: shrq %rdi
+; FALLBACK0-NEXT: movl %esi, %ecx
+; FALLBACK0-NEXT: shrq %cl, %rdi
+; FALLBACK0-NEXT: orq %r10, %rdi
+; FALLBACK0-NEXT: movl %eax, %ecx
+; FALLBACK0-NEXT: shlq %cl, %r8
+; FALLBACK0-NEXT: movq %r8, (%rdx)
+; FALLBACK0-NEXT: movq %rdi, 16(%rdx)
+; FALLBACK0-NEXT: movq %rbx, 24(%rdx)
+; FALLBACK0-NEXT: movq %r9, 8(%rdx)
+; FALLBACK0-NEXT: popq %rbx
+; FALLBACK0-NEXT: retq
+;
+; FALLBACK1-LABEL: shl_32bytes_dwordOff:
+; FALLBACK1: # %bb.0:
+; FALLBACK1-NEXT: movq (%rdi), %rax
+; FALLBACK1-NEXT: movq 8(%rdi), %r8
+; FALLBACK1-NEXT: movq 16(%rdi), %r9
+; FALLBACK1-NEXT: movq 24(%rdi), %rdi
+; FALLBACK1-NEXT: movzbl (%rsi), %esi
+; FALLBACK1-NEXT: movl %esi, %ecx
+; FALLBACK1-NEXT: shlb $5, %cl
+; FALLBACK1-NEXT: xorps %xmm0, %xmm0
+; FALLBACK1-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK1-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK1-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK1-NEXT: movq %r9, -{{[0-9]+}}(%rsp)
+; FALLBACK1-NEXT: movq %r8, -{{[0-9]+}}(%rsp)
+; FALLBACK1-NEXT: movq %rax, -{{[0-9]+}}(%rsp)
+; FALLBACK1-NEXT: shlb $2, %sil
+; FALLBACK1-NEXT: andb $24, %sil
+; FALLBACK1-NEXT: negb %sil
+; FALLBACK1-NEXT: movsbq %sil, %rax
+; FALLBACK1-NEXT: movq -24(%rsp,%rax), %rsi
+; FALLBACK1-NEXT: movq -16(%rsp,%rax), %rdi
+; FALLBACK1-NEXT: shldq %cl, %rsi, %rdi
+; FALLBACK1-NEXT: movq -40(%rsp,%rax), %r8
+; FALLBACK1-NEXT: movq -32(%rsp,%rax), %rax
+; FALLBACK1-NEXT: shldq %cl, %rax, %rsi
+; FALLBACK1-NEXT: shldq %cl, %r8, %rax
+; FALLBACK1-NEXT: shlq %cl, %r8
+; FALLBACK1-NEXT: movq %rsi, 16(%rdx)
+; FALLBACK1-NEXT: movq %rdi, 24(%rdx)
+; FALLBACK1-NEXT: movq %r8, (%rdx)
+; FALLBACK1-NEXT: movq %rax, 8(%rdx)
+; FALLBACK1-NEXT: retq
+;
+; FALLBACK2-LABEL: shl_32bytes_dwordOff:
+; FALLBACK2: # %bb.0:
+; FALLBACK2-NEXT: movq (%rdi), %rcx
+; FALLBACK2-NEXT: movq 8(%rdi), %r8
+; FALLBACK2-NEXT: movq 16(%rdi), %r9
+; FALLBACK2-NEXT: movq 24(%rdi), %rdi
+; FALLBACK2-NEXT: movzbl (%rsi), %esi
+; FALLBACK2-NEXT: movl %esi, %eax
+; FALLBACK2-NEXT: shlb $5, %al
+; FALLBACK2-NEXT: xorps %xmm0, %xmm0
+; FALLBACK2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK2-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK2-NEXT: movq %r9, -{{[0-9]+}}(%rsp)
+; FALLBACK2-NEXT: movq %r8, -{{[0-9]+}}(%rsp)
+; FALLBACK2-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
+; FALLBACK2-NEXT: shlb $2, %sil
+; FALLBACK2-NEXT: andb $24, %sil
+; FALLBACK2-NEXT: negb %sil
+; FALLBACK2-NEXT: movsbq %sil, %rsi
+; FALLBACK2-NEXT: movq -40(%rsp,%rsi), %rdi
+; FALLBACK2-NEXT: movq -32(%rsp,%rsi), %rcx
+; FALLBACK2-NEXT: shlxq %rax, %rcx, %r8
+; FALLBACK2-NEXT: shlxq %rax, -16(%rsp,%rsi), %r9
+; FALLBACK2-NEXT: movq -24(%rsp,%rsi), %rsi
+; FALLBACK2-NEXT: shlxq %rax, %rsi, %r10
+; FALLBACK2-NEXT: shlxq %rax, %rdi, %r11
+; FALLBACK2-NEXT: # kill: def $al killed $al killed $rax def $rax
+; FALLBACK2-NEXT: notb %al
+; FALLBACK2-NEXT: shrq %rdi
+; FALLBACK2-NEXT: shrxq %rax, %rdi, %rdi
+; FALLBACK2-NEXT: orq %r8, %rdi
+; FALLBACK2-NEXT: shrq %rsi
+; FALLBACK2-NEXT: shrxq %rax, %rsi, %rsi
+; FALLBACK2-NEXT: orq %r9, %rsi
+; FALLBACK2-NEXT: shrq %rcx
+; FALLBACK2-NEXT: shrxq %rax, %rcx, %rax
+; FALLBACK2-NEXT: orq %r10, %rax
+; FALLBACK2-NEXT: movq %r11, (%rdx)
+; FALLBACK2-NEXT: movq %rax, 16(%rdx)
+; FALLBACK2-NEXT: movq %rsi, 24(%rdx)
+; FALLBACK2-NEXT: movq %rdi, 8(%rdx)
+; FALLBACK2-NEXT: retq
+;
+; FALLBACK3-LABEL: shl_32bytes_dwordOff:
+; FALLBACK3: # %bb.0:
+; FALLBACK3-NEXT: movq (%rdi), %rax
+; FALLBACK3-NEXT: movq 8(%rdi), %r8
+; FALLBACK3-NEXT: movq 16(%rdi), %r9
+; FALLBACK3-NEXT: movq 24(%rdi), %rdi
+; FALLBACK3-NEXT: movzbl (%rsi), %esi
+; FALLBACK3-NEXT: movl %esi, %ecx
+; FALLBACK3-NEXT: shlb $5, %cl
+; FALLBACK3-NEXT: xorps %xmm0, %xmm0
+; FALLBACK3-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK3-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK3-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK3-NEXT: movq %r9, -{{[0-9]+}}(%rsp)
+; FALLBACK3-NEXT: movq %r8, -{{[0-9]+}}(%rsp)
+; FALLBACK3-NEXT: movq %rax, -{{[0-9]+}}(%rsp)
+; FALLBACK3-NEXT: shlb $2, %sil
+; FALLBACK3-NEXT: andb $24, %sil
+; FALLBACK3-NEXT: negb %sil
+; FALLBACK3-NEXT: movsbq %sil, %rax
+; FALLBACK3-NEXT: movq -24(%rsp,%rax), %rsi
+; FALLBACK3-NEXT: movq -16(%rsp,%rax), %rdi
+; FALLBACK3-NEXT: shldq %cl, %rsi, %rdi
+; FALLBACK3-NEXT: movq -40(%rsp,%rax), %r8
+; FALLBACK3-NEXT: movq -32(%rsp,%rax), %rax
+; FALLBACK3-NEXT: shldq %cl, %rax, %rsi
+; FALLBACK3-NEXT: shldq %cl, %r8, %rax
+; FALLBACK3-NEXT: shlxq %rcx, %r8, %rcx
+; FALLBACK3-NEXT: movq %rsi, 16(%rdx)
+; FALLBACK3-NEXT: movq %rdi, 24(%rdx)
+; FALLBACK3-NEXT: movq %rcx, (%rdx)
+; FALLBACK3-NEXT: movq %rax, 8(%rdx)
+; FALLBACK3-NEXT: retq
+;
+; FALLBACK4-LABEL: shl_32bytes_dwordOff:
+; FALLBACK4: # %bb.0:
+; FALLBACK4-NEXT: movups (%rdi), %xmm0
+; FALLBACK4-NEXT: movups 16(%rdi), %xmm1
+; FALLBACK4-NEXT: movzbl (%rsi), %ecx
+; FALLBACK4-NEXT: movl %ecx, %eax
+; FALLBACK4-NEXT: shlb $5, %al
+; FALLBACK4-NEXT: xorps %xmm2, %xmm2
+; FALLBACK4-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; FALLBACK4-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; FALLBACK4-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; FALLBACK4-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK4-NEXT: shlb $2, %cl
+; FALLBACK4-NEXT: andb $24, %cl
+; FALLBACK4-NEXT: negb %cl
+; FALLBACK4-NEXT: movsbq %cl, %r8
+; FALLBACK4-NEXT: movq -16(%rsp,%r8), %r9
+; FALLBACK4-NEXT: movl %eax, %ecx
+; FALLBACK4-NEXT: shlq %cl, %r9
+; FALLBACK4-NEXT: movl %eax, %esi
+; FALLBACK4-NEXT: notb %sil
+; FALLBACK4-NEXT: movq -24(%rsp,%r8), %r10
+; FALLBACK4-NEXT: movq %r10, %rdi
+; FALLBACK4-NEXT: shrq %rdi
+; FALLBACK4-NEXT: movl %esi, %ecx
+; FALLBACK4-NEXT: shrq %cl, %rdi
+; FALLBACK4-NEXT: orq %r9, %rdi
+; FALLBACK4-NEXT: movl %eax, %ecx
+; FALLBACK4-NEXT: shlq %cl, %r10
+; FALLBACK4-NEXT: movq -40(%rsp,%r8), %r9
+; FALLBACK4-NEXT: movq -32(%rsp,%r8), %r8
+; FALLBACK4-NEXT: movq %r8, %r11
+; FALLBACK4-NEXT: shrq %r11
+; FALLBACK4-NEXT: movl %esi, %ecx
+; FALLBACK4-NEXT: shrq %cl, %r11
+; FALLBACK4-NEXT: orq %r10, %r11
+; FALLBACK4-NEXT: movl %eax, %ecx
+; FALLBACK4-NEXT: shlq %cl, %r8
+; FALLBACK4-NEXT: movq %r9, %r10
+; FALLBACK4-NEXT: shrq %r10
+; FALLBACK4-NEXT: movl %esi, %ecx
+; FALLBACK4-NEXT: shrq %cl, %r10
+; FALLBACK4-NEXT: orq %r8, %r10
+; FALLBACK4-NEXT: movl %eax, %ecx
+; FALLBACK4-NEXT: shlq %cl, %r9
+; FALLBACK4-NEXT: movq %r9, (%rdx)
+; FALLBACK4-NEXT: movq %r10, 8(%rdx)
+; FALLBACK4-NEXT: movq %r11, 16(%rdx)
+; FALLBACK4-NEXT: movq %rdi, 24(%rdx)
+; FALLBACK4-NEXT: retq
+;
+; FALLBACK5-LABEL: shl_32bytes_dwordOff:
+; FALLBACK5: # %bb.0:
+; FALLBACK5-NEXT: movups (%rdi), %xmm0
+; FALLBACK5-NEXT: movups 16(%rdi), %xmm1
+; FALLBACK5-NEXT: movzbl (%rsi), %eax
+; FALLBACK5-NEXT: movl %eax, %ecx
+; FALLBACK5-NEXT: shlb $5, %cl
+; FALLBACK5-NEXT: xorps %xmm2, %xmm2
+; FALLBACK5-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; FALLBACK5-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; FALLBACK5-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; FALLBACK5-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK5-NEXT: shlb $2, %al
+; FALLBACK5-NEXT: andb $24, %al
+; FALLBACK5-NEXT: negb %al
+; FALLBACK5-NEXT: movsbq %al, %rax
+; FALLBACK5-NEXT: movq -24(%rsp,%rax), %rsi
+; FALLBACK5-NEXT: movq -16(%rsp,%rax), %rdi
+; FALLBACK5-NEXT: shldq %cl, %rsi, %rdi
+; FALLBACK5-NEXT: movq -40(%rsp,%rax), %r8
+; FALLBACK5-NEXT: movq -32(%rsp,%rax), %rax
+; FALLBACK5-NEXT: shldq %cl, %rax, %rsi
+; FALLBACK5-NEXT: movq %r8, %r9
+; FALLBACK5-NEXT: shlq %cl, %r9
+; FALLBACK5-NEXT: shldq %cl, %r8, %rax
+; FALLBACK5-NEXT: movq %rax, 8(%rdx)
+; FALLBACK5-NEXT: movq %rsi, 16(%rdx)
+; FALLBACK5-NEXT: movq %rdi, 24(%rdx)
+; FALLBACK5-NEXT: movq %r9, (%rdx)
+; FALLBACK5-NEXT: retq
+;
+; FALLBACK6-LABEL: shl_32bytes_dwordOff:
+; FALLBACK6: # %bb.0:
+; FALLBACK6-NEXT: movups (%rdi), %xmm0
+; FALLBACK6-NEXT: movups 16(%rdi), %xmm1
+; FALLBACK6-NEXT: movzbl (%rsi), %ecx
+; FALLBACK6-NEXT: movl %ecx, %eax
+; FALLBACK6-NEXT: shlb $5, %al
+; FALLBACK6-NEXT: xorps %xmm2, %xmm2
+; FALLBACK6-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; FALLBACK6-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; FALLBACK6-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; FALLBACK6-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK6-NEXT: shlb $2, %cl
+; FALLBACK6-NEXT: andb $24, %cl
+; FALLBACK6-NEXT: negb %cl
+; FALLBACK6-NEXT: movsbq %cl, %rcx
+; FALLBACK6-NEXT: shlxq %rax, -16(%rsp,%rcx), %rsi
+; FALLBACK6-NEXT: movq -24(%rsp,%rcx), %rdi
+; FALLBACK6-NEXT: shlxq %rax, %rdi, %r8
+; FALLBACK6-NEXT: movq -40(%rsp,%rcx), %r9
+; FALLBACK6-NEXT: movq -32(%rsp,%rcx), %rcx
+; FALLBACK6-NEXT: shlxq %rax, %rcx, %r10
+; FALLBACK6-NEXT: shlxq %rax, %r9, %r11
+; FALLBACK6-NEXT: # kill: def $al killed $al killed $rax def $rax
+; FALLBACK6-NEXT: notb %al
+; FALLBACK6-NEXT: shrq %rdi
+; FALLBACK6-NEXT: shrxq %rax, %rdi, %rdi
+; FALLBACK6-NEXT: orq %rsi, %rdi
+; FALLBACK6-NEXT: shrq %rcx
+; FALLBACK6-NEXT: shrxq %rax, %rcx, %rcx
+; FALLBACK6-NEXT: orq %r8, %rcx
+; FALLBACK6-NEXT: shrq %r9
+; FALLBACK6-NEXT: shrxq %rax, %r9, %rax
+; FALLBACK6-NEXT: orq %r10, %rax
+; FALLBACK6-NEXT: movq %r11, (%rdx)
+; FALLBACK6-NEXT: movq %rax, 8(%rdx)
+; FALLBACK6-NEXT: movq %rcx, 16(%rdx)
+; FALLBACK6-NEXT: movq %rdi, 24(%rdx)
+; FALLBACK6-NEXT: retq
+;
+; FALLBACK7-LABEL: shl_32bytes_dwordOff:
+; FALLBACK7: # %bb.0:
+; FALLBACK7-NEXT: movups (%rdi), %xmm0
+; FALLBACK7-NEXT: movups 16(%rdi), %xmm1
+; FALLBACK7-NEXT: movzbl (%rsi), %eax
+; FALLBACK7-NEXT: movl %eax, %ecx
+; FALLBACK7-NEXT: shlb $5, %cl
+; FALLBACK7-NEXT: xorps %xmm2, %xmm2
+; FALLBACK7-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; FALLBACK7-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; FALLBACK7-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; FALLBACK7-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK7-NEXT: shlb $2, %al
+; FALLBACK7-NEXT: andb $24, %al
+; FALLBACK7-NEXT: negb %al
+; FALLBACK7-NEXT: movsbq %al, %rax
+; FALLBACK7-NEXT: movq -24(%rsp,%rax), %rsi
+; FALLBACK7-NEXT: movq -16(%rsp,%rax), %rdi
+; FALLBACK7-NEXT: shldq %cl, %rsi, %rdi
+; FALLBACK7-NEXT: movq -40(%rsp,%rax), %r8
+; FALLBACK7-NEXT: movq -32(%rsp,%rax), %rax
+; FALLBACK7-NEXT: shldq %cl, %rax, %rsi
+; FALLBACK7-NEXT: shlxq %rcx, %r8, %r9
+; FALLBACK7-NEXT: # kill: def $cl killed $cl killed $rcx
+; FALLBACK7-NEXT: shldq %cl, %r8, %rax
+; FALLBACK7-NEXT: movq %rax, 8(%rdx)
+; FALLBACK7-NEXT: movq %rsi, 16(%rdx)
+; FALLBACK7-NEXT: movq %rdi, 24(%rdx)
+; FALLBACK7-NEXT: movq %r9, (%rdx)
+; FALLBACK7-NEXT: retq
+;
+; FALLBACK8-LABEL: shl_32bytes_dwordOff:
+; FALLBACK8: # %bb.0:
+; FALLBACK8-NEXT: vmovups (%rdi), %ymm0
+; FALLBACK8-NEXT: movzbl (%rsi), %ecx
+; FALLBACK8-NEXT: movl %ecx, %eax
+; FALLBACK8-NEXT: shlb $5, %al
+; FALLBACK8-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; FALLBACK8-NEXT: vmovups %ymm1, -{{[0-9]+}}(%rsp)
+; FALLBACK8-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
+; FALLBACK8-NEXT: shlb $2, %cl
+; FALLBACK8-NEXT: andb $24, %cl
+; FALLBACK8-NEXT: negb %cl
+; FALLBACK8-NEXT: movsbq %cl, %r8
+; FALLBACK8-NEXT: movq -16(%rsp,%r8), %r9
+; FALLBACK8-NEXT: movl %eax, %ecx
+; FALLBACK8-NEXT: shlq %cl, %r9
+; FALLBACK8-NEXT: movl %eax, %esi
+; FALLBACK8-NEXT: notb %sil
+; FALLBACK8-NEXT: movq -24(%rsp,%r8), %r10
+; FALLBACK8-NEXT: movq %r10, %rdi
+; FALLBACK8-NEXT: shrq %rdi
+; FALLBACK8-NEXT: movl %esi, %ecx
+; FALLBACK8-NEXT: shrq %cl, %rdi
+; FALLBACK8-NEXT: orq %r9, %rdi
+; FALLBACK8-NEXT: movl %eax, %ecx
+; FALLBACK8-NEXT: shlq %cl, %r10
+; FALLBACK8-NEXT: movq -40(%rsp,%r8), %r9
+; FALLBACK8-NEXT: movq -32(%rsp,%r8), %r8
+; FALLBACK8-NEXT: movq %r8, %r11
+; FALLBACK8-NEXT: shrq %r11
+; FALLBACK8-NEXT: movl %esi, %ecx
+; FALLBACK8-NEXT: shrq %cl, %r11
+; FALLBACK8-NEXT: orq %r10, %r11
+; FALLBACK8-NEXT: movl %eax, %ecx
+; FALLBACK8-NEXT: shlq %cl, %r8
+; FALLBACK8-NEXT: movq %r9, %r10
+; FALLBACK8-NEXT: shrq %r10
+; FALLBACK8-NEXT: movl %esi, %ecx
+; FALLBACK8-NEXT: shrq %cl, %r10
+; FALLBACK8-NEXT: orq %r8, %r10
+; FALLBACK8-NEXT: movl %eax, %ecx
+; FALLBACK8-NEXT: shlq %cl, %r9
+; FALLBACK8-NEXT: movq %r9, (%rdx)
+; FALLBACK8-NEXT: movq %r10, 8(%rdx)
+; FALLBACK8-NEXT: movq %r11, 16(%rdx)
+; FALLBACK8-NEXT: movq %rdi, 24(%rdx)
+; FALLBACK8-NEXT: vzeroupper
+; FALLBACK8-NEXT: retq
+;
+; FALLBACK9-LABEL: shl_32bytes_dwordOff:
+; FALLBACK9: # %bb.0:
+; FALLBACK9-NEXT: vmovups (%rdi), %ymm0
+; FALLBACK9-NEXT: movzbl (%rsi), %eax
+; FALLBACK9-NEXT: movl %eax, %ecx
+; FALLBACK9-NEXT: shlb $5, %cl
+; FALLBACK9-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; FALLBACK9-NEXT: vmovups %ymm1, -{{[0-9]+}}(%rsp)
+; FALLBACK9-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
+; FALLBACK9-NEXT: shlb $2, %al
+; FALLBACK9-NEXT: andb $24, %al
+; FALLBACK9-NEXT: negb %al
+; FALLBACK9-NEXT: movsbq %al, %rax
+; FALLBACK9-NEXT: movq -24(%rsp,%rax), %rsi
+; FALLBACK9-NEXT: movq -16(%rsp,%rax), %rdi
+; FALLBACK9-NEXT: shldq %cl, %rsi, %rdi
+; FALLBACK9-NEXT: movq -40(%rsp,%rax), %r8
+; FALLBACK9-NEXT: movq -32(%rsp,%rax), %rax
+; FALLBACK9-NEXT: shldq %cl, %rax, %rsi
+; FALLBACK9-NEXT: movq %r8, %r9
+; FALLBACK9-NEXT: shlq %cl, %r9
+; FALLBACK9-NEXT: shldq %cl, %r8, %rax
+; FALLBACK9-NEXT: movq %rax, 8(%rdx)
+; FALLBACK9-NEXT: movq %rsi, 16(%rdx)
+; FALLBACK9-NEXT: movq %rdi, 24(%rdx)
+; FALLBACK9-NEXT: movq %r9, (%rdx)
+; FALLBACK9-NEXT: vzeroupper
+; FALLBACK9-NEXT: retq
+;
+; FALLBACK10-LABEL: shl_32bytes_dwordOff:
+; FALLBACK10: # %bb.0:
+; FALLBACK10-NEXT: vmovups (%rdi), %ymm0
+; FALLBACK10-NEXT: movzbl (%rsi), %ecx
+; FALLBACK10-NEXT: movl %ecx, %eax
+; FALLBACK10-NEXT: shlb $5, %al
+; FALLBACK10-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; FALLBACK10-NEXT: vmovups %ymm1, -{{[0-9]+}}(%rsp)
+; FALLBACK10-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
+; FALLBACK10-NEXT: shlb $2, %cl
+; FALLBACK10-NEXT: andb $24, %cl
+; FALLBACK10-NEXT: negb %cl
+; FALLBACK10-NEXT: movsbq %cl, %rcx
+; FALLBACK10-NEXT: shlxq %rax, -16(%rsp,%rcx), %rsi
+; FALLBACK10-NEXT: movq -24(%rsp,%rcx), %rdi
+; FALLBACK10-NEXT: shlxq %rax, %rdi, %r8
+; FALLBACK10-NEXT: movq -40(%rsp,%rcx), %r9
+; FALLBACK10-NEXT: movq -32(%rsp,%rcx), %rcx
+; FALLBACK10-NEXT: shlxq %rax, %rcx, %r10
+; FALLBACK10-NEXT: shlxq %rax, %r9, %r11
+; FALLBACK10-NEXT: # kill: def $al killed $al killed $rax def $rax
+; FALLBACK10-NEXT: notb %al
+; FALLBACK10-NEXT: shrq %rdi
+; FALLBACK10-NEXT: shrxq %rax, %rdi, %rdi
+; FALLBACK10-NEXT: orq %rsi, %rdi
+; FALLBACK10-NEXT: shrq %rcx
+; FALLBACK10-NEXT: shrxq %rax, %rcx, %rcx
+; FALLBACK10-NEXT: orq %r8, %rcx
+; FALLBACK10-NEXT: shrq %r9
+; FALLBACK10-NEXT: shrxq %rax, %r9, %rax
+; FALLBACK10-NEXT: orq %r10, %rax
+; FALLBACK10-NEXT: movq %r11, (%rdx)
+; FALLBACK10-NEXT: movq %rax, 8(%rdx)
+; FALLBACK10-NEXT: movq %rcx, 16(%rdx)
+; FALLBACK10-NEXT: movq %rdi, 24(%rdx)
+; FALLBACK10-NEXT: vzeroupper
+; FALLBACK10-NEXT: retq
+;
+; FALLBACK11-LABEL: shl_32bytes_dwordOff:
+; FALLBACK11: # %bb.0:
+; FALLBACK11-NEXT: vmovups (%rdi), %ymm0
+; FALLBACK11-NEXT: movzbl (%rsi), %eax
+; FALLBACK11-NEXT: movl %eax, %ecx
+; FALLBACK11-NEXT: shlb $5, %cl
+; FALLBACK11-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; FALLBACK11-NEXT: vmovups %ymm1, -{{[0-9]+}}(%rsp)
+; FALLBACK11-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
+; FALLBACK11-NEXT: shlb $2, %al
+; FALLBACK11-NEXT: andb $24, %al
+; FALLBACK11-NEXT: negb %al
+; FALLBACK11-NEXT: movsbq %al, %rax
+; FALLBACK11-NEXT: movq -24(%rsp,%rax), %rsi
+; FALLBACK11-NEXT: movq -16(%rsp,%rax), %rdi
+; FALLBACK11-NEXT: shldq %cl, %rsi, %rdi
+; FALLBACK11-NEXT: movq -40(%rsp,%rax), %r8
+; FALLBACK11-NEXT: movq -32(%rsp,%rax), %rax
+; FALLBACK11-NEXT: shldq %cl, %rax, %rsi
+; FALLBACK11-NEXT: shlxq %rcx, %r8, %r9
+; FALLBACK11-NEXT: # kill: def $cl killed $cl killed $rcx
+; FALLBACK11-NEXT: shldq %cl, %r8, %rax
+; FALLBACK11-NEXT: movq %rax, 8(%rdx)
+; FALLBACK11-NEXT: movq %rsi, 16(%rdx)
+; FALLBACK11-NEXT: movq %rdi, 24(%rdx)
+; FALLBACK11-NEXT: movq %r9, (%rdx)
+; FALLBACK11-NEXT: vzeroupper
+; FALLBACK11-NEXT: retq
+;
+; FALLBACK12-LABEL: shl_32bytes_dwordOff:
+; FALLBACK12: # %bb.0:
+; FALLBACK12-NEXT: vmovups (%rdi), %ymm0
+; FALLBACK12-NEXT: movzbl (%rsi), %ecx
+; FALLBACK12-NEXT: movl %ecx, %eax
+; FALLBACK12-NEXT: shlb $5, %al
+; FALLBACK12-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; FALLBACK12-NEXT: vmovups %ymm1, -{{[0-9]+}}(%rsp)
+; FALLBACK12-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
+; FALLBACK12-NEXT: shlb $2, %cl
+; FALLBACK12-NEXT: andb $24, %cl
+; FALLBACK12-NEXT: negb %cl
+; FALLBACK12-NEXT: movsbq %cl, %r8
+; FALLBACK12-NEXT: movq -16(%rsp,%r8), %r9
+; FALLBACK12-NEXT: movl %eax, %ecx
+; FALLBACK12-NEXT: shlq %cl, %r9
+; FALLBACK12-NEXT: movl %eax, %esi
+; FALLBACK12-NEXT: notb %sil
+; FALLBACK12-NEXT: movq -24(%rsp,%r8), %r10
+; FALLBACK12-NEXT: movq %r10, %rdi
+; FALLBACK12-NEXT: shrq %rdi
+; FALLBACK12-NEXT: movl %esi, %ecx
+; FALLBACK12-NEXT: shrq %cl, %rdi
+; FALLBACK12-NEXT: orq %r9, %rdi
+; FALLBACK12-NEXT: movl %eax, %ecx
+; FALLBACK12-NEXT: shlq %cl, %r10
+; FALLBACK12-NEXT: movq -40(%rsp,%r8), %r9
+; FALLBACK12-NEXT: movq -32(%rsp,%r8), %r8
+; FALLBACK12-NEXT: movq %r8, %r11
+; FALLBACK12-NEXT: shrq %r11
+; FALLBACK12-NEXT: movl %esi, %ecx
+; FALLBACK12-NEXT: shrq %cl, %r11
+; FALLBACK12-NEXT: orq %r10, %r11
+; FALLBACK12-NEXT: movl %eax, %ecx
+; FALLBACK12-NEXT: shlq %cl, %r8
+; FALLBACK12-NEXT: movq %r9, %r10
+; FALLBACK12-NEXT: shrq %r10
+; FALLBACK12-NEXT: movl %esi, %ecx
+; FALLBACK12-NEXT: shrq %cl, %r10
+; FALLBACK12-NEXT: orq %r8, %r10
+; FALLBACK12-NEXT: movl %eax, %ecx
+; FALLBACK12-NEXT: shlq %cl, %r9
+; FALLBACK12-NEXT: movq %r9, (%rdx)
+; FALLBACK12-NEXT: movq %r10, 8(%rdx)
+; FALLBACK12-NEXT: movq %r11, 16(%rdx)
+; FALLBACK12-NEXT: movq %rdi, 24(%rdx)
+; FALLBACK12-NEXT: vzeroupper
+; FALLBACK12-NEXT: retq
+;
+; FALLBACK13-LABEL: shl_32bytes_dwordOff:
+; FALLBACK13: # %bb.0:
+; FALLBACK13-NEXT: vmovups (%rdi), %ymm0
+; FALLBACK13-NEXT: movzbl (%rsi), %eax
+; FALLBACK13-NEXT: movl %eax, %ecx
+; FALLBACK13-NEXT: shlb $5, %cl
+; FALLBACK13-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; FALLBACK13-NEXT: vmovups %ymm1, -{{[0-9]+}}(%rsp)
+; FALLBACK13-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
+; FALLBACK13-NEXT: shlb $2, %al
+; FALLBACK13-NEXT: andb $24, %al
+; FALLBACK13-NEXT: negb %al
+; FALLBACK13-NEXT: movsbq %al, %rax
+; FALLBACK13-NEXT: movq -24(%rsp,%rax), %rsi
+; FALLBACK13-NEXT: movq -16(%rsp,%rax), %rdi
+; FALLBACK13-NEXT: shldq %cl, %rsi, %rdi
+; FALLBACK13-NEXT: movq -40(%rsp,%rax), %r8
+; FALLBACK13-NEXT: movq -32(%rsp,%rax), %rax
+; FALLBACK13-NEXT: shldq %cl, %rax, %rsi
+; FALLBACK13-NEXT: movq %r8, %r9
+; FALLBACK13-NEXT: shlq %cl, %r9
+; FALLBACK13-NEXT: shldq %cl, %r8, %rax
+; FALLBACK13-NEXT: movq %rax, 8(%rdx)
+; FALLBACK13-NEXT: movq %rsi, 16(%rdx)
+; FALLBACK13-NEXT: movq %rdi, 24(%rdx)
+; FALLBACK13-NEXT: movq %r9, (%rdx)
+; FALLBACK13-NEXT: vzeroupper
+; FALLBACK13-NEXT: retq
+;
+; FALLBACK14-LABEL: shl_32bytes_dwordOff:
+; FALLBACK14: # %bb.0:
+; FALLBACK14-NEXT: vmovups (%rdi), %ymm0
+; FALLBACK14-NEXT: movzbl (%rsi), %ecx
+; FALLBACK14-NEXT: movl %ecx, %eax
+; FALLBACK14-NEXT: shlb $5, %al
+; FALLBACK14-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; FALLBACK14-NEXT: vmovups %ymm1, -{{[0-9]+}}(%rsp)
+; FALLBACK14-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
+; FALLBACK14-NEXT: shlb $2, %cl
+; FALLBACK14-NEXT: andb $24, %cl
+; FALLBACK14-NEXT: negb %cl
+; FALLBACK14-NEXT: movsbq %cl, %rcx
+; FALLBACK14-NEXT: shlxq %rax, -16(%rsp,%rcx), %rsi
+; FALLBACK14-NEXT: movq -24(%rsp,%rcx), %rdi
+; FALLBACK14-NEXT: shlxq %rax, %rdi, %r8
+; FALLBACK14-NEXT: movq -40(%rsp,%rcx), %r9
+; FALLBACK14-NEXT: movq -32(%rsp,%rcx), %rcx
+; FALLBACK14-NEXT: shlxq %rax, %rcx, %r10
+; FALLBACK14-NEXT: shlxq %rax, %r9, %r11
+; FALLBACK14-NEXT: # kill: def $al killed $al killed $rax def $rax
+; FALLBACK14-NEXT: notb %al
+; FALLBACK14-NEXT: shrq %rdi
+; FALLBACK14-NEXT: shrxq %rax, %rdi, %rdi
+; FALLBACK14-NEXT: orq %rsi, %rdi
+; FALLBACK14-NEXT: shrq %rcx
+; FALLBACK14-NEXT: shrxq %rax, %rcx, %rcx
+; FALLBACK14-NEXT: orq %r8, %rcx
+; FALLBACK14-NEXT: shrq %r9
+; FALLBACK14-NEXT: shrxq %rax, %r9, %rax
+; FALLBACK14-NEXT: orq %r10, %rax
+; FALLBACK14-NEXT: movq %r11, (%rdx)
+; FALLBACK14-NEXT: movq %rax, 8(%rdx)
+; FALLBACK14-NEXT: movq %rcx, 16(%rdx)
+; FALLBACK14-NEXT: movq %rdi, 24(%rdx)
+; FALLBACK14-NEXT: vzeroupper
+; FALLBACK14-NEXT: retq
+;
+; FALLBACK15-LABEL: shl_32bytes_dwordOff:
+; FALLBACK15: # %bb.0:
+; FALLBACK15-NEXT: vmovups (%rdi), %ymm0
+; FALLBACK15-NEXT: movzbl (%rsi), %eax
+; FALLBACK15-NEXT: movl %eax, %ecx
+; FALLBACK15-NEXT: shlb $5, %cl
+; FALLBACK15-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; FALLBACK15-NEXT: vmovups %ymm1, -{{[0-9]+}}(%rsp)
+; FALLBACK15-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
+; FALLBACK15-NEXT: shlb $2, %al
+; FALLBACK15-NEXT: andb $24, %al
+; FALLBACK15-NEXT: negb %al
+; FALLBACK15-NEXT: movsbq %al, %rax
+; FALLBACK15-NEXT: movq -24(%rsp,%rax), %rsi
+; FALLBACK15-NEXT: movq -16(%rsp,%rax), %rdi
+; FALLBACK15-NEXT: shldq %cl, %rsi, %rdi
+; FALLBACK15-NEXT: movq -40(%rsp,%rax), %r8
+; FALLBACK15-NEXT: movq -32(%rsp,%rax), %rax
+; FALLBACK15-NEXT: shldq %cl, %rax, %rsi
+; FALLBACK15-NEXT: shlxq %rcx, %r8, %r9
+; FALLBACK15-NEXT: # kill: def $cl killed $cl killed $rcx
+; FALLBACK15-NEXT: shldq %cl, %r8, %rax
+; FALLBACK15-NEXT: movq %rax, 8(%rdx)
+; FALLBACK15-NEXT: movq %rsi, 16(%rdx)
+; FALLBACK15-NEXT: movq %rdi, 24(%rdx)
+; FALLBACK15-NEXT: movq %r9, (%rdx)
+; FALLBACK15-NEXT: vzeroupper
+; FALLBACK15-NEXT: retq
+;
+; X86-SSE2-LABEL: shl_32bytes_dwordOff:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: pushl %ebp
+; X86-SSE2-NEXT: pushl %ebx
+; X86-SSE2-NEXT: pushl %edi
+; X86-SSE2-NEXT: pushl %esi
+; X86-SSE2-NEXT: subl $92, %esp
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ebp
+; X86-SSE2-NEXT: movl (%ebp), %eax
+; X86-SSE2-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-SSE2-NEXT: movl 4(%ebp), %eax
+; X86-SSE2-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-SSE2-NEXT: movl 8(%ebp), %esi
+; X86-SSE2-NEXT: movl 12(%ebp), %edi
+; X86-SSE2-NEXT: movl 16(%ebp), %ebx
+; X86-SSE2-NEXT: movzbl (%ecx), %ecx
+; X86-SSE2-NEXT: movl 20(%ebp), %edx
+; X86-SSE2-NEXT: movl 24(%ebp), %eax
+; X86-SSE2-NEXT: movl 28(%ebp), %ebp
+; X86-SSE2-NEXT: movl %ebp, {{[0-9]+}}(%esp)
+; X86-SSE2-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-SSE2-NEXT: xorps %xmm0, %xmm0
+; X86-SSE2-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; X86-SSE2-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; X86-SSE2-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-SSE2-NEXT: movl %ebx, {{[0-9]+}}(%esp)
+; X86-SSE2-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; X86-SSE2-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; X86-SSE2-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-SSE2-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-SSE2-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-SSE2-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-SSE2-NEXT: shlb $2, %cl
+; X86-SSE2-NEXT: andb $28, %cl
+; X86-SSE2-NEXT: negb %cl
+; X86-SSE2-NEXT: movsbl %cl, %edx
+; X86-SSE2-NEXT: movl 48(%esp,%edx), %eax
+; X86-SSE2-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-SSE2-NEXT: movl 52(%esp,%edx), %eax
+; X86-SSE2-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-SSE2-NEXT: movl 60(%esp,%edx), %esi
+; X86-SSE2-NEXT: movl 56(%esp,%edx), %edi
+; X86-SSE2-NEXT: movl 68(%esp,%edx), %ebx
+; X86-SSE2-NEXT: movl 64(%esp,%edx), %ebp
+; X86-SSE2-NEXT: movl 76(%esp,%edx), %ecx
+; X86-SSE2-NEXT: movl 72(%esp,%edx), %edx
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: movl %edx, 24(%eax)
+; X86-SSE2-NEXT: movl %ecx, 28(%eax)
+; X86-SSE2-NEXT: movl %ebp, 16(%eax)
+; X86-SSE2-NEXT: movl %ebx, 20(%eax)
+; X86-SSE2-NEXT: movl %edi, 8(%eax)
+; X86-SSE2-NEXT: movl %esi, 12(%eax)
+; X86-SSE2-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-SSE2-NEXT: movl %ecx, (%eax)
+; X86-SSE2-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-SSE2-NEXT: movl %ecx, 4(%eax)
+; X86-SSE2-NEXT: addl $92, %esp
+; X86-SSE2-NEXT: popl %esi
+; X86-SSE2-NEXT: popl %edi
+; X86-SSE2-NEXT: popl %ebx
+; X86-SSE2-NEXT: popl %ebp
+; X86-SSE2-NEXT: retl
+;
+; X86-SSE42-LABEL: shl_32bytes_dwordOff:
+; X86-SSE42: # %bb.0:
+; X86-SSE42-NEXT: subl $76, %esp
+; X86-SSE42-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE42-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE42-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-SSE42-NEXT: movups (%edx), %xmm0
+; X86-SSE42-NEXT: movups 16(%edx), %xmm1
+; X86-SSE42-NEXT: movzbl (%ecx), %ecx
+; X86-SSE42-NEXT: xorps %xmm2, %xmm2
+; X86-SSE42-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-SSE42-NEXT: movaps %xmm2, (%esp)
+; X86-SSE42-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-SSE42-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; X86-SSE42-NEXT: shlb $2, %cl
+; X86-SSE42-NEXT: andb $28, %cl
+; X86-SSE42-NEXT: negb %cl
+; X86-SSE42-NEXT: movsbl %cl, %ecx
+; X86-SSE42-NEXT: movups 32(%esp,%ecx), %xmm0
+; X86-SSE42-NEXT: movups 48(%esp,%ecx), %xmm1
+; X86-SSE42-NEXT: movups %xmm1, 16(%eax)
+; X86-SSE42-NEXT: movups %xmm0, (%eax)
+; X86-SSE42-NEXT: addl $76, %esp
+; X86-SSE42-NEXT: retl
+;
+; X86-AVX-LABEL: shl_32bytes_dwordOff:
+; X86-AVX: # %bb.0:
+; X86-AVX-NEXT: subl $76, %esp
+; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-AVX-NEXT: vmovups (%edx), %ymm0
+; X86-AVX-NEXT: movzbl (%ecx), %ecx
+; X86-AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; X86-AVX-NEXT: vmovups %ymm1, (%esp)
+; X86-AVX-NEXT: vmovups %ymm0, {{[0-9]+}}(%esp)
+; X86-AVX-NEXT: shlb $2, %cl
+; X86-AVX-NEXT: andb $28, %cl
+; X86-AVX-NEXT: negb %cl
+; X86-AVX-NEXT: movsbl %cl, %ecx
+; X86-AVX-NEXT: vmovups 32(%esp,%ecx), %xmm0
+; X86-AVX-NEXT: vmovups 48(%esp,%ecx), %xmm1
+; X86-AVX-NEXT: vmovups %xmm1, 16(%eax)
+; X86-AVX-NEXT: vmovups %xmm0, (%eax)
+; X86-AVX-NEXT: addl $76, %esp
+; X86-AVX-NEXT: vzeroupper
+; X86-AVX-NEXT: retl
+ %src = load i256, ptr %src.ptr, align 1
+ %dwordOff = load i256, ptr %dwordOff.ptr, align 1
+ %bitOff = shl i256 %dwordOff, 5
+ %res = shl i256 %src, %bitOff
+ store i256 %res, ptr %dst, align 1
+ ret void
+}
+
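+; Same again at 64-bit qword granularity: the offset is scaled by 64 via
+; `shl i256 %qwordOff, 6`, so every shift moves whole qwords and the SSE/AVX
+; fallbacks can lower the shift to unaligned loads from the shifted stack slot.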
+define void @shl_32bytes_qwordOff(ptr %src.ptr, ptr %qwordOff.ptr, ptr %dst) nounwind {
+; X64-SSE2-LABEL: shl_32bytes_qwordOff:
; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: movq (%rdi), %rax
; X64-SSE2-NEXT: movq 8(%rdi), %rcx
; X64-SSE2-NEXT: movq 16(%rdi), %r8
; X64-SSE2-NEXT: movq 24(%rdi), %rdi
; X64-SSE2-NEXT: movzbl (%rsi), %esi
+; X64-SSE2-NEXT: xorps %xmm0, %xmm0
+; X64-SSE2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; X64-SSE2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; X64-SSE2-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
; X64-SSE2-NEXT: movq %r8, -{{[0-9]+}}(%rsp)
; X64-SSE2-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
; X64-SSE2-NEXT: movq %rax, -{{[0-9]+}}(%rsp)
-; X64-SSE2-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-SSE2-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-SSE2-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-SSE2-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-SSE2-NEXT: andb $31, %sil
+; X64-SSE2-NEXT: shlb $3, %sil
+; X64-SSE2-NEXT: andb $24, %sil
; X64-SSE2-NEXT: negb %sil
; X64-SSE2-NEXT: movsbq %sil, %rax
-; X64-SSE2-NEXT: movq -32(%rsp,%rax), %rcx
-; X64-SSE2-NEXT: movq -24(%rsp,%rax), %rsi
-; X64-SSE2-NEXT: movq -8(%rsp,%rax), %rdi
-; X64-SSE2-NEXT: movq -16(%rsp,%rax), %rax
+; X64-SSE2-NEXT: movq -40(%rsp,%rax), %rcx
+; X64-SSE2-NEXT: movq -32(%rsp,%rax), %rsi
+; X64-SSE2-NEXT: movq -16(%rsp,%rax), %rdi
+; X64-SSE2-NEXT: movq -24(%rsp,%rax), %rax
; X64-SSE2-NEXT: movq %rax, 16(%rdx)
; X64-SSE2-NEXT: movq %rdi, 24(%rdx)
; X64-SSE2-NEXT: movq %rcx, (%rdx)
; X64-SSE2-NEXT: movq %rsi, 8(%rdx)
; X64-SSE2-NEXT: retq
;
-; X64-SSE42-LABEL: shl_32bytes:
+; X64-SSE42-LABEL: shl_32bytes_qwordOff:
; X64-SSE42: # %bb.0:
; X64-SSE42-NEXT: movups (%rdi), %xmm0
; X64-SSE42-NEXT: movups 16(%rdi), %xmm1
; X64-SSE42-NEXT: movzbl (%rsi), %eax
; X64-SSE42-NEXT: xorps %xmm2, %xmm2
-; X64-SSE42-NEXT: movups %xmm2, -{{[0-9]+}}(%rsp)
-; X64-SSE42-NEXT: movups %xmm2, -{{[0-9]+}}(%rsp)
-; X64-SSE42-NEXT: movups %xmm1, -{{[0-9]+}}(%rsp)
-; X64-SSE42-NEXT: movups %xmm0, -{{[0-9]+}}(%rsp)
-; X64-SSE42-NEXT: andb $31, %al
+; X64-SSE42-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-SSE42-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-SSE42-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; X64-SSE42-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; X64-SSE42-NEXT: shlb $3, %al
+; X64-SSE42-NEXT: andb $24, %al
; X64-SSE42-NEXT: negb %al
; X64-SSE42-NEXT: movsbq %al, %rax
-; X64-SSE42-NEXT: movups -32(%rsp,%rax), %xmm0
-; X64-SSE42-NEXT: movups -16(%rsp,%rax), %xmm1
+; X64-SSE42-NEXT: movups -40(%rsp,%rax), %xmm0
+; X64-SSE42-NEXT: movups -24(%rsp,%rax), %xmm1
; X64-SSE42-NEXT: movups %xmm1, 16(%rdx)
; X64-SSE42-NEXT: movups %xmm0, (%rdx)
; X64-SSE42-NEXT: retq
;
-; X64-AVX-LABEL: shl_32bytes:
+; X64-AVX-LABEL: shl_32bytes_qwordOff:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vmovups (%rdi), %ymm0
; X64-AVX-NEXT: movzbl (%rsi), %eax
; X64-AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X64-AVX-NEXT: vmovups %ymm1, -{{[0-9]+}}(%rsp)
; X64-AVX-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
-; X64-AVX-NEXT: andb $31, %al
+; X64-AVX-NEXT: shlb $3, %al
+; X64-AVX-NEXT: andb $24, %al
; X64-AVX-NEXT: negb %al
; X64-AVX-NEXT: movsbq %al, %rax
-; X64-AVX-NEXT: vmovups -32(%rsp,%rax), %xmm0
-; X64-AVX-NEXT: vmovups -16(%rsp,%rax), %xmm1
+; X64-AVX-NEXT: vmovups -40(%rsp,%rax), %xmm0
+; X64-AVX-NEXT: vmovups -24(%rsp,%rax), %xmm1
; X64-AVX-NEXT: vmovups %xmm1, 16(%rdx)
; X64-AVX-NEXT: vmovups %xmm0, (%rdx)
; X64-AVX-NEXT: vzeroupper
; X64-AVX-NEXT: retq
;
-; X86-SSE2-LABEL: shl_32bytes:
+; X86-SSE2-LABEL: shl_32bytes_qwordOff:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pushl %ebp
; X86-SSE2-NEXT: pushl %ebx
; X86-SSE2-NEXT: pushl %edi
; X86-SSE2-NEXT: pushl %esi
-; X86-SSE2-NEXT: subl $72, %esp
-; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X86-SSE2-NEXT: movl (%edi), %ecx
-; X86-SSE2-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-SSE2-NEXT: movl 4(%edi), %ecx
-; X86-SSE2-NEXT: movl %ecx, (%esp) # 4-byte Spill
-; X86-SSE2-NEXT: movl 8(%edi), %esi
-; X86-SSE2-NEXT: movl 12(%edi), %ebx
-; X86-SSE2-NEXT: movl 16(%edi), %ebp
-; X86-SSE2-NEXT: movzbl (%eax), %eax
-; X86-SSE2-NEXT: movl 20(%edi), %edx
-; X86-SSE2-NEXT: movl 24(%edi), %ecx
-; X86-SSE2-NEXT: movl 28(%edi), %edi
-; X86-SSE2-NEXT: movl %edi, {{[0-9]+}}(%esp)
-; X86-SSE2-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; X86-SSE2-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-SSE2-NEXT: subl $92, %esp
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ebp
+; X86-SSE2-NEXT: movl (%ebp), %eax
+; X86-SSE2-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-SSE2-NEXT: movl 4(%ebp), %eax
+; X86-SSE2-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-SSE2-NEXT: movl 8(%ebp), %esi
+; X86-SSE2-NEXT: movl 12(%ebp), %edi
+; X86-SSE2-NEXT: movl 16(%ebp), %ebx
+; X86-SSE2-NEXT: movzbl (%ecx), %ecx
+; X86-SSE2-NEXT: movl 20(%ebp), %edx
+; X86-SSE2-NEXT: movl 24(%ebp), %eax
+; X86-SSE2-NEXT: movl 28(%ebp), %ebp
; X86-SSE2-NEXT: movl %ebp, {{[0-9]+}}(%esp)
+; X86-SSE2-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-SSE2-NEXT: xorps %xmm0, %xmm0
+; X86-SSE2-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; X86-SSE2-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; X86-SSE2-NEXT: movl %edx, {{[0-9]+}}(%esp)
; X86-SSE2-NEXT: movl %ebx, {{[0-9]+}}(%esp)
+; X86-SSE2-NEXT: movl %edi, {{[0-9]+}}(%esp)
; X86-SSE2-NEXT: movl %esi, {{[0-9]+}}(%esp)
-; X86-SSE2-NEXT: movl (%esp), %ecx # 4-byte Reload
-; X86-SSE2-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; X86-SSE2-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-SSE2-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; X86-SSE2-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-SSE2-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-SSE2-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-SSE2-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-SSE2-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-SSE2-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-SSE2-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-SSE2-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-SSE2-NEXT: andb $31, %al
-; X86-SSE2-NEXT: negb %al
-; X86-SSE2-NEXT: movsbl %al, %edx
-; X86-SSE2-NEXT: movl 40(%esp,%edx), %eax
+; X86-SSE2-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-SSE2-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-SSE2-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-SSE2-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-SSE2-NEXT: shlb $3, %cl
+; X86-SSE2-NEXT: andb $24, %cl
+; X86-SSE2-NEXT: negb %cl
+; X86-SSE2-NEXT: movsbl %cl, %edx
+; X86-SSE2-NEXT: movl 48(%esp,%edx), %eax
; X86-SSE2-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-SSE2-NEXT: movl 44(%esp,%edx), %eax
-; X86-SSE2-NEXT: movl %eax, (%esp) # 4-byte Spill
-; X86-SSE2-NEXT: movl 52(%esp,%edx), %esi
-; X86-SSE2-NEXT: movl 48(%esp,%edx), %edi
-; X86-SSE2-NEXT: movl 60(%esp,%edx), %ebx
-; X86-SSE2-NEXT: movl 56(%esp,%edx), %ebp
-; X86-SSE2-NEXT: movl 68(%esp,%edx), %ecx
-; X86-SSE2-NEXT: movl 64(%esp,%edx), %edx
+; X86-SSE2-NEXT: movl 52(%esp,%edx), %eax
+; X86-SSE2-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-SSE2-NEXT: movl 60(%esp,%edx), %esi
+; X86-SSE2-NEXT: movl 56(%esp,%edx), %edi
+; X86-SSE2-NEXT: movl 68(%esp,%edx), %ebx
+; X86-SSE2-NEXT: movl 64(%esp,%edx), %ebp
+; X86-SSE2-NEXT: movl 76(%esp,%edx), %ecx
+; X86-SSE2-NEXT: movl 72(%esp,%edx), %edx
; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE2-NEXT: movl %edx, 24(%eax)
; X86-SSE2-NEXT: movl %ecx, 28(%eax)
@@ -1368,18 +8741,18 @@ define void @shl_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; X86-SSE2-NEXT: movl %esi, 12(%eax)
; X86-SSE2-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-SSE2-NEXT: movl %ecx, (%eax)
-; X86-SSE2-NEXT: movl (%esp), %ecx # 4-byte Reload
+; X86-SSE2-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-SSE2-NEXT: movl %ecx, 4(%eax)
-; X86-SSE2-NEXT: addl $72, %esp
+; X86-SSE2-NEXT: addl $92, %esp
; X86-SSE2-NEXT: popl %esi
; X86-SSE2-NEXT: popl %edi
; X86-SSE2-NEXT: popl %ebx
; X86-SSE2-NEXT: popl %ebp
; X86-SSE2-NEXT: retl
;
-; X86-SSE42-LABEL: shl_32bytes:
+; X86-SSE42-LABEL: shl_32bytes_qwordOff:
; X86-SSE42: # %bb.0:
-; X86-SSE42-NEXT: subl $64, %esp
+; X86-SSE42-NEXT: subl $76, %esp
; X86-SSE42-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE42-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-SSE42-NEXT: movl {{[0-9]+}}(%esp), %edx
@@ -1387,23 +8760,24 @@ define void @shl_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; X86-SSE42-NEXT: movups 16(%edx), %xmm1
; X86-SSE42-NEXT: movzbl (%ecx), %ecx
; X86-SSE42-NEXT: xorps %xmm2, %xmm2
-; X86-SSE42-NEXT: movups %xmm2, {{[0-9]+}}(%esp)
-; X86-SSE42-NEXT: movups %xmm2, (%esp)
-; X86-SSE42-NEXT: movups %xmm1, {{[0-9]+}}(%esp)
-; X86-SSE42-NEXT: movups %xmm0, {{[0-9]+}}(%esp)
-; X86-SSE42-NEXT: andb $31, %cl
+; X86-SSE42-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-SSE42-NEXT: movaps %xmm2, (%esp)
+; X86-SSE42-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-SSE42-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; X86-SSE42-NEXT: shlb $3, %cl
+; X86-SSE42-NEXT: andb $24, %cl
; X86-SSE42-NEXT: negb %cl
; X86-SSE42-NEXT: movsbl %cl, %ecx
; X86-SSE42-NEXT: movups 32(%esp,%ecx), %xmm0
; X86-SSE42-NEXT: movups 48(%esp,%ecx), %xmm1
; X86-SSE42-NEXT: movups %xmm1, 16(%eax)
; X86-SSE42-NEXT: movups %xmm0, (%eax)
-; X86-SSE42-NEXT: addl $64, %esp
+; X86-SSE42-NEXT: addl $76, %esp
; X86-SSE42-NEXT: retl
;
-; X86-AVX-LABEL: shl_32bytes:
+; X86-AVX-LABEL: shl_32bytes_qwordOff:
; X86-AVX: # %bb.0:
-; X86-AVX-NEXT: subl $64, %esp
+; X86-AVX-NEXT: subl $76, %esp
; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %edx
@@ -1412,25 +8786,3037 @@ define void @shl_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; X86-AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X86-AVX-NEXT: vmovups %ymm1, (%esp)
; X86-AVX-NEXT: vmovups %ymm0, {{[0-9]+}}(%esp)
-; X86-AVX-NEXT: andb $31, %cl
+; X86-AVX-NEXT: shlb $3, %cl
+; X86-AVX-NEXT: andb $24, %cl
; X86-AVX-NEXT: negb %cl
; X86-AVX-NEXT: movsbl %cl, %ecx
; X86-AVX-NEXT: vmovups 32(%esp,%ecx), %xmm0
; X86-AVX-NEXT: vmovups 48(%esp,%ecx), %xmm1
; X86-AVX-NEXT: vmovups %xmm1, 16(%eax)
; X86-AVX-NEXT: vmovups %xmm0, (%eax)
-; X86-AVX-NEXT: addl $64, %esp
+; X86-AVX-NEXT: addl $76, %esp
; X86-AVX-NEXT: vzeroupper
; X86-AVX-NEXT: retl
%src = load i256, ptr %src.ptr, align 1
- %byteOff = load i256, ptr %byteOff.ptr, align 1
- %bitOff = shl i256 %byteOff, 3
+ %qwordOff = load i256, ptr %qwordOff.ptr, align 1
+ %bitOff = shl i256 %qwordOff, 6
%res = shl i256 %src, %bitOff
store i256 %res, ptr %dst, align 1
ret void
}
+
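+; Arithmetic shift right of the 32-byte value by a byte-granular offset; the
+; stack slots above the spilled value are filled with the sign word computed
+; by `sarq $63, %rdi`, so the bits shifted in at the top carry the sign.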
define void @ashr_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
-; X64-SSE2-LABEL: ashr_32bytes:
+; FALLBACK0-LABEL: ashr_32bytes:
+; FALLBACK0: # %bb.0:
+; FALLBACK0-NEXT: pushq %rbx
+; FALLBACK0-NEXT: movq (%rdi), %rcx
+; FALLBACK0-NEXT: movq 8(%rdi), %r8
+; FALLBACK0-NEXT: movq 16(%rdi), %r9
+; FALLBACK0-NEXT: movq 24(%rdi), %rdi
+; FALLBACK0-NEXT: movzbl (%rsi), %esi
+; FALLBACK0-NEXT: leal (,%rsi,8), %eax
+; FALLBACK0-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK0-NEXT: movq %r9, -{{[0-9]+}}(%rsp)
+; FALLBACK0-NEXT: movq %r8, -{{[0-9]+}}(%rsp)
+; FALLBACK0-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
+; FALLBACK0-NEXT: sarq $63, %rdi
+; FALLBACK0-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK0-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK0-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK0-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK0-NEXT: andb $24, %sil
+; FALLBACK0-NEXT: movzbl %sil, %r9d
+; FALLBACK0-NEXT: movq -64(%rsp,%r9), %r10
+; FALLBACK0-NEXT: movq -56(%rsp,%r9), %rdi
+; FALLBACK0-NEXT: movq %rdi, %r11
+; FALLBACK0-NEXT: movl %eax, %ecx
+; FALLBACK0-NEXT: shrq %cl, %r11
+; FALLBACK0-NEXT: movl %eax, %esi
+; FALLBACK0-NEXT: notb %sil
+; FALLBACK0-NEXT: movq -48(%rsp,%r9), %rbx
+; FALLBACK0-NEXT: leaq (%rbx,%rbx), %r8
+; FALLBACK0-NEXT: movl %esi, %ecx
+; FALLBACK0-NEXT: shlq %cl, %r8
+; FALLBACK0-NEXT: orq %r11, %r8
+; FALLBACK0-NEXT: movl %eax, %ecx
+; FALLBACK0-NEXT: shrq %cl, %r10
+; FALLBACK0-NEXT: addq %rdi, %rdi
+; FALLBACK0-NEXT: movl %esi, %ecx
+; FALLBACK0-NEXT: shlq %cl, %rdi
+; FALLBACK0-NEXT: orq %r10, %rdi
+; FALLBACK0-NEXT: movl %eax, %ecx
+; FALLBACK0-NEXT: shrq %cl, %rbx
+; FALLBACK0-NEXT: movq -40(%rsp,%r9), %r9
+; FALLBACK0-NEXT: leaq (%r9,%r9), %r10
+; FALLBACK0-NEXT: movl %esi, %ecx
+; FALLBACK0-NEXT: shlq %cl, %r10
+; FALLBACK0-NEXT: orq %rbx, %r10
+; FALLBACK0-NEXT: movl %eax, %ecx
+; FALLBACK0-NEXT: sarq %cl, %r9
+; FALLBACK0-NEXT: movq %r9, 24(%rdx)
+; FALLBACK0-NEXT: movq %r10, 16(%rdx)
+; FALLBACK0-NEXT: movq %rdi, (%rdx)
+; FALLBACK0-NEXT: movq %r8, 8(%rdx)
+; FALLBACK0-NEXT: popq %rbx
+; FALLBACK0-NEXT: retq
+;
+; FALLBACK1-LABEL: ashr_32bytes:
+; FALLBACK1: # %bb.0:
+; FALLBACK1-NEXT: movq (%rdi), %rax
+; FALLBACK1-NEXT: movq 8(%rdi), %r8
+; FALLBACK1-NEXT: movq 16(%rdi), %r9
+; FALLBACK1-NEXT: movq 24(%rdi), %rdi
+; FALLBACK1-NEXT: movzbl (%rsi), %esi
+; FALLBACK1-NEXT: leal (,%rsi,8), %ecx
+; FALLBACK1-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK1-NEXT: movq %r9, -{{[0-9]+}}(%rsp)
+; FALLBACK1-NEXT: movq %r8, -{{[0-9]+}}(%rsp)
+; FALLBACK1-NEXT: movq %rax, -{{[0-9]+}}(%rsp)
+; FALLBACK1-NEXT: sarq $63, %rdi
+; FALLBACK1-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK1-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK1-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK1-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK1-NEXT: andb $24, %sil
+; FALLBACK1-NEXT: movzbl %sil, %eax
+; FALLBACK1-NEXT: movq -56(%rsp,%rax), %rsi
+; FALLBACK1-NEXT: movq -72(%rsp,%rax), %rdi
+; FALLBACK1-NEXT: movq -64(%rsp,%rax), %r8
+; FALLBACK1-NEXT: movq %r8, %r9
+; FALLBACK1-NEXT: shrdq %cl, %rsi, %r9
+; FALLBACK1-NEXT: movq -48(%rsp,%rax), %rax
+; FALLBACK1-NEXT: shrdq %cl, %rax, %rsi
+; FALLBACK1-NEXT: shrdq %cl, %r8, %rdi
+; FALLBACK1-NEXT: # kill: def $cl killed $cl killed $ecx
+; FALLBACK1-NEXT: sarq %cl, %rax
+; FALLBACK1-NEXT: movq %rsi, 16(%rdx)
+; FALLBACK1-NEXT: movq %rax, 24(%rdx)
+; FALLBACK1-NEXT: movq %rdi, (%rdx)
+; FALLBACK1-NEXT: movq %r9, 8(%rdx)
+; FALLBACK1-NEXT: retq
+;
+; FALLBACK2-LABEL: ashr_32bytes:
+; FALLBACK2: # %bb.0:
+; FALLBACK2-NEXT: movq (%rdi), %rcx
+; FALLBACK2-NEXT: movq 8(%rdi), %r8
+; FALLBACK2-NEXT: movq 16(%rdi), %r9
+; FALLBACK2-NEXT: movq 24(%rdi), %rdi
+; FALLBACK2-NEXT: movzbl (%rsi), %esi
+; FALLBACK2-NEXT: leal (,%rsi,8), %eax
+; FALLBACK2-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK2-NEXT: movq %r9, -{{[0-9]+}}(%rsp)
+; FALLBACK2-NEXT: movq %r8, -{{[0-9]+}}(%rsp)
+; FALLBACK2-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
+; FALLBACK2-NEXT: sarq $63, %rdi
+; FALLBACK2-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK2-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK2-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK2-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK2-NEXT: andb $24, %sil
+; FALLBACK2-NEXT: movzbl %sil, %ecx
+; FALLBACK2-NEXT: movq -64(%rsp,%rcx), %rsi
+; FALLBACK2-NEXT: movq -56(%rsp,%rcx), %rdi
+; FALLBACK2-NEXT: shrxq %rax, %rsi, %r8
+; FALLBACK2-NEXT: shrxq %rax, -72(%rsp,%rcx), %r9
+; FALLBACK2-NEXT: shrxq %rax, %rdi, %r10
+; FALLBACK2-NEXT: movq -48(%rsp,%rcx), %rcx
+; FALLBACK2-NEXT: sarxq %rax, %rcx, %r11
+; FALLBACK2-NEXT: # kill: def $al killed $al killed $rax def $rax
+; FALLBACK2-NEXT: notb %al
+; FALLBACK2-NEXT: addq %rdi, %rdi
+; FALLBACK2-NEXT: shlxq %rax, %rdi, %rdi
+; FALLBACK2-NEXT: orq %r8, %rdi
+; FALLBACK2-NEXT: addq %rsi, %rsi
+; FALLBACK2-NEXT: shlxq %rax, %rsi, %rsi
+; FALLBACK2-NEXT: orq %r9, %rsi
+; FALLBACK2-NEXT: addq %rcx, %rcx
+; FALLBACK2-NEXT: shlxq %rax, %rcx, %rax
+; FALLBACK2-NEXT: orq %r10, %rax
+; FALLBACK2-NEXT: movq %r11, 24(%rdx)
+; FALLBACK2-NEXT: movq %rax, 16(%rdx)
+; FALLBACK2-NEXT: movq %rsi, (%rdx)
+; FALLBACK2-NEXT: movq %rdi, 8(%rdx)
+; FALLBACK2-NEXT: retq
+;
+; FALLBACK3-LABEL: ashr_32bytes:
+; FALLBACK3: # %bb.0:
+; FALLBACK3-NEXT: movq (%rdi), %rax
+; FALLBACK3-NEXT: movq 8(%rdi), %r8
+; FALLBACK3-NEXT: movq 16(%rdi), %r9
+; FALLBACK3-NEXT: movq 24(%rdi), %rdi
+; FALLBACK3-NEXT: movzbl (%rsi), %esi
+; FALLBACK3-NEXT: leal (,%rsi,8), %ecx
+; FALLBACK3-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK3-NEXT: movq %r9, -{{[0-9]+}}(%rsp)
+; FALLBACK3-NEXT: movq %r8, -{{[0-9]+}}(%rsp)
+; FALLBACK3-NEXT: movq %rax, -{{[0-9]+}}(%rsp)
+; FALLBACK3-NEXT: sarq $63, %rdi
+; FALLBACK3-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK3-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK3-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK3-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK3-NEXT: andb $24, %sil
+; FALLBACK3-NEXT: movzbl %sil, %eax
+; FALLBACK3-NEXT: movq -56(%rsp,%rax), %rsi
+; FALLBACK3-NEXT: movq -72(%rsp,%rax), %rdi
+; FALLBACK3-NEXT: movq -64(%rsp,%rax), %r8
+; FALLBACK3-NEXT: movq %r8, %r9
+; FALLBACK3-NEXT: shrdq %cl, %rsi, %r9
+; FALLBACK3-NEXT: movq -48(%rsp,%rax), %rax
+; FALLBACK3-NEXT: shrdq %cl, %rax, %rsi
+; FALLBACK3-NEXT: shrdq %cl, %r8, %rdi
+; FALLBACK3-NEXT: sarxq %rcx, %rax, %rax
+; FALLBACK3-NEXT: movq %rsi, 16(%rdx)
+; FALLBACK3-NEXT: movq %rax, 24(%rdx)
+; FALLBACK3-NEXT: movq %rdi, (%rdx)
+; FALLBACK3-NEXT: movq %r9, 8(%rdx)
+; FALLBACK3-NEXT: retq
+;
+; FALLBACK4-LABEL: ashr_32bytes:
+; FALLBACK4: # %bb.0:
+; FALLBACK4-NEXT: pushq %rbx
+; FALLBACK4-NEXT: movups (%rdi), %xmm0
+; FALLBACK4-NEXT: movq 16(%rdi), %rcx
+; FALLBACK4-NEXT: movq 24(%rdi), %rdi
+; FALLBACK4-NEXT: movzbl (%rsi), %esi
+; FALLBACK4-NEXT: leal (,%rsi,8), %eax
+; FALLBACK4-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK4-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
+; FALLBACK4-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK4-NEXT: sarq $63, %rdi
+; FALLBACK4-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK4-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK4-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK4-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK4-NEXT: andb $24, %sil
+; FALLBACK4-NEXT: movzbl %sil, %r9d
+; FALLBACK4-NEXT: movq -64(%rsp,%r9), %r10
+; FALLBACK4-NEXT: movq -56(%rsp,%r9), %r8
+; FALLBACK4-NEXT: movl %eax, %ecx
+; FALLBACK4-NEXT: shrq %cl, %r10
+; FALLBACK4-NEXT: movl %eax, %esi
+; FALLBACK4-NEXT: notb %sil
+; FALLBACK4-NEXT: leaq (%r8,%r8), %rdi
+; FALLBACK4-NEXT: movl %esi, %ecx
+; FALLBACK4-NEXT: shlq %cl, %rdi
+; FALLBACK4-NEXT: orq %r10, %rdi
+; FALLBACK4-NEXT: movq -48(%rsp,%r9), %r10
+; FALLBACK4-NEXT: movq %r10, %r11
+; FALLBACK4-NEXT: movl %eax, %ecx
+; FALLBACK4-NEXT: shrq %cl, %r11
+; FALLBACK4-NEXT: movq -40(%rsp,%r9), %r9
+; FALLBACK4-NEXT: leaq (%r9,%r9), %rbx
+; FALLBACK4-NEXT: movl %esi, %ecx
+; FALLBACK4-NEXT: shlq %cl, %rbx
+; FALLBACK4-NEXT: orq %r11, %rbx
+; FALLBACK4-NEXT: movl %eax, %ecx
+; FALLBACK4-NEXT: shrq %cl, %r8
+; FALLBACK4-NEXT: addq %r10, %r10
+; FALLBACK4-NEXT: movl %esi, %ecx
+; FALLBACK4-NEXT: shlq %cl, %r10
+; FALLBACK4-NEXT: orq %r8, %r10
+; FALLBACK4-NEXT: movl %eax, %ecx
+; FALLBACK4-NEXT: sarq %cl, %r9
+; FALLBACK4-NEXT: movq %r9, 24(%rdx)
+; FALLBACK4-NEXT: movq %r10, 8(%rdx)
+; FALLBACK4-NEXT: movq %rbx, 16(%rdx)
+; FALLBACK4-NEXT: movq %rdi, (%rdx)
+; FALLBACK4-NEXT: popq %rbx
+; FALLBACK4-NEXT: retq
+;
+; FALLBACK5-LABEL: ashr_32bytes:
+; FALLBACK5: # %bb.0:
+; FALLBACK5-NEXT: movups (%rdi), %xmm0
+; FALLBACK5-NEXT: movq 16(%rdi), %rax
+; FALLBACK5-NEXT: movq 24(%rdi), %rdi
+; FALLBACK5-NEXT: movzbl (%rsi), %esi
+; FALLBACK5-NEXT: leal (,%rsi,8), %ecx
+; FALLBACK5-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK5-NEXT: movq %rax, -{{[0-9]+}}(%rsp)
+; FALLBACK5-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK5-NEXT: sarq $63, %rdi
+; FALLBACK5-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK5-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK5-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK5-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK5-NEXT: andb $24, %sil
+; FALLBACK5-NEXT: movzbl %sil, %eax
+; FALLBACK5-NEXT: movq -48(%rsp,%rax), %rsi
+; FALLBACK5-NEXT: movq -56(%rsp,%rax), %rdi
+; FALLBACK5-NEXT: movq %rdi, %r8
+; FALLBACK5-NEXT: shrdq %cl, %rsi, %r8
+; FALLBACK5-NEXT: movq -72(%rsp,%rax), %r9
+; FALLBACK5-NEXT: movq -64(%rsp,%rax), %rax
+; FALLBACK5-NEXT: movq %rax, %r10
+; FALLBACK5-NEXT: shrdq %cl, %rdi, %r10
+; FALLBACK5-NEXT: shrdq %cl, %rax, %r9
+; FALLBACK5-NEXT: # kill: def $cl killed $cl killed $ecx
+; FALLBACK5-NEXT: sarq %cl, %rsi
+; FALLBACK5-NEXT: movq %r10, 8(%rdx)
+; FALLBACK5-NEXT: movq %r8, 16(%rdx)
+; FALLBACK5-NEXT: movq %rsi, 24(%rdx)
+; FALLBACK5-NEXT: movq %r9, (%rdx)
+; FALLBACK5-NEXT: retq
+;
+; FALLBACK6-LABEL: ashr_32bytes:
+; FALLBACK6: # %bb.0:
+; FALLBACK6-NEXT: movups (%rdi), %xmm0
+; FALLBACK6-NEXT: movq 16(%rdi), %rcx
+; FALLBACK6-NEXT: movq 24(%rdi), %rdi
+; FALLBACK6-NEXT: movzbl (%rsi), %esi
+; FALLBACK6-NEXT: leal (,%rsi,8), %eax
+; FALLBACK6-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK6-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
+; FALLBACK6-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK6-NEXT: sarq $63, %rdi
+; FALLBACK6-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK6-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK6-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK6-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK6-NEXT: andb $24, %sil
+; FALLBACK6-NEXT: movzbl %sil, %ecx
+; FALLBACK6-NEXT: shrxq %rax, -72(%rsp,%rcx), %rsi
+; FALLBACK6-NEXT: movq -64(%rsp,%rcx), %rdi
+; FALLBACK6-NEXT: movq -56(%rsp,%rcx), %r8
+; FALLBACK6-NEXT: shrxq %rax, %r8, %r9
+; FALLBACK6-NEXT: movq -48(%rsp,%rcx), %rcx
+; FALLBACK6-NEXT: shrxq %rax, %rdi, %r10
+; FALLBACK6-NEXT: sarxq %rax, %rcx, %r11
+; FALLBACK6-NEXT: # kill: def $al killed $al killed $rax def $rax
+; FALLBACK6-NEXT: notb %al
+; FALLBACK6-NEXT: addq %rdi, %rdi
+; FALLBACK6-NEXT: shlxq %rax, %rdi, %rdi
+; FALLBACK6-NEXT: orq %rsi, %rdi
+; FALLBACK6-NEXT: addq %rcx, %rcx
+; FALLBACK6-NEXT: shlxq %rax, %rcx, %rcx
+; FALLBACK6-NEXT: orq %r9, %rcx
+; FALLBACK6-NEXT: addq %r8, %r8
+; FALLBACK6-NEXT: shlxq %rax, %r8, %rax
+; FALLBACK6-NEXT: orq %r10, %rax
+; FALLBACK6-NEXT: movq %r11, 24(%rdx)
+; FALLBACK6-NEXT: movq %rax, 8(%rdx)
+; FALLBACK6-NEXT: movq %rcx, 16(%rdx)
+; FALLBACK6-NEXT: movq %rdi, (%rdx)
+; FALLBACK6-NEXT: retq
+;
+; FALLBACK7-LABEL: ashr_32bytes:
+; FALLBACK7: # %bb.0:
+; FALLBACK7-NEXT: movups (%rdi), %xmm0
+; FALLBACK7-NEXT: movq 16(%rdi), %rax
+; FALLBACK7-NEXT: movq 24(%rdi), %rdi
+; FALLBACK7-NEXT: movzbl (%rsi), %esi
+; FALLBACK7-NEXT: leal (,%rsi,8), %ecx
+; FALLBACK7-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK7-NEXT: movq %rax, -{{[0-9]+}}(%rsp)
+; FALLBACK7-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK7-NEXT: sarq $63, %rdi
+; FALLBACK7-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK7-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK7-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK7-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK7-NEXT: andb $24, %sil
+; FALLBACK7-NEXT: movzbl %sil, %eax
+; FALLBACK7-NEXT: movq -48(%rsp,%rax), %rsi
+; FALLBACK7-NEXT: movq -56(%rsp,%rax), %rdi
+; FALLBACK7-NEXT: movq %rdi, %r8
+; FALLBACK7-NEXT: shrdq %cl, %rsi, %r8
+; FALLBACK7-NEXT: movq -72(%rsp,%rax), %r9
+; FALLBACK7-NEXT: movq -64(%rsp,%rax), %rax
+; FALLBACK7-NEXT: movq %rax, %r10
+; FALLBACK7-NEXT: shrdq %cl, %rdi, %r10
+; FALLBACK7-NEXT: shrdq %cl, %rax, %r9
+; FALLBACK7-NEXT: sarxq %rcx, %rsi, %rax
+; FALLBACK7-NEXT: movq %r10, 8(%rdx)
+; FALLBACK7-NEXT: movq %r8, 16(%rdx)
+; FALLBACK7-NEXT: movq %rax, 24(%rdx)
+; FALLBACK7-NEXT: movq %r9, (%rdx)
+; FALLBACK7-NEXT: retq
+;
+; FALLBACK8-LABEL: ashr_32bytes:
+; FALLBACK8: # %bb.0:
+; FALLBACK8-NEXT: pushq %rbx
+; FALLBACK8-NEXT: vmovups (%rdi), %xmm0
+; FALLBACK8-NEXT: movq 16(%rdi), %rcx
+; FALLBACK8-NEXT: movq 24(%rdi), %rdi
+; FALLBACK8-NEXT: movzbl (%rsi), %esi
+; FALLBACK8-NEXT: leal (,%rsi,8), %eax
+; FALLBACK8-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK8-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
+; FALLBACK8-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK8-NEXT: sarq $63, %rdi
+; FALLBACK8-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK8-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK8-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK8-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK8-NEXT: andb $24, %sil
+; FALLBACK8-NEXT: movzbl %sil, %r9d
+; FALLBACK8-NEXT: movq -64(%rsp,%r9), %r10
+; FALLBACK8-NEXT: movq -56(%rsp,%r9), %r8
+; FALLBACK8-NEXT: movl %eax, %ecx
+; FALLBACK8-NEXT: shrq %cl, %r10
+; FALLBACK8-NEXT: movl %eax, %esi
+; FALLBACK8-NEXT: notb %sil
+; FALLBACK8-NEXT: leaq (%r8,%r8), %rdi
+; FALLBACK8-NEXT: movl %esi, %ecx
+; FALLBACK8-NEXT: shlq %cl, %rdi
+; FALLBACK8-NEXT: orq %r10, %rdi
+; FALLBACK8-NEXT: movq -48(%rsp,%r9), %r10
+; FALLBACK8-NEXT: movq %r10, %r11
+; FALLBACK8-NEXT: movl %eax, %ecx
+; FALLBACK8-NEXT: shrq %cl, %r11
+; FALLBACK8-NEXT: movq -40(%rsp,%r9), %r9
+; FALLBACK8-NEXT: leaq (%r9,%r9), %rbx
+; FALLBACK8-NEXT: movl %esi, %ecx
+; FALLBACK8-NEXT: shlq %cl, %rbx
+; FALLBACK8-NEXT: orq %r11, %rbx
+; FALLBACK8-NEXT: movl %eax, %ecx
+; FALLBACK8-NEXT: shrq %cl, %r8
+; FALLBACK8-NEXT: addq %r10, %r10
+; FALLBACK8-NEXT: movl %esi, %ecx
+; FALLBACK8-NEXT: shlq %cl, %r10
+; FALLBACK8-NEXT: orq %r8, %r10
+; FALLBACK8-NEXT: movl %eax, %ecx
+; FALLBACK8-NEXT: sarq %cl, %r9
+; FALLBACK8-NEXT: movq %r9, 24(%rdx)
+; FALLBACK8-NEXT: movq %r10, 8(%rdx)
+; FALLBACK8-NEXT: movq %rbx, 16(%rdx)
+; FALLBACK8-NEXT: movq %rdi, (%rdx)
+; FALLBACK8-NEXT: popq %rbx
+; FALLBACK8-NEXT: retq
+;
+; FALLBACK9-LABEL: ashr_32bytes:
+; FALLBACK9: # %bb.0:
+; FALLBACK9-NEXT: vmovups (%rdi), %xmm0
+; FALLBACK9-NEXT: movq 16(%rdi), %rax
+; FALLBACK9-NEXT: movq 24(%rdi), %rdi
+; FALLBACK9-NEXT: movzbl (%rsi), %esi
+; FALLBACK9-NEXT: leal (,%rsi,8), %ecx
+; FALLBACK9-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK9-NEXT: movq %rax, -{{[0-9]+}}(%rsp)
+; FALLBACK9-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK9-NEXT: sarq $63, %rdi
+; FALLBACK9-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK9-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK9-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK9-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK9-NEXT: andb $24, %sil
+; FALLBACK9-NEXT: movzbl %sil, %eax
+; FALLBACK9-NEXT: movq -48(%rsp,%rax), %rsi
+; FALLBACK9-NEXT: movq -56(%rsp,%rax), %rdi
+; FALLBACK9-NEXT: movq %rdi, %r8
+; FALLBACK9-NEXT: shrdq %cl, %rsi, %r8
+; FALLBACK9-NEXT: movq -72(%rsp,%rax), %r9
+; FALLBACK9-NEXT: movq -64(%rsp,%rax), %rax
+; FALLBACK9-NEXT: movq %rax, %r10
+; FALLBACK9-NEXT: shrdq %cl, %rdi, %r10
+; FALLBACK9-NEXT: shrdq %cl, %rax, %r9
+; FALLBACK9-NEXT: # kill: def $cl killed $cl killed $ecx
+; FALLBACK9-NEXT: sarq %cl, %rsi
+; FALLBACK9-NEXT: movq %r10, 8(%rdx)
+; FALLBACK9-NEXT: movq %r8, 16(%rdx)
+; FALLBACK9-NEXT: movq %rsi, 24(%rdx)
+; FALLBACK9-NEXT: movq %r9, (%rdx)
+; FALLBACK9-NEXT: retq
+;
+; FALLBACK10-LABEL: ashr_32bytes:
+; FALLBACK10: # %bb.0:
+; FALLBACK10-NEXT: vmovups (%rdi), %xmm0
+; FALLBACK10-NEXT: movq 16(%rdi), %rcx
+; FALLBACK10-NEXT: movq 24(%rdi), %rdi
+; FALLBACK10-NEXT: movzbl (%rsi), %esi
+; FALLBACK10-NEXT: leal (,%rsi,8), %eax
+; FALLBACK10-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK10-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
+; FALLBACK10-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK10-NEXT: sarq $63, %rdi
+; FALLBACK10-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK10-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK10-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK10-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK10-NEXT: andb $24, %sil
+; FALLBACK10-NEXT: movzbl %sil, %ecx
+; FALLBACK10-NEXT: shrxq %rax, -72(%rsp,%rcx), %rsi
+; FALLBACK10-NEXT: movq -64(%rsp,%rcx), %rdi
+; FALLBACK10-NEXT: movq -56(%rsp,%rcx), %r8
+; FALLBACK10-NEXT: shrxq %rax, %r8, %r9
+; FALLBACK10-NEXT: movq -48(%rsp,%rcx), %rcx
+; FALLBACK10-NEXT: shrxq %rax, %rdi, %r10
+; FALLBACK10-NEXT: sarxq %rax, %rcx, %r11
+; FALLBACK10-NEXT: # kill: def $al killed $al killed $rax def $rax
+; FALLBACK10-NEXT: notb %al
+; FALLBACK10-NEXT: addq %rdi, %rdi
+; FALLBACK10-NEXT: shlxq %rax, %rdi, %rdi
+; FALLBACK10-NEXT: orq %rsi, %rdi
+; FALLBACK10-NEXT: addq %rcx, %rcx
+; FALLBACK10-NEXT: shlxq %rax, %rcx, %rcx
+; FALLBACK10-NEXT: orq %r9, %rcx
+; FALLBACK10-NEXT: addq %r8, %r8
+; FALLBACK10-NEXT: shlxq %rax, %r8, %rax
+; FALLBACK10-NEXT: orq %r10, %rax
+; FALLBACK10-NEXT: movq %r11, 24(%rdx)
+; FALLBACK10-NEXT: movq %rax, 8(%rdx)
+; FALLBACK10-NEXT: movq %rcx, 16(%rdx)
+; FALLBACK10-NEXT: movq %rdi, (%rdx)
+; FALLBACK10-NEXT: retq
+;
+; FALLBACK11-LABEL: ashr_32bytes:
+; FALLBACK11: # %bb.0:
+; FALLBACK11-NEXT: vmovups (%rdi), %xmm0
+; FALLBACK11-NEXT: movq 16(%rdi), %rax
+; FALLBACK11-NEXT: movq 24(%rdi), %rdi
+; FALLBACK11-NEXT: movzbl (%rsi), %esi
+; FALLBACK11-NEXT: leal (,%rsi,8), %ecx
+; FALLBACK11-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK11-NEXT: movq %rax, -{{[0-9]+}}(%rsp)
+; FALLBACK11-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK11-NEXT: sarq $63, %rdi
+; FALLBACK11-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK11-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK11-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK11-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK11-NEXT: andb $24, %sil
+; FALLBACK11-NEXT: movzbl %sil, %eax
+; FALLBACK11-NEXT: movq -48(%rsp,%rax), %rsi
+; FALLBACK11-NEXT: movq -56(%rsp,%rax), %rdi
+; FALLBACK11-NEXT: movq %rdi, %r8
+; FALLBACK11-NEXT: shrdq %cl, %rsi, %r8
+; FALLBACK11-NEXT: movq -72(%rsp,%rax), %r9
+; FALLBACK11-NEXT: movq -64(%rsp,%rax), %rax
+; FALLBACK11-NEXT: movq %rax, %r10
+; FALLBACK11-NEXT: shrdq %cl, %rdi, %r10
+; FALLBACK11-NEXT: shrdq %cl, %rax, %r9
+; FALLBACK11-NEXT: sarxq %rcx, %rsi, %rax
+; FALLBACK11-NEXT: movq %r10, 8(%rdx)
+; FALLBACK11-NEXT: movq %r8, 16(%rdx)
+; FALLBACK11-NEXT: movq %rax, 24(%rdx)
+; FALLBACK11-NEXT: movq %r9, (%rdx)
+; FALLBACK11-NEXT: retq
+;
+; FALLBACK12-LABEL: ashr_32bytes:
+; FALLBACK12: # %bb.0:
+; FALLBACK12-NEXT: pushq %rbx
+; FALLBACK12-NEXT: vmovups (%rdi), %xmm0
+; FALLBACK12-NEXT: movq 16(%rdi), %rcx
+; FALLBACK12-NEXT: movq 24(%rdi), %rdi
+; FALLBACK12-NEXT: movzbl (%rsi), %esi
+; FALLBACK12-NEXT: leal (,%rsi,8), %eax
+; FALLBACK12-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK12-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
+; FALLBACK12-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK12-NEXT: sarq $63, %rdi
+; FALLBACK12-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK12-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK12-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK12-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK12-NEXT: andb $24, %sil
+; FALLBACK12-NEXT: movzbl %sil, %r9d
+; FALLBACK12-NEXT: movq -64(%rsp,%r9), %r10
+; FALLBACK12-NEXT: movq -56(%rsp,%r9), %r8
+; FALLBACK12-NEXT: movl %eax, %ecx
+; FALLBACK12-NEXT: shrq %cl, %r10
+; FALLBACK12-NEXT: movl %eax, %esi
+; FALLBACK12-NEXT: notb %sil
+; FALLBACK12-NEXT: leaq (%r8,%r8), %rdi
+; FALLBACK12-NEXT: movl %esi, %ecx
+; FALLBACK12-NEXT: shlq %cl, %rdi
+; FALLBACK12-NEXT: orq %r10, %rdi
+; FALLBACK12-NEXT: movq -48(%rsp,%r9), %r10
+; FALLBACK12-NEXT: movq %r10, %r11
+; FALLBACK12-NEXT: movl %eax, %ecx
+; FALLBACK12-NEXT: shrq %cl, %r11
+; FALLBACK12-NEXT: movq -40(%rsp,%r9), %r9
+; FALLBACK12-NEXT: leaq (%r9,%r9), %rbx
+; FALLBACK12-NEXT: movl %esi, %ecx
+; FALLBACK12-NEXT: shlq %cl, %rbx
+; FALLBACK12-NEXT: orq %r11, %rbx
+; FALLBACK12-NEXT: movl %eax, %ecx
+; FALLBACK12-NEXT: shrq %cl, %r8
+; FALLBACK12-NEXT: addq %r10, %r10
+; FALLBACK12-NEXT: movl %esi, %ecx
+; FALLBACK12-NEXT: shlq %cl, %r10
+; FALLBACK12-NEXT: orq %r8, %r10
+; FALLBACK12-NEXT: movl %eax, %ecx
+; FALLBACK12-NEXT: sarq %cl, %r9
+; FALLBACK12-NEXT: movq %r9, 24(%rdx)
+; FALLBACK12-NEXT: movq %r10, 8(%rdx)
+; FALLBACK12-NEXT: movq %rbx, 16(%rdx)
+; FALLBACK12-NEXT: movq %rdi, (%rdx)
+; FALLBACK12-NEXT: popq %rbx
+; FALLBACK12-NEXT: retq
+;
+; FALLBACK13-LABEL: ashr_32bytes:
+; FALLBACK13: # %bb.0:
+; FALLBACK13-NEXT: vmovups (%rdi), %xmm0
+; FALLBACK13-NEXT: movq 16(%rdi), %rax
+; FALLBACK13-NEXT: movq 24(%rdi), %rdi
+; FALLBACK13-NEXT: movzbl (%rsi), %esi
+; FALLBACK13-NEXT: leal (,%rsi,8), %ecx
+; FALLBACK13-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK13-NEXT: movq %rax, -{{[0-9]+}}(%rsp)
+; FALLBACK13-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK13-NEXT: sarq $63, %rdi
+; FALLBACK13-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK13-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK13-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK13-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK13-NEXT: andb $24, %sil
+; FALLBACK13-NEXT: movzbl %sil, %eax
+; FALLBACK13-NEXT: movq -48(%rsp,%rax), %rsi
+; FALLBACK13-NEXT: movq -56(%rsp,%rax), %rdi
+; FALLBACK13-NEXT: movq %rdi, %r8
+; FALLBACK13-NEXT: shrdq %cl, %rsi, %r8
+; FALLBACK13-NEXT: movq -72(%rsp,%rax), %r9
+; FALLBACK13-NEXT: movq -64(%rsp,%rax), %rax
+; FALLBACK13-NEXT: movq %rax, %r10
+; FALLBACK13-NEXT: shrdq %cl, %rdi, %r10
+; FALLBACK13-NEXT: shrdq %cl, %rax, %r9
+; FALLBACK13-NEXT: # kill: def $cl killed $cl killed $ecx
+; FALLBACK13-NEXT: sarq %cl, %rsi
+; FALLBACK13-NEXT: movq %r10, 8(%rdx)
+; FALLBACK13-NEXT: movq %r8, 16(%rdx)
+; FALLBACK13-NEXT: movq %rsi, 24(%rdx)
+; FALLBACK13-NEXT: movq %r9, (%rdx)
+; FALLBACK13-NEXT: retq
+;
+; FALLBACK14-LABEL: ashr_32bytes:
+; FALLBACK14: # %bb.0:
+; FALLBACK14-NEXT: vmovups (%rdi), %xmm0
+; FALLBACK14-NEXT: movq 16(%rdi), %rcx
+; FALLBACK14-NEXT: movq 24(%rdi), %rdi
+; FALLBACK14-NEXT: movzbl (%rsi), %esi
+; FALLBACK14-NEXT: leal (,%rsi,8), %eax
+; FALLBACK14-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK14-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
+; FALLBACK14-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK14-NEXT: sarq $63, %rdi
+; FALLBACK14-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK14-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK14-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK14-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK14-NEXT: andb $24, %sil
+; FALLBACK14-NEXT: movzbl %sil, %ecx
+; FALLBACK14-NEXT: shrxq %rax, -72(%rsp,%rcx), %rsi
+; FALLBACK14-NEXT: movq -64(%rsp,%rcx), %rdi
+; FALLBACK14-NEXT: movq -56(%rsp,%rcx), %r8
+; FALLBACK14-NEXT: shrxq %rax, %r8, %r9
+; FALLBACK14-NEXT: movq -48(%rsp,%rcx), %rcx
+; FALLBACK14-NEXT: shrxq %rax, %rdi, %r10
+; FALLBACK14-NEXT: sarxq %rax, %rcx, %r11
+; FALLBACK14-NEXT: # kill: def $al killed $al killed $rax def $rax
+; FALLBACK14-NEXT: notb %al
+; FALLBACK14-NEXT: addq %rdi, %rdi
+; FALLBACK14-NEXT: shlxq %rax, %rdi, %rdi
+; FALLBACK14-NEXT: orq %rsi, %rdi
+; FALLBACK14-NEXT: addq %rcx, %rcx
+; FALLBACK14-NEXT: shlxq %rax, %rcx, %rcx
+; FALLBACK14-NEXT: orq %r9, %rcx
+; FALLBACK14-NEXT: addq %r8, %r8
+; FALLBACK14-NEXT: shlxq %rax, %r8, %rax
+; FALLBACK14-NEXT: orq %r10, %rax
+; FALLBACK14-NEXT: movq %r11, 24(%rdx)
+; FALLBACK14-NEXT: movq %rax, 8(%rdx)
+; FALLBACK14-NEXT: movq %rcx, 16(%rdx)
+; FALLBACK14-NEXT: movq %rdi, (%rdx)
+; FALLBACK14-NEXT: retq
+;
+; FALLBACK15-LABEL: ashr_32bytes:
+; FALLBACK15: # %bb.0:
+; FALLBACK15-NEXT: vmovups (%rdi), %xmm0
+; FALLBACK15-NEXT: movq 16(%rdi), %rax
+; FALLBACK15-NEXT: movq 24(%rdi), %rdi
+; FALLBACK15-NEXT: movzbl (%rsi), %esi
+; FALLBACK15-NEXT: leal (,%rsi,8), %ecx
+; FALLBACK15-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK15-NEXT: movq %rax, -{{[0-9]+}}(%rsp)
+; FALLBACK15-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK15-NEXT: sarq $63, %rdi
+; FALLBACK15-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK15-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK15-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK15-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK15-NEXT: andb $24, %sil
+; FALLBACK15-NEXT: movzbl %sil, %eax
+; FALLBACK15-NEXT: movq -48(%rsp,%rax), %rsi
+; FALLBACK15-NEXT: movq -56(%rsp,%rax), %rdi
+; FALLBACK15-NEXT: movq %rdi, %r8
+; FALLBACK15-NEXT: shrdq %cl, %rsi, %r8
+; FALLBACK15-NEXT: movq -72(%rsp,%rax), %r9
+; FALLBACK15-NEXT: movq -64(%rsp,%rax), %rax
+; FALLBACK15-NEXT: movq %rax, %r10
+; FALLBACK15-NEXT: shrdq %cl, %rdi, %r10
+; FALLBACK15-NEXT: shrdq %cl, %rax, %r9
+; FALLBACK15-NEXT: sarxq %rcx, %rsi, %rax
+; FALLBACK15-NEXT: movq %r10, 8(%rdx)
+; FALLBACK15-NEXT: movq %r8, 16(%rdx)
+; FALLBACK15-NEXT: movq %rax, 24(%rdx)
+; FALLBACK15-NEXT: movq %r9, (%rdx)
+; FALLBACK15-NEXT: retq
+;
+; FALLBACK16-LABEL: ashr_32bytes:
+; FALLBACK16: # %bb.0:
+; FALLBACK16-NEXT: pushl %ebp
+; FALLBACK16-NEXT: pushl %ebx
+; FALLBACK16-NEXT: pushl %edi
+; FALLBACK16-NEXT: pushl %esi
+; FALLBACK16-NEXT: subl $108, %esp
+; FALLBACK16-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK16-NEXT: movl {{[0-9]+}}(%esp), %esi
+; FALLBACK16-NEXT: movl (%esi), %ecx
+; FALLBACK16-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: movl 4(%esi), %ecx
+; FALLBACK16-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: movl 8(%esi), %ebx
+; FALLBACK16-NEXT: movl 12(%esi), %ebp
+; FALLBACK16-NEXT: movl 16(%esi), %edi
+; FALLBACK16-NEXT: movzbl (%eax), %ecx
+; FALLBACK16-NEXT: movl 20(%esi), %edx
+; FALLBACK16-NEXT: movl 24(%esi), %eax
+; FALLBACK16-NEXT: movl 28(%esi), %esi
+; FALLBACK16-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movl %ecx, %edx
+; FALLBACK16-NEXT: shlb $3, %dl
+; FALLBACK16-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movl %ebp, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movl %ebx, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK16-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK16-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: sarl $31, %esi
+; FALLBACK16-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: andb $28, %cl
+; FALLBACK16-NEXT: movzbl %cl, %edi
+; FALLBACK16-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: movl 32(%esp,%edi), %esi
+; FALLBACK16-NEXT: movl 36(%esp,%edi), %eax
+; FALLBACK16-NEXT: movl %eax, %ebx
+; FALLBACK16-NEXT: movl %edx, %ecx
+; FALLBACK16-NEXT: shrl %cl, %ebx
+; FALLBACK16-NEXT: movb %dl, %ch
+; FALLBACK16-NEXT: notb %ch
+; FALLBACK16-NEXT: movl 40(%esp,%edi), %edi
+; FALLBACK16-NEXT: leal (%edi,%edi), %ebp
+; FALLBACK16-NEXT: movb %ch, %cl
+; FALLBACK16-NEXT: shll %cl, %ebp
+; FALLBACK16-NEXT: orl %ebx, %ebp
+; FALLBACK16-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: movb %dl, %cl
+; FALLBACK16-NEXT: shrl %cl, %esi
+; FALLBACK16-NEXT: addl %eax, %eax
+; FALLBACK16-NEXT: movb %ch, %cl
+; FALLBACK16-NEXT: shll %cl, %eax
+; FALLBACK16-NEXT: orl %esi, %eax
+; FALLBACK16-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK16-NEXT: movl 44(%esp,%eax), %ebp
+; FALLBACK16-NEXT: movl %ebp, %esi
+; FALLBACK16-NEXT: movb %dl, %cl
+; FALLBACK16-NEXT: movl %edx, %ebx
+; FALLBACK16-NEXT: shrl %cl, %esi
+; FALLBACK16-NEXT: movl 48(%esp,%eax), %edx
+; FALLBACK16-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: leal (%edx,%edx), %eax
+; FALLBACK16-NEXT: movb %ch, %cl
+; FALLBACK16-NEXT: shll %cl, %eax
+; FALLBACK16-NEXT: orl %esi, %eax
+; FALLBACK16-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: movl %ebx, %edx
+; FALLBACK16-NEXT: movb %bl, %cl
+; FALLBACK16-NEXT: shrl %cl, %edi
+; FALLBACK16-NEXT: addl %ebp, %ebp
+; FALLBACK16-NEXT: movb %ch, %cl
+; FALLBACK16-NEXT: shll %cl, %ebp
+; FALLBACK16-NEXT: orl %edi, %ebp
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; FALLBACK16-NEXT: movl 52(%esp,%esi), %edi
+; FALLBACK16-NEXT: movl %edi, %eax
+; FALLBACK16-NEXT: movb %bl, %cl
+; FALLBACK16-NEXT: shrl %cl, %eax
+; FALLBACK16-NEXT: movl 56(%esp,%esi), %ebx
+; FALLBACK16-NEXT: leal (%ebx,%ebx), %esi
+; FALLBACK16-NEXT: movb %ch, %cl
+; FALLBACK16-NEXT: shll %cl, %esi
+; FALLBACK16-NEXT: orl %eax, %esi
+; FALLBACK16-NEXT: movb %dl, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
+; FALLBACK16-NEXT: movb %dl, %cl
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK16-NEXT: shrl %cl, %eax
+; FALLBACK16-NEXT: addl %edi, %edi
+; FALLBACK16-NEXT: movb %ch, %cl
+; FALLBACK16-NEXT: shll %cl, %edi
+; FALLBACK16-NEXT: orl %eax, %edi
+; FALLBACK16-NEXT: movb %dl, %cl
+; FALLBACK16-NEXT: shrl %cl, %ebx
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK16-NEXT: movl 60(%esp,%eax), %eax
+; FALLBACK16-NEXT: leal (%eax,%eax), %edx
+; FALLBACK16-NEXT: movb %ch, %cl
+; FALLBACK16-NEXT: shll %cl, %edx
+; FALLBACK16-NEXT: orl %ebx, %edx
+; FALLBACK16-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload
+; FALLBACK16-NEXT: sarl %cl, %eax
+; FALLBACK16-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK16-NEXT: movl %eax, 28(%ecx)
+; FALLBACK16-NEXT: movl %edx, 24(%ecx)
+; FALLBACK16-NEXT: movl %edi, 16(%ecx)
+; FALLBACK16-NEXT: movl %esi, 20(%ecx)
+; FALLBACK16-NEXT: movl %ebp, 8(%ecx)
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK16-NEXT: movl %eax, 12(%ecx)
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK16-NEXT: movl %eax, (%ecx)
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK16-NEXT: movl %eax, 4(%ecx)
+; FALLBACK16-NEXT: addl $108, %esp
+; FALLBACK16-NEXT: popl %esi
+; FALLBACK16-NEXT: popl %edi
+; FALLBACK16-NEXT: popl %ebx
+; FALLBACK16-NEXT: popl %ebp
+; FALLBACK16-NEXT: retl
+;
+; FALLBACK17-LABEL: ashr_32bytes:
+; FALLBACK17: # %bb.0:
+; FALLBACK17-NEXT: pushl %ebp
+; FALLBACK17-NEXT: pushl %ebx
+; FALLBACK17-NEXT: pushl %edi
+; FALLBACK17-NEXT: pushl %esi
+; FALLBACK17-NEXT: subl $92, %esp
+; FALLBACK17-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK17-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK17-NEXT: movl (%ecx), %edx
+; FALLBACK17-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK17-NEXT: movl 4(%ecx), %edx
+; FALLBACK17-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK17-NEXT: movl 8(%ecx), %edx
+; FALLBACK17-NEXT: movl %edx, (%esp) # 4-byte Spill
+; FALLBACK17-NEXT: movl 12(%ecx), %ebp
+; FALLBACK17-NEXT: movl 16(%ecx), %ebx
+; FALLBACK17-NEXT: movzbl (%eax), %eax
+; FALLBACK17-NEXT: movl 20(%ecx), %edi
+; FALLBACK17-NEXT: movl 24(%ecx), %edx
+; FALLBACK17-NEXT: movl 28(%ecx), %esi
+; FALLBACK17-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movl %eax, %ecx
+; FALLBACK17-NEXT: shlb $3, %cl
+; FALLBACK17-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movl %ebx, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movl %ebp, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movl (%esp), %edx # 4-byte Reload
+; FALLBACK17-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK17-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK17-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: sarl $31, %esi
+; FALLBACK17-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: andb $28, %al
+; FALLBACK17-NEXT: movzbl %al, %ebp
+; FALLBACK17-NEXT: movl 24(%esp,%ebp), %edx
+; FALLBACK17-NEXT: movl 20(%esp,%ebp), %eax
+; FALLBACK17-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK17-NEXT: shrdl %cl, %edx, %eax
+; FALLBACK17-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK17-NEXT: movl 32(%esp,%ebp), %ebx
+; FALLBACK17-NEXT: movl 28(%esp,%ebp), %eax
+; FALLBACK17-NEXT: movl %eax, %esi
+; FALLBACK17-NEXT: shrdl %cl, %ebx, %esi
+; FALLBACK17-NEXT: movl %esi, (%esp) # 4-byte Spill
+; FALLBACK17-NEXT: shrdl %cl, %eax, %edx
+; FALLBACK17-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK17-NEXT: movl 40(%esp,%ebp), %edx
+; FALLBACK17-NEXT: movl 36(%esp,%ebp), %eax
+; FALLBACK17-NEXT: movl %eax, %edi
+; FALLBACK17-NEXT: shrdl %cl, %edx, %edi
+; FALLBACK17-NEXT: shrdl %cl, %eax, %ebx
+; FALLBACK17-NEXT: movl 16(%esp,%ebp), %esi
+; FALLBACK17-NEXT: movl 44(%esp,%ebp), %eax
+; FALLBACK17-NEXT: shrdl %cl, %eax, %edx
+; FALLBACK17-NEXT: movl {{[0-9]+}}(%esp), %ebp
+; FALLBACK17-NEXT: movl %edx, 24(%ebp)
+; FALLBACK17-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK17-NEXT: shrdl %cl, %edx, %esi
+; FALLBACK17-NEXT: sarl %cl, %eax
+; FALLBACK17-NEXT: movl %eax, 28(%ebp)
+; FALLBACK17-NEXT: movl %ebx, 16(%ebp)
+; FALLBACK17-NEXT: movl %edi, 20(%ebp)
+; FALLBACK17-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK17-NEXT: movl %eax, 8(%ebp)
+; FALLBACK17-NEXT: movl (%esp), %eax # 4-byte Reload
+; FALLBACK17-NEXT: movl %eax, 12(%ebp)
+; FALLBACK17-NEXT: movl %esi, (%ebp)
+; FALLBACK17-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK17-NEXT: movl %eax, 4(%ebp)
+; FALLBACK17-NEXT: addl $92, %esp
+; FALLBACK17-NEXT: popl %esi
+; FALLBACK17-NEXT: popl %edi
+; FALLBACK17-NEXT: popl %ebx
+; FALLBACK17-NEXT: popl %ebp
+; FALLBACK17-NEXT: retl
+;
+; FALLBACK18-LABEL: ashr_32bytes:
+; FALLBACK18: # %bb.0:
+; FALLBACK18-NEXT: pushl %ebp
+; FALLBACK18-NEXT: pushl %ebx
+; FALLBACK18-NEXT: pushl %edi
+; FALLBACK18-NEXT: pushl %esi
+; FALLBACK18-NEXT: subl $108, %esp
+; FALLBACK18-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK18-NEXT: movl {{[0-9]+}}(%esp), %esi
+; FALLBACK18-NEXT: movl (%esi), %eax
+; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: movl 4(%esi), %eax
+; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: movl 8(%esi), %ebx
+; FALLBACK18-NEXT: movl 12(%esi), %ebp
+; FALLBACK18-NEXT: movl 16(%esi), %edi
+; FALLBACK18-NEXT: movzbl (%ecx), %ecx
+; FALLBACK18-NEXT: movl 20(%esi), %edx
+; FALLBACK18-NEXT: movl 24(%esi), %eax
+; FALLBACK18-NEXT: movl 28(%esi), %esi
+; FALLBACK18-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl %ecx, %eax
+; FALLBACK18-NEXT: shlb $3, %al
+; FALLBACK18-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl %ebp, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl %ebx, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK18-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK18-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: sarl $31, %esi
+; FALLBACK18-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: andb $28, %cl
+; FALLBACK18-NEXT: movzbl %cl, %edi
+; FALLBACK18-NEXT: movl 36(%esp,%edi), %esi
+; FALLBACK18-NEXT: movl 40(%esp,%edi), %ecx
+; FALLBACK18-NEXT: shrxl %eax, %esi, %ebx
+; FALLBACK18-NEXT: movl %eax, %edx
+; FALLBACK18-NEXT: notb %dl
+; FALLBACK18-NEXT: leal (%ecx,%ecx), %ebp
+; FALLBACK18-NEXT: shlxl %edx, %ebp, %ebp
+; FALLBACK18-NEXT: orl %ebx, %ebp
+; FALLBACK18-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: shrxl %eax, 32(%esp,%edi), %ebx
+; FALLBACK18-NEXT: addl %esi, %esi
+; FALLBACK18-NEXT: shlxl %edx, %esi, %esi
+; FALLBACK18-NEXT: orl %ebx, %esi
+; FALLBACK18-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: movl 48(%esp,%edi), %esi
+; FALLBACK18-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: leal (%esi,%esi), %ebx
+; FALLBACK18-NEXT: shlxl %edx, %ebx, %esi
+; FALLBACK18-NEXT: movl 44(%esp,%edi), %ebp
+; FALLBACK18-NEXT: shrxl %eax, %ebp, %ebx
+; FALLBACK18-NEXT: orl %ebx, %esi
+; FALLBACK18-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: shrxl %eax, %ecx, %ecx
+; FALLBACK18-NEXT: movl %eax, %ebx
+; FALLBACK18-NEXT: addl %ebp, %ebp
+; FALLBACK18-NEXT: shlxl %edx, %ebp, %eax
+; FALLBACK18-NEXT: orl %ecx, %eax
+; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: movl 56(%esp,%edi), %ebp
+; FALLBACK18-NEXT: leal (%ebp,%ebp), %ecx
+; FALLBACK18-NEXT: shlxl %edx, %ecx, %ecx
+; FALLBACK18-NEXT: movl 52(%esp,%edi), %eax
+; FALLBACK18-NEXT: shrxl %ebx, %eax, %esi
+; FALLBACK18-NEXT: orl %esi, %ecx
+; FALLBACK18-NEXT: shrxl %ebx, {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; FALLBACK18-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: addl %eax, %eax
+; FALLBACK18-NEXT: shlxl %edx, %eax, %esi
+; FALLBACK18-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; FALLBACK18-NEXT: shrxl %ebx, %ebp, %eax
+; FALLBACK18-NEXT: movl 60(%esp,%edi), %edi
+; FALLBACK18-NEXT: sarxl %ebx, %edi, %ebx
+; FALLBACK18-NEXT: addl %edi, %edi
+; FALLBACK18-NEXT: shlxl %edx, %edi, %edx
+; FALLBACK18-NEXT: orl %eax, %edx
+; FALLBACK18-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK18-NEXT: movl %ebx, 28(%eax)
+; FALLBACK18-NEXT: movl %edx, 24(%eax)
+; FALLBACK18-NEXT: movl %esi, 16(%eax)
+; FALLBACK18-NEXT: movl %ecx, 20(%eax)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK18-NEXT: movl %ecx, 8(%eax)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK18-NEXT: movl %ecx, 12(%eax)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK18-NEXT: movl %ecx, (%eax)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK18-NEXT: movl %ecx, 4(%eax)
+; FALLBACK18-NEXT: addl $108, %esp
+; FALLBACK18-NEXT: popl %esi
+; FALLBACK18-NEXT: popl %edi
+; FALLBACK18-NEXT: popl %ebx
+; FALLBACK18-NEXT: popl %ebp
+; FALLBACK18-NEXT: retl
+;
+; FALLBACK19-LABEL: ashr_32bytes:
+; FALLBACK19: # %bb.0:
+; FALLBACK19-NEXT: pushl %ebp
+; FALLBACK19-NEXT: pushl %ebx
+; FALLBACK19-NEXT: pushl %edi
+; FALLBACK19-NEXT: pushl %esi
+; FALLBACK19-NEXT: subl $92, %esp
+; FALLBACK19-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK19-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK19-NEXT: movl (%ecx), %edx
+; FALLBACK19-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK19-NEXT: movl 4(%ecx), %edx
+; FALLBACK19-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK19-NEXT: movl 8(%ecx), %edx
+; FALLBACK19-NEXT: movl %edx, (%esp) # 4-byte Spill
+; FALLBACK19-NEXT: movl 12(%ecx), %ebp
+; FALLBACK19-NEXT: movl 16(%ecx), %ebx
+; FALLBACK19-NEXT: movzbl (%eax), %eax
+; FALLBACK19-NEXT: movl 20(%ecx), %edi
+; FALLBACK19-NEXT: movl 24(%ecx), %edx
+; FALLBACK19-NEXT: movl 28(%ecx), %esi
+; FALLBACK19-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movl %ebx, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movl %eax, %ecx
+; FALLBACK19-NEXT: shlb $3, %cl
+; FALLBACK19-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movl %ebp, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movl (%esp), %edx # 4-byte Reload
+; FALLBACK19-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK19-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK19-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: sarl $31, %esi
+; FALLBACK19-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: andb $28, %al
+; FALLBACK19-NEXT: movzbl %al, %ebp
+; FALLBACK19-NEXT: movl 24(%esp,%ebp), %esi
+; FALLBACK19-NEXT: movl 20(%esp,%ebp), %eax
+; FALLBACK19-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK19-NEXT: shrdl %cl, %esi, %eax
+; FALLBACK19-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK19-NEXT: movl 32(%esp,%ebp), %ebx
+; FALLBACK19-NEXT: movl 28(%esp,%ebp), %eax
+; FALLBACK19-NEXT: movl %eax, %edx
+; FALLBACK19-NEXT: shrdl %cl, %ebx, %edx
+; FALLBACK19-NEXT: movl %edx, (%esp) # 4-byte Spill
+; FALLBACK19-NEXT: shrdl %cl, %eax, %esi
+; FALLBACK19-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK19-NEXT: movl 40(%esp,%ebp), %eax
+; FALLBACK19-NEXT: movl 36(%esp,%ebp), %edx
+; FALLBACK19-NEXT: movl %edx, %esi
+; FALLBACK19-NEXT: shrdl %cl, %eax, %esi
+; FALLBACK19-NEXT: shrdl %cl, %edx, %ebx
+; FALLBACK19-NEXT: movl 16(%esp,%ebp), %edx
+; FALLBACK19-NEXT: movl 44(%esp,%ebp), %edi
+; FALLBACK19-NEXT: shrdl %cl, %edi, %eax
+; FALLBACK19-NEXT: movl {{[0-9]+}}(%esp), %ebp
+; FALLBACK19-NEXT: movl %eax, 24(%ebp)
+; FALLBACK19-NEXT: sarxl %ecx, %edi, %eax
+; FALLBACK19-NEXT: movl %eax, 28(%ebp)
+; FALLBACK19-NEXT: movl %ebx, 16(%ebp)
+; FALLBACK19-NEXT: movl %esi, 20(%ebp)
+; FALLBACK19-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK19-NEXT: movl %eax, 8(%ebp)
+; FALLBACK19-NEXT: movl (%esp), %eax # 4-byte Reload
+; FALLBACK19-NEXT: movl %eax, 12(%ebp)
+; FALLBACK19-NEXT: # kill: def $cl killed $cl killed $ecx
+; FALLBACK19-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK19-NEXT: shrdl %cl, %eax, %edx
+; FALLBACK19-NEXT: movl %edx, (%ebp)
+; FALLBACK19-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK19-NEXT: movl %eax, 4(%ebp)
+; FALLBACK19-NEXT: addl $92, %esp
+; FALLBACK19-NEXT: popl %esi
+; FALLBACK19-NEXT: popl %edi
+; FALLBACK19-NEXT: popl %ebx
+; FALLBACK19-NEXT: popl %ebp
+; FALLBACK19-NEXT: retl
+;
+; FALLBACK20-LABEL: ashr_32bytes:
+; FALLBACK20: # %bb.0:
+; FALLBACK20-NEXT: pushl %ebp
+; FALLBACK20-NEXT: pushl %ebx
+; FALLBACK20-NEXT: pushl %edi
+; FALLBACK20-NEXT: pushl %esi
+; FALLBACK20-NEXT: subl $108, %esp
+; FALLBACK20-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK20-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK20-NEXT: movups (%ecx), %xmm0
+; FALLBACK20-NEXT: movl 16(%ecx), %esi
+; FALLBACK20-NEXT: movl 20(%ecx), %edi
+; FALLBACK20-NEXT: movl 24(%ecx), %ebx
+; FALLBACK20-NEXT: movl 28(%ecx), %edx
+; FALLBACK20-NEXT: movzbl (%eax), %eax
+; FALLBACK20-NEXT: movl %eax, %ecx
+; FALLBACK20-NEXT: shlb $3, %cl
+; FALLBACK20-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK20-NEXT: movl %ebx, {{[0-9]+}}(%esp)
+; FALLBACK20-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; FALLBACK20-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; FALLBACK20-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; FALLBACK20-NEXT: sarl $31, %edx
+; FALLBACK20-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK20-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK20-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK20-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK20-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK20-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK20-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK20-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK20-NEXT: andb $28, %al
+; FALLBACK20-NEXT: movzbl %al, %edi
+; FALLBACK20-NEXT: movl 32(%esp,%edi), %eax
+; FALLBACK20-NEXT: movl 36(%esp,%edi), %esi
+; FALLBACK20-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK20-NEXT: shrl %cl, %eax
+; FALLBACK20-NEXT: movl %ecx, %edx
+; FALLBACK20-NEXT: movb %cl, %dh
+; FALLBACK20-NEXT: notb %dl
+; FALLBACK20-NEXT: addl %esi, %esi
+; FALLBACK20-NEXT: movl %edx, %ecx
+; FALLBACK20-NEXT: shll %cl, %esi
+; FALLBACK20-NEXT: orl %eax, %esi
+; FALLBACK20-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK20-NEXT: movl 44(%esp,%edi), %ebx
+; FALLBACK20-NEXT: movl %ebx, %eax
+; FALLBACK20-NEXT: movb %dh, %cl
+; FALLBACK20-NEXT: shrl %cl, %eax
+; FALLBACK20-NEXT: movl 48(%esp,%edi), %esi
+; FALLBACK20-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK20-NEXT: addl %esi, %esi
+; FALLBACK20-NEXT: movl %edx, %ecx
+; FALLBACK20-NEXT: shll %cl, %esi
+; FALLBACK20-NEXT: orl %eax, %esi
+; FALLBACK20-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK20-NEXT: movl 40(%esp,%edi), %esi
+; FALLBACK20-NEXT: movl %esi, %eax
+; FALLBACK20-NEXT: movb %dh, %cl
+; FALLBACK20-NEXT: shrl %cl, %eax
+; FALLBACK20-NEXT: addl %ebx, %ebx
+; FALLBACK20-NEXT: movl %edx, %ecx
+; FALLBACK20-NEXT: shll %cl, %ebx
+; FALLBACK20-NEXT: orl %eax, %ebx
+; FALLBACK20-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK20-NEXT: movl 52(%esp,%edi), %ebp
+; FALLBACK20-NEXT: movl %ebp, %eax
+; FALLBACK20-NEXT: movb %dh, %cl
+; FALLBACK20-NEXT: shrl %cl, %eax
+; FALLBACK20-NEXT: movl 56(%esp,%edi), %ecx
+; FALLBACK20-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK20-NEXT: leal (%ecx,%ecx), %ebx
+; FALLBACK20-NEXT: movl %edx, %ecx
+; FALLBACK20-NEXT: shll %cl, %ebx
+; FALLBACK20-NEXT: orl %eax, %ebx
+; FALLBACK20-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK20-NEXT: movb %dh, %cl
+; FALLBACK20-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK20-NEXT: shrl %cl, %eax
+; FALLBACK20-NEXT: addl %ebp, %ebp
+; FALLBACK20-NEXT: movl %edx, %ecx
+; FALLBACK20-NEXT: shll %cl, %ebp
+; FALLBACK20-NEXT: orl %eax, %ebp
+; FALLBACK20-NEXT: movb %dh, %cl
+; FALLBACK20-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; FALLBACK20-NEXT: shrl %cl, %ebx
+; FALLBACK20-NEXT: movl 60(%esp,%edi), %eax
+; FALLBACK20-NEXT: leal (%eax,%eax), %edi
+; FALLBACK20-NEXT: movl %edx, %ecx
+; FALLBACK20-NEXT: shll %cl, %edi
+; FALLBACK20-NEXT: orl %ebx, %edi
+; FALLBACK20-NEXT: movb %dh, %cl
+; FALLBACK20-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; FALLBACK20-NEXT: shrl %cl, %ebx
+; FALLBACK20-NEXT: addl %esi, %esi
+; FALLBACK20-NEXT: movl %edx, %ecx
+; FALLBACK20-NEXT: shll %cl, %esi
+; FALLBACK20-NEXT: orl %ebx, %esi
+; FALLBACK20-NEXT: movb %dh, %cl
+; FALLBACK20-NEXT: sarl %cl, %eax
+; FALLBACK20-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK20-NEXT: movl %eax, 28(%ecx)
+; FALLBACK20-NEXT: movl %esi, 4(%ecx)
+; FALLBACK20-NEXT: movl %edi, 24(%ecx)
+; FALLBACK20-NEXT: movl %ebp, 16(%ecx)
+; FALLBACK20-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK20-NEXT: movl %eax, 20(%ecx)
+; FALLBACK20-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK20-NEXT: movl %eax, 8(%ecx)
+; FALLBACK20-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK20-NEXT: movl %eax, 12(%ecx)
+; FALLBACK20-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK20-NEXT: movl %eax, (%ecx)
+; FALLBACK20-NEXT: addl $108, %esp
+; FALLBACK20-NEXT: popl %esi
+; FALLBACK20-NEXT: popl %edi
+; FALLBACK20-NEXT: popl %ebx
+; FALLBACK20-NEXT: popl %ebp
+; FALLBACK20-NEXT: retl
+;
+; FALLBACK21-LABEL: ashr_32bytes:
+; FALLBACK21: # %bb.0:
+; FALLBACK21-NEXT: pushl %ebp
+; FALLBACK21-NEXT: pushl %ebx
+; FALLBACK21-NEXT: pushl %edi
+; FALLBACK21-NEXT: pushl %esi
+; FALLBACK21-NEXT: subl $108, %esp
+; FALLBACK21-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK21-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK21-NEXT: movups (%ecx), %xmm0
+; FALLBACK21-NEXT: movl 16(%ecx), %esi
+; FALLBACK21-NEXT: movl 20(%ecx), %edi
+; FALLBACK21-NEXT: movl 24(%ecx), %ebx
+; FALLBACK21-NEXT: movl 28(%ecx), %edx
+; FALLBACK21-NEXT: movzbl (%eax), %eax
+; FALLBACK21-NEXT: movl %eax, %ecx
+; FALLBACK21-NEXT: shlb $3, %cl
+; FALLBACK21-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK21-NEXT: movl %ebx, {{[0-9]+}}(%esp)
+; FALLBACK21-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; FALLBACK21-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; FALLBACK21-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; FALLBACK21-NEXT: sarl $31, %edx
+; FALLBACK21-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK21-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK21-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK21-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK21-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK21-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK21-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK21-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK21-NEXT: andb $28, %al
+; FALLBACK21-NEXT: movzbl %al, %ebp
+; FALLBACK21-NEXT: movl 48(%esp,%ebp), %esi
+; FALLBACK21-NEXT: movl 44(%esp,%ebp), %eax
+; FALLBACK21-NEXT: movl %eax, %edx
+; FALLBACK21-NEXT: shrdl %cl, %esi, %edx
+; FALLBACK21-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK21-NEXT: movl 40(%esp,%ebp), %edx
+; FALLBACK21-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK21-NEXT: shrdl %cl, %eax, %edx
+; FALLBACK21-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK21-NEXT: movl 56(%esp,%ebp), %ebx
+; FALLBACK21-NEXT: movl 52(%esp,%ebp), %eax
+; FALLBACK21-NEXT: movl %eax, %edx
+; FALLBACK21-NEXT: shrdl %cl, %ebx, %edx
+; FALLBACK21-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK21-NEXT: shrdl %cl, %eax, %esi
+; FALLBACK21-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK21-NEXT: movl 60(%esp,%ebp), %eax
+; FALLBACK21-NEXT: shrdl %cl, %eax, %ebx
+; FALLBACK21-NEXT: movl 32(%esp,%ebp), %edx
+; FALLBACK21-NEXT: movl 36(%esp,%ebp), %edi
+; FALLBACK21-NEXT: movl %edi, %esi
+; FALLBACK21-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; FALLBACK21-NEXT: shrdl %cl, %ebp, %esi
+; FALLBACK21-NEXT: movl {{[0-9]+}}(%esp), %ebp
+; FALLBACK21-NEXT: movl %esi, 4(%ebp)
+; FALLBACK21-NEXT: movl %ebx, 24(%ebp)
+; FALLBACK21-NEXT: shrdl %cl, %edi, %edx
+; FALLBACK21-NEXT: sarl %cl, %eax
+; FALLBACK21-NEXT: movl %eax, 28(%ebp)
+; FALLBACK21-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK21-NEXT: movl %eax, 16(%ebp)
+; FALLBACK21-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK21-NEXT: movl %eax, 20(%ebp)
+; FALLBACK21-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK21-NEXT: movl %eax, 8(%ebp)
+; FALLBACK21-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK21-NEXT: movl %eax, 12(%ebp)
+; FALLBACK21-NEXT: movl %edx, (%ebp)
+; FALLBACK21-NEXT: addl $108, %esp
+; FALLBACK21-NEXT: popl %esi
+; FALLBACK21-NEXT: popl %edi
+; FALLBACK21-NEXT: popl %ebx
+; FALLBACK21-NEXT: popl %ebp
+; FALLBACK21-NEXT: retl
+;
+; FALLBACK22-LABEL: ashr_32bytes:
+; FALLBACK22: # %bb.0:
+; FALLBACK22-NEXT: pushl %ebp
+; FALLBACK22-NEXT: pushl %ebx
+; FALLBACK22-NEXT: pushl %edi
+; FALLBACK22-NEXT: pushl %esi
+; FALLBACK22-NEXT: subl $108, %esp
+; FALLBACK22-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK22-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK22-NEXT: movups (%ecx), %xmm0
+; FALLBACK22-NEXT: movl 16(%ecx), %esi
+; FALLBACK22-NEXT: movl 20(%ecx), %edi
+; FALLBACK22-NEXT: movl 24(%ecx), %ebx
+; FALLBACK22-NEXT: movl 28(%ecx), %edx
+; FALLBACK22-NEXT: movzbl (%eax), %ecx
+; FALLBACK22-NEXT: movl %ecx, %eax
+; FALLBACK22-NEXT: shlb $3, %al
+; FALLBACK22-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK22-NEXT: movl %ebx, {{[0-9]+}}(%esp)
+; FALLBACK22-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; FALLBACK22-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; FALLBACK22-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; FALLBACK22-NEXT: sarl $31, %edx
+; FALLBACK22-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK22-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK22-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK22-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK22-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK22-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK22-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK22-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK22-NEXT: andb $28, %cl
+; FALLBACK22-NEXT: movzbl %cl, %edi
+; FALLBACK22-NEXT: shrxl %eax, 32(%esp,%edi), %ecx
+; FALLBACK22-NEXT: movl %eax, %edx
+; FALLBACK22-NEXT: notb %dl
+; FALLBACK22-NEXT: movl 36(%esp,%edi), %esi
+; FALLBACK22-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: addl %esi, %esi
+; FALLBACK22-NEXT: shlxl %edx, %esi, %esi
+; FALLBACK22-NEXT: orl %ecx, %esi
+; FALLBACK22-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: movl 48(%esp,%edi), %ecx
+; FALLBACK22-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: addl %ecx, %ecx
+; FALLBACK22-NEXT: shlxl %edx, %ecx, %esi
+; FALLBACK22-NEXT: movl 44(%esp,%edi), %ecx
+; FALLBACK22-NEXT: shrxl %eax, %ecx, %ebx
+; FALLBACK22-NEXT: orl %ebx, %esi
+; FALLBACK22-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: addl %ecx, %ecx
+; FALLBACK22-NEXT: shlxl %edx, %ecx, %esi
+; FALLBACK22-NEXT: movl 40(%esp,%edi), %ecx
+; FALLBACK22-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: shrxl %eax, %ecx, %ebx
+; FALLBACK22-NEXT: movl %eax, %ecx
+; FALLBACK22-NEXT: orl %ebx, %esi
+; FALLBACK22-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: movl 56(%esp,%edi), %esi
+; FALLBACK22-NEXT: leal (%esi,%esi), %ebx
+; FALLBACK22-NEXT: shlxl %edx, %ebx, %eax
+; FALLBACK22-NEXT: movl 52(%esp,%edi), %ebx
+; FALLBACK22-NEXT: shrxl %ecx, %ebx, %ebp
+; FALLBACK22-NEXT: orl %ebp, %eax
+; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: movl %ecx, %eax
+; FALLBACK22-NEXT: shrxl %ecx, {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; FALLBACK22-NEXT: addl %ebx, %ebx
+; FALLBACK22-NEXT: shlxl %edx, %ebx, %ebx
+; FALLBACK22-NEXT: orl %ebp, %ebx
+; FALLBACK22-NEXT: shrxl %ecx, %esi, %ecx
+; FALLBACK22-NEXT: shrxl %eax, {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; FALLBACK22-NEXT: movl 60(%esp,%edi), %edi
+; FALLBACK22-NEXT: sarxl %eax, %edi, %eax
+; FALLBACK22-NEXT: addl %edi, %edi
+; FALLBACK22-NEXT: shlxl %edx, %edi, %edi
+; FALLBACK22-NEXT: orl %ecx, %edi
+; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK22-NEXT: addl %ecx, %ecx
+; FALLBACK22-NEXT: shlxl %edx, %ecx, %ecx
+; FALLBACK22-NEXT: orl %esi, %ecx
+; FALLBACK22-NEXT: movl {{[0-9]+}}(%esp), %edx
+; FALLBACK22-NEXT: movl %eax, 28(%edx)
+; FALLBACK22-NEXT: movl %ecx, 4(%edx)
+; FALLBACK22-NEXT: movl %edi, 24(%edx)
+; FALLBACK22-NEXT: movl %ebx, 16(%edx)
+; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK22-NEXT: movl %eax, 20(%edx)
+; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK22-NEXT: movl %eax, 8(%edx)
+; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK22-NEXT: movl %eax, 12(%edx)
+; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK22-NEXT: movl %eax, (%edx)
+; FALLBACK22-NEXT: addl $108, %esp
+; FALLBACK22-NEXT: popl %esi
+; FALLBACK22-NEXT: popl %edi
+; FALLBACK22-NEXT: popl %ebx
+; FALLBACK22-NEXT: popl %ebp
+; FALLBACK22-NEXT: retl
+;
+; FALLBACK23-LABEL: ashr_32bytes:
+; FALLBACK23: # %bb.0:
+; FALLBACK23-NEXT: pushl %ebp
+; FALLBACK23-NEXT: pushl %ebx
+; FALLBACK23-NEXT: pushl %edi
+; FALLBACK23-NEXT: pushl %esi
+; FALLBACK23-NEXT: subl $108, %esp
+; FALLBACK23-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK23-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK23-NEXT: movups (%ecx), %xmm0
+; FALLBACK23-NEXT: movl 16(%ecx), %esi
+; FALLBACK23-NEXT: movl 20(%ecx), %edi
+; FALLBACK23-NEXT: movl 24(%ecx), %ebx
+; FALLBACK23-NEXT: movl 28(%ecx), %edx
+; FALLBACK23-NEXT: movzbl (%eax), %eax
+; FALLBACK23-NEXT: movl %eax, %ecx
+; FALLBACK23-NEXT: shlb $3, %cl
+; FALLBACK23-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK23-NEXT: movl %ebx, {{[0-9]+}}(%esp)
+; FALLBACK23-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; FALLBACK23-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; FALLBACK23-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; FALLBACK23-NEXT: sarl $31, %edx
+; FALLBACK23-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK23-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK23-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK23-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK23-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK23-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK23-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK23-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK23-NEXT: andb $28, %al
+; FALLBACK23-NEXT: movzbl %al, %ebx
+; FALLBACK23-NEXT: movl 48(%esp,%ebx), %esi
+; FALLBACK23-NEXT: movl 44(%esp,%ebx), %eax
+; FALLBACK23-NEXT: movl %eax, %edx
+; FALLBACK23-NEXT: shrdl %cl, %esi, %edx
+; FALLBACK23-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK23-NEXT: movl 40(%esp,%ebx), %edx
+; FALLBACK23-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK23-NEXT: shrdl %cl, %eax, %edx
+; FALLBACK23-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK23-NEXT: movl 56(%esp,%ebx), %ebp
+; FALLBACK23-NEXT: movl 52(%esp,%ebx), %eax
+; FALLBACK23-NEXT: movl %eax, %edi
+; FALLBACK23-NEXT: shrdl %cl, %ebp, %edi
+; FALLBACK23-NEXT: shrdl %cl, %eax, %esi
+; FALLBACK23-NEXT: movl 60(%esp,%ebx), %eax
+; FALLBACK23-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK23-NEXT: shrdl %cl, %eax, %ebp
+; FALLBACK23-NEXT: movl 32(%esp,%ebx), %edx
+; FALLBACK23-NEXT: movl 36(%esp,%ebx), %ebx
+; FALLBACK23-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK23-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK23-NEXT: shrdl %cl, %eax, %ebx
+; FALLBACK23-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK23-NEXT: movl %ebx, 4(%eax)
+; FALLBACK23-NEXT: movl %ebp, 24(%eax)
+; FALLBACK23-NEXT: sarxl %ecx, {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; FALLBACK23-NEXT: movl %ebx, 28(%eax)
+; FALLBACK23-NEXT: movl %esi, 16(%eax)
+; FALLBACK23-NEXT: movl %edi, 20(%eax)
+; FALLBACK23-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; FALLBACK23-NEXT: movl %esi, 8(%eax)
+; FALLBACK23-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; FALLBACK23-NEXT: movl %esi, 12(%eax)
+; FALLBACK23-NEXT: # kill: def $cl killed $cl killed $ecx
+; FALLBACK23-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; FALLBACK23-NEXT: shrdl %cl, %esi, %edx
+; FALLBACK23-NEXT: movl %edx, (%eax)
+; FALLBACK23-NEXT: addl $108, %esp
+; FALLBACK23-NEXT: popl %esi
+; FALLBACK23-NEXT: popl %edi
+; FALLBACK23-NEXT: popl %ebx
+; FALLBACK23-NEXT: popl %ebp
+; FALLBACK23-NEXT: retl
+;
+; FALLBACK24-LABEL: ashr_32bytes:
+; FALLBACK24: # %bb.0:
+; FALLBACK24-NEXT: pushl %ebp
+; FALLBACK24-NEXT: pushl %ebx
+; FALLBACK24-NEXT: pushl %edi
+; FALLBACK24-NEXT: pushl %esi
+; FALLBACK24-NEXT: subl $108, %esp
+; FALLBACK24-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK24-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK24-NEXT: vmovups (%ecx), %xmm0
+; FALLBACK24-NEXT: movl 16(%ecx), %esi
+; FALLBACK24-NEXT: movl 20(%ecx), %edi
+; FALLBACK24-NEXT: movl 24(%ecx), %ebx
+; FALLBACK24-NEXT: movl 28(%ecx), %edx
+; FALLBACK24-NEXT: movzbl (%eax), %eax
+; FALLBACK24-NEXT: movl %eax, %ecx
+; FALLBACK24-NEXT: shlb $3, %cl
+; FALLBACK24-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK24-NEXT: movl %ebx, {{[0-9]+}}(%esp)
+; FALLBACK24-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; FALLBACK24-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; FALLBACK24-NEXT: vmovaps %xmm0, {{[0-9]+}}(%esp)
+; FALLBACK24-NEXT: sarl $31, %edx
+; FALLBACK24-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK24-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK24-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK24-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK24-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK24-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK24-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK24-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK24-NEXT: andb $28, %al
+; FALLBACK24-NEXT: movzbl %al, %edi
+; FALLBACK24-NEXT: movl 32(%esp,%edi), %eax
+; FALLBACK24-NEXT: movl 36(%esp,%edi), %esi
+; FALLBACK24-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK24-NEXT: shrl %cl, %eax
+; FALLBACK24-NEXT: movl %ecx, %edx
+; FALLBACK24-NEXT: movb %cl, %dh
+; FALLBACK24-NEXT: notb %dl
+; FALLBACK24-NEXT: addl %esi, %esi
+; FALLBACK24-NEXT: movl %edx, %ecx
+; FALLBACK24-NEXT: shll %cl, %esi
+; FALLBACK24-NEXT: orl %eax, %esi
+; FALLBACK24-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK24-NEXT: movl 44(%esp,%edi), %ebx
+; FALLBACK24-NEXT: movl %ebx, %eax
+; FALLBACK24-NEXT: movb %dh, %cl
+; FALLBACK24-NEXT: shrl %cl, %eax
+; FALLBACK24-NEXT: movl 48(%esp,%edi), %esi
+; FALLBACK24-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK24-NEXT: addl %esi, %esi
+; FALLBACK24-NEXT: movl %edx, %ecx
+; FALLBACK24-NEXT: shll %cl, %esi
+; FALLBACK24-NEXT: orl %eax, %esi
+; FALLBACK24-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK24-NEXT: movl 40(%esp,%edi), %esi
+; FALLBACK24-NEXT: movl %esi, %eax
+; FALLBACK24-NEXT: movb %dh, %cl
+; FALLBACK24-NEXT: shrl %cl, %eax
+; FALLBACK24-NEXT: addl %ebx, %ebx
+; FALLBACK24-NEXT: movl %edx, %ecx
+; FALLBACK24-NEXT: shll %cl, %ebx
+; FALLBACK24-NEXT: orl %eax, %ebx
+; FALLBACK24-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK24-NEXT: movl 52(%esp,%edi), %ebp
+; FALLBACK24-NEXT: movl %ebp, %eax
+; FALLBACK24-NEXT: movb %dh, %cl
+; FALLBACK24-NEXT: shrl %cl, %eax
+; FALLBACK24-NEXT: movl 56(%esp,%edi), %ecx
+; FALLBACK24-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK24-NEXT: leal (%ecx,%ecx), %ebx
+; FALLBACK24-NEXT: movl %edx, %ecx
+; FALLBACK24-NEXT: shll %cl, %ebx
+; FALLBACK24-NEXT: orl %eax, %ebx
+; FALLBACK24-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK24-NEXT: movb %dh, %cl
+; FALLBACK24-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK24-NEXT: shrl %cl, %eax
+; FALLBACK24-NEXT: addl %ebp, %ebp
+; FALLBACK24-NEXT: movl %edx, %ecx
+; FALLBACK24-NEXT: shll %cl, %ebp
+; FALLBACK24-NEXT: orl %eax, %ebp
+; FALLBACK24-NEXT: movb %dh, %cl
+; FALLBACK24-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; FALLBACK24-NEXT: shrl %cl, %ebx
+; FALLBACK24-NEXT: movl 60(%esp,%edi), %eax
+; FALLBACK24-NEXT: leal (%eax,%eax), %edi
+; FALLBACK24-NEXT: movl %edx, %ecx
+; FALLBACK24-NEXT: shll %cl, %edi
+; FALLBACK24-NEXT: orl %ebx, %edi
+; FALLBACK24-NEXT: movb %dh, %cl
+; FALLBACK24-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; FALLBACK24-NEXT: shrl %cl, %ebx
+; FALLBACK24-NEXT: addl %esi, %esi
+; FALLBACK24-NEXT: movl %edx, %ecx
+; FALLBACK24-NEXT: shll %cl, %esi
+; FALLBACK24-NEXT: orl %ebx, %esi
+; FALLBACK24-NEXT: movb %dh, %cl
+; FALLBACK24-NEXT: sarl %cl, %eax
+; FALLBACK24-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK24-NEXT: movl %eax, 28(%ecx)
+; FALLBACK24-NEXT: movl %esi, 4(%ecx)
+; FALLBACK24-NEXT: movl %edi, 24(%ecx)
+; FALLBACK24-NEXT: movl %ebp, 16(%ecx)
+; FALLBACK24-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK24-NEXT: movl %eax, 20(%ecx)
+; FALLBACK24-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK24-NEXT: movl %eax, 8(%ecx)
+; FALLBACK24-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK24-NEXT: movl %eax, 12(%ecx)
+; FALLBACK24-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK24-NEXT: movl %eax, (%ecx)
+; FALLBACK24-NEXT: addl $108, %esp
+; FALLBACK24-NEXT: popl %esi
+; FALLBACK24-NEXT: popl %edi
+; FALLBACK24-NEXT: popl %ebx
+; FALLBACK24-NEXT: popl %ebp
+; FALLBACK24-NEXT: retl
+;
+; FALLBACK25-LABEL: ashr_32bytes:
+; FALLBACK25: # %bb.0:
+; FALLBACK25-NEXT: pushl %ebp
+; FALLBACK25-NEXT: pushl %ebx
+; FALLBACK25-NEXT: pushl %edi
+; FALLBACK25-NEXT: pushl %esi
+; FALLBACK25-NEXT: subl $108, %esp
+; FALLBACK25-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK25-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK25-NEXT: vmovups (%ecx), %xmm0
+; FALLBACK25-NEXT: movl 16(%ecx), %esi
+; FALLBACK25-NEXT: movl 20(%ecx), %edi
+; FALLBACK25-NEXT: movl 24(%ecx), %ebx
+; FALLBACK25-NEXT: movl 28(%ecx), %edx
+; FALLBACK25-NEXT: movzbl (%eax), %eax
+; FALLBACK25-NEXT: movl %eax, %ecx
+; FALLBACK25-NEXT: shlb $3, %cl
+; FALLBACK25-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK25-NEXT: movl %ebx, {{[0-9]+}}(%esp)
+; FALLBACK25-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; FALLBACK25-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; FALLBACK25-NEXT: vmovaps %xmm0, {{[0-9]+}}(%esp)
+; FALLBACK25-NEXT: sarl $31, %edx
+; FALLBACK25-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK25-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK25-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK25-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK25-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK25-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK25-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK25-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK25-NEXT: andb $28, %al
+; FALLBACK25-NEXT: movzbl %al, %ebp
+; FALLBACK25-NEXT: movl 48(%esp,%ebp), %esi
+; FALLBACK25-NEXT: movl 44(%esp,%ebp), %eax
+; FALLBACK25-NEXT: movl %eax, %edx
+; FALLBACK25-NEXT: shrdl %cl, %esi, %edx
+; FALLBACK25-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK25-NEXT: movl 40(%esp,%ebp), %edx
+; FALLBACK25-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK25-NEXT: shrdl %cl, %eax, %edx
+; FALLBACK25-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK25-NEXT: movl 56(%esp,%ebp), %ebx
+; FALLBACK25-NEXT: movl 52(%esp,%ebp), %eax
+; FALLBACK25-NEXT: movl %eax, %edx
+; FALLBACK25-NEXT: shrdl %cl, %ebx, %edx
+; FALLBACK25-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK25-NEXT: shrdl %cl, %eax, %esi
+; FALLBACK25-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK25-NEXT: movl 60(%esp,%ebp), %eax
+; FALLBACK25-NEXT: shrdl %cl, %eax, %ebx
+; FALLBACK25-NEXT: movl 32(%esp,%ebp), %edx
+; FALLBACK25-NEXT: movl 36(%esp,%ebp), %edi
+; FALLBACK25-NEXT: movl %edi, %esi
+; FALLBACK25-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; FALLBACK25-NEXT: shrdl %cl, %ebp, %esi
+; FALLBACK25-NEXT: movl {{[0-9]+}}(%esp), %ebp
+; FALLBACK25-NEXT: movl %esi, 4(%ebp)
+; FALLBACK25-NEXT: movl %ebx, 24(%ebp)
+; FALLBACK25-NEXT: shrdl %cl, %edi, %edx
+; FALLBACK25-NEXT: sarl %cl, %eax
+; FALLBACK25-NEXT: movl %eax, 28(%ebp)
+; FALLBACK25-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK25-NEXT: movl %eax, 16(%ebp)
+; FALLBACK25-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK25-NEXT: movl %eax, 20(%ebp)
+; FALLBACK25-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK25-NEXT: movl %eax, 8(%ebp)
+; FALLBACK25-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK25-NEXT: movl %eax, 12(%ebp)
+; FALLBACK25-NEXT: movl %edx, (%ebp)
+; FALLBACK25-NEXT: addl $108, %esp
+; FALLBACK25-NEXT: popl %esi
+; FALLBACK25-NEXT: popl %edi
+; FALLBACK25-NEXT: popl %ebx
+; FALLBACK25-NEXT: popl %ebp
+; FALLBACK25-NEXT: retl
+;
+; FALLBACK26-LABEL: ashr_32bytes:
+; FALLBACK26: # %bb.0:
+; FALLBACK26-NEXT: pushl %ebp
+; FALLBACK26-NEXT: pushl %ebx
+; FALLBACK26-NEXT: pushl %edi
+; FALLBACK26-NEXT: pushl %esi
+; FALLBACK26-NEXT: subl $108, %esp
+; FALLBACK26-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK26-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK26-NEXT: vmovups (%ecx), %xmm0
+; FALLBACK26-NEXT: movl 16(%ecx), %esi
+; FALLBACK26-NEXT: movl 20(%ecx), %edi
+; FALLBACK26-NEXT: movl 24(%ecx), %ebx
+; FALLBACK26-NEXT: movl 28(%ecx), %edx
+; FALLBACK26-NEXT: movzbl (%eax), %ecx
+; FALLBACK26-NEXT: movl %ecx, %eax
+; FALLBACK26-NEXT: shlb $3, %al
+; FALLBACK26-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK26-NEXT: movl %ebx, {{[0-9]+}}(%esp)
+; FALLBACK26-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; FALLBACK26-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; FALLBACK26-NEXT: vmovaps %xmm0, {{[0-9]+}}(%esp)
+; FALLBACK26-NEXT: sarl $31, %edx
+; FALLBACK26-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK26-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK26-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK26-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK26-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK26-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK26-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK26-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK26-NEXT: andb $28, %cl
+; FALLBACK26-NEXT: movzbl %cl, %edi
+; FALLBACK26-NEXT: shrxl %eax, 32(%esp,%edi), %ecx
+; FALLBACK26-NEXT: movl %eax, %edx
+; FALLBACK26-NEXT: notb %dl
+; FALLBACK26-NEXT: movl 36(%esp,%edi), %esi
+; FALLBACK26-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: addl %esi, %esi
+; FALLBACK26-NEXT: shlxl %edx, %esi, %esi
+; FALLBACK26-NEXT: orl %ecx, %esi
+; FALLBACK26-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: movl 48(%esp,%edi), %ecx
+; FALLBACK26-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: addl %ecx, %ecx
+; FALLBACK26-NEXT: shlxl %edx, %ecx, %esi
+; FALLBACK26-NEXT: movl 44(%esp,%edi), %ecx
+; FALLBACK26-NEXT: shrxl %eax, %ecx, %ebx
+; FALLBACK26-NEXT: orl %ebx, %esi
+; FALLBACK26-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: addl %ecx, %ecx
+; FALLBACK26-NEXT: shlxl %edx, %ecx, %esi
+; FALLBACK26-NEXT: movl 40(%esp,%edi), %ecx
+; FALLBACK26-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: shrxl %eax, %ecx, %ebx
+; FALLBACK26-NEXT: movl %eax, %ecx
+; FALLBACK26-NEXT: orl %ebx, %esi
+; FALLBACK26-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: movl 56(%esp,%edi), %esi
+; FALLBACK26-NEXT: leal (%esi,%esi), %ebx
+; FALLBACK26-NEXT: shlxl %edx, %ebx, %eax
+; FALLBACK26-NEXT: movl 52(%esp,%edi), %ebx
+; FALLBACK26-NEXT: shrxl %ecx, %ebx, %ebp
+; FALLBACK26-NEXT: orl %ebp, %eax
+; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: movl %ecx, %eax
+; FALLBACK26-NEXT: shrxl %ecx, {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; FALLBACK26-NEXT: addl %ebx, %ebx
+; FALLBACK26-NEXT: shlxl %edx, %ebx, %ebx
+; FALLBACK26-NEXT: orl %ebp, %ebx
+; FALLBACK26-NEXT: shrxl %ecx, %esi, %ecx
+; FALLBACK26-NEXT: shrxl %eax, {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; FALLBACK26-NEXT: movl 60(%esp,%edi), %edi
+; FALLBACK26-NEXT: sarxl %eax, %edi, %eax
+; FALLBACK26-NEXT: addl %edi, %edi
+; FALLBACK26-NEXT: shlxl %edx, %edi, %edi
+; FALLBACK26-NEXT: orl %ecx, %edi
+; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK26-NEXT: addl %ecx, %ecx
+; FALLBACK26-NEXT: shlxl %edx, %ecx, %ecx
+; FALLBACK26-NEXT: orl %esi, %ecx
+; FALLBACK26-NEXT: movl {{[0-9]+}}(%esp), %edx
+; FALLBACK26-NEXT: movl %eax, 28(%edx)
+; FALLBACK26-NEXT: movl %ecx, 4(%edx)
+; FALLBACK26-NEXT: movl %edi, 24(%edx)
+; FALLBACK26-NEXT: movl %ebx, 16(%edx)
+; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK26-NEXT: movl %eax, 20(%edx)
+; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK26-NEXT: movl %eax, 8(%edx)
+; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK26-NEXT: movl %eax, 12(%edx)
+; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK26-NEXT: movl %eax, (%edx)
+; FALLBACK26-NEXT: addl $108, %esp
+; FALLBACK26-NEXT: popl %esi
+; FALLBACK26-NEXT: popl %edi
+; FALLBACK26-NEXT: popl %ebx
+; FALLBACK26-NEXT: popl %ebp
+; FALLBACK26-NEXT: retl
+;
+; FALLBACK27-LABEL: ashr_32bytes:
+; FALLBACK27: # %bb.0:
+; FALLBACK27-NEXT: pushl %ebp
+; FALLBACK27-NEXT: pushl %ebx
+; FALLBACK27-NEXT: pushl %edi
+; FALLBACK27-NEXT: pushl %esi
+; FALLBACK27-NEXT: subl $108, %esp
+; FALLBACK27-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK27-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK27-NEXT: vmovups (%ecx), %xmm0
+; FALLBACK27-NEXT: movl 16(%ecx), %esi
+; FALLBACK27-NEXT: movl 20(%ecx), %edi
+; FALLBACK27-NEXT: movl 24(%ecx), %ebx
+; FALLBACK27-NEXT: movl 28(%ecx), %edx
+; FALLBACK27-NEXT: movzbl (%eax), %eax
+; FALLBACK27-NEXT: movl %eax, %ecx
+; FALLBACK27-NEXT: shlb $3, %cl
+; FALLBACK27-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK27-NEXT: movl %ebx, {{[0-9]+}}(%esp)
+; FALLBACK27-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; FALLBACK27-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; FALLBACK27-NEXT: vmovaps %xmm0, {{[0-9]+}}(%esp)
+; FALLBACK27-NEXT: sarl $31, %edx
+; FALLBACK27-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK27-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK27-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK27-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK27-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK27-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK27-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK27-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK27-NEXT: andb $28, %al
+; FALLBACK27-NEXT: movzbl %al, %ebx
+; FALLBACK27-NEXT: movl 48(%esp,%ebx), %esi
+; FALLBACK27-NEXT: movl 44(%esp,%ebx), %eax
+; FALLBACK27-NEXT: movl %eax, %edx
+; FALLBACK27-NEXT: shrdl %cl, %esi, %edx
+; FALLBACK27-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK27-NEXT: movl 40(%esp,%ebx), %edx
+; FALLBACK27-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK27-NEXT: shrdl %cl, %eax, %edx
+; FALLBACK27-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK27-NEXT: movl 56(%esp,%ebx), %ebp
+; FALLBACK27-NEXT: movl 52(%esp,%ebx), %eax
+; FALLBACK27-NEXT: movl %eax, %edi
+; FALLBACK27-NEXT: shrdl %cl, %ebp, %edi
+; FALLBACK27-NEXT: shrdl %cl, %eax, %esi
+; FALLBACK27-NEXT: movl 60(%esp,%ebx), %eax
+; FALLBACK27-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK27-NEXT: shrdl %cl, %eax, %ebp
+; FALLBACK27-NEXT: movl 32(%esp,%ebx), %edx
+; FALLBACK27-NEXT: movl 36(%esp,%ebx), %ebx
+; FALLBACK27-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK27-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK27-NEXT: shrdl %cl, %eax, %ebx
+; FALLBACK27-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK27-NEXT: movl %ebx, 4(%eax)
+; FALLBACK27-NEXT: movl %ebp, 24(%eax)
+; FALLBACK27-NEXT: sarxl %ecx, {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; FALLBACK27-NEXT: movl %ebx, 28(%eax)
+; FALLBACK27-NEXT: movl %esi, 16(%eax)
+; FALLBACK27-NEXT: movl %edi, 20(%eax)
+; FALLBACK27-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; FALLBACK27-NEXT: movl %esi, 8(%eax)
+; FALLBACK27-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; FALLBACK27-NEXT: movl %esi, 12(%eax)
+; FALLBACK27-NEXT: # kill: def $cl killed $cl killed $ecx
+; FALLBACK27-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; FALLBACK27-NEXT: shrdl %cl, %esi, %edx
+; FALLBACK27-NEXT: movl %edx, (%eax)
+; FALLBACK27-NEXT: addl $108, %esp
+; FALLBACK27-NEXT: popl %esi
+; FALLBACK27-NEXT: popl %edi
+; FALLBACK27-NEXT: popl %ebx
+; FALLBACK27-NEXT: popl %ebp
+; FALLBACK27-NEXT: retl
+;
+; FALLBACK28-LABEL: ashr_32bytes:
+; FALLBACK28: # %bb.0:
+; FALLBACK28-NEXT: pushl %ebp
+; FALLBACK28-NEXT: pushl %ebx
+; FALLBACK28-NEXT: pushl %edi
+; FALLBACK28-NEXT: pushl %esi
+; FALLBACK28-NEXT: subl $108, %esp
+; FALLBACK28-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK28-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK28-NEXT: vmovups (%ecx), %xmm0
+; FALLBACK28-NEXT: movl 16(%ecx), %esi
+; FALLBACK28-NEXT: movl 20(%ecx), %edi
+; FALLBACK28-NEXT: movl 24(%ecx), %ebx
+; FALLBACK28-NEXT: movl 28(%ecx), %edx
+; FALLBACK28-NEXT: movzbl (%eax), %eax
+; FALLBACK28-NEXT: movl %eax, %ecx
+; FALLBACK28-NEXT: shlb $3, %cl
+; FALLBACK28-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK28-NEXT: movl %ebx, {{[0-9]+}}(%esp)
+; FALLBACK28-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; FALLBACK28-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; FALLBACK28-NEXT: vmovaps %xmm0, {{[0-9]+}}(%esp)
+; FALLBACK28-NEXT: sarl $31, %edx
+; FALLBACK28-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK28-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK28-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK28-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK28-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK28-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK28-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK28-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK28-NEXT: andb $28, %al
+; FALLBACK28-NEXT: movzbl %al, %edi
+; FALLBACK28-NEXT: movl 32(%esp,%edi), %eax
+; FALLBACK28-NEXT: movl 36(%esp,%edi), %esi
+; FALLBACK28-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK28-NEXT: shrl %cl, %eax
+; FALLBACK28-NEXT: movl %ecx, %edx
+; FALLBACK28-NEXT: movb %cl, %dh
+; FALLBACK28-NEXT: notb %dl
+; FALLBACK28-NEXT: addl %esi, %esi
+; FALLBACK28-NEXT: movl %edx, %ecx
+; FALLBACK28-NEXT: shll %cl, %esi
+; FALLBACK28-NEXT: orl %eax, %esi
+; FALLBACK28-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK28-NEXT: movl 44(%esp,%edi), %ebx
+; FALLBACK28-NEXT: movl %ebx, %eax
+; FALLBACK28-NEXT: movb %dh, %cl
+; FALLBACK28-NEXT: shrl %cl, %eax
+; FALLBACK28-NEXT: movl 48(%esp,%edi), %esi
+; FALLBACK28-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK28-NEXT: addl %esi, %esi
+; FALLBACK28-NEXT: movl %edx, %ecx
+; FALLBACK28-NEXT: shll %cl, %esi
+; FALLBACK28-NEXT: orl %eax, %esi
+; FALLBACK28-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK28-NEXT: movl 40(%esp,%edi), %esi
+; FALLBACK28-NEXT: movl %esi, %eax
+; FALLBACK28-NEXT: movb %dh, %cl
+; FALLBACK28-NEXT: shrl %cl, %eax
+; FALLBACK28-NEXT: addl %ebx, %ebx
+; FALLBACK28-NEXT: movl %edx, %ecx
+; FALLBACK28-NEXT: shll %cl, %ebx
+; FALLBACK28-NEXT: orl %eax, %ebx
+; FALLBACK28-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK28-NEXT: movl 52(%esp,%edi), %ebp
+; FALLBACK28-NEXT: movl %ebp, %eax
+; FALLBACK28-NEXT: movb %dh, %cl
+; FALLBACK28-NEXT: shrl %cl, %eax
+; FALLBACK28-NEXT: movl 56(%esp,%edi), %ecx
+; FALLBACK28-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK28-NEXT: leal (%ecx,%ecx), %ebx
+; FALLBACK28-NEXT: movl %edx, %ecx
+; FALLBACK28-NEXT: shll %cl, %ebx
+; FALLBACK28-NEXT: orl %eax, %ebx
+; FALLBACK28-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK28-NEXT: movb %dh, %cl
+; FALLBACK28-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK28-NEXT: shrl %cl, %eax
+; FALLBACK28-NEXT: addl %ebp, %ebp
+; FALLBACK28-NEXT: movl %edx, %ecx
+; FALLBACK28-NEXT: shll %cl, %ebp
+; FALLBACK28-NEXT: orl %eax, %ebp
+; FALLBACK28-NEXT: movb %dh, %cl
+; FALLBACK28-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; FALLBACK28-NEXT: shrl %cl, %ebx
+; FALLBACK28-NEXT: movl 60(%esp,%edi), %eax
+; FALLBACK28-NEXT: leal (%eax,%eax), %edi
+; FALLBACK28-NEXT: movl %edx, %ecx
+; FALLBACK28-NEXT: shll %cl, %edi
+; FALLBACK28-NEXT: orl %ebx, %edi
+; FALLBACK28-NEXT: movb %dh, %cl
+; FALLBACK28-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; FALLBACK28-NEXT: shrl %cl, %ebx
+; FALLBACK28-NEXT: addl %esi, %esi
+; FALLBACK28-NEXT: movl %edx, %ecx
+; FALLBACK28-NEXT: shll %cl, %esi
+; FALLBACK28-NEXT: orl %ebx, %esi
+; FALLBACK28-NEXT: movb %dh, %cl
+; FALLBACK28-NEXT: sarl %cl, %eax
+; FALLBACK28-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK28-NEXT: movl %eax, 28(%ecx)
+; FALLBACK28-NEXT: movl %esi, 4(%ecx)
+; FALLBACK28-NEXT: movl %edi, 24(%ecx)
+; FALLBACK28-NEXT: movl %ebp, 16(%ecx)
+; FALLBACK28-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK28-NEXT: movl %eax, 20(%ecx)
+; FALLBACK28-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK28-NEXT: movl %eax, 8(%ecx)
+; FALLBACK28-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK28-NEXT: movl %eax, 12(%ecx)
+; FALLBACK28-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK28-NEXT: movl %eax, (%ecx)
+; FALLBACK28-NEXT: addl $108, %esp
+; FALLBACK28-NEXT: popl %esi
+; FALLBACK28-NEXT: popl %edi
+; FALLBACK28-NEXT: popl %ebx
+; FALLBACK28-NEXT: popl %ebp
+; FALLBACK28-NEXT: retl
+;
+; FALLBACK29-LABEL: ashr_32bytes:
+; FALLBACK29: # %bb.0:
+; FALLBACK29-NEXT: pushl %ebp
+; FALLBACK29-NEXT: pushl %ebx
+; FALLBACK29-NEXT: pushl %edi
+; FALLBACK29-NEXT: pushl %esi
+; FALLBACK29-NEXT: subl $108, %esp
+; FALLBACK29-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK29-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK29-NEXT: vmovups (%ecx), %xmm0
+; FALLBACK29-NEXT: movl 16(%ecx), %esi
+; FALLBACK29-NEXT: movl 20(%ecx), %edi
+; FALLBACK29-NEXT: movl 24(%ecx), %ebx
+; FALLBACK29-NEXT: movl 28(%ecx), %edx
+; FALLBACK29-NEXT: movzbl (%eax), %eax
+; FALLBACK29-NEXT: movl %eax, %ecx
+; FALLBACK29-NEXT: shlb $3, %cl
+; FALLBACK29-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK29-NEXT: movl %ebx, {{[0-9]+}}(%esp)
+; FALLBACK29-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; FALLBACK29-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; FALLBACK29-NEXT: vmovaps %xmm0, {{[0-9]+}}(%esp)
+; FALLBACK29-NEXT: sarl $31, %edx
+; FALLBACK29-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK29-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK29-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK29-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK29-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK29-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK29-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK29-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK29-NEXT: andb $28, %al
+; FALLBACK29-NEXT: movzbl %al, %ebp
+; FALLBACK29-NEXT: movl 48(%esp,%ebp), %esi
+; FALLBACK29-NEXT: movl 44(%esp,%ebp), %eax
+; FALLBACK29-NEXT: movl %eax, %edx
+; FALLBACK29-NEXT: shrdl %cl, %esi, %edx
+; FALLBACK29-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK29-NEXT: movl 40(%esp,%ebp), %edx
+; FALLBACK29-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK29-NEXT: shrdl %cl, %eax, %edx
+; FALLBACK29-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK29-NEXT: movl 56(%esp,%ebp), %ebx
+; FALLBACK29-NEXT: movl 52(%esp,%ebp), %eax
+; FALLBACK29-NEXT: movl %eax, %edx
+; FALLBACK29-NEXT: shrdl %cl, %ebx, %edx
+; FALLBACK29-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK29-NEXT: shrdl %cl, %eax, %esi
+; FALLBACK29-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK29-NEXT: movl 60(%esp,%ebp), %eax
+; FALLBACK29-NEXT: shrdl %cl, %eax, %ebx
+; FALLBACK29-NEXT: movl 32(%esp,%ebp), %edx
+; FALLBACK29-NEXT: movl 36(%esp,%ebp), %edi
+; FALLBACK29-NEXT: movl %edi, %esi
+; FALLBACK29-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; FALLBACK29-NEXT: shrdl %cl, %ebp, %esi
+; FALLBACK29-NEXT: movl {{[0-9]+}}(%esp), %ebp
+; FALLBACK29-NEXT: movl %esi, 4(%ebp)
+; FALLBACK29-NEXT: movl %ebx, 24(%ebp)
+; FALLBACK29-NEXT: shrdl %cl, %edi, %edx
+; FALLBACK29-NEXT: sarl %cl, %eax
+; FALLBACK29-NEXT: movl %eax, 28(%ebp)
+; FALLBACK29-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK29-NEXT: movl %eax, 16(%ebp)
+; FALLBACK29-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK29-NEXT: movl %eax, 20(%ebp)
+; FALLBACK29-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK29-NEXT: movl %eax, 8(%ebp)
+; FALLBACK29-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK29-NEXT: movl %eax, 12(%ebp)
+; FALLBACK29-NEXT: movl %edx, (%ebp)
+; FALLBACK29-NEXT: addl $108, %esp
+; FALLBACK29-NEXT: popl %esi
+; FALLBACK29-NEXT: popl %edi
+; FALLBACK29-NEXT: popl %ebx
+; FALLBACK29-NEXT: popl %ebp
+; FALLBACK29-NEXT: retl
+;
+; FALLBACK30-LABEL: ashr_32bytes:
+; FALLBACK30: # %bb.0:
+; FALLBACK30-NEXT: pushl %ebp
+; FALLBACK30-NEXT: pushl %ebx
+; FALLBACK30-NEXT: pushl %edi
+; FALLBACK30-NEXT: pushl %esi
+; FALLBACK30-NEXT: subl $108, %esp
+; FALLBACK30-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK30-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK30-NEXT: vmovups (%ecx), %xmm0
+; FALLBACK30-NEXT: movl 16(%ecx), %esi
+; FALLBACK30-NEXT: movl 20(%ecx), %edi
+; FALLBACK30-NEXT: movl 24(%ecx), %ebx
+; FALLBACK30-NEXT: movl 28(%ecx), %edx
+; FALLBACK30-NEXT: movzbl (%eax), %ecx
+; FALLBACK30-NEXT: movl %ecx, %eax
+; FALLBACK30-NEXT: shlb $3, %al
+; FALLBACK30-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK30-NEXT: movl %ebx, {{[0-9]+}}(%esp)
+; FALLBACK30-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; FALLBACK30-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; FALLBACK30-NEXT: vmovaps %xmm0, {{[0-9]+}}(%esp)
+; FALLBACK30-NEXT: sarl $31, %edx
+; FALLBACK30-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK30-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK30-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK30-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK30-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK30-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK30-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK30-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK30-NEXT: andb $28, %cl
+; FALLBACK30-NEXT: movzbl %cl, %edi
+; FALLBACK30-NEXT: shrxl %eax, 32(%esp,%edi), %ecx
+; FALLBACK30-NEXT: movl %eax, %edx
+; FALLBACK30-NEXT: notb %dl
+; FALLBACK30-NEXT: movl 36(%esp,%edi), %esi
+; FALLBACK30-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: addl %esi, %esi
+; FALLBACK30-NEXT: shlxl %edx, %esi, %esi
+; FALLBACK30-NEXT: orl %ecx, %esi
+; FALLBACK30-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: movl 48(%esp,%edi), %ecx
+; FALLBACK30-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: addl %ecx, %ecx
+; FALLBACK30-NEXT: shlxl %edx, %ecx, %esi
+; FALLBACK30-NEXT: movl 44(%esp,%edi), %ecx
+; FALLBACK30-NEXT: shrxl %eax, %ecx, %ebx
+; FALLBACK30-NEXT: orl %ebx, %esi
+; FALLBACK30-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: addl %ecx, %ecx
+; FALLBACK30-NEXT: shlxl %edx, %ecx, %esi
+; FALLBACK30-NEXT: movl 40(%esp,%edi), %ecx
+; FALLBACK30-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: shrxl %eax, %ecx, %ebx
+; FALLBACK30-NEXT: movl %eax, %ecx
+; FALLBACK30-NEXT: orl %ebx, %esi
+; FALLBACK30-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: movl 56(%esp,%edi), %esi
+; FALLBACK30-NEXT: leal (%esi,%esi), %ebx
+; FALLBACK30-NEXT: shlxl %edx, %ebx, %eax
+; FALLBACK30-NEXT: movl 52(%esp,%edi), %ebx
+; FALLBACK30-NEXT: shrxl %ecx, %ebx, %ebp
+; FALLBACK30-NEXT: orl %ebp, %eax
+; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: movl %ecx, %eax
+; FALLBACK30-NEXT: shrxl %ecx, {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; FALLBACK30-NEXT: addl %ebx, %ebx
+; FALLBACK30-NEXT: shlxl %edx, %ebx, %ebx
+; FALLBACK30-NEXT: orl %ebp, %ebx
+; FALLBACK30-NEXT: shrxl %ecx, %esi, %ecx
+; FALLBACK30-NEXT: shrxl %eax, {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; FALLBACK30-NEXT: movl 60(%esp,%edi), %edi
+; FALLBACK30-NEXT: sarxl %eax, %edi, %eax
+; FALLBACK30-NEXT: addl %edi, %edi
+; FALLBACK30-NEXT: shlxl %edx, %edi, %edi
+; FALLBACK30-NEXT: orl %ecx, %edi
+; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK30-NEXT: addl %ecx, %ecx
+; FALLBACK30-NEXT: shlxl %edx, %ecx, %ecx
+; FALLBACK30-NEXT: orl %esi, %ecx
+; FALLBACK30-NEXT: movl {{[0-9]+}}(%esp), %edx
+; FALLBACK30-NEXT: movl %eax, 28(%edx)
+; FALLBACK30-NEXT: movl %ecx, 4(%edx)
+; FALLBACK30-NEXT: movl %edi, 24(%edx)
+; FALLBACK30-NEXT: movl %ebx, 16(%edx)
+; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK30-NEXT: movl %eax, 20(%edx)
+; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK30-NEXT: movl %eax, 8(%edx)
+; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK30-NEXT: movl %eax, 12(%edx)
+; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK30-NEXT: movl %eax, (%edx)
+; FALLBACK30-NEXT: addl $108, %esp
+; FALLBACK30-NEXT: popl %esi
+; FALLBACK30-NEXT: popl %edi
+; FALLBACK30-NEXT: popl %ebx
+; FALLBACK30-NEXT: popl %ebp
+; FALLBACK30-NEXT: retl
+;
+; FALLBACK31-LABEL: ashr_32bytes:
+; FALLBACK31: # %bb.0:
+; FALLBACK31-NEXT: pushl %ebp
+; FALLBACK31-NEXT: pushl %ebx
+; FALLBACK31-NEXT: pushl %edi
+; FALLBACK31-NEXT: pushl %esi
+; FALLBACK31-NEXT: subl $108, %esp
+; FALLBACK31-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK31-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK31-NEXT: vmovups (%ecx), %xmm0
+; FALLBACK31-NEXT: movl 16(%ecx), %esi
+; FALLBACK31-NEXT: movl 20(%ecx), %edi
+; FALLBACK31-NEXT: movl 24(%ecx), %ebx
+; FALLBACK31-NEXT: movl 28(%ecx), %edx
+; FALLBACK31-NEXT: movzbl (%eax), %eax
+; FALLBACK31-NEXT: movl %eax, %ecx
+; FALLBACK31-NEXT: shlb $3, %cl
+; FALLBACK31-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK31-NEXT: movl %ebx, {{[0-9]+}}(%esp)
+; FALLBACK31-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; FALLBACK31-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; FALLBACK31-NEXT: vmovaps %xmm0, {{[0-9]+}}(%esp)
+; FALLBACK31-NEXT: sarl $31, %edx
+; FALLBACK31-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK31-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK31-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK31-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK31-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK31-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK31-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK31-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK31-NEXT: andb $28, %al
+; FALLBACK31-NEXT: movzbl %al, %ebx
+; FALLBACK31-NEXT: movl 48(%esp,%ebx), %esi
+; FALLBACK31-NEXT: movl 44(%esp,%ebx), %eax
+; FALLBACK31-NEXT: movl %eax, %edx
+; FALLBACK31-NEXT: shrdl %cl, %esi, %edx
+; FALLBACK31-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK31-NEXT: movl 40(%esp,%ebx), %edx
+; FALLBACK31-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK31-NEXT: shrdl %cl, %eax, %edx
+; FALLBACK31-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK31-NEXT: movl 56(%esp,%ebx), %ebp
+; FALLBACK31-NEXT: movl 52(%esp,%ebx), %eax
+; FALLBACK31-NEXT: movl %eax, %edi
+; FALLBACK31-NEXT: shrdl %cl, %ebp, %edi
+; FALLBACK31-NEXT: shrdl %cl, %eax, %esi
+; FALLBACK31-NEXT: movl 60(%esp,%ebx), %eax
+; FALLBACK31-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK31-NEXT: shrdl %cl, %eax, %ebp
+; FALLBACK31-NEXT: movl 32(%esp,%ebx), %edx
+; FALLBACK31-NEXT: movl 36(%esp,%ebx), %ebx
+; FALLBACK31-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK31-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK31-NEXT: shrdl %cl, %eax, %ebx
+; FALLBACK31-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK31-NEXT: movl %ebx, 4(%eax)
+; FALLBACK31-NEXT: movl %ebp, 24(%eax)
+; FALLBACK31-NEXT: sarxl %ecx, {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; FALLBACK31-NEXT: movl %ebx, 28(%eax)
+; FALLBACK31-NEXT: movl %esi, 16(%eax)
+; FALLBACK31-NEXT: movl %edi, 20(%eax)
+; FALLBACK31-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; FALLBACK31-NEXT: movl %esi, 8(%eax)
+; FALLBACK31-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; FALLBACK31-NEXT: movl %esi, 12(%eax)
+; FALLBACK31-NEXT: # kill: def $cl killed $cl killed $ecx
+; FALLBACK31-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; FALLBACK31-NEXT: shrdl %cl, %esi, %edx
+; FALLBACK31-NEXT: movl %edx, (%eax)
+; FALLBACK31-NEXT: addl $108, %esp
+; FALLBACK31-NEXT: popl %esi
+; FALLBACK31-NEXT: popl %edi
+; FALLBACK31-NEXT: popl %ebx
+; FALLBACK31-NEXT: popl %ebp
+; FALLBACK31-NEXT: retl
+ %src = load i256, ptr %src.ptr, align 1
+ %byteOff = load i256, ptr %byteOff.ptr, align 1
+ %bitOff = shl i256 %byteOff, 3
+ %res = ashr i256 %src, %bitOff
+ store i256 %res, ptr %dst, align 1
+ ret void
+}
+
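All of the scalar FALLBACK paths above implement one strategy: spill the 32 source bytes to the stack, store a further 32 bytes of sign fill after them (the sarq $63 / sarl $31 stores), mask the byte offset to the buffer, and rebuild each output word from an adjacent pair of buffer words. A minimal C sketch of that strategy follows, assuming a little-endian host and 64-bit buffer words as in the x86-64 paths (the i386 paths do the same with 4-byte words); the helper name and layout are illustrative, not part of the test:

  #include <stdint.h>
  #include <string.h>

  /* Sketch of the stack-buffer lowering checked above (hypothetical). */
  static void ashr_32bytes_sketch(const void *src, unsigned byte_off,
                                  void *dst) {
    uint64_t buf[8];
    memcpy(buf, src, 32);                  /* the movq/movaps spills      */
    uint64_t fill = (buf[3] >> 63) ? ~0ULL : 0; /* sign, as sarq $63      */
    for (int i = 4; i < 8; ++i)
      buf[i] = fill;                       /* the four sign-fill stores   */
    byte_off &= 31;                        /* the andb/andl masking       */
    unsigned word = byte_off / 8;          /* whole-word displacement     */
    unsigned bit = (byte_off % 8) * 8;     /* residual bit count (%cl)    */
    uint64_t out[4];
    for (int i = 0; i < 4; ++i) {
      uint64_t lo = buf[word + i] >> bit;
      uint64_t hi = bit ? buf[word + i + 1] << (64 - bit) : 0;
      out[i] = lo | hi;                    /* the shrq/shlq/orq funnels   */
    }
    memcpy(dst, out, 32);
  }

The bit ? ... : 0 guard mirrors the asm's notb/lea trick, under which a zero residual shift naturally contributes nothing from the neighboring word.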
+define void @ashr_32bytes_dwordOff(ptr %src.ptr, ptr %dwordOff.ptr, ptr %dst) nounwind {
+; FALLBACK0-LABEL: ashr_32bytes_dwordOff:
+; FALLBACK0: # %bb.0:
+; FALLBACK0-NEXT: pushq %rbx
+; FALLBACK0-NEXT: movq (%rdi), %rcx
+; FALLBACK0-NEXT: movq 8(%rdi), %r8
+; FALLBACK0-NEXT: movq 16(%rdi), %r9
+; FALLBACK0-NEXT: movq 24(%rdi), %rdi
+; FALLBACK0-NEXT: movzbl (%rsi), %esi
+; FALLBACK0-NEXT: movl %esi, %eax
+; FALLBACK0-NEXT: shlb $5, %al
+; FALLBACK0-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK0-NEXT: movq %r9, -{{[0-9]+}}(%rsp)
+; FALLBACK0-NEXT: movq %r8, -{{[0-9]+}}(%rsp)
+; FALLBACK0-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
+; FALLBACK0-NEXT: sarq $63, %rdi
+; FALLBACK0-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK0-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK0-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK0-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK0-NEXT: andb $6, %sil
+; FALLBACK0-NEXT: movzbl %sil, %r9d
+; FALLBACK0-NEXT: movq -64(%rsp,%r9,4), %r10
+; FALLBACK0-NEXT: movq -56(%rsp,%r9,4), %rdi
+; FALLBACK0-NEXT: movq %rdi, %r11
+; FALLBACK0-NEXT: movl %eax, %ecx
+; FALLBACK0-NEXT: shrq %cl, %r11
+; FALLBACK0-NEXT: movl %eax, %esi
+; FALLBACK0-NEXT: notb %sil
+; FALLBACK0-NEXT: movq -48(%rsp,%r9,4), %rbx
+; FALLBACK0-NEXT: leaq (%rbx,%rbx), %r8
+; FALLBACK0-NEXT: movl %esi, %ecx
+; FALLBACK0-NEXT: shlq %cl, %r8
+; FALLBACK0-NEXT: orq %r11, %r8
+; FALLBACK0-NEXT: movl %eax, %ecx
+; FALLBACK0-NEXT: shrq %cl, %r10
+; FALLBACK0-NEXT: addq %rdi, %rdi
+; FALLBACK0-NEXT: movl %esi, %ecx
+; FALLBACK0-NEXT: shlq %cl, %rdi
+; FALLBACK0-NEXT: orq %r10, %rdi
+; FALLBACK0-NEXT: movl %eax, %ecx
+; FALLBACK0-NEXT: shrq %cl, %rbx
+; FALLBACK0-NEXT: movq -40(%rsp,%r9,4), %r9
+; FALLBACK0-NEXT: leaq (%r9,%r9), %r10
+; FALLBACK0-NEXT: movl %esi, %ecx
+; FALLBACK0-NEXT: shlq %cl, %r10
+; FALLBACK0-NEXT: orq %rbx, %r10
+; FALLBACK0-NEXT: movl %eax, %ecx
+; FALLBACK0-NEXT: sarq %cl, %r9
+; FALLBACK0-NEXT: movq %r9, 24(%rdx)
+; FALLBACK0-NEXT: movq %r10, 16(%rdx)
+; FALLBACK0-NEXT: movq %rdi, (%rdx)
+; FALLBACK0-NEXT: movq %r8, 8(%rdx)
+; FALLBACK0-NEXT: popq %rbx
+; FALLBACK0-NEXT: retq
+;
+; FALLBACK1-LABEL: ashr_32bytes_dwordOff:
+; FALLBACK1: # %bb.0:
+; FALLBACK1-NEXT: movq (%rdi), %rax
+; FALLBACK1-NEXT: movq 8(%rdi), %r8
+; FALLBACK1-NEXT: movq 16(%rdi), %r9
+; FALLBACK1-NEXT: movq 24(%rdi), %rdi
+; FALLBACK1-NEXT: movzbl (%rsi), %esi
+; FALLBACK1-NEXT: movl %esi, %ecx
+; FALLBACK1-NEXT: shlb $5, %cl
+; FALLBACK1-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK1-NEXT: movq %r9, -{{[0-9]+}}(%rsp)
+; FALLBACK1-NEXT: movq %r8, -{{[0-9]+}}(%rsp)
+; FALLBACK1-NEXT: movq %rax, -{{[0-9]+}}(%rsp)
+; FALLBACK1-NEXT: sarq $63, %rdi
+; FALLBACK1-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK1-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK1-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK1-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK1-NEXT: andb $6, %sil
+; FALLBACK1-NEXT: movzbl %sil, %eax
+; FALLBACK1-NEXT: movq -56(%rsp,%rax,4), %rsi
+; FALLBACK1-NEXT: movq -72(%rsp,%rax,4), %rdi
+; FALLBACK1-NEXT: movq -64(%rsp,%rax,4), %r8
+; FALLBACK1-NEXT: movq %r8, %r9
+; FALLBACK1-NEXT: shrdq %cl, %rsi, %r9
+; FALLBACK1-NEXT: movq -48(%rsp,%rax,4), %rax
+; FALLBACK1-NEXT: shrdq %cl, %rax, %rsi
+; FALLBACK1-NEXT: shrdq %cl, %r8, %rdi
+; FALLBACK1-NEXT: sarq %cl, %rax
+; FALLBACK1-NEXT: movq %rsi, 16(%rdx)
+; FALLBACK1-NEXT: movq %rax, 24(%rdx)
+; FALLBACK1-NEXT: movq %rdi, (%rdx)
+; FALLBACK1-NEXT: movq %r9, 8(%rdx)
+; FALLBACK1-NEXT: retq
+;
+; FALLBACK2-LABEL: ashr_32bytes_dwordOff:
+; FALLBACK2: # %bb.0:
+; FALLBACK2-NEXT: movq (%rdi), %rcx
+; FALLBACK2-NEXT: movq 8(%rdi), %r8
+; FALLBACK2-NEXT: movq 16(%rdi), %r9
+; FALLBACK2-NEXT: movq 24(%rdi), %rdi
+; FALLBACK2-NEXT: movzbl (%rsi), %esi
+; FALLBACK2-NEXT: movl %esi, %eax
+; FALLBACK2-NEXT: shlb $5, %al
+; FALLBACK2-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK2-NEXT: movq %r9, -{{[0-9]+}}(%rsp)
+; FALLBACK2-NEXT: movq %r8, -{{[0-9]+}}(%rsp)
+; FALLBACK2-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
+; FALLBACK2-NEXT: sarq $63, %rdi
+; FALLBACK2-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK2-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK2-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK2-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK2-NEXT: andb $6, %sil
+; FALLBACK2-NEXT: movzbl %sil, %ecx
+; FALLBACK2-NEXT: movq -64(%rsp,%rcx,4), %rsi
+; FALLBACK2-NEXT: movq -56(%rsp,%rcx,4), %rdi
+; FALLBACK2-NEXT: shrxq %rax, %rsi, %r8
+; FALLBACK2-NEXT: shrxq %rax, -72(%rsp,%rcx,4), %r9
+; FALLBACK2-NEXT: shrxq %rax, %rdi, %r10
+; FALLBACK2-NEXT: movq -48(%rsp,%rcx,4), %rcx
+; FALLBACK2-NEXT: sarxq %rax, %rcx, %r11
+; FALLBACK2-NEXT: # kill: def $al killed $al killed $rax def $rax
+; FALLBACK2-NEXT: notb %al
+; FALLBACK2-NEXT: addq %rdi, %rdi
+; FALLBACK2-NEXT: shlxq %rax, %rdi, %rdi
+; FALLBACK2-NEXT: orq %r8, %rdi
+; FALLBACK2-NEXT: addq %rsi, %rsi
+; FALLBACK2-NEXT: shlxq %rax, %rsi, %rsi
+; FALLBACK2-NEXT: orq %r9, %rsi
+; FALLBACK2-NEXT: addq %rcx, %rcx
+; FALLBACK2-NEXT: shlxq %rax, %rcx, %rax
+; FALLBACK2-NEXT: orq %r10, %rax
+; FALLBACK2-NEXT: movq %r11, 24(%rdx)
+; FALLBACK2-NEXT: movq %rax, 16(%rdx)
+; FALLBACK2-NEXT: movq %rsi, (%rdx)
+; FALLBACK2-NEXT: movq %rdi, 8(%rdx)
+; FALLBACK2-NEXT: retq
+;
+; FALLBACK3-LABEL: ashr_32bytes_dwordOff:
+; FALLBACK3: # %bb.0:
+; FALLBACK3-NEXT: movq (%rdi), %rax
+; FALLBACK3-NEXT: movq 8(%rdi), %r8
+; FALLBACK3-NEXT: movq 16(%rdi), %r9
+; FALLBACK3-NEXT: movq 24(%rdi), %rdi
+; FALLBACK3-NEXT: movzbl (%rsi), %esi
+; FALLBACK3-NEXT: movl %esi, %ecx
+; FALLBACK3-NEXT: shlb $5, %cl
+; FALLBACK3-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK3-NEXT: movq %r9, -{{[0-9]+}}(%rsp)
+; FALLBACK3-NEXT: movq %r8, -{{[0-9]+}}(%rsp)
+; FALLBACK3-NEXT: movq %rax, -{{[0-9]+}}(%rsp)
+; FALLBACK3-NEXT: sarq $63, %rdi
+; FALLBACK3-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK3-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK3-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK3-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK3-NEXT: andb $6, %sil
+; FALLBACK3-NEXT: movzbl %sil, %eax
+; FALLBACK3-NEXT: movq -56(%rsp,%rax,4), %rsi
+; FALLBACK3-NEXT: movq -72(%rsp,%rax,4), %rdi
+; FALLBACK3-NEXT: movq -64(%rsp,%rax,4), %r8
+; FALLBACK3-NEXT: movq %r8, %r9
+; FALLBACK3-NEXT: shrdq %cl, %rsi, %r9
+; FALLBACK3-NEXT: movq -48(%rsp,%rax,4), %rax
+; FALLBACK3-NEXT: shrdq %cl, %rax, %rsi
+; FALLBACK3-NEXT: shrdq %cl, %r8, %rdi
+; FALLBACK3-NEXT: sarxq %rcx, %rax, %rax
+; FALLBACK3-NEXT: movq %rsi, 16(%rdx)
+; FALLBACK3-NEXT: movq %rax, 24(%rdx)
+; FALLBACK3-NEXT: movq %rdi, (%rdx)
+; FALLBACK3-NEXT: movq %r9, 8(%rdx)
+; FALLBACK3-NEXT: retq
+;
+; FALLBACK4-LABEL: ashr_32bytes_dwordOff:
+; FALLBACK4: # %bb.0:
+; FALLBACK4-NEXT: pushq %rbx
+; FALLBACK4-NEXT: movups (%rdi), %xmm0
+; FALLBACK4-NEXT: movq 16(%rdi), %rcx
+; FALLBACK4-NEXT: movq 24(%rdi), %rdi
+; FALLBACK4-NEXT: movzbl (%rsi), %esi
+; FALLBACK4-NEXT: movl %esi, %eax
+; FALLBACK4-NEXT: shlb $5, %al
+; FALLBACK4-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK4-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
+; FALLBACK4-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK4-NEXT: sarq $63, %rdi
+; FALLBACK4-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK4-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK4-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK4-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK4-NEXT: andb $6, %sil
+; FALLBACK4-NEXT: movzbl %sil, %r9d
+; FALLBACK4-NEXT: movq -64(%rsp,%r9,4), %r10
+; FALLBACK4-NEXT: movq -56(%rsp,%r9,4), %r8
+; FALLBACK4-NEXT: movl %eax, %ecx
+; FALLBACK4-NEXT: shrq %cl, %r10
+; FALLBACK4-NEXT: movl %eax, %esi
+; FALLBACK4-NEXT: notb %sil
+; FALLBACK4-NEXT: leaq (%r8,%r8), %rdi
+; FALLBACK4-NEXT: movl %esi, %ecx
+; FALLBACK4-NEXT: shlq %cl, %rdi
+; FALLBACK4-NEXT: orq %r10, %rdi
+; FALLBACK4-NEXT: movq -48(%rsp,%r9,4), %r10
+; FALLBACK4-NEXT: movq %r10, %r11
+; FALLBACK4-NEXT: movl %eax, %ecx
+; FALLBACK4-NEXT: shrq %cl, %r11
+; FALLBACK4-NEXT: movq -40(%rsp,%r9,4), %r9
+; FALLBACK4-NEXT: leaq (%r9,%r9), %rbx
+; FALLBACK4-NEXT: movl %esi, %ecx
+; FALLBACK4-NEXT: shlq %cl, %rbx
+; FALLBACK4-NEXT: orq %r11, %rbx
+; FALLBACK4-NEXT: movl %eax, %ecx
+; FALLBACK4-NEXT: shrq %cl, %r8
+; FALLBACK4-NEXT: addq %r10, %r10
+; FALLBACK4-NEXT: movl %esi, %ecx
+; FALLBACK4-NEXT: shlq %cl, %r10
+; FALLBACK4-NEXT: orq %r8, %r10
+; FALLBACK4-NEXT: movl %eax, %ecx
+; FALLBACK4-NEXT: sarq %cl, %r9
+; FALLBACK4-NEXT: movq %r9, 24(%rdx)
+; FALLBACK4-NEXT: movq %r10, 8(%rdx)
+; FALLBACK4-NEXT: movq %rbx, 16(%rdx)
+; FALLBACK4-NEXT: movq %rdi, (%rdx)
+; FALLBACK4-NEXT: popq %rbx
+; FALLBACK4-NEXT: retq
+;
+; FALLBACK5-LABEL: ashr_32bytes_dwordOff:
+; FALLBACK5: # %bb.0:
+; FALLBACK5-NEXT: movups (%rdi), %xmm0
+; FALLBACK5-NEXT: movq 16(%rdi), %rax
+; FALLBACK5-NEXT: movq 24(%rdi), %rdi
+; FALLBACK5-NEXT: movzbl (%rsi), %esi
+; FALLBACK5-NEXT: movl %esi, %ecx
+; FALLBACK5-NEXT: shlb $5, %cl
+; FALLBACK5-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK5-NEXT: movq %rax, -{{[0-9]+}}(%rsp)
+; FALLBACK5-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK5-NEXT: sarq $63, %rdi
+; FALLBACK5-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK5-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK5-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK5-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK5-NEXT: andb $6, %sil
+; FALLBACK5-NEXT: movzbl %sil, %eax
+; FALLBACK5-NEXT: movq -48(%rsp,%rax,4), %rsi
+; FALLBACK5-NEXT: movq -56(%rsp,%rax,4), %rdi
+; FALLBACK5-NEXT: movq %rdi, %r8
+; FALLBACK5-NEXT: shrdq %cl, %rsi, %r8
+; FALLBACK5-NEXT: movq -72(%rsp,%rax,4), %r9
+; FALLBACK5-NEXT: movq -64(%rsp,%rax,4), %rax
+; FALLBACK5-NEXT: movq %rax, %r10
+; FALLBACK5-NEXT: shrdq %cl, %rdi, %r10
+; FALLBACK5-NEXT: shrdq %cl, %rax, %r9
+; FALLBACK5-NEXT: sarq %cl, %rsi
+; FALLBACK5-NEXT: movq %r10, 8(%rdx)
+; FALLBACK5-NEXT: movq %r8, 16(%rdx)
+; FALLBACK5-NEXT: movq %rsi, 24(%rdx)
+; FALLBACK5-NEXT: movq %r9, (%rdx)
+; FALLBACK5-NEXT: retq
+;
+; FALLBACK6-LABEL: ashr_32bytes_dwordOff:
+; FALLBACK6: # %bb.0:
+; FALLBACK6-NEXT: movups (%rdi), %xmm0
+; FALLBACK6-NEXT: movq 16(%rdi), %rcx
+; FALLBACK6-NEXT: movq 24(%rdi), %rdi
+; FALLBACK6-NEXT: movzbl (%rsi), %esi
+; FALLBACK6-NEXT: movl %esi, %eax
+; FALLBACK6-NEXT: shlb $5, %al
+; FALLBACK6-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK6-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
+; FALLBACK6-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK6-NEXT: sarq $63, %rdi
+; FALLBACK6-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK6-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK6-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK6-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK6-NEXT: andb $6, %sil
+; FALLBACK6-NEXT: movzbl %sil, %ecx
+; FALLBACK6-NEXT: shrxq %rax, -72(%rsp,%rcx,4), %rsi
+; FALLBACK6-NEXT: movq -64(%rsp,%rcx,4), %rdi
+; FALLBACK6-NEXT: movq -56(%rsp,%rcx,4), %r8
+; FALLBACK6-NEXT: shrxq %rax, %r8, %r9
+; FALLBACK6-NEXT: movq -48(%rsp,%rcx,4), %rcx
+; FALLBACK6-NEXT: shrxq %rax, %rdi, %r10
+; FALLBACK6-NEXT: sarxq %rax, %rcx, %r11
+; FALLBACK6-NEXT: # kill: def $al killed $al killed $rax def $rax
+; FALLBACK6-NEXT: notb %al
+; FALLBACK6-NEXT: addq %rdi, %rdi
+; FALLBACK6-NEXT: shlxq %rax, %rdi, %rdi
+; FALLBACK6-NEXT: orq %rsi, %rdi
+; FALLBACK6-NEXT: addq %rcx, %rcx
+; FALLBACK6-NEXT: shlxq %rax, %rcx, %rcx
+; FALLBACK6-NEXT: orq %r9, %rcx
+; FALLBACK6-NEXT: addq %r8, %r8
+; FALLBACK6-NEXT: shlxq %rax, %r8, %rax
+; FALLBACK6-NEXT: orq %r10, %rax
+; FALLBACK6-NEXT: movq %r11, 24(%rdx)
+; FALLBACK6-NEXT: movq %rax, 8(%rdx)
+; FALLBACK6-NEXT: movq %rcx, 16(%rdx)
+; FALLBACK6-NEXT: movq %rdi, (%rdx)
+; FALLBACK6-NEXT: retq
+;
+; FALLBACK7-LABEL: ashr_32bytes_dwordOff:
+; FALLBACK7: # %bb.0:
+; FALLBACK7-NEXT: movups (%rdi), %xmm0
+; FALLBACK7-NEXT: movq 16(%rdi), %rax
+; FALLBACK7-NEXT: movq 24(%rdi), %rdi
+; FALLBACK7-NEXT: movzbl (%rsi), %esi
+; FALLBACK7-NEXT: movl %esi, %ecx
+; FALLBACK7-NEXT: shlb $5, %cl
+; FALLBACK7-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK7-NEXT: movq %rax, -{{[0-9]+}}(%rsp)
+; FALLBACK7-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK7-NEXT: sarq $63, %rdi
+; FALLBACK7-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK7-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK7-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK7-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK7-NEXT: andb $6, %sil
+; FALLBACK7-NEXT: movzbl %sil, %eax
+; FALLBACK7-NEXT: movq -48(%rsp,%rax,4), %rsi
+; FALLBACK7-NEXT: movq -56(%rsp,%rax,4), %rdi
+; FALLBACK7-NEXT: movq %rdi, %r8
+; FALLBACK7-NEXT: shrdq %cl, %rsi, %r8
+; FALLBACK7-NEXT: movq -72(%rsp,%rax,4), %r9
+; FALLBACK7-NEXT: movq -64(%rsp,%rax,4), %rax
+; FALLBACK7-NEXT: movq %rax, %r10
+; FALLBACK7-NEXT: shrdq %cl, %rdi, %r10
+; FALLBACK7-NEXT: shrdq %cl, %rax, %r9
+; FALLBACK7-NEXT: sarxq %rcx, %rsi, %rax
+; FALLBACK7-NEXT: movq %r10, 8(%rdx)
+; FALLBACK7-NEXT: movq %r8, 16(%rdx)
+; FALLBACK7-NEXT: movq %rax, 24(%rdx)
+; FALLBACK7-NEXT: movq %r9, (%rdx)
+; FALLBACK7-NEXT: retq
+;
+; FALLBACK8-LABEL: ashr_32bytes_dwordOff:
+; FALLBACK8: # %bb.0:
+; FALLBACK8-NEXT: pushq %rbx
+; FALLBACK8-NEXT: vmovups (%rdi), %xmm0
+; FALLBACK8-NEXT: movq 16(%rdi), %rcx
+; FALLBACK8-NEXT: movq 24(%rdi), %rdi
+; FALLBACK8-NEXT: movzbl (%rsi), %esi
+; FALLBACK8-NEXT: movl %esi, %eax
+; FALLBACK8-NEXT: shlb $5, %al
+; FALLBACK8-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK8-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
+; FALLBACK8-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK8-NEXT: sarq $63, %rdi
+; FALLBACK8-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK8-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK8-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK8-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK8-NEXT: andb $6, %sil
+; FALLBACK8-NEXT: movzbl %sil, %r9d
+; FALLBACK8-NEXT: movq -64(%rsp,%r9,4), %r10
+; FALLBACK8-NEXT: movq -56(%rsp,%r9,4), %r8
+; FALLBACK8-NEXT: movl %eax, %ecx
+; FALLBACK8-NEXT: shrq %cl, %r10
+; FALLBACK8-NEXT: movl %eax, %esi
+; FALLBACK8-NEXT: notb %sil
+; FALLBACK8-NEXT: leaq (%r8,%r8), %rdi
+; FALLBACK8-NEXT: movl %esi, %ecx
+; FALLBACK8-NEXT: shlq %cl, %rdi
+; FALLBACK8-NEXT: orq %r10, %rdi
+; FALLBACK8-NEXT: movq -48(%rsp,%r9,4), %r10
+; FALLBACK8-NEXT: movq %r10, %r11
+; FALLBACK8-NEXT: movl %eax, %ecx
+; FALLBACK8-NEXT: shrq %cl, %r11
+; FALLBACK8-NEXT: movq -40(%rsp,%r9,4), %r9
+; FALLBACK8-NEXT: leaq (%r9,%r9), %rbx
+; FALLBACK8-NEXT: movl %esi, %ecx
+; FALLBACK8-NEXT: shlq %cl, %rbx
+; FALLBACK8-NEXT: orq %r11, %rbx
+; FALLBACK8-NEXT: movl %eax, %ecx
+; FALLBACK8-NEXT: shrq %cl, %r8
+; FALLBACK8-NEXT: addq %r10, %r10
+; FALLBACK8-NEXT: movl %esi, %ecx
+; FALLBACK8-NEXT: shlq %cl, %r10
+; FALLBACK8-NEXT: orq %r8, %r10
+; FALLBACK8-NEXT: movl %eax, %ecx
+; FALLBACK8-NEXT: sarq %cl, %r9
+; FALLBACK8-NEXT: movq %r9, 24(%rdx)
+; FALLBACK8-NEXT: movq %r10, 8(%rdx)
+; FALLBACK8-NEXT: movq %rbx, 16(%rdx)
+; FALLBACK8-NEXT: movq %rdi, (%rdx)
+; FALLBACK8-NEXT: popq %rbx
+; FALLBACK8-NEXT: retq
+;
+; FALLBACK9-LABEL: ashr_32bytes_dwordOff:
+; FALLBACK9: # %bb.0:
+; FALLBACK9-NEXT: vmovups (%rdi), %xmm0
+; FALLBACK9-NEXT: movq 16(%rdi), %rax
+; FALLBACK9-NEXT: movq 24(%rdi), %rdi
+; FALLBACK9-NEXT: movzbl (%rsi), %esi
+; FALLBACK9-NEXT: movl %esi, %ecx
+; FALLBACK9-NEXT: shlb $5, %cl
+; FALLBACK9-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK9-NEXT: movq %rax, -{{[0-9]+}}(%rsp)
+; FALLBACK9-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK9-NEXT: sarq $63, %rdi
+; FALLBACK9-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK9-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK9-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK9-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK9-NEXT: andb $6, %sil
+; FALLBACK9-NEXT: movzbl %sil, %eax
+; FALLBACK9-NEXT: movq -48(%rsp,%rax,4), %rsi
+; FALLBACK9-NEXT: movq -56(%rsp,%rax,4), %rdi
+; FALLBACK9-NEXT: movq %rdi, %r8
+; FALLBACK9-NEXT: shrdq %cl, %rsi, %r8
+; FALLBACK9-NEXT: movq -72(%rsp,%rax,4), %r9
+; FALLBACK9-NEXT: movq -64(%rsp,%rax,4), %rax
+; FALLBACK9-NEXT: movq %rax, %r10
+; FALLBACK9-NEXT: shrdq %cl, %rdi, %r10
+; FALLBACK9-NEXT: shrdq %cl, %rax, %r9
+; FALLBACK9-NEXT: sarq %cl, %rsi
+; FALLBACK9-NEXT: movq %r10, 8(%rdx)
+; FALLBACK9-NEXT: movq %r8, 16(%rdx)
+; FALLBACK9-NEXT: movq %rsi, 24(%rdx)
+; FALLBACK9-NEXT: movq %r9, (%rdx)
+; FALLBACK9-NEXT: retq
+;
+; FALLBACK10-LABEL: ashr_32bytes_dwordOff:
+; FALLBACK10: # %bb.0:
+; FALLBACK10-NEXT: vmovups (%rdi), %xmm0
+; FALLBACK10-NEXT: movq 16(%rdi), %rcx
+; FALLBACK10-NEXT: movq 24(%rdi), %rdi
+; FALLBACK10-NEXT: movzbl (%rsi), %esi
+; FALLBACK10-NEXT: movl %esi, %eax
+; FALLBACK10-NEXT: shlb $5, %al
+; FALLBACK10-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK10-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
+; FALLBACK10-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK10-NEXT: sarq $63, %rdi
+; FALLBACK10-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK10-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK10-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK10-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK10-NEXT: andb $6, %sil
+; FALLBACK10-NEXT: movzbl %sil, %ecx
+; FALLBACK10-NEXT: shrxq %rax, -72(%rsp,%rcx,4), %rsi
+; FALLBACK10-NEXT: movq -64(%rsp,%rcx,4), %rdi
+; FALLBACK10-NEXT: movq -56(%rsp,%rcx,4), %r8
+; FALLBACK10-NEXT: shrxq %rax, %r8, %r9
+; FALLBACK10-NEXT: movq -48(%rsp,%rcx,4), %rcx
+; FALLBACK10-NEXT: shrxq %rax, %rdi, %r10
+; FALLBACK10-NEXT: sarxq %rax, %rcx, %r11
+; FALLBACK10-NEXT: # kill: def $al killed $al killed $rax def $rax
+; FALLBACK10-NEXT: notb %al
+; FALLBACK10-NEXT: addq %rdi, %rdi
+; FALLBACK10-NEXT: shlxq %rax, %rdi, %rdi
+; FALLBACK10-NEXT: orq %rsi, %rdi
+; FALLBACK10-NEXT: addq %rcx, %rcx
+; FALLBACK10-NEXT: shlxq %rax, %rcx, %rcx
+; FALLBACK10-NEXT: orq %r9, %rcx
+; FALLBACK10-NEXT: addq %r8, %r8
+; FALLBACK10-NEXT: shlxq %rax, %r8, %rax
+; FALLBACK10-NEXT: orq %r10, %rax
+; FALLBACK10-NEXT: movq %r11, 24(%rdx)
+; FALLBACK10-NEXT: movq %rax, 8(%rdx)
+; FALLBACK10-NEXT: movq %rcx, 16(%rdx)
+; FALLBACK10-NEXT: movq %rdi, (%rdx)
+; FALLBACK10-NEXT: retq
+;
+; FALLBACK11-LABEL: ashr_32bytes_dwordOff:
+; FALLBACK11: # %bb.0:
+; FALLBACK11-NEXT: vmovups (%rdi), %xmm0
+; FALLBACK11-NEXT: movq 16(%rdi), %rax
+; FALLBACK11-NEXT: movq 24(%rdi), %rdi
+; FALLBACK11-NEXT: movzbl (%rsi), %esi
+; FALLBACK11-NEXT: movl %esi, %ecx
+; FALLBACK11-NEXT: shlb $5, %cl
+; FALLBACK11-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK11-NEXT: movq %rax, -{{[0-9]+}}(%rsp)
+; FALLBACK11-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK11-NEXT: sarq $63, %rdi
+; FALLBACK11-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK11-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK11-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK11-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK11-NEXT: andb $6, %sil
+; FALLBACK11-NEXT: movzbl %sil, %eax
+; FALLBACK11-NEXT: movq -48(%rsp,%rax,4), %rsi
+; FALLBACK11-NEXT: movq -56(%rsp,%rax,4), %rdi
+; FALLBACK11-NEXT: movq %rdi, %r8
+; FALLBACK11-NEXT: shrdq %cl, %rsi, %r8
+; FALLBACK11-NEXT: movq -72(%rsp,%rax,4), %r9
+; FALLBACK11-NEXT: movq -64(%rsp,%rax,4), %rax
+; FALLBACK11-NEXT: movq %rax, %r10
+; FALLBACK11-NEXT: shrdq %cl, %rdi, %r10
+; FALLBACK11-NEXT: shrdq %cl, %rax, %r9
+; FALLBACK11-NEXT: sarxq %rcx, %rsi, %rax
+; FALLBACK11-NEXT: movq %r10, 8(%rdx)
+; FALLBACK11-NEXT: movq %r8, 16(%rdx)
+; FALLBACK11-NEXT: movq %rax, 24(%rdx)
+; FALLBACK11-NEXT: movq %r9, (%rdx)
+; FALLBACK11-NEXT: retq
+;
+; FALLBACK12-LABEL: ashr_32bytes_dwordOff:
+; FALLBACK12: # %bb.0:
+; FALLBACK12-NEXT: pushq %rbx
+; FALLBACK12-NEXT: vmovups (%rdi), %xmm0
+; FALLBACK12-NEXT: movq 16(%rdi), %rcx
+; FALLBACK12-NEXT: movq 24(%rdi), %rdi
+; FALLBACK12-NEXT: movzbl (%rsi), %esi
+; FALLBACK12-NEXT: movl %esi, %eax
+; FALLBACK12-NEXT: shlb $5, %al
+; FALLBACK12-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK12-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
+; FALLBACK12-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK12-NEXT: sarq $63, %rdi
+; FALLBACK12-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK12-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK12-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK12-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK12-NEXT: andb $6, %sil
+; FALLBACK12-NEXT: movzbl %sil, %r9d
+; FALLBACK12-NEXT: movq -64(%rsp,%r9,4), %r10
+; FALLBACK12-NEXT: movq -56(%rsp,%r9,4), %r8
+; FALLBACK12-NEXT: movl %eax, %ecx
+; FALLBACK12-NEXT: shrq %cl, %r10
+; FALLBACK12-NEXT: movl %eax, %esi
+; FALLBACK12-NEXT: notb %sil
+; FALLBACK12-NEXT: leaq (%r8,%r8), %rdi
+; FALLBACK12-NEXT: movl %esi, %ecx
+; FALLBACK12-NEXT: shlq %cl, %rdi
+; FALLBACK12-NEXT: orq %r10, %rdi
+; FALLBACK12-NEXT: movq -48(%rsp,%r9,4), %r10
+; FALLBACK12-NEXT: movq %r10, %r11
+; FALLBACK12-NEXT: movl %eax, %ecx
+; FALLBACK12-NEXT: shrq %cl, %r11
+; FALLBACK12-NEXT: movq -40(%rsp,%r9,4), %r9
+; FALLBACK12-NEXT: leaq (%r9,%r9), %rbx
+; FALLBACK12-NEXT: movl %esi, %ecx
+; FALLBACK12-NEXT: shlq %cl, %rbx
+; FALLBACK12-NEXT: orq %r11, %rbx
+; FALLBACK12-NEXT: movl %eax, %ecx
+; FALLBACK12-NEXT: shrq %cl, %r8
+; FALLBACK12-NEXT: addq %r10, %r10
+; FALLBACK12-NEXT: movl %esi, %ecx
+; FALLBACK12-NEXT: shlq %cl, %r10
+; FALLBACK12-NEXT: orq %r8, %r10
+; FALLBACK12-NEXT: movl %eax, %ecx
+; FALLBACK12-NEXT: sarq %cl, %r9
+; FALLBACK12-NEXT: movq %r9, 24(%rdx)
+; FALLBACK12-NEXT: movq %r10, 8(%rdx)
+; FALLBACK12-NEXT: movq %rbx, 16(%rdx)
+; FALLBACK12-NEXT: movq %rdi, (%rdx)
+; FALLBACK12-NEXT: popq %rbx
+; FALLBACK12-NEXT: retq
+;
+; FALLBACK13-LABEL: ashr_32bytes_dwordOff:
+; FALLBACK13: # %bb.0:
+; FALLBACK13-NEXT: vmovups (%rdi), %xmm0
+; FALLBACK13-NEXT: movq 16(%rdi), %rax
+; FALLBACK13-NEXT: movq 24(%rdi), %rdi
+; FALLBACK13-NEXT: movzbl (%rsi), %esi
+; FALLBACK13-NEXT: movl %esi, %ecx
+; FALLBACK13-NEXT: shlb $5, %cl
+; FALLBACK13-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK13-NEXT: movq %rax, -{{[0-9]+}}(%rsp)
+; FALLBACK13-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK13-NEXT: sarq $63, %rdi
+; FALLBACK13-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK13-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK13-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK13-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK13-NEXT: andb $6, %sil
+; FALLBACK13-NEXT: movzbl %sil, %eax
+; FALLBACK13-NEXT: movq -48(%rsp,%rax,4), %rsi
+; FALLBACK13-NEXT: movq -56(%rsp,%rax,4), %rdi
+; FALLBACK13-NEXT: movq %rdi, %r8
+; FALLBACK13-NEXT: shrdq %cl, %rsi, %r8
+; FALLBACK13-NEXT: movq -72(%rsp,%rax,4), %r9
+; FALLBACK13-NEXT: movq -64(%rsp,%rax,4), %rax
+; FALLBACK13-NEXT: movq %rax, %r10
+; FALLBACK13-NEXT: shrdq %cl, %rdi, %r10
+; FALLBACK13-NEXT: shrdq %cl, %rax, %r9
+; FALLBACK13-NEXT: sarq %cl, %rsi
+; FALLBACK13-NEXT: movq %r10, 8(%rdx)
+; FALLBACK13-NEXT: movq %r8, 16(%rdx)
+; FALLBACK13-NEXT: movq %rsi, 24(%rdx)
+; FALLBACK13-NEXT: movq %r9, (%rdx)
+; FALLBACK13-NEXT: retq
+;
+; FALLBACK14-LABEL: ashr_32bytes_dwordOff:
+; FALLBACK14: # %bb.0:
+; FALLBACK14-NEXT: vmovups (%rdi), %xmm0
+; FALLBACK14-NEXT: movq 16(%rdi), %rcx
+; FALLBACK14-NEXT: movq 24(%rdi), %rdi
+; FALLBACK14-NEXT: movzbl (%rsi), %esi
+; FALLBACK14-NEXT: movl %esi, %eax
+; FALLBACK14-NEXT: shlb $5, %al
+; FALLBACK14-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK14-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
+; FALLBACK14-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK14-NEXT: sarq $63, %rdi
+; FALLBACK14-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK14-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK14-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK14-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK14-NEXT: andb $6, %sil
+; FALLBACK14-NEXT: movzbl %sil, %ecx
+; FALLBACK14-NEXT: shrxq %rax, -72(%rsp,%rcx,4), %rsi
+; FALLBACK14-NEXT: movq -64(%rsp,%rcx,4), %rdi
+; FALLBACK14-NEXT: movq -56(%rsp,%rcx,4), %r8
+; FALLBACK14-NEXT: shrxq %rax, %r8, %r9
+; FALLBACK14-NEXT: movq -48(%rsp,%rcx,4), %rcx
+; FALLBACK14-NEXT: shrxq %rax, %rdi, %r10
+; FALLBACK14-NEXT: sarxq %rax, %rcx, %r11
+; FALLBACK14-NEXT: # kill: def $al killed $al killed $rax def $rax
+; FALLBACK14-NEXT: notb %al
+; FALLBACK14-NEXT: addq %rdi, %rdi
+; FALLBACK14-NEXT: shlxq %rax, %rdi, %rdi
+; FALLBACK14-NEXT: orq %rsi, %rdi
+; FALLBACK14-NEXT: addq %rcx, %rcx
+; FALLBACK14-NEXT: shlxq %rax, %rcx, %rcx
+; FALLBACK14-NEXT: orq %r9, %rcx
+; FALLBACK14-NEXT: addq %r8, %r8
+; FALLBACK14-NEXT: shlxq %rax, %r8, %rax
+; FALLBACK14-NEXT: orq %r10, %rax
+; FALLBACK14-NEXT: movq %r11, 24(%rdx)
+; FALLBACK14-NEXT: movq %rax, 8(%rdx)
+; FALLBACK14-NEXT: movq %rcx, 16(%rdx)
+; FALLBACK14-NEXT: movq %rdi, (%rdx)
+; FALLBACK14-NEXT: retq
+;
+; FALLBACK15-LABEL: ashr_32bytes_dwordOff:
+; FALLBACK15: # %bb.0:
+; FALLBACK15-NEXT: vmovups (%rdi), %xmm0
+; FALLBACK15-NEXT: movq 16(%rdi), %rax
+; FALLBACK15-NEXT: movq 24(%rdi), %rdi
+; FALLBACK15-NEXT: movzbl (%rsi), %esi
+; FALLBACK15-NEXT: movl %esi, %ecx
+; FALLBACK15-NEXT: shlb $5, %cl
+; FALLBACK15-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK15-NEXT: movq %rax, -{{[0-9]+}}(%rsp)
+; FALLBACK15-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK15-NEXT: sarq $63, %rdi
+; FALLBACK15-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK15-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK15-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK15-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK15-NEXT: andb $6, %sil
+; FALLBACK15-NEXT: movzbl %sil, %eax
+; FALLBACK15-NEXT: movq -48(%rsp,%rax,4), %rsi
+; FALLBACK15-NEXT: movq -56(%rsp,%rax,4), %rdi
+; FALLBACK15-NEXT: movq %rdi, %r8
+; FALLBACK15-NEXT: shrdq %cl, %rsi, %r8
+; FALLBACK15-NEXT: movq -72(%rsp,%rax,4), %r9
+; FALLBACK15-NEXT: movq -64(%rsp,%rax,4), %rax
+; FALLBACK15-NEXT: movq %rax, %r10
+; FALLBACK15-NEXT: shrdq %cl, %rdi, %r10
+; FALLBACK15-NEXT: shrdq %cl, %rax, %r9
+; FALLBACK15-NEXT: sarxq %rcx, %rsi, %rax
+; FALLBACK15-NEXT: movq %r10, 8(%rdx)
+; FALLBACK15-NEXT: movq %r8, 16(%rdx)
+; FALLBACK15-NEXT: movq %rax, 24(%rdx)
+; FALLBACK15-NEXT: movq %r9, (%rdx)
+; FALLBACK15-NEXT: retq
+;
+; X86-SSE2-LABEL: ashr_32bytes_dwordOff:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: pushl %ebp
+; X86-SSE2-NEXT: pushl %ebx
+; X86-SSE2-NEXT: pushl %edi
+; X86-SSE2-NEXT: pushl %esi
+; X86-SSE2-NEXT: subl $92, %esp
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: movl (%eax), %ecx
+; X86-SSE2-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-SSE2-NEXT: movl 4(%eax), %ecx
+; X86-SSE2-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-SSE2-NEXT: movl 8(%eax), %edi
+; X86-SSE2-NEXT: movl 12(%eax), %ebx
+; X86-SSE2-NEXT: movl 16(%eax), %ebp
+; X86-SSE2-NEXT: movl 20(%eax), %esi
+; X86-SSE2-NEXT: movl 24(%eax), %edx
+; X86-SSE2-NEXT: movl 28(%eax), %ecx
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: movzbl (%eax), %eax
+; X86-SSE2-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-SSE2-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; X86-SSE2-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-SSE2-NEXT: movl %ebp, {{[0-9]+}}(%esp)
+; X86-SSE2-NEXT: movl %ebx, {{[0-9]+}}(%esp)
+; X86-SSE2-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; X86-SSE2-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X86-SSE2-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-SSE2-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X86-SSE2-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-SSE2-NEXT: sarl $31, %ecx
+; X86-SSE2-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-SSE2-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-SSE2-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-SSE2-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-SSE2-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-SSE2-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-SSE2-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-SSE2-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-SSE2-NEXT: andl $7, %eax
+; X86-SSE2-NEXT: movl 16(%esp,%eax,4), %ecx
+; X86-SSE2-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-SSE2-NEXT: movl 20(%esp,%eax,4), %ecx
+; X86-SSE2-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-SSE2-NEXT: movl 28(%esp,%eax,4), %esi
+; X86-SSE2-NEXT: movl 24(%esp,%eax,4), %edi
+; X86-SSE2-NEXT: movl 36(%esp,%eax,4), %ebx
+; X86-SSE2-NEXT: movl 32(%esp,%eax,4), %ebp
+; X86-SSE2-NEXT: movl 44(%esp,%eax,4), %edx
+; X86-SSE2-NEXT: movl 40(%esp,%eax,4), %ecx
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: movl %ecx, 24(%eax)
+; X86-SSE2-NEXT: movl %edx, 28(%eax)
+; X86-SSE2-NEXT: movl %ebp, 16(%eax)
+; X86-SSE2-NEXT: movl %ebx, 20(%eax)
+; X86-SSE2-NEXT: movl %edi, 8(%eax)
+; X86-SSE2-NEXT: movl %esi, 12(%eax)
+; X86-SSE2-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-SSE2-NEXT: movl %ecx, (%eax)
+; X86-SSE2-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-SSE2-NEXT: movl %ecx, 4(%eax)
+; X86-SSE2-NEXT: addl $92, %esp
+; X86-SSE2-NEXT: popl %esi
+; X86-SSE2-NEXT: popl %edi
+; X86-SSE2-NEXT: popl %ebx
+; X86-SSE2-NEXT: popl %ebp
+; X86-SSE2-NEXT: retl
+;
+; X86-SSE42-LABEL: ashr_32bytes_dwordOff:
+; X86-SSE42: # %bb.0:
+; X86-SSE42-NEXT: pushl %ebx
+; X86-SSE42-NEXT: pushl %edi
+; X86-SSE42-NEXT: pushl %esi
+; X86-SSE42-NEXT: subl $64, %esp
+; X86-SSE42-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE42-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE42-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-SSE42-NEXT: movups (%edx), %xmm0
+; X86-SSE42-NEXT: movl 16(%edx), %esi
+; X86-SSE42-NEXT: movl 20(%edx), %edi
+; X86-SSE42-NEXT: movl 24(%edx), %ebx
+; X86-SSE42-NEXT: movl 28(%edx), %edx
+; X86-SSE42-NEXT: movzbl (%ecx), %ecx
+; X86-SSE42-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-SSE42-NEXT: movl %ebx, {{[0-9]+}}(%esp)
+; X86-SSE42-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; X86-SSE42-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; X86-SSE42-NEXT: movaps %xmm0, (%esp)
+; X86-SSE42-NEXT: sarl $31, %edx
+; X86-SSE42-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-SSE42-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-SSE42-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-SSE42-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-SSE42-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-SSE42-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-SSE42-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-SSE42-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-SSE42-NEXT: andl $7, %ecx
+; X86-SSE42-NEXT: movups (%esp,%ecx,4), %xmm0
+; X86-SSE42-NEXT: movups 16(%esp,%ecx,4), %xmm1
+; X86-SSE42-NEXT: movups %xmm1, 16(%eax)
+; X86-SSE42-NEXT: movups %xmm0, (%eax)
+; X86-SSE42-NEXT: addl $64, %esp
+; X86-SSE42-NEXT: popl %esi
+; X86-SSE42-NEXT: popl %edi
+; X86-SSE42-NEXT: popl %ebx
+; X86-SSE42-NEXT: retl
+;
+; X86-AVX-LABEL: ashr_32bytes_dwordOff:
+; X86-AVX: # %bb.0:
+; X86-AVX-NEXT: pushl %ebx
+; X86-AVX-NEXT: pushl %edi
+; X86-AVX-NEXT: pushl %esi
+; X86-AVX-NEXT: subl $64, %esp
+; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-AVX-NEXT: vmovups (%edx), %xmm0
+; X86-AVX-NEXT: movl 16(%edx), %esi
+; X86-AVX-NEXT: movl 20(%edx), %edi
+; X86-AVX-NEXT: movl 24(%edx), %ebx
+; X86-AVX-NEXT: movl 28(%edx), %edx
+; X86-AVX-NEXT: movzbl (%ecx), %ecx
+; X86-AVX-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-AVX-NEXT: movl %ebx, {{[0-9]+}}(%esp)
+; X86-AVX-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; X86-AVX-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; X86-AVX-NEXT: vmovaps %xmm0, (%esp)
+; X86-AVX-NEXT: sarl $31, %edx
+; X86-AVX-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-AVX-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-AVX-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-AVX-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-AVX-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-AVX-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-AVX-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-AVX-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-AVX-NEXT: andl $7, %ecx
+; X86-AVX-NEXT: vmovups (%esp,%ecx,4), %xmm0
+; X86-AVX-NEXT: vmovups 16(%esp,%ecx,4), %xmm1
+; X86-AVX-NEXT: vmovups %xmm1, 16(%eax)
+; X86-AVX-NEXT: vmovups %xmm0, (%eax)
+; X86-AVX-NEXT: addl $64, %esp
+; X86-AVX-NEXT: popl %esi
+; X86-AVX-NEXT: popl %edi
+; X86-AVX-NEXT: popl %ebx
+; X86-AVX-NEXT: retl
+ %src = load i256, ptr %src.ptr, align 1
+ %dwordOff = load i256, ptr %dwordOff.ptr, align 1
+ %bitOff = shl i256 %dwordOff, 5
+ %res = ashr i256 %src, %bitOff
+ store i256 %res, ptr %dst, align 1
+ ret void
+}
+
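The dwordOff variant differs only in offset granularity: the IR scales the offset by 32 bits (shl i256 %dwordOff, 5), so the asm masks it with andl $7 (or the andb $6 / shlb $5 split) and applies a 4-byte scale in the addressing modes. In terms of the sketch above (hypothetical helper, same assumptions):

  static void ashr_32bytes_dwordOff_sketch(const void *src,
                                           unsigned dword_off, void *dst) {
    /* a dword offset is just a byte offset scaled by 4 */
    ashr_32bytes_sketch(src, (dword_off & 7) * 4, dst);
  }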
+define void @ashr_32bytes_qwordOff(ptr %src.ptr, ptr %qwordOff.ptr, ptr %dst) nounwind {
+; X64-SSE2-LABEL: ashr_32bytes_qwordOff:
; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: movq (%rdi), %rax
; X64-SSE2-NEXT: movq 8(%rdi), %rcx
@@ -1446,18 +11832,18 @@ define void @ashr_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; X64-SSE2-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
; X64-SSE2-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
; X64-SSE2-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
-; X64-SSE2-NEXT: andl $31, %esi
-; X64-SSE2-NEXT: movq -64(%rsp,%rsi), %rax
-; X64-SSE2-NEXT: movq -56(%rsp,%rsi), %rcx
-; X64-SSE2-NEXT: movq -40(%rsp,%rsi), %rdi
-; X64-SSE2-NEXT: movq -48(%rsp,%rsi), %rsi
+; X64-SSE2-NEXT: andl $3, %esi
+; X64-SSE2-NEXT: movq -72(%rsp,%rsi,8), %rax
+; X64-SSE2-NEXT: movq -64(%rsp,%rsi,8), %rcx
+; X64-SSE2-NEXT: movq -48(%rsp,%rsi,8), %rdi
+; X64-SSE2-NEXT: movq -56(%rsp,%rsi,8), %rsi
; X64-SSE2-NEXT: movq %rsi, 16(%rdx)
; X64-SSE2-NEXT: movq %rdi, 24(%rdx)
; X64-SSE2-NEXT: movq %rax, (%rdx)
; X64-SSE2-NEXT: movq %rcx, 8(%rdx)
; X64-SSE2-NEXT: retq
;
-; X64-SSE42-LABEL: ashr_32bytes:
+; X64-SSE42-LABEL: ashr_32bytes_qwordOff:
; X64-SSE42: # %bb.0:
; X64-SSE42-NEXT: movups (%rdi), %xmm0
; X64-SSE42-NEXT: movq 16(%rdi), %rax
@@ -1465,20 +11851,20 @@ define void @ashr_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; X64-SSE42-NEXT: movzbl (%rsi), %esi
; X64-SSE42-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
; X64-SSE42-NEXT: movq %rax, -{{[0-9]+}}(%rsp)
-; X64-SSE42-NEXT: movups %xmm0, -{{[0-9]+}}(%rsp)
+; X64-SSE42-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; X64-SSE42-NEXT: sarq $63, %rcx
; X64-SSE42-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
; X64-SSE42-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
; X64-SSE42-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
; X64-SSE42-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
-; X64-SSE42-NEXT: andl $31, %esi
-; X64-SSE42-NEXT: movups -64(%rsp,%rsi), %xmm0
-; X64-SSE42-NEXT: movups -48(%rsp,%rsi), %xmm1
+; X64-SSE42-NEXT: andl $3, %esi
+; X64-SSE42-NEXT: movups -72(%rsp,%rsi,8), %xmm0
+; X64-SSE42-NEXT: movups -56(%rsp,%rsi,8), %xmm1
; X64-SSE42-NEXT: movups %xmm1, 16(%rdx)
; X64-SSE42-NEXT: movups %xmm0, (%rdx)
; X64-SSE42-NEXT: retq
;
-; X64-AVX-LABEL: ashr_32bytes:
+; X64-AVX-LABEL: ashr_32bytes_qwordOff:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vmovups (%rdi), %xmm0
; X64-AVX-NEXT: movq 16(%rdi), %rax
@@ -1486,31 +11872,31 @@ define void @ashr_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; X64-AVX-NEXT: movzbl (%rsi), %esi
; X64-AVX-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
; X64-AVX-NEXT: movq %rax, -{{[0-9]+}}(%rsp)
-; X64-AVX-NEXT: vmovups %xmm0, -{{[0-9]+}}(%rsp)
+; X64-AVX-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; X64-AVX-NEXT: sarq $63, %rcx
; X64-AVX-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
; X64-AVX-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
; X64-AVX-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
; X64-AVX-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
-; X64-AVX-NEXT: andl $31, %esi
-; X64-AVX-NEXT: vmovups -64(%rsp,%rsi), %xmm0
-; X64-AVX-NEXT: vmovups -48(%rsp,%rsi), %xmm1
+; X64-AVX-NEXT: andl $3, %esi
+; X64-AVX-NEXT: vmovups -72(%rsp,%rsi,8), %xmm0
+; X64-AVX-NEXT: vmovups -56(%rsp,%rsi,8), %xmm1
; X64-AVX-NEXT: vmovups %xmm1, 16(%rdx)
; X64-AVX-NEXT: vmovups %xmm0, (%rdx)
; X64-AVX-NEXT: retq
;
-; X86-SSE2-LABEL: ashr_32bytes:
+; X86-SSE2-LABEL: ashr_32bytes_qwordOff:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pushl %ebp
; X86-SSE2-NEXT: pushl %ebx
; X86-SSE2-NEXT: pushl %edi
; X86-SSE2-NEXT: pushl %esi
-; X86-SSE2-NEXT: subl $72, %esp
+; X86-SSE2-NEXT: subl $92, %esp
; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE2-NEXT: movl (%eax), %ecx
; X86-SSE2-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-SSE2-NEXT: movl 4(%eax), %ecx
-; X86-SSE2-NEXT: movl %ecx, (%esp) # 4-byte Spill
+; X86-SSE2-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-SSE2-NEXT: movl 8(%eax), %edi
; X86-SSE2-NEXT: movl 12(%eax), %ebx
; X86-SSE2-NEXT: movl 16(%eax), %ebp
@@ -1525,7 +11911,7 @@ define void @ashr_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; X86-SSE2-NEXT: movl %ebp, {{[0-9]+}}(%esp)
; X86-SSE2-NEXT: movl %ebx, {{[0-9]+}}(%esp)
; X86-SSE2-NEXT: movl %edi, {{[0-9]+}}(%esp)
-; X86-SSE2-NEXT: movl (%esp), %edx # 4-byte Reload
+; X86-SSE2-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
; X86-SSE2-NEXT: movl %edx, {{[0-9]+}}(%esp)
; X86-SSE2-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
; X86-SSE2-NEXT: movl %edx, {{[0-9]+}}(%esp)
@@ -1538,17 +11924,17 @@ define void @ashr_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; X86-SSE2-NEXT: movl %ecx, {{[0-9]+}}(%esp)
; X86-SSE2-NEXT: movl %ecx, {{[0-9]+}}(%esp)
; X86-SSE2-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; X86-SSE2-NEXT: andl $31, %eax
-; X86-SSE2-NEXT: movl 8(%esp,%eax), %ecx
+; X86-SSE2-NEXT: andl $3, %eax
+; X86-SSE2-NEXT: movl 16(%esp,%eax,8), %ecx
+; X86-SSE2-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-SSE2-NEXT: movl 20(%esp,%eax,8), %ecx
; X86-SSE2-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-SSE2-NEXT: movl 12(%esp,%eax), %ecx
-; X86-SSE2-NEXT: movl %ecx, (%esp) # 4-byte Spill
-; X86-SSE2-NEXT: movl 20(%esp,%eax), %esi
-; X86-SSE2-NEXT: movl 16(%esp,%eax), %edi
-; X86-SSE2-NEXT: movl 28(%esp,%eax), %ebx
-; X86-SSE2-NEXT: movl 24(%esp,%eax), %ebp
-; X86-SSE2-NEXT: movl 36(%esp,%eax), %edx
-; X86-SSE2-NEXT: movl 32(%esp,%eax), %ecx
+; X86-SSE2-NEXT: movl 28(%esp,%eax,8), %esi
+; X86-SSE2-NEXT: movl 24(%esp,%eax,8), %edi
+; X86-SSE2-NEXT: movl 36(%esp,%eax,8), %ebx
+; X86-SSE2-NEXT: movl 32(%esp,%eax,8), %ebp
+; X86-SSE2-NEXT: movl 44(%esp,%eax,8), %edx
+; X86-SSE2-NEXT: movl 40(%esp,%eax,8), %ecx
; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE2-NEXT: movl %ecx, 24(%eax)
; X86-SSE2-NEXT: movl %edx, 28(%eax)
@@ -1558,16 +11944,16 @@ define void @ashr_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; X86-SSE2-NEXT: movl %esi, 12(%eax)
; X86-SSE2-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-SSE2-NEXT: movl %ecx, (%eax)
-; X86-SSE2-NEXT: movl (%esp), %ecx # 4-byte Reload
+; X86-SSE2-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-SSE2-NEXT: movl %ecx, 4(%eax)
-; X86-SSE2-NEXT: addl $72, %esp
+; X86-SSE2-NEXT: addl $92, %esp
; X86-SSE2-NEXT: popl %esi
; X86-SSE2-NEXT: popl %edi
; X86-SSE2-NEXT: popl %ebx
; X86-SSE2-NEXT: popl %ebp
; X86-SSE2-NEXT: retl
;
-; X86-SSE42-LABEL: ashr_32bytes:
+; X86-SSE42-LABEL: ashr_32bytes_qwordOff:
; X86-SSE42: # %bb.0:
; X86-SSE42-NEXT: pushl %ebx
; X86-SSE42-NEXT: pushl %edi
@@ -1586,7 +11972,7 @@ define void @ashr_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; X86-SSE42-NEXT: movl %ebx, {{[0-9]+}}(%esp)
; X86-SSE42-NEXT: movl %edi, {{[0-9]+}}(%esp)
; X86-SSE42-NEXT: movl %esi, {{[0-9]+}}(%esp)
-; X86-SSE42-NEXT: movups %xmm0, (%esp)
+; X86-SSE42-NEXT: movaps %xmm0, (%esp)
; X86-SSE42-NEXT: sarl $31, %edx
; X86-SSE42-NEXT: movl %edx, {{[0-9]+}}(%esp)
; X86-SSE42-NEXT: movl %edx, {{[0-9]+}}(%esp)
@@ -1596,9 +11982,9 @@ define void @ashr_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; X86-SSE42-NEXT: movl %edx, {{[0-9]+}}(%esp)
; X86-SSE42-NEXT: movl %edx, {{[0-9]+}}(%esp)
; X86-SSE42-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; X86-SSE42-NEXT: andl $31, %ecx
-; X86-SSE42-NEXT: movups (%esp,%ecx), %xmm0
-; X86-SSE42-NEXT: movups 16(%esp,%ecx), %xmm1
+; X86-SSE42-NEXT: andl $3, %ecx
+; X86-SSE42-NEXT: movups (%esp,%ecx,8), %xmm0
+; X86-SSE42-NEXT: movups 16(%esp,%ecx,8), %xmm1
; X86-SSE42-NEXT: movups %xmm1, 16(%eax)
; X86-SSE42-NEXT: movups %xmm0, (%eax)
; X86-SSE42-NEXT: addl $64, %esp
@@ -1607,7 +11993,7 @@ define void @ashr_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; X86-SSE42-NEXT: popl %ebx
; X86-SSE42-NEXT: retl
;
-; X86-AVX-LABEL: ashr_32bytes:
+; X86-AVX-LABEL: ashr_32bytes_qwordOff:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: pushl %ebx
; X86-AVX-NEXT: pushl %edi
@@ -1626,7 +12012,7 @@ define void @ashr_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; X86-AVX-NEXT: movl %ebx, {{[0-9]+}}(%esp)
; X86-AVX-NEXT: movl %edi, {{[0-9]+}}(%esp)
; X86-AVX-NEXT: movl %esi, {{[0-9]+}}(%esp)
-; X86-AVX-NEXT: vmovups %xmm0, (%esp)
+; X86-AVX-NEXT: vmovaps %xmm0, (%esp)
; X86-AVX-NEXT: sarl $31, %edx
; X86-AVX-NEXT: movl %edx, {{[0-9]+}}(%esp)
; X86-AVX-NEXT: movl %edx, {{[0-9]+}}(%esp)
@@ -1636,9 +12022,9 @@ define void @ashr_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; X86-AVX-NEXT: movl %edx, {{[0-9]+}}(%esp)
; X86-AVX-NEXT: movl %edx, {{[0-9]+}}(%esp)
; X86-AVX-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; X86-AVX-NEXT: andl $31, %ecx
-; X86-AVX-NEXT: vmovups (%esp,%ecx), %xmm0
-; X86-AVX-NEXT: vmovups 16(%esp,%ecx), %xmm1
+; X86-AVX-NEXT: andl $3, %ecx
+; X86-AVX-NEXT: vmovups (%esp,%ecx,8), %xmm0
+; X86-AVX-NEXT: vmovups 16(%esp,%ecx,8), %xmm1
; X86-AVX-NEXT: vmovups %xmm1, 16(%eax)
; X86-AVX-NEXT: vmovups %xmm0, (%eax)
; X86-AVX-NEXT: addl $64, %esp
@@ -1647,15 +12033,3662 @@ define void @ashr_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; X86-AVX-NEXT: popl %ebx
; X86-AVX-NEXT: retl
%src = load i256, ptr %src.ptr, align 1
- %byteOff = load i256, ptr %byteOff.ptr, align 1
- %bitOff = shl i256 %byteOff, 3
+ %qwordOff = load i256, ptr %qwordOff.ptr, align 1
+ %bitOff = shl i256 %qwordOff, 6
%res = ashr i256 %src, %bitOff
store i256 %res, ptr %dst, align 1
ret void
}
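For the qwordOff variant the shift is always a whole number of 64-bit words (andl $3 with an 8-byte scale, matching shl i256 %qwordOff, 6), so no bit-level funnel is needed at all: X64-SSE2 reduces to four indexed movq loads from the spilled buffer, and the SSE42/AVX paths to two unaligned 16-byte loads. Equivalently, in the sketch's terms (hypothetical helper):

  static void ashr_32bytes_qwordOff_sketch(const void *src,
                                           unsigned qword_off, void *dst) {
    /* bit residue is zero, so each output word is a plain buffer read */
    ashr_32bytes_sketch(src, (qword_off & 3) * 8, dst);
  }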
define void @lshr_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
-; X64-SSE2-LABEL: lshr_64bytes:
+; FALLBACK0-LABEL: lshr_64bytes:
+; FALLBACK0: # %bb.0:
+; FALLBACK0-NEXT: pushq %r15
+; FALLBACK0-NEXT: pushq %r14
+; FALLBACK0-NEXT: pushq %r13
+; FALLBACK0-NEXT: pushq %r12
+; FALLBACK0-NEXT: pushq %rbx
+; FALLBACK0-NEXT: movq (%rdi), %rax
+; FALLBACK0-NEXT: movq 8(%rdi), %rcx
+; FALLBACK0-NEXT: movq 16(%rdi), %r8
+; FALLBACK0-NEXT: movq 24(%rdi), %r9
+; FALLBACK0-NEXT: movq 32(%rdi), %r10
+; FALLBACK0-NEXT: movq 40(%rdi), %r11
+; FALLBACK0-NEXT: movq 48(%rdi), %rbx
+; FALLBACK0-NEXT: movq 56(%rdi), %r14
+; FALLBACK0-NEXT: movl (%rsi), %edi
+; FALLBACK0-NEXT: xorps %xmm0, %xmm0
+; FALLBACK0-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK0-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK0-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK0-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK0-NEXT: movq %r14, -{{[0-9]+}}(%rsp)
+; FALLBACK0-NEXT: movq %rbx, -{{[0-9]+}}(%rsp)
+; FALLBACK0-NEXT: movq %r11, -{{[0-9]+}}(%rsp)
+; FALLBACK0-NEXT: movq %r10, -{{[0-9]+}}(%rsp)
+; FALLBACK0-NEXT: movq %r9, -{{[0-9]+}}(%rsp)
+; FALLBACK0-NEXT: movq %r8, -{{[0-9]+}}(%rsp)
+; FALLBACK0-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
+; FALLBACK0-NEXT: movq %rax, -{{[0-9]+}}(%rsp)
+; FALLBACK0-NEXT: leal (,%rdi,8), %eax
+; FALLBACK0-NEXT: andl $56, %eax
+; FALLBACK0-NEXT: andl $56, %edi
+; FALLBACK0-NEXT: movq -128(%rsp,%rdi), %r10
+; FALLBACK0-NEXT: movq -120(%rsp,%rdi), %r8
+; FALLBACK0-NEXT: movq %r8, %r11
+; FALLBACK0-NEXT: movl %eax, %ecx
+; FALLBACK0-NEXT: shrq %cl, %r11
+; FALLBACK0-NEXT: movl %eax, %esi
+; FALLBACK0-NEXT: notb %sil
+; FALLBACK0-NEXT: movq -112(%rsp,%rdi), %rbx
+; FALLBACK0-NEXT: leaq (%rbx,%rbx), %r9
+; FALLBACK0-NEXT: movl %esi, %ecx
+; FALLBACK0-NEXT: shlq %cl, %r9
+; FALLBACK0-NEXT: orq %r11, %r9
+; FALLBACK0-NEXT: movl %eax, %ecx
+; FALLBACK0-NEXT: shrq %cl, %r10
+; FALLBACK0-NEXT: addq %r8, %r8
+; FALLBACK0-NEXT: movl %esi, %ecx
+; FALLBACK0-NEXT: shlq %cl, %r8
+; FALLBACK0-NEXT: orq %r10, %r8
+; FALLBACK0-NEXT: movq -104(%rsp,%rdi), %r10
+; FALLBACK0-NEXT: movq %r10, %r15
+; FALLBACK0-NEXT: movl %eax, %ecx
+; FALLBACK0-NEXT: shrq %cl, %r15
+; FALLBACK0-NEXT: movq -96(%rsp,%rdi), %r14
+; FALLBACK0-NEXT: leaq (%r14,%r14), %r11
+; FALLBACK0-NEXT: movl %esi, %ecx
+; FALLBACK0-NEXT: shlq %cl, %r11
+; FALLBACK0-NEXT: orq %r15, %r11
+; FALLBACK0-NEXT: movl %eax, %ecx
+; FALLBACK0-NEXT: shrq %cl, %rbx
+; FALLBACK0-NEXT: addq %r10, %r10
+; FALLBACK0-NEXT: movl %esi, %ecx
+; FALLBACK0-NEXT: shlq %cl, %r10
+; FALLBACK0-NEXT: orq %rbx, %r10
+; FALLBACK0-NEXT: movq -88(%rsp,%rdi), %rbx
+; FALLBACK0-NEXT: movq %rbx, %r12
+; FALLBACK0-NEXT: movl %eax, %ecx
+; FALLBACK0-NEXT: shrq %cl, %r12
+; FALLBACK0-NEXT: movq -80(%rsp,%rdi), %r13
+; FALLBACK0-NEXT: leaq (%r13,%r13), %r15
+; FALLBACK0-NEXT: movl %esi, %ecx
+; FALLBACK0-NEXT: shlq %cl, %r15
+; FALLBACK0-NEXT: orq %r12, %r15
+; FALLBACK0-NEXT: movl %eax, %ecx
+; FALLBACK0-NEXT: shrq %cl, %r14
+; FALLBACK0-NEXT: addq %rbx, %rbx
+; FALLBACK0-NEXT: movl %esi, %ecx
+; FALLBACK0-NEXT: shlq %cl, %rbx
+; FALLBACK0-NEXT: orq %r14, %rbx
+; FALLBACK0-NEXT: movl %eax, %ecx
+; FALLBACK0-NEXT: shrq %cl, %r13
+; FALLBACK0-NEXT: movq -72(%rsp,%rdi), %rdi
+; FALLBACK0-NEXT: leaq (%rdi,%rdi), %r14
+; FALLBACK0-NEXT: movl %esi, %ecx
+; FALLBACK0-NEXT: shlq %cl, %r14
+; FALLBACK0-NEXT: orq %r13, %r14
+; FALLBACK0-NEXT: movl %eax, %ecx
+; FALLBACK0-NEXT: shrq %cl, %rdi
+; FALLBACK0-NEXT: movq %rdi, 56(%rdx)
+; FALLBACK0-NEXT: movq %r14, 48(%rdx)
+; FALLBACK0-NEXT: movq %rbx, 32(%rdx)
+; FALLBACK0-NEXT: movq %r15, 40(%rdx)
+; FALLBACK0-NEXT: movq %r10, 16(%rdx)
+; FALLBACK0-NEXT: movq %r11, 24(%rdx)
+; FALLBACK0-NEXT: movq %r8, (%rdx)
+; FALLBACK0-NEXT: movq %r9, 8(%rdx)
+; FALLBACK0-NEXT: popq %rbx
+; FALLBACK0-NEXT: popq %r12
+; FALLBACK0-NEXT: popq %r13
+; FALLBACK0-NEXT: popq %r14
+; FALLBACK0-NEXT: popq %r15
+; FALLBACK0-NEXT: retq
+;
+; FALLBACK1-LABEL: lshr_64bytes:
+; FALLBACK1: # %bb.0:
+; FALLBACK1-NEXT: pushq %r15
+; FALLBACK1-NEXT: pushq %r14
+; FALLBACK1-NEXT: pushq %rbx
+; FALLBACK1-NEXT: movq (%rdi), %rcx
+; FALLBACK1-NEXT: movq 8(%rdi), %r8
+; FALLBACK1-NEXT: movq 16(%rdi), %r9
+; FALLBACK1-NEXT: movq 24(%rdi), %r10
+; FALLBACK1-NEXT: movq 32(%rdi), %r11
+; FALLBACK1-NEXT: movq 40(%rdi), %rbx
+; FALLBACK1-NEXT: movq 48(%rdi), %r14
+; FALLBACK1-NEXT: movq 56(%rdi), %rdi
+; FALLBACK1-NEXT: movl (%rsi), %eax
+; FALLBACK1-NEXT: xorps %xmm0, %xmm0
+; FALLBACK1-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK1-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK1-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK1-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK1-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK1-NEXT: movq %r14, -{{[0-9]+}}(%rsp)
+; FALLBACK1-NEXT: movq %rbx, -{{[0-9]+}}(%rsp)
+; FALLBACK1-NEXT: movq %r11, -{{[0-9]+}}(%rsp)
+; FALLBACK1-NEXT: movq %r10, -{{[0-9]+}}(%rsp)
+; FALLBACK1-NEXT: movq %r9, -{{[0-9]+}}(%rsp)
+; FALLBACK1-NEXT: movq %r8, -{{[0-9]+}}(%rsp)
+; FALLBACK1-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
+; FALLBACK1-NEXT: leal (,%rax,8), %ecx
+; FALLBACK1-NEXT: andl $56, %ecx
+; FALLBACK1-NEXT: andl $56, %eax
+; FALLBACK1-NEXT: movq -112(%rsp,%rax), %rdi
+; FALLBACK1-NEXT: movq -128(%rsp,%rax), %rsi
+; FALLBACK1-NEXT: movq -120(%rsp,%rax), %r9
+; FALLBACK1-NEXT: movq %r9, %r8
+; FALLBACK1-NEXT: shrdq %cl, %rdi, %r8
+; FALLBACK1-NEXT: movq -96(%rsp,%rax), %r10
+; FALLBACK1-NEXT: movq -104(%rsp,%rax), %r11
+; FALLBACK1-NEXT: movq %r11, %rbx
+; FALLBACK1-NEXT: shrdq %cl, %r10, %rbx
+; FALLBACK1-NEXT: shrdq %cl, %r11, %rdi
+; FALLBACK1-NEXT: movq -80(%rsp,%rax), %r11
+; FALLBACK1-NEXT: movq -88(%rsp,%rax), %r14
+; FALLBACK1-NEXT: movq %r14, %r15
+; FALLBACK1-NEXT: shrdq %cl, %r11, %r15
+; FALLBACK1-NEXT: shrdq %cl, %r14, %r10
+; FALLBACK1-NEXT: movq -72(%rsp,%rax), %rax
+; FALLBACK1-NEXT: shrdq %cl, %rax, %r11
+; FALLBACK1-NEXT: shrdq %cl, %r9, %rsi
+; FALLBACK1-NEXT: # kill: def $cl killed $cl killed $ecx
+; FALLBACK1-NEXT: shrq %cl, %rax
+; FALLBACK1-NEXT: movq %r11, 48(%rdx)
+; FALLBACK1-NEXT: movq %rax, 56(%rdx)
+; FALLBACK1-NEXT: movq %r10, 32(%rdx)
+; FALLBACK1-NEXT: movq %r15, 40(%rdx)
+; FALLBACK1-NEXT: movq %rdi, 16(%rdx)
+; FALLBACK1-NEXT: movq %rbx, 24(%rdx)
+; FALLBACK1-NEXT: movq %rsi, (%rdx)
+; FALLBACK1-NEXT: movq %r8, 8(%rdx)
+; FALLBACK1-NEXT: popq %rbx
+; FALLBACK1-NEXT: popq %r14
+; FALLBACK1-NEXT: popq %r15
+; FALLBACK1-NEXT: retq
+;
+; FALLBACK2-LABEL: lshr_64bytes:
+; FALLBACK2: # %bb.0:
+; FALLBACK2-NEXT: pushq %rbp
+; FALLBACK2-NEXT: pushq %r15
+; FALLBACK2-NEXT: pushq %r14
+; FALLBACK2-NEXT: pushq %r13
+; FALLBACK2-NEXT: pushq %r12
+; FALLBACK2-NEXT: pushq %rbx
+; FALLBACK2-NEXT: pushq %rax
+; FALLBACK2-NEXT: movq (%rdi), %rcx
+; FALLBACK2-NEXT: movq 8(%rdi), %r8
+; FALLBACK2-NEXT: movq 16(%rdi), %r9
+; FALLBACK2-NEXT: movq 24(%rdi), %r10
+; FALLBACK2-NEXT: movq 32(%rdi), %r11
+; FALLBACK2-NEXT: movq 40(%rdi), %rbx
+; FALLBACK2-NEXT: movq 48(%rdi), %r14
+; FALLBACK2-NEXT: movq 56(%rdi), %rdi
+; FALLBACK2-NEXT: movl (%rsi), %eax
+; FALLBACK2-NEXT: xorps %xmm0, %xmm0
+; FALLBACK2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK2-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK2-NEXT: movq %r14, -{{[0-9]+}}(%rsp)
+; FALLBACK2-NEXT: movq %rbx, -{{[0-9]+}}(%rsp)
+; FALLBACK2-NEXT: movq %r11, -{{[0-9]+}}(%rsp)
+; FALLBACK2-NEXT: movq %r10, -{{[0-9]+}}(%rsp)
+; FALLBACK2-NEXT: movq %r9, -{{[0-9]+}}(%rsp)
+; FALLBACK2-NEXT: movq %r8, -{{[0-9]+}}(%rsp)
+; FALLBACK2-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
+; FALLBACK2-NEXT: leal (,%rax,8), %ecx
+; FALLBACK2-NEXT: andl $56, %ecx
+; FALLBACK2-NEXT: andl $56, %eax
+; FALLBACK2-NEXT: movq -120(%rsp,%rax), %rdi
+; FALLBACK2-NEXT: movq -112(%rsp,%rax), %r9
+; FALLBACK2-NEXT: shrxq %rcx, %rdi, %rbx
+; FALLBACK2-NEXT: shrxq %rcx, -128(%rsp,%rax), %r13
+; FALLBACK2-NEXT: movq -104(%rsp,%rax), %rsi
+; FALLBACK2-NEXT: shrxq %rcx, %rsi, %r8
+; FALLBACK2-NEXT: movq -96(%rsp,%rax), %r10
+; FALLBACK2-NEXT: shrxq %rcx, %r9, %r11
+; FALLBACK2-NEXT: movq -88(%rsp,%rax), %r14
+; FALLBACK2-NEXT: shrxq %rcx, %r14, %r15
+; FALLBACK2-NEXT: shrxq %rcx, %r10, %rbp
+; FALLBACK2-NEXT: movl %ecx, %r12d
+; FALLBACK2-NEXT: notb %r12b
+; FALLBACK2-NEXT: addq %r9, %r9
+; FALLBACK2-NEXT: shlxq %r12, %r9, %r9
+; FALLBACK2-NEXT: orq %rbx, %r9
+; FALLBACK2-NEXT: addq %rdi, %rdi
+; FALLBACK2-NEXT: shlxq %r12, %rdi, %rdi
+; FALLBACK2-NEXT: orq %r13, %rdi
+; FALLBACK2-NEXT: movq -80(%rsp,%rax), %rbx
+; FALLBACK2-NEXT: shrxq %rcx, %rbx, %r13
+; FALLBACK2-NEXT: movq -72(%rsp,%rax), %rax
+; FALLBACK2-NEXT: shrxq %rcx, %rax, %rcx
+; FALLBACK2-NEXT: addq %r10, %r10
+; FALLBACK2-NEXT: shlxq %r12, %r10, %r10
+; FALLBACK2-NEXT: orq %r8, %r10
+; FALLBACK2-NEXT: addq %rsi, %rsi
+; FALLBACK2-NEXT: shlxq %r12, %rsi, %rsi
+; FALLBACK2-NEXT: orq %r11, %rsi
+; FALLBACK2-NEXT: leaq (%rbx,%rbx), %r8
+; FALLBACK2-NEXT: shlxq %r12, %r8, %r8
+; FALLBACK2-NEXT: orq %r15, %r8
+; FALLBACK2-NEXT: addq %r14, %r14
+; FALLBACK2-NEXT: shlxq %r12, %r14, %r11
+; FALLBACK2-NEXT: orq %rbp, %r11
+; FALLBACK2-NEXT: addq %rax, %rax
+; FALLBACK2-NEXT: shlxq %r12, %rax, %rax
+; FALLBACK2-NEXT: orq %r13, %rax
+; FALLBACK2-NEXT: movq %rcx, 56(%rdx)
+; FALLBACK2-NEXT: movq %rax, 48(%rdx)
+; FALLBACK2-NEXT: movq %r11, 32(%rdx)
+; FALLBACK2-NEXT: movq %r8, 40(%rdx)
+; FALLBACK2-NEXT: movq %rsi, 16(%rdx)
+; FALLBACK2-NEXT: movq %r10, 24(%rdx)
+; FALLBACK2-NEXT: movq %rdi, (%rdx)
+; FALLBACK2-NEXT: movq %r9, 8(%rdx)
+; FALLBACK2-NEXT: addq $8, %rsp
+; FALLBACK2-NEXT: popq %rbx
+; FALLBACK2-NEXT: popq %r12
+; FALLBACK2-NEXT: popq %r13
+; FALLBACK2-NEXT: popq %r14
+; FALLBACK2-NEXT: popq %r15
+; FALLBACK2-NEXT: popq %rbp
+; FALLBACK2-NEXT: retq
+;
+; FALLBACK3-LABEL: lshr_64bytes:
+; FALLBACK3: # %bb.0:
+; FALLBACK3-NEXT: pushq %r15
+; FALLBACK3-NEXT: pushq %r14
+; FALLBACK3-NEXT: pushq %rbx
+; FALLBACK3-NEXT: movq (%rdi), %rcx
+; FALLBACK3-NEXT: movq 8(%rdi), %r8
+; FALLBACK3-NEXT: movq 16(%rdi), %r9
+; FALLBACK3-NEXT: movq 24(%rdi), %r10
+; FALLBACK3-NEXT: movq 32(%rdi), %r11
+; FALLBACK3-NEXT: movq 40(%rdi), %rbx
+; FALLBACK3-NEXT: movq 48(%rdi), %r14
+; FALLBACK3-NEXT: movq 56(%rdi), %rdi
+; FALLBACK3-NEXT: movl (%rsi), %eax
+; FALLBACK3-NEXT: xorps %xmm0, %xmm0
+; FALLBACK3-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK3-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK3-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK3-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK3-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK3-NEXT: movq %r14, -{{[0-9]+}}(%rsp)
+; FALLBACK3-NEXT: movq %rbx, -{{[0-9]+}}(%rsp)
+; FALLBACK3-NEXT: movq %r11, -{{[0-9]+}}(%rsp)
+; FALLBACK3-NEXT: movq %r10, -{{[0-9]+}}(%rsp)
+; FALLBACK3-NEXT: movq %r9, -{{[0-9]+}}(%rsp)
+; FALLBACK3-NEXT: movq %r8, -{{[0-9]+}}(%rsp)
+; FALLBACK3-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
+; FALLBACK3-NEXT: leal (,%rax,8), %ecx
+; FALLBACK3-NEXT: andl $56, %ecx
+; FALLBACK3-NEXT: andl $56, %eax
+; FALLBACK3-NEXT: movq -112(%rsp,%rax), %rdi
+; FALLBACK3-NEXT: movq -128(%rsp,%rax), %rsi
+; FALLBACK3-NEXT: movq -120(%rsp,%rax), %r9
+; FALLBACK3-NEXT: movq %r9, %r8
+; FALLBACK3-NEXT: shrdq %cl, %rdi, %r8
+; FALLBACK3-NEXT: movq -96(%rsp,%rax), %r10
+; FALLBACK3-NEXT: movq -104(%rsp,%rax), %r11
+; FALLBACK3-NEXT: movq %r11, %rbx
+; FALLBACK3-NEXT: shrdq %cl, %r10, %rbx
+; FALLBACK3-NEXT: shrdq %cl, %r11, %rdi
+; FALLBACK3-NEXT: movq -80(%rsp,%rax), %r11
+; FALLBACK3-NEXT: movq -88(%rsp,%rax), %r14
+; FALLBACK3-NEXT: movq %r14, %r15
+; FALLBACK3-NEXT: shrdq %cl, %r11, %r15
+; FALLBACK3-NEXT: shrdq %cl, %r14, %r10
+; FALLBACK3-NEXT: movq -72(%rsp,%rax), %rax
+; FALLBACK3-NEXT: shrdq %cl, %rax, %r11
+; FALLBACK3-NEXT: shrxq %rcx, %rax, %rax
+; FALLBACK3-NEXT: # kill: def $cl killed $cl killed $rcx
+; FALLBACK3-NEXT: shrdq %cl, %r9, %rsi
+; FALLBACK3-NEXT: movq %r11, 48(%rdx)
+; FALLBACK3-NEXT: movq %r10, 32(%rdx)
+; FALLBACK3-NEXT: movq %r15, 40(%rdx)
+; FALLBACK3-NEXT: movq %rdi, 16(%rdx)
+; FALLBACK3-NEXT: movq %rbx, 24(%rdx)
+; FALLBACK3-NEXT: movq %rsi, (%rdx)
+; FALLBACK3-NEXT: movq %r8, 8(%rdx)
+; FALLBACK3-NEXT: movq %rax, 56(%rdx)
+; FALLBACK3-NEXT: popq %rbx
+; FALLBACK3-NEXT: popq %r14
+; FALLBACK3-NEXT: popq %r15
+; FALLBACK3-NEXT: retq
+;
+; FALLBACK4-LABEL: lshr_64bytes:
+; FALLBACK4: # %bb.0:
+; FALLBACK4-NEXT: pushq %rbp
+; FALLBACK4-NEXT: pushq %r15
+; FALLBACK4-NEXT: pushq %r14
+; FALLBACK4-NEXT: pushq %r13
+; FALLBACK4-NEXT: pushq %r12
+; FALLBACK4-NEXT: pushq %rbx
+; FALLBACK4-NEXT: pushq %rax
+; FALLBACK4-NEXT: movups (%rdi), %xmm0
+; FALLBACK4-NEXT: movups 16(%rdi), %xmm1
+; FALLBACK4-NEXT: movups 32(%rdi), %xmm2
+; FALLBACK4-NEXT: movups 48(%rdi), %xmm3
+; FALLBACK4-NEXT: movl (%rsi), %r8d
+; FALLBACK4-NEXT: xorps %xmm4, %xmm4
+; FALLBACK4-NEXT: movaps %xmm4, -{{[0-9]+}}(%rsp)
+; FALLBACK4-NEXT: movaps %xmm4, -{{[0-9]+}}(%rsp)
+; FALLBACK4-NEXT: movaps %xmm4, -{{[0-9]+}}(%rsp)
+; FALLBACK4-NEXT: movaps %xmm4, -{{[0-9]+}}(%rsp)
+; FALLBACK4-NEXT: movaps %xmm3, -{{[0-9]+}}(%rsp)
+; FALLBACK4-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; FALLBACK4-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; FALLBACK4-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK4-NEXT: leal (,%r8,8), %eax
+; FALLBACK4-NEXT: andl $56, %eax
+; FALLBACK4-NEXT: andl $56, %r8d
+; FALLBACK4-NEXT: movq -128(%rsp,%r8), %r10
+; FALLBACK4-NEXT: movq -120(%rsp,%r8), %r9
+; FALLBACK4-NEXT: movl %eax, %ecx
+; FALLBACK4-NEXT: shrq %cl, %r10
+; FALLBACK4-NEXT: movl %eax, %esi
+; FALLBACK4-NEXT: notb %sil
+; FALLBACK4-NEXT: leaq (%r9,%r9), %rdi
+; FALLBACK4-NEXT: movl %esi, %ecx
+; FALLBACK4-NEXT: shlq %cl, %rdi
+; FALLBACK4-NEXT: orq %r10, %rdi
+; FALLBACK4-NEXT: movq -104(%rsp,%r8), %r10
+; FALLBACK4-NEXT: movq %r10, %rbx
+; FALLBACK4-NEXT: movl %eax, %ecx
+; FALLBACK4-NEXT: shrq %cl, %rbx
+; FALLBACK4-NEXT: movq -96(%rsp,%r8), %r12
+; FALLBACK4-NEXT: leaq (%r12,%r12), %r11
+; FALLBACK4-NEXT: movl %esi, %ecx
+; FALLBACK4-NEXT: shlq %cl, %r11
+; FALLBACK4-NEXT: orq %rbx, %r11
+; FALLBACK4-NEXT: movq -112(%rsp,%r8), %rbx
+; FALLBACK4-NEXT: movq %rbx, %r14
+; FALLBACK4-NEXT: movl %eax, %ecx
+; FALLBACK4-NEXT: shrq %cl, %r14
+; FALLBACK4-NEXT: addq %r10, %r10
+; FALLBACK4-NEXT: movl %esi, %ecx
+; FALLBACK4-NEXT: shlq %cl, %r10
+; FALLBACK4-NEXT: orq %r14, %r10
+; FALLBACK4-NEXT: movq -88(%rsp,%r8), %r14
+; FALLBACK4-NEXT: movq %r14, %r13
+; FALLBACK4-NEXT: movl %eax, %ecx
+; FALLBACK4-NEXT: shrq %cl, %r13
+; FALLBACK4-NEXT: movq -80(%rsp,%r8), %rbp
+; FALLBACK4-NEXT: leaq (%rbp,%rbp), %r15
+; FALLBACK4-NEXT: movl %esi, %ecx
+; FALLBACK4-NEXT: shlq %cl, %r15
+; FALLBACK4-NEXT: orq %r13, %r15
+; FALLBACK4-NEXT: movl %eax, %ecx
+; FALLBACK4-NEXT: shrq %cl, %r12
+; FALLBACK4-NEXT: addq %r14, %r14
+; FALLBACK4-NEXT: movl %esi, %ecx
+; FALLBACK4-NEXT: shlq %cl, %r14
+; FALLBACK4-NEXT: orq %r12, %r14
+; FALLBACK4-NEXT: movl %eax, %ecx
+; FALLBACK4-NEXT: shrq %cl, %rbp
+; FALLBACK4-NEXT: movq -72(%rsp,%r8), %r8
+; FALLBACK4-NEXT: leaq (%r8,%r8), %r12
+; FALLBACK4-NEXT: movl %esi, %ecx
+; FALLBACK4-NEXT: shlq %cl, %r12
+; FALLBACK4-NEXT: orq %rbp, %r12
+; FALLBACK4-NEXT: movl %eax, %ecx
+; FALLBACK4-NEXT: shrq %cl, %r9
+; FALLBACK4-NEXT: addq %rbx, %rbx
+; FALLBACK4-NEXT: movl %esi, %ecx
+; FALLBACK4-NEXT: shlq %cl, %rbx
+; FALLBACK4-NEXT: orq %r9, %rbx
+; FALLBACK4-NEXT: movl %eax, %ecx
+; FALLBACK4-NEXT: shrq %cl, %r8
+; FALLBACK4-NEXT: movq %r8, 56(%rdx)
+; FALLBACK4-NEXT: movq %rbx, 8(%rdx)
+; FALLBACK4-NEXT: movq %r12, 48(%rdx)
+; FALLBACK4-NEXT: movq %r14, 32(%rdx)
+; FALLBACK4-NEXT: movq %r15, 40(%rdx)
+; FALLBACK4-NEXT: movq %r10, 16(%rdx)
+; FALLBACK4-NEXT: movq %r11, 24(%rdx)
+; FALLBACK4-NEXT: movq %rdi, (%rdx)
+; FALLBACK4-NEXT: addq $8, %rsp
+; FALLBACK4-NEXT: popq %rbx
+; FALLBACK4-NEXT: popq %r12
+; FALLBACK4-NEXT: popq %r13
+; FALLBACK4-NEXT: popq %r14
+; FALLBACK4-NEXT: popq %r15
+; FALLBACK4-NEXT: popq %rbp
+; FALLBACK4-NEXT: retq
+;
+; FALLBACK5-LABEL: lshr_64bytes:
+; FALLBACK5: # %bb.0:
+; FALLBACK5-NEXT: pushq %r15
+; FALLBACK5-NEXT: pushq %r14
+; FALLBACK5-NEXT: pushq %rbx
+; FALLBACK5-NEXT: movups (%rdi), %xmm0
+; FALLBACK5-NEXT: movups 16(%rdi), %xmm1
+; FALLBACK5-NEXT: movups 32(%rdi), %xmm2
+; FALLBACK5-NEXT: movups 48(%rdi), %xmm3
+; FALLBACK5-NEXT: movl (%rsi), %eax
+; FALLBACK5-NEXT: xorps %xmm4, %xmm4
+; FALLBACK5-NEXT: movaps %xmm4, -{{[0-9]+}}(%rsp)
+; FALLBACK5-NEXT: movaps %xmm4, -{{[0-9]+}}(%rsp)
+; FALLBACK5-NEXT: movaps %xmm4, -{{[0-9]+}}(%rsp)
+; FALLBACK5-NEXT: movaps %xmm4, -{{[0-9]+}}(%rsp)
+; FALLBACK5-NEXT: movaps %xmm3, -{{[0-9]+}}(%rsp)
+; FALLBACK5-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; FALLBACK5-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; FALLBACK5-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK5-NEXT: leal (,%rax,8), %ecx
+; FALLBACK5-NEXT: andl $56, %ecx
+; FALLBACK5-NEXT: andl $56, %eax
+; FALLBACK5-NEXT: movq -96(%rsp,%rax), %rdi
+; FALLBACK5-NEXT: movq -104(%rsp,%rax), %r9
+; FALLBACK5-NEXT: movq %r9, %rsi
+; FALLBACK5-NEXT: shrdq %cl, %rdi, %rsi
+; FALLBACK5-NEXT: movq -112(%rsp,%rax), %r10
+; FALLBACK5-NEXT: movq %r10, %r8
+; FALLBACK5-NEXT: shrdq %cl, %r9, %r8
+; FALLBACK5-NEXT: movq -80(%rsp,%rax), %r9
+; FALLBACK5-NEXT: movq -88(%rsp,%rax), %r11
+; FALLBACK5-NEXT: movq %r11, %rbx
+; FALLBACK5-NEXT: shrdq %cl, %r9, %rbx
+; FALLBACK5-NEXT: shrdq %cl, %r11, %rdi
+; FALLBACK5-NEXT: movq -72(%rsp,%rax), %r11
+; FALLBACK5-NEXT: shrdq %cl, %r11, %r9
+; FALLBACK5-NEXT: movq -128(%rsp,%rax), %r14
+; FALLBACK5-NEXT: movq -120(%rsp,%rax), %rax
+; FALLBACK5-NEXT: movq %rax, %r15
+; FALLBACK5-NEXT: shrdq %cl, %r10, %r15
+; FALLBACK5-NEXT: shrdq %cl, %rax, %r14
+; FALLBACK5-NEXT: # kill: def $cl killed $cl killed $ecx
+; FALLBACK5-NEXT: shrq %cl, %r11
+; FALLBACK5-NEXT: movq %r15, 8(%rdx)
+; FALLBACK5-NEXT: movq %r9, 48(%rdx)
+; FALLBACK5-NEXT: movq %r11, 56(%rdx)
+; FALLBACK5-NEXT: movq %rdi, 32(%rdx)
+; FALLBACK5-NEXT: movq %rbx, 40(%rdx)
+; FALLBACK5-NEXT: movq %r8, 16(%rdx)
+; FALLBACK5-NEXT: movq %rsi, 24(%rdx)
+; FALLBACK5-NEXT: movq %r14, (%rdx)
+; FALLBACK5-NEXT: popq %rbx
+; FALLBACK5-NEXT: popq %r14
+; FALLBACK5-NEXT: popq %r15
+; FALLBACK5-NEXT: retq
+;
+; FALLBACK6-LABEL: lshr_64bytes:
+; FALLBACK6: # %bb.0:
+; FALLBACK6-NEXT: pushq %rbp
+; FALLBACK6-NEXT: pushq %r15
+; FALLBACK6-NEXT: pushq %r14
+; FALLBACK6-NEXT: pushq %r13
+; FALLBACK6-NEXT: pushq %r12
+; FALLBACK6-NEXT: pushq %rbx
+; FALLBACK6-NEXT: pushq %rax
+; FALLBACK6-NEXT: movups (%rdi), %xmm0
+; FALLBACK6-NEXT: movups 16(%rdi), %xmm1
+; FALLBACK6-NEXT: movups 32(%rdi), %xmm2
+; FALLBACK6-NEXT: movups 48(%rdi), %xmm3
+; FALLBACK6-NEXT: movl (%rsi), %eax
+; FALLBACK6-NEXT: xorps %xmm4, %xmm4
+; FALLBACK6-NEXT: movaps %xmm4, -{{[0-9]+}}(%rsp)
+; FALLBACK6-NEXT: movaps %xmm4, -{{[0-9]+}}(%rsp)
+; FALLBACK6-NEXT: movaps %xmm4, -{{[0-9]+}}(%rsp)
+; FALLBACK6-NEXT: movaps %xmm4, -{{[0-9]+}}(%rsp)
+; FALLBACK6-NEXT: movaps %xmm3, -{{[0-9]+}}(%rsp)
+; FALLBACK6-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; FALLBACK6-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; FALLBACK6-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK6-NEXT: leal (,%rax,8), %esi
+; FALLBACK6-NEXT: andl $56, %esi
+; FALLBACK6-NEXT: andl $56, %eax
+; FALLBACK6-NEXT: shrxq %rsi, -128(%rsp,%rax), %r11
+; FALLBACK6-NEXT: movq -112(%rsp,%rax), %rcx
+; FALLBACK6-NEXT: movq -104(%rsp,%rax), %rdi
+; FALLBACK6-NEXT: shrxq %rsi, %rdi, %r12
+; FALLBACK6-NEXT: movq -96(%rsp,%rax), %r13
+; FALLBACK6-NEXT: shrxq %rsi, %rcx, %r9
+; FALLBACK6-NEXT: movq -88(%rsp,%rax), %r10
+; FALLBACK6-NEXT: shrxq %rsi, %r10, %r14
+; FALLBACK6-NEXT: shrxq %rsi, %r13, %r15
+; FALLBACK6-NEXT: movl %esi, %ebx
+; FALLBACK6-NEXT: notb %bl
+; FALLBACK6-NEXT: movq -120(%rsp,%rax), %rbp
+; FALLBACK6-NEXT: leaq (%rbp,%rbp), %r8
+; FALLBACK6-NEXT: shlxq %rbx, %r8, %r8
+; FALLBACK6-NEXT: orq %r11, %r8
+; FALLBACK6-NEXT: leaq (%r13,%r13), %r11
+; FALLBACK6-NEXT: shlxq %rbx, %r11, %r11
+; FALLBACK6-NEXT: orq %r12, %r11
+; FALLBACK6-NEXT: movq -80(%rsp,%rax), %r12
+; FALLBACK6-NEXT: shrxq %rsi, %r12, %r13
+; FALLBACK6-NEXT: shrxq %rsi, %rbp, %rbp
+; FALLBACK6-NEXT: movq -72(%rsp,%rax), %rax
+; FALLBACK6-NEXT: shrxq %rsi, %rax, %rsi
+; FALLBACK6-NEXT: addq %rdi, %rdi
+; FALLBACK6-NEXT: shlxq %rbx, %rdi, %rdi
+; FALLBACK6-NEXT: orq %r9, %rdi
+; FALLBACK6-NEXT: leaq (%r12,%r12), %r9
+; FALLBACK6-NEXT: shlxq %rbx, %r9, %r9
+; FALLBACK6-NEXT: orq %r14, %r9
+; FALLBACK6-NEXT: addq %r10, %r10
+; FALLBACK6-NEXT: shlxq %rbx, %r10, %r10
+; FALLBACK6-NEXT: orq %r15, %r10
+; FALLBACK6-NEXT: addq %rax, %rax
+; FALLBACK6-NEXT: shlxq %rbx, %rax, %rax
+; FALLBACK6-NEXT: orq %r13, %rax
+; FALLBACK6-NEXT: addq %rcx, %rcx
+; FALLBACK6-NEXT: shlxq %rbx, %rcx, %rcx
+; FALLBACK6-NEXT: orq %rbp, %rcx
+; FALLBACK6-NEXT: movq %rsi, 56(%rdx)
+; FALLBACK6-NEXT: movq %rcx, 8(%rdx)
+; FALLBACK6-NEXT: movq %rax, 48(%rdx)
+; FALLBACK6-NEXT: movq %r10, 32(%rdx)
+; FALLBACK6-NEXT: movq %r9, 40(%rdx)
+; FALLBACK6-NEXT: movq %rdi, 16(%rdx)
+; FALLBACK6-NEXT: movq %r11, 24(%rdx)
+; FALLBACK6-NEXT: movq %r8, (%rdx)
+; FALLBACK6-NEXT: addq $8, %rsp
+; FALLBACK6-NEXT: popq %rbx
+; FALLBACK6-NEXT: popq %r12
+; FALLBACK6-NEXT: popq %r13
+; FALLBACK6-NEXT: popq %r14
+; FALLBACK6-NEXT: popq %r15
+; FALLBACK6-NEXT: popq %rbp
+; FALLBACK6-NEXT: retq
+;
+; FALLBACK7-LABEL: lshr_64bytes:
+; FALLBACK7: # %bb.0:
+; FALLBACK7-NEXT: pushq %r15
+; FALLBACK7-NEXT: pushq %r14
+; FALLBACK7-NEXT: pushq %rbx
+; FALLBACK7-NEXT: movups (%rdi), %xmm0
+; FALLBACK7-NEXT: movups 16(%rdi), %xmm1
+; FALLBACK7-NEXT: movups 32(%rdi), %xmm2
+; FALLBACK7-NEXT: movups 48(%rdi), %xmm3
+; FALLBACK7-NEXT: movl (%rsi), %eax
+; FALLBACK7-NEXT: xorps %xmm4, %xmm4
+; FALLBACK7-NEXT: movaps %xmm4, -{{[0-9]+}}(%rsp)
+; FALLBACK7-NEXT: movaps %xmm4, -{{[0-9]+}}(%rsp)
+; FALLBACK7-NEXT: movaps %xmm4, -{{[0-9]+}}(%rsp)
+; FALLBACK7-NEXT: movaps %xmm4, -{{[0-9]+}}(%rsp)
+; FALLBACK7-NEXT: movaps %xmm3, -{{[0-9]+}}(%rsp)
+; FALLBACK7-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; FALLBACK7-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; FALLBACK7-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK7-NEXT: leal (,%rax,8), %ecx
+; FALLBACK7-NEXT: andl $56, %ecx
+; FALLBACK7-NEXT: andl $56, %eax
+; FALLBACK7-NEXT: movq -96(%rsp,%rax), %rdi
+; FALLBACK7-NEXT: movq -104(%rsp,%rax), %r9
+; FALLBACK7-NEXT: movq %r9, %rsi
+; FALLBACK7-NEXT: shrdq %cl, %rdi, %rsi
+; FALLBACK7-NEXT: movq -112(%rsp,%rax), %r10
+; FALLBACK7-NEXT: movq %r10, %r8
+; FALLBACK7-NEXT: shrdq %cl, %r9, %r8
+; FALLBACK7-NEXT: movq -80(%rsp,%rax), %r9
+; FALLBACK7-NEXT: movq -88(%rsp,%rax), %r11
+; FALLBACK7-NEXT: movq %r11, %rbx
+; FALLBACK7-NEXT: shrdq %cl, %r9, %rbx
+; FALLBACK7-NEXT: shrdq %cl, %r11, %rdi
+; FALLBACK7-NEXT: movq -72(%rsp,%rax), %r11
+; FALLBACK7-NEXT: shrdq %cl, %r11, %r9
+; FALLBACK7-NEXT: movq -128(%rsp,%rax), %r14
+; FALLBACK7-NEXT: movq -120(%rsp,%rax), %rax
+; FALLBACK7-NEXT: movq %rax, %r15
+; FALLBACK7-NEXT: shrdq %cl, %r10, %r15
+; FALLBACK7-NEXT: shrxq %rcx, %r11, %r10
+; FALLBACK7-NEXT: # kill: def $cl killed $cl killed $rcx
+; FALLBACK7-NEXT: shrdq %cl, %rax, %r14
+; FALLBACK7-NEXT: movq %r15, 8(%rdx)
+; FALLBACK7-NEXT: movq %r9, 48(%rdx)
+; FALLBACK7-NEXT: movq %rdi, 32(%rdx)
+; FALLBACK7-NEXT: movq %rbx, 40(%rdx)
+; FALLBACK7-NEXT: movq %r8, 16(%rdx)
+; FALLBACK7-NEXT: movq %rsi, 24(%rdx)
+; FALLBACK7-NEXT: movq %r14, (%rdx)
+; FALLBACK7-NEXT: movq %r10, 56(%rdx)
+; FALLBACK7-NEXT: popq %rbx
+; FALLBACK7-NEXT: popq %r14
+; FALLBACK7-NEXT: popq %r15
+; FALLBACK7-NEXT: retq
+;
+; FALLBACK8-LABEL: lshr_64bytes:
+; FALLBACK8: # %bb.0:
+; FALLBACK8-NEXT: pushq %rbp
+; FALLBACK8-NEXT: pushq %r15
+; FALLBACK8-NEXT: pushq %r14
+; FALLBACK8-NEXT: pushq %r13
+; FALLBACK8-NEXT: pushq %r12
+; FALLBACK8-NEXT: pushq %rbx
+; FALLBACK8-NEXT: pushq %rax
+; FALLBACK8-NEXT: vmovups (%rdi), %ymm0
+; FALLBACK8-NEXT: vmovups 32(%rdi), %ymm1
+; FALLBACK8-NEXT: movl (%rsi), %r9d
+; FALLBACK8-NEXT: vxorps %xmm2, %xmm2, %xmm2
+; FALLBACK8-NEXT: vmovups %ymm2, -{{[0-9]+}}(%rsp)
+; FALLBACK8-NEXT: vmovups %ymm2, -{{[0-9]+}}(%rsp)
+; FALLBACK8-NEXT: vmovups %ymm1, -{{[0-9]+}}(%rsp)
+; FALLBACK8-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
+; FALLBACK8-NEXT: leal (,%r9,8), %eax
+; FALLBACK8-NEXT: andl $56, %eax
+; FALLBACK8-NEXT: andl $56, %r9d
+; FALLBACK8-NEXT: movq -128(%rsp,%r9), %r10
+; FALLBACK8-NEXT: movq -120(%rsp,%r9), %r8
+; FALLBACK8-NEXT: movl %eax, %ecx
+; FALLBACK8-NEXT: shrq %cl, %r10
+; FALLBACK8-NEXT: movl %eax, %esi
+; FALLBACK8-NEXT: notb %sil
+; FALLBACK8-NEXT: leaq (%r8,%r8), %rdi
+; FALLBACK8-NEXT: movl %esi, %ecx
+; FALLBACK8-NEXT: shlq %cl, %rdi
+; FALLBACK8-NEXT: orq %r10, %rdi
+; FALLBACK8-NEXT: movq -104(%rsp,%r9), %r10
+; FALLBACK8-NEXT: movq %r10, %rbx
+; FALLBACK8-NEXT: movl %eax, %ecx
+; FALLBACK8-NEXT: shrq %cl, %rbx
+; FALLBACK8-NEXT: movq -96(%rsp,%r9), %r12
+; FALLBACK8-NEXT: leaq (%r12,%r12), %r11
+; FALLBACK8-NEXT: movl %esi, %ecx
+; FALLBACK8-NEXT: shlq %cl, %r11
+; FALLBACK8-NEXT: orq %rbx, %r11
+; FALLBACK8-NEXT: movq -112(%rsp,%r9), %rbx
+; FALLBACK8-NEXT: movq %rbx, %r14
+; FALLBACK8-NEXT: movl %eax, %ecx
+; FALLBACK8-NEXT: shrq %cl, %r14
+; FALLBACK8-NEXT: addq %r10, %r10
+; FALLBACK8-NEXT: movl %esi, %ecx
+; FALLBACK8-NEXT: shlq %cl, %r10
+; FALLBACK8-NEXT: orq %r14, %r10
+; FALLBACK8-NEXT: movq -88(%rsp,%r9), %r14
+; FALLBACK8-NEXT: movq %r14, %r13
+; FALLBACK8-NEXT: movl %eax, %ecx
+; FALLBACK8-NEXT: shrq %cl, %r13
+; FALLBACK8-NEXT: movq -80(%rsp,%r9), %rbp
+; FALLBACK8-NEXT: leaq (%rbp,%rbp), %r15
+; FALLBACK8-NEXT: movl %esi, %ecx
+; FALLBACK8-NEXT: shlq %cl, %r15
+; FALLBACK8-NEXT: orq %r13, %r15
+; FALLBACK8-NEXT: movl %eax, %ecx
+; FALLBACK8-NEXT: shrq %cl, %r12
+; FALLBACK8-NEXT: addq %r14, %r14
+; FALLBACK8-NEXT: movl %esi, %ecx
+; FALLBACK8-NEXT: shlq %cl, %r14
+; FALLBACK8-NEXT: orq %r12, %r14
+; FALLBACK8-NEXT: movl %eax, %ecx
+; FALLBACK8-NEXT: shrq %cl, %rbp
+; FALLBACK8-NEXT: movq -72(%rsp,%r9), %r9
+; FALLBACK8-NEXT: leaq (%r9,%r9), %r12
+; FALLBACK8-NEXT: movl %esi, %ecx
+; FALLBACK8-NEXT: shlq %cl, %r12
+; FALLBACK8-NEXT: orq %rbp, %r12
+; FALLBACK8-NEXT: movl %eax, %ecx
+; FALLBACK8-NEXT: shrq %cl, %r8
+; FALLBACK8-NEXT: addq %rbx, %rbx
+; FALLBACK8-NEXT: movl %esi, %ecx
+; FALLBACK8-NEXT: shlq %cl, %rbx
+; FALLBACK8-NEXT: orq %r8, %rbx
+; FALLBACK8-NEXT: movl %eax, %ecx
+; FALLBACK8-NEXT: shrq %cl, %r9
+; FALLBACK8-NEXT: movq %r9, 56(%rdx)
+; FALLBACK8-NEXT: movq %rbx, 8(%rdx)
+; FALLBACK8-NEXT: movq %r12, 48(%rdx)
+; FALLBACK8-NEXT: movq %r14, 32(%rdx)
+; FALLBACK8-NEXT: movq %r15, 40(%rdx)
+; FALLBACK8-NEXT: movq %r10, 16(%rdx)
+; FALLBACK8-NEXT: movq %r11, 24(%rdx)
+; FALLBACK8-NEXT: movq %rdi, (%rdx)
+; FALLBACK8-NEXT: addq $8, %rsp
+; FALLBACK8-NEXT: popq %rbx
+; FALLBACK8-NEXT: popq %r12
+; FALLBACK8-NEXT: popq %r13
+; FALLBACK8-NEXT: popq %r14
+; FALLBACK8-NEXT: popq %r15
+; FALLBACK8-NEXT: popq %rbp
+; FALLBACK8-NEXT: vzeroupper
+; FALLBACK8-NEXT: retq
+;
+; FALLBACK9-LABEL: lshr_64bytes:
+; FALLBACK9: # %bb.0:
+; FALLBACK9-NEXT: pushq %r15
+; FALLBACK9-NEXT: pushq %r14
+; FALLBACK9-NEXT: pushq %rbx
+; FALLBACK9-NEXT: vmovups (%rdi), %ymm0
+; FALLBACK9-NEXT: vmovups 32(%rdi), %ymm1
+; FALLBACK9-NEXT: movl (%rsi), %eax
+; FALLBACK9-NEXT: vxorps %xmm2, %xmm2, %xmm2
+; FALLBACK9-NEXT: vmovups %ymm2, -{{[0-9]+}}(%rsp)
+; FALLBACK9-NEXT: vmovups %ymm2, -{{[0-9]+}}(%rsp)
+; FALLBACK9-NEXT: vmovups %ymm1, -{{[0-9]+}}(%rsp)
+; FALLBACK9-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
+; FALLBACK9-NEXT: leal (,%rax,8), %ecx
+; FALLBACK9-NEXT: andl $56, %ecx
+; FALLBACK9-NEXT: andl $56, %eax
+; FALLBACK9-NEXT: movq -96(%rsp,%rax), %rdi
+; FALLBACK9-NEXT: movq -104(%rsp,%rax), %r9
+; FALLBACK9-NEXT: movq %r9, %rsi
+; FALLBACK9-NEXT: shrdq %cl, %rdi, %rsi
+; FALLBACK9-NEXT: movq -112(%rsp,%rax), %r10
+; FALLBACK9-NEXT: movq %r10, %r8
+; FALLBACK9-NEXT: shrdq %cl, %r9, %r8
+; FALLBACK9-NEXT: movq -80(%rsp,%rax), %r9
+; FALLBACK9-NEXT: movq -88(%rsp,%rax), %r11
+; FALLBACK9-NEXT: movq %r11, %rbx
+; FALLBACK9-NEXT: shrdq %cl, %r9, %rbx
+; FALLBACK9-NEXT: shrdq %cl, %r11, %rdi
+; FALLBACK9-NEXT: movq -72(%rsp,%rax), %r11
+; FALLBACK9-NEXT: shrdq %cl, %r11, %r9
+; FALLBACK9-NEXT: movq -128(%rsp,%rax), %r14
+; FALLBACK9-NEXT: movq -120(%rsp,%rax), %rax
+; FALLBACK9-NEXT: movq %rax, %r15
+; FALLBACK9-NEXT: shrdq %cl, %r10, %r15
+; FALLBACK9-NEXT: shrdq %cl, %rax, %r14
+; FALLBACK9-NEXT: # kill: def $cl killed $cl killed $ecx
+; FALLBACK9-NEXT: shrq %cl, %r11
+; FALLBACK9-NEXT: movq %r15, 8(%rdx)
+; FALLBACK9-NEXT: movq %r9, 48(%rdx)
+; FALLBACK9-NEXT: movq %r11, 56(%rdx)
+; FALLBACK9-NEXT: movq %rdi, 32(%rdx)
+; FALLBACK9-NEXT: movq %rbx, 40(%rdx)
+; FALLBACK9-NEXT: movq %r8, 16(%rdx)
+; FALLBACK9-NEXT: movq %rsi, 24(%rdx)
+; FALLBACK9-NEXT: movq %r14, (%rdx)
+; FALLBACK9-NEXT: popq %rbx
+; FALLBACK9-NEXT: popq %r14
+; FALLBACK9-NEXT: popq %r15
+; FALLBACK9-NEXT: vzeroupper
+; FALLBACK9-NEXT: retq
+;
+; FALLBACK10-LABEL: lshr_64bytes:
+; FALLBACK10: # %bb.0:
+; FALLBACK10-NEXT: pushq %rbp
+; FALLBACK10-NEXT: pushq %r15
+; FALLBACK10-NEXT: pushq %r14
+; FALLBACK10-NEXT: pushq %r13
+; FALLBACK10-NEXT: pushq %r12
+; FALLBACK10-NEXT: pushq %rbx
+; FALLBACK10-NEXT: pushq %rax
+; FALLBACK10-NEXT: vmovups (%rdi), %ymm0
+; FALLBACK10-NEXT: vmovups 32(%rdi), %ymm1
+; FALLBACK10-NEXT: movl (%rsi), %eax
+; FALLBACK10-NEXT: vxorps %xmm2, %xmm2, %xmm2
+; FALLBACK10-NEXT: vmovups %ymm2, -{{[0-9]+}}(%rsp)
+; FALLBACK10-NEXT: vmovups %ymm2, -{{[0-9]+}}(%rsp)
+; FALLBACK10-NEXT: vmovups %ymm1, -{{[0-9]+}}(%rsp)
+; FALLBACK10-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
+; FALLBACK10-NEXT: leal (,%rax,8), %esi
+; FALLBACK10-NEXT: andl $56, %esi
+; FALLBACK10-NEXT: andl $56, %eax
+; FALLBACK10-NEXT: shrxq %rsi, -128(%rsp,%rax), %r11
+; FALLBACK10-NEXT: movq -112(%rsp,%rax), %rcx
+; FALLBACK10-NEXT: movq -104(%rsp,%rax), %rdi
+; FALLBACK10-NEXT: shrxq %rsi, %rdi, %r12
+; FALLBACK10-NEXT: movq -96(%rsp,%rax), %r13
+; FALLBACK10-NEXT: shrxq %rsi, %rcx, %r9
+; FALLBACK10-NEXT: movq -88(%rsp,%rax), %r10
+; FALLBACK10-NEXT: shrxq %rsi, %r10, %r14
+; FALLBACK10-NEXT: shrxq %rsi, %r13, %r15
+; FALLBACK10-NEXT: movl %esi, %ebx
+; FALLBACK10-NEXT: notb %bl
+; FALLBACK10-NEXT: movq -120(%rsp,%rax), %rbp
+; FALLBACK10-NEXT: leaq (%rbp,%rbp), %r8
+; FALLBACK10-NEXT: shlxq %rbx, %r8, %r8
+; FALLBACK10-NEXT: orq %r11, %r8
+; FALLBACK10-NEXT: leaq (%r13,%r13), %r11
+; FALLBACK10-NEXT: shlxq %rbx, %r11, %r11
+; FALLBACK10-NEXT: orq %r12, %r11
+; FALLBACK10-NEXT: movq -80(%rsp,%rax), %r12
+; FALLBACK10-NEXT: shrxq %rsi, %r12, %r13
+; FALLBACK10-NEXT: shrxq %rsi, %rbp, %rbp
+; FALLBACK10-NEXT: movq -72(%rsp,%rax), %rax
+; FALLBACK10-NEXT: shrxq %rsi, %rax, %rsi
+; FALLBACK10-NEXT: addq %rdi, %rdi
+; FALLBACK10-NEXT: shlxq %rbx, %rdi, %rdi
+; FALLBACK10-NEXT: orq %r9, %rdi
+; FALLBACK10-NEXT: leaq (%r12,%r12), %r9
+; FALLBACK10-NEXT: shlxq %rbx, %r9, %r9
+; FALLBACK10-NEXT: orq %r14, %r9
+; FALLBACK10-NEXT: addq %r10, %r10
+; FALLBACK10-NEXT: shlxq %rbx, %r10, %r10
+; FALLBACK10-NEXT: orq %r15, %r10
+; FALLBACK10-NEXT: addq %rax, %rax
+; FALLBACK10-NEXT: shlxq %rbx, %rax, %rax
+; FALLBACK10-NEXT: orq %r13, %rax
+; FALLBACK10-NEXT: addq %rcx, %rcx
+; FALLBACK10-NEXT: shlxq %rbx, %rcx, %rcx
+; FALLBACK10-NEXT: orq %rbp, %rcx
+; FALLBACK10-NEXT: movq %rsi, 56(%rdx)
+; FALLBACK10-NEXT: movq %rcx, 8(%rdx)
+; FALLBACK10-NEXT: movq %rax, 48(%rdx)
+; FALLBACK10-NEXT: movq %r10, 32(%rdx)
+; FALLBACK10-NEXT: movq %r9, 40(%rdx)
+; FALLBACK10-NEXT: movq %rdi, 16(%rdx)
+; FALLBACK10-NEXT: movq %r11, 24(%rdx)
+; FALLBACK10-NEXT: movq %r8, (%rdx)
+; FALLBACK10-NEXT: addq $8, %rsp
+; FALLBACK10-NEXT: popq %rbx
+; FALLBACK10-NEXT: popq %r12
+; FALLBACK10-NEXT: popq %r13
+; FALLBACK10-NEXT: popq %r14
+; FALLBACK10-NEXT: popq %r15
+; FALLBACK10-NEXT: popq %rbp
+; FALLBACK10-NEXT: vzeroupper
+; FALLBACK10-NEXT: retq
+;
+; FALLBACK11-LABEL: lshr_64bytes:
+; FALLBACK11: # %bb.0:
+; FALLBACK11-NEXT: pushq %r15
+; FALLBACK11-NEXT: pushq %r14
+; FALLBACK11-NEXT: pushq %rbx
+; FALLBACK11-NEXT: vmovups (%rdi), %ymm0
+; FALLBACK11-NEXT: vmovups 32(%rdi), %ymm1
+; FALLBACK11-NEXT: movl (%rsi), %eax
+; FALLBACK11-NEXT: vxorps %xmm2, %xmm2, %xmm2
+; FALLBACK11-NEXT: vmovups %ymm2, -{{[0-9]+}}(%rsp)
+; FALLBACK11-NEXT: vmovups %ymm2, -{{[0-9]+}}(%rsp)
+; FALLBACK11-NEXT: vmovups %ymm1, -{{[0-9]+}}(%rsp)
+; FALLBACK11-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
+; FALLBACK11-NEXT: leal (,%rax,8), %ecx
+; FALLBACK11-NEXT: andl $56, %ecx
+; FALLBACK11-NEXT: andl $56, %eax
+; FALLBACK11-NEXT: movq -96(%rsp,%rax), %rdi
+; FALLBACK11-NEXT: movq -104(%rsp,%rax), %r9
+; FALLBACK11-NEXT: movq %r9, %rsi
+; FALLBACK11-NEXT: shrdq %cl, %rdi, %rsi
+; FALLBACK11-NEXT: movq -112(%rsp,%rax), %r10
+; FALLBACK11-NEXT: movq %r10, %r8
+; FALLBACK11-NEXT: shrdq %cl, %r9, %r8
+; FALLBACK11-NEXT: movq -80(%rsp,%rax), %r9
+; FALLBACK11-NEXT: movq -88(%rsp,%rax), %r11
+; FALLBACK11-NEXT: movq %r11, %rbx
+; FALLBACK11-NEXT: shrdq %cl, %r9, %rbx
+; FALLBACK11-NEXT: shrdq %cl, %r11, %rdi
+; FALLBACK11-NEXT: movq -72(%rsp,%rax), %r11
+; FALLBACK11-NEXT: shrdq %cl, %r11, %r9
+; FALLBACK11-NEXT: movq -128(%rsp,%rax), %r14
+; FALLBACK11-NEXT: movq -120(%rsp,%rax), %rax
+; FALLBACK11-NEXT: movq %rax, %r15
+; FALLBACK11-NEXT: shrdq %cl, %r10, %r15
+; FALLBACK11-NEXT: shrxq %rcx, %r11, %r10
+; FALLBACK11-NEXT: # kill: def $cl killed $cl killed $rcx
+; FALLBACK11-NEXT: shrdq %cl, %rax, %r14
+; FALLBACK11-NEXT: movq %r15, 8(%rdx)
+; FALLBACK11-NEXT: movq %r9, 48(%rdx)
+; FALLBACK11-NEXT: movq %rdi, 32(%rdx)
+; FALLBACK11-NEXT: movq %rbx, 40(%rdx)
+; FALLBACK11-NEXT: movq %r8, 16(%rdx)
+; FALLBACK11-NEXT: movq %rsi, 24(%rdx)
+; FALLBACK11-NEXT: movq %r14, (%rdx)
+; FALLBACK11-NEXT: movq %r10, 56(%rdx)
+; FALLBACK11-NEXT: popq %rbx
+; FALLBACK11-NEXT: popq %r14
+; FALLBACK11-NEXT: popq %r15
+; FALLBACK11-NEXT: vzeroupper
+; FALLBACK11-NEXT: retq
+;
+; FALLBACK12-LABEL: lshr_64bytes:
+; FALLBACK12: # %bb.0:
+; FALLBACK12-NEXT: pushq %rbp
+; FALLBACK12-NEXT: pushq %r15
+; FALLBACK12-NEXT: pushq %r14
+; FALLBACK12-NEXT: pushq %r13
+; FALLBACK12-NEXT: pushq %r12
+; FALLBACK12-NEXT: pushq %rbx
+; FALLBACK12-NEXT: pushq %rax
+; FALLBACK12-NEXT: vmovups (%rdi), %zmm0
+; FALLBACK12-NEXT: movl (%rsi), %r9d
+; FALLBACK12-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; FALLBACK12-NEXT: vmovups %zmm1, -{{[0-9]+}}(%rsp)
+; FALLBACK12-NEXT: vmovups %zmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK12-NEXT: leal (,%r9,8), %eax
+; FALLBACK12-NEXT: andl $56, %eax
+; FALLBACK12-NEXT: andl $56, %r9d
+; FALLBACK12-NEXT: movq -128(%rsp,%r9), %r10
+; FALLBACK12-NEXT: movq -120(%rsp,%r9), %r8
+; FALLBACK12-NEXT: movl %eax, %ecx
+; FALLBACK12-NEXT: shrq %cl, %r10
+; FALLBACK12-NEXT: movl %eax, %esi
+; FALLBACK12-NEXT: notb %sil
+; FALLBACK12-NEXT: leaq (%r8,%r8), %rdi
+; FALLBACK12-NEXT: movl %esi, %ecx
+; FALLBACK12-NEXT: shlq %cl, %rdi
+; FALLBACK12-NEXT: orq %r10, %rdi
+; FALLBACK12-NEXT: movq -104(%rsp,%r9), %r10
+; FALLBACK12-NEXT: movq %r10, %rbx
+; FALLBACK12-NEXT: movl %eax, %ecx
+; FALLBACK12-NEXT: shrq %cl, %rbx
+; FALLBACK12-NEXT: movq -96(%rsp,%r9), %r12
+; FALLBACK12-NEXT: leaq (%r12,%r12), %r11
+; FALLBACK12-NEXT: movl %esi, %ecx
+; FALLBACK12-NEXT: shlq %cl, %r11
+; FALLBACK12-NEXT: orq %rbx, %r11
+; FALLBACK12-NEXT: movq -112(%rsp,%r9), %rbx
+; FALLBACK12-NEXT: movq %rbx, %r14
+; FALLBACK12-NEXT: movl %eax, %ecx
+; FALLBACK12-NEXT: shrq %cl, %r14
+; FALLBACK12-NEXT: addq %r10, %r10
+; FALLBACK12-NEXT: movl %esi, %ecx
+; FALLBACK12-NEXT: shlq %cl, %r10
+; FALLBACK12-NEXT: orq %r14, %r10
+; FALLBACK12-NEXT: movq -88(%rsp,%r9), %r14
+; FALLBACK12-NEXT: movq %r14, %r13
+; FALLBACK12-NEXT: movl %eax, %ecx
+; FALLBACK12-NEXT: shrq %cl, %r13
+; FALLBACK12-NEXT: movq -80(%rsp,%r9), %rbp
+; FALLBACK12-NEXT: leaq (%rbp,%rbp), %r15
+; FALLBACK12-NEXT: movl %esi, %ecx
+; FALLBACK12-NEXT: shlq %cl, %r15
+; FALLBACK12-NEXT: orq %r13, %r15
+; FALLBACK12-NEXT: movl %eax, %ecx
+; FALLBACK12-NEXT: shrq %cl, %r12
+; FALLBACK12-NEXT: addq %r14, %r14
+; FALLBACK12-NEXT: movl %esi, %ecx
+; FALLBACK12-NEXT: shlq %cl, %r14
+; FALLBACK12-NEXT: orq %r12, %r14
+; FALLBACK12-NEXT: movl %eax, %ecx
+; FALLBACK12-NEXT: shrq %cl, %rbp
+; FALLBACK12-NEXT: movq -72(%rsp,%r9), %r9
+; FALLBACK12-NEXT: leaq (%r9,%r9), %r12
+; FALLBACK12-NEXT: movl %esi, %ecx
+; FALLBACK12-NEXT: shlq %cl, %r12
+; FALLBACK12-NEXT: orq %rbp, %r12
+; FALLBACK12-NEXT: movl %eax, %ecx
+; FALLBACK12-NEXT: shrq %cl, %r8
+; FALLBACK12-NEXT: addq %rbx, %rbx
+; FALLBACK12-NEXT: movl %esi, %ecx
+; FALLBACK12-NEXT: shlq %cl, %rbx
+; FALLBACK12-NEXT: orq %r8, %rbx
+; FALLBACK12-NEXT: movl %eax, %ecx
+; FALLBACK12-NEXT: shrq %cl, %r9
+; FALLBACK12-NEXT: movq %r9, 56(%rdx)
+; FALLBACK12-NEXT: movq %rbx, 8(%rdx)
+; FALLBACK12-NEXT: movq %r12, 48(%rdx)
+; FALLBACK12-NEXT: movq %r14, 32(%rdx)
+; FALLBACK12-NEXT: movq %r15, 40(%rdx)
+; FALLBACK12-NEXT: movq %r10, 16(%rdx)
+; FALLBACK12-NEXT: movq %r11, 24(%rdx)
+; FALLBACK12-NEXT: movq %rdi, (%rdx)
+; FALLBACK12-NEXT: addq $8, %rsp
+; FALLBACK12-NEXT: popq %rbx
+; FALLBACK12-NEXT: popq %r12
+; FALLBACK12-NEXT: popq %r13
+; FALLBACK12-NEXT: popq %r14
+; FALLBACK12-NEXT: popq %r15
+; FALLBACK12-NEXT: popq %rbp
+; FALLBACK12-NEXT: vzeroupper
+; FALLBACK12-NEXT: retq
+;
+; FALLBACK13-LABEL: lshr_64bytes:
+; FALLBACK13: # %bb.0:
+; FALLBACK13-NEXT: pushq %r15
+; FALLBACK13-NEXT: pushq %r14
+; FALLBACK13-NEXT: pushq %rbx
+; FALLBACK13-NEXT: vmovups (%rdi), %zmm0
+; FALLBACK13-NEXT: movl (%rsi), %edi
+; FALLBACK13-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; FALLBACK13-NEXT: vmovups %zmm1, -{{[0-9]+}}(%rsp)
+; FALLBACK13-NEXT: vmovups %zmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK13-NEXT: leal (,%rdi,8), %ecx
+; FALLBACK13-NEXT: andl $56, %ecx
+; FALLBACK13-NEXT: andl $56, %edi
+; FALLBACK13-NEXT: movq -96(%rsp,%rdi), %rsi
+; FALLBACK13-NEXT: movq -104(%rsp,%rdi), %r9
+; FALLBACK13-NEXT: movq %r9, %rax
+; FALLBACK13-NEXT: shrdq %cl, %rsi, %rax
+; FALLBACK13-NEXT: movq -112(%rsp,%rdi), %r10
+; FALLBACK13-NEXT: movq %r10, %r8
+; FALLBACK13-NEXT: shrdq %cl, %r9, %r8
+; FALLBACK13-NEXT: movq -80(%rsp,%rdi), %r9
+; FALLBACK13-NEXT: movq -88(%rsp,%rdi), %r11
+; FALLBACK13-NEXT: movq %r11, %rbx
+; FALLBACK13-NEXT: shrdq %cl, %r9, %rbx
+; FALLBACK13-NEXT: shrdq %cl, %r11, %rsi
+; FALLBACK13-NEXT: movq -72(%rsp,%rdi), %r11
+; FALLBACK13-NEXT: shrdq %cl, %r11, %r9
+; FALLBACK13-NEXT: movq -128(%rsp,%rdi), %r14
+; FALLBACK13-NEXT: movq -120(%rsp,%rdi), %rdi
+; FALLBACK13-NEXT: movq %rdi, %r15
+; FALLBACK13-NEXT: shrdq %cl, %r10, %r15
+; FALLBACK13-NEXT: shrdq %cl, %rdi, %r14
+; FALLBACK13-NEXT: # kill: def $cl killed $cl killed $ecx
+; FALLBACK13-NEXT: shrq %cl, %r11
+; FALLBACK13-NEXT: movq %r15, 8(%rdx)
+; FALLBACK13-NEXT: movq %r9, 48(%rdx)
+; FALLBACK13-NEXT: movq %r11, 56(%rdx)
+; FALLBACK13-NEXT: movq %rsi, 32(%rdx)
+; FALLBACK13-NEXT: movq %rbx, 40(%rdx)
+; FALLBACK13-NEXT: movq %r8, 16(%rdx)
+; FALLBACK13-NEXT: movq %rax, 24(%rdx)
+; FALLBACK13-NEXT: movq %r14, (%rdx)
+; FALLBACK13-NEXT: popq %rbx
+; FALLBACK13-NEXT: popq %r14
+; FALLBACK13-NEXT: popq %r15
+; FALLBACK13-NEXT: vzeroupper
+; FALLBACK13-NEXT: retq
+;
+; FALLBACK14-LABEL: lshr_64bytes:
+; FALLBACK14: # %bb.0:
+; FALLBACK14-NEXT: pushq %rbp
+; FALLBACK14-NEXT: pushq %r15
+; FALLBACK14-NEXT: pushq %r14
+; FALLBACK14-NEXT: pushq %r13
+; FALLBACK14-NEXT: pushq %r12
+; FALLBACK14-NEXT: pushq %rbx
+; FALLBACK14-NEXT: pushq %rax
+; FALLBACK14-NEXT: vmovups (%rdi), %zmm0
+; FALLBACK14-NEXT: movl (%rsi), %esi
+; FALLBACK14-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; FALLBACK14-NEXT: vmovups %zmm1, -{{[0-9]+}}(%rsp)
+; FALLBACK14-NEXT: vmovups %zmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK14-NEXT: leal (,%rsi,8), %ecx
+; FALLBACK14-NEXT: andl $56, %ecx
+; FALLBACK14-NEXT: andl $56, %esi
+; FALLBACK14-NEXT: shrxq %rcx, -128(%rsp,%rsi), %r11
+; FALLBACK14-NEXT: movq -112(%rsp,%rsi), %rax
+; FALLBACK14-NEXT: movq -104(%rsp,%rsi), %rdi
+; FALLBACK14-NEXT: shrxq %rcx, %rdi, %r12
+; FALLBACK14-NEXT: movq -96(%rsp,%rsi), %r13
+; FALLBACK14-NEXT: shrxq %rcx, %rax, %r9
+; FALLBACK14-NEXT: movq -88(%rsp,%rsi), %r10
+; FALLBACK14-NEXT: shrxq %rcx, %r10, %r14
+; FALLBACK14-NEXT: shrxq %rcx, %r13, %r15
+; FALLBACK14-NEXT: movl %ecx, %ebx
+; FALLBACK14-NEXT: notb %bl
+; FALLBACK14-NEXT: movq -120(%rsp,%rsi), %rbp
+; FALLBACK14-NEXT: leaq (%rbp,%rbp), %r8
+; FALLBACK14-NEXT: shlxq %rbx, %r8, %r8
+; FALLBACK14-NEXT: orq %r11, %r8
+; FALLBACK14-NEXT: leaq (%r13,%r13), %r11
+; FALLBACK14-NEXT: shlxq %rbx, %r11, %r11
+; FALLBACK14-NEXT: orq %r12, %r11
+; FALLBACK14-NEXT: movq -80(%rsp,%rsi), %r12
+; FALLBACK14-NEXT: shrxq %rcx, %r12, %r13
+; FALLBACK14-NEXT: shrxq %rcx, %rbp, %rbp
+; FALLBACK14-NEXT: movq -72(%rsp,%rsi), %rsi
+; FALLBACK14-NEXT: shrxq %rcx, %rsi, %rcx
+; FALLBACK14-NEXT: addq %rdi, %rdi
+; FALLBACK14-NEXT: shlxq %rbx, %rdi, %rdi
+; FALLBACK14-NEXT: orq %r9, %rdi
+; FALLBACK14-NEXT: leaq (%r12,%r12), %r9
+; FALLBACK14-NEXT: shlxq %rbx, %r9, %r9
+; FALLBACK14-NEXT: orq %r14, %r9
+; FALLBACK14-NEXT: addq %r10, %r10
+; FALLBACK14-NEXT: shlxq %rbx, %r10, %r10
+; FALLBACK14-NEXT: orq %r15, %r10
+; FALLBACK14-NEXT: addq %rsi, %rsi
+; FALLBACK14-NEXT: shlxq %rbx, %rsi, %rsi
+; FALLBACK14-NEXT: orq %r13, %rsi
+; FALLBACK14-NEXT: addq %rax, %rax
+; FALLBACK14-NEXT: shlxq %rbx, %rax, %rax
+; FALLBACK14-NEXT: orq %rbp, %rax
+; FALLBACK14-NEXT: movq %rcx, 56(%rdx)
+; FALLBACK14-NEXT: movq %rax, 8(%rdx)
+; FALLBACK14-NEXT: movq %rsi, 48(%rdx)
+; FALLBACK14-NEXT: movq %r10, 32(%rdx)
+; FALLBACK14-NEXT: movq %r9, 40(%rdx)
+; FALLBACK14-NEXT: movq %rdi, 16(%rdx)
+; FALLBACK14-NEXT: movq %r11, 24(%rdx)
+; FALLBACK14-NEXT: movq %r8, (%rdx)
+; FALLBACK14-NEXT: addq $8, %rsp
+; FALLBACK14-NEXT: popq %rbx
+; FALLBACK14-NEXT: popq %r12
+; FALLBACK14-NEXT: popq %r13
+; FALLBACK14-NEXT: popq %r14
+; FALLBACK14-NEXT: popq %r15
+; FALLBACK14-NEXT: popq %rbp
+; FALLBACK14-NEXT: vzeroupper
+; FALLBACK14-NEXT: retq
+;
+; FALLBACK15-LABEL: lshr_64bytes:
+; FALLBACK15: # %bb.0:
+; FALLBACK15-NEXT: pushq %r15
+; FALLBACK15-NEXT: pushq %r14
+; FALLBACK15-NEXT: pushq %rbx
+; FALLBACK15-NEXT: vmovups (%rdi), %zmm0
+; FALLBACK15-NEXT: movl (%rsi), %eax
+; FALLBACK15-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; FALLBACK15-NEXT: vmovups %zmm1, -{{[0-9]+}}(%rsp)
+; FALLBACK15-NEXT: vmovups %zmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK15-NEXT: leal (,%rax,8), %ecx
+; FALLBACK15-NEXT: andl $56, %ecx
+; FALLBACK15-NEXT: andl $56, %eax
+; FALLBACK15-NEXT: movq -96(%rsp,%rax), %rdi
+; FALLBACK15-NEXT: movq -104(%rsp,%rax), %r9
+; FALLBACK15-NEXT: movq %r9, %rsi
+; FALLBACK15-NEXT: shrdq %cl, %rdi, %rsi
+; FALLBACK15-NEXT: movq -112(%rsp,%rax), %r10
+; FALLBACK15-NEXT: movq %r10, %r8
+; FALLBACK15-NEXT: shrdq %cl, %r9, %r8
+; FALLBACK15-NEXT: movq -80(%rsp,%rax), %r9
+; FALLBACK15-NEXT: movq -88(%rsp,%rax), %r11
+; FALLBACK15-NEXT: movq %r11, %rbx
+; FALLBACK15-NEXT: shrdq %cl, %r9, %rbx
+; FALLBACK15-NEXT: shrdq %cl, %r11, %rdi
+; FALLBACK15-NEXT: movq -72(%rsp,%rax), %r11
+; FALLBACK15-NEXT: shrdq %cl, %r11, %r9
+; FALLBACK15-NEXT: movq -128(%rsp,%rax), %r14
+; FALLBACK15-NEXT: movq -120(%rsp,%rax), %rax
+; FALLBACK15-NEXT: movq %rax, %r15
+; FALLBACK15-NEXT: shrdq %cl, %r10, %r15
+; FALLBACK15-NEXT: shrxq %rcx, %r11, %r10
+; FALLBACK15-NEXT: # kill: def $cl killed $cl killed $rcx
+; FALLBACK15-NEXT: shrdq %cl, %rax, %r14
+; FALLBACK15-NEXT: movq %r15, 8(%rdx)
+; FALLBACK15-NEXT: movq %r9, 48(%rdx)
+; FALLBACK15-NEXT: movq %rdi, 32(%rdx)
+; FALLBACK15-NEXT: movq %rbx, 40(%rdx)
+; FALLBACK15-NEXT: movq %r8, 16(%rdx)
+; FALLBACK15-NEXT: movq %rsi, 24(%rdx)
+; FALLBACK15-NEXT: movq %r14, (%rdx)
+; FALLBACK15-NEXT: movq %r10, 56(%rdx)
+; FALLBACK15-NEXT: popq %rbx
+; FALLBACK15-NEXT: popq %r14
+; FALLBACK15-NEXT: popq %r15
+; FALLBACK15-NEXT: vzeroupper
+; FALLBACK15-NEXT: retq
+;
+; FALLBACK16-LABEL: lshr_64bytes:
+; FALLBACK16: # %bb.0:
+; FALLBACK16-NEXT: pushl %ebp
+; FALLBACK16-NEXT: pushl %ebx
+; FALLBACK16-NEXT: pushl %edi
+; FALLBACK16-NEXT: pushl %esi
+; FALLBACK16-NEXT: subl $204, %esp
+; FALLBACK16-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK16-NEXT: movl (%eax), %ecx
+; FALLBACK16-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: movl 4(%eax), %ecx
+; FALLBACK16-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: movl 8(%eax), %ecx
+; FALLBACK16-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: movl 12(%eax), %ecx
+; FALLBACK16-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: movl 16(%eax), %ecx
+; FALLBACK16-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: movl 20(%eax), %ecx
+; FALLBACK16-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: movl 24(%eax), %ecx
+; FALLBACK16-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: movl 28(%eax), %ecx
+; FALLBACK16-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: movl 32(%eax), %ecx
+; FALLBACK16-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: movl 36(%eax), %ecx
+; FALLBACK16-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: movl 40(%eax), %ebp
+; FALLBACK16-NEXT: movl 44(%eax), %ebx
+; FALLBACK16-NEXT: movl 48(%eax), %edi
+; FALLBACK16-NEXT: movl 52(%eax), %esi
+; FALLBACK16-NEXT: movl 56(%eax), %edx
+; FALLBACK16-NEXT: movl 60(%eax), %ecx
+; FALLBACK16-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK16-NEXT: movl (%eax), %eax
+; FALLBACK16-NEXT: xorps %xmm0, %xmm0
+; FALLBACK16-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movl %ebx, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movl %ebp, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK16-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK16-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK16-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK16-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK16-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK16-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK16-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK16-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK16-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK16-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movl %eax, %esi
+; FALLBACK16-NEXT: andl $60, %esi
+; FALLBACK16-NEXT: movl 68(%esp,%esi), %edx
+; FALLBACK16-NEXT: shll $3, %eax
+; FALLBACK16-NEXT: andl $24, %eax
+; FALLBACK16-NEXT: movl %edx, %edi
+; FALLBACK16-NEXT: movl %eax, %ecx
+; FALLBACK16-NEXT: shrl %cl, %edi
+; FALLBACK16-NEXT: movl 72(%esp,%esi), %ecx
+; FALLBACK16-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: leal (%ecx,%ecx), %ebx
+; FALLBACK16-NEXT: movb %al, %ch
+; FALLBACK16-NEXT: notb %ch
+; FALLBACK16-NEXT: movb %ch, %cl
+; FALLBACK16-NEXT: shll %cl, %ebx
+; FALLBACK16-NEXT: orl %edi, %ebx
+; FALLBACK16-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: movl 64(%esp,%esi), %edi
+; FALLBACK16-NEXT: movb %al, %cl
+; FALLBACK16-NEXT: shrl %cl, %edi
+; FALLBACK16-NEXT: addl %edx, %edx
+; FALLBACK16-NEXT: movb %ch, %cl
+; FALLBACK16-NEXT: shll %cl, %edx
+; FALLBACK16-NEXT: orl %edi, %edx
+; FALLBACK16-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: movl 76(%esp,%esi), %edx
+; FALLBACK16-NEXT: movl %edx, %ebp
+; FALLBACK16-NEXT: movb %al, %cl
+; FALLBACK16-NEXT: shrl %cl, %ebp
+; FALLBACK16-NEXT: movl 80(%esp,%esi), %edi
+; FALLBACK16-NEXT: leal (%edi,%edi), %ebx
+; FALLBACK16-NEXT: movb %ch, %cl
+; FALLBACK16-NEXT: shll %cl, %ebx
+; FALLBACK16-NEXT: orl %ebp, %ebx
+; FALLBACK16-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: movb %al, %cl
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; FALLBACK16-NEXT: shrl %cl, %ebx
+; FALLBACK16-NEXT: addl %edx, %edx
+; FALLBACK16-NEXT: movb %ch, %cl
+; FALLBACK16-NEXT: shll %cl, %edx
+; FALLBACK16-NEXT: orl %ebx, %edx
+; FALLBACK16-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: movl 84(%esp,%esi), %ebx
+; FALLBACK16-NEXT: movl %ebx, %ebp
+; FALLBACK16-NEXT: movl %eax, %edx
+; FALLBACK16-NEXT: movb %dl, %cl
+; FALLBACK16-NEXT: shrl %cl, %ebp
+; FALLBACK16-NEXT: movl 88(%esp,%esi), %eax
+; FALLBACK16-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: addl %eax, %eax
+; FALLBACK16-NEXT: movb %ch, %cl
+; FALLBACK16-NEXT: shll %cl, %eax
+; FALLBACK16-NEXT: orl %ebp, %eax
+; FALLBACK16-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: movb %dl, %cl
+; FALLBACK16-NEXT: shrl %cl, %edi
+; FALLBACK16-NEXT: addl %ebx, %ebx
+; FALLBACK16-NEXT: movb %ch, %cl
+; FALLBACK16-NEXT: movb %ch, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
+; FALLBACK16-NEXT: shll %cl, %ebx
+; FALLBACK16-NEXT: orl %edi, %ebx
+; FALLBACK16-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: movl 92(%esp,%esi), %ebx
+; FALLBACK16-NEXT: movl %ebx, %ebp
+; FALLBACK16-NEXT: movb %dl, %cl
+; FALLBACK16-NEXT: shrl %cl, %ebp
+; FALLBACK16-NEXT: movl 96(%esp,%esi), %edi
+; FALLBACK16-NEXT: leal (%edi,%edi), %eax
+; FALLBACK16-NEXT: movb %ch, %cl
+; FALLBACK16-NEXT: shll %cl, %eax
+; FALLBACK16-NEXT: orl %ebp, %eax
+; FALLBACK16-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: movb %dl, %cl
+; FALLBACK16-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK16-NEXT: shrl %cl, %eax
+; FALLBACK16-NEXT: addl %ebx, %ebx
+; FALLBACK16-NEXT: movb %ch, %cl
+; FALLBACK16-NEXT: shll %cl, %ebx
+; FALLBACK16-NEXT: orl %eax, %ebx
+; FALLBACK16-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: movl 100(%esp,%esi), %ebx
+; FALLBACK16-NEXT: movl %ebx, %ebp
+; FALLBACK16-NEXT: movb %dl, %cl
+; FALLBACK16-NEXT: shrl %cl, %ebp
+; FALLBACK16-NEXT: movl 104(%esp,%esi), %edx
+; FALLBACK16-NEXT: leal (%edx,%edx), %eax
+; FALLBACK16-NEXT: movb %ch, %cl
+; FALLBACK16-NEXT: shll %cl, %eax
+; FALLBACK16-NEXT: orl %ebp, %eax
+; FALLBACK16-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK16-NEXT: movb %al, %cl
+; FALLBACK16-NEXT: shrl %cl, %edi
+; FALLBACK16-NEXT: addl %ebx, %ebx
+; FALLBACK16-NEXT: movb %ch, %cl
+; FALLBACK16-NEXT: shll %cl, %ebx
+; FALLBACK16-NEXT: orl %edi, %ebx
+; FALLBACK16-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: movl 108(%esp,%esi), %edi
+; FALLBACK16-NEXT: movl %edi, %ebp
+; FALLBACK16-NEXT: movl %eax, %ecx
+; FALLBACK16-NEXT: shrl %cl, %ebp
+; FALLBACK16-NEXT: movl 112(%esp,%esi), %ecx
+; FALLBACK16-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: leal (%ecx,%ecx), %ebx
+; FALLBACK16-NEXT: movb {{[-0-9]+}}(%e{{[sb]}}p), %ch # 1-byte Reload
+; FALLBACK16-NEXT: movb %ch, %cl
+; FALLBACK16-NEXT: shll %cl, %ebx
+; FALLBACK16-NEXT: orl %ebp, %ebx
+; FALLBACK16-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: movb %al, %cl
+; FALLBACK16-NEXT: shrl %cl, %edx
+; FALLBACK16-NEXT: addl %edi, %edi
+; FALLBACK16-NEXT: movb %ch, %cl
+; FALLBACK16-NEXT: shll %cl, %edi
+; FALLBACK16-NEXT: orl %edx, %edi
+; FALLBACK16-NEXT: movl %esi, %edx
+; FALLBACK16-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: movl 116(%esp,%esi), %esi
+; FALLBACK16-NEXT: movl %esi, %ebx
+; FALLBACK16-NEXT: movb %al, %cl
+; FALLBACK16-NEXT: shrl %cl, %ebx
+; FALLBACK16-NEXT: movl 120(%esp,%edx), %eax
+; FALLBACK16-NEXT: leal (%eax,%eax), %ebp
+; FALLBACK16-NEXT: movb %ch, %cl
+; FALLBACK16-NEXT: shll %cl, %ebp
+; FALLBACK16-NEXT: orl %ebx, %ebp
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK16-NEXT: movb %dl, %cl
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; FALLBACK16-NEXT: shrl %cl, %ebx
+; FALLBACK16-NEXT: addl %esi, %esi
+; FALLBACK16-NEXT: movb %ch, %cl
+; FALLBACK16-NEXT: shll %cl, %esi
+; FALLBACK16-NEXT: orl %ebx, %esi
+; FALLBACK16-NEXT: movb %dl, %cl
+; FALLBACK16-NEXT: shrl %cl, %eax
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK16-NEXT: movl 124(%esp,%edx), %ebx
+; FALLBACK16-NEXT: leal (%ebx,%ebx), %edx
+; FALLBACK16-NEXT: movb %ch, %cl
+; FALLBACK16-NEXT: shll %cl, %edx
+; FALLBACK16-NEXT: orl %eax, %edx
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK16-NEXT: # kill: def $cl killed $cl killed $ecx
+; FALLBACK16-NEXT: shrl %cl, %ebx
+; FALLBACK16-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK16-NEXT: movl %ebx, 60(%eax)
+; FALLBACK16-NEXT: movl %edx, 56(%eax)
+; FALLBACK16-NEXT: movl %esi, 48(%eax)
+; FALLBACK16-NEXT: movl %ebp, 52(%eax)
+; FALLBACK16-NEXT: movl %edi, 40(%eax)
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK16-NEXT: movl %ecx, 44(%eax)
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK16-NEXT: movl %ecx, 32(%eax)
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK16-NEXT: movl %ecx, 36(%eax)
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK16-NEXT: movl %ecx, 24(%eax)
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK16-NEXT: movl %ecx, 28(%eax)
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK16-NEXT: movl %ecx, 16(%eax)
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK16-NEXT: movl %ecx, 20(%eax)
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK16-NEXT: movl %ecx, 8(%eax)
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK16-NEXT: movl %ecx, 12(%eax)
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK16-NEXT: movl %ecx, (%eax)
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK16-NEXT: movl %ecx, 4(%eax)
+; FALLBACK16-NEXT: addl $204, %esp
+; FALLBACK16-NEXT: popl %esi
+; FALLBACK16-NEXT: popl %edi
+; FALLBACK16-NEXT: popl %ebx
+; FALLBACK16-NEXT: popl %ebp
+; FALLBACK16-NEXT: retl
+;
+; FALLBACK17-LABEL: lshr_64bytes:
+; FALLBACK17: # %bb.0:
+; FALLBACK17-NEXT: pushl %ebp
+; FALLBACK17-NEXT: pushl %ebx
+; FALLBACK17-NEXT: pushl %edi
+; FALLBACK17-NEXT: pushl %esi
+; FALLBACK17-NEXT: subl $188, %esp
+; FALLBACK17-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK17-NEXT: movl (%ecx), %eax
+; FALLBACK17-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK17-NEXT: movl 4(%ecx), %eax
+; FALLBACK17-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK17-NEXT: movl 8(%ecx), %eax
+; FALLBACK17-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK17-NEXT: movl 12(%ecx), %eax
+; FALLBACK17-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK17-NEXT: movl 16(%ecx), %eax
+; FALLBACK17-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK17-NEXT: movl 20(%ecx), %eax
+; FALLBACK17-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK17-NEXT: movl 24(%ecx), %eax
+; FALLBACK17-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK17-NEXT: movl 28(%ecx), %eax
+; FALLBACK17-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK17-NEXT: movl 32(%ecx), %eax
+; FALLBACK17-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK17-NEXT: movl 36(%ecx), %eax
+; FALLBACK17-NEXT: movl %eax, (%esp) # 4-byte Spill
+; FALLBACK17-NEXT: movl 40(%ecx), %ebp
+; FALLBACK17-NEXT: movl 44(%ecx), %ebx
+; FALLBACK17-NEXT: movl 48(%ecx), %edi
+; FALLBACK17-NEXT: movl 52(%ecx), %esi
+; FALLBACK17-NEXT: movl 56(%ecx), %edx
+; FALLBACK17-NEXT: movl 60(%ecx), %eax
+; FALLBACK17-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK17-NEXT: movl (%ecx), %ecx
+; FALLBACK17-NEXT: xorps %xmm0, %xmm0
+; FALLBACK17-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movl %ebx, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movl %ebp, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movl (%esp), %eax # 4-byte Reload
+; FALLBACK17-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK17-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK17-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK17-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK17-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK17-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK17-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK17-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK17-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK17-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movl %ecx, %ebp
+; FALLBACK17-NEXT: andl $60, %ebp
+; FALLBACK17-NEXT: movl 56(%esp,%ebp), %edx
+; FALLBACK17-NEXT: movl 52(%esp,%ebp), %eax
+; FALLBACK17-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK17-NEXT: shll $3, %ecx
+; FALLBACK17-NEXT: andl $24, %ecx
+; FALLBACK17-NEXT: shrdl %cl, %edx, %eax
+; FALLBACK17-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK17-NEXT: movl 64(%esp,%ebp), %edi
+; FALLBACK17-NEXT: movl 60(%esp,%ebp), %eax
+; FALLBACK17-NEXT: movl %eax, %esi
+; FALLBACK17-NEXT: shrdl %cl, %edi, %esi
+; FALLBACK17-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK17-NEXT: shrdl %cl, %eax, %edx
+; FALLBACK17-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK17-NEXT: movl 72(%esp,%ebp), %esi
+; FALLBACK17-NEXT: movl 68(%esp,%ebp), %eax
+; FALLBACK17-NEXT: movl %eax, %edx
+; FALLBACK17-NEXT: shrdl %cl, %esi, %edx
+; FALLBACK17-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK17-NEXT: shrdl %cl, %eax, %edi
+; FALLBACK17-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK17-NEXT: movl 80(%esp,%ebp), %edi
+; FALLBACK17-NEXT: movl 76(%esp,%ebp), %eax
+; FALLBACK17-NEXT: movl %eax, %edx
+; FALLBACK17-NEXT: shrdl %cl, %edi, %edx
+; FALLBACK17-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK17-NEXT: shrdl %cl, %eax, %esi
+; FALLBACK17-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK17-NEXT: movl 88(%esp,%ebp), %esi
+; FALLBACK17-NEXT: movl 84(%esp,%ebp), %eax
+; FALLBACK17-NEXT: movl %eax, %edx
+; FALLBACK17-NEXT: shrdl %cl, %esi, %edx
+; FALLBACK17-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK17-NEXT: movl %esi, %edx
+; FALLBACK17-NEXT: shrdl %cl, %eax, %edi
+; FALLBACK17-NEXT: movl %edi, (%esp) # 4-byte Spill
+; FALLBACK17-NEXT: movl 96(%esp,%ebp), %esi
+; FALLBACK17-NEXT: movl 92(%esp,%ebp), %eax
+; FALLBACK17-NEXT: movl %eax, %edi
+; FALLBACK17-NEXT: shrdl %cl, %esi, %edi
+; FALLBACK17-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK17-NEXT: shrdl %cl, %eax, %edx
+; FALLBACK17-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK17-NEXT: movl 104(%esp,%ebp), %edx
+; FALLBACK17-NEXT: movl 100(%esp,%ebp), %eax
+; FALLBACK17-NEXT: movl %eax, %edi
+; FALLBACK17-NEXT: shrdl %cl, %edx, %edi
+; FALLBACK17-NEXT: shrdl %cl, %eax, %esi
+; FALLBACK17-NEXT: movl 48(%esp,%ebp), %ebx
+; FALLBACK17-NEXT: movl 108(%esp,%ebp), %eax
+; FALLBACK17-NEXT: shrdl %cl, %eax, %edx
+; FALLBACK17-NEXT: movl {{[0-9]+}}(%esp), %ebp
+; FALLBACK17-NEXT: movl %edx, 56(%ebp)
+; FALLBACK17-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK17-NEXT: shrdl %cl, %edx, %ebx
+; FALLBACK17-NEXT: # kill: def $cl killed $cl killed $ecx
+; FALLBACK17-NEXT: shrl %cl, %eax
+; FALLBACK17-NEXT: movl %eax, 60(%ebp)
+; FALLBACK17-NEXT: movl %esi, 48(%ebp)
+; FALLBACK17-NEXT: movl %edi, 52(%ebp)
+; FALLBACK17-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK17-NEXT: movl %eax, 40(%ebp)
+; FALLBACK17-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK17-NEXT: movl %eax, 44(%ebp)
+; FALLBACK17-NEXT: movl (%esp), %eax # 4-byte Reload
+; FALLBACK17-NEXT: movl %eax, 32(%ebp)
+; FALLBACK17-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK17-NEXT: movl %eax, 36(%ebp)
+; FALLBACK17-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK17-NEXT: movl %eax, 24(%ebp)
+; FALLBACK17-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK17-NEXT: movl %eax, 28(%ebp)
+; FALLBACK17-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK17-NEXT: movl %eax, 16(%ebp)
+; FALLBACK17-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK17-NEXT: movl %eax, 20(%ebp)
+; FALLBACK17-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK17-NEXT: movl %eax, 8(%ebp)
+; FALLBACK17-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK17-NEXT: movl %eax, 12(%ebp)
+; FALLBACK17-NEXT: movl %ebx, (%ebp)
+; FALLBACK17-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK17-NEXT: movl %eax, 4(%ebp)
+; FALLBACK17-NEXT: addl $188, %esp
+; FALLBACK17-NEXT: popl %esi
+; FALLBACK17-NEXT: popl %edi
+; FALLBACK17-NEXT: popl %ebx
+; FALLBACK17-NEXT: popl %ebp
+; FALLBACK17-NEXT: retl
+;
+; FALLBACK18-LABEL: lshr_64bytes:
+; FALLBACK18: # %bb.0:
+; FALLBACK18-NEXT: pushl %ebp
+; FALLBACK18-NEXT: pushl %ebx
+; FALLBACK18-NEXT: pushl %edi
+; FALLBACK18-NEXT: pushl %esi
+; FALLBACK18-NEXT: subl $204, %esp
+; FALLBACK18-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK18-NEXT: movl (%eax), %ecx
+; FALLBACK18-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: movl 4(%eax), %ecx
+; FALLBACK18-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: movl 8(%eax), %ecx
+; FALLBACK18-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: movl 12(%eax), %ecx
+; FALLBACK18-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: movl 16(%eax), %ecx
+; FALLBACK18-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: movl 20(%eax), %ecx
+; FALLBACK18-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: movl 24(%eax), %ecx
+; FALLBACK18-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: movl 28(%eax), %ecx
+; FALLBACK18-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: movl 32(%eax), %ecx
+; FALLBACK18-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: movl 36(%eax), %ecx
+; FALLBACK18-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: movl 40(%eax), %ebp
+; FALLBACK18-NEXT: movl 44(%eax), %ebx
+; FALLBACK18-NEXT: movl 48(%eax), %edi
+; FALLBACK18-NEXT: movl 52(%eax), %esi
+; FALLBACK18-NEXT: movl 56(%eax), %edx
+; FALLBACK18-NEXT: movl 60(%eax), %ecx
+; FALLBACK18-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK18-NEXT: movl (%eax), %eax
+; FALLBACK18-NEXT: xorps %xmm0, %xmm0
+; FALLBACK18-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl %ebx, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl %ebp, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK18-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK18-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK18-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK18-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK18-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK18-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK18-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK18-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK18-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK18-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl %eax, %ecx
+; FALLBACK18-NEXT: leal (,%eax,8), %edx
+; FALLBACK18-NEXT: andl $24, %edx
+; FALLBACK18-NEXT: andl $60, %ecx
+; FALLBACK18-NEXT: movl 68(%esp,%ecx), %esi
+; FALLBACK18-NEXT: movl 72(%esp,%ecx), %eax
+; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: shrxl %edx, %esi, %edi
+; FALLBACK18-NEXT: movl %edx, %ebx
+; FALLBACK18-NEXT: notb %bl
+; FALLBACK18-NEXT: leal (%eax,%eax), %ebp
+; FALLBACK18-NEXT: shlxl %ebx, %ebp, %eax
+; FALLBACK18-NEXT: orl %edi, %eax
+; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: shrxl %edx, 64(%esp,%ecx), %edi
+; FALLBACK18-NEXT: addl %esi, %esi
+; FALLBACK18-NEXT: shlxl %ebx, %esi, %eax
+; FALLBACK18-NEXT: orl %edi, %eax
+; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: movl 80(%esp,%ecx), %esi
+; FALLBACK18-NEXT: leal (%esi,%esi), %edi
+; FALLBACK18-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK18-NEXT: movl 76(%esp,%ecx), %edi
+; FALLBACK18-NEXT: shrxl %edx, %edi, %ebp
+; FALLBACK18-NEXT: orl %ebp, %eax
+; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK18-NEXT: addl %edi, %edi
+; FALLBACK18-NEXT: shlxl %ebx, %edi, %edi
+; FALLBACK18-NEXT: orl %eax, %edi
+; FALLBACK18-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: movl 88(%esp,%ecx), %eax
+; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: leal (%eax,%eax), %edi
+; FALLBACK18-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK18-NEXT: movl 84(%esp,%ecx), %edi
+; FALLBACK18-NEXT: shrxl %edx, %edi, %ebp
+; FALLBACK18-NEXT: orl %ebp, %eax
+; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: shrxl %edx, %esi, %esi
+; FALLBACK18-NEXT: addl %edi, %edi
+; FALLBACK18-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK18-NEXT: orl %esi, %eax
+; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: movl 96(%esp,%ecx), %esi
+; FALLBACK18-NEXT: leal (%esi,%esi), %edi
+; FALLBACK18-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK18-NEXT: movl 92(%esp,%ecx), %edi
+; FALLBACK18-NEXT: shrxl %edx, %edi, %ebp
+; FALLBACK18-NEXT: orl %ebp, %eax
+; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK18-NEXT: addl %edi, %edi
+; FALLBACK18-NEXT: shlxl %ebx, %edi, %edi
+; FALLBACK18-NEXT: orl %eax, %edi
+; FALLBACK18-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: movl 104(%esp,%ecx), %eax
+; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: leal (%eax,%eax), %edi
+; FALLBACK18-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK18-NEXT: movl 100(%esp,%ecx), %edi
+; FALLBACK18-NEXT: shrxl %edx, %edi, %ebp
+; FALLBACK18-NEXT: orl %ebp, %eax
+; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: shrxl %edx, %esi, %esi
+; FALLBACK18-NEXT: addl %edi, %edi
+; FALLBACK18-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK18-NEXT: orl %esi, %eax
+; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: movl 112(%esp,%ecx), %eax
+; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: leal (%eax,%eax), %esi
+; FALLBACK18-NEXT: shlxl %ebx, %esi, %eax
+; FALLBACK18-NEXT: movl 108(%esp,%ecx), %esi
+; FALLBACK18-NEXT: movl %ecx, %edi
+; FALLBACK18-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: shrxl %edx, %esi, %ebp
+; FALLBACK18-NEXT: orl %ebp, %eax
+; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; FALLBACK18-NEXT: addl %esi, %esi
+; FALLBACK18-NEXT: shlxl %ebx, %esi, %esi
+; FALLBACK18-NEXT: orl %ecx, %esi
+; FALLBACK18-NEXT: movl 120(%esp,%edi), %ebp
+; FALLBACK18-NEXT: leal (%ebp,%ebp), %ecx
+; FALLBACK18-NEXT: shlxl %ebx, %ecx, %ecx
+; FALLBACK18-NEXT: movl 116(%esp,%edi), %eax
+; FALLBACK18-NEXT: shrxl %edx, %eax, %edi
+; FALLBACK18-NEXT: orl %edi, %ecx
+; FALLBACK18-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; FALLBACK18-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: addl %eax, %eax
+; FALLBACK18-NEXT: shlxl %ebx, %eax, %edi
+; FALLBACK18-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; FALLBACK18-NEXT: shrxl %edx, %ebp, %eax
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; FALLBACK18-NEXT: movl 124(%esp,%ebp), %ebp
+; FALLBACK18-NEXT: shrxl %edx, %ebp, %edx
+; FALLBACK18-NEXT: addl %ebp, %ebp
+; FALLBACK18-NEXT: shlxl %ebx, %ebp, %ebx
+; FALLBACK18-NEXT: orl %eax, %ebx
+; FALLBACK18-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK18-NEXT: movl %edx, 60(%eax)
+; FALLBACK18-NEXT: movl %ebx, 56(%eax)
+; FALLBACK18-NEXT: movl %edi, 48(%eax)
+; FALLBACK18-NEXT: movl %ecx, 52(%eax)
+; FALLBACK18-NEXT: movl %esi, 40(%eax)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK18-NEXT: movl %ecx, 44(%eax)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK18-NEXT: movl %ecx, 32(%eax)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK18-NEXT: movl %ecx, 36(%eax)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK18-NEXT: movl %ecx, 24(%eax)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK18-NEXT: movl %ecx, 28(%eax)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK18-NEXT: movl %ecx, 16(%eax)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK18-NEXT: movl %ecx, 20(%eax)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK18-NEXT: movl %ecx, 8(%eax)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK18-NEXT: movl %ecx, 12(%eax)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK18-NEXT: movl %ecx, (%eax)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK18-NEXT: movl %ecx, 4(%eax)
+; FALLBACK18-NEXT: addl $204, %esp
+; FALLBACK18-NEXT: popl %esi
+; FALLBACK18-NEXT: popl %edi
+; FALLBACK18-NEXT: popl %ebx
+; FALLBACK18-NEXT: popl %ebp
+; FALLBACK18-NEXT: retl
+;
+; FALLBACK19-LABEL: lshr_64bytes:
+; FALLBACK19: # %bb.0:
+; FALLBACK19-NEXT: pushl %ebp
+; FALLBACK19-NEXT: pushl %ebx
+; FALLBACK19-NEXT: pushl %edi
+; FALLBACK19-NEXT: pushl %esi
+; FALLBACK19-NEXT: subl $188, %esp
+; FALLBACK19-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK19-NEXT: movl (%ecx), %eax
+; FALLBACK19-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK19-NEXT: movl 4(%ecx), %eax
+; FALLBACK19-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK19-NEXT: movl 8(%ecx), %eax
+; FALLBACK19-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK19-NEXT: movl 12(%ecx), %eax
+; FALLBACK19-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK19-NEXT: movl 16(%ecx), %eax
+; FALLBACK19-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK19-NEXT: movl 20(%ecx), %eax
+; FALLBACK19-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK19-NEXT: movl 24(%ecx), %eax
+; FALLBACK19-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK19-NEXT: movl 28(%ecx), %eax
+; FALLBACK19-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK19-NEXT: movl 32(%ecx), %eax
+; FALLBACK19-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK19-NEXT: movl 36(%ecx), %eax
+; FALLBACK19-NEXT: movl %eax, (%esp) # 4-byte Spill
+; FALLBACK19-NEXT: movl 40(%ecx), %ebp
+; FALLBACK19-NEXT: movl 44(%ecx), %ebx
+; FALLBACK19-NEXT: movl 48(%ecx), %edi
+; FALLBACK19-NEXT: movl 52(%ecx), %esi
+; FALLBACK19-NEXT: movl 56(%ecx), %edx
+; FALLBACK19-NEXT: movl 60(%ecx), %eax
+; FALLBACK19-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK19-NEXT: movl (%ecx), %ecx
+; FALLBACK19-NEXT: xorps %xmm0, %xmm0
+; FALLBACK19-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movl %ebx, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movl %ebp, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movl (%esp), %eax # 4-byte Reload
+; FALLBACK19-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK19-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK19-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK19-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK19-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK19-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK19-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK19-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK19-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK19-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movl %ecx, %ebp
+; FALLBACK19-NEXT: andl $60, %ebp
+; FALLBACK19-NEXT: movl 56(%esp,%ebp), %edx
+; FALLBACK19-NEXT: movl 52(%esp,%ebp), %eax
+; FALLBACK19-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK19-NEXT: shll $3, %ecx
+; FALLBACK19-NEXT: andl $24, %ecx
+; FALLBACK19-NEXT: shrdl %cl, %edx, %eax
+; FALLBACK19-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK19-NEXT: movl 64(%esp,%ebp), %edi
+; FALLBACK19-NEXT: movl 60(%esp,%ebp), %eax
+; FALLBACK19-NEXT: movl %eax, %esi
+; FALLBACK19-NEXT: shrdl %cl, %edi, %esi
+; FALLBACK19-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK19-NEXT: shrdl %cl, %eax, %edx
+; FALLBACK19-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK19-NEXT: movl 72(%esp,%ebp), %esi
+; FALLBACK19-NEXT: movl 68(%esp,%ebp), %eax
+; FALLBACK19-NEXT: movl %eax, %edx
+; FALLBACK19-NEXT: shrdl %cl, %esi, %edx
+; FALLBACK19-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK19-NEXT: shrdl %cl, %eax, %edi
+; FALLBACK19-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK19-NEXT: movl 80(%esp,%ebp), %edi
+; FALLBACK19-NEXT: movl 76(%esp,%ebp), %eax
+; FALLBACK19-NEXT: movl %eax, %edx
+; FALLBACK19-NEXT: shrdl %cl, %edi, %edx
+; FALLBACK19-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK19-NEXT: shrdl %cl, %eax, %esi
+; FALLBACK19-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK19-NEXT: movl 88(%esp,%ebp), %ebx
+; FALLBACK19-NEXT: movl 84(%esp,%ebp), %eax
+; FALLBACK19-NEXT: movl %eax, %edx
+; FALLBACK19-NEXT: shrdl %cl, %ebx, %edx
+; FALLBACK19-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK19-NEXT: shrdl %cl, %eax, %edi
+; FALLBACK19-NEXT: movl %edi, (%esp) # 4-byte Spill
+; FALLBACK19-NEXT: movl 96(%esp,%ebp), %esi
+; FALLBACK19-NEXT: movl 92(%esp,%ebp), %eax
+; FALLBACK19-NEXT: movl %eax, %edx
+; FALLBACK19-NEXT: shrdl %cl, %esi, %edx
+; FALLBACK19-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK19-NEXT: shrdl %cl, %eax, %ebx
+; FALLBACK19-NEXT: movl 104(%esp,%ebp), %eax
+; FALLBACK19-NEXT: movl 100(%esp,%ebp), %edi
+; FALLBACK19-NEXT: movl %edi, %edx
+; FALLBACK19-NEXT: shrdl %cl, %eax, %edx
+; FALLBACK19-NEXT: shrdl %cl, %edi, %esi
+; FALLBACK19-NEXT: movl 48(%esp,%ebp), %edi
+; FALLBACK19-NEXT: movl 108(%esp,%ebp), %ebp
+; FALLBACK19-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK19-NEXT: shrdl %cl, %ebp, %eax
+; FALLBACK19-NEXT: movl {{[0-9]+}}(%esp), %ebp
+; FALLBACK19-NEXT: movl %eax, 56(%ebp)
+; FALLBACK19-NEXT: movl %esi, 48(%ebp)
+; FALLBACK19-NEXT: movl %edx, 52(%ebp)
+; FALLBACK19-NEXT: movl %ebx, 40(%ebp)
+; FALLBACK19-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK19-NEXT: movl %eax, 44(%ebp)
+; FALLBACK19-NEXT: movl (%esp), %eax # 4-byte Reload
+; FALLBACK19-NEXT: movl %eax, 32(%ebp)
+; FALLBACK19-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK19-NEXT: movl %eax, 36(%ebp)
+; FALLBACK19-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK19-NEXT: movl %eax, 24(%ebp)
+; FALLBACK19-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK19-NEXT: movl %eax, 28(%ebp)
+; FALLBACK19-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK19-NEXT: movl %eax, 16(%ebp)
+; FALLBACK19-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK19-NEXT: movl %eax, 20(%ebp)
+; FALLBACK19-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK19-NEXT: movl %eax, 8(%ebp)
+; FALLBACK19-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK19-NEXT: movl %eax, 12(%ebp)
+; FALLBACK19-NEXT: shrxl %ecx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK19-NEXT: # kill: def $cl killed $cl killed $ecx
+; FALLBACK19-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK19-NEXT: shrdl %cl, %edx, %edi
+; FALLBACK19-NEXT: movl %edi, (%ebp)
+; FALLBACK19-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK19-NEXT: movl %ecx, 4(%ebp)
+; FALLBACK19-NEXT: movl %eax, 60(%ebp)
+; FALLBACK19-NEXT: addl $188, %esp
+; FALLBACK19-NEXT: popl %esi
+; FALLBACK19-NEXT: popl %edi
+; FALLBACK19-NEXT: popl %ebx
+; FALLBACK19-NEXT: popl %ebp
+; FALLBACK19-NEXT: retl
+;
+; FALLBACK20-LABEL: lshr_64bytes:
+; FALLBACK20: # %bb.0:
+; FALLBACK20-NEXT: pushl %ebp
+; FALLBACK20-NEXT: pushl %ebx
+; FALLBACK20-NEXT: pushl %edi
+; FALLBACK20-NEXT: pushl %esi
+; FALLBACK20-NEXT: subl $204, %esp
+; FALLBACK20-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK20-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK20-NEXT: movups (%ecx), %xmm0
+; FALLBACK20-NEXT: movups 16(%ecx), %xmm1
+; FALLBACK20-NEXT: movups 32(%ecx), %xmm2
+; FALLBACK20-NEXT: movups 48(%ecx), %xmm3
+; FALLBACK20-NEXT: movl (%eax), %eax
+; FALLBACK20-NEXT: xorps %xmm4, %xmm4
+; FALLBACK20-NEXT: movaps %xmm4, {{[0-9]+}}(%esp)
+; FALLBACK20-NEXT: movaps %xmm4, {{[0-9]+}}(%esp)
+; FALLBACK20-NEXT: movaps %xmm4, {{[0-9]+}}(%esp)
+; FALLBACK20-NEXT: movaps %xmm4, {{[0-9]+}}(%esp)
+; FALLBACK20-NEXT: movaps %xmm3, {{[0-9]+}}(%esp)
+; FALLBACK20-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; FALLBACK20-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; FALLBACK20-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; FALLBACK20-NEXT: movl %eax, %esi
+; FALLBACK20-NEXT: andl $60, %esi
+; FALLBACK20-NEXT: movl 68(%esp,%esi), %edx
+; FALLBACK20-NEXT: shll $3, %eax
+; FALLBACK20-NEXT: andl $24, %eax
+; FALLBACK20-NEXT: movl %edx, %edi
+; FALLBACK20-NEXT: movl %eax, %ecx
+; FALLBACK20-NEXT: shrl %cl, %edi
+; FALLBACK20-NEXT: movl 72(%esp,%esi), %ecx
+; FALLBACK20-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK20-NEXT: leal (%ecx,%ecx), %ebx
+; FALLBACK20-NEXT: movb %al, %ch
+; FALLBACK20-NEXT: notb %ch
+; FALLBACK20-NEXT: movb %ch, %cl
+; FALLBACK20-NEXT: shll %cl, %ebx
+; FALLBACK20-NEXT: orl %edi, %ebx
+; FALLBACK20-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK20-NEXT: movl 64(%esp,%esi), %edi
+; FALLBACK20-NEXT: movb %al, %cl
+; FALLBACK20-NEXT: shrl %cl, %edi
+; FALLBACK20-NEXT: addl %edx, %edx
+; FALLBACK20-NEXT: movb %ch, %cl
+; FALLBACK20-NEXT: shll %cl, %edx
+; FALLBACK20-NEXT: orl %edi, %edx
+; FALLBACK20-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK20-NEXT: movl 76(%esp,%esi), %edx
+; FALLBACK20-NEXT: movl %edx, %ebp
+; FALLBACK20-NEXT: movb %al, %cl
+; FALLBACK20-NEXT: shrl %cl, %ebp
+; FALLBACK20-NEXT: movl 80(%esp,%esi), %edi
+; FALLBACK20-NEXT: leal (%edi,%edi), %ebx
+; FALLBACK20-NEXT: movb %ch, %cl
+; FALLBACK20-NEXT: shll %cl, %ebx
+; FALLBACK20-NEXT: orl %ebp, %ebx
+; FALLBACK20-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK20-NEXT: movb %al, %cl
+; FALLBACK20-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; FALLBACK20-NEXT: shrl %cl, %ebx
+; FALLBACK20-NEXT: addl %edx, %edx
+; FALLBACK20-NEXT: movb %ch, %cl
+; FALLBACK20-NEXT: shll %cl, %edx
+; FALLBACK20-NEXT: orl %ebx, %edx
+; FALLBACK20-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK20-NEXT: movl 84(%esp,%esi), %ebx
+; FALLBACK20-NEXT: movl %ebx, %ebp
+; FALLBACK20-NEXT: movl %eax, %edx
+; FALLBACK20-NEXT: movb %dl, %cl
+; FALLBACK20-NEXT: shrl %cl, %ebp
+; FALLBACK20-NEXT: movl 88(%esp,%esi), %eax
+; FALLBACK20-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK20-NEXT: addl %eax, %eax
+; FALLBACK20-NEXT: movb %ch, %cl
+; FALLBACK20-NEXT: shll %cl, %eax
+; FALLBACK20-NEXT: orl %ebp, %eax
+; FALLBACK20-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK20-NEXT: movb %dl, %cl
+; FALLBACK20-NEXT: shrl %cl, %edi
+; FALLBACK20-NEXT: addl %ebx, %ebx
+; FALLBACK20-NEXT: movb %ch, %cl
+; FALLBACK20-NEXT: movb %ch, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
+; FALLBACK20-NEXT: shll %cl, %ebx
+; FALLBACK20-NEXT: orl %edi, %ebx
+; FALLBACK20-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK20-NEXT: movl 92(%esp,%esi), %ebx
+; FALLBACK20-NEXT: movl %ebx, %ebp
+; FALLBACK20-NEXT: movb %dl, %cl
+; FALLBACK20-NEXT: shrl %cl, %ebp
+; FALLBACK20-NEXT: movl 96(%esp,%esi), %edi
+; FALLBACK20-NEXT: leal (%edi,%edi), %eax
+; FALLBACK20-NEXT: movb %ch, %cl
+; FALLBACK20-NEXT: shll %cl, %eax
+; FALLBACK20-NEXT: orl %ebp, %eax
+; FALLBACK20-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK20-NEXT: movb %dl, %cl
+; FALLBACK20-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK20-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK20-NEXT: shrl %cl, %eax
+; FALLBACK20-NEXT: addl %ebx, %ebx
+; FALLBACK20-NEXT: movb %ch, %cl
+; FALLBACK20-NEXT: shll %cl, %ebx
+; FALLBACK20-NEXT: orl %eax, %ebx
+; FALLBACK20-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK20-NEXT: movl 100(%esp,%esi), %ebx
+; FALLBACK20-NEXT: movl %ebx, %ebp
+; FALLBACK20-NEXT: movb %dl, %cl
+; FALLBACK20-NEXT: shrl %cl, %ebp
+; FALLBACK20-NEXT: movl 104(%esp,%esi), %edx
+; FALLBACK20-NEXT: leal (%edx,%edx), %eax
+; FALLBACK20-NEXT: movb %ch, %cl
+; FALLBACK20-NEXT: shll %cl, %eax
+; FALLBACK20-NEXT: orl %ebp, %eax
+; FALLBACK20-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK20-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK20-NEXT: movb %al, %cl
+; FALLBACK20-NEXT: shrl %cl, %edi
+; FALLBACK20-NEXT: addl %ebx, %ebx
+; FALLBACK20-NEXT: movb %ch, %cl
+; FALLBACK20-NEXT: shll %cl, %ebx
+; FALLBACK20-NEXT: orl %edi, %ebx
+; FALLBACK20-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK20-NEXT: movl 108(%esp,%esi), %edi
+; FALLBACK20-NEXT: movl %edi, %ebp
+; FALLBACK20-NEXT: movl %eax, %ecx
+; FALLBACK20-NEXT: shrl %cl, %ebp
+; FALLBACK20-NEXT: movl 112(%esp,%esi), %ecx
+; FALLBACK20-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK20-NEXT: leal (%ecx,%ecx), %ebx
+; FALLBACK20-NEXT: movb {{[-0-9]+}}(%e{{[sb]}}p), %ch # 1-byte Reload
+; FALLBACK20-NEXT: movb %ch, %cl
+; FALLBACK20-NEXT: shll %cl, %ebx
+; FALLBACK20-NEXT: orl %ebp, %ebx
+; FALLBACK20-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK20-NEXT: movb %al, %cl
+; FALLBACK20-NEXT: shrl %cl, %edx
+; FALLBACK20-NEXT: addl %edi, %edi
+; FALLBACK20-NEXT: movb %ch, %cl
+; FALLBACK20-NEXT: shll %cl, %edi
+; FALLBACK20-NEXT: orl %edx, %edi
+; FALLBACK20-NEXT: movl %esi, %edx
+; FALLBACK20-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK20-NEXT: movl 116(%esp,%esi), %esi
+; FALLBACK20-NEXT: movl %esi, %ebx
+; FALLBACK20-NEXT: movb %al, %cl
+; FALLBACK20-NEXT: shrl %cl, %ebx
+; FALLBACK20-NEXT: movl 120(%esp,%edx), %eax
+; FALLBACK20-NEXT: leal (%eax,%eax), %ebp
+; FALLBACK20-NEXT: movb %ch, %cl
+; FALLBACK20-NEXT: shll %cl, %ebp
+; FALLBACK20-NEXT: orl %ebx, %ebp
+; FALLBACK20-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK20-NEXT: movb %dl, %cl
+; FALLBACK20-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; FALLBACK20-NEXT: shrl %cl, %ebx
+; FALLBACK20-NEXT: addl %esi, %esi
+; FALLBACK20-NEXT: movb %ch, %cl
+; FALLBACK20-NEXT: shll %cl, %esi
+; FALLBACK20-NEXT: orl %ebx, %esi
+; FALLBACK20-NEXT: movb %dl, %cl
+; FALLBACK20-NEXT: shrl %cl, %eax
+; FALLBACK20-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK20-NEXT: movl 124(%esp,%edx), %ebx
+; FALLBACK20-NEXT: leal (%ebx,%ebx), %edx
+; FALLBACK20-NEXT: movb %ch, %cl
+; FALLBACK20-NEXT: shll %cl, %edx
+; FALLBACK20-NEXT: orl %eax, %edx
+; FALLBACK20-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK20-NEXT: # kill: def $cl killed $cl killed $ecx
+; FALLBACK20-NEXT: shrl %cl, %ebx
+; FALLBACK20-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK20-NEXT: movl %ebx, 60(%eax)
+; FALLBACK20-NEXT: movl %edx, 56(%eax)
+; FALLBACK20-NEXT: movl %esi, 48(%eax)
+; FALLBACK20-NEXT: movl %ebp, 52(%eax)
+; FALLBACK20-NEXT: movl %edi, 40(%eax)
+; FALLBACK20-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK20-NEXT: movl %ecx, 44(%eax)
+; FALLBACK20-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK20-NEXT: movl %ecx, 32(%eax)
+; FALLBACK20-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK20-NEXT: movl %ecx, 36(%eax)
+; FALLBACK20-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK20-NEXT: movl %ecx, 24(%eax)
+; FALLBACK20-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK20-NEXT: movl %ecx, 28(%eax)
+; FALLBACK20-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK20-NEXT: movl %ecx, 16(%eax)
+; FALLBACK20-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK20-NEXT: movl %ecx, 20(%eax)
+; FALLBACK20-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK20-NEXT: movl %ecx, 8(%eax)
+; FALLBACK20-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK20-NEXT: movl %ecx, 12(%eax)
+; FALLBACK20-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK20-NEXT: movl %ecx, (%eax)
+; FALLBACK20-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK20-NEXT: movl %ecx, 4(%eax)
+; FALLBACK20-NEXT: addl $204, %esp
+; FALLBACK20-NEXT: popl %esi
+; FALLBACK20-NEXT: popl %edi
+; FALLBACK20-NEXT: popl %ebx
+; FALLBACK20-NEXT: popl %ebp
+; FALLBACK20-NEXT: retl
+;
+; FALLBACK21-LABEL: lshr_64bytes:
+; FALLBACK21: # %bb.0:
+; FALLBACK21-NEXT: pushl %ebp
+; FALLBACK21-NEXT: pushl %ebx
+; FALLBACK21-NEXT: pushl %edi
+; FALLBACK21-NEXT: pushl %esi
+; FALLBACK21-NEXT: subl $188, %esp
+; FALLBACK21-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK21-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK21-NEXT: movups (%ecx), %xmm0
+; FALLBACK21-NEXT: movups 16(%ecx), %xmm1
+; FALLBACK21-NEXT: movups 32(%ecx), %xmm2
+; FALLBACK21-NEXT: movups 48(%ecx), %xmm3
+; FALLBACK21-NEXT: movl (%eax), %ecx
+; FALLBACK21-NEXT: xorps %xmm4, %xmm4
+; FALLBACK21-NEXT: movaps %xmm4, {{[0-9]+}}(%esp)
+; FALLBACK21-NEXT: movaps %xmm4, {{[0-9]+}}(%esp)
+; FALLBACK21-NEXT: movaps %xmm4, {{[0-9]+}}(%esp)
+; FALLBACK21-NEXT: movaps %xmm4, {{[0-9]+}}(%esp)
+; FALLBACK21-NEXT: movaps %xmm3, {{[0-9]+}}(%esp)
+; FALLBACK21-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; FALLBACK21-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; FALLBACK21-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; FALLBACK21-NEXT: movl %ecx, %ebp
+; FALLBACK21-NEXT: andl $60, %ebp
+; FALLBACK21-NEXT: movl 56(%esp,%ebp), %edx
+; FALLBACK21-NEXT: movl 52(%esp,%ebp), %eax
+; FALLBACK21-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK21-NEXT: shll $3, %ecx
+; FALLBACK21-NEXT: andl $24, %ecx
+; FALLBACK21-NEXT: shrdl %cl, %edx, %eax
+; FALLBACK21-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK21-NEXT: movl 64(%esp,%ebp), %edi
+; FALLBACK21-NEXT: movl 60(%esp,%ebp), %eax
+; FALLBACK21-NEXT: movl %eax, %esi
+; FALLBACK21-NEXT: shrdl %cl, %edi, %esi
+; FALLBACK21-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK21-NEXT: shrdl %cl, %eax, %edx
+; FALLBACK21-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK21-NEXT: movl 72(%esp,%ebp), %esi
+; FALLBACK21-NEXT: movl 68(%esp,%ebp), %eax
+; FALLBACK21-NEXT: movl %eax, %edx
+; FALLBACK21-NEXT: shrdl %cl, %esi, %edx
+; FALLBACK21-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK21-NEXT: shrdl %cl, %eax, %edi
+; FALLBACK21-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK21-NEXT: movl 80(%esp,%ebp), %edi
+; FALLBACK21-NEXT: movl 76(%esp,%ebp), %eax
+; FALLBACK21-NEXT: movl %eax, %edx
+; FALLBACK21-NEXT: shrdl %cl, %edi, %edx
+; FALLBACK21-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK21-NEXT: shrdl %cl, %eax, %esi
+; FALLBACK21-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK21-NEXT: movl 88(%esp,%ebp), %esi
+; FALLBACK21-NEXT: movl 84(%esp,%ebp), %eax
+; FALLBACK21-NEXT: movl %eax, %edx
+; FALLBACK21-NEXT: shrdl %cl, %esi, %edx
+; FALLBACK21-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK21-NEXT: movl %esi, %edx
+; FALLBACK21-NEXT: shrdl %cl, %eax, %edi
+; FALLBACK21-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK21-NEXT: movl 96(%esp,%ebp), %esi
+; FALLBACK21-NEXT: movl 92(%esp,%ebp), %eax
+; FALLBACK21-NEXT: movl %eax, %edi
+; FALLBACK21-NEXT: shrdl %cl, %esi, %edi
+; FALLBACK21-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK21-NEXT: shrdl %cl, %eax, %edx
+; FALLBACK21-NEXT: movl %edx, (%esp) # 4-byte Spill
+; FALLBACK21-NEXT: movl 104(%esp,%ebp), %edx
+; FALLBACK21-NEXT: movl 100(%esp,%ebp), %eax
+; FALLBACK21-NEXT: movl %eax, %edi
+; FALLBACK21-NEXT: shrdl %cl, %edx, %edi
+; FALLBACK21-NEXT: shrdl %cl, %eax, %esi
+; FALLBACK21-NEXT: movl 48(%esp,%ebp), %ebx
+; FALLBACK21-NEXT: movl 108(%esp,%ebp), %eax
+; FALLBACK21-NEXT: shrdl %cl, %eax, %edx
+; FALLBACK21-NEXT: movl {{[0-9]+}}(%esp), %ebp
+; FALLBACK21-NEXT: movl %edx, 56(%ebp)
+; FALLBACK21-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK21-NEXT: shrdl %cl, %edx, %ebx
+; FALLBACK21-NEXT: # kill: def $cl killed $cl killed $ecx
+; FALLBACK21-NEXT: shrl %cl, %eax
+; FALLBACK21-NEXT: movl %eax, 60(%ebp)
+; FALLBACK21-NEXT: movl %esi, 48(%ebp)
+; FALLBACK21-NEXT: movl %edi, 52(%ebp)
+; FALLBACK21-NEXT: movl (%esp), %eax # 4-byte Reload
+; FALLBACK21-NEXT: movl %eax, 40(%ebp)
+; FALLBACK21-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK21-NEXT: movl %eax, 44(%ebp)
+; FALLBACK21-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK21-NEXT: movl %eax, 32(%ebp)
+; FALLBACK21-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK21-NEXT: movl %eax, 36(%ebp)
+; FALLBACK21-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK21-NEXT: movl %eax, 24(%ebp)
+; FALLBACK21-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK21-NEXT: movl %eax, 28(%ebp)
+; FALLBACK21-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK21-NEXT: movl %eax, 16(%ebp)
+; FALLBACK21-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK21-NEXT: movl %eax, 20(%ebp)
+; FALLBACK21-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK21-NEXT: movl %eax, 8(%ebp)
+; FALLBACK21-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK21-NEXT: movl %eax, 12(%ebp)
+; FALLBACK21-NEXT: movl %ebx, (%ebp)
+; FALLBACK21-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK21-NEXT: movl %eax, 4(%ebp)
+; FALLBACK21-NEXT: addl $188, %esp
+; FALLBACK21-NEXT: popl %esi
+; FALLBACK21-NEXT: popl %edi
+; FALLBACK21-NEXT: popl %ebx
+; FALLBACK21-NEXT: popl %ebp
+; FALLBACK21-NEXT: retl
+;
+; FALLBACK22-LABEL: lshr_64bytes:
+; FALLBACK22: # %bb.0:
+; FALLBACK22-NEXT: pushl %ebp
+; FALLBACK22-NEXT: pushl %ebx
+; FALLBACK22-NEXT: pushl %edi
+; FALLBACK22-NEXT: pushl %esi
+; FALLBACK22-NEXT: subl $204, %esp
+; FALLBACK22-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK22-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK22-NEXT: movups (%ecx), %xmm0
+; FALLBACK22-NEXT: movups 16(%ecx), %xmm1
+; FALLBACK22-NEXT: movups 32(%ecx), %xmm2
+; FALLBACK22-NEXT: movups 48(%ecx), %xmm3
+; FALLBACK22-NEXT: movl (%eax), %ecx
+; FALLBACK22-NEXT: xorps %xmm4, %xmm4
+; FALLBACK22-NEXT: movaps %xmm4, {{[0-9]+}}(%esp)
+; FALLBACK22-NEXT: movaps %xmm4, {{[0-9]+}}(%esp)
+; FALLBACK22-NEXT: movaps %xmm4, {{[0-9]+}}(%esp)
+; FALLBACK22-NEXT: movaps %xmm4, {{[0-9]+}}(%esp)
+; FALLBACK22-NEXT: movaps %xmm3, {{[0-9]+}}(%esp)
+; FALLBACK22-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; FALLBACK22-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; FALLBACK22-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; FALLBACK22-NEXT: leal (,%ecx,8), %edx
+; FALLBACK22-NEXT: andl $24, %edx
+; FALLBACK22-NEXT: andl $60, %ecx
+; FALLBACK22-NEXT: movl 68(%esp,%ecx), %esi
+; FALLBACK22-NEXT: movl 72(%esp,%ecx), %eax
+; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: shrxl %edx, %esi, %edi
+; FALLBACK22-NEXT: movl %edx, %ebx
+; FALLBACK22-NEXT: notb %bl
+; FALLBACK22-NEXT: leal (%eax,%eax), %ebp
+; FALLBACK22-NEXT: shlxl %ebx, %ebp, %ebp
+; FALLBACK22-NEXT: orl %edi, %ebp
+; FALLBACK22-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: shrxl %edx, 64(%esp,%ecx), %edi
+; FALLBACK22-NEXT: addl %esi, %esi
+; FALLBACK22-NEXT: shlxl %ebx, %esi, %esi
+; FALLBACK22-NEXT: orl %edi, %esi
+; FALLBACK22-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: movl 80(%esp,%ecx), %esi
+; FALLBACK22-NEXT: leal (%esi,%esi), %edi
+; FALLBACK22-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK22-NEXT: movl 76(%esp,%ecx), %edi
+; FALLBACK22-NEXT: shrxl %edx, %edi, %ebp
+; FALLBACK22-NEXT: orl %ebp, %eax
+; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK22-NEXT: addl %edi, %edi
+; FALLBACK22-NEXT: shlxl %ebx, %edi, %edi
+; FALLBACK22-NEXT: orl %eax, %edi
+; FALLBACK22-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: movl 88(%esp,%ecx), %eax
+; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: leal (%eax,%eax), %edi
+; FALLBACK22-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK22-NEXT: movl 84(%esp,%ecx), %edi
+; FALLBACK22-NEXT: shrxl %edx, %edi, %ebp
+; FALLBACK22-NEXT: orl %ebp, %eax
+; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: shrxl %edx, %esi, %esi
+; FALLBACK22-NEXT: addl %edi, %edi
+; FALLBACK22-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK22-NEXT: orl %esi, %eax
+; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: movl 96(%esp,%ecx), %esi
+; FALLBACK22-NEXT: leal (%esi,%esi), %edi
+; FALLBACK22-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK22-NEXT: movl 92(%esp,%ecx), %edi
+; FALLBACK22-NEXT: shrxl %edx, %edi, %ebp
+; FALLBACK22-NEXT: orl %ebp, %eax
+; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK22-NEXT: addl %edi, %edi
+; FALLBACK22-NEXT: shlxl %ebx, %edi, %edi
+; FALLBACK22-NEXT: orl %eax, %edi
+; FALLBACK22-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: movl 104(%esp,%ecx), %eax
+; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: leal (%eax,%eax), %edi
+; FALLBACK22-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK22-NEXT: movl 100(%esp,%ecx), %edi
+; FALLBACK22-NEXT: shrxl %edx, %edi, %ebp
+; FALLBACK22-NEXT: orl %ebp, %eax
+; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: shrxl %edx, %esi, %esi
+; FALLBACK22-NEXT: addl %edi, %edi
+; FALLBACK22-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK22-NEXT: orl %esi, %eax
+; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: movl %ecx, %eax
+; FALLBACK22-NEXT: movl 112(%esp,%ecx), %ecx
+; FALLBACK22-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: leal (%ecx,%ecx), %esi
+; FALLBACK22-NEXT: shlxl %ebx, %esi, %ecx
+; FALLBACK22-NEXT: movl 108(%esp,%eax), %esi
+; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: shrxl %edx, %esi, %ebp
+; FALLBACK22-NEXT: orl %ebp, %ecx
+; FALLBACK22-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; FALLBACK22-NEXT: addl %esi, %esi
+; FALLBACK22-NEXT: shlxl %ebx, %esi, %esi
+; FALLBACK22-NEXT: orl %ecx, %esi
+; FALLBACK22-NEXT: movl 120(%esp,%eax), %ebp
+; FALLBACK22-NEXT: leal (%ebp,%ebp), %ecx
+; FALLBACK22-NEXT: shlxl %ebx, %ecx, %ecx
+; FALLBACK22-NEXT: movl 116(%esp,%eax), %eax
+; FALLBACK22-NEXT: shrxl %edx, %eax, %edi
+; FALLBACK22-NEXT: orl %edi, %ecx
+; FALLBACK22-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; FALLBACK22-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: addl %eax, %eax
+; FALLBACK22-NEXT: shlxl %ebx, %eax, %edi
+; FALLBACK22-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; FALLBACK22-NEXT: shrxl %edx, %ebp, %eax
+; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; FALLBACK22-NEXT: movl 124(%esp,%ebp), %ebp
+; FALLBACK22-NEXT: shrxl %edx, %ebp, %edx
+; FALLBACK22-NEXT: addl %ebp, %ebp
+; FALLBACK22-NEXT: shlxl %ebx, %ebp, %ebx
+; FALLBACK22-NEXT: orl %eax, %ebx
+; FALLBACK22-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK22-NEXT: movl %edx, 60(%eax)
+; FALLBACK22-NEXT: movl %ebx, 56(%eax)
+; FALLBACK22-NEXT: movl %edi, 48(%eax)
+; FALLBACK22-NEXT: movl %ecx, 52(%eax)
+; FALLBACK22-NEXT: movl %esi, 40(%eax)
+; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK22-NEXT: movl %ecx, 44(%eax)
+; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK22-NEXT: movl %ecx, 32(%eax)
+; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK22-NEXT: movl %ecx, 36(%eax)
+; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK22-NEXT: movl %ecx, 24(%eax)
+; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK22-NEXT: movl %ecx, 28(%eax)
+; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK22-NEXT: movl %ecx, 16(%eax)
+; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK22-NEXT: movl %ecx, 20(%eax)
+; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK22-NEXT: movl %ecx, 8(%eax)
+; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK22-NEXT: movl %ecx, 12(%eax)
+; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK22-NEXT: movl %ecx, (%eax)
+; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK22-NEXT: movl %ecx, 4(%eax)
+; FALLBACK22-NEXT: addl $204, %esp
+; FALLBACK22-NEXT: popl %esi
+; FALLBACK22-NEXT: popl %edi
+; FALLBACK22-NEXT: popl %ebx
+; FALLBACK22-NEXT: popl %ebp
+; FALLBACK22-NEXT: retl
+;
+; FALLBACK23-LABEL: lshr_64bytes:
+; FALLBACK23: # %bb.0:
+; FALLBACK23-NEXT: pushl %ebp
+; FALLBACK23-NEXT: pushl %ebx
+; FALLBACK23-NEXT: pushl %edi
+; FALLBACK23-NEXT: pushl %esi
+; FALLBACK23-NEXT: subl $188, %esp
+; FALLBACK23-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK23-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK23-NEXT: movups (%ecx), %xmm0
+; FALLBACK23-NEXT: movups 16(%ecx), %xmm1
+; FALLBACK23-NEXT: movups 32(%ecx), %xmm2
+; FALLBACK23-NEXT: movups 48(%ecx), %xmm3
+; FALLBACK23-NEXT: movl (%eax), %ecx
+; FALLBACK23-NEXT: xorps %xmm4, %xmm4
+; FALLBACK23-NEXT: movaps %xmm4, {{[0-9]+}}(%esp)
+; FALLBACK23-NEXT: movaps %xmm4, {{[0-9]+}}(%esp)
+; FALLBACK23-NEXT: movaps %xmm4, {{[0-9]+}}(%esp)
+; FALLBACK23-NEXT: movaps %xmm4, {{[0-9]+}}(%esp)
+; FALLBACK23-NEXT: movaps %xmm3, {{[0-9]+}}(%esp)
+; FALLBACK23-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; FALLBACK23-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; FALLBACK23-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; FALLBACK23-NEXT: movl %ecx, %ebp
+; FALLBACK23-NEXT: andl $60, %ebp
+; FALLBACK23-NEXT: movl 56(%esp,%ebp), %edx
+; FALLBACK23-NEXT: movl 52(%esp,%ebp), %eax
+; FALLBACK23-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK23-NEXT: shll $3, %ecx
+; FALLBACK23-NEXT: andl $24, %ecx
+; FALLBACK23-NEXT: shrdl %cl, %edx, %eax
+; FALLBACK23-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK23-NEXT: movl 64(%esp,%ebp), %edi
+; FALLBACK23-NEXT: movl 60(%esp,%ebp), %eax
+; FALLBACK23-NEXT: movl %eax, %esi
+; FALLBACK23-NEXT: shrdl %cl, %edi, %esi
+; FALLBACK23-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK23-NEXT: shrdl %cl, %eax, %edx
+; FALLBACK23-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK23-NEXT: movl 72(%esp,%ebp), %esi
+; FALLBACK23-NEXT: movl 68(%esp,%ebp), %eax
+; FALLBACK23-NEXT: movl %eax, %edx
+; FALLBACK23-NEXT: shrdl %cl, %esi, %edx
+; FALLBACK23-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK23-NEXT: shrdl %cl, %eax, %edi
+; FALLBACK23-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK23-NEXT: movl 80(%esp,%ebp), %edi
+; FALLBACK23-NEXT: movl 76(%esp,%ebp), %eax
+; FALLBACK23-NEXT: movl %eax, %edx
+; FALLBACK23-NEXT: shrdl %cl, %edi, %edx
+; FALLBACK23-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK23-NEXT: shrdl %cl, %eax, %esi
+; FALLBACK23-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK23-NEXT: movl 88(%esp,%ebp), %ebx
+; FALLBACK23-NEXT: movl 84(%esp,%ebp), %eax
+; FALLBACK23-NEXT: movl %eax, %edx
+; FALLBACK23-NEXT: shrdl %cl, %ebx, %edx
+; FALLBACK23-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK23-NEXT: shrdl %cl, %eax, %edi
+; FALLBACK23-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK23-NEXT: movl 96(%esp,%ebp), %esi
+; FALLBACK23-NEXT: movl 92(%esp,%ebp), %eax
+; FALLBACK23-NEXT: movl %eax, %edx
+; FALLBACK23-NEXT: shrdl %cl, %esi, %edx
+; FALLBACK23-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK23-NEXT: shrdl %cl, %eax, %ebx
+; FALLBACK23-NEXT: movl 104(%esp,%ebp), %eax
+; FALLBACK23-NEXT: movl 100(%esp,%ebp), %edi
+; FALLBACK23-NEXT: movl %edi, %edx
+; FALLBACK23-NEXT: shrdl %cl, %eax, %edx
+; FALLBACK23-NEXT: shrdl %cl, %edi, %esi
+; FALLBACK23-NEXT: movl 48(%esp,%ebp), %edi
+; FALLBACK23-NEXT: movl 108(%esp,%ebp), %ebp
+; FALLBACK23-NEXT: movl %ebp, (%esp) # 4-byte Spill
+; FALLBACK23-NEXT: shrdl %cl, %ebp, %eax
+; FALLBACK23-NEXT: movl {{[0-9]+}}(%esp), %ebp
+; FALLBACK23-NEXT: movl %eax, 56(%ebp)
+; FALLBACK23-NEXT: movl %esi, 48(%ebp)
+; FALLBACK23-NEXT: movl %edx, 52(%ebp)
+; FALLBACK23-NEXT: movl %ebx, 40(%ebp)
+; FALLBACK23-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK23-NEXT: movl %eax, 44(%ebp)
+; FALLBACK23-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK23-NEXT: movl %eax, 32(%ebp)
+; FALLBACK23-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK23-NEXT: movl %eax, 36(%ebp)
+; FALLBACK23-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK23-NEXT: movl %eax, 24(%ebp)
+; FALLBACK23-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK23-NEXT: movl %eax, 28(%ebp)
+; FALLBACK23-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK23-NEXT: movl %eax, 16(%ebp)
+; FALLBACK23-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK23-NEXT: movl %eax, 20(%ebp)
+; FALLBACK23-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK23-NEXT: movl %eax, 8(%ebp)
+; FALLBACK23-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK23-NEXT: movl %eax, 12(%ebp)
+; FALLBACK23-NEXT: shrxl %ecx, (%esp), %eax # 4-byte Folded Reload
+; FALLBACK23-NEXT: # kill: def $cl killed $cl killed $ecx
+; FALLBACK23-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK23-NEXT: shrdl %cl, %edx, %edi
+; FALLBACK23-NEXT: movl %edi, (%ebp)
+; FALLBACK23-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK23-NEXT: movl %ecx, 4(%ebp)
+; FALLBACK23-NEXT: movl %eax, 60(%ebp)
+; FALLBACK23-NEXT: addl $188, %esp
+; FALLBACK23-NEXT: popl %esi
+; FALLBACK23-NEXT: popl %edi
+; FALLBACK23-NEXT: popl %ebx
+; FALLBACK23-NEXT: popl %ebp
+; FALLBACK23-NEXT: retl
+;
+; FALLBACK24-LABEL: lshr_64bytes:
+; FALLBACK24: # %bb.0:
+; FALLBACK24-NEXT: pushl %ebp
+; FALLBACK24-NEXT: pushl %ebx
+; FALLBACK24-NEXT: pushl %edi
+; FALLBACK24-NEXT: pushl %esi
+; FALLBACK24-NEXT: subl $204, %esp
+; FALLBACK24-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK24-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK24-NEXT: vmovups (%ecx), %ymm0
+; FALLBACK24-NEXT: vmovups 32(%ecx), %ymm1
+; FALLBACK24-NEXT: movl (%eax), %ecx
+; FALLBACK24-NEXT: vxorps %xmm2, %xmm2, %xmm2
+; FALLBACK24-NEXT: vmovups %ymm2, {{[0-9]+}}(%esp)
+; FALLBACK24-NEXT: vmovups %ymm2, {{[0-9]+}}(%esp)
+; FALLBACK24-NEXT: vmovups %ymm1, {{[0-9]+}}(%esp)
+; FALLBACK24-NEXT: vmovups %ymm0, {{[0-9]+}}(%esp)
+; FALLBACK24-NEXT: movl %ecx, %esi
+; FALLBACK24-NEXT: andl $60, %esi
+; FALLBACK24-NEXT: movl 68(%esp,%esi), %edx
+; FALLBACK24-NEXT: shll $3, %ecx
+; FALLBACK24-NEXT: andl $24, %ecx
+; FALLBACK24-NEXT: movl %edx, %edi
+; FALLBACK24-NEXT: shrl %cl, %edi
+; FALLBACK24-NEXT: movl 72(%esp,%esi), %eax
+; FALLBACK24-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK24-NEXT: leal (%eax,%eax), %ebx
+; FALLBACK24-NEXT: movl %ecx, %ebp
+; FALLBACK24-NEXT: movb %cl, %ch
+; FALLBACK24-NEXT: notb %ch
+; FALLBACK24-NEXT: movb %ch, %cl
+; FALLBACK24-NEXT: shll %cl, %ebx
+; FALLBACK24-NEXT: orl %edi, %ebx
+; FALLBACK24-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK24-NEXT: movl 64(%esp,%esi), %edi
+; FALLBACK24-NEXT: movl %ebp, %eax
+; FALLBACK24-NEXT: movb %al, %cl
+; FALLBACK24-NEXT: shrl %cl, %edi
+; FALLBACK24-NEXT: addl %edx, %edx
+; FALLBACK24-NEXT: movb %ch, %cl
+; FALLBACK24-NEXT: shll %cl, %edx
+; FALLBACK24-NEXT: orl %edi, %edx
+; FALLBACK24-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK24-NEXT: movl 76(%esp,%esi), %edx
+; FALLBACK24-NEXT: movl %edx, %ebp
+; FALLBACK24-NEXT: movb %al, %cl
+; FALLBACK24-NEXT: shrl %cl, %ebp
+; FALLBACK24-NEXT: movl 80(%esp,%esi), %edi
+; FALLBACK24-NEXT: leal (%edi,%edi), %ebx
+; FALLBACK24-NEXT: movb %ch, %cl
+; FALLBACK24-NEXT: shll %cl, %ebx
+; FALLBACK24-NEXT: orl %ebp, %ebx
+; FALLBACK24-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK24-NEXT: movb %al, %cl
+; FALLBACK24-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; FALLBACK24-NEXT: shrl %cl, %ebx
+; FALLBACK24-NEXT: addl %edx, %edx
+; FALLBACK24-NEXT: movb %ch, %cl
+; FALLBACK24-NEXT: shll %cl, %edx
+; FALLBACK24-NEXT: orl %ebx, %edx
+; FALLBACK24-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK24-NEXT: movl 84(%esp,%esi), %ebx
+; FALLBACK24-NEXT: movl %ebx, %ebp
+; FALLBACK24-NEXT: movl %eax, %edx
+; FALLBACK24-NEXT: movb %dl, %cl
+; FALLBACK24-NEXT: shrl %cl, %ebp
+; FALLBACK24-NEXT: movl 88(%esp,%esi), %eax
+; FALLBACK24-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK24-NEXT: addl %eax, %eax
+; FALLBACK24-NEXT: movb %ch, %cl
+; FALLBACK24-NEXT: shll %cl, %eax
+; FALLBACK24-NEXT: orl %ebp, %eax
+; FALLBACK24-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK24-NEXT: movb %dl, %cl
+; FALLBACK24-NEXT: shrl %cl, %edi
+; FALLBACK24-NEXT: addl %ebx, %ebx
+; FALLBACK24-NEXT: movb %ch, %cl
+; FALLBACK24-NEXT: movb %ch, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
+; FALLBACK24-NEXT: shll %cl, %ebx
+; FALLBACK24-NEXT: orl %edi, %ebx
+; FALLBACK24-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK24-NEXT: movl 92(%esp,%esi), %ebx
+; FALLBACK24-NEXT: movl %ebx, %ebp
+; FALLBACK24-NEXT: movb %dl, %cl
+; FALLBACK24-NEXT: shrl %cl, %ebp
+; FALLBACK24-NEXT: movl 96(%esp,%esi), %edi
+; FALLBACK24-NEXT: leal (%edi,%edi), %eax
+; FALLBACK24-NEXT: movb %ch, %cl
+; FALLBACK24-NEXT: shll %cl, %eax
+; FALLBACK24-NEXT: orl %ebp, %eax
+; FALLBACK24-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK24-NEXT: movb %dl, %cl
+; FALLBACK24-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK24-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK24-NEXT: shrl %cl, %eax
+; FALLBACK24-NEXT: addl %ebx, %ebx
+; FALLBACK24-NEXT: movb %ch, %cl
+; FALLBACK24-NEXT: shll %cl, %ebx
+; FALLBACK24-NEXT: orl %eax, %ebx
+; FALLBACK24-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK24-NEXT: movl 100(%esp,%esi), %ebx
+; FALLBACK24-NEXT: movl %ebx, %ebp
+; FALLBACK24-NEXT: movb %dl, %cl
+; FALLBACK24-NEXT: shrl %cl, %ebp
+; FALLBACK24-NEXT: movl 104(%esp,%esi), %edx
+; FALLBACK24-NEXT: leal (%edx,%edx), %eax
+; FALLBACK24-NEXT: movb %ch, %cl
+; FALLBACK24-NEXT: shll %cl, %eax
+; FALLBACK24-NEXT: orl %ebp, %eax
+; FALLBACK24-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK24-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK24-NEXT: movb %al, %cl
+; FALLBACK24-NEXT: shrl %cl, %edi
+; FALLBACK24-NEXT: addl %ebx, %ebx
+; FALLBACK24-NEXT: movb %ch, %cl
+; FALLBACK24-NEXT: shll %cl, %ebx
+; FALLBACK24-NEXT: orl %edi, %ebx
+; FALLBACK24-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK24-NEXT: movl 108(%esp,%esi), %edi
+; FALLBACK24-NEXT: movl %edi, %ebp
+; FALLBACK24-NEXT: movl %eax, %ecx
+; FALLBACK24-NEXT: shrl %cl, %ebp
+; FALLBACK24-NEXT: movl 112(%esp,%esi), %ecx
+; FALLBACK24-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK24-NEXT: leal (%ecx,%ecx), %ebx
+; FALLBACK24-NEXT: movb {{[-0-9]+}}(%e{{[sb]}}p), %ch # 1-byte Reload
+; FALLBACK24-NEXT: movb %ch, %cl
+; FALLBACK24-NEXT: shll %cl, %ebx
+; FALLBACK24-NEXT: orl %ebp, %ebx
+; FALLBACK24-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK24-NEXT: movb %al, %cl
+; FALLBACK24-NEXT: shrl %cl, %edx
+; FALLBACK24-NEXT: addl %edi, %edi
+; FALLBACK24-NEXT: movb %ch, %cl
+; FALLBACK24-NEXT: shll %cl, %edi
+; FALLBACK24-NEXT: orl %edx, %edi
+; FALLBACK24-NEXT: movl %esi, %edx
+; FALLBACK24-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK24-NEXT: movl 116(%esp,%esi), %esi
+; FALLBACK24-NEXT: movl %esi, %ebx
+; FALLBACK24-NEXT: movb %al, %cl
+; FALLBACK24-NEXT: shrl %cl, %ebx
+; FALLBACK24-NEXT: movl 120(%esp,%edx), %eax
+; FALLBACK24-NEXT: leal (%eax,%eax), %ebp
+; FALLBACK24-NEXT: movb %ch, %cl
+; FALLBACK24-NEXT: shll %cl, %ebp
+; FALLBACK24-NEXT: orl %ebx, %ebp
+; FALLBACK24-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK24-NEXT: movb %dl, %cl
+; FALLBACK24-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; FALLBACK24-NEXT: shrl %cl, %ebx
+; FALLBACK24-NEXT: addl %esi, %esi
+; FALLBACK24-NEXT: movb %ch, %cl
+; FALLBACK24-NEXT: shll %cl, %esi
+; FALLBACK24-NEXT: orl %ebx, %esi
+; FALLBACK24-NEXT: movb %dl, %cl
+; FALLBACK24-NEXT: shrl %cl, %eax
+; FALLBACK24-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK24-NEXT: movl 124(%esp,%edx), %ebx
+; FALLBACK24-NEXT: leal (%ebx,%ebx), %edx
+; FALLBACK24-NEXT: movb %ch, %cl
+; FALLBACK24-NEXT: shll %cl, %edx
+; FALLBACK24-NEXT: orl %eax, %edx
+; FALLBACK24-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK24-NEXT: # kill: def $cl killed $cl killed $ecx
+; FALLBACK24-NEXT: shrl %cl, %ebx
+; FALLBACK24-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK24-NEXT: movl %ebx, 60(%eax)
+; FALLBACK24-NEXT: movl %edx, 56(%eax)
+; FALLBACK24-NEXT: movl %esi, 48(%eax)
+; FALLBACK24-NEXT: movl %ebp, 52(%eax)
+; FALLBACK24-NEXT: movl %edi, 40(%eax)
+; FALLBACK24-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK24-NEXT: movl %ecx, 44(%eax)
+; FALLBACK24-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK24-NEXT: movl %ecx, 32(%eax)
+; FALLBACK24-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK24-NEXT: movl %ecx, 36(%eax)
+; FALLBACK24-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK24-NEXT: movl %ecx, 24(%eax)
+; FALLBACK24-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK24-NEXT: movl %ecx, 28(%eax)
+; FALLBACK24-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK24-NEXT: movl %ecx, 16(%eax)
+; FALLBACK24-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK24-NEXT: movl %ecx, 20(%eax)
+; FALLBACK24-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK24-NEXT: movl %ecx, 8(%eax)
+; FALLBACK24-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK24-NEXT: movl %ecx, 12(%eax)
+; FALLBACK24-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK24-NEXT: movl %ecx, (%eax)
+; FALLBACK24-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK24-NEXT: movl %ecx, 4(%eax)
+; FALLBACK24-NEXT: addl $204, %esp
+; FALLBACK24-NEXT: popl %esi
+; FALLBACK24-NEXT: popl %edi
+; FALLBACK24-NEXT: popl %ebx
+; FALLBACK24-NEXT: popl %ebp
+; FALLBACK24-NEXT: vzeroupper
+; FALLBACK24-NEXT: retl
+;
+; FALLBACK25-LABEL: lshr_64bytes:
+; FALLBACK25: # %bb.0:
+; FALLBACK25-NEXT: pushl %ebp
+; FALLBACK25-NEXT: pushl %ebx
+; FALLBACK25-NEXT: pushl %edi
+; FALLBACK25-NEXT: pushl %esi
+; FALLBACK25-NEXT: subl $188, %esp
+; FALLBACK25-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK25-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK25-NEXT: vmovups (%ecx), %ymm0
+; FALLBACK25-NEXT: vmovups 32(%ecx), %ymm1
+; FALLBACK25-NEXT: movl (%eax), %ecx
+; FALLBACK25-NEXT: vxorps %xmm2, %xmm2, %xmm2
+; FALLBACK25-NEXT: vmovups %ymm2, {{[0-9]+}}(%esp)
+; FALLBACK25-NEXT: vmovups %ymm2, {{[0-9]+}}(%esp)
+; FALLBACK25-NEXT: vmovups %ymm1, {{[0-9]+}}(%esp)
+; FALLBACK25-NEXT: vmovups %ymm0, {{[0-9]+}}(%esp)
+; FALLBACK25-NEXT: movl %ecx, %ebp
+; FALLBACK25-NEXT: andl $60, %ebp
+; FALLBACK25-NEXT: movl 56(%esp,%ebp), %edx
+; FALLBACK25-NEXT: movl 52(%esp,%ebp), %eax
+; FALLBACK25-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK25-NEXT: shll $3, %ecx
+; FALLBACK25-NEXT: andl $24, %ecx
+; FALLBACK25-NEXT: shrdl %cl, %edx, %eax
+; FALLBACK25-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK25-NEXT: movl 64(%esp,%ebp), %edi
+; FALLBACK25-NEXT: movl 60(%esp,%ebp), %eax
+; FALLBACK25-NEXT: movl %eax, %esi
+; FALLBACK25-NEXT: shrdl %cl, %edi, %esi
+; FALLBACK25-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK25-NEXT: shrdl %cl, %eax, %edx
+; FALLBACK25-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK25-NEXT: movl 72(%esp,%ebp), %esi
+; FALLBACK25-NEXT: movl 68(%esp,%ebp), %eax
+; FALLBACK25-NEXT: movl %eax, %edx
+; FALLBACK25-NEXT: shrdl %cl, %esi, %edx
+; FALLBACK25-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK25-NEXT: shrdl %cl, %eax, %edi
+; FALLBACK25-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK25-NEXT: movl 80(%esp,%ebp), %edi
+; FALLBACK25-NEXT: movl 76(%esp,%ebp), %eax
+; FALLBACK25-NEXT: movl %eax, %edx
+; FALLBACK25-NEXT: shrdl %cl, %edi, %edx
+; FALLBACK25-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK25-NEXT: shrdl %cl, %eax, %esi
+; FALLBACK25-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK25-NEXT: movl 88(%esp,%ebp), %esi
+; FALLBACK25-NEXT: movl 84(%esp,%ebp), %eax
+; FALLBACK25-NEXT: movl %eax, %edx
+; FALLBACK25-NEXT: shrdl %cl, %esi, %edx
+; FALLBACK25-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK25-NEXT: movl %esi, %edx
+; FALLBACK25-NEXT: shrdl %cl, %eax, %edi
+; FALLBACK25-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK25-NEXT: movl 96(%esp,%ebp), %esi
+; FALLBACK25-NEXT: movl 92(%esp,%ebp), %eax
+; FALLBACK25-NEXT: movl %eax, %edi
+; FALLBACK25-NEXT: shrdl %cl, %esi, %edi
+; FALLBACK25-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK25-NEXT: shrdl %cl, %eax, %edx
+; FALLBACK25-NEXT: movl %edx, (%esp) # 4-byte Spill
+; FALLBACK25-NEXT: movl 104(%esp,%ebp), %edx
+; FALLBACK25-NEXT: movl 100(%esp,%ebp), %eax
+; FALLBACK25-NEXT: movl %eax, %edi
+; FALLBACK25-NEXT: shrdl %cl, %edx, %edi
+; FALLBACK25-NEXT: shrdl %cl, %eax, %esi
+; FALLBACK25-NEXT: movl 48(%esp,%ebp), %ebx
+; FALLBACK25-NEXT: movl 108(%esp,%ebp), %eax
+; FALLBACK25-NEXT: shrdl %cl, %eax, %edx
+; FALLBACK25-NEXT: movl {{[0-9]+}}(%esp), %ebp
+; FALLBACK25-NEXT: movl %edx, 56(%ebp)
+; FALLBACK25-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK25-NEXT: shrdl %cl, %edx, %ebx
+; FALLBACK25-NEXT: # kill: def $cl killed $cl killed $ecx
+; FALLBACK25-NEXT: shrl %cl, %eax
+; FALLBACK25-NEXT: movl %eax, 60(%ebp)
+; FALLBACK25-NEXT: movl %esi, 48(%ebp)
+; FALLBACK25-NEXT: movl %edi, 52(%ebp)
+; FALLBACK25-NEXT: movl (%esp), %eax # 4-byte Reload
+; FALLBACK25-NEXT: movl %eax, 40(%ebp)
+; FALLBACK25-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK25-NEXT: movl %eax, 44(%ebp)
+; FALLBACK25-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK25-NEXT: movl %eax, 32(%ebp)
+; FALLBACK25-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK25-NEXT: movl %eax, 36(%ebp)
+; FALLBACK25-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK25-NEXT: movl %eax, 24(%ebp)
+; FALLBACK25-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK25-NEXT: movl %eax, 28(%ebp)
+; FALLBACK25-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK25-NEXT: movl %eax, 16(%ebp)
+; FALLBACK25-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK25-NEXT: movl %eax, 20(%ebp)
+; FALLBACK25-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK25-NEXT: movl %eax, 8(%ebp)
+; FALLBACK25-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK25-NEXT: movl %eax, 12(%ebp)
+; FALLBACK25-NEXT: movl %ebx, (%ebp)
+; FALLBACK25-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK25-NEXT: movl %eax, 4(%ebp)
+; FALLBACK25-NEXT: addl $188, %esp
+; FALLBACK25-NEXT: popl %esi
+; FALLBACK25-NEXT: popl %edi
+; FALLBACK25-NEXT: popl %ebx
+; FALLBACK25-NEXT: popl %ebp
+; FALLBACK25-NEXT: vzeroupper
+; FALLBACK25-NEXT: retl
+;
+; FALLBACK26-LABEL: lshr_64bytes:
+; FALLBACK26: # %bb.0:
+; FALLBACK26-NEXT: pushl %ebp
+; FALLBACK26-NEXT: pushl %ebx
+; FALLBACK26-NEXT: pushl %edi
+; FALLBACK26-NEXT: pushl %esi
+; FALLBACK26-NEXT: subl $204, %esp
+; FALLBACK26-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK26-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK26-NEXT: vmovups (%ecx), %ymm0
+; FALLBACK26-NEXT: vmovups 32(%ecx), %ymm1
+; FALLBACK26-NEXT: movl (%eax), %ecx
+; FALLBACK26-NEXT: vxorps %xmm2, %xmm2, %xmm2
+; FALLBACK26-NEXT: vmovups %ymm2, {{[0-9]+}}(%esp)
+; FALLBACK26-NEXT: vmovups %ymm2, {{[0-9]+}}(%esp)
+; FALLBACK26-NEXT: vmovups %ymm1, {{[0-9]+}}(%esp)
+; FALLBACK26-NEXT: vmovups %ymm0, {{[0-9]+}}(%esp)
+; FALLBACK26-NEXT: leal (,%ecx,8), %edx
+; FALLBACK26-NEXT: andl $24, %edx
+; FALLBACK26-NEXT: andl $60, %ecx
+; FALLBACK26-NEXT: movl 68(%esp,%ecx), %esi
+; FALLBACK26-NEXT: movl 72(%esp,%ecx), %eax
+; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: shrxl %edx, %esi, %edi
+; FALLBACK26-NEXT: movl %edx, %ebx
+; FALLBACK26-NEXT: notb %bl
+; FALLBACK26-NEXT: leal (%eax,%eax), %ebp
+; FALLBACK26-NEXT: shlxl %ebx, %ebp, %ebp
+; FALLBACK26-NEXT: orl %edi, %ebp
+; FALLBACK26-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: shrxl %edx, 64(%esp,%ecx), %edi
+; FALLBACK26-NEXT: addl %esi, %esi
+; FALLBACK26-NEXT: shlxl %ebx, %esi, %esi
+; FALLBACK26-NEXT: orl %edi, %esi
+; FALLBACK26-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: movl 80(%esp,%ecx), %esi
+; FALLBACK26-NEXT: leal (%esi,%esi), %edi
+; FALLBACK26-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK26-NEXT: movl 76(%esp,%ecx), %edi
+; FALLBACK26-NEXT: shrxl %edx, %edi, %ebp
+; FALLBACK26-NEXT: orl %ebp, %eax
+; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK26-NEXT: addl %edi, %edi
+; FALLBACK26-NEXT: shlxl %ebx, %edi, %edi
+; FALLBACK26-NEXT: orl %eax, %edi
+; FALLBACK26-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: movl 88(%esp,%ecx), %eax
+; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: leal (%eax,%eax), %edi
+; FALLBACK26-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK26-NEXT: movl 84(%esp,%ecx), %edi
+; FALLBACK26-NEXT: shrxl %edx, %edi, %ebp
+; FALLBACK26-NEXT: orl %ebp, %eax
+; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: shrxl %edx, %esi, %esi
+; FALLBACK26-NEXT: addl %edi, %edi
+; FALLBACK26-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK26-NEXT: orl %esi, %eax
+; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: movl 96(%esp,%ecx), %esi
+; FALLBACK26-NEXT: leal (%esi,%esi), %edi
+; FALLBACK26-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK26-NEXT: movl 92(%esp,%ecx), %edi
+; FALLBACK26-NEXT: shrxl %edx, %edi, %ebp
+; FALLBACK26-NEXT: orl %ebp, %eax
+; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK26-NEXT: addl %edi, %edi
+; FALLBACK26-NEXT: shlxl %ebx, %edi, %edi
+; FALLBACK26-NEXT: orl %eax, %edi
+; FALLBACK26-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: movl 104(%esp,%ecx), %eax
+; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: leal (%eax,%eax), %edi
+; FALLBACK26-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK26-NEXT: movl 100(%esp,%ecx), %edi
+; FALLBACK26-NEXT: shrxl %edx, %edi, %ebp
+; FALLBACK26-NEXT: orl %ebp, %eax
+; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: shrxl %edx, %esi, %esi
+; FALLBACK26-NEXT: addl %edi, %edi
+; FALLBACK26-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK26-NEXT: orl %esi, %eax
+; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: movl 112(%esp,%ecx), %eax
+; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: leal (%eax,%eax), %esi
+; FALLBACK26-NEXT: shlxl %ebx, %esi, %eax
+; FALLBACK26-NEXT: movl 108(%esp,%ecx), %esi
+; FALLBACK26-NEXT: shrxl %edx, %esi, %ebp
+; FALLBACK26-NEXT: orl %ebp, %eax
+; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK26-NEXT: addl %esi, %esi
+; FALLBACK26-NEXT: shlxl %ebx, %esi, %esi
+; FALLBACK26-NEXT: orl %eax, %esi
+; FALLBACK26-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: movl 120(%esp,%ecx), %ebp
+; FALLBACK26-NEXT: leal (%ebp,%ebp), %eax
+; FALLBACK26-NEXT: shlxl %ebx, %eax, %esi
+; FALLBACK26-NEXT: movl 116(%esp,%ecx), %eax
+; FALLBACK26-NEXT: shrxl %edx, %eax, %edi
+; FALLBACK26-NEXT: orl %edi, %esi
+; FALLBACK26-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; FALLBACK26-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: addl %eax, %eax
+; FALLBACK26-NEXT: shlxl %ebx, %eax, %edi
+; FALLBACK26-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; FALLBACK26-NEXT: shrxl %edx, %ebp, %eax
+; FALLBACK26-NEXT: movl 124(%esp,%ecx), %ecx
+; FALLBACK26-NEXT: shrxl %edx, %ecx, %edx
+; FALLBACK26-NEXT: addl %ecx, %ecx
+; FALLBACK26-NEXT: shlxl %ebx, %ecx, %ebx
+; FALLBACK26-NEXT: orl %eax, %ebx
+; FALLBACK26-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK26-NEXT: movl %edx, 60(%ecx)
+; FALLBACK26-NEXT: movl %ebx, 56(%ecx)
+; FALLBACK26-NEXT: movl %edi, 48(%ecx)
+; FALLBACK26-NEXT: movl %esi, 52(%ecx)
+; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK26-NEXT: movl %eax, 40(%ecx)
+; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK26-NEXT: movl %eax, 44(%ecx)
+; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK26-NEXT: movl %eax, 32(%ecx)
+; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK26-NEXT: movl %eax, 36(%ecx)
+; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK26-NEXT: movl %eax, 24(%ecx)
+; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK26-NEXT: movl %eax, 28(%ecx)
+; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK26-NEXT: movl %eax, 16(%ecx)
+; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK26-NEXT: movl %eax, 20(%ecx)
+; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK26-NEXT: movl %eax, 8(%ecx)
+; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK26-NEXT: movl %eax, 12(%ecx)
+; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK26-NEXT: movl %eax, (%ecx)
+; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK26-NEXT: movl %eax, 4(%ecx)
+; FALLBACK26-NEXT: addl $204, %esp
+; FALLBACK26-NEXT: popl %esi
+; FALLBACK26-NEXT: popl %edi
+; FALLBACK26-NEXT: popl %ebx
+; FALLBACK26-NEXT: popl %ebp
+; FALLBACK26-NEXT: vzeroupper
+; FALLBACK26-NEXT: retl
+;
+; FALLBACK27-LABEL: lshr_64bytes:
+; FALLBACK27: # %bb.0:
+; FALLBACK27-NEXT: pushl %ebp
+; FALLBACK27-NEXT: pushl %ebx
+; FALLBACK27-NEXT: pushl %edi
+; FALLBACK27-NEXT: pushl %esi
+; FALLBACK27-NEXT: subl $188, %esp
+; FALLBACK27-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK27-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK27-NEXT: vmovups (%ecx), %ymm0
+; FALLBACK27-NEXT: vmovups 32(%ecx), %ymm1
+; FALLBACK27-NEXT: movl (%eax), %ecx
+; FALLBACK27-NEXT: vxorps %xmm2, %xmm2, %xmm2
+; FALLBACK27-NEXT: vmovups %ymm2, {{[0-9]+}}(%esp)
+; FALLBACK27-NEXT: vmovups %ymm2, {{[0-9]+}}(%esp)
+; FALLBACK27-NEXT: vmovups %ymm1, {{[0-9]+}}(%esp)
+; FALLBACK27-NEXT: vmovups %ymm0, {{[0-9]+}}(%esp)
+; FALLBACK27-NEXT: movl %ecx, %ebp
+; FALLBACK27-NEXT: andl $60, %ebp
+; FALLBACK27-NEXT: movl 56(%esp,%ebp), %edx
+; FALLBACK27-NEXT: movl 52(%esp,%ebp), %eax
+; FALLBACK27-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK27-NEXT: shll $3, %ecx
+; FALLBACK27-NEXT: andl $24, %ecx
+; FALLBACK27-NEXT: shrdl %cl, %edx, %eax
+; FALLBACK27-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK27-NEXT: movl 64(%esp,%ebp), %edi
+; FALLBACK27-NEXT: movl 60(%esp,%ebp), %eax
+; FALLBACK27-NEXT: movl %eax, %esi
+; FALLBACK27-NEXT: shrdl %cl, %edi, %esi
+; FALLBACK27-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK27-NEXT: shrdl %cl, %eax, %edx
+; FALLBACK27-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK27-NEXT: movl 72(%esp,%ebp), %esi
+; FALLBACK27-NEXT: movl 68(%esp,%ebp), %eax
+; FALLBACK27-NEXT: movl %eax, %edx
+; FALLBACK27-NEXT: shrdl %cl, %esi, %edx
+; FALLBACK27-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK27-NEXT: shrdl %cl, %eax, %edi
+; FALLBACK27-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK27-NEXT: movl 80(%esp,%ebp), %edi
+; FALLBACK27-NEXT: movl 76(%esp,%ebp), %eax
+; FALLBACK27-NEXT: movl %eax, %edx
+; FALLBACK27-NEXT: shrdl %cl, %edi, %edx
+; FALLBACK27-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK27-NEXT: shrdl %cl, %eax, %esi
+; FALLBACK27-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK27-NEXT: movl 88(%esp,%ebp), %ebx
+; FALLBACK27-NEXT: movl 84(%esp,%ebp), %eax
+; FALLBACK27-NEXT: movl %eax, %edx
+; FALLBACK27-NEXT: shrdl %cl, %ebx, %edx
+; FALLBACK27-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK27-NEXT: shrdl %cl, %eax, %edi
+; FALLBACK27-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK27-NEXT: movl 96(%esp,%ebp), %esi
+; FALLBACK27-NEXT: movl 92(%esp,%ebp), %eax
+; FALLBACK27-NEXT: movl %eax, %edx
+; FALLBACK27-NEXT: shrdl %cl, %esi, %edx
+; FALLBACK27-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK27-NEXT: shrdl %cl, %eax, %ebx
+; FALLBACK27-NEXT: movl 104(%esp,%ebp), %eax
+; FALLBACK27-NEXT: movl 100(%esp,%ebp), %edi
+; FALLBACK27-NEXT: movl %edi, %edx
+; FALLBACK27-NEXT: shrdl %cl, %eax, %edx
+; FALLBACK27-NEXT: shrdl %cl, %edi, %esi
+; FALLBACK27-NEXT: movl 48(%esp,%ebp), %edi
+; FALLBACK27-NEXT: movl 108(%esp,%ebp), %ebp
+; FALLBACK27-NEXT: movl %ebp, (%esp) # 4-byte Spill
+; FALLBACK27-NEXT: shrdl %cl, %ebp, %eax
+; FALLBACK27-NEXT: movl {{[0-9]+}}(%esp), %ebp
+; FALLBACK27-NEXT: movl %eax, 56(%ebp)
+; FALLBACK27-NEXT: movl %esi, 48(%ebp)
+; FALLBACK27-NEXT: movl %edx, 52(%ebp)
+; FALLBACK27-NEXT: movl %ebx, 40(%ebp)
+; FALLBACK27-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK27-NEXT: movl %eax, 44(%ebp)
+; FALLBACK27-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK27-NEXT: movl %eax, 32(%ebp)
+; FALLBACK27-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK27-NEXT: movl %eax, 36(%ebp)
+; FALLBACK27-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK27-NEXT: movl %eax, 24(%ebp)
+; FALLBACK27-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK27-NEXT: movl %eax, 28(%ebp)
+; FALLBACK27-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK27-NEXT: movl %eax, 16(%ebp)
+; FALLBACK27-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK27-NEXT: movl %eax, 20(%ebp)
+; FALLBACK27-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK27-NEXT: movl %eax, 8(%ebp)
+; FALLBACK27-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK27-NEXT: movl %eax, 12(%ebp)
+; FALLBACK27-NEXT: shrxl %ecx, (%esp), %eax # 4-byte Folded Reload
+; FALLBACK27-NEXT: # kill: def $cl killed $cl killed $ecx
+; FALLBACK27-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK27-NEXT: shrdl %cl, %edx, %edi
+; FALLBACK27-NEXT: movl %edi, (%ebp)
+; FALLBACK27-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK27-NEXT: movl %ecx, 4(%ebp)
+; FALLBACK27-NEXT: movl %eax, 60(%ebp)
+; FALLBACK27-NEXT: addl $188, %esp
+; FALLBACK27-NEXT: popl %esi
+; FALLBACK27-NEXT: popl %edi
+; FALLBACK27-NEXT: popl %ebx
+; FALLBACK27-NEXT: popl %ebp
+; FALLBACK27-NEXT: vzeroupper
+; FALLBACK27-NEXT: retl
+;
+; FALLBACK28-LABEL: lshr_64bytes:
+; FALLBACK28: # %bb.0:
+; FALLBACK28-NEXT: pushl %ebp
+; FALLBACK28-NEXT: pushl %ebx
+; FALLBACK28-NEXT: pushl %edi
+; FALLBACK28-NEXT: pushl %esi
+; FALLBACK28-NEXT: subl $204, %esp
+; FALLBACK28-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK28-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK28-NEXT: vmovups (%ecx), %zmm0
+; FALLBACK28-NEXT: movl (%eax), %ecx
+; FALLBACK28-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; FALLBACK28-NEXT: vmovups %zmm1, {{[0-9]+}}(%esp)
+; FALLBACK28-NEXT: vmovups %zmm0, {{[0-9]+}}(%esp)
+; FALLBACK28-NEXT: movl %ecx, %esi
+; FALLBACK28-NEXT: andl $60, %esi
+; FALLBACK28-NEXT: movl 68(%esp,%esi), %edx
+; FALLBACK28-NEXT: shll $3, %ecx
+; FALLBACK28-NEXT: andl $24, %ecx
+; FALLBACK28-NEXT: movl %edx, %edi
+; FALLBACK28-NEXT: shrl %cl, %edi
+; FALLBACK28-NEXT: movl 72(%esp,%esi), %eax
+; FALLBACK28-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK28-NEXT: leal (%eax,%eax), %ebx
+; FALLBACK28-NEXT: movl %ecx, %ebp
+; FALLBACK28-NEXT: movb %cl, %ch
+; FALLBACK28-NEXT: notb %ch
+; FALLBACK28-NEXT: movb %ch, %cl
+; FALLBACK28-NEXT: shll %cl, %ebx
+; FALLBACK28-NEXT: orl %edi, %ebx
+; FALLBACK28-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK28-NEXT: movl 64(%esp,%esi), %edi
+; FALLBACK28-NEXT: movl %ebp, %eax
+; FALLBACK28-NEXT: movb %al, %cl
+; FALLBACK28-NEXT: shrl %cl, %edi
+; FALLBACK28-NEXT: addl %edx, %edx
+; FALLBACK28-NEXT: movb %ch, %cl
+; FALLBACK28-NEXT: shll %cl, %edx
+; FALLBACK28-NEXT: orl %edi, %edx
+; FALLBACK28-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK28-NEXT: movl 76(%esp,%esi), %edx
+; FALLBACK28-NEXT: movl %edx, %ebp
+; FALLBACK28-NEXT: movb %al, %cl
+; FALLBACK28-NEXT: shrl %cl, %ebp
+; FALLBACK28-NEXT: movl 80(%esp,%esi), %edi
+; FALLBACK28-NEXT: leal (%edi,%edi), %ebx
+; FALLBACK28-NEXT: movb %ch, %cl
+; FALLBACK28-NEXT: shll %cl, %ebx
+; FALLBACK28-NEXT: orl %ebp, %ebx
+; FALLBACK28-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK28-NEXT: movb %al, %cl
+; FALLBACK28-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; FALLBACK28-NEXT: shrl %cl, %ebx
+; FALLBACK28-NEXT: addl %edx, %edx
+; FALLBACK28-NEXT: movb %ch, %cl
+; FALLBACK28-NEXT: shll %cl, %edx
+; FALLBACK28-NEXT: orl %ebx, %edx
+; FALLBACK28-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK28-NEXT: movl 84(%esp,%esi), %ebx
+; FALLBACK28-NEXT: movl %ebx, %ebp
+; FALLBACK28-NEXT: movl %eax, %edx
+; FALLBACK28-NEXT: movb %dl, %cl
+; FALLBACK28-NEXT: shrl %cl, %ebp
+; FALLBACK28-NEXT: movl 88(%esp,%esi), %eax
+; FALLBACK28-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK28-NEXT: addl %eax, %eax
+; FALLBACK28-NEXT: movb %ch, %cl
+; FALLBACK28-NEXT: shll %cl, %eax
+; FALLBACK28-NEXT: orl %ebp, %eax
+; FALLBACK28-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK28-NEXT: movb %dl, %cl
+; FALLBACK28-NEXT: shrl %cl, %edi
+; FALLBACK28-NEXT: addl %ebx, %ebx
+; FALLBACK28-NEXT: movb %ch, %cl
+; FALLBACK28-NEXT: movb %ch, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
+; FALLBACK28-NEXT: shll %cl, %ebx
+; FALLBACK28-NEXT: orl %edi, %ebx
+; FALLBACK28-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK28-NEXT: movl 92(%esp,%esi), %ebx
+; FALLBACK28-NEXT: movl %ebx, %ebp
+; FALLBACK28-NEXT: movb %dl, %cl
+; FALLBACK28-NEXT: shrl %cl, %ebp
+; FALLBACK28-NEXT: movl 96(%esp,%esi), %edi
+; FALLBACK28-NEXT: leal (%edi,%edi), %eax
+; FALLBACK28-NEXT: movb %ch, %cl
+; FALLBACK28-NEXT: shll %cl, %eax
+; FALLBACK28-NEXT: orl %ebp, %eax
+; FALLBACK28-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK28-NEXT: movb %dl, %cl
+; FALLBACK28-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK28-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK28-NEXT: shrl %cl, %eax
+; FALLBACK28-NEXT: addl %ebx, %ebx
+; FALLBACK28-NEXT: movb %ch, %cl
+; FALLBACK28-NEXT: shll %cl, %ebx
+; FALLBACK28-NEXT: orl %eax, %ebx
+; FALLBACK28-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK28-NEXT: movl 100(%esp,%esi), %ebx
+; FALLBACK28-NEXT: movl %ebx, %ebp
+; FALLBACK28-NEXT: movb %dl, %cl
+; FALLBACK28-NEXT: shrl %cl, %ebp
+; FALLBACK28-NEXT: movl 104(%esp,%esi), %edx
+; FALLBACK28-NEXT: leal (%edx,%edx), %eax
+; FALLBACK28-NEXT: movb %ch, %cl
+; FALLBACK28-NEXT: shll %cl, %eax
+; FALLBACK28-NEXT: orl %ebp, %eax
+; FALLBACK28-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK28-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK28-NEXT: movb %al, %cl
+; FALLBACK28-NEXT: shrl %cl, %edi
+; FALLBACK28-NEXT: addl %ebx, %ebx
+; FALLBACK28-NEXT: movb %ch, %cl
+; FALLBACK28-NEXT: shll %cl, %ebx
+; FALLBACK28-NEXT: orl %edi, %ebx
+; FALLBACK28-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK28-NEXT: movl 108(%esp,%esi), %edi
+; FALLBACK28-NEXT: movl %edi, %ebp
+; FALLBACK28-NEXT: movl %eax, %ecx
+; FALLBACK28-NEXT: shrl %cl, %ebp
+; FALLBACK28-NEXT: movl 112(%esp,%esi), %ecx
+; FALLBACK28-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK28-NEXT: leal (%ecx,%ecx), %ebx
+; FALLBACK28-NEXT: movb {{[-0-9]+}}(%e{{[sb]}}p), %ch # 1-byte Reload
+; FALLBACK28-NEXT: movb %ch, %cl
+; FALLBACK28-NEXT: shll %cl, %ebx
+; FALLBACK28-NEXT: orl %ebp, %ebx
+; FALLBACK28-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK28-NEXT: movb %al, %cl
+; FALLBACK28-NEXT: shrl %cl, %edx
+; FALLBACK28-NEXT: addl %edi, %edi
+; FALLBACK28-NEXT: movb %ch, %cl
+; FALLBACK28-NEXT: shll %cl, %edi
+; FALLBACK28-NEXT: orl %edx, %edi
+; FALLBACK28-NEXT: movl %esi, %edx
+; FALLBACK28-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK28-NEXT: movl 116(%esp,%esi), %esi
+; FALLBACK28-NEXT: movl %esi, %ebx
+; FALLBACK28-NEXT: movb %al, %cl
+; FALLBACK28-NEXT: shrl %cl, %ebx
+; FALLBACK28-NEXT: movl 120(%esp,%edx), %eax
+; FALLBACK28-NEXT: leal (%eax,%eax), %ebp
+; FALLBACK28-NEXT: movb %ch, %cl
+; FALLBACK28-NEXT: shll %cl, %ebp
+; FALLBACK28-NEXT: orl %ebx, %ebp
+; FALLBACK28-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK28-NEXT: movb %dl, %cl
+; FALLBACK28-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; FALLBACK28-NEXT: shrl %cl, %ebx
+; FALLBACK28-NEXT: addl %esi, %esi
+; FALLBACK28-NEXT: movb %ch, %cl
+; FALLBACK28-NEXT: shll %cl, %esi
+; FALLBACK28-NEXT: orl %ebx, %esi
+; FALLBACK28-NEXT: movb %dl, %cl
+; FALLBACK28-NEXT: shrl %cl, %eax
+; FALLBACK28-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK28-NEXT: movl 124(%esp,%edx), %ebx
+; FALLBACK28-NEXT: leal (%ebx,%ebx), %edx
+; FALLBACK28-NEXT: movb %ch, %cl
+; FALLBACK28-NEXT: shll %cl, %edx
+; FALLBACK28-NEXT: orl %eax, %edx
+; FALLBACK28-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK28-NEXT: # kill: def $cl killed $cl killed $ecx
+; FALLBACK28-NEXT: shrl %cl, %ebx
+; FALLBACK28-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK28-NEXT: movl %ebx, 60(%eax)
+; FALLBACK28-NEXT: movl %edx, 56(%eax)
+; FALLBACK28-NEXT: movl %esi, 48(%eax)
+; FALLBACK28-NEXT: movl %ebp, 52(%eax)
+; FALLBACK28-NEXT: movl %edi, 40(%eax)
+; FALLBACK28-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK28-NEXT: movl %ecx, 44(%eax)
+; FALLBACK28-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK28-NEXT: movl %ecx, 32(%eax)
+; FALLBACK28-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK28-NEXT: movl %ecx, 36(%eax)
+; FALLBACK28-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK28-NEXT: movl %ecx, 24(%eax)
+; FALLBACK28-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK28-NEXT: movl %ecx, 28(%eax)
+; FALLBACK28-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK28-NEXT: movl %ecx, 16(%eax)
+; FALLBACK28-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK28-NEXT: movl %ecx, 20(%eax)
+; FALLBACK28-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK28-NEXT: movl %ecx, 8(%eax)
+; FALLBACK28-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK28-NEXT: movl %ecx, 12(%eax)
+; FALLBACK28-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK28-NEXT: movl %ecx, (%eax)
+; FALLBACK28-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK28-NEXT: movl %ecx, 4(%eax)
+; FALLBACK28-NEXT: addl $204, %esp
+; FALLBACK28-NEXT: popl %esi
+; FALLBACK28-NEXT: popl %edi
+; FALLBACK28-NEXT: popl %ebx
+; FALLBACK28-NEXT: popl %ebp
+; FALLBACK28-NEXT: vzeroupper
+; FALLBACK28-NEXT: retl
+;
+; FALLBACK29-LABEL: lshr_64bytes:
+; FALLBACK29: # %bb.0:
+; FALLBACK29-NEXT: pushl %ebp
+; FALLBACK29-NEXT: pushl %ebx
+; FALLBACK29-NEXT: pushl %edi
+; FALLBACK29-NEXT: pushl %esi
+; FALLBACK29-NEXT: subl $188, %esp
+; FALLBACK29-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK29-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK29-NEXT: vmovups (%ecx), %zmm0
+; FALLBACK29-NEXT: movl (%eax), %ecx
+; FALLBACK29-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; FALLBACK29-NEXT: vmovups %zmm1, {{[0-9]+}}(%esp)
+; FALLBACK29-NEXT: vmovups %zmm0, {{[0-9]+}}(%esp)
+; FALLBACK29-NEXT: movl %ecx, %ebp
+; FALLBACK29-NEXT: andl $60, %ebp
+; FALLBACK29-NEXT: movl 56(%esp,%ebp), %edx
+; FALLBACK29-NEXT: movl 52(%esp,%ebp), %eax
+; FALLBACK29-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK29-NEXT: shll $3, %ecx
+; FALLBACK29-NEXT: andl $24, %ecx
+; FALLBACK29-NEXT: shrdl %cl, %edx, %eax
+; FALLBACK29-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK29-NEXT: movl 64(%esp,%ebp), %edi
+; FALLBACK29-NEXT: movl 60(%esp,%ebp), %eax
+; FALLBACK29-NEXT: movl %eax, %esi
+; FALLBACK29-NEXT: shrdl %cl, %edi, %esi
+; FALLBACK29-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK29-NEXT: shrdl %cl, %eax, %edx
+; FALLBACK29-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK29-NEXT: movl 72(%esp,%ebp), %esi
+; FALLBACK29-NEXT: movl 68(%esp,%ebp), %eax
+; FALLBACK29-NEXT: movl %eax, %edx
+; FALLBACK29-NEXT: shrdl %cl, %esi, %edx
+; FALLBACK29-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK29-NEXT: shrdl %cl, %eax, %edi
+; FALLBACK29-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK29-NEXT: movl 80(%esp,%ebp), %edi
+; FALLBACK29-NEXT: movl 76(%esp,%ebp), %eax
+; FALLBACK29-NEXT: movl %eax, %edx
+; FALLBACK29-NEXT: shrdl %cl, %edi, %edx
+; FALLBACK29-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK29-NEXT: shrdl %cl, %eax, %esi
+; FALLBACK29-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK29-NEXT: movl 88(%esp,%ebp), %esi
+; FALLBACK29-NEXT: movl 84(%esp,%ebp), %eax
+; FALLBACK29-NEXT: movl %eax, %edx
+; FALLBACK29-NEXT: shrdl %cl, %esi, %edx
+; FALLBACK29-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK29-NEXT: movl %esi, %edx
+; FALLBACK29-NEXT: shrdl %cl, %eax, %edi
+; FALLBACK29-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK29-NEXT: movl 96(%esp,%ebp), %esi
+; FALLBACK29-NEXT: movl 92(%esp,%ebp), %eax
+; FALLBACK29-NEXT: movl %eax, %edi
+; FALLBACK29-NEXT: shrdl %cl, %esi, %edi
+; FALLBACK29-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK29-NEXT: shrdl %cl, %eax, %edx
+; FALLBACK29-NEXT: movl %edx, (%esp) # 4-byte Spill
+; FALLBACK29-NEXT: movl 104(%esp,%ebp), %edx
+; FALLBACK29-NEXT: movl 100(%esp,%ebp), %eax
+; FALLBACK29-NEXT: movl %eax, %edi
+; FALLBACK29-NEXT: shrdl %cl, %edx, %edi
+; FALLBACK29-NEXT: shrdl %cl, %eax, %esi
+; FALLBACK29-NEXT: movl 48(%esp,%ebp), %ebx
+; FALLBACK29-NEXT: movl 108(%esp,%ebp), %eax
+; FALLBACK29-NEXT: shrdl %cl, %eax, %edx
+; FALLBACK29-NEXT: movl {{[0-9]+}}(%esp), %ebp
+; FALLBACK29-NEXT: movl %edx, 56(%ebp)
+; FALLBACK29-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK29-NEXT: shrdl %cl, %edx, %ebx
+; FALLBACK29-NEXT: # kill: def $cl killed $cl killed $ecx
+; FALLBACK29-NEXT: shrl %cl, %eax
+; FALLBACK29-NEXT: movl %eax, 60(%ebp)
+; FALLBACK29-NEXT: movl %esi, 48(%ebp)
+; FALLBACK29-NEXT: movl %edi, 52(%ebp)
+; FALLBACK29-NEXT: movl (%esp), %eax # 4-byte Reload
+; FALLBACK29-NEXT: movl %eax, 40(%ebp)
+; FALLBACK29-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK29-NEXT: movl %eax, 44(%ebp)
+; FALLBACK29-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK29-NEXT: movl %eax, 32(%ebp)
+; FALLBACK29-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK29-NEXT: movl %eax, 36(%ebp)
+; FALLBACK29-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK29-NEXT: movl %eax, 24(%ebp)
+; FALLBACK29-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK29-NEXT: movl %eax, 28(%ebp)
+; FALLBACK29-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK29-NEXT: movl %eax, 16(%ebp)
+; FALLBACK29-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK29-NEXT: movl %eax, 20(%ebp)
+; FALLBACK29-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK29-NEXT: movl %eax, 8(%ebp)
+; FALLBACK29-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK29-NEXT: movl %eax, 12(%ebp)
+; FALLBACK29-NEXT: movl %ebx, (%ebp)
+; FALLBACK29-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK29-NEXT: movl %eax, 4(%ebp)
+; FALLBACK29-NEXT: addl $188, %esp
+; FALLBACK29-NEXT: popl %esi
+; FALLBACK29-NEXT: popl %edi
+; FALLBACK29-NEXT: popl %ebx
+; FALLBACK29-NEXT: popl %ebp
+; FALLBACK29-NEXT: vzeroupper
+; FALLBACK29-NEXT: retl
+;
+; FALLBACK30-LABEL: lshr_64bytes:
+; FALLBACK30: # %bb.0:
+; FALLBACK30-NEXT: pushl %ebp
+; FALLBACK30-NEXT: pushl %ebx
+; FALLBACK30-NEXT: pushl %edi
+; FALLBACK30-NEXT: pushl %esi
+; FALLBACK30-NEXT: subl $204, %esp
+; FALLBACK30-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK30-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK30-NEXT: vmovups (%ecx), %zmm0
+; FALLBACK30-NEXT: movl (%eax), %edx
+; FALLBACK30-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; FALLBACK30-NEXT: vmovups %zmm1, {{[0-9]+}}(%esp)
+; FALLBACK30-NEXT: vmovups %zmm0, {{[0-9]+}}(%esp)
+; FALLBACK30-NEXT: leal (,%edx,8), %ecx
+; FALLBACK30-NEXT: andl $24, %ecx
+; FALLBACK30-NEXT: andl $60, %edx
+; FALLBACK30-NEXT: movl 68(%esp,%edx), %esi
+; FALLBACK30-NEXT: movl 72(%esp,%edx), %eax
+; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: shrxl %ecx, %esi, %edi
+; FALLBACK30-NEXT: movl %ecx, %ebx
+; FALLBACK30-NEXT: notb %bl
+; FALLBACK30-NEXT: leal (%eax,%eax), %ebp
+; FALLBACK30-NEXT: shlxl %ebx, %ebp, %ebp
+; FALLBACK30-NEXT: orl %edi, %ebp
+; FALLBACK30-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: shrxl %ecx, 64(%esp,%edx), %edi
+; FALLBACK30-NEXT: addl %esi, %esi
+; FALLBACK30-NEXT: shlxl %ebx, %esi, %esi
+; FALLBACK30-NEXT: orl %edi, %esi
+; FALLBACK30-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: movl 80(%esp,%edx), %esi
+; FALLBACK30-NEXT: leal (%esi,%esi), %edi
+; FALLBACK30-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK30-NEXT: movl 76(%esp,%edx), %edi
+; FALLBACK30-NEXT: shrxl %ecx, %edi, %ebp
+; FALLBACK30-NEXT: orl %ebp, %eax
+; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: shrxl %ecx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK30-NEXT: addl %edi, %edi
+; FALLBACK30-NEXT: shlxl %ebx, %edi, %edi
+; FALLBACK30-NEXT: orl %eax, %edi
+; FALLBACK30-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: movl 88(%esp,%edx), %eax
+; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: leal (%eax,%eax), %edi
+; FALLBACK30-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK30-NEXT: movl 84(%esp,%edx), %edi
+; FALLBACK30-NEXT: shrxl %ecx, %edi, %ebp
+; FALLBACK30-NEXT: orl %ebp, %eax
+; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: shrxl %ecx, %esi, %esi
+; FALLBACK30-NEXT: addl %edi, %edi
+; FALLBACK30-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK30-NEXT: orl %esi, %eax
+; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: movl 96(%esp,%edx), %esi
+; FALLBACK30-NEXT: leal (%esi,%esi), %edi
+; FALLBACK30-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK30-NEXT: movl 92(%esp,%edx), %edi
+; FALLBACK30-NEXT: shrxl %ecx, %edi, %ebp
+; FALLBACK30-NEXT: orl %ebp, %eax
+; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: shrxl %ecx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK30-NEXT: addl %edi, %edi
+; FALLBACK30-NEXT: shlxl %ebx, %edi, %edi
+; FALLBACK30-NEXT: orl %eax, %edi
+; FALLBACK30-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: movl 104(%esp,%edx), %eax
+; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: leal (%eax,%eax), %edi
+; FALLBACK30-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK30-NEXT: movl 100(%esp,%edx), %edi
+; FALLBACK30-NEXT: shrxl %ecx, %edi, %ebp
+; FALLBACK30-NEXT: orl %ebp, %eax
+; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: shrxl %ecx, %esi, %esi
+; FALLBACK30-NEXT: addl %edi, %edi
+; FALLBACK30-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK30-NEXT: orl %esi, %eax
+; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: movl 112(%esp,%edx), %eax
+; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: leal (%eax,%eax), %esi
+; FALLBACK30-NEXT: shlxl %ebx, %esi, %eax
+; FALLBACK30-NEXT: movl 108(%esp,%edx), %esi
+; FALLBACK30-NEXT: shrxl %ecx, %esi, %ebp
+; FALLBACK30-NEXT: orl %ebp, %eax
+; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: shrxl %ecx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK30-NEXT: addl %esi, %esi
+; FALLBACK30-NEXT: shlxl %ebx, %esi, %esi
+; FALLBACK30-NEXT: orl %eax, %esi
+; FALLBACK30-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: movl 120(%esp,%edx), %ebp
+; FALLBACK30-NEXT: leal (%ebp,%ebp), %eax
+; FALLBACK30-NEXT: shlxl %ebx, %eax, %esi
+; FALLBACK30-NEXT: movl 116(%esp,%edx), %eax
+; FALLBACK30-NEXT: shrxl %ecx, %eax, %edi
+; FALLBACK30-NEXT: orl %edi, %esi
+; FALLBACK30-NEXT: shrxl %ecx, {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; FALLBACK30-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: addl %eax, %eax
+; FALLBACK30-NEXT: shlxl %ebx, %eax, %edi
+; FALLBACK30-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; FALLBACK30-NEXT: shrxl %ecx, %ebp, %eax
+; FALLBACK30-NEXT: movl 124(%esp,%edx), %edx
+; FALLBACK30-NEXT: shrxl %ecx, %edx, %ebp
+; FALLBACK30-NEXT: leal (%edx,%edx), %ecx
+; FALLBACK30-NEXT: shlxl %ebx, %ecx, %edx
+; FALLBACK30-NEXT: orl %eax, %edx
+; FALLBACK30-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK30-NEXT: movl %ebp, 60(%ecx)
+; FALLBACK30-NEXT: movl %edx, 56(%ecx)
+; FALLBACK30-NEXT: movl %edi, 48(%ecx)
+; FALLBACK30-NEXT: movl %esi, 52(%ecx)
+; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK30-NEXT: movl %eax, 40(%ecx)
+; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK30-NEXT: movl %eax, 44(%ecx)
+; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK30-NEXT: movl %eax, 32(%ecx)
+; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK30-NEXT: movl %eax, 36(%ecx)
+; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK30-NEXT: movl %eax, 24(%ecx)
+; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK30-NEXT: movl %eax, 28(%ecx)
+; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK30-NEXT: movl %eax, 16(%ecx)
+; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK30-NEXT: movl %eax, 20(%ecx)
+; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK30-NEXT: movl %eax, 8(%ecx)
+; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK30-NEXT: movl %eax, 12(%ecx)
+; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK30-NEXT: movl %eax, (%ecx)
+; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK30-NEXT: movl %eax, 4(%ecx)
+; FALLBACK30-NEXT: addl $204, %esp
+; FALLBACK30-NEXT: popl %esi
+; FALLBACK30-NEXT: popl %edi
+; FALLBACK30-NEXT: popl %ebx
+; FALLBACK30-NEXT: popl %ebp
+; FALLBACK30-NEXT: vzeroupper
+; FALLBACK30-NEXT: retl
+;
+; FALLBACK31-LABEL: lshr_64bytes:
+; FALLBACK31: # %bb.0:
+; FALLBACK31-NEXT: pushl %ebp
+; FALLBACK31-NEXT: pushl %ebx
+; FALLBACK31-NEXT: pushl %edi
+; FALLBACK31-NEXT: pushl %esi
+; FALLBACK31-NEXT: subl $188, %esp
+; FALLBACK31-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK31-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK31-NEXT: vmovups (%ecx), %zmm0
+; FALLBACK31-NEXT: movl (%eax), %ecx
+; FALLBACK31-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; FALLBACK31-NEXT: vmovups %zmm1, {{[0-9]+}}(%esp)
+; FALLBACK31-NEXT: vmovups %zmm0, {{[0-9]+}}(%esp)
+; FALLBACK31-NEXT: movl %ecx, %ebp
+; FALLBACK31-NEXT: andl $60, %ebp
+; FALLBACK31-NEXT: movl 56(%esp,%ebp), %edx
+; FALLBACK31-NEXT: movl 52(%esp,%ebp), %eax
+; FALLBACK31-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK31-NEXT: shll $3, %ecx
+; FALLBACK31-NEXT: andl $24, %ecx
+; FALLBACK31-NEXT: shrdl %cl, %edx, %eax
+; FALLBACK31-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK31-NEXT: movl 64(%esp,%ebp), %edi
+; FALLBACK31-NEXT: movl 60(%esp,%ebp), %eax
+; FALLBACK31-NEXT: movl %eax, %esi
+; FALLBACK31-NEXT: shrdl %cl, %edi, %esi
+; FALLBACK31-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK31-NEXT: shrdl %cl, %eax, %edx
+; FALLBACK31-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK31-NEXT: movl 72(%esp,%ebp), %esi
+; FALLBACK31-NEXT: movl 68(%esp,%ebp), %eax
+; FALLBACK31-NEXT: movl %eax, %edx
+; FALLBACK31-NEXT: shrdl %cl, %esi, %edx
+; FALLBACK31-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK31-NEXT: shrdl %cl, %eax, %edi
+; FALLBACK31-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK31-NEXT: movl 80(%esp,%ebp), %edi
+; FALLBACK31-NEXT: movl 76(%esp,%ebp), %eax
+; FALLBACK31-NEXT: movl %eax, %edx
+; FALLBACK31-NEXT: shrdl %cl, %edi, %edx
+; FALLBACK31-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK31-NEXT: shrdl %cl, %eax, %esi
+; FALLBACK31-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK31-NEXT: movl 88(%esp,%ebp), %ebx
+; FALLBACK31-NEXT: movl 84(%esp,%ebp), %eax
+; FALLBACK31-NEXT: movl %eax, %edx
+; FALLBACK31-NEXT: shrdl %cl, %ebx, %edx
+; FALLBACK31-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK31-NEXT: shrdl %cl, %eax, %edi
+; FALLBACK31-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK31-NEXT: movl 96(%esp,%ebp), %esi
+; FALLBACK31-NEXT: movl 92(%esp,%ebp), %eax
+; FALLBACK31-NEXT: movl %eax, %edx
+; FALLBACK31-NEXT: shrdl %cl, %esi, %edx
+; FALLBACK31-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK31-NEXT: shrdl %cl, %eax, %ebx
+; FALLBACK31-NEXT: movl 104(%esp,%ebp), %eax
+; FALLBACK31-NEXT: movl 100(%esp,%ebp), %edi
+; FALLBACK31-NEXT: movl %edi, %edx
+; FALLBACK31-NEXT: shrdl %cl, %eax, %edx
+; FALLBACK31-NEXT: shrdl %cl, %edi, %esi
+; FALLBACK31-NEXT: movl 48(%esp,%ebp), %edi
+; FALLBACK31-NEXT: movl 108(%esp,%ebp), %ebp
+; FALLBACK31-NEXT: movl %ebp, (%esp) # 4-byte Spill
+; FALLBACK31-NEXT: shrdl %cl, %ebp, %eax
+; FALLBACK31-NEXT: movl {{[0-9]+}}(%esp), %ebp
+; FALLBACK31-NEXT: movl %eax, 56(%ebp)
+; FALLBACK31-NEXT: movl %esi, 48(%ebp)
+; FALLBACK31-NEXT: movl %edx, 52(%ebp)
+; FALLBACK31-NEXT: movl %ebx, 40(%ebp)
+; FALLBACK31-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK31-NEXT: movl %eax, 44(%ebp)
+; FALLBACK31-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK31-NEXT: movl %eax, 32(%ebp)
+; FALLBACK31-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK31-NEXT: movl %eax, 36(%ebp)
+; FALLBACK31-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK31-NEXT: movl %eax, 24(%ebp)
+; FALLBACK31-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK31-NEXT: movl %eax, 28(%ebp)
+; FALLBACK31-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK31-NEXT: movl %eax, 16(%ebp)
+; FALLBACK31-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK31-NEXT: movl %eax, 20(%ebp)
+; FALLBACK31-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK31-NEXT: movl %eax, 8(%ebp)
+; FALLBACK31-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK31-NEXT: movl %eax, 12(%ebp)
+; FALLBACK31-NEXT: shrxl %ecx, (%esp), %eax # 4-byte Folded Reload
+; FALLBACK31-NEXT: # kill: def $cl killed $cl killed $ecx
+; FALLBACK31-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK31-NEXT: shrdl %cl, %edx, %edi
+; FALLBACK31-NEXT: movl %edi, (%ebp)
+; FALLBACK31-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK31-NEXT: movl %ecx, 4(%ebp)
+; FALLBACK31-NEXT: movl %eax, 60(%ebp)
+; FALLBACK31-NEXT: addl $188, %esp
+; FALLBACK31-NEXT: popl %esi
+; FALLBACK31-NEXT: popl %edi
+; FALLBACK31-NEXT: popl %ebx
+; FALLBACK31-NEXT: popl %ebp
+; FALLBACK31-NEXT: vzeroupper
+; FALLBACK31-NEXT: retl
+ %src = load i512, ptr %src.ptr, align 1
+ %byteOff = load i512, ptr %byteOff.ptr, align 1
+ %bitOff = shl i512 %byteOff, 3
+ %res = lshr i512 %src, %bitOff
+ store i512 %res, ptr %dst, align 1
+ ret void
+}
+
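(Editorial sketch, not part of the patch: the FALLBACK24-31 checks above all verify the same lowering strategy for this i512 logical shift right. The 64-byte source is spilled to the stack with 64 zero bytes of padding after it, and the result is reassembled word by word starting at the masked byte offset, with adjacent words combined -- via shl/or pairs, shrd, or shrx/shlx depending on the feature set -- to realize the residual bit shift. Below is a minimal C model of that strategy, assuming a little-endian host as on the x86 targets checked here and in-range offsets; the helper name lshr_64bytes_ref is ours, not from the test.)

    #include <stdint.h>
    #include <string.h>

    /* Reference model of the stack-spill lowering: 64 data bytes followed
       by 64 zero bytes, then a word-by-word funnel-shift read starting at
       the byte offset. */
    static void lshr_64bytes_ref(const uint8_t *src, uint64_t byteOff,
                                 uint8_t *dst) {
        uint32_t words[32] = {0};        /* 128-byte buffer, upper half zero */
        memcpy(words, src, 64);          /* spill the source; zeros follow   */
        uint64_t off = byteOff & 63;     /* mirrors the andl $60 / $24 split */
        unsigned w = off / 4;            /* starting 32-bit word index       */
        unsigned b = (off % 4) * 8;      /* residual bit shift within a word */
        for (unsigned i = 0; i < 16; i++) {
            uint32_t lo = words[w + i];
            uint32_t hi = words[w + i + 1];   /* in range: w+i+1 <= 31 */
            uint32_t out = b ? (lo >> b) | (hi << (32 - b)) : lo;
            memcpy(dst + 4 * i, &out, 4);
        }
    }

(The zero padding is what lets every read past the end of the source produce the shifted-in zero bits, so the FALLBACK variants differ only in how adjacent words are combined, never in the buffer setup.)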
+define void @lshr_64bytes_qwordOff(ptr %src.ptr, ptr %qwordOff.ptr, ptr %dst) nounwind {
+; X64-SSE2-LABEL: lshr_64bytes_qwordOff:
; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: pushq %rbx
; X64-SSE2-NEXT: movq (%rdi), %rax
@@ -1667,6 +15700,11 @@ define void @lshr_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; X64-SSE2-NEXT: movq 48(%rdi), %rbx
; X64-SSE2-NEXT: movq 56(%rdi), %rdi
; X64-SSE2-NEXT: movl (%rsi), %esi
+; X64-SSE2-NEXT: xorps %xmm0, %xmm0
+; X64-SSE2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; X64-SSE2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; X64-SSE2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; X64-SSE2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; X64-SSE2-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
; X64-SSE2-NEXT: movq %rbx, -{{[0-9]+}}(%rsp)
; X64-SSE2-NEXT: movq %r11, -{{[0-9]+}}(%rsp)
@@ -1675,23 +15713,15 @@ define void @lshr_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; X64-SSE2-NEXT: movq %r8, -{{[0-9]+}}(%rsp)
; X64-SSE2-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
; X64-SSE2-NEXT: movq %rax, -{{[0-9]+}}(%rsp)
-; X64-SSE2-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-SSE2-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-SSE2-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-SSE2-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-SSE2-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-SSE2-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-SSE2-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-SSE2-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-SSE2-NEXT: andl $63, %esi
-; X64-SSE2-NEXT: movq -128(%rsp,%rsi), %rax
-; X64-SSE2-NEXT: movq -120(%rsp,%rsi), %rcx
-; X64-SSE2-NEXT: movq -104(%rsp,%rsi), %rdi
-; X64-SSE2-NEXT: movq -112(%rsp,%rsi), %r8
-; X64-SSE2-NEXT: movq -88(%rsp,%rsi), %r9
-; X64-SSE2-NEXT: movq -96(%rsp,%rsi), %r10
-; X64-SSE2-NEXT: movq -72(%rsp,%rsi), %r11
-; X64-SSE2-NEXT: movq -80(%rsp,%rsi), %rsi
+; X64-SSE2-NEXT: andl $7, %esi
+; X64-SSE2-NEXT: movq -128(%rsp,%rsi,8), %rax
+; X64-SSE2-NEXT: movq -120(%rsp,%rsi,8), %rcx
+; X64-SSE2-NEXT: movq -104(%rsp,%rsi,8), %rdi
+; X64-SSE2-NEXT: movq -112(%rsp,%rsi,8), %r8
+; X64-SSE2-NEXT: movq -88(%rsp,%rsi,8), %r9
+; X64-SSE2-NEXT: movq -96(%rsp,%rsi,8), %r10
+; X64-SSE2-NEXT: movq -72(%rsp,%rsi,8), %r11
+; X64-SSE2-NEXT: movq -80(%rsp,%rsi,8), %rsi
; X64-SSE2-NEXT: movq %rsi, 48(%rdx)
; X64-SSE2-NEXT: movq %r11, 56(%rdx)
; X64-SSE2-NEXT: movq %r10, 32(%rdx)
@@ -1703,35 +15733,38 @@ define void @lshr_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; X64-SSE2-NEXT: popq %rbx
; X64-SSE2-NEXT: retq
;
-; X64-SSE42-LABEL: lshr_64bytes:
+; X64-SSE42-LABEL: lshr_64bytes_qwordOff:
; X64-SSE42: # %bb.0:
+; X64-SSE42-NEXT: pushq %rax
; X64-SSE42-NEXT: movups (%rdi), %xmm0
; X64-SSE42-NEXT: movups 16(%rdi), %xmm1
; X64-SSE42-NEXT: movups 32(%rdi), %xmm2
; X64-SSE42-NEXT: movups 48(%rdi), %xmm3
; X64-SSE42-NEXT: movl (%rsi), %eax
; X64-SSE42-NEXT: xorps %xmm4, %xmm4
-; X64-SSE42-NEXT: movups %xmm4, -{{[0-9]+}}(%rsp)
-; X64-SSE42-NEXT: movups %xmm4, -{{[0-9]+}}(%rsp)
-; X64-SSE42-NEXT: movups %xmm4, -{{[0-9]+}}(%rsp)
-; X64-SSE42-NEXT: movups %xmm4, -{{[0-9]+}}(%rsp)
-; X64-SSE42-NEXT: movups %xmm3, -{{[0-9]+}}(%rsp)
-; X64-SSE42-NEXT: movups %xmm2, -{{[0-9]+}}(%rsp)
-; X64-SSE42-NEXT: movups %xmm1, -{{[0-9]+}}(%rsp)
-; X64-SSE42-NEXT: movups %xmm0, -{{[0-9]+}}(%rsp)
-; X64-SSE42-NEXT: andl $63, %eax
-; X64-SSE42-NEXT: movups -128(%rsp,%rax), %xmm0
-; X64-SSE42-NEXT: movups -112(%rsp,%rax), %xmm1
-; X64-SSE42-NEXT: movups -96(%rsp,%rax), %xmm2
-; X64-SSE42-NEXT: movups -80(%rsp,%rax), %xmm3
+; X64-SSE42-NEXT: movaps %xmm4, -{{[0-9]+}}(%rsp)
+; X64-SSE42-NEXT: movaps %xmm4, -{{[0-9]+}}(%rsp)
+; X64-SSE42-NEXT: movaps %xmm4, -{{[0-9]+}}(%rsp)
+; X64-SSE42-NEXT: movaps %xmm4, -{{[0-9]+}}(%rsp)
+; X64-SSE42-NEXT: movaps %xmm3, -{{[0-9]+}}(%rsp)
+; X64-SSE42-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-SSE42-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; X64-SSE42-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; X64-SSE42-NEXT: andl $7, %eax
+; X64-SSE42-NEXT: movups -128(%rsp,%rax,8), %xmm0
+; X64-SSE42-NEXT: movups -112(%rsp,%rax,8), %xmm1
+; X64-SSE42-NEXT: movups -96(%rsp,%rax,8), %xmm2
+; X64-SSE42-NEXT: movups -80(%rsp,%rax,8), %xmm3
; X64-SSE42-NEXT: movups %xmm3, 48(%rdx)
; X64-SSE42-NEXT: movups %xmm1, 16(%rdx)
; X64-SSE42-NEXT: movups %xmm2, 32(%rdx)
; X64-SSE42-NEXT: movups %xmm0, (%rdx)
+; X64-SSE42-NEXT: popq %rax
; X64-SSE42-NEXT: retq
;
-; X64-AVX1-LABEL: lshr_64bytes:
+; X64-AVX1-LABEL: lshr_64bytes_qwordOff:
; X64-AVX1: # %bb.0:
+; X64-AVX1-NEXT: pushq %rax
; X64-AVX1-NEXT: vmovups (%rdi), %ymm0
; X64-AVX1-NEXT: vmovups 32(%rdi), %ymm1
; X64-AVX1-NEXT: movl (%rsi), %eax
@@ -1740,44 +15773,47 @@ define void @lshr_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; X64-AVX1-NEXT: vmovups %ymm2, -{{[0-9]+}}(%rsp)
; X64-AVX1-NEXT: vmovups %ymm1, -{{[0-9]+}}(%rsp)
; X64-AVX1-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
-; X64-AVX1-NEXT: andl $63, %eax
-; X64-AVX1-NEXT: vmovups -128(%rsp,%rax), %xmm0
-; X64-AVX1-NEXT: vmovups -112(%rsp,%rax), %xmm1
-; X64-AVX1-NEXT: vmovups -96(%rsp,%rax), %xmm2
-; X64-AVX1-NEXT: vmovups -80(%rsp,%rax), %xmm3
+; X64-AVX1-NEXT: andl $7, %eax
+; X64-AVX1-NEXT: vmovups -128(%rsp,%rax,8), %xmm0
+; X64-AVX1-NEXT: vmovups -112(%rsp,%rax,8), %xmm1
+; X64-AVX1-NEXT: vmovups -96(%rsp,%rax,8), %xmm2
+; X64-AVX1-NEXT: vmovups -80(%rsp,%rax,8), %xmm3
; X64-AVX1-NEXT: vmovups %xmm3, 48(%rdx)
; X64-AVX1-NEXT: vmovups %xmm1, 16(%rdx)
; X64-AVX1-NEXT: vmovups %xmm2, 32(%rdx)
; X64-AVX1-NEXT: vmovups %xmm0, (%rdx)
+; X64-AVX1-NEXT: popq %rax
; X64-AVX1-NEXT: vzeroupper
; X64-AVX1-NEXT: retq
;
-; X64-AVX512-LABEL: lshr_64bytes:
+; X64-AVX512-LABEL: lshr_64bytes_qwordOff:
; X64-AVX512: # %bb.0:
+; X64-AVX512-NEXT: pushq %rax
; X64-AVX512-NEXT: vmovups (%rdi), %zmm0
; X64-AVX512-NEXT: movl (%rsi), %eax
; X64-AVX512-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X64-AVX512-NEXT: vmovups %zmm1, -{{[0-9]+}}(%rsp)
; X64-AVX512-NEXT: vmovups %zmm0, -{{[0-9]+}}(%rsp)
-; X64-AVX512-NEXT: andl $63, %eax
-; X64-AVX512-NEXT: vmovups -128(%rsp,%rax), %xmm0
-; X64-AVX512-NEXT: vmovups -112(%rsp,%rax), %xmm1
-; X64-AVX512-NEXT: vmovups -96(%rsp,%rax), %xmm2
-; X64-AVX512-NEXT: vmovups -80(%rsp,%rax), %xmm3
+; X64-AVX512-NEXT: andl $7, %eax
+; X64-AVX512-NEXT: vmovups -128(%rsp,%rax,8), %xmm0
+; X64-AVX512-NEXT: vmovups -112(%rsp,%rax,8), %xmm1
+; X64-AVX512-NEXT: vmovups -96(%rsp,%rax,8), %xmm2
+; X64-AVX512-NEXT: vmovups -80(%rsp,%rax,8), %xmm3
; X64-AVX512-NEXT: vmovups %xmm3, 48(%rdx)
; X64-AVX512-NEXT: vmovups %xmm1, 16(%rdx)
; X64-AVX512-NEXT: vmovups %xmm2, 32(%rdx)
; X64-AVX512-NEXT: vmovups %xmm0, (%rdx)
+; X64-AVX512-NEXT: popq %rax
; X64-AVX512-NEXT: vzeroupper
; X64-AVX512-NEXT: retq
;
-; X86-SSE2-LABEL: lshr_64bytes:
+; X86-SSE2-LABEL: lshr_64bytes_qwordOff:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pushl %ebp
; X86-SSE2-NEXT: pushl %ebx
; X86-SSE2-NEXT: pushl %edi
; X86-SSE2-NEXT: pushl %esi
-; X86-SSE2-NEXT: subl $168, %esp
+; X86-SSE2-NEXT: subl $188, %esp
; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE2-NEXT: movl (%eax), %ecx
; X86-SSE2-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
@@ -1798,7 +15834,7 @@ define void @lshr_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; X86-SSE2-NEXT: movl 32(%eax), %ecx
; X86-SSE2-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-SSE2-NEXT: movl 36(%eax), %ecx
-; X86-SSE2-NEXT: movl %ecx, (%esp) # 4-byte Spill
+; X86-SSE2-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-SSE2-NEXT: movl 40(%eax), %ebp
; X86-SSE2-NEXT: movl 44(%eax), %ebx
; X86-SSE2-NEXT: movl 48(%eax), %edi
@@ -1807,13 +15843,17 @@ define void @lshr_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; X86-SSE2-NEXT: movl 60(%eax), %ecx
; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE2-NEXT: movl (%eax), %eax
+; X86-SSE2-NEXT: xorps %xmm0, %xmm0
+; X86-SSE2-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; X86-SSE2-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; X86-SSE2-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
; X86-SSE2-NEXT: movl %ecx, {{[0-9]+}}(%esp)
; X86-SSE2-NEXT: movl %edx, {{[0-9]+}}(%esp)
; X86-SSE2-NEXT: movl %esi, {{[0-9]+}}(%esp)
; X86-SSE2-NEXT: movl %edi, {{[0-9]+}}(%esp)
; X86-SSE2-NEXT: movl %ebx, {{[0-9]+}}(%esp)
; X86-SSE2-NEXT: movl %ebp, {{[0-9]+}}(%esp)
-; X86-SSE2-NEXT: movl (%esp), %ecx # 4-byte Reload
+; X86-SSE2-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-SSE2-NEXT: movl %ecx, {{[0-9]+}}(%esp)
; X86-SSE2-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-SSE2-NEXT: movl %ecx, {{[0-9]+}}(%esp)
@@ -1821,6 +15861,7 @@ define void @lshr_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; X86-SSE2-NEXT: movl %ecx, {{[0-9]+}}(%esp)
; X86-SSE2-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-SSE2-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-SSE2-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
; X86-SSE2-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-SSE2-NEXT: movl %ecx, {{[0-9]+}}(%esp)
; X86-SSE2-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
@@ -1833,49 +15874,33 @@ define void @lshr_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; X86-SSE2-NEXT: movl %ecx, {{[0-9]+}}(%esp)
; X86-SSE2-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-SSE2-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; X86-SSE2-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-SSE2-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-SSE2-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-SSE2-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-SSE2-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-SSE2-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-SSE2-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-SSE2-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-SSE2-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-SSE2-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-SSE2-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-SSE2-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-SSE2-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-SSE2-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-SSE2-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-SSE2-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-SSE2-NEXT: andl $63, %eax
-; X86-SSE2-NEXT: movl 40(%esp,%eax), %ecx
+; X86-SSE2-NEXT: andl $7, %eax
+; X86-SSE2-NEXT: movl 48(%esp,%eax,8), %ecx
; X86-SSE2-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-SSE2-NEXT: movl 44(%esp,%eax), %ecx
+; X86-SSE2-NEXT: movl 52(%esp,%eax,8), %ecx
; X86-SSE2-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-SSE2-NEXT: movl 52(%esp,%eax), %ecx
+; X86-SSE2-NEXT: movl 60(%esp,%eax,8), %ecx
; X86-SSE2-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-SSE2-NEXT: movl 48(%esp,%eax), %ecx
+; X86-SSE2-NEXT: movl 56(%esp,%eax,8), %ecx
; X86-SSE2-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-SSE2-NEXT: movl 60(%esp,%eax), %ecx
+; X86-SSE2-NEXT: movl 68(%esp,%eax,8), %ecx
; X86-SSE2-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-SSE2-NEXT: movl 56(%esp,%eax), %ecx
+; X86-SSE2-NEXT: movl 64(%esp,%eax,8), %ecx
; X86-SSE2-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-SSE2-NEXT: movl 68(%esp,%eax), %ecx
+; X86-SSE2-NEXT: movl 76(%esp,%eax,8), %ecx
; X86-SSE2-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-SSE2-NEXT: movl 64(%esp,%eax), %ecx
+; X86-SSE2-NEXT: movl 72(%esp,%eax,8), %ecx
; X86-SSE2-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-SSE2-NEXT: movl 76(%esp,%eax), %ecx
+; X86-SSE2-NEXT: movl 84(%esp,%eax,8), %ecx
; X86-SSE2-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-SSE2-NEXT: movl 72(%esp,%eax), %ecx
-; X86-SSE2-NEXT: movl %ecx, (%esp) # 4-byte Spill
-; X86-SSE2-NEXT: movl 84(%esp,%eax), %ebp
-; X86-SSE2-NEXT: movl 80(%esp,%eax), %ebx
-; X86-SSE2-NEXT: movl 92(%esp,%eax), %edi
-; X86-SSE2-NEXT: movl 88(%esp,%eax), %esi
-; X86-SSE2-NEXT: movl 100(%esp,%eax), %edx
-; X86-SSE2-NEXT: movl 96(%esp,%eax), %ecx
+; X86-SSE2-NEXT: movl 80(%esp,%eax,8), %ecx
+; X86-SSE2-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-SSE2-NEXT: movl 92(%esp,%eax,8), %ebp
+; X86-SSE2-NEXT: movl 88(%esp,%eax,8), %ebx
+; X86-SSE2-NEXT: movl 100(%esp,%eax,8), %edi
+; X86-SSE2-NEXT: movl 96(%esp,%eax,8), %esi
+; X86-SSE2-NEXT: movl 108(%esp,%eax,8), %edx
+; X86-SSE2-NEXT: movl 104(%esp,%eax,8), %ecx
; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE2-NEXT: movl %ecx, 56(%eax)
; X86-SSE2-NEXT: movl %edx, 60(%eax)
@@ -1883,7 +15908,7 @@ define void @lshr_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; X86-SSE2-NEXT: movl %edi, 52(%eax)
; X86-SSE2-NEXT: movl %ebx, 40(%eax)
; X86-SSE2-NEXT: movl %ebp, 44(%eax)
-; X86-SSE2-NEXT: movl (%esp), %ecx # 4-byte Reload
+; X86-SSE2-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-SSE2-NEXT: movl %ecx, 32(%eax)
; X86-SSE2-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-SSE2-NEXT: movl %ecx, 36(%eax)
@@ -1903,16 +15928,16 @@ define void @lshr_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; X86-SSE2-NEXT: movl %ecx, (%eax)
; X86-SSE2-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-SSE2-NEXT: movl %ecx, 4(%eax)
-; X86-SSE2-NEXT: addl $168, %esp
+; X86-SSE2-NEXT: addl $188, %esp
; X86-SSE2-NEXT: popl %esi
; X86-SSE2-NEXT: popl %edi
; X86-SSE2-NEXT: popl %ebx
; X86-SSE2-NEXT: popl %ebp
; X86-SSE2-NEXT: retl
;
-; X86-SSE42-LABEL: lshr_64bytes:
+; X86-SSE42-LABEL: lshr_64bytes_qwordOff:
; X86-SSE42: # %bb.0:
-; X86-SSE42-NEXT: subl $128, %esp
+; X86-SSE42-NEXT: subl $140, %esp
; X86-SSE42-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE42-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-SSE42-NEXT: movl {{[0-9]+}}(%esp), %edx
@@ -1922,29 +15947,29 @@ define void @lshr_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; X86-SSE42-NEXT: movups 48(%edx), %xmm3
; X86-SSE42-NEXT: movl (%ecx), %ecx
; X86-SSE42-NEXT: xorps %xmm4, %xmm4
-; X86-SSE42-NEXT: movups %xmm4, {{[0-9]+}}(%esp)
-; X86-SSE42-NEXT: movups %xmm4, {{[0-9]+}}(%esp)
-; X86-SSE42-NEXT: movups %xmm4, {{[0-9]+}}(%esp)
-; X86-SSE42-NEXT: movups %xmm4, {{[0-9]+}}(%esp)
-; X86-SSE42-NEXT: movups %xmm3, {{[0-9]+}}(%esp)
-; X86-SSE42-NEXT: movups %xmm2, {{[0-9]+}}(%esp)
-; X86-SSE42-NEXT: movups %xmm1, {{[0-9]+}}(%esp)
-; X86-SSE42-NEXT: movups %xmm0, (%esp)
-; X86-SSE42-NEXT: andl $63, %ecx
-; X86-SSE42-NEXT: movups (%esp,%ecx), %xmm0
-; X86-SSE42-NEXT: movups 16(%esp,%ecx), %xmm1
-; X86-SSE42-NEXT: movups 32(%esp,%ecx), %xmm2
-; X86-SSE42-NEXT: movups 48(%esp,%ecx), %xmm3
+; X86-SSE42-NEXT: movaps %xmm4, {{[0-9]+}}(%esp)
+; X86-SSE42-NEXT: movaps %xmm4, {{[0-9]+}}(%esp)
+; X86-SSE42-NEXT: movaps %xmm4, {{[0-9]+}}(%esp)
+; X86-SSE42-NEXT: movaps %xmm4, {{[0-9]+}}(%esp)
+; X86-SSE42-NEXT: movaps %xmm3, {{[0-9]+}}(%esp)
+; X86-SSE42-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-SSE42-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-SSE42-NEXT: movaps %xmm0, (%esp)
+; X86-SSE42-NEXT: andl $7, %ecx
+; X86-SSE42-NEXT: movups (%esp,%ecx,8), %xmm0
+; X86-SSE42-NEXT: movups 16(%esp,%ecx,8), %xmm1
+; X86-SSE42-NEXT: movups 32(%esp,%ecx,8), %xmm2
+; X86-SSE42-NEXT: movups 48(%esp,%ecx,8), %xmm3
; X86-SSE42-NEXT: movups %xmm3, 48(%eax)
; X86-SSE42-NEXT: movups %xmm2, 32(%eax)
; X86-SSE42-NEXT: movups %xmm1, 16(%eax)
; X86-SSE42-NEXT: movups %xmm0, (%eax)
-; X86-SSE42-NEXT: addl $128, %esp
+; X86-SSE42-NEXT: addl $140, %esp
; X86-SSE42-NEXT: retl
;
-; X86-AVX1-LABEL: lshr_64bytes:
+; X86-AVX1-LABEL: lshr_64bytes_qwordOff:
; X86-AVX1: # %bb.0:
-; X86-AVX1-NEXT: subl $128, %esp
+; X86-AVX1-NEXT: subl $140, %esp
; X86-AVX1-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-AVX1-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-AVX1-NEXT: movl {{[0-9]+}}(%esp), %edx
@@ -1956,22 +15981,22 @@ define void @lshr_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; X86-AVX1-NEXT: vmovups %ymm2, {{[0-9]+}}(%esp)
; X86-AVX1-NEXT: vmovups %ymm1, {{[0-9]+}}(%esp)
; X86-AVX1-NEXT: vmovups %ymm0, (%esp)
-; X86-AVX1-NEXT: andl $63, %ecx
-; X86-AVX1-NEXT: vmovups (%esp,%ecx), %xmm0
-; X86-AVX1-NEXT: vmovups 16(%esp,%ecx), %xmm1
-; X86-AVX1-NEXT: vmovups 32(%esp,%ecx), %xmm2
-; X86-AVX1-NEXT: vmovups 48(%esp,%ecx), %xmm3
+; X86-AVX1-NEXT: andl $7, %ecx
+; X86-AVX1-NEXT: vmovups (%esp,%ecx,8), %xmm0
+; X86-AVX1-NEXT: vmovups 16(%esp,%ecx,8), %xmm1
+; X86-AVX1-NEXT: vmovups 32(%esp,%ecx,8), %xmm2
+; X86-AVX1-NEXT: vmovups 48(%esp,%ecx,8), %xmm3
; X86-AVX1-NEXT: vmovups %xmm3, 48(%eax)
; X86-AVX1-NEXT: vmovups %xmm2, 32(%eax)
; X86-AVX1-NEXT: vmovups %xmm1, 16(%eax)
; X86-AVX1-NEXT: vmovups %xmm0, (%eax)
-; X86-AVX1-NEXT: addl $128, %esp
+; X86-AVX1-NEXT: addl $140, %esp
; X86-AVX1-NEXT: vzeroupper
; X86-AVX1-NEXT: retl
;
-; X86-AVX512-LABEL: lshr_64bytes:
+; X86-AVX512-LABEL: lshr_64bytes_qwordOff:
; X86-AVX512: # %bb.0:
-; X86-AVX512-NEXT: subl $128, %esp
+; X86-AVX512-NEXT: subl $140, %esp
; X86-AVX512-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-AVX512-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-AVX512-NEXT: movl {{[0-9]+}}(%esp), %edx
@@ -1980,27 +16005,3801 @@ define void @lshr_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; X86-AVX512-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X86-AVX512-NEXT: vmovups %zmm1, {{[0-9]+}}(%esp)
; X86-AVX512-NEXT: vmovups %zmm0, (%esp)
-; X86-AVX512-NEXT: andl $63, %ecx
-; X86-AVX512-NEXT: vmovups (%esp,%ecx), %xmm0
-; X86-AVX512-NEXT: vmovups 16(%esp,%ecx), %xmm1
-; X86-AVX512-NEXT: vmovups 32(%esp,%ecx), %xmm2
-; X86-AVX512-NEXT: vmovups 48(%esp,%ecx), %xmm3
+; X86-AVX512-NEXT: andl $7, %ecx
+; X86-AVX512-NEXT: vmovups (%esp,%ecx,8), %xmm0
+; X86-AVX512-NEXT: vmovups 16(%esp,%ecx,8), %xmm1
+; X86-AVX512-NEXT: vmovups 32(%esp,%ecx,8), %xmm2
+; X86-AVX512-NEXT: vmovups 48(%esp,%ecx,8), %xmm3
; X86-AVX512-NEXT: vmovups %xmm3, 48(%eax)
; X86-AVX512-NEXT: vmovups %xmm2, 32(%eax)
; X86-AVX512-NEXT: vmovups %xmm1, 16(%eax)
; X86-AVX512-NEXT: vmovups %xmm0, (%eax)
-; X86-AVX512-NEXT: addl $128, %esp
+; X86-AVX512-NEXT: addl $140, %esp
; X86-AVX512-NEXT: vzeroupper
; X86-AVX512-NEXT: retl
%src = load i512, ptr %src.ptr, align 1
- %byteOff = load i512, ptr %byteOff.ptr, align 1
- %bitOff = shl i512 %byteOff, 3
+ %qwordOff = load i512, ptr %qwordOff.ptr, align 1
+ %bitOff = shl i512 %qwordOff, 6
%res = lshr i512 %src, %bitOff
store i512 %res, ptr %dst, align 1
ret void
}
+
define void @shl_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
-; X64-SSE2-LABEL: shl_64bytes:
+; FALLBACK0-LABEL: shl_64bytes:
+; FALLBACK0: # %bb.0:
+; FALLBACK0-NEXT: pushq %r15
+; FALLBACK0-NEXT: pushq %r14
+; FALLBACK0-NEXT: pushq %r13
+; FALLBACK0-NEXT: pushq %r12
+; FALLBACK0-NEXT: pushq %rbx
+; FALLBACK0-NEXT: movq (%rdi), %rax
+; FALLBACK0-NEXT: movq 8(%rdi), %rcx
+; FALLBACK0-NEXT: movq 16(%rdi), %r8
+; FALLBACK0-NEXT: movq 24(%rdi), %r9
+; FALLBACK0-NEXT: movq 32(%rdi), %r10
+; FALLBACK0-NEXT: movq 40(%rdi), %r11
+; FALLBACK0-NEXT: movq 48(%rdi), %rbx
+; FALLBACK0-NEXT: movq 56(%rdi), %rdi
+; FALLBACK0-NEXT: movl (%rsi), %esi
+; FALLBACK0-NEXT: xorps %xmm0, %xmm0
+; FALLBACK0-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK0-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK0-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK0-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK0-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK0-NEXT: movq %rbx, -{{[0-9]+}}(%rsp)
+; FALLBACK0-NEXT: movq %r11, -{{[0-9]+}}(%rsp)
+; FALLBACK0-NEXT: movq %r10, -{{[0-9]+}}(%rsp)
+; FALLBACK0-NEXT: movq %r9, -{{[0-9]+}}(%rsp)
+; FALLBACK0-NEXT: movq %r8, -{{[0-9]+}}(%rsp)
+; FALLBACK0-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
+; FALLBACK0-NEXT: movq %rax, -{{[0-9]+}}(%rsp)
+; FALLBACK0-NEXT: leal (,%rsi,8), %eax
+; FALLBACK0-NEXT: andl $56, %eax
+; FALLBACK0-NEXT: andl $56, %esi
+; FALLBACK0-NEXT: negl %esi
+; FALLBACK0-NEXT: movslq %esi, %rbx
+; FALLBACK0-NEXT: movq -64(%rsp,%rbx), %r8
+; FALLBACK0-NEXT: movq -56(%rsp,%rbx), %rdi
+; FALLBACK0-NEXT: movq %rdi, %r10
+; FALLBACK0-NEXT: movl %eax, %ecx
+; FALLBACK0-NEXT: shlq %cl, %r10
+; FALLBACK0-NEXT: movl %eax, %esi
+; FALLBACK0-NEXT: notb %sil
+; FALLBACK0-NEXT: movq %r8, %r9
+; FALLBACK0-NEXT: shrq %r9
+; FALLBACK0-NEXT: movl %esi, %ecx
+; FALLBACK0-NEXT: shrq %cl, %r9
+; FALLBACK0-NEXT: orq %r10, %r9
+; FALLBACK0-NEXT: movq -40(%rsp,%rbx), %r10
+; FALLBACK0-NEXT: movq %r10, %r14
+; FALLBACK0-NEXT: movl %eax, %ecx
+; FALLBACK0-NEXT: shlq %cl, %r14
+; FALLBACK0-NEXT: movq -48(%rsp,%rbx), %r15
+; FALLBACK0-NEXT: movq %r15, %r11
+; FALLBACK0-NEXT: shrq %r11
+; FALLBACK0-NEXT: movl %esi, %ecx
+; FALLBACK0-NEXT: shrq %cl, %r11
+; FALLBACK0-NEXT: orq %r14, %r11
+; FALLBACK0-NEXT: movl %eax, %ecx
+; FALLBACK0-NEXT: shlq %cl, %r15
+; FALLBACK0-NEXT: shrq %rdi
+; FALLBACK0-NEXT: movl %esi, %ecx
+; FALLBACK0-NEXT: shrq %cl, %rdi
+; FALLBACK0-NEXT: orq %r15, %rdi
+; FALLBACK0-NEXT: movq -24(%rsp,%rbx), %r14
+; FALLBACK0-NEXT: movq %r14, %r12
+; FALLBACK0-NEXT: movl %eax, %ecx
+; FALLBACK0-NEXT: shlq %cl, %r12
+; FALLBACK0-NEXT: movq -32(%rsp,%rbx), %r13
+; FALLBACK0-NEXT: movq %r13, %r15
+; FALLBACK0-NEXT: shrq %r15
+; FALLBACK0-NEXT: movl %esi, %ecx
+; FALLBACK0-NEXT: shrq %cl, %r15
+; FALLBACK0-NEXT: orq %r12, %r15
+; FALLBACK0-NEXT: movl %eax, %ecx
+; FALLBACK0-NEXT: shlq %cl, %r13
+; FALLBACK0-NEXT: shrq %r10
+; FALLBACK0-NEXT: movl %esi, %ecx
+; FALLBACK0-NEXT: shrq %cl, %r10
+; FALLBACK0-NEXT: orq %r13, %r10
+; FALLBACK0-NEXT: movq -8(%rsp,%rbx), %r12
+; FALLBACK0-NEXT: movl %eax, %ecx
+; FALLBACK0-NEXT: shlq %cl, %r12
+; FALLBACK0-NEXT: movq -16(%rsp,%rbx), %rbx
+; FALLBACK0-NEXT: movq %rbx, %r13
+; FALLBACK0-NEXT: shrq %r13
+; FALLBACK0-NEXT: movl %esi, %ecx
+; FALLBACK0-NEXT: shrq %cl, %r13
+; FALLBACK0-NEXT: orq %r12, %r13
+; FALLBACK0-NEXT: movl %eax, %ecx
+; FALLBACK0-NEXT: shlq %cl, %rbx
+; FALLBACK0-NEXT: shrq %r14
+; FALLBACK0-NEXT: movl %esi, %ecx
+; FALLBACK0-NEXT: shrq %cl, %r14
+; FALLBACK0-NEXT: orq %rbx, %r14
+; FALLBACK0-NEXT: movl %eax, %ecx
+; FALLBACK0-NEXT: shlq %cl, %r8
+; FALLBACK0-NEXT: movq %r8, (%rdx)
+; FALLBACK0-NEXT: movq %r14, 48(%rdx)
+; FALLBACK0-NEXT: movq %r13, 56(%rdx)
+; FALLBACK0-NEXT: movq %r10, 32(%rdx)
+; FALLBACK0-NEXT: movq %r15, 40(%rdx)
+; FALLBACK0-NEXT: movq %rdi, 16(%rdx)
+; FALLBACK0-NEXT: movq %r11, 24(%rdx)
+; FALLBACK0-NEXT: movq %r9, 8(%rdx)
+; FALLBACK0-NEXT: popq %rbx
+; FALLBACK0-NEXT: popq %r12
+; FALLBACK0-NEXT: popq %r13
+; FALLBACK0-NEXT: popq %r14
+; FALLBACK0-NEXT: popq %r15
+; FALLBACK0-NEXT: retq
+;
+; FALLBACK1-LABEL: shl_64bytes:
+; FALLBACK1: # %bb.0:
+; FALLBACK1-NEXT: pushq %r14
+; FALLBACK1-NEXT: pushq %rbx
+; FALLBACK1-NEXT: pushq %rax
+; FALLBACK1-NEXT: movq (%rdi), %rax
+; FALLBACK1-NEXT: movq 8(%rdi), %rcx
+; FALLBACK1-NEXT: movq 16(%rdi), %r8
+; FALLBACK1-NEXT: movq 24(%rdi), %r9
+; FALLBACK1-NEXT: movq 32(%rdi), %r10
+; FALLBACK1-NEXT: movq 40(%rdi), %r11
+; FALLBACK1-NEXT: movq 48(%rdi), %rbx
+; FALLBACK1-NEXT: movq 56(%rdi), %rdi
+; FALLBACK1-NEXT: movl (%rsi), %esi
+; FALLBACK1-NEXT: xorps %xmm0, %xmm0
+; FALLBACK1-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK1-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK1-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK1-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK1-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK1-NEXT: movq %rbx, -{{[0-9]+}}(%rsp)
+; FALLBACK1-NEXT: movq %r11, -{{[0-9]+}}(%rsp)
+; FALLBACK1-NEXT: movq %r10, -{{[0-9]+}}(%rsp)
+; FALLBACK1-NEXT: movq %r9, -{{[0-9]+}}(%rsp)
+; FALLBACK1-NEXT: movq %r8, -{{[0-9]+}}(%rsp)
+; FALLBACK1-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
+; FALLBACK1-NEXT: movq %rax, -{{[0-9]+}}(%rsp)
+; FALLBACK1-NEXT: leal (,%rsi,8), %ecx
+; FALLBACK1-NEXT: andl $56, %ecx
+; FALLBACK1-NEXT: andl $56, %esi
+; FALLBACK1-NEXT: negl %esi
+; FALLBACK1-NEXT: movslq %esi, %r9
+; FALLBACK1-NEXT: movq -48(%rsp,%r9), %rax
+; FALLBACK1-NEXT: movq -40(%rsp,%r9), %r10
+; FALLBACK1-NEXT: movq %r10, %rsi
+; FALLBACK1-NEXT: shldq %cl, %rax, %rsi
+; FALLBACK1-NEXT: movq -64(%rsp,%r9), %r8
+; FALLBACK1-NEXT: movq -56(%rsp,%r9), %rdi
+; FALLBACK1-NEXT: shldq %cl, %rdi, %rax
+; FALLBACK1-NEXT: movq -32(%rsp,%r9), %r11
+; FALLBACK1-NEXT: movq -24(%rsp,%r9), %rbx
+; FALLBACK1-NEXT: movq %rbx, %r14
+; FALLBACK1-NEXT: shldq %cl, %r11, %r14
+; FALLBACK1-NEXT: shldq %cl, %r10, %r11
+; FALLBACK1-NEXT: movq -16(%rsp,%r9), %r10
+; FALLBACK1-NEXT: movq -8(%rsp,%r9), %r9
+; FALLBACK1-NEXT: shldq %cl, %r10, %r9
+; FALLBACK1-NEXT: shldq %cl, %rbx, %r10
+; FALLBACK1-NEXT: shldq %cl, %r8, %rdi
+; FALLBACK1-NEXT: # kill: def $cl killed $cl killed $ecx
+; FALLBACK1-NEXT: shlq %cl, %r8
+; FALLBACK1-NEXT: movq %r10, 48(%rdx)
+; FALLBACK1-NEXT: movq %r9, 56(%rdx)
+; FALLBACK1-NEXT: movq %r11, 32(%rdx)
+; FALLBACK1-NEXT: movq %r14, 40(%rdx)
+; FALLBACK1-NEXT: movq %rax, 16(%rdx)
+; FALLBACK1-NEXT: movq %rsi, 24(%rdx)
+; FALLBACK1-NEXT: movq %r8, (%rdx)
+; FALLBACK1-NEXT: movq %rdi, 8(%rdx)
+; FALLBACK1-NEXT: addq $8, %rsp
+; FALLBACK1-NEXT: popq %rbx
+; FALLBACK1-NEXT: popq %r14
+; FALLBACK1-NEXT: retq
+;
+; FALLBACK2-LABEL: shl_64bytes:
+; FALLBACK2: # %bb.0:
+; FALLBACK2-NEXT: pushq %rbp
+; FALLBACK2-NEXT: pushq %r15
+; FALLBACK2-NEXT: pushq %r14
+; FALLBACK2-NEXT: pushq %r13
+; FALLBACK2-NEXT: pushq %r12
+; FALLBACK2-NEXT: pushq %rbx
+; FALLBACK2-NEXT: pushq %rax
+; FALLBACK2-NEXT: movq (%rdi), %rax
+; FALLBACK2-NEXT: movq 8(%rdi), %rcx
+; FALLBACK2-NEXT: movq 16(%rdi), %r8
+; FALLBACK2-NEXT: movq 24(%rdi), %r9
+; FALLBACK2-NEXT: movq 32(%rdi), %r10
+; FALLBACK2-NEXT: movq 40(%rdi), %r11
+; FALLBACK2-NEXT: movq 48(%rdi), %rbx
+; FALLBACK2-NEXT: movq 56(%rdi), %rdi
+; FALLBACK2-NEXT: movl (%rsi), %esi
+; FALLBACK2-NEXT: xorps %xmm0, %xmm0
+; FALLBACK2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK2-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK2-NEXT: movq %rbx, -{{[0-9]+}}(%rsp)
+; FALLBACK2-NEXT: movq %r11, -{{[0-9]+}}(%rsp)
+; FALLBACK2-NEXT: movq %r10, -{{[0-9]+}}(%rsp)
+; FALLBACK2-NEXT: movq %r9, -{{[0-9]+}}(%rsp)
+; FALLBACK2-NEXT: movq %r8, -{{[0-9]+}}(%rsp)
+; FALLBACK2-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
+; FALLBACK2-NEXT: movq %rax, -{{[0-9]+}}(%rsp)
+; FALLBACK2-NEXT: leal (,%rsi,8), %eax
+; FALLBACK2-NEXT: andl $56, %eax
+; FALLBACK2-NEXT: andl $56, %esi
+; FALLBACK2-NEXT: negl %esi
+; FALLBACK2-NEXT: movslq %esi, %rsi
+; FALLBACK2-NEXT: movq -64(%rsp,%rsi), %r10
+; FALLBACK2-NEXT: movq -56(%rsp,%rsi), %rcx
+; FALLBACK2-NEXT: shlxq %rax, %rcx, %r9
+; FALLBACK2-NEXT: movq -40(%rsp,%rsi), %rdi
+; FALLBACK2-NEXT: shlxq %rax, %rdi, %r11
+; FALLBACK2-NEXT: movq -48(%rsp,%rsi), %r14
+; FALLBACK2-NEXT: shlxq %rax, %r14, %rbx
+; FALLBACK2-NEXT: movq -24(%rsp,%rsi), %r8
+; FALLBACK2-NEXT: shlxq %rax, %r8, %r15
+; FALLBACK2-NEXT: shlxq %rax, %r10, %r12
+; FALLBACK2-NEXT: movl %eax, %r13d
+; FALLBACK2-NEXT: notb %r13b
+; FALLBACK2-NEXT: shrq %r10
+; FALLBACK2-NEXT: shrxq %r13, %r10, %r10
+; FALLBACK2-NEXT: orq %r9, %r10
+; FALLBACK2-NEXT: movq -32(%rsp,%rsi), %r9
+; FALLBACK2-NEXT: shlxq %rax, %r9, %rbp
+; FALLBACK2-NEXT: shrq %r14
+; FALLBACK2-NEXT: shrxq %r13, %r14, %r14
+; FALLBACK2-NEXT: orq %r11, %r14
+; FALLBACK2-NEXT: shlxq %rax, -8(%rsp,%rsi), %r11
+; FALLBACK2-NEXT: movq -16(%rsp,%rsi), %rsi
+; FALLBACK2-NEXT: shlxq %rax, %rsi, %rax
+; FALLBACK2-NEXT: shrq %rcx
+; FALLBACK2-NEXT: shrxq %r13, %rcx, %rcx
+; FALLBACK2-NEXT: orq %rbx, %rcx
+; FALLBACK2-NEXT: shrq %r9
+; FALLBACK2-NEXT: shrxq %r13, %r9, %r9
+; FALLBACK2-NEXT: orq %r15, %r9
+; FALLBACK2-NEXT: shrq %rdi
+; FALLBACK2-NEXT: shrxq %r13, %rdi, %rdi
+; FALLBACK2-NEXT: orq %rbp, %rdi
+; FALLBACK2-NEXT: shrq %rsi
+; FALLBACK2-NEXT: shrxq %r13, %rsi, %rsi
+; FALLBACK2-NEXT: orq %r11, %rsi
+; FALLBACK2-NEXT: shrq %r8
+; FALLBACK2-NEXT: shrxq %r13, %r8, %r8
+; FALLBACK2-NEXT: orq %rax, %r8
+; FALLBACK2-NEXT: movq %r12, (%rdx)
+; FALLBACK2-NEXT: movq %r8, 48(%rdx)
+; FALLBACK2-NEXT: movq %rsi, 56(%rdx)
+; FALLBACK2-NEXT: movq %rdi, 32(%rdx)
+; FALLBACK2-NEXT: movq %r9, 40(%rdx)
+; FALLBACK2-NEXT: movq %rcx, 16(%rdx)
+; FALLBACK2-NEXT: movq %r14, 24(%rdx)
+; FALLBACK2-NEXT: movq %r10, 8(%rdx)
+; FALLBACK2-NEXT: addq $8, %rsp
+; FALLBACK2-NEXT: popq %rbx
+; FALLBACK2-NEXT: popq %r12
+; FALLBACK2-NEXT: popq %r13
+; FALLBACK2-NEXT: popq %r14
+; FALLBACK2-NEXT: popq %r15
+; FALLBACK2-NEXT: popq %rbp
+; FALLBACK2-NEXT: retq
+;
+; FALLBACK3-LABEL: shl_64bytes:
+; FALLBACK3: # %bb.0:
+; FALLBACK3-NEXT: pushq %r14
+; FALLBACK3-NEXT: pushq %rbx
+; FALLBACK3-NEXT: pushq %rax
+; FALLBACK3-NEXT: movq (%rdi), %rax
+; FALLBACK3-NEXT: movq 8(%rdi), %rcx
+; FALLBACK3-NEXT: movq 16(%rdi), %r8
+; FALLBACK3-NEXT: movq 24(%rdi), %r9
+; FALLBACK3-NEXT: movq 32(%rdi), %r10
+; FALLBACK3-NEXT: movq 40(%rdi), %r11
+; FALLBACK3-NEXT: movq 48(%rdi), %rbx
+; FALLBACK3-NEXT: movq 56(%rdi), %rdi
+; FALLBACK3-NEXT: movl (%rsi), %esi
+; FALLBACK3-NEXT: xorps %xmm0, %xmm0
+; FALLBACK3-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK3-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK3-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK3-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK3-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK3-NEXT: movq %rbx, -{{[0-9]+}}(%rsp)
+; FALLBACK3-NEXT: movq %r11, -{{[0-9]+}}(%rsp)
+; FALLBACK3-NEXT: movq %r10, -{{[0-9]+}}(%rsp)
+; FALLBACK3-NEXT: movq %r9, -{{[0-9]+}}(%rsp)
+; FALLBACK3-NEXT: movq %r8, -{{[0-9]+}}(%rsp)
+; FALLBACK3-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
+; FALLBACK3-NEXT: movq %rax, -{{[0-9]+}}(%rsp)
+; FALLBACK3-NEXT: leal (,%rsi,8), %ecx
+; FALLBACK3-NEXT: andl $56, %ecx
+; FALLBACK3-NEXT: andl $56, %esi
+; FALLBACK3-NEXT: negl %esi
+; FALLBACK3-NEXT: movslq %esi, %r8
+; FALLBACK3-NEXT: movq -48(%rsp,%r8), %rax
+; FALLBACK3-NEXT: movq -40(%rsp,%r8), %r9
+; FALLBACK3-NEXT: movq %r9, %rsi
+; FALLBACK3-NEXT: shldq %cl, %rax, %rsi
+; FALLBACK3-NEXT: movq -64(%rsp,%r8), %r10
+; FALLBACK3-NEXT: movq -56(%rsp,%r8), %rdi
+; FALLBACK3-NEXT: shldq %cl, %rdi, %rax
+; FALLBACK3-NEXT: movq -32(%rsp,%r8), %r11
+; FALLBACK3-NEXT: movq -24(%rsp,%r8), %rbx
+; FALLBACK3-NEXT: movq %rbx, %r14
+; FALLBACK3-NEXT: shldq %cl, %r11, %r14
+; FALLBACK3-NEXT: shldq %cl, %r9, %r11
+; FALLBACK3-NEXT: movq -16(%rsp,%r8), %r9
+; FALLBACK3-NEXT: movq -8(%rsp,%r8), %r8
+; FALLBACK3-NEXT: shldq %cl, %r9, %r8
+; FALLBACK3-NEXT: shldq %cl, %rbx, %r9
+; FALLBACK3-NEXT: shldq %cl, %r10, %rdi
+; FALLBACK3-NEXT: shlxq %rcx, %r10, %rcx
+; FALLBACK3-NEXT: movq %r9, 48(%rdx)
+; FALLBACK3-NEXT: movq %r8, 56(%rdx)
+; FALLBACK3-NEXT: movq %r11, 32(%rdx)
+; FALLBACK3-NEXT: movq %r14, 40(%rdx)
+; FALLBACK3-NEXT: movq %rax, 16(%rdx)
+; FALLBACK3-NEXT: movq %rsi, 24(%rdx)
+; FALLBACK3-NEXT: movq %rcx, (%rdx)
+; FALLBACK3-NEXT: movq %rdi, 8(%rdx)
+; FALLBACK3-NEXT: addq $8, %rsp
+; FALLBACK3-NEXT: popq %rbx
+; FALLBACK3-NEXT: popq %r14
+; FALLBACK3-NEXT: retq
+;
+; FALLBACK4-LABEL: shl_64bytes:
+; FALLBACK4: # %bb.0:
+; FALLBACK4-NEXT: pushq %r15
+; FALLBACK4-NEXT: pushq %r14
+; FALLBACK4-NEXT: pushq %r13
+; FALLBACK4-NEXT: pushq %r12
+; FALLBACK4-NEXT: pushq %rbx
+; FALLBACK4-NEXT: movups (%rdi), %xmm0
+; FALLBACK4-NEXT: movups 16(%rdi), %xmm1
+; FALLBACK4-NEXT: movups 32(%rdi), %xmm2
+; FALLBACK4-NEXT: movups 48(%rdi), %xmm3
+; FALLBACK4-NEXT: movl (%rsi), %ecx
+; FALLBACK4-NEXT: xorps %xmm4, %xmm4
+; FALLBACK4-NEXT: movaps %xmm4, -{{[0-9]+}}(%rsp)
+; FALLBACK4-NEXT: movaps %xmm4, -{{[0-9]+}}(%rsp)
+; FALLBACK4-NEXT: movaps %xmm4, -{{[0-9]+}}(%rsp)
+; FALLBACK4-NEXT: movaps %xmm4, -{{[0-9]+}}(%rsp)
+; FALLBACK4-NEXT: movaps %xmm3, -{{[0-9]+}}(%rsp)
+; FALLBACK4-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; FALLBACK4-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; FALLBACK4-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK4-NEXT: leal (,%rcx,8), %eax
+; FALLBACK4-NEXT: andl $56, %eax
+; FALLBACK4-NEXT: andl $56, %ecx
+; FALLBACK4-NEXT: negl %ecx
+; FALLBACK4-NEXT: movslq %ecx, %r9
+; FALLBACK4-NEXT: movq -24(%rsp,%r9), %rdi
+; FALLBACK4-NEXT: movq %rdi, %r10
+; FALLBACK4-NEXT: movl %eax, %ecx
+; FALLBACK4-NEXT: shlq %cl, %r10
+; FALLBACK4-NEXT: movl %eax, %esi
+; FALLBACK4-NEXT: notb %sil
+; FALLBACK4-NEXT: movq -32(%rsp,%r9), %r11
+; FALLBACK4-NEXT: movq %r11, %r8
+; FALLBACK4-NEXT: shrq %r8
+; FALLBACK4-NEXT: movl %esi, %ecx
+; FALLBACK4-NEXT: shrq %cl, %r8
+; FALLBACK4-NEXT: orq %r10, %r8
+; FALLBACK4-NEXT: movl %eax, %ecx
+; FALLBACK4-NEXT: shlq %cl, %r11
+; FALLBACK4-NEXT: movq -40(%rsp,%r9), %rbx
+; FALLBACK4-NEXT: movq %rbx, %r10
+; FALLBACK4-NEXT: shrq %r10
+; FALLBACK4-NEXT: movl %esi, %ecx
+; FALLBACK4-NEXT: shrq %cl, %r10
+; FALLBACK4-NEXT: orq %r11, %r10
+; FALLBACK4-NEXT: movl %eax, %ecx
+; FALLBACK4-NEXT: shlq %cl, %rbx
+; FALLBACK4-NEXT: movq -48(%rsp,%r9), %r15
+; FALLBACK4-NEXT: movq %r15, %r11
+; FALLBACK4-NEXT: shrq %r11
+; FALLBACK4-NEXT: movl %esi, %ecx
+; FALLBACK4-NEXT: shrq %cl, %r11
+; FALLBACK4-NEXT: orq %rbx, %r11
+; FALLBACK4-NEXT: movl %eax, %ecx
+; FALLBACK4-NEXT: shlq %cl, %r15
+; FALLBACK4-NEXT: movq -64(%rsp,%r9), %r14
+; FALLBACK4-NEXT: movq -56(%rsp,%r9), %r12
+; FALLBACK4-NEXT: movq %r12, %rbx
+; FALLBACK4-NEXT: shrq %rbx
+; FALLBACK4-NEXT: movl %esi, %ecx
+; FALLBACK4-NEXT: shrq %cl, %rbx
+; FALLBACK4-NEXT: orq %r15, %rbx
+; FALLBACK4-NEXT: movl %eax, %ecx
+; FALLBACK4-NEXT: shlq %cl, %r12
+; FALLBACK4-NEXT: movq %r14, %r15
+; FALLBACK4-NEXT: shrq %r15
+; FALLBACK4-NEXT: movl %esi, %ecx
+; FALLBACK4-NEXT: shrq %cl, %r15
+; FALLBACK4-NEXT: orq %r12, %r15
+; FALLBACK4-NEXT: movq -16(%rsp,%r9), %r12
+; FALLBACK4-NEXT: movq %r12, %r13
+; FALLBACK4-NEXT: movl %eax, %ecx
+; FALLBACK4-NEXT: shlq %cl, %r13
+; FALLBACK4-NEXT: shrq %rdi
+; FALLBACK4-NEXT: movl %esi, %ecx
+; FALLBACK4-NEXT: shrq %cl, %rdi
+; FALLBACK4-NEXT: orq %r13, %rdi
+; FALLBACK4-NEXT: movq -8(%rsp,%r9), %r9
+; FALLBACK4-NEXT: movl %eax, %ecx
+; FALLBACK4-NEXT: shlq %cl, %r9
+; FALLBACK4-NEXT: shrq %r12
+; FALLBACK4-NEXT: movl %esi, %ecx
+; FALLBACK4-NEXT: shrq %cl, %r12
+; FALLBACK4-NEXT: orq %r9, %r12
+; FALLBACK4-NEXT: movl %eax, %ecx
+; FALLBACK4-NEXT: shlq %cl, %r14
+; FALLBACK4-NEXT: movq %r14, (%rdx)
+; FALLBACK4-NEXT: movq %r12, 56(%rdx)
+; FALLBACK4-NEXT: movq %rdi, 48(%rdx)
+; FALLBACK4-NEXT: movq %r15, 8(%rdx)
+; FALLBACK4-NEXT: movq %rbx, 16(%rdx)
+; FALLBACK4-NEXT: movq %r11, 24(%rdx)
+; FALLBACK4-NEXT: movq %r10, 32(%rdx)
+; FALLBACK4-NEXT: movq %r8, 40(%rdx)
+; FALLBACK4-NEXT: popq %rbx
+; FALLBACK4-NEXT: popq %r12
+; FALLBACK4-NEXT: popq %r13
+; FALLBACK4-NEXT: popq %r14
+; FALLBACK4-NEXT: popq %r15
+; FALLBACK4-NEXT: retq
+;
+; FALLBACK5-LABEL: shl_64bytes:
+; FALLBACK5: # %bb.0:
+; FALLBACK5-NEXT: pushq %r15
+; FALLBACK5-NEXT: pushq %r14
+; FALLBACK5-NEXT: pushq %rbx
+; FALLBACK5-NEXT: movups (%rdi), %xmm0
+; FALLBACK5-NEXT: movups 16(%rdi), %xmm1
+; FALLBACK5-NEXT: movups 32(%rdi), %xmm2
+; FALLBACK5-NEXT: movups 48(%rdi), %xmm3
+; FALLBACK5-NEXT: movl (%rsi), %eax
+; FALLBACK5-NEXT: xorps %xmm4, %xmm4
+; FALLBACK5-NEXT: movaps %xmm4, -{{[0-9]+}}(%rsp)
+; FALLBACK5-NEXT: movaps %xmm4, -{{[0-9]+}}(%rsp)
+; FALLBACK5-NEXT: movaps %xmm4, -{{[0-9]+}}(%rsp)
+; FALLBACK5-NEXT: movaps %xmm4, -{{[0-9]+}}(%rsp)
+; FALLBACK5-NEXT: movaps %xmm3, -{{[0-9]+}}(%rsp)
+; FALLBACK5-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; FALLBACK5-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; FALLBACK5-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK5-NEXT: leal (,%rax,8), %ecx
+; FALLBACK5-NEXT: andl $56, %ecx
+; FALLBACK5-NEXT: andl $56, %eax
+; FALLBACK5-NEXT: negl %eax
+; FALLBACK5-NEXT: movslq %eax, %r8
+; FALLBACK5-NEXT: movq -32(%rsp,%r8), %rax
+; FALLBACK5-NEXT: movq -24(%rsp,%r8), %r9
+; FALLBACK5-NEXT: movq %r9, %rsi
+; FALLBACK5-NEXT: shldq %cl, %rax, %rsi
+; FALLBACK5-NEXT: movq -40(%rsp,%r8), %rdi
+; FALLBACK5-NEXT: shldq %cl, %rdi, %rax
+; FALLBACK5-NEXT: movq -48(%rsp,%r8), %r10
+; FALLBACK5-NEXT: shldq %cl, %r10, %rdi
+; FALLBACK5-NEXT: movq -64(%rsp,%r8), %r11
+; FALLBACK5-NEXT: movq -56(%rsp,%r8), %rbx
+; FALLBACK5-NEXT: shldq %cl, %rbx, %r10
+; FALLBACK5-NEXT: movq -16(%rsp,%r8), %r14
+; FALLBACK5-NEXT: movq %r14, %r15
+; FALLBACK5-NEXT: shldq %cl, %r9, %r15
+; FALLBACK5-NEXT: movq -8(%rsp,%r8), %r8
+; FALLBACK5-NEXT: shldq %cl, %r14, %r8
+; FALLBACK5-NEXT: movq %r11, %r9
+; FALLBACK5-NEXT: shlq %cl, %r9
+; FALLBACK5-NEXT: # kill: def $cl killed $cl killed $ecx
+; FALLBACK5-NEXT: shldq %cl, %r11, %rbx
+; FALLBACK5-NEXT: movq %r8, 56(%rdx)
+; FALLBACK5-NEXT: movq %r15, 48(%rdx)
+; FALLBACK5-NEXT: movq %rbx, 8(%rdx)
+; FALLBACK5-NEXT: movq %r10, 16(%rdx)
+; FALLBACK5-NEXT: movq %rdi, 24(%rdx)
+; FALLBACK5-NEXT: movq %rax, 32(%rdx)
+; FALLBACK5-NEXT: movq %rsi, 40(%rdx)
+; FALLBACK5-NEXT: movq %r9, (%rdx)
+; FALLBACK5-NEXT: popq %rbx
+; FALLBACK5-NEXT: popq %r14
+; FALLBACK5-NEXT: popq %r15
+; FALLBACK5-NEXT: retq
+;
+; FALLBACK6-LABEL: shl_64bytes:
+; FALLBACK6: # %bb.0:
+; FALLBACK6-NEXT: pushq %rbp
+; FALLBACK6-NEXT: pushq %r15
+; FALLBACK6-NEXT: pushq %r14
+; FALLBACK6-NEXT: pushq %r13
+; FALLBACK6-NEXT: pushq %r12
+; FALLBACK6-NEXT: pushq %rbx
+; FALLBACK6-NEXT: subq $24, %rsp
+; FALLBACK6-NEXT: movups (%rdi), %xmm0
+; FALLBACK6-NEXT: movups 16(%rdi), %xmm1
+; FALLBACK6-NEXT: movups 32(%rdi), %xmm2
+; FALLBACK6-NEXT: movups 48(%rdi), %xmm3
+; FALLBACK6-NEXT: movl (%rsi), %eax
+; FALLBACK6-NEXT: xorps %xmm4, %xmm4
+; FALLBACK6-NEXT: movaps %xmm4, -{{[0-9]+}}(%rsp)
+; FALLBACK6-NEXT: movaps %xmm4, -{{[0-9]+}}(%rsp)
+; FALLBACK6-NEXT: movaps %xmm4, -{{[0-9]+}}(%rsp)
+; FALLBACK6-NEXT: movaps %xmm4, -{{[0-9]+}}(%rsp)
+; FALLBACK6-NEXT: movaps %xmm3, (%rsp)
+; FALLBACK6-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; FALLBACK6-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; FALLBACK6-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK6-NEXT: leal (,%rax,8), %ecx
+; FALLBACK6-NEXT: andl $56, %ecx
+; FALLBACK6-NEXT: andl $56, %eax
+; FALLBACK6-NEXT: negl %eax
+; FALLBACK6-NEXT: movslq %eax, %rsi
+; FALLBACK6-NEXT: movq -8(%rsp,%rsi), %rax
+; FALLBACK6-NEXT: shlxq %rcx, %rax, %r12
+; FALLBACK6-NEXT: movq -16(%rsp,%rsi), %rdi
+; FALLBACK6-NEXT: shlxq %rcx, %rdi, %r15
+; FALLBACK6-NEXT: movq -24(%rsp,%rsi), %r13
+; FALLBACK6-NEXT: shlxq %rcx, %r13, %r8
+; FALLBACK6-NEXT: movq %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; FALLBACK6-NEXT: movq -32(%rsp,%rsi), %r11
+; FALLBACK6-NEXT: shlxq %rcx, %r11, %r10
+; FALLBACK6-NEXT: movq -40(%rsp,%rsi), %r14
+; FALLBACK6-NEXT: shlxq %rcx, %r14, %rbx
+; FALLBACK6-NEXT: movl %ecx, %r9d
+; FALLBACK6-NEXT: notb %r9b
+; FALLBACK6-NEXT: shrq %rdi
+; FALLBACK6-NEXT: shrxq %r9, %rdi, %rdi
+; FALLBACK6-NEXT: orq %r12, %rdi
+; FALLBACK6-NEXT: movq (%rsp,%rsi), %rbp
+; FALLBACK6-NEXT: shlxq %rcx, %rbp, %r8
+; FALLBACK6-NEXT: shrq %r13
+; FALLBACK6-NEXT: shrxq %r9, %r13, %r12
+; FALLBACK6-NEXT: orq %r15, %r12
+; FALLBACK6-NEXT: shlxq %rcx, 8(%rsp,%rsi), %r15
+; FALLBACK6-NEXT: movq -48(%rsp,%rsi), %rsi
+; FALLBACK6-NEXT: shlxq %rcx, %rsi, %rcx
+; FALLBACK6-NEXT: shrq %r11
+; FALLBACK6-NEXT: shrxq %r9, %r11, %r11
+; FALLBACK6-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Folded Reload
+; FALLBACK6-NEXT: shrq %r14
+; FALLBACK6-NEXT: shrxq %r9, %r14, %r14
+; FALLBACK6-NEXT: orq %r10, %r14
+; FALLBACK6-NEXT: shrq %rsi
+; FALLBACK6-NEXT: shrxq %r9, %rsi, %rsi
+; FALLBACK6-NEXT: orq %rbx, %rsi
+; FALLBACK6-NEXT: shrq %rax
+; FALLBACK6-NEXT: shrxq %r9, %rax, %rax
+; FALLBACK6-NEXT: orq %r8, %rax
+; FALLBACK6-NEXT: shrq %rbp
+; FALLBACK6-NEXT: shrxq %r9, %rbp, %r8
+; FALLBACK6-NEXT: orq %r15, %r8
+; FALLBACK6-NEXT: movq %rcx, (%rdx)
+; FALLBACK6-NEXT: movq %r8, 56(%rdx)
+; FALLBACK6-NEXT: movq %rax, 48(%rdx)
+; FALLBACK6-NEXT: movq %rsi, 8(%rdx)
+; FALLBACK6-NEXT: movq %r14, 16(%rdx)
+; FALLBACK6-NEXT: movq %r11, 24(%rdx)
+; FALLBACK6-NEXT: movq %r12, 32(%rdx)
+; FALLBACK6-NEXT: movq %rdi, 40(%rdx)
+; FALLBACK6-NEXT: addq $24, %rsp
+; FALLBACK6-NEXT: popq %rbx
+; FALLBACK6-NEXT: popq %r12
+; FALLBACK6-NEXT: popq %r13
+; FALLBACK6-NEXT: popq %r14
+; FALLBACK6-NEXT: popq %r15
+; FALLBACK6-NEXT: popq %rbp
+; FALLBACK6-NEXT: retq
+;
+; FALLBACK7-LABEL: shl_64bytes:
+; FALLBACK7: # %bb.0:
+; FALLBACK7-NEXT: pushq %r15
+; FALLBACK7-NEXT: pushq %r14
+; FALLBACK7-NEXT: pushq %rbx
+; FALLBACK7-NEXT: movups (%rdi), %xmm0
+; FALLBACK7-NEXT: movups 16(%rdi), %xmm1
+; FALLBACK7-NEXT: movups 32(%rdi), %xmm2
+; FALLBACK7-NEXT: movups 48(%rdi), %xmm3
+; FALLBACK7-NEXT: movl (%rsi), %eax
+; FALLBACK7-NEXT: xorps %xmm4, %xmm4
+; FALLBACK7-NEXT: movaps %xmm4, -{{[0-9]+}}(%rsp)
+; FALLBACK7-NEXT: movaps %xmm4, -{{[0-9]+}}(%rsp)
+; FALLBACK7-NEXT: movaps %xmm4, -{{[0-9]+}}(%rsp)
+; FALLBACK7-NEXT: movaps %xmm4, -{{[0-9]+}}(%rsp)
+; FALLBACK7-NEXT: movaps %xmm3, -{{[0-9]+}}(%rsp)
+; FALLBACK7-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; FALLBACK7-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; FALLBACK7-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK7-NEXT: leal (,%rax,8), %ecx
+; FALLBACK7-NEXT: andl $56, %ecx
+; FALLBACK7-NEXT: andl $56, %eax
+; FALLBACK7-NEXT: negl %eax
+; FALLBACK7-NEXT: movslq %eax, %r8
+; FALLBACK7-NEXT: movq -32(%rsp,%r8), %rax
+; FALLBACK7-NEXT: movq -24(%rsp,%r8), %r9
+; FALLBACK7-NEXT: movq %r9, %rsi
+; FALLBACK7-NEXT: shldq %cl, %rax, %rsi
+; FALLBACK7-NEXT: movq -40(%rsp,%r8), %rdi
+; FALLBACK7-NEXT: shldq %cl, %rdi, %rax
+; FALLBACK7-NEXT: movq -48(%rsp,%r8), %r10
+; FALLBACK7-NEXT: shldq %cl, %r10, %rdi
+; FALLBACK7-NEXT: movq -64(%rsp,%r8), %r11
+; FALLBACK7-NEXT: movq -56(%rsp,%r8), %rbx
+; FALLBACK7-NEXT: shldq %cl, %rbx, %r10
+; FALLBACK7-NEXT: movq -16(%rsp,%r8), %r14
+; FALLBACK7-NEXT: movq %r14, %r15
+; FALLBACK7-NEXT: shldq %cl, %r9, %r15
+; FALLBACK7-NEXT: movq -8(%rsp,%r8), %r8
+; FALLBACK7-NEXT: shldq %cl, %r14, %r8
+; FALLBACK7-NEXT: shlxq %rcx, %r11, %r9
+; FALLBACK7-NEXT: # kill: def $cl killed $cl killed $rcx
+; FALLBACK7-NEXT: shldq %cl, %r11, %rbx
+; FALLBACK7-NEXT: movq %r8, 56(%rdx)
+; FALLBACK7-NEXT: movq %r15, 48(%rdx)
+; FALLBACK7-NEXT: movq %rbx, 8(%rdx)
+; FALLBACK7-NEXT: movq %r10, 16(%rdx)
+; FALLBACK7-NEXT: movq %rdi, 24(%rdx)
+; FALLBACK7-NEXT: movq %rax, 32(%rdx)
+; FALLBACK7-NEXT: movq %rsi, 40(%rdx)
+; FALLBACK7-NEXT: movq %r9, (%rdx)
+; FALLBACK7-NEXT: popq %rbx
+; FALLBACK7-NEXT: popq %r14
+; FALLBACK7-NEXT: popq %r15
+; FALLBACK7-NEXT: retq
+;
+; FALLBACK8-LABEL: shl_64bytes:
+; FALLBACK8: # %bb.0:
+; FALLBACK8-NEXT: pushq %r15
+; FALLBACK8-NEXT: pushq %r14
+; FALLBACK8-NEXT: pushq %r13
+; FALLBACK8-NEXT: pushq %r12
+; FALLBACK8-NEXT: pushq %rbx
+; FALLBACK8-NEXT: vmovups (%rdi), %ymm0
+; FALLBACK8-NEXT: vmovups 32(%rdi), %ymm1
+; FALLBACK8-NEXT: movl (%rsi), %ecx
+; FALLBACK8-NEXT: vxorps %xmm2, %xmm2, %xmm2
+; FALLBACK8-NEXT: vmovups %ymm2, -{{[0-9]+}}(%rsp)
+; FALLBACK8-NEXT: vmovups %ymm2, -{{[0-9]+}}(%rsp)
+; FALLBACK8-NEXT: vmovups %ymm1, -{{[0-9]+}}(%rsp)
+; FALLBACK8-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
+; FALLBACK8-NEXT: leal (,%rcx,8), %eax
+; FALLBACK8-NEXT: andl $56, %eax
+; FALLBACK8-NEXT: andl $56, %ecx
+; FALLBACK8-NEXT: negl %ecx
+; FALLBACK8-NEXT: movslq %ecx, %r9
+; FALLBACK8-NEXT: movq -24(%rsp,%r9), %rdi
+; FALLBACK8-NEXT: movq %rdi, %r10
+; FALLBACK8-NEXT: movl %eax, %ecx
+; FALLBACK8-NEXT: shlq %cl, %r10
+; FALLBACK8-NEXT: movl %eax, %esi
+; FALLBACK8-NEXT: notb %sil
+; FALLBACK8-NEXT: movq -32(%rsp,%r9), %r11
+; FALLBACK8-NEXT: movq %r11, %r8
+; FALLBACK8-NEXT: shrq %r8
+; FALLBACK8-NEXT: movl %esi, %ecx
+; FALLBACK8-NEXT: shrq %cl, %r8
+; FALLBACK8-NEXT: orq %r10, %r8
+; FALLBACK8-NEXT: movl %eax, %ecx
+; FALLBACK8-NEXT: shlq %cl, %r11
+; FALLBACK8-NEXT: movq -40(%rsp,%r9), %rbx
+; FALLBACK8-NEXT: movq %rbx, %r10
+; FALLBACK8-NEXT: shrq %r10
+; FALLBACK8-NEXT: movl %esi, %ecx
+; FALLBACK8-NEXT: shrq %cl, %r10
+; FALLBACK8-NEXT: orq %r11, %r10
+; FALLBACK8-NEXT: movl %eax, %ecx
+; FALLBACK8-NEXT: shlq %cl, %rbx
+; FALLBACK8-NEXT: movq -48(%rsp,%r9), %r15
+; FALLBACK8-NEXT: movq %r15, %r11
+; FALLBACK8-NEXT: shrq %r11
+; FALLBACK8-NEXT: movl %esi, %ecx
+; FALLBACK8-NEXT: shrq %cl, %r11
+; FALLBACK8-NEXT: orq %rbx, %r11
+; FALLBACK8-NEXT: movl %eax, %ecx
+; FALLBACK8-NEXT: shlq %cl, %r15
+; FALLBACK8-NEXT: movq -64(%rsp,%r9), %r14
+; FALLBACK8-NEXT: movq -56(%rsp,%r9), %r12
+; FALLBACK8-NEXT: movq %r12, %rbx
+; FALLBACK8-NEXT: shrq %rbx
+; FALLBACK8-NEXT: movl %esi, %ecx
+; FALLBACK8-NEXT: shrq %cl, %rbx
+; FALLBACK8-NEXT: orq %r15, %rbx
+; FALLBACK8-NEXT: movl %eax, %ecx
+; FALLBACK8-NEXT: shlq %cl, %r12
+; FALLBACK8-NEXT: movq %r14, %r15
+; FALLBACK8-NEXT: shrq %r15
+; FALLBACK8-NEXT: movl %esi, %ecx
+; FALLBACK8-NEXT: shrq %cl, %r15
+; FALLBACK8-NEXT: orq %r12, %r15
+; FALLBACK8-NEXT: movq -16(%rsp,%r9), %r12
+; FALLBACK8-NEXT: movq %r12, %r13
+; FALLBACK8-NEXT: movl %eax, %ecx
+; FALLBACK8-NEXT: shlq %cl, %r13
+; FALLBACK8-NEXT: shrq %rdi
+; FALLBACK8-NEXT: movl %esi, %ecx
+; FALLBACK8-NEXT: shrq %cl, %rdi
+; FALLBACK8-NEXT: orq %r13, %rdi
+; FALLBACK8-NEXT: movq -8(%rsp,%r9), %r9
+; FALLBACK8-NEXT: movl %eax, %ecx
+; FALLBACK8-NEXT: shlq %cl, %r9
+; FALLBACK8-NEXT: shrq %r12
+; FALLBACK8-NEXT: movl %esi, %ecx
+; FALLBACK8-NEXT: shrq %cl, %r12
+; FALLBACK8-NEXT: orq %r9, %r12
+; FALLBACK8-NEXT: movl %eax, %ecx
+; FALLBACK8-NEXT: shlq %cl, %r14
+; FALLBACK8-NEXT: movq %r14, (%rdx)
+; FALLBACK8-NEXT: movq %r12, 56(%rdx)
+; FALLBACK8-NEXT: movq %rdi, 48(%rdx)
+; FALLBACK8-NEXT: movq %r15, 8(%rdx)
+; FALLBACK8-NEXT: movq %rbx, 16(%rdx)
+; FALLBACK8-NEXT: movq %r11, 24(%rdx)
+; FALLBACK8-NEXT: movq %r10, 32(%rdx)
+; FALLBACK8-NEXT: movq %r8, 40(%rdx)
+; FALLBACK8-NEXT: popq %rbx
+; FALLBACK8-NEXT: popq %r12
+; FALLBACK8-NEXT: popq %r13
+; FALLBACK8-NEXT: popq %r14
+; FALLBACK8-NEXT: popq %r15
+; FALLBACK8-NEXT: vzeroupper
+; FALLBACK8-NEXT: retq
+;
+; FALLBACK9-LABEL: shl_64bytes:
+; FALLBACK9: # %bb.0:
+; FALLBACK9-NEXT: pushq %r15
+; FALLBACK9-NEXT: pushq %r14
+; FALLBACK9-NEXT: pushq %rbx
+; FALLBACK9-NEXT: vmovups (%rdi), %ymm0
+; FALLBACK9-NEXT: vmovups 32(%rdi), %ymm1
+; FALLBACK9-NEXT: movl (%rsi), %eax
+; FALLBACK9-NEXT: vxorps %xmm2, %xmm2, %xmm2
+; FALLBACK9-NEXT: vmovups %ymm2, -{{[0-9]+}}(%rsp)
+; FALLBACK9-NEXT: vmovups %ymm2, -{{[0-9]+}}(%rsp)
+; FALLBACK9-NEXT: vmovups %ymm1, -{{[0-9]+}}(%rsp)
+; FALLBACK9-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
+; FALLBACK9-NEXT: leal (,%rax,8), %ecx
+; FALLBACK9-NEXT: andl $56, %ecx
+; FALLBACK9-NEXT: andl $56, %eax
+; FALLBACK9-NEXT: negl %eax
+; FALLBACK9-NEXT: movslq %eax, %r8
+; FALLBACK9-NEXT: movq -32(%rsp,%r8), %rax
+; FALLBACK9-NEXT: movq -24(%rsp,%r8), %r9
+; FALLBACK9-NEXT: movq %r9, %rsi
+; FALLBACK9-NEXT: shldq %cl, %rax, %rsi
+; FALLBACK9-NEXT: movq -40(%rsp,%r8), %rdi
+; FALLBACK9-NEXT: shldq %cl, %rdi, %rax
+; FALLBACK9-NEXT: movq -48(%rsp,%r8), %r10
+; FALLBACK9-NEXT: shldq %cl, %r10, %rdi
+; FALLBACK9-NEXT: movq -64(%rsp,%r8), %r11
+; FALLBACK9-NEXT: movq -56(%rsp,%r8), %rbx
+; FALLBACK9-NEXT: shldq %cl, %rbx, %r10
+; FALLBACK9-NEXT: movq -16(%rsp,%r8), %r14
+; FALLBACK9-NEXT: movq %r14, %r15
+; FALLBACK9-NEXT: shldq %cl, %r9, %r15
+; FALLBACK9-NEXT: movq -8(%rsp,%r8), %r8
+; FALLBACK9-NEXT: shldq %cl, %r14, %r8
+; FALLBACK9-NEXT: movq %r11, %r9
+; FALLBACK9-NEXT: shlq %cl, %r9
+; FALLBACK9-NEXT: # kill: def $cl killed $cl killed $ecx
+; FALLBACK9-NEXT: shldq %cl, %r11, %rbx
+; FALLBACK9-NEXT: movq %r8, 56(%rdx)
+; FALLBACK9-NEXT: movq %r15, 48(%rdx)
+; FALLBACK9-NEXT: movq %rbx, 8(%rdx)
+; FALLBACK9-NEXT: movq %r10, 16(%rdx)
+; FALLBACK9-NEXT: movq %rdi, 24(%rdx)
+; FALLBACK9-NEXT: movq %rax, 32(%rdx)
+; FALLBACK9-NEXT: movq %rsi, 40(%rdx)
+; FALLBACK9-NEXT: movq %r9, (%rdx)
+; FALLBACK9-NEXT: popq %rbx
+; FALLBACK9-NEXT: popq %r14
+; FALLBACK9-NEXT: popq %r15
+; FALLBACK9-NEXT: vzeroupper
+; FALLBACK9-NEXT: retq
+;
+; FALLBACK10-LABEL: shl_64bytes:
+; FALLBACK10: # %bb.0:
+; FALLBACK10-NEXT: pushq %rbp
+; FALLBACK10-NEXT: pushq %r15
+; FALLBACK10-NEXT: pushq %r14
+; FALLBACK10-NEXT: pushq %r13
+; FALLBACK10-NEXT: pushq %r12
+; FALLBACK10-NEXT: pushq %rbx
+; FALLBACK10-NEXT: subq $24, %rsp
+; FALLBACK10-NEXT: vmovups (%rdi), %ymm0
+; FALLBACK10-NEXT: vmovups 32(%rdi), %ymm1
+; FALLBACK10-NEXT: movl (%rsi), %eax
+; FALLBACK10-NEXT: vxorps %xmm2, %xmm2, %xmm2
+; FALLBACK10-NEXT: vmovups %ymm2, -{{[0-9]+}}(%rsp)
+; FALLBACK10-NEXT: vmovups %ymm2, -{{[0-9]+}}(%rsp)
+; FALLBACK10-NEXT: vmovups %ymm1, -{{[0-9]+}}(%rsp)
+; FALLBACK10-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
+; FALLBACK10-NEXT: leal (,%rax,8), %ecx
+; FALLBACK10-NEXT: andl $56, %ecx
+; FALLBACK10-NEXT: andl $56, %eax
+; FALLBACK10-NEXT: negl %eax
+; FALLBACK10-NEXT: movslq %eax, %rsi
+; FALLBACK10-NEXT: movq -8(%rsp,%rsi), %rax
+; FALLBACK10-NEXT: shlxq %rcx, %rax, %r12
+; FALLBACK10-NEXT: movq -16(%rsp,%rsi), %rdi
+; FALLBACK10-NEXT: shlxq %rcx, %rdi, %r15
+; FALLBACK10-NEXT: movq -24(%rsp,%rsi), %r13
+; FALLBACK10-NEXT: shlxq %rcx, %r13, %r8
+; FALLBACK10-NEXT: movq %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; FALLBACK10-NEXT: movq -32(%rsp,%rsi), %r11
+; FALLBACK10-NEXT: shlxq %rcx, %r11, %r10
+; FALLBACK10-NEXT: movq -40(%rsp,%rsi), %r14
+; FALLBACK10-NEXT: shlxq %rcx, %r14, %rbx
+; FALLBACK10-NEXT: movl %ecx, %r9d
+; FALLBACK10-NEXT: notb %r9b
+; FALLBACK10-NEXT: shrq %rdi
+; FALLBACK10-NEXT: shrxq %r9, %rdi, %rdi
+; FALLBACK10-NEXT: orq %r12, %rdi
+; FALLBACK10-NEXT: movq (%rsp,%rsi), %rbp
+; FALLBACK10-NEXT: shlxq %rcx, %rbp, %r8
+; FALLBACK10-NEXT: shrq %r13
+; FALLBACK10-NEXT: shrxq %r9, %r13, %r12
+; FALLBACK10-NEXT: orq %r15, %r12
+; FALLBACK10-NEXT: shlxq %rcx, 8(%rsp,%rsi), %r15
+; FALLBACK10-NEXT: movq -48(%rsp,%rsi), %rsi
+; FALLBACK10-NEXT: shlxq %rcx, %rsi, %rcx
+; FALLBACK10-NEXT: shrq %r11
+; FALLBACK10-NEXT: shrxq %r9, %r11, %r11
+; FALLBACK10-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Folded Reload
+; FALLBACK10-NEXT: shrq %r14
+; FALLBACK10-NEXT: shrxq %r9, %r14, %r14
+; FALLBACK10-NEXT: orq %r10, %r14
+; FALLBACK10-NEXT: shrq %rsi
+; FALLBACK10-NEXT: shrxq %r9, %rsi, %rsi
+; FALLBACK10-NEXT: orq %rbx, %rsi
+; FALLBACK10-NEXT: shrq %rax
+; FALLBACK10-NEXT: shrxq %r9, %rax, %rax
+; FALLBACK10-NEXT: orq %r8, %rax
+; FALLBACK10-NEXT: shrq %rbp
+; FALLBACK10-NEXT: shrxq %r9, %rbp, %r8
+; FALLBACK10-NEXT: orq %r15, %r8
+; FALLBACK10-NEXT: movq %rcx, (%rdx)
+; FALLBACK10-NEXT: movq %r8, 56(%rdx)
+; FALLBACK10-NEXT: movq %rax, 48(%rdx)
+; FALLBACK10-NEXT: movq %rsi, 8(%rdx)
+; FALLBACK10-NEXT: movq %r14, 16(%rdx)
+; FALLBACK10-NEXT: movq %r11, 24(%rdx)
+; FALLBACK10-NEXT: movq %r12, 32(%rdx)
+; FALLBACK10-NEXT: movq %rdi, 40(%rdx)
+; FALLBACK10-NEXT: addq $24, %rsp
+; FALLBACK10-NEXT: popq %rbx
+; FALLBACK10-NEXT: popq %r12
+; FALLBACK10-NEXT: popq %r13
+; FALLBACK10-NEXT: popq %r14
+; FALLBACK10-NEXT: popq %r15
+; FALLBACK10-NEXT: popq %rbp
+; FALLBACK10-NEXT: vzeroupper
+; FALLBACK10-NEXT: retq
+;
+; FALLBACK11-LABEL: shl_64bytes:
+; FALLBACK11: # %bb.0:
+; FALLBACK11-NEXT: pushq %r15
+; FALLBACK11-NEXT: pushq %r14
+; FALLBACK11-NEXT: pushq %rbx
+; FALLBACK11-NEXT: vmovups (%rdi), %ymm0
+; FALLBACK11-NEXT: vmovups 32(%rdi), %ymm1
+; FALLBACK11-NEXT: movl (%rsi), %eax
+; FALLBACK11-NEXT: vxorps %xmm2, %xmm2, %xmm2
+; FALLBACK11-NEXT: vmovups %ymm2, -{{[0-9]+}}(%rsp)
+; FALLBACK11-NEXT: vmovups %ymm2, -{{[0-9]+}}(%rsp)
+; FALLBACK11-NEXT: vmovups %ymm1, -{{[0-9]+}}(%rsp)
+; FALLBACK11-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
+; FALLBACK11-NEXT: leal (,%rax,8), %ecx
+; FALLBACK11-NEXT: andl $56, %ecx
+; FALLBACK11-NEXT: andl $56, %eax
+; FALLBACK11-NEXT: negl %eax
+; FALLBACK11-NEXT: movslq %eax, %r8
+; FALLBACK11-NEXT: movq -32(%rsp,%r8), %rax
+; FALLBACK11-NEXT: movq -24(%rsp,%r8), %r9
+; FALLBACK11-NEXT: movq %r9, %rsi
+; FALLBACK11-NEXT: shldq %cl, %rax, %rsi
+; FALLBACK11-NEXT: movq -40(%rsp,%r8), %rdi
+; FALLBACK11-NEXT: shldq %cl, %rdi, %rax
+; FALLBACK11-NEXT: movq -48(%rsp,%r8), %r10
+; FALLBACK11-NEXT: shldq %cl, %r10, %rdi
+; FALLBACK11-NEXT: movq -64(%rsp,%r8), %r11
+; FALLBACK11-NEXT: movq -56(%rsp,%r8), %rbx
+; FALLBACK11-NEXT: shldq %cl, %rbx, %r10
+; FALLBACK11-NEXT: movq -16(%rsp,%r8), %r14
+; FALLBACK11-NEXT: movq %r14, %r15
+; FALLBACK11-NEXT: shldq %cl, %r9, %r15
+; FALLBACK11-NEXT: movq -8(%rsp,%r8), %r8
+; FALLBACK11-NEXT: shldq %cl, %r14, %r8
+; FALLBACK11-NEXT: shlxq %rcx, %r11, %r9
+; FALLBACK11-NEXT: # kill: def $cl killed $cl killed $rcx
+; FALLBACK11-NEXT: shldq %cl, %r11, %rbx
+; FALLBACK11-NEXT: movq %r8, 56(%rdx)
+; FALLBACK11-NEXT: movq %r15, 48(%rdx)
+; FALLBACK11-NEXT: movq %rbx, 8(%rdx)
+; FALLBACK11-NEXT: movq %r10, 16(%rdx)
+; FALLBACK11-NEXT: movq %rdi, 24(%rdx)
+; FALLBACK11-NEXT: movq %rax, 32(%rdx)
+; FALLBACK11-NEXT: movq %rsi, 40(%rdx)
+; FALLBACK11-NEXT: movq %r9, (%rdx)
+; FALLBACK11-NEXT: popq %rbx
+; FALLBACK11-NEXT: popq %r14
+; FALLBACK11-NEXT: popq %r15
+; FALLBACK11-NEXT: vzeroupper
+; FALLBACK11-NEXT: retq
+;
+; FALLBACK12-LABEL: shl_64bytes:
+; FALLBACK12: # %bb.0:
+; FALLBACK12-NEXT: pushq %r15
+; FALLBACK12-NEXT: pushq %r14
+; FALLBACK12-NEXT: pushq %r13
+; FALLBACK12-NEXT: pushq %r12
+; FALLBACK12-NEXT: pushq %rbx
+; FALLBACK12-NEXT: vmovups (%rdi), %zmm0
+; FALLBACK12-NEXT: movl (%rsi), %ecx
+; FALLBACK12-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; FALLBACK12-NEXT: vmovups %zmm1, -{{[0-9]+}}(%rsp)
+; FALLBACK12-NEXT: vmovups %zmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK12-NEXT: leal (,%rcx,8), %eax
+; FALLBACK12-NEXT: andl $56, %eax
+; FALLBACK12-NEXT: andl $56, %ecx
+; FALLBACK12-NEXT: negl %ecx
+; FALLBACK12-NEXT: movslq %ecx, %r9
+; FALLBACK12-NEXT: movq -24(%rsp,%r9), %rdi
+; FALLBACK12-NEXT: movq %rdi, %r10
+; FALLBACK12-NEXT: movl %eax, %ecx
+; FALLBACK12-NEXT: shlq %cl, %r10
+; FALLBACK12-NEXT: movl %eax, %esi
+; FALLBACK12-NEXT: notb %sil
+; FALLBACK12-NEXT: movq -32(%rsp,%r9), %r11
+; FALLBACK12-NEXT: movq %r11, %r8
+; FALLBACK12-NEXT: shrq %r8
+; FALLBACK12-NEXT: movl %esi, %ecx
+; FALLBACK12-NEXT: shrq %cl, %r8
+; FALLBACK12-NEXT: orq %r10, %r8
+; FALLBACK12-NEXT: movl %eax, %ecx
+; FALLBACK12-NEXT: shlq %cl, %r11
+; FALLBACK12-NEXT: movq -40(%rsp,%r9), %rbx
+; FALLBACK12-NEXT: movq %rbx, %r10
+; FALLBACK12-NEXT: shrq %r10
+; FALLBACK12-NEXT: movl %esi, %ecx
+; FALLBACK12-NEXT: shrq %cl, %r10
+; FALLBACK12-NEXT: orq %r11, %r10
+; FALLBACK12-NEXT: movl %eax, %ecx
+; FALLBACK12-NEXT: shlq %cl, %rbx
+; FALLBACK12-NEXT: movq -48(%rsp,%r9), %r15
+; FALLBACK12-NEXT: movq %r15, %r11
+; FALLBACK12-NEXT: shrq %r11
+; FALLBACK12-NEXT: movl %esi, %ecx
+; FALLBACK12-NEXT: shrq %cl, %r11
+; FALLBACK12-NEXT: orq %rbx, %r11
+; FALLBACK12-NEXT: movl %eax, %ecx
+; FALLBACK12-NEXT: shlq %cl, %r15
+; FALLBACK12-NEXT: movq -64(%rsp,%r9), %r14
+; FALLBACK12-NEXT: movq -56(%rsp,%r9), %r12
+; FALLBACK12-NEXT: movq %r12, %rbx
+; FALLBACK12-NEXT: shrq %rbx
+; FALLBACK12-NEXT: movl %esi, %ecx
+; FALLBACK12-NEXT: shrq %cl, %rbx
+; FALLBACK12-NEXT: orq %r15, %rbx
+; FALLBACK12-NEXT: movl %eax, %ecx
+; FALLBACK12-NEXT: shlq %cl, %r12
+; FALLBACK12-NEXT: movq %r14, %r15
+; FALLBACK12-NEXT: shrq %r15
+; FALLBACK12-NEXT: movl %esi, %ecx
+; FALLBACK12-NEXT: shrq %cl, %r15
+; FALLBACK12-NEXT: orq %r12, %r15
+; FALLBACK12-NEXT: movq -16(%rsp,%r9), %r12
+; FALLBACK12-NEXT: movq %r12, %r13
+; FALLBACK12-NEXT: movl %eax, %ecx
+; FALLBACK12-NEXT: shlq %cl, %r13
+; FALLBACK12-NEXT: shrq %rdi
+; FALLBACK12-NEXT: movl %esi, %ecx
+; FALLBACK12-NEXT: shrq %cl, %rdi
+; FALLBACK12-NEXT: orq %r13, %rdi
+; FALLBACK12-NEXT: movq -8(%rsp,%r9), %r9
+; FALLBACK12-NEXT: movl %eax, %ecx
+; FALLBACK12-NEXT: shlq %cl, %r9
+; FALLBACK12-NEXT: shrq %r12
+; FALLBACK12-NEXT: movl %esi, %ecx
+; FALLBACK12-NEXT: shrq %cl, %r12
+; FALLBACK12-NEXT: orq %r9, %r12
+; FALLBACK12-NEXT: movl %eax, %ecx
+; FALLBACK12-NEXT: shlq %cl, %r14
+; FALLBACK12-NEXT: movq %r14, (%rdx)
+; FALLBACK12-NEXT: movq %r12, 56(%rdx)
+; FALLBACK12-NEXT: movq %rdi, 48(%rdx)
+; FALLBACK12-NEXT: movq %r15, 8(%rdx)
+; FALLBACK12-NEXT: movq %rbx, 16(%rdx)
+; FALLBACK12-NEXT: movq %r11, 24(%rdx)
+; FALLBACK12-NEXT: movq %r10, 32(%rdx)
+; FALLBACK12-NEXT: movq %r8, 40(%rdx)
+; FALLBACK12-NEXT: popq %rbx
+; FALLBACK12-NEXT: popq %r12
+; FALLBACK12-NEXT: popq %r13
+; FALLBACK12-NEXT: popq %r14
+; FALLBACK12-NEXT: popq %r15
+; FALLBACK12-NEXT: vzeroupper
+; FALLBACK12-NEXT: retq
+;
+; FALLBACK13-LABEL: shl_64bytes:
+; FALLBACK13: # %bb.0:
+; FALLBACK13-NEXT: pushq %r15
+; FALLBACK13-NEXT: pushq %r14
+; FALLBACK13-NEXT: pushq %rbx
+; FALLBACK13-NEXT: vmovups (%rdi), %zmm0
+; FALLBACK13-NEXT: movl (%rsi), %eax
+; FALLBACK13-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; FALLBACK13-NEXT: vmovups %zmm1, -{{[0-9]+}}(%rsp)
+; FALLBACK13-NEXT: vmovups %zmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK13-NEXT: leal (,%rax,8), %ecx
+; FALLBACK13-NEXT: andl $56, %ecx
+; FALLBACK13-NEXT: andl $56, %eax
+; FALLBACK13-NEXT: negl %eax
+; FALLBACK13-NEXT: movslq %eax, %r8
+; FALLBACK13-NEXT: movq -32(%rsp,%r8), %rax
+; FALLBACK13-NEXT: movq -24(%rsp,%r8), %r9
+; FALLBACK13-NEXT: movq %r9, %rsi
+; FALLBACK13-NEXT: shldq %cl, %rax, %rsi
+; FALLBACK13-NEXT: movq -40(%rsp,%r8), %rdi
+; FALLBACK13-NEXT: shldq %cl, %rdi, %rax
+; FALLBACK13-NEXT: movq -48(%rsp,%r8), %r10
+; FALLBACK13-NEXT: shldq %cl, %r10, %rdi
+; FALLBACK13-NEXT: movq -64(%rsp,%r8), %r11
+; FALLBACK13-NEXT: movq -56(%rsp,%r8), %rbx
+; FALLBACK13-NEXT: shldq %cl, %rbx, %r10
+; FALLBACK13-NEXT: movq -16(%rsp,%r8), %r14
+; FALLBACK13-NEXT: movq %r14, %r15
+; FALLBACK13-NEXT: shldq %cl, %r9, %r15
+; FALLBACK13-NEXT: movq -8(%rsp,%r8), %r8
+; FALLBACK13-NEXT: shldq %cl, %r14, %r8
+; FALLBACK13-NEXT: movq %r11, %r9
+; FALLBACK13-NEXT: shlq %cl, %r9
+; FALLBACK13-NEXT: # kill: def $cl killed $cl killed $ecx
+; FALLBACK13-NEXT: shldq %cl, %r11, %rbx
+; FALLBACK13-NEXT: movq %r8, 56(%rdx)
+; FALLBACK13-NEXT: movq %r15, 48(%rdx)
+; FALLBACK13-NEXT: movq %rbx, 8(%rdx)
+; FALLBACK13-NEXT: movq %r10, 16(%rdx)
+; FALLBACK13-NEXT: movq %rdi, 24(%rdx)
+; FALLBACK13-NEXT: movq %rax, 32(%rdx)
+; FALLBACK13-NEXT: movq %rsi, 40(%rdx)
+; FALLBACK13-NEXT: movq %r9, (%rdx)
+; FALLBACK13-NEXT: popq %rbx
+; FALLBACK13-NEXT: popq %r14
+; FALLBACK13-NEXT: popq %r15
+; FALLBACK13-NEXT: vzeroupper
+; FALLBACK13-NEXT: retq
+;
+; FALLBACK14-LABEL: shl_64bytes:
+; FALLBACK14: # %bb.0:
+; FALLBACK14-NEXT: pushq %rbp
+; FALLBACK14-NEXT: pushq %r15
+; FALLBACK14-NEXT: pushq %r14
+; FALLBACK14-NEXT: pushq %r13
+; FALLBACK14-NEXT: pushq %r12
+; FALLBACK14-NEXT: pushq %rbx
+; FALLBACK14-NEXT: subq $24, %rsp
+; FALLBACK14-NEXT: vmovups (%rdi), %zmm0
+; FALLBACK14-NEXT: movl (%rsi), %eax
+; FALLBACK14-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; FALLBACK14-NEXT: vmovups %zmm1, -{{[0-9]+}}(%rsp)
+; FALLBACK14-NEXT: vmovups %zmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK14-NEXT: leal (,%rax,8), %ecx
+; FALLBACK14-NEXT: andl $56, %ecx
+; FALLBACK14-NEXT: andl $56, %eax
+; FALLBACK14-NEXT: negl %eax
+; FALLBACK14-NEXT: movslq %eax, %rsi
+; FALLBACK14-NEXT: movq -8(%rsp,%rsi), %rax
+; FALLBACK14-NEXT: shlxq %rcx, %rax, %r12
+; FALLBACK14-NEXT: movq -16(%rsp,%rsi), %rdi
+; FALLBACK14-NEXT: shlxq %rcx, %rdi, %r15
+; FALLBACK14-NEXT: movq -24(%rsp,%rsi), %r13
+; FALLBACK14-NEXT: shlxq %rcx, %r13, %r8
+; FALLBACK14-NEXT: movq %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; FALLBACK14-NEXT: movq -32(%rsp,%rsi), %r11
+; FALLBACK14-NEXT: shlxq %rcx, %r11, %r10
+; FALLBACK14-NEXT: movq -40(%rsp,%rsi), %r14
+; FALLBACK14-NEXT: shlxq %rcx, %r14, %rbx
+; FALLBACK14-NEXT: movl %ecx, %r9d
+; FALLBACK14-NEXT: notb %r9b
+; FALLBACK14-NEXT: shrq %rdi
+; FALLBACK14-NEXT: shrxq %r9, %rdi, %rdi
+; FALLBACK14-NEXT: orq %r12, %rdi
+; FALLBACK14-NEXT: movq (%rsp,%rsi), %rbp
+; FALLBACK14-NEXT: shlxq %rcx, %rbp, %r8
+; FALLBACK14-NEXT: shrq %r13
+; FALLBACK14-NEXT: shrxq %r9, %r13, %r12
+; FALLBACK14-NEXT: orq %r15, %r12
+; FALLBACK14-NEXT: shlxq %rcx, 8(%rsp,%rsi), %r15
+; FALLBACK14-NEXT: movq -48(%rsp,%rsi), %rsi
+; FALLBACK14-NEXT: shlxq %rcx, %rsi, %rcx
+; FALLBACK14-NEXT: shrq %r11
+; FALLBACK14-NEXT: shrxq %r9, %r11, %r11
+; FALLBACK14-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Folded Reload
+; FALLBACK14-NEXT: shrq %r14
+; FALLBACK14-NEXT: shrxq %r9, %r14, %r14
+; FALLBACK14-NEXT: orq %r10, %r14
+; FALLBACK14-NEXT: shrq %rsi
+; FALLBACK14-NEXT: shrxq %r9, %rsi, %rsi
+; FALLBACK14-NEXT: orq %rbx, %rsi
+; FALLBACK14-NEXT: shrq %rax
+; FALLBACK14-NEXT: shrxq %r9, %rax, %rax
+; FALLBACK14-NEXT: orq %r8, %rax
+; FALLBACK14-NEXT: shrq %rbp
+; FALLBACK14-NEXT: shrxq %r9, %rbp, %r8
+; FALLBACK14-NEXT: orq %r15, %r8
+; FALLBACK14-NEXT: movq %rcx, (%rdx)
+; FALLBACK14-NEXT: movq %r8, 56(%rdx)
+; FALLBACK14-NEXT: movq %rax, 48(%rdx)
+; FALLBACK14-NEXT: movq %rsi, 8(%rdx)
+; FALLBACK14-NEXT: movq %r14, 16(%rdx)
+; FALLBACK14-NEXT: movq %r11, 24(%rdx)
+; FALLBACK14-NEXT: movq %r12, 32(%rdx)
+; FALLBACK14-NEXT: movq %rdi, 40(%rdx)
+; FALLBACK14-NEXT: addq $24, %rsp
+; FALLBACK14-NEXT: popq %rbx
+; FALLBACK14-NEXT: popq %r12
+; FALLBACK14-NEXT: popq %r13
+; FALLBACK14-NEXT: popq %r14
+; FALLBACK14-NEXT: popq %r15
+; FALLBACK14-NEXT: popq %rbp
+; FALLBACK14-NEXT: vzeroupper
+; FALLBACK14-NEXT: retq
+;
+; FALLBACK15-LABEL: shl_64bytes:
+; FALLBACK15: # %bb.0:
+; FALLBACK15-NEXT: pushq %r15
+; FALLBACK15-NEXT: pushq %r14
+; FALLBACK15-NEXT: pushq %rbx
+; FALLBACK15-NEXT: vmovups (%rdi), %zmm0
+; FALLBACK15-NEXT: movl (%rsi), %eax
+; FALLBACK15-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; FALLBACK15-NEXT: vmovups %zmm1, -{{[0-9]+}}(%rsp)
+; FALLBACK15-NEXT: vmovups %zmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK15-NEXT: leal (,%rax,8), %ecx
+; FALLBACK15-NEXT: andl $56, %ecx
+; FALLBACK15-NEXT: andl $56, %eax
+; FALLBACK15-NEXT: negl %eax
+; FALLBACK15-NEXT: movslq %eax, %r8
+; FALLBACK15-NEXT: movq -32(%rsp,%r8), %rax
+; FALLBACK15-NEXT: movq -24(%rsp,%r8), %r9
+; FALLBACK15-NEXT: movq %r9, %rsi
+; FALLBACK15-NEXT: shldq %cl, %rax, %rsi
+; FALLBACK15-NEXT: movq -40(%rsp,%r8), %rdi
+; FALLBACK15-NEXT: shldq %cl, %rdi, %rax
+; FALLBACK15-NEXT: movq -48(%rsp,%r8), %r10
+; FALLBACK15-NEXT: shldq %cl, %r10, %rdi
+; FALLBACK15-NEXT: movq -64(%rsp,%r8), %r11
+; FALLBACK15-NEXT: movq -56(%rsp,%r8), %rbx
+; FALLBACK15-NEXT: shldq %cl, %rbx, %r10
+; FALLBACK15-NEXT: movq -16(%rsp,%r8), %r14
+; FALLBACK15-NEXT: movq %r14, %r15
+; FALLBACK15-NEXT: shldq %cl, %r9, %r15
+; FALLBACK15-NEXT: movq -8(%rsp,%r8), %r8
+; FALLBACK15-NEXT: shldq %cl, %r14, %r8
+; FALLBACK15-NEXT: shlxq %rcx, %r11, %r9
+; FALLBACK15-NEXT: # kill: def $cl killed $cl killed $rcx
+; FALLBACK15-NEXT: shldq %cl, %r11, %rbx
+; FALLBACK15-NEXT: movq %r8, 56(%rdx)
+; FALLBACK15-NEXT: movq %r15, 48(%rdx)
+; FALLBACK15-NEXT: movq %rbx, 8(%rdx)
+; FALLBACK15-NEXT: movq %r10, 16(%rdx)
+; FALLBACK15-NEXT: movq %rdi, 24(%rdx)
+; FALLBACK15-NEXT: movq %rax, 32(%rdx)
+; FALLBACK15-NEXT: movq %rsi, 40(%rdx)
+; FALLBACK15-NEXT: movq %r9, (%rdx)
+; FALLBACK15-NEXT: popq %rbx
+; FALLBACK15-NEXT: popq %r14
+; FALLBACK15-NEXT: popq %r15
+; FALLBACK15-NEXT: vzeroupper
+; FALLBACK15-NEXT: retq
+;
+; FALLBACK16-LABEL: shl_64bytes:
+; FALLBACK16: # %bb.0:
+; FALLBACK16-NEXT: pushl %ebp
+; FALLBACK16-NEXT: pushl %ebx
+; FALLBACK16-NEXT: pushl %edi
+; FALLBACK16-NEXT: pushl %esi
+; FALLBACK16-NEXT: subl $204, %esp
+; FALLBACK16-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK16-NEXT: movl (%eax), %ecx
+; FALLBACK16-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: movl 4(%eax), %ecx
+; FALLBACK16-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: movl 8(%eax), %ecx
+; FALLBACK16-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: movl 12(%eax), %ecx
+; FALLBACK16-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: movl 16(%eax), %ecx
+; FALLBACK16-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: movl 20(%eax), %ecx
+; FALLBACK16-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: movl 24(%eax), %ecx
+; FALLBACK16-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: movl 28(%eax), %ecx
+; FALLBACK16-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: movl 32(%eax), %ecx
+; FALLBACK16-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: movl 36(%eax), %ecx
+; FALLBACK16-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: movl 40(%eax), %ebp
+; FALLBACK16-NEXT: movl 44(%eax), %ebx
+; FALLBACK16-NEXT: movl 48(%eax), %edi
+; FALLBACK16-NEXT: movl 52(%eax), %esi
+; FALLBACK16-NEXT: movl 56(%eax), %edx
+; FALLBACK16-NEXT: movl 60(%eax), %ecx
+; FALLBACK16-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK16-NEXT: movl (%eax), %eax
+; FALLBACK16-NEXT: xorps %xmm0, %xmm0
+; FALLBACK16-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movl %ebx, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movl %ebp, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK16-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK16-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK16-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK16-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK16-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK16-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK16-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK16-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK16-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK16-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movl %eax, %edx
+; FALLBACK16-NEXT: andl $60, %edx
+; FALLBACK16-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: leal {{[0-9]+}}(%esp), %ecx
+; FALLBACK16-NEXT: subl %edx, %ecx
+; FALLBACK16-NEXT: movl (%ecx), %edi
+; FALLBACK16-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: movl 4(%ecx), %edx
+; FALLBACK16-NEXT: movl %ecx, %ebp
+; FALLBACK16-NEXT: shll $3, %eax
+; FALLBACK16-NEXT: andl $24, %eax
+; FALLBACK16-NEXT: movl %edx, %esi
+; FALLBACK16-NEXT: movl %eax, %ecx
+; FALLBACK16-NEXT: shll %cl, %esi
+; FALLBACK16-NEXT: shrl %edi
+; FALLBACK16-NEXT: movb %al, %ch
+; FALLBACK16-NEXT: notb %ch
+; FALLBACK16-NEXT: movb %ch, %cl
+; FALLBACK16-NEXT: shrl %cl, %edi
+; FALLBACK16-NEXT: orl %esi, %edi
+; FALLBACK16-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: movl 12(%ebp), %ebx
+; FALLBACK16-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: movb %al, %cl
+; FALLBACK16-NEXT: shll %cl, %ebx
+; FALLBACK16-NEXT: movl 8(%ebp), %esi
+; FALLBACK16-NEXT: movl %ebp, %edi
+; FALLBACK16-NEXT: movl %esi, %ebp
+; FALLBACK16-NEXT: shrl %ebp
+; FALLBACK16-NEXT: movb %ch, %cl
+; FALLBACK16-NEXT: shrl %cl, %ebp
+; FALLBACK16-NEXT: orl %ebx, %ebp
+; FALLBACK16-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: movb %al, %cl
+; FALLBACK16-NEXT: shll %cl, %esi
+; FALLBACK16-NEXT: shrl %edx
+; FALLBACK16-NEXT: movb %ch, %cl
+; FALLBACK16-NEXT: shrl %cl, %edx
+; FALLBACK16-NEXT: orl %esi, %edx
+; FALLBACK16-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: movl %edi, %ebp
+; FALLBACK16-NEXT: movl 20(%edi), %ebx
+; FALLBACK16-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: movb %al, %cl
+; FALLBACK16-NEXT: shll %cl, %ebx
+; FALLBACK16-NEXT: movl 16(%edi), %esi
+; FALLBACK16-NEXT: movl %esi, %edx
+; FALLBACK16-NEXT: shrl %edx
+; FALLBACK16-NEXT: movb %ch, %cl
+; FALLBACK16-NEXT: shrl %cl, %edx
+; FALLBACK16-NEXT: orl %ebx, %edx
+; FALLBACK16-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: movb %al, %cl
+; FALLBACK16-NEXT: shll %cl, %esi
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; FALLBACK16-NEXT: shrl %edi
+; FALLBACK16-NEXT: movb %ch, %cl
+; FALLBACK16-NEXT: shrl %cl, %edi
+; FALLBACK16-NEXT: orl %esi, %edi
+; FALLBACK16-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: movl %ebp, %edx
+; FALLBACK16-NEXT: movl 28(%ebp), %ebx
+; FALLBACK16-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: movb %al, %cl
+; FALLBACK16-NEXT: shll %cl, %ebx
+; FALLBACK16-NEXT: movl 24(%ebp), %esi
+; FALLBACK16-NEXT: movl %esi, %edi
+; FALLBACK16-NEXT: shrl %edi
+; FALLBACK16-NEXT: movb %ch, %cl
+; FALLBACK16-NEXT: shrl %cl, %edi
+; FALLBACK16-NEXT: orl %ebx, %edi
+; FALLBACK16-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: movb %al, %cl
+; FALLBACK16-NEXT: shll %cl, %esi
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; FALLBACK16-NEXT: shrl %ebp
+; FALLBACK16-NEXT: movb %ch, %cl
+; FALLBACK16-NEXT: shrl %cl, %ebp
+; FALLBACK16-NEXT: orl %esi, %ebp
+; FALLBACK16-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: movl 36(%edx), %ebx
+; FALLBACK16-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: movb %al, %cl
+; FALLBACK16-NEXT: shll %cl, %ebx
+; FALLBACK16-NEXT: movl 32(%edx), %esi
+; FALLBACK16-NEXT: movl %edx, %ebp
+; FALLBACK16-NEXT: movl %esi, %edi
+; FALLBACK16-NEXT: shrl %edi
+; FALLBACK16-NEXT: movb %ch, %cl
+; FALLBACK16-NEXT: shrl %cl, %edi
+; FALLBACK16-NEXT: orl %ebx, %edi
+; FALLBACK16-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: movb %al, %cl
+; FALLBACK16-NEXT: shll %cl, %esi
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK16-NEXT: shrl %edx
+; FALLBACK16-NEXT: movb %ch, %cl
+; FALLBACK16-NEXT: shrl %cl, %edx
+; FALLBACK16-NEXT: orl %esi, %edx
+; FALLBACK16-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: movl 44(%ebp), %ebx
+; FALLBACK16-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: movb %al, %cl
+; FALLBACK16-NEXT: shll %cl, %ebx
+; FALLBACK16-NEXT: movl 40(%ebp), %esi
+; FALLBACK16-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: movl %esi, %edx
+; FALLBACK16-NEXT: shrl %edx
+; FALLBACK16-NEXT: movb %ch, %cl
+; FALLBACK16-NEXT: shrl %cl, %edx
+; FALLBACK16-NEXT: orl %ebx, %edx
+; FALLBACK16-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: movb %al, %cl
+; FALLBACK16-NEXT: shll %cl, %esi
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK16-NEXT: shrl %edx
+; FALLBACK16-NEXT: movb %ch, %cl
+; FALLBACK16-NEXT: shrl %cl, %edx
+; FALLBACK16-NEXT: orl %esi, %edx
+; FALLBACK16-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: movl 52(%ebp), %esi
+; FALLBACK16-NEXT: movl %esi, %edi
+; FALLBACK16-NEXT: movb %al, %cl
+; FALLBACK16-NEXT: shll %cl, %edi
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK16-NEXT: negl %edx
+; FALLBACK16-NEXT: movl 176(%esp,%edx), %ebx
+; FALLBACK16-NEXT: movl %ebx, %ebp
+; FALLBACK16-NEXT: shrl %ebp
+; FALLBACK16-NEXT: movb %ch, %cl
+; FALLBACK16-NEXT: shrl %cl, %ebp
+; FALLBACK16-NEXT: orl %edi, %ebp
+; FALLBACK16-NEXT: movb %al, %cl
+; FALLBACK16-NEXT: shll %cl, %ebx
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK16-NEXT: shrl %edx
+; FALLBACK16-NEXT: movb %ch, %cl
+; FALLBACK16-NEXT: shrl %cl, %edx
+; FALLBACK16-NEXT: orl %ebx, %edx
+; FALLBACK16-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; FALLBACK16-NEXT: movl 60(%edi), %edx
+; FALLBACK16-NEXT: movb %al, %cl
+; FALLBACK16-NEXT: shll %cl, %edx
+; FALLBACK16-NEXT: movl 56(%edi), %ebx
+; FALLBACK16-NEXT: movl %ebx, %edi
+; FALLBACK16-NEXT: shrl %edi
+; FALLBACK16-NEXT: movb %ch, %cl
+; FALLBACK16-NEXT: shrl %cl, %edi
+; FALLBACK16-NEXT: orl %edx, %edi
+; FALLBACK16-NEXT: movb %al, %cl
+; FALLBACK16-NEXT: shll %cl, %ebx
+; FALLBACK16-NEXT: shrl %esi
+; FALLBACK16-NEXT: movb %ch, %cl
+; FALLBACK16-NEXT: shrl %cl, %esi
+; FALLBACK16-NEXT: orl %ebx, %esi
+; FALLBACK16-NEXT: movl %eax, %ecx
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK16-NEXT: shll %cl, %edx
+; FALLBACK16-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK16-NEXT: movl %edx, (%eax)
+; FALLBACK16-NEXT: movl %esi, 56(%eax)
+; FALLBACK16-NEXT: movl %edi, 60(%eax)
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK16-NEXT: movl %ecx, 48(%eax)
+; FALLBACK16-NEXT: movl %ebp, 52(%eax)
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK16-NEXT: movl %ecx, 40(%eax)
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK16-NEXT: movl %ecx, 44(%eax)
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK16-NEXT: movl %ecx, 32(%eax)
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK16-NEXT: movl %ecx, 36(%eax)
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK16-NEXT: movl %ecx, 24(%eax)
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK16-NEXT: movl %ecx, 28(%eax)
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK16-NEXT: movl %ecx, 16(%eax)
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK16-NEXT: movl %ecx, 20(%eax)
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK16-NEXT: movl %ecx, 8(%eax)
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK16-NEXT: movl %ecx, 12(%eax)
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK16-NEXT: movl %ecx, 4(%eax)
+; FALLBACK16-NEXT: addl $204, %esp
+; FALLBACK16-NEXT: popl %esi
+; FALLBACK16-NEXT: popl %edi
+; FALLBACK16-NEXT: popl %ebx
+; FALLBACK16-NEXT: popl %ebp
+; FALLBACK16-NEXT: retl
+;
+; FALLBACK17-LABEL: shl_64bytes:
+; FALLBACK17: # %bb.0:
+; FALLBACK17-NEXT: pushl %ebp
+; FALLBACK17-NEXT: pushl %ebx
+; FALLBACK17-NEXT: pushl %edi
+; FALLBACK17-NEXT: pushl %esi
+; FALLBACK17-NEXT: subl $188, %esp
+; FALLBACK17-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK17-NEXT: movl (%ecx), %eax
+; FALLBACK17-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK17-NEXT: movl 4(%ecx), %eax
+; FALLBACK17-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK17-NEXT: movl 8(%ecx), %eax
+; FALLBACK17-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK17-NEXT: movl 12(%ecx), %eax
+; FALLBACK17-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK17-NEXT: movl 16(%ecx), %eax
+; FALLBACK17-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK17-NEXT: movl 20(%ecx), %eax
+; FALLBACK17-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK17-NEXT: movl 24(%ecx), %eax
+; FALLBACK17-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK17-NEXT: movl 28(%ecx), %eax
+; FALLBACK17-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK17-NEXT: movl 32(%ecx), %eax
+; FALLBACK17-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK17-NEXT: movl 36(%ecx), %eax
+; FALLBACK17-NEXT: movl %eax, (%esp) # 4-byte Spill
+; FALLBACK17-NEXT: movl 40(%ecx), %ebp
+; FALLBACK17-NEXT: movl 44(%ecx), %ebx
+; FALLBACK17-NEXT: movl 48(%ecx), %edi
+; FALLBACK17-NEXT: movl 52(%ecx), %esi
+; FALLBACK17-NEXT: movl 56(%ecx), %edx
+; FALLBACK17-NEXT: movl 60(%ecx), %eax
+; FALLBACK17-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK17-NEXT: movl (%ecx), %ecx
+; FALLBACK17-NEXT: xorps %xmm0, %xmm0
+; FALLBACK17-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movl %ebx, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movl %ebp, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movl (%esp), %eax # 4-byte Reload
+; FALLBACK17-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK17-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK17-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK17-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK17-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK17-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK17-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK17-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK17-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK17-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movl %ecx, %ebp
+; FALLBACK17-NEXT: andl $60, %ebp
+; FALLBACK17-NEXT: leal {{[0-9]+}}(%esp), %eax
+; FALLBACK17-NEXT: subl %ebp, %eax
+; FALLBACK17-NEXT: movl 8(%eax), %esi
+; FALLBACK17-NEXT: movl 12(%eax), %edx
+; FALLBACK17-NEXT: shll $3, %ecx
+; FALLBACK17-NEXT: andl $24, %ecx
+; FALLBACK17-NEXT: movl %edx, %edi
+; FALLBACK17-NEXT: shldl %cl, %esi, %edi
+; FALLBACK17-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK17-NEXT: movl 4(%eax), %edi
+; FALLBACK17-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK17-NEXT: shldl %cl, %edi, %esi
+; FALLBACK17-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK17-NEXT: movl 16(%eax), %edi
+; FALLBACK17-NEXT: movl 20(%eax), %esi
+; FALLBACK17-NEXT: movl %esi, %ebx
+; FALLBACK17-NEXT: shldl %cl, %edi, %ebx
+; FALLBACK17-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK17-NEXT: shldl %cl, %edx, %edi
+; FALLBACK17-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK17-NEXT: movl 24(%eax), %edi
+; FALLBACK17-NEXT: movl 28(%eax), %edx
+; FALLBACK17-NEXT: movl %edx, %ebx
+; FALLBACK17-NEXT: shldl %cl, %edi, %ebx
+; FALLBACK17-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK17-NEXT: shldl %cl, %esi, %edi
+; FALLBACK17-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK17-NEXT: movl 32(%eax), %edi
+; FALLBACK17-NEXT: movl 36(%eax), %esi
+; FALLBACK17-NEXT: movl %esi, %ebx
+; FALLBACK17-NEXT: shldl %cl, %edi, %ebx
+; FALLBACK17-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK17-NEXT: shldl %cl, %edx, %edi
+; FALLBACK17-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK17-NEXT: movl 40(%eax), %edx
+; FALLBACK17-NEXT: movl 44(%eax), %edi
+; FALLBACK17-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK17-NEXT: shldl %cl, %edx, %edi
+; FALLBACK17-NEXT: movl %edi, (%esp) # 4-byte Spill
+; FALLBACK17-NEXT: shldl %cl, %esi, %edx
+; FALLBACK17-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK17-NEXT: movl 56(%eax), %edx
+; FALLBACK17-NEXT: movl 60(%eax), %edi
+; FALLBACK17-NEXT: shldl %cl, %edx, %edi
+; FALLBACK17-NEXT: movl (%eax), %ebx
+; FALLBACK17-NEXT: movl 52(%eax), %esi
+; FALLBACK17-NEXT: shldl %cl, %esi, %edx
+; FALLBACK17-NEXT: negl %ebp
+; FALLBACK17-NEXT: movl 160(%esp,%ebp), %eax
+; FALLBACK17-NEXT: movl {{[0-9]+}}(%esp), %ebp
+; FALLBACK17-NEXT: movl %edx, 56(%ebp)
+; FALLBACK17-NEXT: movl %edi, 60(%ebp)
+; FALLBACK17-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK17-NEXT: shldl %cl, %ebx, %edx
+; FALLBACK17-NEXT: shll %cl, %ebx
+; FALLBACK17-NEXT: shldl %cl, %eax, %esi
+; FALLBACK17-NEXT: # kill: def $cl killed $cl killed $ecx
+; FALLBACK17-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; FALLBACK17-NEXT: shldl %cl, %edi, %eax
+; FALLBACK17-NEXT: movl %eax, 48(%ebp)
+; FALLBACK17-NEXT: movl %esi, 52(%ebp)
+; FALLBACK17-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK17-NEXT: movl %eax, 40(%ebp)
+; FALLBACK17-NEXT: movl (%esp), %eax # 4-byte Reload
+; FALLBACK17-NEXT: movl %eax, 44(%ebp)
+; FALLBACK17-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK17-NEXT: movl %eax, 32(%ebp)
+; FALLBACK17-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK17-NEXT: movl %eax, 36(%ebp)
+; FALLBACK17-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK17-NEXT: movl %eax, 24(%ebp)
+; FALLBACK17-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK17-NEXT: movl %eax, 28(%ebp)
+; FALLBACK17-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK17-NEXT: movl %eax, 16(%ebp)
+; FALLBACK17-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK17-NEXT: movl %eax, 20(%ebp)
+; FALLBACK17-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK17-NEXT: movl %eax, 8(%ebp)
+; FALLBACK17-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK17-NEXT: movl %eax, 12(%ebp)
+; FALLBACK17-NEXT: movl %ebx, (%ebp)
+; FALLBACK17-NEXT: movl %edx, 4(%ebp)
+; FALLBACK17-NEXT: addl $188, %esp
+; FALLBACK17-NEXT: popl %esi
+; FALLBACK17-NEXT: popl %edi
+; FALLBACK17-NEXT: popl %ebx
+; FALLBACK17-NEXT: popl %ebp
+; FALLBACK17-NEXT: retl
+;
+; FALLBACK18-LABEL: shl_64bytes:
+; FALLBACK18: # %bb.0:
+; FALLBACK18-NEXT: pushl %ebp
+; FALLBACK18-NEXT: pushl %ebx
+; FALLBACK18-NEXT: pushl %edi
+; FALLBACK18-NEXT: pushl %esi
+; FALLBACK18-NEXT: subl $204, %esp
+; FALLBACK18-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK18-NEXT: movl (%eax), %ecx
+; FALLBACK18-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: movl 4(%eax), %ecx
+; FALLBACK18-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: movl 8(%eax), %ecx
+; FALLBACK18-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: movl 12(%eax), %ecx
+; FALLBACK18-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: movl 16(%eax), %ecx
+; FALLBACK18-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: movl 20(%eax), %ecx
+; FALLBACK18-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: movl 24(%eax), %ecx
+; FALLBACK18-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: movl 28(%eax), %ecx
+; FALLBACK18-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: movl 32(%eax), %ecx
+; FALLBACK18-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: movl 36(%eax), %ecx
+; FALLBACK18-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: movl 40(%eax), %ebx
+; FALLBACK18-NEXT: movl 44(%eax), %edi
+; FALLBACK18-NEXT: movl 48(%eax), %esi
+; FALLBACK18-NEXT: movl 52(%eax), %edx
+; FALLBACK18-NEXT: movl 56(%eax), %ecx
+; FALLBACK18-NEXT: movl 60(%eax), %eax
+; FALLBACK18-NEXT: movl {{[0-9]+}}(%esp), %ebp
+; FALLBACK18-NEXT: movl (%ebp), %ebp
+; FALLBACK18-NEXT: xorps %xmm0, %xmm0
+; FALLBACK18-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl %ebx, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK18-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK18-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK18-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK18-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK18-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK18-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK18-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK18-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK18-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK18-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: leal (,%ebp,8), %edx
+; FALLBACK18-NEXT: andl $24, %edx
+; FALLBACK18-NEXT: andl $60, %ebp
+; FALLBACK18-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: leal {{[0-9]+}}(%esp), %edi
+; FALLBACK18-NEXT: subl %ebp, %edi
+; FALLBACK18-NEXT: movl (%edi), %ecx
+; FALLBACK18-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: movl 4(%edi), %eax
+; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: movl %edx, %ebx
+; FALLBACK18-NEXT: notb %bl
+; FALLBACK18-NEXT: shrl %ecx
+; FALLBACK18-NEXT: shrxl %ebx, %ecx, %esi
+; FALLBACK18-NEXT: shlxl %edx, %eax, %ecx
+; FALLBACK18-NEXT: orl %ecx, %esi
+; FALLBACK18-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: movl 8(%edi), %esi
+; FALLBACK18-NEXT: movl %esi, %ecx
+; FALLBACK18-NEXT: shrl %ecx
+; FALLBACK18-NEXT: shrxl %ebx, %ecx, %eax
+; FALLBACK18-NEXT: movl 12(%edi), %ecx
+; FALLBACK18-NEXT: shlxl %edx, %ecx, %ebp
+; FALLBACK18-NEXT: orl %ebp, %eax
+; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: shlxl %edx, %esi, %esi
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK18-NEXT: shrl %eax
+; FALLBACK18-NEXT: shrxl %ebx, %eax, %eax
+; FALLBACK18-NEXT: orl %esi, %eax
+; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: movl 16(%edi), %eax
+; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: shrl %eax
+; FALLBACK18-NEXT: shrxl %ebx, %eax, %eax
+; FALLBACK18-NEXT: movl 20(%edi), %esi
+; FALLBACK18-NEXT: shlxl %edx, %esi, %ebp
+; FALLBACK18-NEXT: orl %ebp, %eax
+; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: shlxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK18-NEXT: shrl %ecx
+; FALLBACK18-NEXT: shrxl %ebx, %ecx, %ecx
+; FALLBACK18-NEXT: orl %eax, %ecx
+; FALLBACK18-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: movl 24(%edi), %ecx
+; FALLBACK18-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: shrl %ecx
+; FALLBACK18-NEXT: shrxl %ebx, %ecx, %eax
+; FALLBACK18-NEXT: movl 28(%edi), %ecx
+; FALLBACK18-NEXT: shlxl %edx, %ecx, %ebp
+; FALLBACK18-NEXT: orl %ebp, %eax
+; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: shlxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK18-NEXT: shrl %esi
+; FALLBACK18-NEXT: shrxl %ebx, %esi, %esi
+; FALLBACK18-NEXT: orl %eax, %esi
+; FALLBACK18-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: movl 32(%edi), %eax
+; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: shrl %eax
+; FALLBACK18-NEXT: shrxl %ebx, %eax, %eax
+; FALLBACK18-NEXT: movl 36(%edi), %esi
+; FALLBACK18-NEXT: shlxl %edx, %esi, %ebp
+; FALLBACK18-NEXT: orl %ebp, %eax
+; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: shlxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK18-NEXT: shrl %ecx
+; FALLBACK18-NEXT: shrxl %ebx, %ecx, %ecx
+; FALLBACK18-NEXT: orl %eax, %ecx
+; FALLBACK18-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: movl 40(%edi), %ecx
+; FALLBACK18-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: shrl %ecx
+; FALLBACK18-NEXT: shrxl %ebx, %ecx, %eax
+; FALLBACK18-NEXT: movl 44(%edi), %ecx
+; FALLBACK18-NEXT: shlxl %edx, %ecx, %ebp
+; FALLBACK18-NEXT: orl %ebp, %eax
+; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: shlxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK18-NEXT: shrl %esi
+; FALLBACK18-NEXT: shrxl %ebx, %esi, %esi
+; FALLBACK18-NEXT: orl %eax, %esi
+; FALLBACK18-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: movl 48(%edi), %esi
+; FALLBACK18-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: shrl %esi
+; FALLBACK18-NEXT: shrxl %ebx, %esi, %eax
+; FALLBACK18-NEXT: movl 52(%edi), %esi
+; FALLBACK18-NEXT: shlxl %edx, %esi, %ebp
+; FALLBACK18-NEXT: orl %ebp, %eax
+; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: shlxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK18-NEXT: shrl %ecx
+; FALLBACK18-NEXT: shrxl %ebx, %ecx, %ebp
+; FALLBACK18-NEXT: orl %eax, %ebp
+; FALLBACK18-NEXT: shlxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK18-NEXT: negl %eax
+; FALLBACK18-NEXT: shlxl %edx, 188(%esp,%eax), %ecx
+; FALLBACK18-NEXT: movl 56(%edi), %eax
+; FALLBACK18-NEXT: shlxl %edx, %eax, %edx
+; FALLBACK18-NEXT: shrl %esi
+; FALLBACK18-NEXT: shrxl %ebx, %esi, %esi
+; FALLBACK18-NEXT: orl %edx, %esi
+; FALLBACK18-NEXT: shrl %eax
+; FALLBACK18-NEXT: shrxl %ebx, %eax, %eax
+; FALLBACK18-NEXT: orl %eax, %ecx
+; FALLBACK18-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK18-NEXT: movl %edx, (%eax)
+; FALLBACK18-NEXT: movl %esi, 56(%eax)
+; FALLBACK18-NEXT: movl %ecx, 60(%eax)
+; FALLBACK18-NEXT: movl %ebp, 48(%eax)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK18-NEXT: movl %ecx, 52(%eax)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK18-NEXT: movl %ecx, 40(%eax)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK18-NEXT: movl %ecx, 44(%eax)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK18-NEXT: movl %ecx, 32(%eax)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK18-NEXT: movl %ecx, 36(%eax)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK18-NEXT: movl %ecx, 24(%eax)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK18-NEXT: movl %ecx, 28(%eax)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK18-NEXT: movl %ecx, 16(%eax)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK18-NEXT: movl %ecx, 20(%eax)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK18-NEXT: movl %ecx, 8(%eax)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK18-NEXT: movl %ecx, 12(%eax)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK18-NEXT: movl %ecx, 4(%eax)
+; FALLBACK18-NEXT: addl $204, %esp
+; FALLBACK18-NEXT: popl %esi
+; FALLBACK18-NEXT: popl %edi
+; FALLBACK18-NEXT: popl %ebx
+; FALLBACK18-NEXT: popl %ebp
+; FALLBACK18-NEXT: retl
+;
+; FALLBACK19-LABEL: shl_64bytes:
+; FALLBACK19: # %bb.0:
+; FALLBACK19-NEXT: pushl %ebp
+; FALLBACK19-NEXT: pushl %ebx
+; FALLBACK19-NEXT: pushl %edi
+; FALLBACK19-NEXT: pushl %esi
+; FALLBACK19-NEXT: subl $204, %esp
+; FALLBACK19-NEXT: movl {{[0-9]+}}(%esp), %ebp
+; FALLBACK19-NEXT: movl (%ebp), %eax
+; FALLBACK19-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK19-NEXT: movl 4(%ebp), %eax
+; FALLBACK19-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK19-NEXT: movl 8(%ebp), %eax
+; FALLBACK19-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK19-NEXT: movl 12(%ebp), %eax
+; FALLBACK19-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK19-NEXT: movl 16(%ebp), %eax
+; FALLBACK19-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK19-NEXT: movl 20(%ebp), %eax
+; FALLBACK19-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK19-NEXT: movl 24(%ebp), %eax
+; FALLBACK19-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK19-NEXT: movl 28(%ebp), %eax
+; FALLBACK19-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK19-NEXT: movl 32(%ebp), %eax
+; FALLBACK19-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK19-NEXT: movl 36(%ebp), %eax
+; FALLBACK19-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK19-NEXT: movl 40(%ebp), %ebx
+; FALLBACK19-NEXT: movl 44(%ebp), %edi
+; FALLBACK19-NEXT: movl 48(%ebp), %esi
+; FALLBACK19-NEXT: movl 52(%ebp), %edx
+; FALLBACK19-NEXT: movl 56(%ebp), %ecx
+; FALLBACK19-NEXT: movl 60(%ebp), %eax
+; FALLBACK19-NEXT: movl {{[0-9]+}}(%esp), %ebp
+; FALLBACK19-NEXT: movl (%ebp), %ebp
+; FALLBACK19-NEXT: xorps %xmm0, %xmm0
+; FALLBACK19-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movl %ebx, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK19-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK19-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK19-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK19-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK19-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK19-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK19-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK19-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK19-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK19-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: leal (,%ebp,8), %ecx
+; FALLBACK19-NEXT: andl $24, %ecx
+; FALLBACK19-NEXT: andl $60, %ebp
+; FALLBACK19-NEXT: leal {{[0-9]+}}(%esp), %eax
+; FALLBACK19-NEXT: subl %ebp, %eax
+; FALLBACK19-NEXT: movl 4(%eax), %esi
+; FALLBACK19-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK19-NEXT: movl 8(%eax), %edi
+; FALLBACK19-NEXT: movl 12(%eax), %edx
+; FALLBACK19-NEXT: movl %edx, %ebx
+; FALLBACK19-NEXT: shldl %cl, %edi, %ebx
+; FALLBACK19-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK19-NEXT: shldl %cl, %esi, %edi
+; FALLBACK19-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK19-NEXT: movl 16(%eax), %edi
+; FALLBACK19-NEXT: movl 20(%eax), %esi
+; FALLBACK19-NEXT: movl %esi, %ebx
+; FALLBACK19-NEXT: shldl %cl, %edi, %ebx
+; FALLBACK19-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK19-NEXT: shldl %cl, %edx, %edi
+; FALLBACK19-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK19-NEXT: movl 24(%eax), %edi
+; FALLBACK19-NEXT: movl 28(%eax), %edx
+; FALLBACK19-NEXT: movl %edx, %ebx
+; FALLBACK19-NEXT: shldl %cl, %edi, %ebx
+; FALLBACK19-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK19-NEXT: shldl %cl, %esi, %edi
+; FALLBACK19-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK19-NEXT: movl 32(%eax), %edi
+; FALLBACK19-NEXT: movl 36(%eax), %esi
+; FALLBACK19-NEXT: movl %esi, %ebx
+; FALLBACK19-NEXT: shldl %cl, %edi, %ebx
+; FALLBACK19-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK19-NEXT: shldl %cl, %edx, %edi
+; FALLBACK19-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK19-NEXT: movl 40(%eax), %ebx
+; FALLBACK19-NEXT: movl 44(%eax), %edx
+; FALLBACK19-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK19-NEXT: shldl %cl, %ebx, %edx
+; FALLBACK19-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK19-NEXT: shldl %cl, %esi, %ebx
+; FALLBACK19-NEXT: movl 56(%eax), %edx
+; FALLBACK19-NEXT: movl 60(%eax), %edi
+; FALLBACK19-NEXT: shldl %cl, %edx, %edi
+; FALLBACK19-NEXT: movl (%eax), %esi
+; FALLBACK19-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK19-NEXT: movl 52(%eax), %esi
+; FALLBACK19-NEXT: shldl %cl, %esi, %edx
+; FALLBACK19-NEXT: negl %ebp
+; FALLBACK19-NEXT: movl 176(%esp,%ebp), %ebp
+; FALLBACK19-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK19-NEXT: movl %edx, 56(%eax)
+; FALLBACK19-NEXT: movl %edi, 60(%eax)
+; FALLBACK19-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK19-NEXT: shlxl %ecx, %edx, %edi
+; FALLBACK19-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK19-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; FALLBACK19-NEXT: shldl %cl, %edx, %edi
+; FALLBACK19-NEXT: shldl %cl, %ebp, %esi
+; FALLBACK19-NEXT: # kill: def $cl killed $cl killed $ecx
+; FALLBACK19-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK19-NEXT: shldl %cl, %edx, %ebp
+; FALLBACK19-NEXT: movl %ebp, 48(%eax)
+; FALLBACK19-NEXT: movl %esi, 52(%eax)
+; FALLBACK19-NEXT: movl %ebx, 40(%eax)
+; FALLBACK19-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK19-NEXT: movl %ecx, 44(%eax)
+; FALLBACK19-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK19-NEXT: movl %ecx, 32(%eax)
+; FALLBACK19-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK19-NEXT: movl %ecx, 36(%eax)
+; FALLBACK19-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK19-NEXT: movl %ecx, 24(%eax)
+; FALLBACK19-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK19-NEXT: movl %ecx, 28(%eax)
+; FALLBACK19-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK19-NEXT: movl %ecx, 16(%eax)
+; FALLBACK19-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK19-NEXT: movl %ecx, 20(%eax)
+; FALLBACK19-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK19-NEXT: movl %ecx, 8(%eax)
+; FALLBACK19-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK19-NEXT: movl %ecx, 12(%eax)
+; FALLBACK19-NEXT: movl %edi, 4(%eax)
+; FALLBACK19-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK19-NEXT: movl %ecx, (%eax)
+; FALLBACK19-NEXT: addl $204, %esp
+; FALLBACK19-NEXT: popl %esi
+; FALLBACK19-NEXT: popl %edi
+; FALLBACK19-NEXT: popl %ebx
+; FALLBACK19-NEXT: popl %ebp
+; FALLBACK19-NEXT: retl
+;
+; FALLBACK20-LABEL: shl_64bytes:
+; FALLBACK20: # %bb.0:
+; FALLBACK20-NEXT: pushl %ebp
+; FALLBACK20-NEXT: pushl %ebx
+; FALLBACK20-NEXT: pushl %edi
+; FALLBACK20-NEXT: pushl %esi
+; FALLBACK20-NEXT: subl $204, %esp
+; FALLBACK20-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK20-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK20-NEXT: movups (%ecx), %xmm0
+; FALLBACK20-NEXT: movups 16(%ecx), %xmm1
+; FALLBACK20-NEXT: movups 32(%ecx), %xmm2
+; FALLBACK20-NEXT: movups 48(%ecx), %xmm3
+; FALLBACK20-NEXT: movl (%eax), %eax
+; FALLBACK20-NEXT: xorps %xmm4, %xmm4
+; FALLBACK20-NEXT: movaps %xmm4, {{[0-9]+}}(%esp)
+; FALLBACK20-NEXT: movaps %xmm4, {{[0-9]+}}(%esp)
+; FALLBACK20-NEXT: movaps %xmm4, {{[0-9]+}}(%esp)
+; FALLBACK20-NEXT: movaps %xmm4, {{[0-9]+}}(%esp)
+; FALLBACK20-NEXT: movaps %xmm3, {{[0-9]+}}(%esp)
+; FALLBACK20-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; FALLBACK20-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; FALLBACK20-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; FALLBACK20-NEXT: movl %eax, %edx
+; FALLBACK20-NEXT: andl $60, %edx
+; FALLBACK20-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK20-NEXT: leal {{[0-9]+}}(%esp), %ecx
+; FALLBACK20-NEXT: subl %edx, %ecx
+; FALLBACK20-NEXT: movl (%ecx), %edi
+; FALLBACK20-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK20-NEXT: movl 4(%ecx), %edx
+; FALLBACK20-NEXT: movl %ecx, %ebp
+; FALLBACK20-NEXT: shll $3, %eax
+; FALLBACK20-NEXT: andl $24, %eax
+; FALLBACK20-NEXT: movl %edx, %esi
+; FALLBACK20-NEXT: movl %eax, %ecx
+; FALLBACK20-NEXT: shll %cl, %esi
+; FALLBACK20-NEXT: shrl %edi
+; FALLBACK20-NEXT: movb %al, %ch
+; FALLBACK20-NEXT: notb %ch
+; FALLBACK20-NEXT: movb %ch, %cl
+; FALLBACK20-NEXT: shrl %cl, %edi
+; FALLBACK20-NEXT: orl %esi, %edi
+; FALLBACK20-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK20-NEXT: movl 12(%ebp), %ebx
+; FALLBACK20-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK20-NEXT: movb %al, %cl
+; FALLBACK20-NEXT: shll %cl, %ebx
+; FALLBACK20-NEXT: movl 8(%ebp), %esi
+; FALLBACK20-NEXT: movl %ebp, %edi
+; FALLBACK20-NEXT: movl %esi, %ebp
+; FALLBACK20-NEXT: shrl %ebp
+; FALLBACK20-NEXT: movb %ch, %cl
+; FALLBACK20-NEXT: shrl %cl, %ebp
+; FALLBACK20-NEXT: orl %ebx, %ebp
+; FALLBACK20-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK20-NEXT: movb %al, %cl
+; FALLBACK20-NEXT: shll %cl, %esi
+; FALLBACK20-NEXT: shrl %edx
+; FALLBACK20-NEXT: movb %ch, %cl
+; FALLBACK20-NEXT: shrl %cl, %edx
+; FALLBACK20-NEXT: orl %esi, %edx
+; FALLBACK20-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK20-NEXT: movl %edi, %ebp
+; FALLBACK20-NEXT: movl 20(%edi), %ebx
+; FALLBACK20-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK20-NEXT: movb %al, %cl
+; FALLBACK20-NEXT: shll %cl, %ebx
+; FALLBACK20-NEXT: movl 16(%edi), %esi
+; FALLBACK20-NEXT: movl %esi, %edx
+; FALLBACK20-NEXT: shrl %edx
+; FALLBACK20-NEXT: movb %ch, %cl
+; FALLBACK20-NEXT: shrl %cl, %edx
+; FALLBACK20-NEXT: orl %ebx, %edx
+; FALLBACK20-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK20-NEXT: movb %al, %cl
+; FALLBACK20-NEXT: shll %cl, %esi
+; FALLBACK20-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; FALLBACK20-NEXT: shrl %edi
+; FALLBACK20-NEXT: movb %ch, %cl
+; FALLBACK20-NEXT: shrl %cl, %edi
+; FALLBACK20-NEXT: orl %esi, %edi
+; FALLBACK20-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK20-NEXT: movl %ebp, %edx
+; FALLBACK20-NEXT: movl 28(%ebp), %ebx
+; FALLBACK20-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK20-NEXT: movb %al, %cl
+; FALLBACK20-NEXT: shll %cl, %ebx
+; FALLBACK20-NEXT: movl 24(%ebp), %esi
+; FALLBACK20-NEXT: movl %esi, %edi
+; FALLBACK20-NEXT: shrl %edi
+; FALLBACK20-NEXT: movb %ch, %cl
+; FALLBACK20-NEXT: shrl %cl, %edi
+; FALLBACK20-NEXT: orl %ebx, %edi
+; FALLBACK20-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK20-NEXT: movb %al, %cl
+; FALLBACK20-NEXT: shll %cl, %esi
+; FALLBACK20-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; FALLBACK20-NEXT: shrl %ebp
+; FALLBACK20-NEXT: movb %ch, %cl
+; FALLBACK20-NEXT: shrl %cl, %ebp
+; FALLBACK20-NEXT: orl %esi, %ebp
+; FALLBACK20-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK20-NEXT: movl 36(%edx), %ebx
+; FALLBACK20-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK20-NEXT: movb %al, %cl
+; FALLBACK20-NEXT: shll %cl, %ebx
+; FALLBACK20-NEXT: movl 32(%edx), %esi
+; FALLBACK20-NEXT: movl %edx, %ebp
+; FALLBACK20-NEXT: movl %esi, %edi
+; FALLBACK20-NEXT: shrl %edi
+; FALLBACK20-NEXT: movb %ch, %cl
+; FALLBACK20-NEXT: shrl %cl, %edi
+; FALLBACK20-NEXT: orl %ebx, %edi
+; FALLBACK20-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK20-NEXT: movb %al, %cl
+; FALLBACK20-NEXT: shll %cl, %esi
+; FALLBACK20-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK20-NEXT: shrl %edx
+; FALLBACK20-NEXT: movb %ch, %cl
+; FALLBACK20-NEXT: shrl %cl, %edx
+; FALLBACK20-NEXT: orl %esi, %edx
+; FALLBACK20-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK20-NEXT: movl 44(%ebp), %ebx
+; FALLBACK20-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK20-NEXT: movb %al, %cl
+; FALLBACK20-NEXT: shll %cl, %ebx
+; FALLBACK20-NEXT: movl 40(%ebp), %esi
+; FALLBACK20-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK20-NEXT: movl %esi, %edx
+; FALLBACK20-NEXT: shrl %edx
+; FALLBACK20-NEXT: movb %ch, %cl
+; FALLBACK20-NEXT: shrl %cl, %edx
+; FALLBACK20-NEXT: orl %ebx, %edx
+; FALLBACK20-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK20-NEXT: movb %al, %cl
+; FALLBACK20-NEXT: shll %cl, %esi
+; FALLBACK20-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK20-NEXT: shrl %edx
+; FALLBACK20-NEXT: movb %ch, %cl
+; FALLBACK20-NEXT: shrl %cl, %edx
+; FALLBACK20-NEXT: orl %esi, %edx
+; FALLBACK20-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK20-NEXT: movl 52(%ebp), %esi
+; FALLBACK20-NEXT: movl %esi, %edi
+; FALLBACK20-NEXT: movb %al, %cl
+; FALLBACK20-NEXT: shll %cl, %edi
+; FALLBACK20-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK20-NEXT: negl %edx
+; FALLBACK20-NEXT: movl 176(%esp,%edx), %ebx
+; FALLBACK20-NEXT: movl %ebx, %ebp
+; FALLBACK20-NEXT: shrl %ebp
+; FALLBACK20-NEXT: movb %ch, %cl
+; FALLBACK20-NEXT: shrl %cl, %ebp
+; FALLBACK20-NEXT: orl %edi, %ebp
+; FALLBACK20-NEXT: movb %al, %cl
+; FALLBACK20-NEXT: shll %cl, %ebx
+; FALLBACK20-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK20-NEXT: shrl %edx
+; FALLBACK20-NEXT: movb %ch, %cl
+; FALLBACK20-NEXT: shrl %cl, %edx
+; FALLBACK20-NEXT: orl %ebx, %edx
+; FALLBACK20-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK20-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; FALLBACK20-NEXT: movl 60(%edi), %edx
+; FALLBACK20-NEXT: movb %al, %cl
+; FALLBACK20-NEXT: shll %cl, %edx
+; FALLBACK20-NEXT: movl 56(%edi), %ebx
+; FALLBACK20-NEXT: movl %ebx, %edi
+; FALLBACK20-NEXT: shrl %edi
+; FALLBACK20-NEXT: movb %ch, %cl
+; FALLBACK20-NEXT: shrl %cl, %edi
+; FALLBACK20-NEXT: orl %edx, %edi
+; FALLBACK20-NEXT: movb %al, %cl
+; FALLBACK20-NEXT: shll %cl, %ebx
+; FALLBACK20-NEXT: shrl %esi
+; FALLBACK20-NEXT: movb %ch, %cl
+; FALLBACK20-NEXT: shrl %cl, %esi
+; FALLBACK20-NEXT: orl %ebx, %esi
+; FALLBACK20-NEXT: movl %eax, %ecx
+; FALLBACK20-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK20-NEXT: shll %cl, %edx
+; FALLBACK20-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK20-NEXT: movl %edx, (%eax)
+; FALLBACK20-NEXT: movl %esi, 56(%eax)
+; FALLBACK20-NEXT: movl %edi, 60(%eax)
+; FALLBACK20-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK20-NEXT: movl %ecx, 48(%eax)
+; FALLBACK20-NEXT: movl %ebp, 52(%eax)
+; FALLBACK20-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK20-NEXT: movl %ecx, 40(%eax)
+; FALLBACK20-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK20-NEXT: movl %ecx, 44(%eax)
+; FALLBACK20-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK20-NEXT: movl %ecx, 32(%eax)
+; FALLBACK20-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK20-NEXT: movl %ecx, 36(%eax)
+; FALLBACK20-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK20-NEXT: movl %ecx, 24(%eax)
+; FALLBACK20-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK20-NEXT: movl %ecx, 28(%eax)
+; FALLBACK20-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK20-NEXT: movl %ecx, 16(%eax)
+; FALLBACK20-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK20-NEXT: movl %ecx, 20(%eax)
+; FALLBACK20-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK20-NEXT: movl %ecx, 8(%eax)
+; FALLBACK20-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK20-NEXT: movl %ecx, 12(%eax)
+; FALLBACK20-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK20-NEXT: movl %ecx, 4(%eax)
+; FALLBACK20-NEXT: addl $204, %esp
+; FALLBACK20-NEXT: popl %esi
+; FALLBACK20-NEXT: popl %edi
+; FALLBACK20-NEXT: popl %ebx
+; FALLBACK20-NEXT: popl %ebp
+; FALLBACK20-NEXT: retl
+;
+; FALLBACK21-LABEL: shl_64bytes:
+; FALLBACK21: # %bb.0:
+; FALLBACK21-NEXT: pushl %ebp
+; FALLBACK21-NEXT: pushl %ebx
+; FALLBACK21-NEXT: pushl %edi
+; FALLBACK21-NEXT: pushl %esi
+; FALLBACK21-NEXT: subl $188, %esp
+; FALLBACK21-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK21-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK21-NEXT: movups (%ecx), %xmm0
+; FALLBACK21-NEXT: movups 16(%ecx), %xmm1
+; FALLBACK21-NEXT: movups 32(%ecx), %xmm2
+; FALLBACK21-NEXT: movups 48(%ecx), %xmm3
+; FALLBACK21-NEXT: movl (%eax), %ecx
+; FALLBACK21-NEXT: xorps %xmm4, %xmm4
+; FALLBACK21-NEXT: movaps %xmm4, {{[0-9]+}}(%esp)
+; FALLBACK21-NEXT: movaps %xmm4, {{[0-9]+}}(%esp)
+; FALLBACK21-NEXT: movaps %xmm4, {{[0-9]+}}(%esp)
+; FALLBACK21-NEXT: movaps %xmm4, {{[0-9]+}}(%esp)
+; FALLBACK21-NEXT: movaps %xmm3, {{[0-9]+}}(%esp)
+; FALLBACK21-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; FALLBACK21-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; FALLBACK21-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; FALLBACK21-NEXT: movl %ecx, %ebp
+; FALLBACK21-NEXT: andl $60, %ebp
+; FALLBACK21-NEXT: leal {{[0-9]+}}(%esp), %eax
+; FALLBACK21-NEXT: subl %ebp, %eax
+; FALLBACK21-NEXT: movl 8(%eax), %esi
+; FALLBACK21-NEXT: movl 12(%eax), %edx
+; FALLBACK21-NEXT: shll $3, %ecx
+; FALLBACK21-NEXT: andl $24, %ecx
+; FALLBACK21-NEXT: movl %edx, %edi
+; FALLBACK21-NEXT: shldl %cl, %esi, %edi
+; FALLBACK21-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK21-NEXT: movl 4(%eax), %edi
+; FALLBACK21-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK21-NEXT: shldl %cl, %edi, %esi
+; FALLBACK21-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK21-NEXT: movl 16(%eax), %edi
+; FALLBACK21-NEXT: movl 20(%eax), %esi
+; FALLBACK21-NEXT: movl %esi, %ebx
+; FALLBACK21-NEXT: shldl %cl, %edi, %ebx
+; FALLBACK21-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK21-NEXT: shldl %cl, %edx, %edi
+; FALLBACK21-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK21-NEXT: movl 24(%eax), %edi
+; FALLBACK21-NEXT: movl 28(%eax), %edx
+; FALLBACK21-NEXT: movl %edx, %ebx
+; FALLBACK21-NEXT: shldl %cl, %edi, %ebx
+; FALLBACK21-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK21-NEXT: shldl %cl, %esi, %edi
+; FALLBACK21-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK21-NEXT: movl 32(%eax), %edi
+; FALLBACK21-NEXT: movl 36(%eax), %esi
+; FALLBACK21-NEXT: movl %esi, %ebx
+; FALLBACK21-NEXT: shldl %cl, %edi, %ebx
+; FALLBACK21-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK21-NEXT: shldl %cl, %edx, %edi
+; FALLBACK21-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK21-NEXT: movl 40(%eax), %edx
+; FALLBACK21-NEXT: movl 44(%eax), %edi
+; FALLBACK21-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK21-NEXT: shldl %cl, %edx, %edi
+; FALLBACK21-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK21-NEXT: shldl %cl, %esi, %edx
+; FALLBACK21-NEXT: movl %edx, (%esp) # 4-byte Spill
+; FALLBACK21-NEXT: movl 56(%eax), %edx
+; FALLBACK21-NEXT: movl 60(%eax), %edi
+; FALLBACK21-NEXT: shldl %cl, %edx, %edi
+; FALLBACK21-NEXT: movl (%eax), %ebx
+; FALLBACK21-NEXT: movl 52(%eax), %esi
+; FALLBACK21-NEXT: shldl %cl, %esi, %edx
+; FALLBACK21-NEXT: negl %ebp
+; FALLBACK21-NEXT: movl 160(%esp,%ebp), %eax
+; FALLBACK21-NEXT: movl {{[0-9]+}}(%esp), %ebp
+; FALLBACK21-NEXT: movl %edx, 56(%ebp)
+; FALLBACK21-NEXT: movl %edi, 60(%ebp)
+; FALLBACK21-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK21-NEXT: shldl %cl, %ebx, %edx
+; FALLBACK21-NEXT: shll %cl, %ebx
+; FALLBACK21-NEXT: shldl %cl, %eax, %esi
+; FALLBACK21-NEXT: # kill: def $cl killed $cl killed $ecx
+; FALLBACK21-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; FALLBACK21-NEXT: shldl %cl, %edi, %eax
+; FALLBACK21-NEXT: movl %eax, 48(%ebp)
+; FALLBACK21-NEXT: movl %esi, 52(%ebp)
+; FALLBACK21-NEXT: movl (%esp), %eax # 4-byte Reload
+; FALLBACK21-NEXT: movl %eax, 40(%ebp)
+; FALLBACK21-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK21-NEXT: movl %eax, 44(%ebp)
+; FALLBACK21-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK21-NEXT: movl %eax, 32(%ebp)
+; FALLBACK21-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK21-NEXT: movl %eax, 36(%ebp)
+; FALLBACK21-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK21-NEXT: movl %eax, 24(%ebp)
+; FALLBACK21-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK21-NEXT: movl %eax, 28(%ebp)
+; FALLBACK21-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK21-NEXT: movl %eax, 16(%ebp)
+; FALLBACK21-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK21-NEXT: movl %eax, 20(%ebp)
+; FALLBACK21-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK21-NEXT: movl %eax, 8(%ebp)
+; FALLBACK21-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK21-NEXT: movl %eax, 12(%ebp)
+; FALLBACK21-NEXT: movl %ebx, (%ebp)
+; FALLBACK21-NEXT: movl %edx, 4(%ebp)
+; FALLBACK21-NEXT: addl $188, %esp
+; FALLBACK21-NEXT: popl %esi
+; FALLBACK21-NEXT: popl %edi
+; FALLBACK21-NEXT: popl %ebx
+; FALLBACK21-NEXT: popl %ebp
+; FALLBACK21-NEXT: retl
+;
+; FALLBACK22-LABEL: shl_64bytes:
+; FALLBACK22: # %bb.0:
+; FALLBACK22-NEXT: pushl %ebp
+; FALLBACK22-NEXT: pushl %ebx
+; FALLBACK22-NEXT: pushl %edi
+; FALLBACK22-NEXT: pushl %esi
+; FALLBACK22-NEXT: subl $204, %esp
+; FALLBACK22-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK22-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK22-NEXT: movups (%ecx), %xmm0
+; FALLBACK22-NEXT: movups 16(%ecx), %xmm1
+; FALLBACK22-NEXT: movups 32(%ecx), %xmm2
+; FALLBACK22-NEXT: movups 48(%ecx), %xmm3
+; FALLBACK22-NEXT: movl (%eax), %eax
+; FALLBACK22-NEXT: xorps %xmm4, %xmm4
+; FALLBACK22-NEXT: movaps %xmm4, {{[0-9]+}}(%esp)
+; FALLBACK22-NEXT: movaps %xmm4, {{[0-9]+}}(%esp)
+; FALLBACK22-NEXT: movaps %xmm4, {{[0-9]+}}(%esp)
+; FALLBACK22-NEXT: movaps %xmm4, {{[0-9]+}}(%esp)
+; FALLBACK22-NEXT: movaps %xmm3, {{[0-9]+}}(%esp)
+; FALLBACK22-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; FALLBACK22-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; FALLBACK22-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; FALLBACK22-NEXT: leal (,%eax,8), %edx
+; FALLBACK22-NEXT: andl $24, %edx
+; FALLBACK22-NEXT: andl $60, %eax
+; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: leal {{[0-9]+}}(%esp), %edi
+; FALLBACK22-NEXT: subl %eax, %edi
+; FALLBACK22-NEXT: movl (%edi), %ecx
+; FALLBACK22-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: movl 4(%edi), %eax
+; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: movl %edx, %ebx
+; FALLBACK22-NEXT: notb %bl
+; FALLBACK22-NEXT: shrl %ecx
+; FALLBACK22-NEXT: shrxl %ebx, %ecx, %esi
+; FALLBACK22-NEXT: shlxl %edx, %eax, %ecx
+; FALLBACK22-NEXT: orl %ecx, %esi
+; FALLBACK22-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: movl 8(%edi), %esi
+; FALLBACK22-NEXT: movl %esi, %ecx
+; FALLBACK22-NEXT: shrl %ecx
+; FALLBACK22-NEXT: shrxl %ebx, %ecx, %eax
+; FALLBACK22-NEXT: movl 12(%edi), %ecx
+; FALLBACK22-NEXT: shlxl %edx, %ecx, %ebp
+; FALLBACK22-NEXT: orl %ebp, %eax
+; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: shlxl %edx, %esi, %esi
+; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK22-NEXT: shrl %eax
+; FALLBACK22-NEXT: shrxl %ebx, %eax, %eax
+; FALLBACK22-NEXT: orl %esi, %eax
+; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: movl 16(%edi), %eax
+; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: shrl %eax
+; FALLBACK22-NEXT: shrxl %ebx, %eax, %eax
+; FALLBACK22-NEXT: movl 20(%edi), %esi
+; FALLBACK22-NEXT: shlxl %edx, %esi, %ebp
+; FALLBACK22-NEXT: orl %ebp, %eax
+; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: shlxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK22-NEXT: shrl %ecx
+; FALLBACK22-NEXT: shrxl %ebx, %ecx, %ecx
+; FALLBACK22-NEXT: orl %eax, %ecx
+; FALLBACK22-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: movl 24(%edi), %ecx
+; FALLBACK22-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: shrl %ecx
+; FALLBACK22-NEXT: shrxl %ebx, %ecx, %eax
+; FALLBACK22-NEXT: movl 28(%edi), %ecx
+; FALLBACK22-NEXT: shlxl %edx, %ecx, %ebp
+; FALLBACK22-NEXT: orl %ebp, %eax
+; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: shlxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK22-NEXT: shrl %esi
+; FALLBACK22-NEXT: shrxl %ebx, %esi, %esi
+; FALLBACK22-NEXT: orl %eax, %esi
+; FALLBACK22-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: movl 32(%edi), %eax
+; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: shrl %eax
+; FALLBACK22-NEXT: shrxl %ebx, %eax, %eax
+; FALLBACK22-NEXT: movl 36(%edi), %esi
+; FALLBACK22-NEXT: shlxl %edx, %esi, %ebp
+; FALLBACK22-NEXT: orl %ebp, %eax
+; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: shlxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK22-NEXT: shrl %ecx
+; FALLBACK22-NEXT: shrxl %ebx, %ecx, %ecx
+; FALLBACK22-NEXT: orl %eax, %ecx
+; FALLBACK22-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: movl 40(%edi), %ecx
+; FALLBACK22-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: shrl %ecx
+; FALLBACK22-NEXT: shrxl %ebx, %ecx, %eax
+; FALLBACK22-NEXT: movl 44(%edi), %ecx
+; FALLBACK22-NEXT: shlxl %edx, %ecx, %ebp
+; FALLBACK22-NEXT: orl %ebp, %eax
+; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: shlxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK22-NEXT: shrl %esi
+; FALLBACK22-NEXT: shrxl %ebx, %esi, %esi
+; FALLBACK22-NEXT: orl %eax, %esi
+; FALLBACK22-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: movl 48(%edi), %esi
+; FALLBACK22-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: shrl %esi
+; FALLBACK22-NEXT: shrxl %ebx, %esi, %eax
+; FALLBACK22-NEXT: movl 52(%edi), %esi
+; FALLBACK22-NEXT: shlxl %edx, %esi, %ebp
+; FALLBACK22-NEXT: orl %ebp, %eax
+; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: shlxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK22-NEXT: shrl %ecx
+; FALLBACK22-NEXT: shrxl %ebx, %ecx, %ebp
+; FALLBACK22-NEXT: orl %eax, %ebp
+; FALLBACK22-NEXT: shlxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK22-NEXT: negl %eax
+; FALLBACK22-NEXT: shlxl %edx, 188(%esp,%eax), %ecx
+; FALLBACK22-NEXT: movl 56(%edi), %eax
+; FALLBACK22-NEXT: shlxl %edx, %eax, %edx
+; FALLBACK22-NEXT: shrl %esi
+; FALLBACK22-NEXT: shrxl %ebx, %esi, %esi
+; FALLBACK22-NEXT: orl %edx, %esi
+; FALLBACK22-NEXT: shrl %eax
+; FALLBACK22-NEXT: shrxl %ebx, %eax, %eax
+; FALLBACK22-NEXT: orl %eax, %ecx
+; FALLBACK22-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK22-NEXT: movl %edx, (%eax)
+; FALLBACK22-NEXT: movl %esi, 56(%eax)
+; FALLBACK22-NEXT: movl %ecx, 60(%eax)
+; FALLBACK22-NEXT: movl %ebp, 48(%eax)
+; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK22-NEXT: movl %ecx, 52(%eax)
+; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK22-NEXT: movl %ecx, 40(%eax)
+; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK22-NEXT: movl %ecx, 44(%eax)
+; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK22-NEXT: movl %ecx, 32(%eax)
+; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK22-NEXT: movl %ecx, 36(%eax)
+; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK22-NEXT: movl %ecx, 24(%eax)
+; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK22-NEXT: movl %ecx, 28(%eax)
+; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK22-NEXT: movl %ecx, 16(%eax)
+; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK22-NEXT: movl %ecx, 20(%eax)
+; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK22-NEXT: movl %ecx, 8(%eax)
+; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK22-NEXT: movl %ecx, 12(%eax)
+; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK22-NEXT: movl %ecx, 4(%eax)
+; FALLBACK22-NEXT: addl $204, %esp
+; FALLBACK22-NEXT: popl %esi
+; FALLBACK22-NEXT: popl %edi
+; FALLBACK22-NEXT: popl %ebx
+; FALLBACK22-NEXT: popl %ebp
+; FALLBACK22-NEXT: retl
+;
+; FALLBACK23-LABEL: shl_64bytes:
+; FALLBACK23: # %bb.0:
+; FALLBACK23-NEXT: pushl %ebp
+; FALLBACK23-NEXT: pushl %ebx
+; FALLBACK23-NEXT: pushl %edi
+; FALLBACK23-NEXT: pushl %esi
+; FALLBACK23-NEXT: subl $204, %esp
+; FALLBACK23-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK23-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK23-NEXT: movups (%ecx), %xmm0
+; FALLBACK23-NEXT: movups 16(%ecx), %xmm1
+; FALLBACK23-NEXT: movups 32(%ecx), %xmm2
+; FALLBACK23-NEXT: movups 48(%ecx), %xmm3
+; FALLBACK23-NEXT: movl (%eax), %ebp
+; FALLBACK23-NEXT: xorps %xmm4, %xmm4
+; FALLBACK23-NEXT: movaps %xmm4, {{[0-9]+}}(%esp)
+; FALLBACK23-NEXT: movaps %xmm4, {{[0-9]+}}(%esp)
+; FALLBACK23-NEXT: movaps %xmm4, {{[0-9]+}}(%esp)
+; FALLBACK23-NEXT: movaps %xmm4, {{[0-9]+}}(%esp)
+; FALLBACK23-NEXT: movaps %xmm3, {{[0-9]+}}(%esp)
+; FALLBACK23-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; FALLBACK23-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; FALLBACK23-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; FALLBACK23-NEXT: leal (,%ebp,8), %ecx
+; FALLBACK23-NEXT: andl $24, %ecx
+; FALLBACK23-NEXT: andl $60, %ebp
+; FALLBACK23-NEXT: leal {{[0-9]+}}(%esp), %eax
+; FALLBACK23-NEXT: subl %ebp, %eax
+; FALLBACK23-NEXT: movl 4(%eax), %esi
+; FALLBACK23-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK23-NEXT: movl 8(%eax), %edi
+; FALLBACK23-NEXT: movl 12(%eax), %edx
+; FALLBACK23-NEXT: movl %edx, %ebx
+; FALLBACK23-NEXT: shldl %cl, %edi, %ebx
+; FALLBACK23-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK23-NEXT: shldl %cl, %esi, %edi
+; FALLBACK23-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK23-NEXT: movl 16(%eax), %edi
+; FALLBACK23-NEXT: movl 20(%eax), %esi
+; FALLBACK23-NEXT: movl %esi, %ebx
+; FALLBACK23-NEXT: shldl %cl, %edi, %ebx
+; FALLBACK23-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK23-NEXT: shldl %cl, %edx, %edi
+; FALLBACK23-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK23-NEXT: movl 24(%eax), %edi
+; FALLBACK23-NEXT: movl 28(%eax), %edx
+; FALLBACK23-NEXT: movl %edx, %ebx
+; FALLBACK23-NEXT: shldl %cl, %edi, %ebx
+; FALLBACK23-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK23-NEXT: shldl %cl, %esi, %edi
+; FALLBACK23-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK23-NEXT: movl 32(%eax), %edi
+; FALLBACK23-NEXT: movl 36(%eax), %esi
+; FALLBACK23-NEXT: movl %esi, %ebx
+; FALLBACK23-NEXT: shldl %cl, %edi, %ebx
+; FALLBACK23-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK23-NEXT: shldl %cl, %edx, %edi
+; FALLBACK23-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK23-NEXT: movl 40(%eax), %ebx
+; FALLBACK23-NEXT: movl 44(%eax), %edx
+; FALLBACK23-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK23-NEXT: shldl %cl, %ebx, %edx
+; FALLBACK23-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK23-NEXT: shldl %cl, %esi, %ebx
+; FALLBACK23-NEXT: movl 56(%eax), %edx
+; FALLBACK23-NEXT: movl 60(%eax), %edi
+; FALLBACK23-NEXT: shldl %cl, %edx, %edi
+; FALLBACK23-NEXT: movl (%eax), %esi
+; FALLBACK23-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK23-NEXT: movl 52(%eax), %esi
+; FALLBACK23-NEXT: shldl %cl, %esi, %edx
+; FALLBACK23-NEXT: negl %ebp
+; FALLBACK23-NEXT: movl 176(%esp,%ebp), %ebp
+; FALLBACK23-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK23-NEXT: movl %edx, 56(%eax)
+; FALLBACK23-NEXT: movl %edi, 60(%eax)
+; FALLBACK23-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK23-NEXT: shlxl %ecx, %edx, %edi
+; FALLBACK23-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK23-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; FALLBACK23-NEXT: shldl %cl, %edx, %edi
+; FALLBACK23-NEXT: shldl %cl, %ebp, %esi
+; FALLBACK23-NEXT: # kill: def $cl killed $cl killed $ecx
+; FALLBACK23-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK23-NEXT: shldl %cl, %edx, %ebp
+; FALLBACK23-NEXT: movl %ebp, 48(%eax)
+; FALLBACK23-NEXT: movl %esi, 52(%eax)
+; FALLBACK23-NEXT: movl %ebx, 40(%eax)
+; FALLBACK23-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK23-NEXT: movl %ecx, 44(%eax)
+; FALLBACK23-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK23-NEXT: movl %ecx, 32(%eax)
+; FALLBACK23-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK23-NEXT: movl %ecx, 36(%eax)
+; FALLBACK23-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK23-NEXT: movl %ecx, 24(%eax)
+; FALLBACK23-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK23-NEXT: movl %ecx, 28(%eax)
+; FALLBACK23-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK23-NEXT: movl %ecx, 16(%eax)
+; FALLBACK23-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK23-NEXT: movl %ecx, 20(%eax)
+; FALLBACK23-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK23-NEXT: movl %ecx, 8(%eax)
+; FALLBACK23-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK23-NEXT: movl %ecx, 12(%eax)
+; FALLBACK23-NEXT: movl %edi, 4(%eax)
+; FALLBACK23-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK23-NEXT: movl %ecx, (%eax)
+; FALLBACK23-NEXT: addl $204, %esp
+; FALLBACK23-NEXT: popl %esi
+; FALLBACK23-NEXT: popl %edi
+; FALLBACK23-NEXT: popl %ebx
+; FALLBACK23-NEXT: popl %ebp
+; FALLBACK23-NEXT: retl
+;
+; FALLBACK24-LABEL: shl_64bytes:
+; FALLBACK24: # %bb.0:
+; FALLBACK24-NEXT: pushl %ebp
+; FALLBACK24-NEXT: pushl %ebx
+; FALLBACK24-NEXT: pushl %edi
+; FALLBACK24-NEXT: pushl %esi
+; FALLBACK24-NEXT: subl $204, %esp
+; FALLBACK24-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK24-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK24-NEXT: vmovups (%ecx), %ymm0
+; FALLBACK24-NEXT: vmovups 32(%ecx), %ymm1
+; FALLBACK24-NEXT: movl (%eax), %eax
+; FALLBACK24-NEXT: vxorps %xmm2, %xmm2, %xmm2
+; FALLBACK24-NEXT: vmovups %ymm2, {{[0-9]+}}(%esp)
+; FALLBACK24-NEXT: vmovups %ymm2, {{[0-9]+}}(%esp)
+; FALLBACK24-NEXT: vmovups %ymm1, {{[0-9]+}}(%esp)
+; FALLBACK24-NEXT: vmovups %ymm0, {{[0-9]+}}(%esp)
+; FALLBACK24-NEXT: movl %eax, %edx
+; FALLBACK24-NEXT: andl $60, %edx
+; FALLBACK24-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK24-NEXT: leal {{[0-9]+}}(%esp), %ecx
+; FALLBACK24-NEXT: subl %edx, %ecx
+; FALLBACK24-NEXT: movl (%ecx), %edi
+; FALLBACK24-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK24-NEXT: movl 4(%ecx), %edx
+; FALLBACK24-NEXT: movl %ecx, %ebp
+; FALLBACK24-NEXT: shll $3, %eax
+; FALLBACK24-NEXT: andl $24, %eax
+; FALLBACK24-NEXT: movl %edx, %esi
+; FALLBACK24-NEXT: movl %eax, %ecx
+; FALLBACK24-NEXT: shll %cl, %esi
+; FALLBACK24-NEXT: shrl %edi
+; FALLBACK24-NEXT: movb %al, %ch
+; FALLBACK24-NEXT: notb %ch
+; FALLBACK24-NEXT: movb %ch, %cl
+; FALLBACK24-NEXT: shrl %cl, %edi
+; FALLBACK24-NEXT: orl %esi, %edi
+; FALLBACK24-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK24-NEXT: movl 12(%ebp), %ebx
+; FALLBACK24-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK24-NEXT: movb %al, %cl
+; FALLBACK24-NEXT: shll %cl, %ebx
+; FALLBACK24-NEXT: movl 8(%ebp), %esi
+; FALLBACK24-NEXT: movl %ebp, %edi
+; FALLBACK24-NEXT: movl %esi, %ebp
+; FALLBACK24-NEXT: shrl %ebp
+; FALLBACK24-NEXT: movb %ch, %cl
+; FALLBACK24-NEXT: shrl %cl, %ebp
+; FALLBACK24-NEXT: orl %ebx, %ebp
+; FALLBACK24-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK24-NEXT: movb %al, %cl
+; FALLBACK24-NEXT: shll %cl, %esi
+; FALLBACK24-NEXT: shrl %edx
+; FALLBACK24-NEXT: movb %ch, %cl
+; FALLBACK24-NEXT: shrl %cl, %edx
+; FALLBACK24-NEXT: orl %esi, %edx
+; FALLBACK24-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK24-NEXT: movl %edi, %ebp
+; FALLBACK24-NEXT: movl 20(%edi), %ebx
+; FALLBACK24-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK24-NEXT: movb %al, %cl
+; FALLBACK24-NEXT: shll %cl, %ebx
+; FALLBACK24-NEXT: movl 16(%edi), %esi
+; FALLBACK24-NEXT: movl %esi, %edx
+; FALLBACK24-NEXT: shrl %edx
+; FALLBACK24-NEXT: movb %ch, %cl
+; FALLBACK24-NEXT: shrl %cl, %edx
+; FALLBACK24-NEXT: orl %ebx, %edx
+; FALLBACK24-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK24-NEXT: movb %al, %cl
+; FALLBACK24-NEXT: shll %cl, %esi
+; FALLBACK24-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; FALLBACK24-NEXT: shrl %edi
+; FALLBACK24-NEXT: movb %ch, %cl
+; FALLBACK24-NEXT: shrl %cl, %edi
+; FALLBACK24-NEXT: orl %esi, %edi
+; FALLBACK24-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK24-NEXT: movl %ebp, %edx
+; FALLBACK24-NEXT: movl 28(%ebp), %ebx
+; FALLBACK24-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK24-NEXT: movb %al, %cl
+; FALLBACK24-NEXT: shll %cl, %ebx
+; FALLBACK24-NEXT: movl 24(%ebp), %esi
+; FALLBACK24-NEXT: movl %esi, %edi
+; FALLBACK24-NEXT: shrl %edi
+; FALLBACK24-NEXT: movb %ch, %cl
+; FALLBACK24-NEXT: shrl %cl, %edi
+; FALLBACK24-NEXT: orl %ebx, %edi
+; FALLBACK24-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK24-NEXT: movb %al, %cl
+; FALLBACK24-NEXT: shll %cl, %esi
+; FALLBACK24-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; FALLBACK24-NEXT: shrl %ebp
+; FALLBACK24-NEXT: movb %ch, %cl
+; FALLBACK24-NEXT: shrl %cl, %ebp
+; FALLBACK24-NEXT: orl %esi, %ebp
+; FALLBACK24-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK24-NEXT: movl 36(%edx), %ebx
+; FALLBACK24-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK24-NEXT: movb %al, %cl
+; FALLBACK24-NEXT: shll %cl, %ebx
+; FALLBACK24-NEXT: movl 32(%edx), %esi
+; FALLBACK24-NEXT: movl %edx, %ebp
+; FALLBACK24-NEXT: movl %esi, %edi
+; FALLBACK24-NEXT: shrl %edi
+; FALLBACK24-NEXT: movb %ch, %cl
+; FALLBACK24-NEXT: shrl %cl, %edi
+; FALLBACK24-NEXT: orl %ebx, %edi
+; FALLBACK24-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK24-NEXT: movb %al, %cl
+; FALLBACK24-NEXT: shll %cl, %esi
+; FALLBACK24-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK24-NEXT: shrl %edx
+; FALLBACK24-NEXT: movb %ch, %cl
+; FALLBACK24-NEXT: shrl %cl, %edx
+; FALLBACK24-NEXT: orl %esi, %edx
+; FALLBACK24-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK24-NEXT: movl 44(%ebp), %ebx
+; FALLBACK24-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK24-NEXT: movb %al, %cl
+; FALLBACK24-NEXT: shll %cl, %ebx
+; FALLBACK24-NEXT: movl 40(%ebp), %esi
+; FALLBACK24-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK24-NEXT: movl %esi, %edx
+; FALLBACK24-NEXT: shrl %edx
+; FALLBACK24-NEXT: movb %ch, %cl
+; FALLBACK24-NEXT: shrl %cl, %edx
+; FALLBACK24-NEXT: orl %ebx, %edx
+; FALLBACK24-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK24-NEXT: movb %al, %cl
+; FALLBACK24-NEXT: shll %cl, %esi
+; FALLBACK24-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK24-NEXT: shrl %edx
+; FALLBACK24-NEXT: movb %ch, %cl
+; FALLBACK24-NEXT: shrl %cl, %edx
+; FALLBACK24-NEXT: orl %esi, %edx
+; FALLBACK24-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK24-NEXT: movl 52(%ebp), %esi
+; FALLBACK24-NEXT: movl %esi, %edi
+; FALLBACK24-NEXT: movb %al, %cl
+; FALLBACK24-NEXT: shll %cl, %edi
+; FALLBACK24-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK24-NEXT: negl %edx
+; FALLBACK24-NEXT: movl 176(%esp,%edx), %ebx
+; FALLBACK24-NEXT: movl %ebx, %ebp
+; FALLBACK24-NEXT: shrl %ebp
+; FALLBACK24-NEXT: movb %ch, %cl
+; FALLBACK24-NEXT: shrl %cl, %ebp
+; FALLBACK24-NEXT: orl %edi, %ebp
+; FALLBACK24-NEXT: movb %al, %cl
+; FALLBACK24-NEXT: shll %cl, %ebx
+; FALLBACK24-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK24-NEXT: shrl %edx
+; FALLBACK24-NEXT: movb %ch, %cl
+; FALLBACK24-NEXT: shrl %cl, %edx
+; FALLBACK24-NEXT: orl %ebx, %edx
+; FALLBACK24-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK24-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; FALLBACK24-NEXT: movl 60(%edi), %edx
+; FALLBACK24-NEXT: movb %al, %cl
+; FALLBACK24-NEXT: shll %cl, %edx
+; FALLBACK24-NEXT: movl 56(%edi), %ebx
+; FALLBACK24-NEXT: movl %ebx, %edi
+; FALLBACK24-NEXT: shrl %edi
+; FALLBACK24-NEXT: movb %ch, %cl
+; FALLBACK24-NEXT: shrl %cl, %edi
+; FALLBACK24-NEXT: orl %edx, %edi
+; FALLBACK24-NEXT: movb %al, %cl
+; FALLBACK24-NEXT: shll %cl, %ebx
+; FALLBACK24-NEXT: shrl %esi
+; FALLBACK24-NEXT: movb %ch, %cl
+; FALLBACK24-NEXT: shrl %cl, %esi
+; FALLBACK24-NEXT: orl %ebx, %esi
+; FALLBACK24-NEXT: movl %eax, %ecx
+; FALLBACK24-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK24-NEXT: shll %cl, %edx
+; FALLBACK24-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK24-NEXT: movl %edx, (%eax)
+; FALLBACK24-NEXT: movl %esi, 56(%eax)
+; FALLBACK24-NEXT: movl %edi, 60(%eax)
+; FALLBACK24-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK24-NEXT: movl %ecx, 48(%eax)
+; FALLBACK24-NEXT: movl %ebp, 52(%eax)
+; FALLBACK24-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK24-NEXT: movl %ecx, 40(%eax)
+; FALLBACK24-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK24-NEXT: movl %ecx, 44(%eax)
+; FALLBACK24-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK24-NEXT: movl %ecx, 32(%eax)
+; FALLBACK24-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK24-NEXT: movl %ecx, 36(%eax)
+; FALLBACK24-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK24-NEXT: movl %ecx, 24(%eax)
+; FALLBACK24-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK24-NEXT: movl %ecx, 28(%eax)
+; FALLBACK24-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK24-NEXT: movl %ecx, 16(%eax)
+; FALLBACK24-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK24-NEXT: movl %ecx, 20(%eax)
+; FALLBACK24-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK24-NEXT: movl %ecx, 8(%eax)
+; FALLBACK24-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK24-NEXT: movl %ecx, 12(%eax)
+; FALLBACK24-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK24-NEXT: movl %ecx, 4(%eax)
+; FALLBACK24-NEXT: addl $204, %esp
+; FALLBACK24-NEXT: popl %esi
+; FALLBACK24-NEXT: popl %edi
+; FALLBACK24-NEXT: popl %ebx
+; FALLBACK24-NEXT: popl %ebp
+; FALLBACK24-NEXT: vzeroupper
+; FALLBACK24-NEXT: retl
+;
+; FALLBACK25-LABEL: shl_64bytes:
+; FALLBACK25: # %bb.0:
+; FALLBACK25-NEXT: pushl %ebp
+; FALLBACK25-NEXT: pushl %ebx
+; FALLBACK25-NEXT: pushl %edi
+; FALLBACK25-NEXT: pushl %esi
+; FALLBACK25-NEXT: subl $188, %esp
+; FALLBACK25-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK25-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK25-NEXT: vmovups (%ecx), %ymm0
+; FALLBACK25-NEXT: vmovups 32(%ecx), %ymm1
+; FALLBACK25-NEXT: movl (%eax), %ecx
+; FALLBACK25-NEXT: vxorps %xmm2, %xmm2, %xmm2
+; FALLBACK25-NEXT: vmovups %ymm2, {{[0-9]+}}(%esp)
+; FALLBACK25-NEXT: vmovups %ymm2, {{[0-9]+}}(%esp)
+; FALLBACK25-NEXT: vmovups %ymm1, {{[0-9]+}}(%esp)
+; FALLBACK25-NEXT: vmovups %ymm0, {{[0-9]+}}(%esp)
+; FALLBACK25-NEXT: movl %ecx, %ebp
+; FALLBACK25-NEXT: andl $60, %ebp
+; FALLBACK25-NEXT: leal {{[0-9]+}}(%esp), %eax
+; FALLBACK25-NEXT: subl %ebp, %eax
+; FALLBACK25-NEXT: movl 8(%eax), %esi
+; FALLBACK25-NEXT: movl 12(%eax), %edx
+; FALLBACK25-NEXT: shll $3, %ecx
+; FALLBACK25-NEXT: andl $24, %ecx
+; FALLBACK25-NEXT: movl %edx, %edi
+; FALLBACK25-NEXT: shldl %cl, %esi, %edi
+; FALLBACK25-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK25-NEXT: movl 4(%eax), %edi
+; FALLBACK25-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK25-NEXT: shldl %cl, %edi, %esi
+; FALLBACK25-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK25-NEXT: movl 16(%eax), %edi
+; FALLBACK25-NEXT: movl 20(%eax), %esi
+; FALLBACK25-NEXT: movl %esi, %ebx
+; FALLBACK25-NEXT: shldl %cl, %edi, %ebx
+; FALLBACK25-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK25-NEXT: shldl %cl, %edx, %edi
+; FALLBACK25-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK25-NEXT: movl 24(%eax), %edi
+; FALLBACK25-NEXT: movl 28(%eax), %edx
+; FALLBACK25-NEXT: movl %edx, %ebx
+; FALLBACK25-NEXT: shldl %cl, %edi, %ebx
+; FALLBACK25-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK25-NEXT: shldl %cl, %esi, %edi
+; FALLBACK25-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK25-NEXT: movl 32(%eax), %edi
+; FALLBACK25-NEXT: movl 36(%eax), %esi
+; FALLBACK25-NEXT: movl %esi, %ebx
+; FALLBACK25-NEXT: shldl %cl, %edi, %ebx
+; FALLBACK25-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK25-NEXT: shldl %cl, %edx, %edi
+; FALLBACK25-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK25-NEXT: movl 40(%eax), %edx
+; FALLBACK25-NEXT: movl 44(%eax), %edi
+; FALLBACK25-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK25-NEXT: shldl %cl, %edx, %edi
+; FALLBACK25-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK25-NEXT: shldl %cl, %esi, %edx
+; FALLBACK25-NEXT: movl %edx, (%esp) # 4-byte Spill
+; FALLBACK25-NEXT: movl 56(%eax), %edx
+; FALLBACK25-NEXT: movl 60(%eax), %edi
+; FALLBACK25-NEXT: shldl %cl, %edx, %edi
+; FALLBACK25-NEXT: movl (%eax), %ebx
+; FALLBACK25-NEXT: movl 52(%eax), %esi
+; FALLBACK25-NEXT: shldl %cl, %esi, %edx
+; FALLBACK25-NEXT: negl %ebp
+; FALLBACK25-NEXT: movl 160(%esp,%ebp), %eax
+; FALLBACK25-NEXT: movl {{[0-9]+}}(%esp), %ebp
+; FALLBACK25-NEXT: movl %edx, 56(%ebp)
+; FALLBACK25-NEXT: movl %edi, 60(%ebp)
+; FALLBACK25-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK25-NEXT: shldl %cl, %ebx, %edx
+; FALLBACK25-NEXT: shll %cl, %ebx
+; FALLBACK25-NEXT: shldl %cl, %eax, %esi
+; FALLBACK25-NEXT: # kill: def $cl killed $cl killed $ecx
+; FALLBACK25-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; FALLBACK25-NEXT: shldl %cl, %edi, %eax
+; FALLBACK25-NEXT: movl %eax, 48(%ebp)
+; FALLBACK25-NEXT: movl %esi, 52(%ebp)
+; FALLBACK25-NEXT: movl (%esp), %eax # 4-byte Reload
+; FALLBACK25-NEXT: movl %eax, 40(%ebp)
+; FALLBACK25-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK25-NEXT: movl %eax, 44(%ebp)
+; FALLBACK25-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK25-NEXT: movl %eax, 32(%ebp)
+; FALLBACK25-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK25-NEXT: movl %eax, 36(%ebp)
+; FALLBACK25-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK25-NEXT: movl %eax, 24(%ebp)
+; FALLBACK25-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK25-NEXT: movl %eax, 28(%ebp)
+; FALLBACK25-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK25-NEXT: movl %eax, 16(%ebp)
+; FALLBACK25-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK25-NEXT: movl %eax, 20(%ebp)
+; FALLBACK25-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK25-NEXT: movl %eax, 8(%ebp)
+; FALLBACK25-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK25-NEXT: movl %eax, 12(%ebp)
+; FALLBACK25-NEXT: movl %ebx, (%ebp)
+; FALLBACK25-NEXT: movl %edx, 4(%ebp)
+; FALLBACK25-NEXT: addl $188, %esp
+; FALLBACK25-NEXT: popl %esi
+; FALLBACK25-NEXT: popl %edi
+; FALLBACK25-NEXT: popl %ebx
+; FALLBACK25-NEXT: popl %ebp
+; FALLBACK25-NEXT: vzeroupper
+; FALLBACK25-NEXT: retl
+;
+; FALLBACK26-LABEL: shl_64bytes:
+; FALLBACK26: # %bb.0:
+; FALLBACK26-NEXT: pushl %ebp
+; FALLBACK26-NEXT: pushl %ebx
+; FALLBACK26-NEXT: pushl %edi
+; FALLBACK26-NEXT: pushl %esi
+; FALLBACK26-NEXT: subl $204, %esp
+; FALLBACK26-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK26-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK26-NEXT: vmovups (%ecx), %ymm0
+; FALLBACK26-NEXT: vmovups 32(%ecx), %ymm1
+; FALLBACK26-NEXT: movl (%eax), %eax
+; FALLBACK26-NEXT: vxorps %xmm2, %xmm2, %xmm2
+; FALLBACK26-NEXT: vmovups %ymm2, {{[0-9]+}}(%esp)
+; FALLBACK26-NEXT: vmovups %ymm2, {{[0-9]+}}(%esp)
+; FALLBACK26-NEXT: vmovups %ymm1, {{[0-9]+}}(%esp)
+; FALLBACK26-NEXT: vmovups %ymm0, {{[0-9]+}}(%esp)
+; FALLBACK26-NEXT: leal (,%eax,8), %edx
+; FALLBACK26-NEXT: andl $24, %edx
+; FALLBACK26-NEXT: andl $60, %eax
+; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: leal {{[0-9]+}}(%esp), %edi
+; FALLBACK26-NEXT: subl %eax, %edi
+; FALLBACK26-NEXT: movl (%edi), %ecx
+; FALLBACK26-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: movl 4(%edi), %eax
+; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: movl %edx, %ebx
+; FALLBACK26-NEXT: notb %bl
+; FALLBACK26-NEXT: shrl %ecx
+; FALLBACK26-NEXT: shrxl %ebx, %ecx, %esi
+; FALLBACK26-NEXT: shlxl %edx, %eax, %ecx
+; FALLBACK26-NEXT: orl %ecx, %esi
+; FALLBACK26-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: movl 8(%edi), %esi
+; FALLBACK26-NEXT: movl %esi, %ecx
+; FALLBACK26-NEXT: shrl %ecx
+; FALLBACK26-NEXT: shrxl %ebx, %ecx, %eax
+; FALLBACK26-NEXT: movl 12(%edi), %ecx
+; FALLBACK26-NEXT: shlxl %edx, %ecx, %ebp
+; FALLBACK26-NEXT: orl %ebp, %eax
+; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: shlxl %edx, %esi, %esi
+; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK26-NEXT: shrl %eax
+; FALLBACK26-NEXT: shrxl %ebx, %eax, %eax
+; FALLBACK26-NEXT: orl %esi, %eax
+; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: movl 16(%edi), %eax
+; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: shrl %eax
+; FALLBACK26-NEXT: shrxl %ebx, %eax, %eax
+; FALLBACK26-NEXT: movl 20(%edi), %esi
+; FALLBACK26-NEXT: shlxl %edx, %esi, %ebp
+; FALLBACK26-NEXT: orl %ebp, %eax
+; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: shlxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK26-NEXT: shrl %ecx
+; FALLBACK26-NEXT: shrxl %ebx, %ecx, %ecx
+; FALLBACK26-NEXT: orl %eax, %ecx
+; FALLBACK26-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: movl 24(%edi), %ecx
+; FALLBACK26-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: shrl %ecx
+; FALLBACK26-NEXT: shrxl %ebx, %ecx, %eax
+; FALLBACK26-NEXT: movl 28(%edi), %ecx
+; FALLBACK26-NEXT: shlxl %edx, %ecx, %ebp
+; FALLBACK26-NEXT: orl %ebp, %eax
+; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: shlxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK26-NEXT: shrl %esi
+; FALLBACK26-NEXT: shrxl %ebx, %esi, %esi
+; FALLBACK26-NEXT: orl %eax, %esi
+; FALLBACK26-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: movl 32(%edi), %eax
+; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: shrl %eax
+; FALLBACK26-NEXT: shrxl %ebx, %eax, %eax
+; FALLBACK26-NEXT: movl 36(%edi), %esi
+; FALLBACK26-NEXT: shlxl %edx, %esi, %ebp
+; FALLBACK26-NEXT: orl %ebp, %eax
+; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: shlxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK26-NEXT: shrl %ecx
+; FALLBACK26-NEXT: shrxl %ebx, %ecx, %ecx
+; FALLBACK26-NEXT: orl %eax, %ecx
+; FALLBACK26-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: movl 40(%edi), %ecx
+; FALLBACK26-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: shrl %ecx
+; FALLBACK26-NEXT: shrxl %ebx, %ecx, %eax
+; FALLBACK26-NEXT: movl 44(%edi), %ecx
+; FALLBACK26-NEXT: shlxl %edx, %ecx, %ebp
+; FALLBACK26-NEXT: orl %ebp, %eax
+; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: shlxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK26-NEXT: shrl %esi
+; FALLBACK26-NEXT: shrxl %ebx, %esi, %esi
+; FALLBACK26-NEXT: orl %eax, %esi
+; FALLBACK26-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: movl 48(%edi), %esi
+; FALLBACK26-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: shrl %esi
+; FALLBACK26-NEXT: shrxl %ebx, %esi, %eax
+; FALLBACK26-NEXT: movl 52(%edi), %esi
+; FALLBACK26-NEXT: shlxl %edx, %esi, %ebp
+; FALLBACK26-NEXT: orl %ebp, %eax
+; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: shlxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK26-NEXT: shrl %ecx
+; FALLBACK26-NEXT: shrxl %ebx, %ecx, %ebp
+; FALLBACK26-NEXT: orl %eax, %ebp
+; FALLBACK26-NEXT: shlxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK26-NEXT: negl %eax
+; FALLBACK26-NEXT: shlxl %edx, 188(%esp,%eax), %ecx
+; FALLBACK26-NEXT: movl 56(%edi), %eax
+; FALLBACK26-NEXT: shlxl %edx, %eax, %edx
+; FALLBACK26-NEXT: shrl %esi
+; FALLBACK26-NEXT: shrxl %ebx, %esi, %esi
+; FALLBACK26-NEXT: orl %edx, %esi
+; FALLBACK26-NEXT: shrl %eax
+; FALLBACK26-NEXT: shrxl %ebx, %eax, %eax
+; FALLBACK26-NEXT: orl %eax, %ecx
+; FALLBACK26-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK26-NEXT: movl %edx, (%eax)
+; FALLBACK26-NEXT: movl %esi, 56(%eax)
+; FALLBACK26-NEXT: movl %ecx, 60(%eax)
+; FALLBACK26-NEXT: movl %ebp, 48(%eax)
+; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK26-NEXT: movl %ecx, 52(%eax)
+; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK26-NEXT: movl %ecx, 40(%eax)
+; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK26-NEXT: movl %ecx, 44(%eax)
+; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK26-NEXT: movl %ecx, 32(%eax)
+; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK26-NEXT: movl %ecx, 36(%eax)
+; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK26-NEXT: movl %ecx, 24(%eax)
+; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK26-NEXT: movl %ecx, 28(%eax)
+; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK26-NEXT: movl %ecx, 16(%eax)
+; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK26-NEXT: movl %ecx, 20(%eax)
+; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK26-NEXT: movl %ecx, 8(%eax)
+; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK26-NEXT: movl %ecx, 12(%eax)
+; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK26-NEXT: movl %ecx, 4(%eax)
+; FALLBACK26-NEXT: addl $204, %esp
+; FALLBACK26-NEXT: popl %esi
+; FALLBACK26-NEXT: popl %edi
+; FALLBACK26-NEXT: popl %ebx
+; FALLBACK26-NEXT: popl %ebp
+; FALLBACK26-NEXT: vzeroupper
+; FALLBACK26-NEXT: retl
+;
+; FALLBACK27-LABEL: shl_64bytes:
+; FALLBACK27: # %bb.0:
+; FALLBACK27-NEXT: pushl %ebp
+; FALLBACK27-NEXT: pushl %ebx
+; FALLBACK27-NEXT: pushl %edi
+; FALLBACK27-NEXT: pushl %esi
+; FALLBACK27-NEXT: subl $204, %esp
+; FALLBACK27-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK27-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK27-NEXT: vmovups (%ecx), %ymm0
+; FALLBACK27-NEXT: vmovups 32(%ecx), %ymm1
+; FALLBACK27-NEXT: movl (%eax), %ebx
+; FALLBACK27-NEXT: vxorps %xmm2, %xmm2, %xmm2
+; FALLBACK27-NEXT: vmovups %ymm2, {{[0-9]+}}(%esp)
+; FALLBACK27-NEXT: vmovups %ymm2, {{[0-9]+}}(%esp)
+; FALLBACK27-NEXT: vmovups %ymm1, {{[0-9]+}}(%esp)
+; FALLBACK27-NEXT: vmovups %ymm0, {{[0-9]+}}(%esp)
+; FALLBACK27-NEXT: leal (,%ebx,8), %ecx
+; FALLBACK27-NEXT: andl $24, %ecx
+; FALLBACK27-NEXT: andl $60, %ebx
+; FALLBACK27-NEXT: leal {{[0-9]+}}(%esp), %eax
+; FALLBACK27-NEXT: subl %ebx, %eax
+; FALLBACK27-NEXT: movl 4(%eax), %esi
+; FALLBACK27-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK27-NEXT: movl 8(%eax), %edi
+; FALLBACK27-NEXT: movl 12(%eax), %edx
+; FALLBACK27-NEXT: movl %edx, %ebp
+; FALLBACK27-NEXT: shldl %cl, %edi, %ebp
+; FALLBACK27-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK27-NEXT: shldl %cl, %esi, %edi
+; FALLBACK27-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK27-NEXT: movl 16(%eax), %edi
+; FALLBACK27-NEXT: movl 20(%eax), %esi
+; FALLBACK27-NEXT: movl %esi, %ebp
+; FALLBACK27-NEXT: shldl %cl, %edi, %ebp
+; FALLBACK27-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK27-NEXT: shldl %cl, %edx, %edi
+; FALLBACK27-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK27-NEXT: movl 24(%eax), %edi
+; FALLBACK27-NEXT: movl 28(%eax), %edx
+; FALLBACK27-NEXT: movl %edx, %ebp
+; FALLBACK27-NEXT: shldl %cl, %edi, %ebp
+; FALLBACK27-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK27-NEXT: shldl %cl, %esi, %edi
+; FALLBACK27-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK27-NEXT: movl 32(%eax), %edi
+; FALLBACK27-NEXT: movl 36(%eax), %esi
+; FALLBACK27-NEXT: movl %esi, %ebp
+; FALLBACK27-NEXT: shldl %cl, %edi, %ebp
+; FALLBACK27-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK27-NEXT: shldl %cl, %edx, %edi
+; FALLBACK27-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK27-NEXT: movl 40(%eax), %ebp
+; FALLBACK27-NEXT: movl 44(%eax), %edx
+; FALLBACK27-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK27-NEXT: shldl %cl, %ebp, %edx
+; FALLBACK27-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK27-NEXT: shldl %cl, %esi, %ebp
+; FALLBACK27-NEXT: movl 56(%eax), %edx
+; FALLBACK27-NEXT: movl 60(%eax), %edi
+; FALLBACK27-NEXT: shldl %cl, %edx, %edi
+; FALLBACK27-NEXT: movl (%eax), %esi
+; FALLBACK27-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK27-NEXT: movl 52(%eax), %esi
+; FALLBACK27-NEXT: shldl %cl, %esi, %edx
+; FALLBACK27-NEXT: negl %ebx
+; FALLBACK27-NEXT: movl 176(%esp,%ebx), %ebx
+; FALLBACK27-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK27-NEXT: movl %edx, 56(%eax)
+; FALLBACK27-NEXT: movl %edi, 60(%eax)
+; FALLBACK27-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK27-NEXT: shlxl %ecx, %edx, %edi
+; FALLBACK27-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK27-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; FALLBACK27-NEXT: shldl %cl, %edx, %edi
+; FALLBACK27-NEXT: shldl %cl, %ebx, %esi
+; FALLBACK27-NEXT: # kill: def $cl killed $cl killed $ecx
+; FALLBACK27-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK27-NEXT: shldl %cl, %edx, %ebx
+; FALLBACK27-NEXT: movl %ebx, 48(%eax)
+; FALLBACK27-NEXT: movl %esi, 52(%eax)
+; FALLBACK27-NEXT: movl %ebp, 40(%eax)
+; FALLBACK27-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK27-NEXT: movl %ecx, 44(%eax)
+; FALLBACK27-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK27-NEXT: movl %ecx, 32(%eax)
+; FALLBACK27-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK27-NEXT: movl %ecx, 36(%eax)
+; FALLBACK27-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK27-NEXT: movl %ecx, 24(%eax)
+; FALLBACK27-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK27-NEXT: movl %ecx, 28(%eax)
+; FALLBACK27-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK27-NEXT: movl %ecx, 16(%eax)
+; FALLBACK27-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK27-NEXT: movl %ecx, 20(%eax)
+; FALLBACK27-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK27-NEXT: movl %ecx, 8(%eax)
+; FALLBACK27-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK27-NEXT: movl %ecx, 12(%eax)
+; FALLBACK27-NEXT: movl %edi, 4(%eax)
+; FALLBACK27-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK27-NEXT: movl %ecx, (%eax)
+; FALLBACK27-NEXT: addl $204, %esp
+; FALLBACK27-NEXT: popl %esi
+; FALLBACK27-NEXT: popl %edi
+; FALLBACK27-NEXT: popl %ebx
+; FALLBACK27-NEXT: popl %ebp
+; FALLBACK27-NEXT: vzeroupper
+; FALLBACK27-NEXT: retl
+;
+; FALLBACK28-LABEL: shl_64bytes:
+; FALLBACK28: # %bb.0:
+; FALLBACK28-NEXT: pushl %ebp
+; FALLBACK28-NEXT: pushl %ebx
+; FALLBACK28-NEXT: pushl %edi
+; FALLBACK28-NEXT: pushl %esi
+; FALLBACK28-NEXT: subl $204, %esp
+; FALLBACK28-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK28-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK28-NEXT: vmovups (%ecx), %zmm0
+; FALLBACK28-NEXT: movl (%eax), %eax
+; FALLBACK28-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; FALLBACK28-NEXT: vmovups %zmm1, {{[0-9]+}}(%esp)
+; FALLBACK28-NEXT: vmovups %zmm0, {{[0-9]+}}(%esp)
+; FALLBACK28-NEXT: movl %eax, %edx
+; FALLBACK28-NEXT: andl $60, %edx
+; FALLBACK28-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK28-NEXT: leal {{[0-9]+}}(%esp), %ecx
+; FALLBACK28-NEXT: subl %edx, %ecx
+; FALLBACK28-NEXT: movl (%ecx), %edi
+; FALLBACK28-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK28-NEXT: movl 4(%ecx), %edx
+; FALLBACK28-NEXT: movl %ecx, %ebp
+; FALLBACK28-NEXT: shll $3, %eax
+; FALLBACK28-NEXT: andl $24, %eax
+; FALLBACK28-NEXT: movl %edx, %esi
+; FALLBACK28-NEXT: movl %eax, %ecx
+; FALLBACK28-NEXT: shll %cl, %esi
+; FALLBACK28-NEXT: shrl %edi
+; FALLBACK28-NEXT: movb %al, %ch
+; FALLBACK28-NEXT: notb %ch
+; FALLBACK28-NEXT: movb %ch, %cl
+; FALLBACK28-NEXT: shrl %cl, %edi
+; FALLBACK28-NEXT: orl %esi, %edi
+; FALLBACK28-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK28-NEXT: movl 12(%ebp), %ebx
+; FALLBACK28-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK28-NEXT: movb %al, %cl
+; FALLBACK28-NEXT: shll %cl, %ebx
+; FALLBACK28-NEXT: movl 8(%ebp), %esi
+; FALLBACK28-NEXT: movl %ebp, %edi
+; FALLBACK28-NEXT: movl %esi, %ebp
+; FALLBACK28-NEXT: shrl %ebp
+; FALLBACK28-NEXT: movb %ch, %cl
+; FALLBACK28-NEXT: shrl %cl, %ebp
+; FALLBACK28-NEXT: orl %ebx, %ebp
+; FALLBACK28-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK28-NEXT: movb %al, %cl
+; FALLBACK28-NEXT: shll %cl, %esi
+; FALLBACK28-NEXT: shrl %edx
+; FALLBACK28-NEXT: movb %ch, %cl
+; FALLBACK28-NEXT: shrl %cl, %edx
+; FALLBACK28-NEXT: orl %esi, %edx
+; FALLBACK28-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK28-NEXT: movl %edi, %ebp
+; FALLBACK28-NEXT: movl 20(%edi), %ebx
+; FALLBACK28-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK28-NEXT: movb %al, %cl
+; FALLBACK28-NEXT: shll %cl, %ebx
+; FALLBACK28-NEXT: movl 16(%edi), %esi
+; FALLBACK28-NEXT: movl %esi, %edx
+; FALLBACK28-NEXT: shrl %edx
+; FALLBACK28-NEXT: movb %ch, %cl
+; FALLBACK28-NEXT: shrl %cl, %edx
+; FALLBACK28-NEXT: orl %ebx, %edx
+; FALLBACK28-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK28-NEXT: movb %al, %cl
+; FALLBACK28-NEXT: shll %cl, %esi
+; FALLBACK28-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; FALLBACK28-NEXT: shrl %edi
+; FALLBACK28-NEXT: movb %ch, %cl
+; FALLBACK28-NEXT: shrl %cl, %edi
+; FALLBACK28-NEXT: orl %esi, %edi
+; FALLBACK28-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK28-NEXT: movl %ebp, %edx
+; FALLBACK28-NEXT: movl 28(%ebp), %ebx
+; FALLBACK28-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK28-NEXT: movb %al, %cl
+; FALLBACK28-NEXT: shll %cl, %ebx
+; FALLBACK28-NEXT: movl 24(%ebp), %esi
+; FALLBACK28-NEXT: movl %esi, %edi
+; FALLBACK28-NEXT: shrl %edi
+; FALLBACK28-NEXT: movb %ch, %cl
+; FALLBACK28-NEXT: shrl %cl, %edi
+; FALLBACK28-NEXT: orl %ebx, %edi
+; FALLBACK28-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK28-NEXT: movb %al, %cl
+; FALLBACK28-NEXT: shll %cl, %esi
+; FALLBACK28-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; FALLBACK28-NEXT: shrl %ebp
+; FALLBACK28-NEXT: movb %ch, %cl
+; FALLBACK28-NEXT: shrl %cl, %ebp
+; FALLBACK28-NEXT: orl %esi, %ebp
+; FALLBACK28-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK28-NEXT: movl 36(%edx), %ebx
+; FALLBACK28-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK28-NEXT: movb %al, %cl
+; FALLBACK28-NEXT: shll %cl, %ebx
+; FALLBACK28-NEXT: movl 32(%edx), %esi
+; FALLBACK28-NEXT: movl %edx, %ebp
+; FALLBACK28-NEXT: movl %esi, %edi
+; FALLBACK28-NEXT: shrl %edi
+; FALLBACK28-NEXT: movb %ch, %cl
+; FALLBACK28-NEXT: shrl %cl, %edi
+; FALLBACK28-NEXT: orl %ebx, %edi
+; FALLBACK28-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK28-NEXT: movb %al, %cl
+; FALLBACK28-NEXT: shll %cl, %esi
+; FALLBACK28-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK28-NEXT: shrl %edx
+; FALLBACK28-NEXT: movb %ch, %cl
+; FALLBACK28-NEXT: shrl %cl, %edx
+; FALLBACK28-NEXT: orl %esi, %edx
+; FALLBACK28-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK28-NEXT: movl 44(%ebp), %ebx
+; FALLBACK28-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK28-NEXT: movb %al, %cl
+; FALLBACK28-NEXT: shll %cl, %ebx
+; FALLBACK28-NEXT: movl 40(%ebp), %esi
+; FALLBACK28-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK28-NEXT: movl %esi, %edx
+; FALLBACK28-NEXT: shrl %edx
+; FALLBACK28-NEXT: movb %ch, %cl
+; FALLBACK28-NEXT: shrl %cl, %edx
+; FALLBACK28-NEXT: orl %ebx, %edx
+; FALLBACK28-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK28-NEXT: movb %al, %cl
+; FALLBACK28-NEXT: shll %cl, %esi
+; FALLBACK28-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK28-NEXT: shrl %edx
+; FALLBACK28-NEXT: movb %ch, %cl
+; FALLBACK28-NEXT: shrl %cl, %edx
+; FALLBACK28-NEXT: orl %esi, %edx
+; FALLBACK28-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK28-NEXT: movl 52(%ebp), %esi
+; FALLBACK28-NEXT: movl %esi, %edi
+; FALLBACK28-NEXT: movb %al, %cl
+; FALLBACK28-NEXT: shll %cl, %edi
+; FALLBACK28-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK28-NEXT: negl %edx
+; FALLBACK28-NEXT: movl 176(%esp,%edx), %ebx
+; FALLBACK28-NEXT: movl %ebx, %ebp
+; FALLBACK28-NEXT: shrl %ebp
+; FALLBACK28-NEXT: movb %ch, %cl
+; FALLBACK28-NEXT: shrl %cl, %ebp
+; FALLBACK28-NEXT: orl %edi, %ebp
+; FALLBACK28-NEXT: movb %al, %cl
+; FALLBACK28-NEXT: shll %cl, %ebx
+; FALLBACK28-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK28-NEXT: shrl %edx
+; FALLBACK28-NEXT: movb %ch, %cl
+; FALLBACK28-NEXT: shrl %cl, %edx
+; FALLBACK28-NEXT: orl %ebx, %edx
+; FALLBACK28-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK28-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; FALLBACK28-NEXT: movl 60(%edi), %edx
+; FALLBACK28-NEXT: movb %al, %cl
+; FALLBACK28-NEXT: shll %cl, %edx
+; FALLBACK28-NEXT: movl 56(%edi), %ebx
+; FALLBACK28-NEXT: movl %ebx, %edi
+; FALLBACK28-NEXT: shrl %edi
+; FALLBACK28-NEXT: movb %ch, %cl
+; FALLBACK28-NEXT: shrl %cl, %edi
+; FALLBACK28-NEXT: orl %edx, %edi
+; FALLBACK28-NEXT: movb %al, %cl
+; FALLBACK28-NEXT: shll %cl, %ebx
+; FALLBACK28-NEXT: shrl %esi
+; FALLBACK28-NEXT: movb %ch, %cl
+; FALLBACK28-NEXT: shrl %cl, %esi
+; FALLBACK28-NEXT: orl %ebx, %esi
+; FALLBACK28-NEXT: movl %eax, %ecx
+; FALLBACK28-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK28-NEXT: shll %cl, %edx
+; FALLBACK28-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK28-NEXT: movl %edx, (%eax)
+; FALLBACK28-NEXT: movl %esi, 56(%eax)
+; FALLBACK28-NEXT: movl %edi, 60(%eax)
+; FALLBACK28-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK28-NEXT: movl %ecx, 48(%eax)
+; FALLBACK28-NEXT: movl %ebp, 52(%eax)
+; FALLBACK28-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK28-NEXT: movl %ecx, 40(%eax)
+; FALLBACK28-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK28-NEXT: movl %ecx, 44(%eax)
+; FALLBACK28-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK28-NEXT: movl %ecx, 32(%eax)
+; FALLBACK28-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK28-NEXT: movl %ecx, 36(%eax)
+; FALLBACK28-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK28-NEXT: movl %ecx, 24(%eax)
+; FALLBACK28-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK28-NEXT: movl %ecx, 28(%eax)
+; FALLBACK28-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK28-NEXT: movl %ecx, 16(%eax)
+; FALLBACK28-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK28-NEXT: movl %ecx, 20(%eax)
+; FALLBACK28-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK28-NEXT: movl %ecx, 8(%eax)
+; FALLBACK28-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK28-NEXT: movl %ecx, 12(%eax)
+; FALLBACK28-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK28-NEXT: movl %ecx, 4(%eax)
+; FALLBACK28-NEXT: addl $204, %esp
+; FALLBACK28-NEXT: popl %esi
+; FALLBACK28-NEXT: popl %edi
+; FALLBACK28-NEXT: popl %ebx
+; FALLBACK28-NEXT: popl %ebp
+; FALLBACK28-NEXT: vzeroupper
+; FALLBACK28-NEXT: retl
+;
+; FALLBACK29-LABEL: shl_64bytes:
+; FALLBACK29: # %bb.0:
+; FALLBACK29-NEXT: pushl %ebp
+; FALLBACK29-NEXT: pushl %ebx
+; FALLBACK29-NEXT: pushl %edi
+; FALLBACK29-NEXT: pushl %esi
+; FALLBACK29-NEXT: subl $188, %esp
+; FALLBACK29-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK29-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK29-NEXT: vmovups (%ecx), %zmm0
+; FALLBACK29-NEXT: movl (%eax), %ecx
+; FALLBACK29-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; FALLBACK29-NEXT: vmovups %zmm1, {{[0-9]+}}(%esp)
+; FALLBACK29-NEXT: vmovups %zmm0, {{[0-9]+}}(%esp)
+; FALLBACK29-NEXT: movl %ecx, %ebp
+; FALLBACK29-NEXT: andl $60, %ebp
+; FALLBACK29-NEXT: leal {{[0-9]+}}(%esp), %eax
+; FALLBACK29-NEXT: subl %ebp, %eax
+; FALLBACK29-NEXT: movl 8(%eax), %esi
+; FALLBACK29-NEXT: movl 12(%eax), %edx
+; FALLBACK29-NEXT: shll $3, %ecx
+; FALLBACK29-NEXT: andl $24, %ecx
+; FALLBACK29-NEXT: movl %edx, %edi
+; FALLBACK29-NEXT: shldl %cl, %esi, %edi
+; FALLBACK29-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK29-NEXT: movl 4(%eax), %edi
+; FALLBACK29-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK29-NEXT: shldl %cl, %edi, %esi
+; FALLBACK29-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK29-NEXT: movl 16(%eax), %edi
+; FALLBACK29-NEXT: movl 20(%eax), %esi
+; FALLBACK29-NEXT: movl %esi, %ebx
+; FALLBACK29-NEXT: shldl %cl, %edi, %ebx
+; FALLBACK29-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK29-NEXT: shldl %cl, %edx, %edi
+; FALLBACK29-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK29-NEXT: movl 24(%eax), %edi
+; FALLBACK29-NEXT: movl 28(%eax), %edx
+; FALLBACK29-NEXT: movl %edx, %ebx
+; FALLBACK29-NEXT: shldl %cl, %edi, %ebx
+; FALLBACK29-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK29-NEXT: shldl %cl, %esi, %edi
+; FALLBACK29-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK29-NEXT: movl 32(%eax), %edi
+; FALLBACK29-NEXT: movl 36(%eax), %esi
+; FALLBACK29-NEXT: movl %esi, %ebx
+; FALLBACK29-NEXT: shldl %cl, %edi, %ebx
+; FALLBACK29-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK29-NEXT: shldl %cl, %edx, %edi
+; FALLBACK29-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK29-NEXT: movl 40(%eax), %edx
+; FALLBACK29-NEXT: movl 44(%eax), %edi
+; FALLBACK29-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK29-NEXT: shldl %cl, %edx, %edi
+; FALLBACK29-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK29-NEXT: shldl %cl, %esi, %edx
+; FALLBACK29-NEXT: movl %edx, (%esp) # 4-byte Spill
+; FALLBACK29-NEXT: movl 56(%eax), %edx
+; FALLBACK29-NEXT: movl 60(%eax), %edi
+; FALLBACK29-NEXT: shldl %cl, %edx, %edi
+; FALLBACK29-NEXT: movl (%eax), %ebx
+; FALLBACK29-NEXT: movl 52(%eax), %esi
+; FALLBACK29-NEXT: shldl %cl, %esi, %edx
+; FALLBACK29-NEXT: negl %ebp
+; FALLBACK29-NEXT: movl 160(%esp,%ebp), %eax
+; FALLBACK29-NEXT: movl {{[0-9]+}}(%esp), %ebp
+; FALLBACK29-NEXT: movl %edx, 56(%ebp)
+; FALLBACK29-NEXT: movl %edi, 60(%ebp)
+; FALLBACK29-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK29-NEXT: shldl %cl, %ebx, %edx
+; FALLBACK29-NEXT: shll %cl, %ebx
+; FALLBACK29-NEXT: shldl %cl, %eax, %esi
+; FALLBACK29-NEXT: # kill: def $cl killed $cl killed $ecx
+; FALLBACK29-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; FALLBACK29-NEXT: shldl %cl, %edi, %eax
+; FALLBACK29-NEXT: movl %eax, 48(%ebp)
+; FALLBACK29-NEXT: movl %esi, 52(%ebp)
+; FALLBACK29-NEXT: movl (%esp), %eax # 4-byte Reload
+; FALLBACK29-NEXT: movl %eax, 40(%ebp)
+; FALLBACK29-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK29-NEXT: movl %eax, 44(%ebp)
+; FALLBACK29-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK29-NEXT: movl %eax, 32(%ebp)
+; FALLBACK29-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK29-NEXT: movl %eax, 36(%ebp)
+; FALLBACK29-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK29-NEXT: movl %eax, 24(%ebp)
+; FALLBACK29-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK29-NEXT: movl %eax, 28(%ebp)
+; FALLBACK29-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK29-NEXT: movl %eax, 16(%ebp)
+; FALLBACK29-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK29-NEXT: movl %eax, 20(%ebp)
+; FALLBACK29-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK29-NEXT: movl %eax, 8(%ebp)
+; FALLBACK29-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK29-NEXT: movl %eax, 12(%ebp)
+; FALLBACK29-NEXT: movl %ebx, (%ebp)
+; FALLBACK29-NEXT: movl %edx, 4(%ebp)
+; FALLBACK29-NEXT: addl $188, %esp
+; FALLBACK29-NEXT: popl %esi
+; FALLBACK29-NEXT: popl %edi
+; FALLBACK29-NEXT: popl %ebx
+; FALLBACK29-NEXT: popl %ebp
+; FALLBACK29-NEXT: vzeroupper
+; FALLBACK29-NEXT: retl
+;
+; FALLBACK30-LABEL: shl_64bytes:
+; FALLBACK30: # %bb.0:
+; FALLBACK30-NEXT: pushl %ebp
+; FALLBACK30-NEXT: pushl %ebx
+; FALLBACK30-NEXT: pushl %edi
+; FALLBACK30-NEXT: pushl %esi
+; FALLBACK30-NEXT: subl $204, %esp
+; FALLBACK30-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK30-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK30-NEXT: vmovups (%ecx), %zmm0
+; FALLBACK30-NEXT: movl (%eax), %eax
+; FALLBACK30-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; FALLBACK30-NEXT: vmovups %zmm1, {{[0-9]+}}(%esp)
+; FALLBACK30-NEXT: vmovups %zmm0, {{[0-9]+}}(%esp)
+; FALLBACK30-NEXT: leal (,%eax,8), %edx
+; FALLBACK30-NEXT: andl $24, %edx
+; FALLBACK30-NEXT: andl $60, %eax
+; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: leal {{[0-9]+}}(%esp), %edi
+; FALLBACK30-NEXT: subl %eax, %edi
+; FALLBACK30-NEXT: movl (%edi), %ecx
+; FALLBACK30-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: movl 4(%edi), %eax
+; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: movl %edx, %ebx
+; FALLBACK30-NEXT: notb %bl
+; FALLBACK30-NEXT: shrl %ecx
+; FALLBACK30-NEXT: shrxl %ebx, %ecx, %esi
+; FALLBACK30-NEXT: shlxl %edx, %eax, %ecx
+; FALLBACK30-NEXT: orl %ecx, %esi
+; FALLBACK30-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: movl 8(%edi), %esi
+; FALLBACK30-NEXT: movl %esi, %ecx
+; FALLBACK30-NEXT: shrl %ecx
+; FALLBACK30-NEXT: shrxl %ebx, %ecx, %eax
+; FALLBACK30-NEXT: movl 12(%edi), %ecx
+; FALLBACK30-NEXT: shlxl %edx, %ecx, %ebp
+; FALLBACK30-NEXT: orl %ebp, %eax
+; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: shlxl %edx, %esi, %esi
+; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK30-NEXT: shrl %eax
+; FALLBACK30-NEXT: shrxl %ebx, %eax, %eax
+; FALLBACK30-NEXT: orl %esi, %eax
+; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: movl 16(%edi), %eax
+; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: shrl %eax
+; FALLBACK30-NEXT: shrxl %ebx, %eax, %eax
+; FALLBACK30-NEXT: movl 20(%edi), %esi
+; FALLBACK30-NEXT: shlxl %edx, %esi, %ebp
+; FALLBACK30-NEXT: orl %ebp, %eax
+; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: shlxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK30-NEXT: shrl %ecx
+; FALLBACK30-NEXT: shrxl %ebx, %ecx, %ecx
+; FALLBACK30-NEXT: orl %eax, %ecx
+; FALLBACK30-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: movl 24(%edi), %ecx
+; FALLBACK30-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: shrl %ecx
+; FALLBACK30-NEXT: shrxl %ebx, %ecx, %eax
+; FALLBACK30-NEXT: movl 28(%edi), %ecx
+; FALLBACK30-NEXT: shlxl %edx, %ecx, %ebp
+; FALLBACK30-NEXT: orl %ebp, %eax
+; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: shlxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK30-NEXT: shrl %esi
+; FALLBACK30-NEXT: shrxl %ebx, %esi, %esi
+; FALLBACK30-NEXT: orl %eax, %esi
+; FALLBACK30-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: movl 32(%edi), %eax
+; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: shrl %eax
+; FALLBACK30-NEXT: shrxl %ebx, %eax, %eax
+; FALLBACK30-NEXT: movl 36(%edi), %esi
+; FALLBACK30-NEXT: shlxl %edx, %esi, %ebp
+; FALLBACK30-NEXT: orl %ebp, %eax
+; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: shlxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK30-NEXT: shrl %ecx
+; FALLBACK30-NEXT: shrxl %ebx, %ecx, %ecx
+; FALLBACK30-NEXT: orl %eax, %ecx
+; FALLBACK30-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: movl 40(%edi), %ecx
+; FALLBACK30-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: shrl %ecx
+; FALLBACK30-NEXT: shrxl %ebx, %ecx, %eax
+; FALLBACK30-NEXT: movl 44(%edi), %ecx
+; FALLBACK30-NEXT: shlxl %edx, %ecx, %ebp
+; FALLBACK30-NEXT: orl %ebp, %eax
+; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: shlxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK30-NEXT: shrl %esi
+; FALLBACK30-NEXT: shrxl %ebx, %esi, %esi
+; FALLBACK30-NEXT: orl %eax, %esi
+; FALLBACK30-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: movl 48(%edi), %esi
+; FALLBACK30-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: shrl %esi
+; FALLBACK30-NEXT: shrxl %ebx, %esi, %eax
+; FALLBACK30-NEXT: movl 52(%edi), %esi
+; FALLBACK30-NEXT: shlxl %edx, %esi, %ebp
+; FALLBACK30-NEXT: orl %ebp, %eax
+; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: shlxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK30-NEXT: shrl %ecx
+; FALLBACK30-NEXT: shrxl %ebx, %ecx, %ebp
+; FALLBACK30-NEXT: orl %eax, %ebp
+; FALLBACK30-NEXT: shlxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK30-NEXT: negl %eax
+; FALLBACK30-NEXT: shlxl %edx, 188(%esp,%eax), %ecx
+; FALLBACK30-NEXT: movl 56(%edi), %eax
+; FALLBACK30-NEXT: shlxl %edx, %eax, %edx
+; FALLBACK30-NEXT: shrl %esi
+; FALLBACK30-NEXT: shrxl %ebx, %esi, %esi
+; FALLBACK30-NEXT: orl %edx, %esi
+; FALLBACK30-NEXT: shrl %eax
+; FALLBACK30-NEXT: shrxl %ebx, %eax, %eax
+; FALLBACK30-NEXT: orl %eax, %ecx
+; FALLBACK30-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK30-NEXT: movl %edx, (%eax)
+; FALLBACK30-NEXT: movl %esi, 56(%eax)
+; FALLBACK30-NEXT: movl %ecx, 60(%eax)
+; FALLBACK30-NEXT: movl %ebp, 48(%eax)
+; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK30-NEXT: movl %ecx, 52(%eax)
+; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK30-NEXT: movl %ecx, 40(%eax)
+; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK30-NEXT: movl %ecx, 44(%eax)
+; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK30-NEXT: movl %ecx, 32(%eax)
+; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK30-NEXT: movl %ecx, 36(%eax)
+; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK30-NEXT: movl %ecx, 24(%eax)
+; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK30-NEXT: movl %ecx, 28(%eax)
+; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK30-NEXT: movl %ecx, 16(%eax)
+; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK30-NEXT: movl %ecx, 20(%eax)
+; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK30-NEXT: movl %ecx, 8(%eax)
+; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK30-NEXT: movl %ecx, 12(%eax)
+; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK30-NEXT: movl %ecx, 4(%eax)
+; FALLBACK30-NEXT: addl $204, %esp
+; FALLBACK30-NEXT: popl %esi
+; FALLBACK30-NEXT: popl %edi
+; FALLBACK30-NEXT: popl %ebx
+; FALLBACK30-NEXT: popl %ebp
+; FALLBACK30-NEXT: vzeroupper
+; FALLBACK30-NEXT: retl
+;
+; FALLBACK31-LABEL: shl_64bytes:
+; FALLBACK31: # %bb.0:
+; FALLBACK31-NEXT: pushl %ebp
+; FALLBACK31-NEXT: pushl %ebx
+; FALLBACK31-NEXT: pushl %edi
+; FALLBACK31-NEXT: pushl %esi
+; FALLBACK31-NEXT: subl $204, %esp
+; FALLBACK31-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK31-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK31-NEXT: vmovups (%ecx), %zmm0
+; FALLBACK31-NEXT: movl (%eax), %ebx
+; FALLBACK31-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; FALLBACK31-NEXT: vmovups %zmm1, {{[0-9]+}}(%esp)
+; FALLBACK31-NEXT: vmovups %zmm0, {{[0-9]+}}(%esp)
+; FALLBACK31-NEXT: leal (,%ebx,8), %ecx
+; FALLBACK31-NEXT: andl $24, %ecx
+; FALLBACK31-NEXT: andl $60, %ebx
+; FALLBACK31-NEXT: leal {{[0-9]+}}(%esp), %eax
+; FALLBACK31-NEXT: subl %ebx, %eax
+; FALLBACK31-NEXT: movl 4(%eax), %esi
+; FALLBACK31-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK31-NEXT: movl 8(%eax), %edi
+; FALLBACK31-NEXT: movl 12(%eax), %edx
+; FALLBACK31-NEXT: movl %edx, %ebp
+; FALLBACK31-NEXT: shldl %cl, %edi, %ebp
+; FALLBACK31-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK31-NEXT: shldl %cl, %esi, %edi
+; FALLBACK31-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK31-NEXT: movl 16(%eax), %edi
+; FALLBACK31-NEXT: movl 20(%eax), %esi
+; FALLBACK31-NEXT: movl %esi, %ebp
+; FALLBACK31-NEXT: shldl %cl, %edi, %ebp
+; FALLBACK31-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK31-NEXT: shldl %cl, %edx, %edi
+; FALLBACK31-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK31-NEXT: movl 24(%eax), %edi
+; FALLBACK31-NEXT: movl 28(%eax), %edx
+; FALLBACK31-NEXT: movl %edx, %ebp
+; FALLBACK31-NEXT: shldl %cl, %edi, %ebp
+; FALLBACK31-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK31-NEXT: shldl %cl, %esi, %edi
+; FALLBACK31-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK31-NEXT: movl 32(%eax), %edi
+; FALLBACK31-NEXT: movl 36(%eax), %esi
+; FALLBACK31-NEXT: movl %esi, %ebp
+; FALLBACK31-NEXT: shldl %cl, %edi, %ebp
+; FALLBACK31-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK31-NEXT: shldl %cl, %edx, %edi
+; FALLBACK31-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK31-NEXT: movl 40(%eax), %ebp
+; FALLBACK31-NEXT: movl 44(%eax), %edx
+; FALLBACK31-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK31-NEXT: shldl %cl, %ebp, %edx
+; FALLBACK31-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK31-NEXT: shldl %cl, %esi, %ebp
+; FALLBACK31-NEXT: movl 56(%eax), %edx
+; FALLBACK31-NEXT: movl 60(%eax), %edi
+; FALLBACK31-NEXT: shldl %cl, %edx, %edi
+; FALLBACK31-NEXT: movl (%eax), %esi
+; FALLBACK31-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK31-NEXT: movl 52(%eax), %esi
+; FALLBACK31-NEXT: shldl %cl, %esi, %edx
+; FALLBACK31-NEXT: negl %ebx
+; FALLBACK31-NEXT: movl 176(%esp,%ebx), %ebx
+; FALLBACK31-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK31-NEXT: movl %edx, 56(%eax)
+; FALLBACK31-NEXT: movl %edi, 60(%eax)
+; FALLBACK31-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK31-NEXT: shlxl %ecx, %edx, %edi
+; FALLBACK31-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK31-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; FALLBACK31-NEXT: shldl %cl, %edx, %edi
+; FALLBACK31-NEXT: shldl %cl, %ebx, %esi
+; FALLBACK31-NEXT: # kill: def $cl killed $cl killed $ecx
+; FALLBACK31-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK31-NEXT: shldl %cl, %edx, %ebx
+; FALLBACK31-NEXT: movl %ebx, 48(%eax)
+; FALLBACK31-NEXT: movl %esi, 52(%eax)
+; FALLBACK31-NEXT: movl %ebp, 40(%eax)
+; FALLBACK31-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK31-NEXT: movl %ecx, 44(%eax)
+; FALLBACK31-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK31-NEXT: movl %ecx, 32(%eax)
+; FALLBACK31-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK31-NEXT: movl %ecx, 36(%eax)
+; FALLBACK31-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK31-NEXT: movl %ecx, 24(%eax)
+; FALLBACK31-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK31-NEXT: movl %ecx, 28(%eax)
+; FALLBACK31-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK31-NEXT: movl %ecx, 16(%eax)
+; FALLBACK31-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK31-NEXT: movl %ecx, 20(%eax)
+; FALLBACK31-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK31-NEXT: movl %ecx, 8(%eax)
+; FALLBACK31-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK31-NEXT: movl %ecx, 12(%eax)
+; FALLBACK31-NEXT: movl %edi, 4(%eax)
+; FALLBACK31-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK31-NEXT: movl %ecx, (%eax)
+; FALLBACK31-NEXT: addl $204, %esp
+; FALLBACK31-NEXT: popl %esi
+; FALLBACK31-NEXT: popl %edi
+; FALLBACK31-NEXT: popl %ebx
+; FALLBACK31-NEXT: popl %ebp
+; FALLBACK31-NEXT: vzeroupper
+; FALLBACK31-NEXT: retl
+ %src = load i512, ptr %src.ptr, align 1
+ %byteOff = load i512, ptr %byteOff.ptr, align 1
+ %bitOff = shl i512 %byteOff, 3
+ %res = shl i512 %src, %bitOff
+ store i512 %res, ptr %dst, align 1
+ ret void
+}
+
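The IR body above scales the loaded byte offset to bits (%bitOff = shl i512 %byteOff, 3) before the i512 left shift, which is why every FALLBACK variant splits the amount into a dword-aligned stack displacement (andl $60) plus a residual bit count (andl $24) fed to shld/shl, reading from a zero-padded spill buffer at a negative offset. A minimal C reference model of that semantics follows; it is a sketch, not part of the test file, the name shl_64bytes_ref is illustrative, and the &63 masking mirrors what the generated code does with its andl $60 / andl $24 pair (the IR shift itself is only exercised with in-range amounts).

#include <stdint.h>
#include <string.h>

/* Hypothetical reference model for @shl_64bytes: shift a 64-byte
 * little-endian value left by byteOff*8 bits. The codegen above
 * realizes the same thing with a 128-byte zero-padded stack buffer
 * (the movaps/vmovups spills) read back at a negative displacement. */
static void shl_64bytes_ref(const uint8_t *src, uint64_t byteOff,
                            uint8_t *dst) {
  uint8_t buf[128] = {0};                  /* low half zero, source in high half */
  memcpy(buf + 64, src, 64);
  unsigned off = (unsigned)(byteOff & 63); /* assumed mask, as in the codegen */
  memcpy(dst, buf + 64 - off, 64);         /* dst[i] = src[i - off], or 0 below */
}

The same picture explains the qwordOff variant introduced next: the offset there counts qwords, so the scalar paths scale it with shll $3 and mask with andl $56 instead of andl $63, and the pure-copy X64 variants need no cross-dword shld at all.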
+define void @shl_64bytes_qwordOff(ptr %src.ptr, ptr %qwordOff.ptr, ptr %dst) nounwind {
+; X64-SSE2-LABEL: shl_64bytes_qwordOff:
; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: pushq %rbx
; X64-SSE2-NEXT: movq (%rdi), %rax
@@ -2012,6 +19811,11 @@ define void @shl_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; X64-SSE2-NEXT: movq 48(%rdi), %rbx
; X64-SSE2-NEXT: movq 56(%rdi), %rdi
; X64-SSE2-NEXT: movl (%rsi), %esi
+; X64-SSE2-NEXT: xorps %xmm0, %xmm0
+; X64-SSE2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; X64-SSE2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; X64-SSE2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; X64-SSE2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; X64-SSE2-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
; X64-SSE2-NEXT: movq %rbx, -{{[0-9]+}}(%rsp)
; X64-SSE2-NEXT: movq %r11, -{{[0-9]+}}(%rsp)
@@ -2020,15 +19824,8 @@ define void @shl_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; X64-SSE2-NEXT: movq %r8, -{{[0-9]+}}(%rsp)
; X64-SSE2-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
; X64-SSE2-NEXT: movq %rax, -{{[0-9]+}}(%rsp)
-; X64-SSE2-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-SSE2-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-SSE2-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-SSE2-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-SSE2-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-SSE2-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-SSE2-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-SSE2-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-SSE2-NEXT: andl $63, %esi
+; X64-SSE2-NEXT: shll $3, %esi
+; X64-SSE2-NEXT: andl $56, %esi
; X64-SSE2-NEXT: negl %esi
; X64-SSE2-NEXT: movslq %esi, %rax
; X64-SSE2-NEXT: movq -64(%rsp,%rax), %rcx
@@ -2050,23 +19847,25 @@ define void @shl_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; X64-SSE2-NEXT: popq %rbx
; X64-SSE2-NEXT: retq
;
-; X64-SSE42-LABEL: shl_64bytes:
+; X64-SSE42-LABEL: shl_64bytes_qwordOff:
; X64-SSE42: # %bb.0:
+; X64-SSE42-NEXT: pushq %rax
; X64-SSE42-NEXT: movups (%rdi), %xmm0
; X64-SSE42-NEXT: movups 16(%rdi), %xmm1
; X64-SSE42-NEXT: movups 32(%rdi), %xmm2
; X64-SSE42-NEXT: movups 48(%rdi), %xmm3
; X64-SSE42-NEXT: movl (%rsi), %eax
; X64-SSE42-NEXT: xorps %xmm4, %xmm4
-; X64-SSE42-NEXT: movups %xmm4, -{{[0-9]+}}(%rsp)
-; X64-SSE42-NEXT: movups %xmm4, -{{[0-9]+}}(%rsp)
-; X64-SSE42-NEXT: movups %xmm4, -{{[0-9]+}}(%rsp)
-; X64-SSE42-NEXT: movups %xmm4, -{{[0-9]+}}(%rsp)
-; X64-SSE42-NEXT: movups %xmm3, -{{[0-9]+}}(%rsp)
-; X64-SSE42-NEXT: movups %xmm2, -{{[0-9]+}}(%rsp)
-; X64-SSE42-NEXT: movups %xmm1, -{{[0-9]+}}(%rsp)
-; X64-SSE42-NEXT: movups %xmm0, -{{[0-9]+}}(%rsp)
-; X64-SSE42-NEXT: andl $63, %eax
+; X64-SSE42-NEXT: movaps %xmm4, -{{[0-9]+}}(%rsp)
+; X64-SSE42-NEXT: movaps %xmm4, -{{[0-9]+}}(%rsp)
+; X64-SSE42-NEXT: movaps %xmm4, -{{[0-9]+}}(%rsp)
+; X64-SSE42-NEXT: movaps %xmm4, -{{[0-9]+}}(%rsp)
+; X64-SSE42-NEXT: movaps %xmm3, -{{[0-9]+}}(%rsp)
+; X64-SSE42-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-SSE42-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; X64-SSE42-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; X64-SSE42-NEXT: shll $3, %eax
+; X64-SSE42-NEXT: andl $56, %eax
; X64-SSE42-NEXT: negl %eax
; X64-SSE42-NEXT: cltq
; X64-SSE42-NEXT: movups -64(%rsp,%rax), %xmm0
@@ -2077,10 +19876,12 @@ define void @shl_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; X64-SSE42-NEXT: movups %xmm1, 16(%rdx)
; X64-SSE42-NEXT: movups %xmm2, 32(%rdx)
; X64-SSE42-NEXT: movups %xmm0, (%rdx)
+; X64-SSE42-NEXT: popq %rax
; X64-SSE42-NEXT: retq
;
-; X64-AVX1-LABEL: shl_64bytes:
+; X64-AVX1-LABEL: shl_64bytes_qwordOff:
; X64-AVX1: # %bb.0:
+; X64-AVX1-NEXT: pushq %rax
; X64-AVX1-NEXT: vmovups (%rdi), %ymm0
; X64-AVX1-NEXT: vmovups 32(%rdi), %ymm1
; X64-AVX1-NEXT: movl (%rsi), %eax
@@ -2089,7 +19890,8 @@ define void @shl_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; X64-AVX1-NEXT: vmovups %ymm2, -{{[0-9]+}}(%rsp)
; X64-AVX1-NEXT: vmovups %ymm1, -{{[0-9]+}}(%rsp)
; X64-AVX1-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
-; X64-AVX1-NEXT: andl $63, %eax
+; X64-AVX1-NEXT: shll $3, %eax
+; X64-AVX1-NEXT: andl $56, %eax
; X64-AVX1-NEXT: negl %eax
; X64-AVX1-NEXT: cltq
; X64-AVX1-NEXT: vmovups -64(%rsp,%rax), %xmm0
@@ -2100,17 +19902,20 @@ define void @shl_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; X64-AVX1-NEXT: vmovups %xmm1, 16(%rdx)
; X64-AVX1-NEXT: vmovups %xmm2, 32(%rdx)
; X64-AVX1-NEXT: vmovups %xmm0, (%rdx)
+; X64-AVX1-NEXT: popq %rax
; X64-AVX1-NEXT: vzeroupper
; X64-AVX1-NEXT: retq
;
-; X64-AVX512-LABEL: shl_64bytes:
+; X64-AVX512-LABEL: shl_64bytes_qwordOff:
; X64-AVX512: # %bb.0:
+; X64-AVX512-NEXT: pushq %rax
; X64-AVX512-NEXT: vmovups (%rdi), %zmm0
; X64-AVX512-NEXT: movl (%rsi), %eax
; X64-AVX512-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X64-AVX512-NEXT: vmovups %zmm1, -{{[0-9]+}}(%rsp)
; X64-AVX512-NEXT: vmovups %zmm0, -{{[0-9]+}}(%rsp)
-; X64-AVX512-NEXT: andl $63, %eax
+; X64-AVX512-NEXT: shll $3, %eax
+; X64-AVX512-NEXT: andl $56, %eax
; X64-AVX512-NEXT: negl %eax
; X64-AVX512-NEXT: cltq
; X64-AVX512-NEXT: vmovups -64(%rsp,%rax), %xmm0
@@ -2121,117 +19926,108 @@ define void @shl_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; X64-AVX512-NEXT: vmovups %xmm1, 16(%rdx)
; X64-AVX512-NEXT: vmovups %xmm2, 32(%rdx)
; X64-AVX512-NEXT: vmovups %xmm0, (%rdx)
+; X64-AVX512-NEXT: popq %rax
; X64-AVX512-NEXT: vzeroupper
; X64-AVX512-NEXT: retq
;
-; X86-SSE2-LABEL: shl_64bytes:
+; X86-SSE2-LABEL: shl_64bytes_qwordOff:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pushl %ebp
; X86-SSE2-NEXT: pushl %ebx
; X86-SSE2-NEXT: pushl %edi
; X86-SSE2-NEXT: pushl %esi
-; X86-SSE2-NEXT: subl $168, %esp
-; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-SSE2-NEXT: movl (%eax), %ecx
-; X86-SSE2-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-SSE2-NEXT: movl 4(%eax), %ecx
-; X86-SSE2-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-SSE2-NEXT: movl 8(%eax), %ecx
-; X86-SSE2-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-SSE2-NEXT: movl 12(%eax), %ecx
-; X86-SSE2-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-SSE2-NEXT: movl 16(%eax), %ecx
-; X86-SSE2-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-SSE2-NEXT: movl 20(%eax), %ecx
-; X86-SSE2-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-SSE2-NEXT: movl 24(%eax), %ecx
-; X86-SSE2-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-SSE2-NEXT: movl 28(%eax), %ecx
-; X86-SSE2-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-SSE2-NEXT: movl 32(%eax), %ecx
-; X86-SSE2-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-SSE2-NEXT: movl 36(%eax), %ecx
-; X86-SSE2-NEXT: movl %ecx, (%esp) # 4-byte Spill
-; X86-SSE2-NEXT: movl 40(%eax), %ebp
-; X86-SSE2-NEXT: movl 44(%eax), %ebx
-; X86-SSE2-NEXT: movl 48(%eax), %edi
-; X86-SSE2-NEXT: movl 52(%eax), %esi
-; X86-SSE2-NEXT: movl 56(%eax), %edx
-; X86-SSE2-NEXT: movl 60(%eax), %ecx
-; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-SSE2-NEXT: movl (%eax), %eax
-; X86-SSE2-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-SSE2-NEXT: subl $188, %esp
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE2-NEXT: movl (%ecx), %eax
+; X86-SSE2-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-SSE2-NEXT: movl 4(%ecx), %eax
+; X86-SSE2-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-SSE2-NEXT: movl 8(%ecx), %eax
+; X86-SSE2-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-SSE2-NEXT: movl 12(%ecx), %eax
+; X86-SSE2-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-SSE2-NEXT: movl 16(%ecx), %eax
+; X86-SSE2-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-SSE2-NEXT: movl 20(%ecx), %eax
+; X86-SSE2-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-SSE2-NEXT: movl 24(%ecx), %eax
+; X86-SSE2-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-SSE2-NEXT: movl 28(%ecx), %eax
+; X86-SSE2-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-SSE2-NEXT: movl 32(%ecx), %eax
+; X86-SSE2-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-SSE2-NEXT: movl 36(%ecx), %eax
+; X86-SSE2-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-SSE2-NEXT: movl 40(%ecx), %ebp
+; X86-SSE2-NEXT: movl 44(%ecx), %ebx
+; X86-SSE2-NEXT: movl 48(%ecx), %edi
+; X86-SSE2-NEXT: movl 52(%ecx), %esi
+; X86-SSE2-NEXT: movl 56(%ecx), %edx
+; X86-SSE2-NEXT: movl 60(%ecx), %eax
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE2-NEXT: movl (%ecx), %ecx
+; X86-SSE2-NEXT: xorps %xmm0, %xmm0
+; X86-SSE2-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; X86-SSE2-NEXT: movl %eax, {{[0-9]+}}(%esp)
; X86-SSE2-NEXT: movl %edx, {{[0-9]+}}(%esp)
; X86-SSE2-NEXT: movl %esi, {{[0-9]+}}(%esp)
; X86-SSE2-NEXT: movl %edi, {{[0-9]+}}(%esp)
; X86-SSE2-NEXT: movl %ebx, {{[0-9]+}}(%esp)
; X86-SSE2-NEXT: movl %ebp, {{[0-9]+}}(%esp)
-; X86-SSE2-NEXT: movl (%esp), %ecx # 4-byte Reload
-; X86-SSE2-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; X86-SSE2-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-SSE2-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; X86-SSE2-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-SSE2-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; X86-SSE2-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-SSE2-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; X86-SSE2-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-SSE2-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; X86-SSE2-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-SSE2-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; X86-SSE2-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-SSE2-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; X86-SSE2-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-SSE2-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; X86-SSE2-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-SSE2-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; X86-SSE2-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-SSE2-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; X86-SSE2-NEXT: andl $63, %eax
-; X86-SSE2-NEXT: leal {{[0-9]+}}(%esp), %ecx
-; X86-SSE2-NEXT: subl %eax, %ecx
-; X86-SSE2-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-SSE2-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-SSE2-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-SSE2-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-SSE2-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-SSE2-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-SSE2-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-SSE2-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-SSE2-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-SSE2-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-SSE2-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-SSE2-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-SSE2-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-SSE2-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-SSE2-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-SSE2-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-SSE2-NEXT: movl (%ecx), %edx
+; X86-SSE2-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-SSE2-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-SSE2-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-SSE2-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-SSE2-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-SSE2-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-SSE2-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-SSE2-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-SSE2-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; X86-SSE2-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; X86-SSE2-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; X86-SSE2-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-SSE2-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-SSE2-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-SSE2-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-SSE2-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-SSE2-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-SSE2-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-SSE2-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-SSE2-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-SSE2-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-SSE2-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-SSE2-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-SSE2-NEXT: shll $3, %ecx
+; X86-SSE2-NEXT: andl $56, %ecx
+; X86-SSE2-NEXT: leal {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: subl %ecx, %eax
+; X86-SSE2-NEXT: movl (%eax), %edx
; X86-SSE2-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-SSE2-NEXT: movl 4(%ecx), %edx
+; X86-SSE2-NEXT: movl 4(%eax), %edx
; X86-SSE2-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-SSE2-NEXT: movl 12(%ecx), %edx
+; X86-SSE2-NEXT: movl 12(%eax), %edx
; X86-SSE2-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-SSE2-NEXT: movl 8(%ecx), %edx
+; X86-SSE2-NEXT: movl 8(%eax), %edx
; X86-SSE2-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-SSE2-NEXT: movl 20(%ecx), %edx
+; X86-SSE2-NEXT: movl 20(%eax), %edx
; X86-SSE2-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-SSE2-NEXT: movl 16(%ecx), %edx
+; X86-SSE2-NEXT: movl 16(%eax), %edx
; X86-SSE2-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-SSE2-NEXT: movl 28(%ecx), %edx
+; X86-SSE2-NEXT: movl 28(%eax), %edx
; X86-SSE2-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-SSE2-NEXT: movl 24(%ecx), %edx
+; X86-SSE2-NEXT: movl 24(%eax), %edx
; X86-SSE2-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-SSE2-NEXT: movl 36(%ecx), %edx
+; X86-SSE2-NEXT: movl 36(%eax), %edx
; X86-SSE2-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-SSE2-NEXT: movl 32(%ecx), %edx
-; X86-SSE2-NEXT: movl %edx, (%esp) # 4-byte Spill
-; X86-SSE2-NEXT: movl 44(%ecx), %ebp
-; X86-SSE2-NEXT: movl 40(%ecx), %ebx
-; X86-SSE2-NEXT: movl 52(%ecx), %edi
-; X86-SSE2-NEXT: movl 60(%ecx), %esi
-; X86-SSE2-NEXT: movl 56(%ecx), %edx
-; X86-SSE2-NEXT: negl %eax
-; X86-SSE2-NEXT: movl 152(%esp,%eax), %ecx
+; X86-SSE2-NEXT: movl 32(%eax), %edx
+; X86-SSE2-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-SSE2-NEXT: movl 44(%eax), %ebp
+; X86-SSE2-NEXT: movl 40(%eax), %ebx
+; X86-SSE2-NEXT: movl 52(%eax), %edi
+; X86-SSE2-NEXT: movl 60(%eax), %esi
+; X86-SSE2-NEXT: movl 56(%eax), %edx
+; X86-SSE2-NEXT: negl %ecx
+; X86-SSE2-NEXT: movl 160(%esp,%ecx), %ecx
; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE2-NEXT: movl %edx, 56(%eax)
; X86-SSE2-NEXT: movl %esi, 60(%eax)
@@ -2239,7 +20035,7 @@ define void @shl_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; X86-SSE2-NEXT: movl %edi, 52(%eax)
; X86-SSE2-NEXT: movl %ebx, 40(%eax)
; X86-SSE2-NEXT: movl %ebp, 44(%eax)
-; X86-SSE2-NEXT: movl (%esp), %ecx # 4-byte Reload
+; X86-SSE2-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-SSE2-NEXT: movl %ecx, 32(%eax)
; X86-SSE2-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-SSE2-NEXT: movl %ecx, 36(%eax)
@@ -2259,16 +20055,16 @@ define void @shl_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; X86-SSE2-NEXT: movl %ecx, (%eax)
; X86-SSE2-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-SSE2-NEXT: movl %ecx, 4(%eax)
-; X86-SSE2-NEXT: addl $168, %esp
+; X86-SSE2-NEXT: addl $188, %esp
; X86-SSE2-NEXT: popl %esi
; X86-SSE2-NEXT: popl %edi
; X86-SSE2-NEXT: popl %ebx
; X86-SSE2-NEXT: popl %ebp
; X86-SSE2-NEXT: retl
;
-; X86-SSE42-LABEL: shl_64bytes:
+; X86-SSE42-LABEL: shl_64bytes_qwordOff:
; X86-SSE42: # %bb.0:
-; X86-SSE42-NEXT: subl $128, %esp
+; X86-SSE42-NEXT: subl $140, %esp
; X86-SSE42-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE42-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-SSE42-NEXT: movl {{[0-9]+}}(%esp), %edx
@@ -2278,15 +20074,16 @@ define void @shl_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; X86-SSE42-NEXT: movups 48(%edx), %xmm3
; X86-SSE42-NEXT: movl (%ecx), %ecx
; X86-SSE42-NEXT: xorps %xmm4, %xmm4
-; X86-SSE42-NEXT: movups %xmm4, {{[0-9]+}}(%esp)
-; X86-SSE42-NEXT: movups %xmm4, {{[0-9]+}}(%esp)
-; X86-SSE42-NEXT: movups %xmm4, {{[0-9]+}}(%esp)
-; X86-SSE42-NEXT: movups %xmm4, (%esp)
-; X86-SSE42-NEXT: movups %xmm3, {{[0-9]+}}(%esp)
-; X86-SSE42-NEXT: movups %xmm2, {{[0-9]+}}(%esp)
-; X86-SSE42-NEXT: movups %xmm1, {{[0-9]+}}(%esp)
-; X86-SSE42-NEXT: movups %xmm0, {{[0-9]+}}(%esp)
-; X86-SSE42-NEXT: andl $63, %ecx
+; X86-SSE42-NEXT: movaps %xmm4, {{[0-9]+}}(%esp)
+; X86-SSE42-NEXT: movaps %xmm4, {{[0-9]+}}(%esp)
+; X86-SSE42-NEXT: movaps %xmm4, {{[0-9]+}}(%esp)
+; X86-SSE42-NEXT: movaps %xmm4, (%esp)
+; X86-SSE42-NEXT: movaps %xmm3, {{[0-9]+}}(%esp)
+; X86-SSE42-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-SSE42-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-SSE42-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; X86-SSE42-NEXT: shll $3, %ecx
+; X86-SSE42-NEXT: andl $56, %ecx
; X86-SSE42-NEXT: leal {{[0-9]+}}(%esp), %edx
; X86-SSE42-NEXT: subl %ecx, %edx
; X86-SSE42-NEXT: movups (%edx), %xmm0
@@ -2298,12 +20095,12 @@ define void @shl_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; X86-SSE42-NEXT: movups %xmm2, 32(%eax)
; X86-SSE42-NEXT: movups %xmm1, 16(%eax)
; X86-SSE42-NEXT: movups %xmm0, (%eax)
-; X86-SSE42-NEXT: addl $128, %esp
+; X86-SSE42-NEXT: addl $140, %esp
; X86-SSE42-NEXT: retl
;
-; X86-AVX1-LABEL: shl_64bytes:
+; X86-AVX1-LABEL: shl_64bytes_qwordOff:
; X86-AVX1: # %bb.0:
-; X86-AVX1-NEXT: subl $128, %esp
+; X86-AVX1-NEXT: subl $140, %esp
; X86-AVX1-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-AVX1-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-AVX1-NEXT: movl {{[0-9]+}}(%esp), %edx
@@ -2315,7 +20112,8 @@ define void @shl_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; X86-AVX1-NEXT: vmovups %ymm2, (%esp)
; X86-AVX1-NEXT: vmovups %ymm1, {{[0-9]+}}(%esp)
; X86-AVX1-NEXT: vmovups %ymm0, {{[0-9]+}}(%esp)
-; X86-AVX1-NEXT: andl $63, %ecx
+; X86-AVX1-NEXT: shll $3, %ecx
+; X86-AVX1-NEXT: andl $56, %ecx
; X86-AVX1-NEXT: leal {{[0-9]+}}(%esp), %edx
; X86-AVX1-NEXT: subl %ecx, %edx
; X86-AVX1-NEXT: vmovups (%edx), %xmm0
@@ -2327,13 +20125,13 @@ define void @shl_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; X86-AVX1-NEXT: vmovups %xmm2, 32(%eax)
; X86-AVX1-NEXT: vmovups %xmm1, 16(%eax)
; X86-AVX1-NEXT: vmovups %xmm0, (%eax)
-; X86-AVX1-NEXT: addl $128, %esp
+; X86-AVX1-NEXT: addl $140, %esp
; X86-AVX1-NEXT: vzeroupper
; X86-AVX1-NEXT: retl
;
-; X86-AVX512-LABEL: shl_64bytes:
+; X86-AVX512-LABEL: shl_64bytes_qwordOff:
; X86-AVX512: # %bb.0:
-; X86-AVX512-NEXT: subl $128, %esp
+; X86-AVX512-NEXT: subl $140, %esp
; X86-AVX512-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-AVX512-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-AVX512-NEXT: movl {{[0-9]+}}(%esp), %edx
@@ -2342,7 +20140,8 @@ define void @shl_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; X86-AVX512-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X86-AVX512-NEXT: vmovups %zmm1, (%esp)
; X86-AVX512-NEXT: vmovups %zmm0, {{[0-9]+}}(%esp)
-; X86-AVX512-NEXT: andl $63, %ecx
+; X86-AVX512-NEXT: shll $3, %ecx
+; X86-AVX512-NEXT: andl $56, %ecx
; X86-AVX512-NEXT: leal {{[0-9]+}}(%esp), %edx
; X86-AVX512-NEXT: subl %ecx, %edx
; X86-AVX512-NEXT: vmovups (%edx), %xmm0
@@ -2354,18 +20153,4121 @@ define void @shl_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; X86-AVX512-NEXT: vmovups %xmm2, 32(%eax)
; X86-AVX512-NEXT: vmovups %xmm1, 16(%eax)
; X86-AVX512-NEXT: vmovups %xmm0, (%eax)
-; X86-AVX512-NEXT: addl $128, %esp
+; X86-AVX512-NEXT: addl $140, %esp
; X86-AVX512-NEXT: vzeroupper
; X86-AVX512-NEXT: retl
%src = load i512, ptr %src.ptr, align 1
- %byteOff = load i512, ptr %byteOff.ptr, align 1
- %bitOff = shl i512 %byteOff, 3
+ %qwordOff = load i512, ptr %qwordOff.ptr, align 1
+ %bitOff = shl i512 %qwordOff, 6
%res = shl i512 %src, %bitOff
store i512 %res, ptr %dst, align 1
ret void
}
+
define void @ashr_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
-; X64-SSE2-LABEL: ashr_64bytes:
+; FALLBACK0-LABEL: ashr_64bytes:
+; FALLBACK0: # %bb.0:
+; FALLBACK0-NEXT: pushq %r15
+; FALLBACK0-NEXT: pushq %r14
+; FALLBACK0-NEXT: pushq %r13
+; FALLBACK0-NEXT: pushq %r12
+; FALLBACK0-NEXT: pushq %rbx
+; FALLBACK0-NEXT: movq (%rdi), %rax
+; FALLBACK0-NEXT: movq 8(%rdi), %rcx
+; FALLBACK0-NEXT: movq 16(%rdi), %r8
+; FALLBACK0-NEXT: movq 24(%rdi), %r9
+; FALLBACK0-NEXT: movq 32(%rdi), %r10
+; FALLBACK0-NEXT: movq 40(%rdi), %r11
+; FALLBACK0-NEXT: movq 48(%rdi), %rbx
+; FALLBACK0-NEXT: movq 56(%rdi), %r14
+; FALLBACK0-NEXT: movl (%rsi), %edi
+; FALLBACK0-NEXT: movq %r14, -{{[0-9]+}}(%rsp)
+; FALLBACK0-NEXT: movq %rbx, -{{[0-9]+}}(%rsp)
+; FALLBACK0-NEXT: movq %r11, -{{[0-9]+}}(%rsp)
+; FALLBACK0-NEXT: movq %r10, -{{[0-9]+}}(%rsp)
+; FALLBACK0-NEXT: movq %r9, -{{[0-9]+}}(%rsp)
+; FALLBACK0-NEXT: movq %r8, -{{[0-9]+}}(%rsp)
+; FALLBACK0-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
+; FALLBACK0-NEXT: movq %rax, -{{[0-9]+}}(%rsp)
+; FALLBACK0-NEXT: sarq $63, %r14
+; FALLBACK0-NEXT: movq %r14, -{{[0-9]+}}(%rsp)
+; FALLBACK0-NEXT: movq %r14, -{{[0-9]+}}(%rsp)
+; FALLBACK0-NEXT: movq %r14, -{{[0-9]+}}(%rsp)
+; FALLBACK0-NEXT: movq %r14, -{{[0-9]+}}(%rsp)
+; FALLBACK0-NEXT: movq %r14, -{{[0-9]+}}(%rsp)
+; FALLBACK0-NEXT: movq %r14, -{{[0-9]+}}(%rsp)
+; FALLBACK0-NEXT: movq %r14, -{{[0-9]+}}(%rsp)
+; FALLBACK0-NEXT: movq %r14, -{{[0-9]+}}(%rsp)
+; FALLBACK0-NEXT: leal (,%rdi,8), %eax
+; FALLBACK0-NEXT: andl $56, %eax
+; FALLBACK0-NEXT: andl $56, %edi
+; FALLBACK0-NEXT: movq -128(%rsp,%rdi), %r10
+; FALLBACK0-NEXT: movq -120(%rsp,%rdi), %r8
+; FALLBACK0-NEXT: movq %r8, %r11
+; FALLBACK0-NEXT: movl %eax, %ecx
+; FALLBACK0-NEXT: shrq %cl, %r11
+; FALLBACK0-NEXT: movl %eax, %esi
+; FALLBACK0-NEXT: notb %sil
+; FALLBACK0-NEXT: movq -112(%rsp,%rdi), %rbx
+; FALLBACK0-NEXT: leaq (%rbx,%rbx), %r9
+; FALLBACK0-NEXT: movl %esi, %ecx
+; FALLBACK0-NEXT: shlq %cl, %r9
+; FALLBACK0-NEXT: orq %r11, %r9
+; FALLBACK0-NEXT: movl %eax, %ecx
+; FALLBACK0-NEXT: shrq %cl, %r10
+; FALLBACK0-NEXT: addq %r8, %r8
+; FALLBACK0-NEXT: movl %esi, %ecx
+; FALLBACK0-NEXT: shlq %cl, %r8
+; FALLBACK0-NEXT: orq %r10, %r8
+; FALLBACK0-NEXT: movq -104(%rsp,%rdi), %r10
+; FALLBACK0-NEXT: movq %r10, %r15
+; FALLBACK0-NEXT: movl %eax, %ecx
+; FALLBACK0-NEXT: shrq %cl, %r15
+; FALLBACK0-NEXT: movq -96(%rsp,%rdi), %r14
+; FALLBACK0-NEXT: leaq (%r14,%r14), %r11
+; FALLBACK0-NEXT: movl %esi, %ecx
+; FALLBACK0-NEXT: shlq %cl, %r11
+; FALLBACK0-NEXT: orq %r15, %r11
+; FALLBACK0-NEXT: movl %eax, %ecx
+; FALLBACK0-NEXT: shrq %cl, %rbx
+; FALLBACK0-NEXT: addq %r10, %r10
+; FALLBACK0-NEXT: movl %esi, %ecx
+; FALLBACK0-NEXT: shlq %cl, %r10
+; FALLBACK0-NEXT: orq %rbx, %r10
+; FALLBACK0-NEXT: movq -88(%rsp,%rdi), %rbx
+; FALLBACK0-NEXT: movq %rbx, %r12
+; FALLBACK0-NEXT: movl %eax, %ecx
+; FALLBACK0-NEXT: shrq %cl, %r12
+; FALLBACK0-NEXT: movq -80(%rsp,%rdi), %r13
+; FALLBACK0-NEXT: leaq (%r13,%r13), %r15
+; FALLBACK0-NEXT: movl %esi, %ecx
+; FALLBACK0-NEXT: shlq %cl, %r15
+; FALLBACK0-NEXT: orq %r12, %r15
+; FALLBACK0-NEXT: movl %eax, %ecx
+; FALLBACK0-NEXT: shrq %cl, %r14
+; FALLBACK0-NEXT: addq %rbx, %rbx
+; FALLBACK0-NEXT: movl %esi, %ecx
+; FALLBACK0-NEXT: shlq %cl, %rbx
+; FALLBACK0-NEXT: orq %r14, %rbx
+; FALLBACK0-NEXT: movl %eax, %ecx
+; FALLBACK0-NEXT: shrq %cl, %r13
+; FALLBACK0-NEXT: movq -72(%rsp,%rdi), %rdi
+; FALLBACK0-NEXT: leaq (%rdi,%rdi), %r14
+; FALLBACK0-NEXT: movl %esi, %ecx
+; FALLBACK0-NEXT: shlq %cl, %r14
+; FALLBACK0-NEXT: orq %r13, %r14
+; FALLBACK0-NEXT: movl %eax, %ecx
+; FALLBACK0-NEXT: sarq %cl, %rdi
+; FALLBACK0-NEXT: movq %rdi, 56(%rdx)
+; FALLBACK0-NEXT: movq %r14, 48(%rdx)
+; FALLBACK0-NEXT: movq %rbx, 32(%rdx)
+; FALLBACK0-NEXT: movq %r15, 40(%rdx)
+; FALLBACK0-NEXT: movq %r10, 16(%rdx)
+; FALLBACK0-NEXT: movq %r11, 24(%rdx)
+; FALLBACK0-NEXT: movq %r8, (%rdx)
+; FALLBACK0-NEXT: movq %r9, 8(%rdx)
+; FALLBACK0-NEXT: popq %rbx
+; FALLBACK0-NEXT: popq %r12
+; FALLBACK0-NEXT: popq %r13
+; FALLBACK0-NEXT: popq %r14
+; FALLBACK0-NEXT: popq %r15
+; FALLBACK0-NEXT: retq
+;
+; FALLBACK1-LABEL: ashr_64bytes:
+; FALLBACK1: # %bb.0:
+; FALLBACK1-NEXT: pushq %r15
+; FALLBACK1-NEXT: pushq %r14
+; FALLBACK1-NEXT: pushq %rbx
+; FALLBACK1-NEXT: movq (%rdi), %rcx
+; FALLBACK1-NEXT: movq 8(%rdi), %r8
+; FALLBACK1-NEXT: movq 16(%rdi), %r9
+; FALLBACK1-NEXT: movq 24(%rdi), %r10
+; FALLBACK1-NEXT: movq 32(%rdi), %r11
+; FALLBACK1-NEXT: movq 40(%rdi), %rbx
+; FALLBACK1-NEXT: movq 48(%rdi), %r14
+; FALLBACK1-NEXT: movq 56(%rdi), %rdi
+; FALLBACK1-NEXT: movl (%rsi), %eax
+; FALLBACK1-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK1-NEXT: movq %r14, -{{[0-9]+}}(%rsp)
+; FALLBACK1-NEXT: movq %rbx, -{{[0-9]+}}(%rsp)
+; FALLBACK1-NEXT: movq %r11, -{{[0-9]+}}(%rsp)
+; FALLBACK1-NEXT: movq %r10, -{{[0-9]+}}(%rsp)
+; FALLBACK1-NEXT: movq %r9, -{{[0-9]+}}(%rsp)
+; FALLBACK1-NEXT: movq %r8, -{{[0-9]+}}(%rsp)
+; FALLBACK1-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
+; FALLBACK1-NEXT: sarq $63, %rdi
+; FALLBACK1-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK1-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK1-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK1-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK1-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK1-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK1-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK1-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK1-NEXT: leal (,%rax,8), %ecx
+; FALLBACK1-NEXT: andl $56, %ecx
+; FALLBACK1-NEXT: andl $56, %eax
+; FALLBACK1-NEXT: movq -112(%rsp,%rax), %rdi
+; FALLBACK1-NEXT: movq -128(%rsp,%rax), %rsi
+; FALLBACK1-NEXT: movq -120(%rsp,%rax), %r9
+; FALLBACK1-NEXT: movq %r9, %r8
+; FALLBACK1-NEXT: shrdq %cl, %rdi, %r8
+; FALLBACK1-NEXT: movq -96(%rsp,%rax), %r10
+; FALLBACK1-NEXT: movq -104(%rsp,%rax), %r11
+; FALLBACK1-NEXT: movq %r11, %rbx
+; FALLBACK1-NEXT: shrdq %cl, %r10, %rbx
+; FALLBACK1-NEXT: shrdq %cl, %r11, %rdi
+; FALLBACK1-NEXT: movq -80(%rsp,%rax), %r11
+; FALLBACK1-NEXT: movq -88(%rsp,%rax), %r14
+; FALLBACK1-NEXT: movq %r14, %r15
+; FALLBACK1-NEXT: shrdq %cl, %r11, %r15
+; FALLBACK1-NEXT: shrdq %cl, %r14, %r10
+; FALLBACK1-NEXT: movq -72(%rsp,%rax), %rax
+; FALLBACK1-NEXT: shrdq %cl, %rax, %r11
+; FALLBACK1-NEXT: shrdq %cl, %r9, %rsi
+; FALLBACK1-NEXT: # kill: def $cl killed $cl killed $ecx
+; FALLBACK1-NEXT: sarq %cl, %rax
+; FALLBACK1-NEXT: movq %r11, 48(%rdx)
+; FALLBACK1-NEXT: movq %rax, 56(%rdx)
+; FALLBACK1-NEXT: movq %r10, 32(%rdx)
+; FALLBACK1-NEXT: movq %r15, 40(%rdx)
+; FALLBACK1-NEXT: movq %rdi, 16(%rdx)
+; FALLBACK1-NEXT: movq %rbx, 24(%rdx)
+; FALLBACK1-NEXT: movq %rsi, (%rdx)
+; FALLBACK1-NEXT: movq %r8, 8(%rdx)
+; FALLBACK1-NEXT: popq %rbx
+; FALLBACK1-NEXT: popq %r14
+; FALLBACK1-NEXT: popq %r15
+; FALLBACK1-NEXT: retq
+;
+; FALLBACK2-LABEL: ashr_64bytes:
+; FALLBACK2: # %bb.0:
+; FALLBACK2-NEXT: pushq %rbp
+; FALLBACK2-NEXT: pushq %r15
+; FALLBACK2-NEXT: pushq %r14
+; FALLBACK2-NEXT: pushq %r13
+; FALLBACK2-NEXT: pushq %r12
+; FALLBACK2-NEXT: pushq %rbx
+; FALLBACK2-NEXT: pushq %rax
+; FALLBACK2-NEXT: movq (%rdi), %rcx
+; FALLBACK2-NEXT: movq 8(%rdi), %r8
+; FALLBACK2-NEXT: movq 16(%rdi), %r9
+; FALLBACK2-NEXT: movq 24(%rdi), %r10
+; FALLBACK2-NEXT: movq 32(%rdi), %r11
+; FALLBACK2-NEXT: movq 40(%rdi), %rbx
+; FALLBACK2-NEXT: movq 48(%rdi), %r14
+; FALLBACK2-NEXT: movq 56(%rdi), %rdi
+; FALLBACK2-NEXT: movl (%rsi), %eax
+; FALLBACK2-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK2-NEXT: movq %r14, -{{[0-9]+}}(%rsp)
+; FALLBACK2-NEXT: movq %rbx, -{{[0-9]+}}(%rsp)
+; FALLBACK2-NEXT: movq %r11, -{{[0-9]+}}(%rsp)
+; FALLBACK2-NEXT: movq %r10, -{{[0-9]+}}(%rsp)
+; FALLBACK2-NEXT: movq %r9, -{{[0-9]+}}(%rsp)
+; FALLBACK2-NEXT: movq %r8, -{{[0-9]+}}(%rsp)
+; FALLBACK2-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
+; FALLBACK2-NEXT: sarq $63, %rdi
+; FALLBACK2-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK2-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK2-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK2-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK2-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK2-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK2-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK2-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK2-NEXT: leal (,%rax,8), %ecx
+; FALLBACK2-NEXT: andl $56, %ecx
+; FALLBACK2-NEXT: andl $56, %eax
+; FALLBACK2-NEXT: movq -120(%rsp,%rax), %rdi
+; FALLBACK2-NEXT: movq -112(%rsp,%rax), %r9
+; FALLBACK2-NEXT: shrxq %rcx, %rdi, %rbx
+; FALLBACK2-NEXT: shrxq %rcx, -128(%rsp,%rax), %r13
+; FALLBACK2-NEXT: movq -104(%rsp,%rax), %rsi
+; FALLBACK2-NEXT: shrxq %rcx, %rsi, %r8
+; FALLBACK2-NEXT: movq -96(%rsp,%rax), %r10
+; FALLBACK2-NEXT: shrxq %rcx, %r9, %r11
+; FALLBACK2-NEXT: movq -88(%rsp,%rax), %r14
+; FALLBACK2-NEXT: shrxq %rcx, %r14, %r15
+; FALLBACK2-NEXT: shrxq %rcx, %r10, %rbp
+; FALLBACK2-NEXT: movl %ecx, %r12d
+; FALLBACK2-NEXT: notb %r12b
+; FALLBACK2-NEXT: addq %r9, %r9
+; FALLBACK2-NEXT: shlxq %r12, %r9, %r9
+; FALLBACK2-NEXT: orq %rbx, %r9
+; FALLBACK2-NEXT: addq %rdi, %rdi
+; FALLBACK2-NEXT: shlxq %r12, %rdi, %rdi
+; FALLBACK2-NEXT: orq %r13, %rdi
+; FALLBACK2-NEXT: movq -80(%rsp,%rax), %rbx
+; FALLBACK2-NEXT: shrxq %rcx, %rbx, %r13
+; FALLBACK2-NEXT: movq -72(%rsp,%rax), %rax
+; FALLBACK2-NEXT: sarxq %rcx, %rax, %rcx
+; FALLBACK2-NEXT: addq %r10, %r10
+; FALLBACK2-NEXT: shlxq %r12, %r10, %r10
+; FALLBACK2-NEXT: orq %r8, %r10
+; FALLBACK2-NEXT: addq %rsi, %rsi
+; FALLBACK2-NEXT: shlxq %r12, %rsi, %rsi
+; FALLBACK2-NEXT: orq %r11, %rsi
+; FALLBACK2-NEXT: leaq (%rbx,%rbx), %r8
+; FALLBACK2-NEXT: shlxq %r12, %r8, %r8
+; FALLBACK2-NEXT: orq %r15, %r8
+; FALLBACK2-NEXT: addq %r14, %r14
+; FALLBACK2-NEXT: shlxq %r12, %r14, %r11
+; FALLBACK2-NEXT: orq %rbp, %r11
+; FALLBACK2-NEXT: addq %rax, %rax
+; FALLBACK2-NEXT: shlxq %r12, %rax, %rax
+; FALLBACK2-NEXT: orq %r13, %rax
+; FALLBACK2-NEXT: movq %rcx, 56(%rdx)
+; FALLBACK2-NEXT: movq %rax, 48(%rdx)
+; FALLBACK2-NEXT: movq %r11, 32(%rdx)
+; FALLBACK2-NEXT: movq %r8, 40(%rdx)
+; FALLBACK2-NEXT: movq %rsi, 16(%rdx)
+; FALLBACK2-NEXT: movq %r10, 24(%rdx)
+; FALLBACK2-NEXT: movq %rdi, (%rdx)
+; FALLBACK2-NEXT: movq %r9, 8(%rdx)
+; FALLBACK2-NEXT: addq $8, %rsp
+; FALLBACK2-NEXT: popq %rbx
+; FALLBACK2-NEXT: popq %r12
+; FALLBACK2-NEXT: popq %r13
+; FALLBACK2-NEXT: popq %r14
+; FALLBACK2-NEXT: popq %r15
+; FALLBACK2-NEXT: popq %rbp
+; FALLBACK2-NEXT: retq
+;
+; FALLBACK3-LABEL: ashr_64bytes:
+; FALLBACK3: # %bb.0:
+; FALLBACK3-NEXT: pushq %r15
+; FALLBACK3-NEXT: pushq %r14
+; FALLBACK3-NEXT: pushq %rbx
+; FALLBACK3-NEXT: movq (%rdi), %rcx
+; FALLBACK3-NEXT: movq 8(%rdi), %r8
+; FALLBACK3-NEXT: movq 16(%rdi), %r9
+; FALLBACK3-NEXT: movq 24(%rdi), %r10
+; FALLBACK3-NEXT: movq 32(%rdi), %r11
+; FALLBACK3-NEXT: movq 40(%rdi), %rbx
+; FALLBACK3-NEXT: movq 48(%rdi), %r14
+; FALLBACK3-NEXT: movq 56(%rdi), %rdi
+; FALLBACK3-NEXT: movl (%rsi), %eax
+; FALLBACK3-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK3-NEXT: movq %r14, -{{[0-9]+}}(%rsp)
+; FALLBACK3-NEXT: movq %rbx, -{{[0-9]+}}(%rsp)
+; FALLBACK3-NEXT: movq %r11, -{{[0-9]+}}(%rsp)
+; FALLBACK3-NEXT: movq %r10, -{{[0-9]+}}(%rsp)
+; FALLBACK3-NEXT: movq %r9, -{{[0-9]+}}(%rsp)
+; FALLBACK3-NEXT: movq %r8, -{{[0-9]+}}(%rsp)
+; FALLBACK3-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
+; FALLBACK3-NEXT: sarq $63, %rdi
+; FALLBACK3-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK3-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK3-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK3-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK3-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK3-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK3-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK3-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK3-NEXT: leal (,%rax,8), %ecx
+; FALLBACK3-NEXT: andl $56, %ecx
+; FALLBACK3-NEXT: andl $56, %eax
+; FALLBACK3-NEXT: movq -112(%rsp,%rax), %rdi
+; FALLBACK3-NEXT: movq -128(%rsp,%rax), %rsi
+; FALLBACK3-NEXT: movq -120(%rsp,%rax), %r9
+; FALLBACK3-NEXT: movq %r9, %r8
+; FALLBACK3-NEXT: shrdq %cl, %rdi, %r8
+; FALLBACK3-NEXT: movq -96(%rsp,%rax), %r10
+; FALLBACK3-NEXT: movq -104(%rsp,%rax), %r11
+; FALLBACK3-NEXT: movq %r11, %rbx
+; FALLBACK3-NEXT: shrdq %cl, %r10, %rbx
+; FALLBACK3-NEXT: shrdq %cl, %r11, %rdi
+; FALLBACK3-NEXT: movq -80(%rsp,%rax), %r11
+; FALLBACK3-NEXT: movq -88(%rsp,%rax), %r14
+; FALLBACK3-NEXT: movq %r14, %r15
+; FALLBACK3-NEXT: shrdq %cl, %r11, %r15
+; FALLBACK3-NEXT: shrdq %cl, %r14, %r10
+; FALLBACK3-NEXT: movq -72(%rsp,%rax), %rax
+; FALLBACK3-NEXT: shrdq %cl, %rax, %r11
+; FALLBACK3-NEXT: sarxq %rcx, %rax, %rax
+; FALLBACK3-NEXT: # kill: def $cl killed $cl killed $rcx
+; FALLBACK3-NEXT: shrdq %cl, %r9, %rsi
+; FALLBACK3-NEXT: movq %r11, 48(%rdx)
+; FALLBACK3-NEXT: movq %r10, 32(%rdx)
+; FALLBACK3-NEXT: movq %r15, 40(%rdx)
+; FALLBACK3-NEXT: movq %rdi, 16(%rdx)
+; FALLBACK3-NEXT: movq %rbx, 24(%rdx)
+; FALLBACK3-NEXT: movq %rsi, (%rdx)
+; FALLBACK3-NEXT: movq %r8, 8(%rdx)
+; FALLBACK3-NEXT: movq %rax, 56(%rdx)
+; FALLBACK3-NEXT: popq %rbx
+; FALLBACK3-NEXT: popq %r14
+; FALLBACK3-NEXT: popq %r15
+; FALLBACK3-NEXT: retq
+;
+; FALLBACK4-LABEL: ashr_64bytes:
+; FALLBACK4: # %bb.0:
+; FALLBACK4-NEXT: pushq %rbp
+; FALLBACK4-NEXT: pushq %r15
+; FALLBACK4-NEXT: pushq %r14
+; FALLBACK4-NEXT: pushq %r13
+; FALLBACK4-NEXT: pushq %r12
+; FALLBACK4-NEXT: pushq %rbx
+; FALLBACK4-NEXT: pushq %rax
+; FALLBACK4-NEXT: movups (%rdi), %xmm0
+; FALLBACK4-NEXT: movups 16(%rdi), %xmm1
+; FALLBACK4-NEXT: movups 32(%rdi), %xmm2
+; FALLBACK4-NEXT: movq 48(%rdi), %rax
+; FALLBACK4-NEXT: movq 56(%rdi), %rcx
+; FALLBACK4-NEXT: movl (%rsi), %edi
+; FALLBACK4-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
+; FALLBACK4-NEXT: movq %rax, -{{[0-9]+}}(%rsp)
+; FALLBACK4-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; FALLBACK4-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; FALLBACK4-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK4-NEXT: sarq $63, %rcx
+; FALLBACK4-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
+; FALLBACK4-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
+; FALLBACK4-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
+; FALLBACK4-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
+; FALLBACK4-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
+; FALLBACK4-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
+; FALLBACK4-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
+; FALLBACK4-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
+; FALLBACK4-NEXT: leal (,%rdi,8), %eax
+; FALLBACK4-NEXT: andl $56, %eax
+; FALLBACK4-NEXT: andl $56, %edi
+; FALLBACK4-NEXT: movq -128(%rsp,%rdi), %r10
+; FALLBACK4-NEXT: movq -120(%rsp,%rdi), %r9
+; FALLBACK4-NEXT: movl %eax, %ecx
+; FALLBACK4-NEXT: shrq %cl, %r10
+; FALLBACK4-NEXT: movl %eax, %esi
+; FALLBACK4-NEXT: notb %sil
+; FALLBACK4-NEXT: leaq (%r9,%r9), %r8
+; FALLBACK4-NEXT: movl %esi, %ecx
+; FALLBACK4-NEXT: shlq %cl, %r8
+; FALLBACK4-NEXT: orq %r10, %r8
+; FALLBACK4-NEXT: movq -104(%rsp,%rdi), %r10
+; FALLBACK4-NEXT: movq %r10, %rbx
+; FALLBACK4-NEXT: movl %eax, %ecx
+; FALLBACK4-NEXT: shrq %cl, %rbx
+; FALLBACK4-NEXT: movq -96(%rsp,%rdi), %r12
+; FALLBACK4-NEXT: leaq (%r12,%r12), %r11
+; FALLBACK4-NEXT: movl %esi, %ecx
+; FALLBACK4-NEXT: shlq %cl, %r11
+; FALLBACK4-NEXT: orq %rbx, %r11
+; FALLBACK4-NEXT: movq -112(%rsp,%rdi), %rbx
+; FALLBACK4-NEXT: movq %rbx, %r14
+; FALLBACK4-NEXT: movl %eax, %ecx
+; FALLBACK4-NEXT: shrq %cl, %r14
+; FALLBACK4-NEXT: addq %r10, %r10
+; FALLBACK4-NEXT: movl %esi, %ecx
+; FALLBACK4-NEXT: shlq %cl, %r10
+; FALLBACK4-NEXT: orq %r14, %r10
+; FALLBACK4-NEXT: movq -88(%rsp,%rdi), %r14
+; FALLBACK4-NEXT: movq %r14, %r13
+; FALLBACK4-NEXT: movl %eax, %ecx
+; FALLBACK4-NEXT: shrq %cl, %r13
+; FALLBACK4-NEXT: movq -80(%rsp,%rdi), %rbp
+; FALLBACK4-NEXT: leaq (%rbp,%rbp), %r15
+; FALLBACK4-NEXT: movl %esi, %ecx
+; FALLBACK4-NEXT: shlq %cl, %r15
+; FALLBACK4-NEXT: orq %r13, %r15
+; FALLBACK4-NEXT: movl %eax, %ecx
+; FALLBACK4-NEXT: shrq %cl, %r12
+; FALLBACK4-NEXT: addq %r14, %r14
+; FALLBACK4-NEXT: movl %esi, %ecx
+; FALLBACK4-NEXT: shlq %cl, %r14
+; FALLBACK4-NEXT: orq %r12, %r14
+; FALLBACK4-NEXT: movl %eax, %ecx
+; FALLBACK4-NEXT: shrq %cl, %rbp
+; FALLBACK4-NEXT: movq -72(%rsp,%rdi), %rdi
+; FALLBACK4-NEXT: leaq (%rdi,%rdi), %r12
+; FALLBACK4-NEXT: movl %esi, %ecx
+; FALLBACK4-NEXT: shlq %cl, %r12
+; FALLBACK4-NEXT: orq %rbp, %r12
+; FALLBACK4-NEXT: movl %eax, %ecx
+; FALLBACK4-NEXT: shrq %cl, %r9
+; FALLBACK4-NEXT: addq %rbx, %rbx
+; FALLBACK4-NEXT: movl %esi, %ecx
+; FALLBACK4-NEXT: shlq %cl, %rbx
+; FALLBACK4-NEXT: orq %r9, %rbx
+; FALLBACK4-NEXT: movl %eax, %ecx
+; FALLBACK4-NEXT: sarq %cl, %rdi
+; FALLBACK4-NEXT: movq %rdi, 56(%rdx)
+; FALLBACK4-NEXT: movq %rbx, 8(%rdx)
+; FALLBACK4-NEXT: movq %r12, 48(%rdx)
+; FALLBACK4-NEXT: movq %r14, 32(%rdx)
+; FALLBACK4-NEXT: movq %r15, 40(%rdx)
+; FALLBACK4-NEXT: movq %r10, 16(%rdx)
+; FALLBACK4-NEXT: movq %r11, 24(%rdx)
+; FALLBACK4-NEXT: movq %r8, (%rdx)
+; FALLBACK4-NEXT: addq $8, %rsp
+; FALLBACK4-NEXT: popq %rbx
+; FALLBACK4-NEXT: popq %r12
+; FALLBACK4-NEXT: popq %r13
+; FALLBACK4-NEXT: popq %r14
+; FALLBACK4-NEXT: popq %r15
+; FALLBACK4-NEXT: popq %rbp
+; FALLBACK4-NEXT: retq
+;
+; FALLBACK5-LABEL: ashr_64bytes:
+; FALLBACK5: # %bb.0:
+; FALLBACK5-NEXT: pushq %r15
+; FALLBACK5-NEXT: pushq %r14
+; FALLBACK5-NEXT: pushq %rbx
+; FALLBACK5-NEXT: movups (%rdi), %xmm0
+; FALLBACK5-NEXT: movups 16(%rdi), %xmm1
+; FALLBACK5-NEXT: movups 32(%rdi), %xmm2
+; FALLBACK5-NEXT: movq 48(%rdi), %rcx
+; FALLBACK5-NEXT: movq 56(%rdi), %rdi
+; FALLBACK5-NEXT: movl (%rsi), %eax
+; FALLBACK5-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK5-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
+; FALLBACK5-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; FALLBACK5-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; FALLBACK5-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK5-NEXT: sarq $63, %rdi
+; FALLBACK5-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK5-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK5-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK5-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK5-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK5-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK5-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK5-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK5-NEXT: leal (,%rax,8), %ecx
+; FALLBACK5-NEXT: andl $56, %ecx
+; FALLBACK5-NEXT: andl $56, %eax
+; FALLBACK5-NEXT: movq -96(%rsp,%rax), %rdi
+; FALLBACK5-NEXT: movq -104(%rsp,%rax), %r9
+; FALLBACK5-NEXT: movq %r9, %rsi
+; FALLBACK5-NEXT: shrdq %cl, %rdi, %rsi
+; FALLBACK5-NEXT: movq -112(%rsp,%rax), %r10
+; FALLBACK5-NEXT: movq %r10, %r8
+; FALLBACK5-NEXT: shrdq %cl, %r9, %r8
+; FALLBACK5-NEXT: movq -80(%rsp,%rax), %r9
+; FALLBACK5-NEXT: movq -88(%rsp,%rax), %r11
+; FALLBACK5-NEXT: movq %r11, %rbx
+; FALLBACK5-NEXT: shrdq %cl, %r9, %rbx
+; FALLBACK5-NEXT: shrdq %cl, %r11, %rdi
+; FALLBACK5-NEXT: movq -72(%rsp,%rax), %r11
+; FALLBACK5-NEXT: shrdq %cl, %r11, %r9
+; FALLBACK5-NEXT: movq -128(%rsp,%rax), %r14
+; FALLBACK5-NEXT: movq -120(%rsp,%rax), %rax
+; FALLBACK5-NEXT: movq %rax, %r15
+; FALLBACK5-NEXT: shrdq %cl, %r10, %r15
+; FALLBACK5-NEXT: shrdq %cl, %rax, %r14
+; FALLBACK5-NEXT: # kill: def $cl killed $cl killed $ecx
+; FALLBACK5-NEXT: sarq %cl, %r11
+; FALLBACK5-NEXT: movq %r15, 8(%rdx)
+; FALLBACK5-NEXT: movq %r9, 48(%rdx)
+; FALLBACK5-NEXT: movq %r11, 56(%rdx)
+; FALLBACK5-NEXT: movq %rdi, 32(%rdx)
+; FALLBACK5-NEXT: movq %rbx, 40(%rdx)
+; FALLBACK5-NEXT: movq %r8, 16(%rdx)
+; FALLBACK5-NEXT: movq %rsi, 24(%rdx)
+; FALLBACK5-NEXT: movq %r14, (%rdx)
+; FALLBACK5-NEXT: popq %rbx
+; FALLBACK5-NEXT: popq %r14
+; FALLBACK5-NEXT: popq %r15
+; FALLBACK5-NEXT: retq
+;
+; FALLBACK6-LABEL: ashr_64bytes:
+; FALLBACK6: # %bb.0:
+; FALLBACK6-NEXT: pushq %rbp
+; FALLBACK6-NEXT: pushq %r15
+; FALLBACK6-NEXT: pushq %r14
+; FALLBACK6-NEXT: pushq %r13
+; FALLBACK6-NEXT: pushq %r12
+; FALLBACK6-NEXT: pushq %rbx
+; FALLBACK6-NEXT: pushq %rax
+; FALLBACK6-NEXT: movups (%rdi), %xmm0
+; FALLBACK6-NEXT: movups 16(%rdi), %xmm1
+; FALLBACK6-NEXT: movups 32(%rdi), %xmm2
+; FALLBACK6-NEXT: movq 48(%rdi), %rcx
+; FALLBACK6-NEXT: movq 56(%rdi), %rdi
+; FALLBACK6-NEXT: movl (%rsi), %eax
+; FALLBACK6-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK6-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
+; FALLBACK6-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; FALLBACK6-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; FALLBACK6-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK6-NEXT: sarq $63, %rdi
+; FALLBACK6-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK6-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK6-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK6-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK6-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK6-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK6-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK6-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK6-NEXT: leal (,%rax,8), %esi
+; FALLBACK6-NEXT: andl $56, %esi
+; FALLBACK6-NEXT: andl $56, %eax
+; FALLBACK6-NEXT: shrxq %rsi, -128(%rsp,%rax), %r11
+; FALLBACK6-NEXT: movq -112(%rsp,%rax), %rcx
+; FALLBACK6-NEXT: movq -104(%rsp,%rax), %rdi
+; FALLBACK6-NEXT: shrxq %rsi, %rdi, %r12
+; FALLBACK6-NEXT: movq -96(%rsp,%rax), %r13
+; FALLBACK6-NEXT: shrxq %rsi, %rcx, %r9
+; FALLBACK6-NEXT: movq -88(%rsp,%rax), %r10
+; FALLBACK6-NEXT: shrxq %rsi, %r10, %r14
+; FALLBACK6-NEXT: shrxq %rsi, %r13, %r15
+; FALLBACK6-NEXT: movl %esi, %ebx
+; FALLBACK6-NEXT: notb %bl
+; FALLBACK6-NEXT: movq -120(%rsp,%rax), %rbp
+; FALLBACK6-NEXT: leaq (%rbp,%rbp), %r8
+; FALLBACK6-NEXT: shlxq %rbx, %r8, %r8
+; FALLBACK6-NEXT: orq %r11, %r8
+; FALLBACK6-NEXT: leaq (%r13,%r13), %r11
+; FALLBACK6-NEXT: shlxq %rbx, %r11, %r11
+; FALLBACK6-NEXT: orq %r12, %r11
+; FALLBACK6-NEXT: movq -80(%rsp,%rax), %r12
+; FALLBACK6-NEXT: shrxq %rsi, %r12, %r13
+; FALLBACK6-NEXT: shrxq %rsi, %rbp, %rbp
+; FALLBACK6-NEXT: movq -72(%rsp,%rax), %rax
+; FALLBACK6-NEXT: sarxq %rsi, %rax, %rsi
+; FALLBACK6-NEXT: addq %rdi, %rdi
+; FALLBACK6-NEXT: shlxq %rbx, %rdi, %rdi
+; FALLBACK6-NEXT: orq %r9, %rdi
+; FALLBACK6-NEXT: leaq (%r12,%r12), %r9
+; FALLBACK6-NEXT: shlxq %rbx, %r9, %r9
+; FALLBACK6-NEXT: orq %r14, %r9
+; FALLBACK6-NEXT: addq %r10, %r10
+; FALLBACK6-NEXT: shlxq %rbx, %r10, %r10
+; FALLBACK6-NEXT: orq %r15, %r10
+; FALLBACK6-NEXT: addq %rax, %rax
+; FALLBACK6-NEXT: shlxq %rbx, %rax, %rax
+; FALLBACK6-NEXT: orq %r13, %rax
+; FALLBACK6-NEXT: addq %rcx, %rcx
+; FALLBACK6-NEXT: shlxq %rbx, %rcx, %rcx
+; FALLBACK6-NEXT: orq %rbp, %rcx
+; FALLBACK6-NEXT: movq %rsi, 56(%rdx)
+; FALLBACK6-NEXT: movq %rcx, 8(%rdx)
+; FALLBACK6-NEXT: movq %rax, 48(%rdx)
+; FALLBACK6-NEXT: movq %r10, 32(%rdx)
+; FALLBACK6-NEXT: movq %r9, 40(%rdx)
+; FALLBACK6-NEXT: movq %rdi, 16(%rdx)
+; FALLBACK6-NEXT: movq %r11, 24(%rdx)
+; FALLBACK6-NEXT: movq %r8, (%rdx)
+; FALLBACK6-NEXT: addq $8, %rsp
+; FALLBACK6-NEXT: popq %rbx
+; FALLBACK6-NEXT: popq %r12
+; FALLBACK6-NEXT: popq %r13
+; FALLBACK6-NEXT: popq %r14
+; FALLBACK6-NEXT: popq %r15
+; FALLBACK6-NEXT: popq %rbp
+; FALLBACK6-NEXT: retq
+;
+; FALLBACK7-LABEL: ashr_64bytes:
+; FALLBACK7: # %bb.0:
+; FALLBACK7-NEXT: pushq %r15
+; FALLBACK7-NEXT: pushq %r14
+; FALLBACK7-NEXT: pushq %rbx
+; FALLBACK7-NEXT: movups (%rdi), %xmm0
+; FALLBACK7-NEXT: movups 16(%rdi), %xmm1
+; FALLBACK7-NEXT: movups 32(%rdi), %xmm2
+; FALLBACK7-NEXT: movq 48(%rdi), %rcx
+; FALLBACK7-NEXT: movq 56(%rdi), %rdi
+; FALLBACK7-NEXT: movl (%rsi), %eax
+; FALLBACK7-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK7-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
+; FALLBACK7-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; FALLBACK7-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; FALLBACK7-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK7-NEXT: sarq $63, %rdi
+; FALLBACK7-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK7-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK7-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK7-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK7-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK7-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK7-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK7-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK7-NEXT: leal (,%rax,8), %ecx
+; FALLBACK7-NEXT: andl $56, %ecx
+; FALLBACK7-NEXT: andl $56, %eax
+; FALLBACK7-NEXT: movq -96(%rsp,%rax), %rdi
+; FALLBACK7-NEXT: movq -104(%rsp,%rax), %r9
+; FALLBACK7-NEXT: movq %r9, %rsi
+; FALLBACK7-NEXT: shrdq %cl, %rdi, %rsi
+; FALLBACK7-NEXT: movq -112(%rsp,%rax), %r10
+; FALLBACK7-NEXT: movq %r10, %r8
+; FALLBACK7-NEXT: shrdq %cl, %r9, %r8
+; FALLBACK7-NEXT: movq -80(%rsp,%rax), %r9
+; FALLBACK7-NEXT: movq -88(%rsp,%rax), %r11
+; FALLBACK7-NEXT: movq %r11, %rbx
+; FALLBACK7-NEXT: shrdq %cl, %r9, %rbx
+; FALLBACK7-NEXT: shrdq %cl, %r11, %rdi
+; FALLBACK7-NEXT: movq -72(%rsp,%rax), %r11
+; FALLBACK7-NEXT: shrdq %cl, %r11, %r9
+; FALLBACK7-NEXT: movq -128(%rsp,%rax), %r14
+; FALLBACK7-NEXT: movq -120(%rsp,%rax), %rax
+; FALLBACK7-NEXT: movq %rax, %r15
+; FALLBACK7-NEXT: shrdq %cl, %r10, %r15
+; FALLBACK7-NEXT: sarxq %rcx, %r11, %r10
+; FALLBACK7-NEXT: # kill: def $cl killed $cl killed $rcx
+; FALLBACK7-NEXT: shrdq %cl, %rax, %r14
+; FALLBACK7-NEXT: movq %r15, 8(%rdx)
+; FALLBACK7-NEXT: movq %r9, 48(%rdx)
+; FALLBACK7-NEXT: movq %rdi, 32(%rdx)
+; FALLBACK7-NEXT: movq %rbx, 40(%rdx)
+; FALLBACK7-NEXT: movq %r8, 16(%rdx)
+; FALLBACK7-NEXT: movq %rsi, 24(%rdx)
+; FALLBACK7-NEXT: movq %r14, (%rdx)
+; FALLBACK7-NEXT: movq %r10, 56(%rdx)
+; FALLBACK7-NEXT: popq %rbx
+; FALLBACK7-NEXT: popq %r14
+; FALLBACK7-NEXT: popq %r15
+; FALLBACK7-NEXT: retq
+;
+; FALLBACK8-LABEL: ashr_64bytes:
+; FALLBACK8: # %bb.0:
+; FALLBACK8-NEXT: pushq %rbp
+; FALLBACK8-NEXT: pushq %r15
+; FALLBACK8-NEXT: pushq %r14
+; FALLBACK8-NEXT: pushq %r13
+; FALLBACK8-NEXT: pushq %r12
+; FALLBACK8-NEXT: pushq %rbx
+; FALLBACK8-NEXT: pushq %rax
+; FALLBACK8-NEXT: vmovups (%rdi), %ymm0
+; FALLBACK8-NEXT: vmovups 32(%rdi), %xmm1
+; FALLBACK8-NEXT: movq 48(%rdi), %rax
+; FALLBACK8-NEXT: movq 56(%rdi), %rcx
+; FALLBACK8-NEXT: movl (%rsi), %edi
+; FALLBACK8-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
+; FALLBACK8-NEXT: movq %rax, -{{[0-9]+}}(%rsp)
+; FALLBACK8-NEXT: vmovaps %xmm1, -{{[0-9]+}}(%rsp)
+; FALLBACK8-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
+; FALLBACK8-NEXT: sarq $63, %rcx
+; FALLBACK8-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
+; FALLBACK8-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
+; FALLBACK8-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
+; FALLBACK8-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
+; FALLBACK8-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
+; FALLBACK8-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
+; FALLBACK8-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
+; FALLBACK8-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
+; FALLBACK8-NEXT: leal (,%rdi,8), %eax
+; FALLBACK8-NEXT: andl $56, %eax
+; FALLBACK8-NEXT: andl $56, %edi
+; FALLBACK8-NEXT: movq -128(%rsp,%rdi), %r10
+; FALLBACK8-NEXT: movq -120(%rsp,%rdi), %r9
+; FALLBACK8-NEXT: movl %eax, %ecx
+; FALLBACK8-NEXT: shrq %cl, %r10
+; FALLBACK8-NEXT: movl %eax, %esi
+; FALLBACK8-NEXT: notb %sil
+; FALLBACK8-NEXT: leaq (%r9,%r9), %r8
+; FALLBACK8-NEXT: movl %esi, %ecx
+; FALLBACK8-NEXT: shlq %cl, %r8
+; FALLBACK8-NEXT: orq %r10, %r8
+; FALLBACK8-NEXT: movq -104(%rsp,%rdi), %r10
+; FALLBACK8-NEXT: movq %r10, %rbx
+; FALLBACK8-NEXT: movl %eax, %ecx
+; FALLBACK8-NEXT: shrq %cl, %rbx
+; FALLBACK8-NEXT: movq -96(%rsp,%rdi), %r12
+; FALLBACK8-NEXT: leaq (%r12,%r12), %r11
+; FALLBACK8-NEXT: movl %esi, %ecx
+; FALLBACK8-NEXT: shlq %cl, %r11
+; FALLBACK8-NEXT: orq %rbx, %r11
+; FALLBACK8-NEXT: movq -112(%rsp,%rdi), %rbx
+; FALLBACK8-NEXT: movq %rbx, %r14
+; FALLBACK8-NEXT: movl %eax, %ecx
+; FALLBACK8-NEXT: shrq %cl, %r14
+; FALLBACK8-NEXT: addq %r10, %r10
+; FALLBACK8-NEXT: movl %esi, %ecx
+; FALLBACK8-NEXT: shlq %cl, %r10
+; FALLBACK8-NEXT: orq %r14, %r10
+; FALLBACK8-NEXT: movq -88(%rsp,%rdi), %r14
+; FALLBACK8-NEXT: movq %r14, %r13
+; FALLBACK8-NEXT: movl %eax, %ecx
+; FALLBACK8-NEXT: shrq %cl, %r13
+; FALLBACK8-NEXT: movq -80(%rsp,%rdi), %rbp
+; FALLBACK8-NEXT: leaq (%rbp,%rbp), %r15
+; FALLBACK8-NEXT: movl %esi, %ecx
+; FALLBACK8-NEXT: shlq %cl, %r15
+; FALLBACK8-NEXT: orq %r13, %r15
+; FALLBACK8-NEXT: movl %eax, %ecx
+; FALLBACK8-NEXT: shrq %cl, %r12
+; FALLBACK8-NEXT: addq %r14, %r14
+; FALLBACK8-NEXT: movl %esi, %ecx
+; FALLBACK8-NEXT: shlq %cl, %r14
+; FALLBACK8-NEXT: orq %r12, %r14
+; FALLBACK8-NEXT: movl %eax, %ecx
+; FALLBACK8-NEXT: shrq %cl, %rbp
+; FALLBACK8-NEXT: movq -72(%rsp,%rdi), %rdi
+; FALLBACK8-NEXT: leaq (%rdi,%rdi), %r12
+; FALLBACK8-NEXT: movl %esi, %ecx
+; FALLBACK8-NEXT: shlq %cl, %r12
+; FALLBACK8-NEXT: orq %rbp, %r12
+; FALLBACK8-NEXT: movl %eax, %ecx
+; FALLBACK8-NEXT: shrq %cl, %r9
+; FALLBACK8-NEXT: addq %rbx, %rbx
+; FALLBACK8-NEXT: movl %esi, %ecx
+; FALLBACK8-NEXT: shlq %cl, %rbx
+; FALLBACK8-NEXT: orq %r9, %rbx
+; FALLBACK8-NEXT: movl %eax, %ecx
+; FALLBACK8-NEXT: sarq %cl, %rdi
+; FALLBACK8-NEXT: movq %rdi, 56(%rdx)
+; FALLBACK8-NEXT: movq %rbx, 8(%rdx)
+; FALLBACK8-NEXT: movq %r12, 48(%rdx)
+; FALLBACK8-NEXT: movq %r14, 32(%rdx)
+; FALLBACK8-NEXT: movq %r15, 40(%rdx)
+; FALLBACK8-NEXT: movq %r10, 16(%rdx)
+; FALLBACK8-NEXT: movq %r11, 24(%rdx)
+; FALLBACK8-NEXT: movq %r8, (%rdx)
+; FALLBACK8-NEXT: addq $8, %rsp
+; FALLBACK8-NEXT: popq %rbx
+; FALLBACK8-NEXT: popq %r12
+; FALLBACK8-NEXT: popq %r13
+; FALLBACK8-NEXT: popq %r14
+; FALLBACK8-NEXT: popq %r15
+; FALLBACK8-NEXT: popq %rbp
+; FALLBACK8-NEXT: vzeroupper
+; FALLBACK8-NEXT: retq
+;
+; FALLBACK9-LABEL: ashr_64bytes:
+; FALLBACK9: # %bb.0:
+; FALLBACK9-NEXT: pushq %r15
+; FALLBACK9-NEXT: pushq %r14
+; FALLBACK9-NEXT: pushq %rbx
+; FALLBACK9-NEXT: vmovups (%rdi), %ymm0
+; FALLBACK9-NEXT: vmovups 32(%rdi), %xmm1
+; FALLBACK9-NEXT: movq 48(%rdi), %rcx
+; FALLBACK9-NEXT: movq 56(%rdi), %rdi
+; FALLBACK9-NEXT: movl (%rsi), %eax
+; FALLBACK9-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK9-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
+; FALLBACK9-NEXT: vmovaps %xmm1, -{{[0-9]+}}(%rsp)
+; FALLBACK9-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
+; FALLBACK9-NEXT: sarq $63, %rdi
+; FALLBACK9-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK9-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK9-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK9-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK9-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK9-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK9-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK9-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK9-NEXT: leal (,%rax,8), %ecx
+; FALLBACK9-NEXT: andl $56, %ecx
+; FALLBACK9-NEXT: andl $56, %eax
+; FALLBACK9-NEXT: movq -96(%rsp,%rax), %rdi
+; FALLBACK9-NEXT: movq -104(%rsp,%rax), %r9
+; FALLBACK9-NEXT: movq %r9, %rsi
+; FALLBACK9-NEXT: shrdq %cl, %rdi, %rsi
+; FALLBACK9-NEXT: movq -112(%rsp,%rax), %r10
+; FALLBACK9-NEXT: movq %r10, %r8
+; FALLBACK9-NEXT: shrdq %cl, %r9, %r8
+; FALLBACK9-NEXT: movq -80(%rsp,%rax), %r9
+; FALLBACK9-NEXT: movq -88(%rsp,%rax), %r11
+; FALLBACK9-NEXT: movq %r11, %rbx
+; FALLBACK9-NEXT: shrdq %cl, %r9, %rbx
+; FALLBACK9-NEXT: shrdq %cl, %r11, %rdi
+; FALLBACK9-NEXT: movq -72(%rsp,%rax), %r11
+; FALLBACK9-NEXT: shrdq %cl, %r11, %r9
+; FALLBACK9-NEXT: movq -128(%rsp,%rax), %r14
+; FALLBACK9-NEXT: movq -120(%rsp,%rax), %rax
+; FALLBACK9-NEXT: movq %rax, %r15
+; FALLBACK9-NEXT: shrdq %cl, %r10, %r15
+; FALLBACK9-NEXT: shrdq %cl, %rax, %r14
+; FALLBACK9-NEXT: # kill: def $cl killed $cl killed $ecx
+; FALLBACK9-NEXT: sarq %cl, %r11
+; FALLBACK9-NEXT: movq %r15, 8(%rdx)
+; FALLBACK9-NEXT: movq %r9, 48(%rdx)
+; FALLBACK9-NEXT: movq %r11, 56(%rdx)
+; FALLBACK9-NEXT: movq %rdi, 32(%rdx)
+; FALLBACK9-NEXT: movq %rbx, 40(%rdx)
+; FALLBACK9-NEXT: movq %r8, 16(%rdx)
+; FALLBACK9-NEXT: movq %rsi, 24(%rdx)
+; FALLBACK9-NEXT: movq %r14, (%rdx)
+; FALLBACK9-NEXT: popq %rbx
+; FALLBACK9-NEXT: popq %r14
+; FALLBACK9-NEXT: popq %r15
+; FALLBACK9-NEXT: vzeroupper
+; FALLBACK9-NEXT: retq
+;
+; FALLBACK10-LABEL: ashr_64bytes:
+; FALLBACK10: # %bb.0:
+; FALLBACK10-NEXT: pushq %rbp
+; FALLBACK10-NEXT: pushq %r15
+; FALLBACK10-NEXT: pushq %r14
+; FALLBACK10-NEXT: pushq %r13
+; FALLBACK10-NEXT: pushq %r12
+; FALLBACK10-NEXT: pushq %rbx
+; FALLBACK10-NEXT: pushq %rax
+; FALLBACK10-NEXT: vmovups (%rdi), %ymm0
+; FALLBACK10-NEXT: vmovups 32(%rdi), %xmm1
+; FALLBACK10-NEXT: movq 48(%rdi), %rcx
+; FALLBACK10-NEXT: movq 56(%rdi), %rdi
+; FALLBACK10-NEXT: movl (%rsi), %eax
+; FALLBACK10-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK10-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
+; FALLBACK10-NEXT: vmovaps %xmm1, -{{[0-9]+}}(%rsp)
+; FALLBACK10-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
+; FALLBACK10-NEXT: sarq $63, %rdi
+; FALLBACK10-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK10-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK10-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK10-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK10-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK10-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK10-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK10-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK10-NEXT: leal (,%rax,8), %esi
+; FALLBACK10-NEXT: andl $56, %esi
+; FALLBACK10-NEXT: andl $56, %eax
+; FALLBACK10-NEXT: shrxq %rsi, -128(%rsp,%rax), %r11
+; FALLBACK10-NEXT: movq -112(%rsp,%rax), %rcx
+; FALLBACK10-NEXT: movq -104(%rsp,%rax), %rdi
+; FALLBACK10-NEXT: shrxq %rsi, %rdi, %r12
+; FALLBACK10-NEXT: movq -96(%rsp,%rax), %r13
+; FALLBACK10-NEXT: shrxq %rsi, %rcx, %r9
+; FALLBACK10-NEXT: movq -88(%rsp,%rax), %r10
+; FALLBACK10-NEXT: shrxq %rsi, %r10, %r14
+; FALLBACK10-NEXT: shrxq %rsi, %r13, %r15
+; FALLBACK10-NEXT: movl %esi, %ebx
+; FALLBACK10-NEXT: notb %bl
+; FALLBACK10-NEXT: movq -120(%rsp,%rax), %rbp
+; FALLBACK10-NEXT: leaq (%rbp,%rbp), %r8
+; FALLBACK10-NEXT: shlxq %rbx, %r8, %r8
+; FALLBACK10-NEXT: orq %r11, %r8
+; FALLBACK10-NEXT: leaq (%r13,%r13), %r11
+; FALLBACK10-NEXT: shlxq %rbx, %r11, %r11
+; FALLBACK10-NEXT: orq %r12, %r11
+; FALLBACK10-NEXT: movq -80(%rsp,%rax), %r12
+; FALLBACK10-NEXT: shrxq %rsi, %r12, %r13
+; FALLBACK10-NEXT: shrxq %rsi, %rbp, %rbp
+; FALLBACK10-NEXT: movq -72(%rsp,%rax), %rax
+; FALLBACK10-NEXT: sarxq %rsi, %rax, %rsi
+; FALLBACK10-NEXT: addq %rdi, %rdi
+; FALLBACK10-NEXT: shlxq %rbx, %rdi, %rdi
+; FALLBACK10-NEXT: orq %r9, %rdi
+; FALLBACK10-NEXT: leaq (%r12,%r12), %r9
+; FALLBACK10-NEXT: shlxq %rbx, %r9, %r9
+; FALLBACK10-NEXT: orq %r14, %r9
+; FALLBACK10-NEXT: addq %r10, %r10
+; FALLBACK10-NEXT: shlxq %rbx, %r10, %r10
+; FALLBACK10-NEXT: orq %r15, %r10
+; FALLBACK10-NEXT: addq %rax, %rax
+; FALLBACK10-NEXT: shlxq %rbx, %rax, %rax
+; FALLBACK10-NEXT: orq %r13, %rax
+; FALLBACK10-NEXT: addq %rcx, %rcx
+; FALLBACK10-NEXT: shlxq %rbx, %rcx, %rcx
+; FALLBACK10-NEXT: orq %rbp, %rcx
+; FALLBACK10-NEXT: movq %rsi, 56(%rdx)
+; FALLBACK10-NEXT: movq %rcx, 8(%rdx)
+; FALLBACK10-NEXT: movq %rax, 48(%rdx)
+; FALLBACK10-NEXT: movq %r10, 32(%rdx)
+; FALLBACK10-NEXT: movq %r9, 40(%rdx)
+; FALLBACK10-NEXT: movq %rdi, 16(%rdx)
+; FALLBACK10-NEXT: movq %r11, 24(%rdx)
+; FALLBACK10-NEXT: movq %r8, (%rdx)
+; FALLBACK10-NEXT: addq $8, %rsp
+; FALLBACK10-NEXT: popq %rbx
+; FALLBACK10-NEXT: popq %r12
+; FALLBACK10-NEXT: popq %r13
+; FALLBACK10-NEXT: popq %r14
+; FALLBACK10-NEXT: popq %r15
+; FALLBACK10-NEXT: popq %rbp
+; FALLBACK10-NEXT: vzeroupper
+; FALLBACK10-NEXT: retq
+;
+; FALLBACK11-LABEL: ashr_64bytes:
+; FALLBACK11: # %bb.0:
+; FALLBACK11-NEXT: pushq %r15
+; FALLBACK11-NEXT: pushq %r14
+; FALLBACK11-NEXT: pushq %rbx
+; FALLBACK11-NEXT: vmovups (%rdi), %ymm0
+; FALLBACK11-NEXT: vmovups 32(%rdi), %xmm1
+; FALLBACK11-NEXT: movq 48(%rdi), %rcx
+; FALLBACK11-NEXT: movq 56(%rdi), %rdi
+; FALLBACK11-NEXT: movl (%rsi), %eax
+; FALLBACK11-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK11-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
+; FALLBACK11-NEXT: vmovaps %xmm1, -{{[0-9]+}}(%rsp)
+; FALLBACK11-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
+; FALLBACK11-NEXT: sarq $63, %rdi
+; FALLBACK11-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK11-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK11-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK11-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK11-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK11-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK11-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK11-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK11-NEXT: leal (,%rax,8), %ecx
+; FALLBACK11-NEXT: andl $56, %ecx
+; FALLBACK11-NEXT: andl $56, %eax
+; FALLBACK11-NEXT: movq -96(%rsp,%rax), %rdi
+; FALLBACK11-NEXT: movq -104(%rsp,%rax), %r9
+; FALLBACK11-NEXT: movq %r9, %rsi
+; FALLBACK11-NEXT: shrdq %cl, %rdi, %rsi
+; FALLBACK11-NEXT: movq -112(%rsp,%rax), %r10
+; FALLBACK11-NEXT: movq %r10, %r8
+; FALLBACK11-NEXT: shrdq %cl, %r9, %r8
+; FALLBACK11-NEXT: movq -80(%rsp,%rax), %r9
+; FALLBACK11-NEXT: movq -88(%rsp,%rax), %r11
+; FALLBACK11-NEXT: movq %r11, %rbx
+; FALLBACK11-NEXT: shrdq %cl, %r9, %rbx
+; FALLBACK11-NEXT: shrdq %cl, %r11, %rdi
+; FALLBACK11-NEXT: movq -72(%rsp,%rax), %r11
+; FALLBACK11-NEXT: shrdq %cl, %r11, %r9
+; FALLBACK11-NEXT: movq -128(%rsp,%rax), %r14
+; FALLBACK11-NEXT: movq -120(%rsp,%rax), %rax
+; FALLBACK11-NEXT: movq %rax, %r15
+; FALLBACK11-NEXT: shrdq %cl, %r10, %r15
+; FALLBACK11-NEXT: sarxq %rcx, %r11, %r10
+; FALLBACK11-NEXT: # kill: def $cl killed $cl killed $rcx
+; FALLBACK11-NEXT: shrdq %cl, %rax, %r14
+; FALLBACK11-NEXT: movq %r15, 8(%rdx)
+; FALLBACK11-NEXT: movq %r9, 48(%rdx)
+; FALLBACK11-NEXT: movq %rdi, 32(%rdx)
+; FALLBACK11-NEXT: movq %rbx, 40(%rdx)
+; FALLBACK11-NEXT: movq %r8, 16(%rdx)
+; FALLBACK11-NEXT: movq %rsi, 24(%rdx)
+; FALLBACK11-NEXT: movq %r14, (%rdx)
+; FALLBACK11-NEXT: movq %r10, 56(%rdx)
+; FALLBACK11-NEXT: popq %rbx
+; FALLBACK11-NEXT: popq %r14
+; FALLBACK11-NEXT: popq %r15
+; FALLBACK11-NEXT: vzeroupper
+; FALLBACK11-NEXT: retq
+;
+; FALLBACK12-LABEL: ashr_64bytes:
+; FALLBACK12: # %bb.0:
+; FALLBACK12-NEXT: pushq %rbp
+; FALLBACK12-NEXT: pushq %r15
+; FALLBACK12-NEXT: pushq %r14
+; FALLBACK12-NEXT: pushq %r13
+; FALLBACK12-NEXT: pushq %r12
+; FALLBACK12-NEXT: pushq %rbx
+; FALLBACK12-NEXT: pushq %rax
+; FALLBACK12-NEXT: vmovups (%rdi), %ymm0
+; FALLBACK12-NEXT: vmovups 32(%rdi), %xmm1
+; FALLBACK12-NEXT: movq 48(%rdi), %rax
+; FALLBACK12-NEXT: movq 56(%rdi), %rcx
+; FALLBACK12-NEXT: movl (%rsi), %edi
+; FALLBACK12-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
+; FALLBACK12-NEXT: movq %rax, -{{[0-9]+}}(%rsp)
+; FALLBACK12-NEXT: vmovaps %xmm1, -{{[0-9]+}}(%rsp)
+; FALLBACK12-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
+; FALLBACK12-NEXT: sarq $63, %rcx
+; FALLBACK12-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
+; FALLBACK12-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
+; FALLBACK12-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
+; FALLBACK12-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
+; FALLBACK12-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
+; FALLBACK12-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
+; FALLBACK12-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
+; FALLBACK12-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
+; FALLBACK12-NEXT: leal (,%rdi,8), %eax
+; FALLBACK12-NEXT: andl $56, %eax
+; FALLBACK12-NEXT: andl $56, %edi
+; FALLBACK12-NEXT: movq -128(%rsp,%rdi), %r10
+; FALLBACK12-NEXT: movq -120(%rsp,%rdi), %r9
+; FALLBACK12-NEXT: movl %eax, %ecx
+; FALLBACK12-NEXT: shrq %cl, %r10
+; FALLBACK12-NEXT: movl %eax, %esi
+; FALLBACK12-NEXT: notb %sil
+; FALLBACK12-NEXT: leaq (%r9,%r9), %r8
+; FALLBACK12-NEXT: movl %esi, %ecx
+; FALLBACK12-NEXT: shlq %cl, %r8
+; FALLBACK12-NEXT: orq %r10, %r8
+; FALLBACK12-NEXT: movq -104(%rsp,%rdi), %r10
+; FALLBACK12-NEXT: movq %r10, %rbx
+; FALLBACK12-NEXT: movl %eax, %ecx
+; FALLBACK12-NEXT: shrq %cl, %rbx
+; FALLBACK12-NEXT: movq -96(%rsp,%rdi), %r12
+; FALLBACK12-NEXT: leaq (%r12,%r12), %r11
+; FALLBACK12-NEXT: movl %esi, %ecx
+; FALLBACK12-NEXT: shlq %cl, %r11
+; FALLBACK12-NEXT: orq %rbx, %r11
+; FALLBACK12-NEXT: movq -112(%rsp,%rdi), %rbx
+; FALLBACK12-NEXT: movq %rbx, %r14
+; FALLBACK12-NEXT: movl %eax, %ecx
+; FALLBACK12-NEXT: shrq %cl, %r14
+; FALLBACK12-NEXT: addq %r10, %r10
+; FALLBACK12-NEXT: movl %esi, %ecx
+; FALLBACK12-NEXT: shlq %cl, %r10
+; FALLBACK12-NEXT: orq %r14, %r10
+; FALLBACK12-NEXT: movq -88(%rsp,%rdi), %r14
+; FALLBACK12-NEXT: movq %r14, %r13
+; FALLBACK12-NEXT: movl %eax, %ecx
+; FALLBACK12-NEXT: shrq %cl, %r13
+; FALLBACK12-NEXT: movq -80(%rsp,%rdi), %rbp
+; FALLBACK12-NEXT: leaq (%rbp,%rbp), %r15
+; FALLBACK12-NEXT: movl %esi, %ecx
+; FALLBACK12-NEXT: shlq %cl, %r15
+; FALLBACK12-NEXT: orq %r13, %r15
+; FALLBACK12-NEXT: movl %eax, %ecx
+; FALLBACK12-NEXT: shrq %cl, %r12
+; FALLBACK12-NEXT: addq %r14, %r14
+; FALLBACK12-NEXT: movl %esi, %ecx
+; FALLBACK12-NEXT: shlq %cl, %r14
+; FALLBACK12-NEXT: orq %r12, %r14
+; FALLBACK12-NEXT: movl %eax, %ecx
+; FALLBACK12-NEXT: shrq %cl, %rbp
+; FALLBACK12-NEXT: movq -72(%rsp,%rdi), %rdi
+; FALLBACK12-NEXT: leaq (%rdi,%rdi), %r12
+; FALLBACK12-NEXT: movl %esi, %ecx
+; FALLBACK12-NEXT: shlq %cl, %r12
+; FALLBACK12-NEXT: orq %rbp, %r12
+; FALLBACK12-NEXT: movl %eax, %ecx
+; FALLBACK12-NEXT: shrq %cl, %r9
+; FALLBACK12-NEXT: addq %rbx, %rbx
+; FALLBACK12-NEXT: movl %esi, %ecx
+; FALLBACK12-NEXT: shlq %cl, %rbx
+; FALLBACK12-NEXT: orq %r9, %rbx
+; FALLBACK12-NEXT: movl %eax, %ecx
+; FALLBACK12-NEXT: sarq %cl, %rdi
+; FALLBACK12-NEXT: movq %rdi, 56(%rdx)
+; FALLBACK12-NEXT: movq %rbx, 8(%rdx)
+; FALLBACK12-NEXT: movq %r12, 48(%rdx)
+; FALLBACK12-NEXT: movq %r14, 32(%rdx)
+; FALLBACK12-NEXT: movq %r15, 40(%rdx)
+; FALLBACK12-NEXT: movq %r10, 16(%rdx)
+; FALLBACK12-NEXT: movq %r11, 24(%rdx)
+; FALLBACK12-NEXT: movq %r8, (%rdx)
+; FALLBACK12-NEXT: addq $8, %rsp
+; FALLBACK12-NEXT: popq %rbx
+; FALLBACK12-NEXT: popq %r12
+; FALLBACK12-NEXT: popq %r13
+; FALLBACK12-NEXT: popq %r14
+; FALLBACK12-NEXT: popq %r15
+; FALLBACK12-NEXT: popq %rbp
+; FALLBACK12-NEXT: vzeroupper
+; FALLBACK12-NEXT: retq
+;
+; FALLBACK13-LABEL: ashr_64bytes:
+; FALLBACK13: # %bb.0:
+; FALLBACK13-NEXT: pushq %r15
+; FALLBACK13-NEXT: pushq %r14
+; FALLBACK13-NEXT: pushq %rbx
+; FALLBACK13-NEXT: vmovups (%rdi), %ymm0
+; FALLBACK13-NEXT: vmovups 32(%rdi), %xmm1
+; FALLBACK13-NEXT: movq 48(%rdi), %rcx
+; FALLBACK13-NEXT: movq 56(%rdi), %rdi
+; FALLBACK13-NEXT: movl (%rsi), %eax
+; FALLBACK13-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK13-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
+; FALLBACK13-NEXT: vmovaps %xmm1, -{{[0-9]+}}(%rsp)
+; FALLBACK13-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
+; FALLBACK13-NEXT: sarq $63, %rdi
+; FALLBACK13-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK13-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK13-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK13-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK13-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK13-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK13-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK13-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK13-NEXT: leal (,%rax,8), %ecx
+; FALLBACK13-NEXT: andl $56, %ecx
+; FALLBACK13-NEXT: andl $56, %eax
+; FALLBACK13-NEXT: movq -96(%rsp,%rax), %rdi
+; FALLBACK13-NEXT: movq -104(%rsp,%rax), %r9
+; FALLBACK13-NEXT: movq %r9, %rsi
+; FALLBACK13-NEXT: shrdq %cl, %rdi, %rsi
+; FALLBACK13-NEXT: movq -112(%rsp,%rax), %r10
+; FALLBACK13-NEXT: movq %r10, %r8
+; FALLBACK13-NEXT: shrdq %cl, %r9, %r8
+; FALLBACK13-NEXT: movq -80(%rsp,%rax), %r9
+; FALLBACK13-NEXT: movq -88(%rsp,%rax), %r11
+; FALLBACK13-NEXT: movq %r11, %rbx
+; FALLBACK13-NEXT: shrdq %cl, %r9, %rbx
+; FALLBACK13-NEXT: shrdq %cl, %r11, %rdi
+; FALLBACK13-NEXT: movq -72(%rsp,%rax), %r11
+; FALLBACK13-NEXT: shrdq %cl, %r11, %r9
+; FALLBACK13-NEXT: movq -128(%rsp,%rax), %r14
+; FALLBACK13-NEXT: movq -120(%rsp,%rax), %rax
+; FALLBACK13-NEXT: movq %rax, %r15
+; FALLBACK13-NEXT: shrdq %cl, %r10, %r15
+; FALLBACK13-NEXT: shrdq %cl, %rax, %r14
+; FALLBACK13-NEXT: # kill: def $cl killed $cl killed $ecx
+; FALLBACK13-NEXT: sarq %cl, %r11
+; FALLBACK13-NEXT: movq %r15, 8(%rdx)
+; FALLBACK13-NEXT: movq %r9, 48(%rdx)
+; FALLBACK13-NEXT: movq %r11, 56(%rdx)
+; FALLBACK13-NEXT: movq %rdi, 32(%rdx)
+; FALLBACK13-NEXT: movq %rbx, 40(%rdx)
+; FALLBACK13-NEXT: movq %r8, 16(%rdx)
+; FALLBACK13-NEXT: movq %rsi, 24(%rdx)
+; FALLBACK13-NEXT: movq %r14, (%rdx)
+; FALLBACK13-NEXT: popq %rbx
+; FALLBACK13-NEXT: popq %r14
+; FALLBACK13-NEXT: popq %r15
+; FALLBACK13-NEXT: vzeroupper
+; FALLBACK13-NEXT: retq
+;
+; FALLBACK14-LABEL: ashr_64bytes:
+; FALLBACK14: # %bb.0:
+; FALLBACK14-NEXT: pushq %rbp
+; FALLBACK14-NEXT: pushq %r15
+; FALLBACK14-NEXT: pushq %r14
+; FALLBACK14-NEXT: pushq %r13
+; FALLBACK14-NEXT: pushq %r12
+; FALLBACK14-NEXT: pushq %rbx
+; FALLBACK14-NEXT: pushq %rax
+; FALLBACK14-NEXT: vmovups (%rdi), %ymm0
+; FALLBACK14-NEXT: vmovups 32(%rdi), %xmm1
+; FALLBACK14-NEXT: movq 48(%rdi), %rcx
+; FALLBACK14-NEXT: movq 56(%rdi), %rdi
+; FALLBACK14-NEXT: movl (%rsi), %eax
+; FALLBACK14-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK14-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
+; FALLBACK14-NEXT: vmovaps %xmm1, -{{[0-9]+}}(%rsp)
+; FALLBACK14-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
+; FALLBACK14-NEXT: sarq $63, %rdi
+; FALLBACK14-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK14-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK14-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK14-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK14-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK14-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK14-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK14-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK14-NEXT: leal (,%rax,8), %esi
+; FALLBACK14-NEXT: andl $56, %esi
+; FALLBACK14-NEXT: andl $56, %eax
+; FALLBACK14-NEXT: shrxq %rsi, -128(%rsp,%rax), %r11
+; FALLBACK14-NEXT: movq -112(%rsp,%rax), %rcx
+; FALLBACK14-NEXT: movq -104(%rsp,%rax), %rdi
+; FALLBACK14-NEXT: shrxq %rsi, %rdi, %r12
+; FALLBACK14-NEXT: movq -96(%rsp,%rax), %r13
+; FALLBACK14-NEXT: shrxq %rsi, %rcx, %r9
+; FALLBACK14-NEXT: movq -88(%rsp,%rax), %r10
+; FALLBACK14-NEXT: shrxq %rsi, %r10, %r14
+; FALLBACK14-NEXT: shrxq %rsi, %r13, %r15
+; FALLBACK14-NEXT: movl %esi, %ebx
+; FALLBACK14-NEXT: notb %bl
+; FALLBACK14-NEXT: movq -120(%rsp,%rax), %rbp
+; FALLBACK14-NEXT: leaq (%rbp,%rbp), %r8
+; FALLBACK14-NEXT: shlxq %rbx, %r8, %r8
+; FALLBACK14-NEXT: orq %r11, %r8
+; FALLBACK14-NEXT: leaq (%r13,%r13), %r11
+; FALLBACK14-NEXT: shlxq %rbx, %r11, %r11
+; FALLBACK14-NEXT: orq %r12, %r11
+; FALLBACK14-NEXT: movq -80(%rsp,%rax), %r12
+; FALLBACK14-NEXT: shrxq %rsi, %r12, %r13
+; FALLBACK14-NEXT: shrxq %rsi, %rbp, %rbp
+; FALLBACK14-NEXT: movq -72(%rsp,%rax), %rax
+; FALLBACK14-NEXT: sarxq %rsi, %rax, %rsi
+; FALLBACK14-NEXT: addq %rdi, %rdi
+; FALLBACK14-NEXT: shlxq %rbx, %rdi, %rdi
+; FALLBACK14-NEXT: orq %r9, %rdi
+; FALLBACK14-NEXT: leaq (%r12,%r12), %r9
+; FALLBACK14-NEXT: shlxq %rbx, %r9, %r9
+; FALLBACK14-NEXT: orq %r14, %r9
+; FALLBACK14-NEXT: addq %r10, %r10
+; FALLBACK14-NEXT: shlxq %rbx, %r10, %r10
+; FALLBACK14-NEXT: orq %r15, %r10
+; FALLBACK14-NEXT: addq %rax, %rax
+; FALLBACK14-NEXT: shlxq %rbx, %rax, %rax
+; FALLBACK14-NEXT: orq %r13, %rax
+; FALLBACK14-NEXT: addq %rcx, %rcx
+; FALLBACK14-NEXT: shlxq %rbx, %rcx, %rcx
+; FALLBACK14-NEXT: orq %rbp, %rcx
+; FALLBACK14-NEXT: movq %rsi, 56(%rdx)
+; FALLBACK14-NEXT: movq %rcx, 8(%rdx)
+; FALLBACK14-NEXT: movq %rax, 48(%rdx)
+; FALLBACK14-NEXT: movq %r10, 32(%rdx)
+; FALLBACK14-NEXT: movq %r9, 40(%rdx)
+; FALLBACK14-NEXT: movq %rdi, 16(%rdx)
+; FALLBACK14-NEXT: movq %r11, 24(%rdx)
+; FALLBACK14-NEXT: movq %r8, (%rdx)
+; FALLBACK14-NEXT: addq $8, %rsp
+; FALLBACK14-NEXT: popq %rbx
+; FALLBACK14-NEXT: popq %r12
+; FALLBACK14-NEXT: popq %r13
+; FALLBACK14-NEXT: popq %r14
+; FALLBACK14-NEXT: popq %r15
+; FALLBACK14-NEXT: popq %rbp
+; FALLBACK14-NEXT: vzeroupper
+; FALLBACK14-NEXT: retq
+;
+; FALLBACK15-LABEL: ashr_64bytes:
+; FALLBACK15: # %bb.0:
+; FALLBACK15-NEXT: pushq %r15
+; FALLBACK15-NEXT: pushq %r14
+; FALLBACK15-NEXT: pushq %rbx
+; FALLBACK15-NEXT: vmovups (%rdi), %ymm0
+; FALLBACK15-NEXT: vmovups 32(%rdi), %xmm1
+; FALLBACK15-NEXT: movq 48(%rdi), %rcx
+; FALLBACK15-NEXT: movq 56(%rdi), %rdi
+; FALLBACK15-NEXT: movl (%rsi), %eax
+; FALLBACK15-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK15-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
+; FALLBACK15-NEXT: vmovaps %xmm1, -{{[0-9]+}}(%rsp)
+; FALLBACK15-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
+; FALLBACK15-NEXT: sarq $63, %rdi
+; FALLBACK15-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK15-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK15-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK15-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK15-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK15-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK15-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK15-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK15-NEXT: leal (,%rax,8), %ecx
+; FALLBACK15-NEXT: andl $56, %ecx
+; FALLBACK15-NEXT: andl $56, %eax
+; FALLBACK15-NEXT: movq -96(%rsp,%rax), %rdi
+; FALLBACK15-NEXT: movq -104(%rsp,%rax), %r9
+; FALLBACK15-NEXT: movq %r9, %rsi
+; FALLBACK15-NEXT: shrdq %cl, %rdi, %rsi
+; FALLBACK15-NEXT: movq -112(%rsp,%rax), %r10
+; FALLBACK15-NEXT: movq %r10, %r8
+; FALLBACK15-NEXT: shrdq %cl, %r9, %r8
+; FALLBACK15-NEXT: movq -80(%rsp,%rax), %r9
+; FALLBACK15-NEXT: movq -88(%rsp,%rax), %r11
+; FALLBACK15-NEXT: movq %r11, %rbx
+; FALLBACK15-NEXT: shrdq %cl, %r9, %rbx
+; FALLBACK15-NEXT: shrdq %cl, %r11, %rdi
+; FALLBACK15-NEXT: movq -72(%rsp,%rax), %r11
+; FALLBACK15-NEXT: shrdq %cl, %r11, %r9
+; FALLBACK15-NEXT: movq -128(%rsp,%rax), %r14
+; FALLBACK15-NEXT: movq -120(%rsp,%rax), %rax
+; FALLBACK15-NEXT: movq %rax, %r15
+; FALLBACK15-NEXT: shrdq %cl, %r10, %r15
+; FALLBACK15-NEXT: sarxq %rcx, %r11, %r10
+; FALLBACK15-NEXT: # kill: def $cl killed $cl killed $rcx
+; FALLBACK15-NEXT: shrdq %cl, %rax, %r14
+; FALLBACK15-NEXT: movq %r15, 8(%rdx)
+; FALLBACK15-NEXT: movq %r9, 48(%rdx)
+; FALLBACK15-NEXT: movq %rdi, 32(%rdx)
+; FALLBACK15-NEXT: movq %rbx, 40(%rdx)
+; FALLBACK15-NEXT: movq %r8, 16(%rdx)
+; FALLBACK15-NEXT: movq %rsi, 24(%rdx)
+; FALLBACK15-NEXT: movq %r14, (%rdx)
+; FALLBACK15-NEXT: movq %r10, 56(%rdx)
+; FALLBACK15-NEXT: popq %rbx
+; FALLBACK15-NEXT: popq %r14
+; FALLBACK15-NEXT: popq %r15
+; FALLBACK15-NEXT: vzeroupper
+; FALLBACK15-NEXT: retq
+;
+; FALLBACK16-LABEL: ashr_64bytes:
+; FALLBACK16: # %bb.0:
+; FALLBACK16-NEXT: pushl %ebp
+; FALLBACK16-NEXT: pushl %ebx
+; FALLBACK16-NEXT: pushl %edi
+; FALLBACK16-NEXT: pushl %esi
+; FALLBACK16-NEXT: subl $204, %esp
+; FALLBACK16-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK16-NEXT: movl (%ecx), %eax
+; FALLBACK16-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: movl 4(%ecx), %eax
+; FALLBACK16-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: movl 8(%ecx), %eax
+; FALLBACK16-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: movl 12(%ecx), %eax
+; FALLBACK16-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: movl 16(%ecx), %eax
+; FALLBACK16-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: movl 20(%ecx), %eax
+; FALLBACK16-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: movl 24(%ecx), %eax
+; FALLBACK16-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: movl 28(%ecx), %eax
+; FALLBACK16-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: movl 32(%ecx), %eax
+; FALLBACK16-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: movl 36(%ecx), %eax
+; FALLBACK16-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: movl 40(%ecx), %ebx
+; FALLBACK16-NEXT: movl 44(%ecx), %edi
+; FALLBACK16-NEXT: movl 48(%ecx), %esi
+; FALLBACK16-NEXT: movl 52(%ecx), %edx
+; FALLBACK16-NEXT: movl 56(%ecx), %eax
+; FALLBACK16-NEXT: movl 60(%ecx), %ecx
+; FALLBACK16-NEXT: movl {{[0-9]+}}(%esp), %ebp
+; FALLBACK16-NEXT: movl (%ebp), %ebp
+; FALLBACK16-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movl %ebx, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK16-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK16-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK16-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK16-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK16-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK16-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK16-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK16-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK16-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK16-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: sarl $31, %ecx
+; FALLBACK16-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK16-NEXT: movl %ebp, %ecx
+; FALLBACK16-NEXT: movl %ebp, %esi
+; FALLBACK16-NEXT: andl $60, %esi
+; FALLBACK16-NEXT: movl 68(%esp,%esi), %edx
+; FALLBACK16-NEXT: shll $3, %ecx
+; FALLBACK16-NEXT: andl $24, %ecx
+; FALLBACK16-NEXT: movl %edx, %eax
+; FALLBACK16-NEXT: shrl %cl, %eax
+; FALLBACK16-NEXT: movl 72(%esp,%esi), %edi
+; FALLBACK16-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: addl %edi, %edi
+; FALLBACK16-NEXT: movb %cl, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
+; FALLBACK16-NEXT: movl %ecx, %ebx
+; FALLBACK16-NEXT: movb {{[-0-9]+}}(%e{{[sb]}}p), %ch # 1-byte Reload
+; FALLBACK16-NEXT: notb %ch
+; FALLBACK16-NEXT: movb %ch, %cl
+; FALLBACK16-NEXT: movb %ch, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
+; FALLBACK16-NEXT: shll %cl, %edi
+; FALLBACK16-NEXT: orl %eax, %edi
+; FALLBACK16-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: movl 64(%esp,%esi), %eax
+; FALLBACK16-NEXT: movb %bl, %cl
+; FALLBACK16-NEXT: shrl %cl, %eax
+; FALLBACK16-NEXT: addl %edx, %edx
+; FALLBACK16-NEXT: movb %ch, %cl
+; FALLBACK16-NEXT: shll %cl, %edx
+; FALLBACK16-NEXT: orl %eax, %edx
+; FALLBACK16-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: movl 76(%esp,%esi), %ebp
+; FALLBACK16-NEXT: movl %ebp, %edx
+; FALLBACK16-NEXT: movb %bl, %cl
+; FALLBACK16-NEXT: shrl %cl, %edx
+; FALLBACK16-NEXT: movl 80(%esp,%esi), %edi
+; FALLBACK16-NEXT: leal (%edi,%edi), %eax
+; FALLBACK16-NEXT: movb %ch, %cl
+; FALLBACK16-NEXT: shll %cl, %eax
+; FALLBACK16-NEXT: orl %edx, %eax
+; FALLBACK16-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: movb %bl, %cl
+; FALLBACK16-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK16-NEXT: shrl %cl, %eax
+; FALLBACK16-NEXT: addl %ebp, %ebp
+; FALLBACK16-NEXT: movb %ch, %cl
+; FALLBACK16-NEXT: shll %cl, %ebp
+; FALLBACK16-NEXT: orl %eax, %ebp
+; FALLBACK16-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: movl %esi, %edx
+; FALLBACK16-NEXT: movl 84(%esp,%esi), %eax
+; FALLBACK16-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: movb %bl, %cl
+; FALLBACK16-NEXT: shrl %cl, %eax
+; FALLBACK16-NEXT: movl 88(%esp,%esi), %esi
+; FALLBACK16-NEXT: leal (%esi,%esi), %ebp
+; FALLBACK16-NEXT: movb %ch, %cl
+; FALLBACK16-NEXT: shll %cl, %ebp
+; FALLBACK16-NEXT: orl %eax, %ebp
+; FALLBACK16-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: movb %bl, %cl
+; FALLBACK16-NEXT: shrl %cl, %edi
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; FALLBACK16-NEXT: addl %ebx, %ebx
+; FALLBACK16-NEXT: movb %ch, %cl
+; FALLBACK16-NEXT: shll %cl, %ebx
+; FALLBACK16-NEXT: orl %edi, %ebx
+; FALLBACK16-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: movl %edx, %eax
+; FALLBACK16-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: movl 92(%esp,%edx), %ebp
+; FALLBACK16-NEXT: movl %ebp, %edx
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; FALLBACK16-NEXT: movb %bl, %cl
+; FALLBACK16-NEXT: shrl %cl, %edx
+; FALLBACK16-NEXT: movl 96(%esp,%eax), %edi
+; FALLBACK16-NEXT: leal (%edi,%edi), %eax
+; FALLBACK16-NEXT: movb %ch, %cl
+; FALLBACK16-NEXT: shll %cl, %eax
+; FALLBACK16-NEXT: orl %edx, %eax
+; FALLBACK16-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: movb %bl, %cl
+; FALLBACK16-NEXT: shrl %cl, %esi
+; FALLBACK16-NEXT: addl %ebp, %ebp
+; FALLBACK16-NEXT: movb %ch, %cl
+; FALLBACK16-NEXT: shll %cl, %ebp
+; FALLBACK16-NEXT: orl %esi, %ebp
+; FALLBACK16-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK16-NEXT: movl 100(%esp,%edx), %eax
+; FALLBACK16-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: movb %bl, %cl
+; FALLBACK16-NEXT: shrl %cl, %eax
+; FALLBACK16-NEXT: movl 104(%esp,%edx), %esi
+; FALLBACK16-NEXT: leal (%esi,%esi), %ebp
+; FALLBACK16-NEXT: movb %ch, %cl
+; FALLBACK16-NEXT: shll %cl, %ebp
+; FALLBACK16-NEXT: orl %eax, %ebp
+; FALLBACK16-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: movl %ebx, %edx
+; FALLBACK16-NEXT: movb %dl, %cl
+; FALLBACK16-NEXT: shrl %cl, %edi
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; FALLBACK16-NEXT: addl %ebx, %ebx
+; FALLBACK16-NEXT: movb %ch, %cl
+; FALLBACK16-NEXT: shll %cl, %ebx
+; FALLBACK16-NEXT: orl %edi, %ebx
+; FALLBACK16-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; FALLBACK16-NEXT: movl 108(%esp,%ebp), %edi
+; FALLBACK16-NEXT: movl %edi, %eax
+; FALLBACK16-NEXT: movl %edx, %ebx
+; FALLBACK16-NEXT: movl %ebx, %ecx
+; FALLBACK16-NEXT: shrl %cl, %eax
+; FALLBACK16-NEXT: movl 112(%esp,%ebp), %ecx
+; FALLBACK16-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: movl %ebp, %edx
+; FALLBACK16-NEXT: leal (%ecx,%ecx), %ebp
+; FALLBACK16-NEXT: movb {{[-0-9]+}}(%e{{[sb]}}p), %ch # 1-byte Reload
+; FALLBACK16-NEXT: movb %ch, %cl
+; FALLBACK16-NEXT: shll %cl, %ebp
+; FALLBACK16-NEXT: orl %eax, %ebp
+; FALLBACK16-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK16-NEXT: movb %bl, %cl
+; FALLBACK16-NEXT: shrl %cl, %esi
+; FALLBACK16-NEXT: addl %edi, %edi
+; FALLBACK16-NEXT: movb %ch, %cl
+; FALLBACK16-NEXT: shll %cl, %edi
+; FALLBACK16-NEXT: orl %esi, %edi
+; FALLBACK16-NEXT: movl 116(%esp,%edx), %esi
+; FALLBACK16-NEXT: movl %esi, %eax
+; FALLBACK16-NEXT: movl %ebx, %ecx
+; FALLBACK16-NEXT: shrl %cl, %eax
+; FALLBACK16-NEXT: movl 120(%esp,%edx), %edx
+; FALLBACK16-NEXT: leal (%edx,%edx), %ebp
+; FALLBACK16-NEXT: movb {{[-0-9]+}}(%e{{[sb]}}p), %ch # 1-byte Reload
+; FALLBACK16-NEXT: movb %ch, %cl
+; FALLBACK16-NEXT: shll %cl, %ebp
+; FALLBACK16-NEXT: orl %eax, %ebp
+; FALLBACK16-NEXT: movb %bl, %cl
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK16-NEXT: shrl %cl, %eax
+; FALLBACK16-NEXT: addl %esi, %esi
+; FALLBACK16-NEXT: movb %ch, %cl
+; FALLBACK16-NEXT: shll %cl, %esi
+; FALLBACK16-NEXT: orl %eax, %esi
+; FALLBACK16-NEXT: movb %bl, %cl
+; FALLBACK16-NEXT: movl %edx, %eax
+; FALLBACK16-NEXT: shrl %cl, %eax
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK16-NEXT: movl 124(%esp,%edx), %ebx
+; FALLBACK16-NEXT: leal (%ebx,%ebx), %edx
+; FALLBACK16-NEXT: movb %ch, %cl
+; FALLBACK16-NEXT: shll %cl, %edx
+; FALLBACK16-NEXT: orl %eax, %edx
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK16-NEXT: # kill: def $cl killed $cl killed $ecx
+; FALLBACK16-NEXT: sarl %cl, %ebx
+; FALLBACK16-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK16-NEXT: movl %ebx, 60(%eax)
+; FALLBACK16-NEXT: movl %edx, 56(%eax)
+; FALLBACK16-NEXT: movl %esi, 48(%eax)
+; FALLBACK16-NEXT: movl %ebp, 52(%eax)
+; FALLBACK16-NEXT: movl %edi, 40(%eax)
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK16-NEXT: movl %ecx, 44(%eax)
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK16-NEXT: movl %ecx, 32(%eax)
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK16-NEXT: movl %ecx, 36(%eax)
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK16-NEXT: movl %ecx, 24(%eax)
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK16-NEXT: movl %ecx, 28(%eax)
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK16-NEXT: movl %ecx, 16(%eax)
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK16-NEXT: movl %ecx, 20(%eax)
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK16-NEXT: movl %ecx, 8(%eax)
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK16-NEXT: movl %ecx, 12(%eax)
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK16-NEXT: movl %ecx, (%eax)
+; FALLBACK16-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK16-NEXT: movl %ecx, 4(%eax)
+; FALLBACK16-NEXT: addl $204, %esp
+; FALLBACK16-NEXT: popl %esi
+; FALLBACK16-NEXT: popl %edi
+; FALLBACK16-NEXT: popl %ebx
+; FALLBACK16-NEXT: popl %ebp
+; FALLBACK16-NEXT: retl
+;
+; FALLBACK17-LABEL: ashr_64bytes:
+; FALLBACK17: # %bb.0:
+; FALLBACK17-NEXT: pushl %ebp
+; FALLBACK17-NEXT: pushl %ebx
+; FALLBACK17-NEXT: pushl %edi
+; FALLBACK17-NEXT: pushl %esi
+; FALLBACK17-NEXT: subl $188, %esp
+; FALLBACK17-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK17-NEXT: movl (%eax), %ecx
+; FALLBACK17-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK17-NEXT: movl 4(%eax), %ecx
+; FALLBACK17-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK17-NEXT: movl 8(%eax), %ecx
+; FALLBACK17-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK17-NEXT: movl 12(%eax), %ecx
+; FALLBACK17-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK17-NEXT: movl 16(%eax), %ecx
+; FALLBACK17-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK17-NEXT: movl 20(%eax), %ecx
+; FALLBACK17-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK17-NEXT: movl 24(%eax), %ecx
+; FALLBACK17-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK17-NEXT: movl 28(%eax), %ecx
+; FALLBACK17-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK17-NEXT: movl 32(%eax), %ecx
+; FALLBACK17-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK17-NEXT: movl 36(%eax), %ecx
+; FALLBACK17-NEXT: movl %ecx, (%esp) # 4-byte Spill
+; FALLBACK17-NEXT: movl 40(%eax), %ebp
+; FALLBACK17-NEXT: movl 44(%eax), %ebx
+; FALLBACK17-NEXT: movl 48(%eax), %edi
+; FALLBACK17-NEXT: movl 52(%eax), %esi
+; FALLBACK17-NEXT: movl 56(%eax), %edx
+; FALLBACK17-NEXT: movl 60(%eax), %eax
+; FALLBACK17-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK17-NEXT: movl (%ecx), %ecx
+; FALLBACK17-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movl %ebx, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movl %ebp, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movl (%esp), %edx # 4-byte Reload
+; FALLBACK17-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK17-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK17-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK17-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK17-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK17-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK17-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK17-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK17-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK17-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: sarl $31, %eax
+; FALLBACK17-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK17-NEXT: movl %ecx, %ebp
+; FALLBACK17-NEXT: andl $60, %ebp
+; FALLBACK17-NEXT: movl 56(%esp,%ebp), %edx
+; FALLBACK17-NEXT: movl 52(%esp,%ebp), %eax
+; FALLBACK17-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK17-NEXT: shll $3, %ecx
+; FALLBACK17-NEXT: andl $24, %ecx
+; FALLBACK17-NEXT: shrdl %cl, %edx, %eax
+; FALLBACK17-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK17-NEXT: movl 64(%esp,%ebp), %edi
+; FALLBACK17-NEXT: movl 60(%esp,%ebp), %eax
+; FALLBACK17-NEXT: movl %eax, %esi
+; FALLBACK17-NEXT: shrdl %cl, %edi, %esi
+; FALLBACK17-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK17-NEXT: shrdl %cl, %eax, %edx
+; FALLBACK17-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK17-NEXT: movl 72(%esp,%ebp), %esi
+; FALLBACK17-NEXT: movl 68(%esp,%ebp), %eax
+; FALLBACK17-NEXT: movl %eax, %edx
+; FALLBACK17-NEXT: shrdl %cl, %esi, %edx
+; FALLBACK17-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK17-NEXT: shrdl %cl, %eax, %edi
+; FALLBACK17-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK17-NEXT: movl 80(%esp,%ebp), %edi
+; FALLBACK17-NEXT: movl 76(%esp,%ebp), %eax
+; FALLBACK17-NEXT: movl %eax, %edx
+; FALLBACK17-NEXT: shrdl %cl, %edi, %edx
+; FALLBACK17-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK17-NEXT: shrdl %cl, %eax, %esi
+; FALLBACK17-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK17-NEXT: movl 88(%esp,%ebp), %esi
+; FALLBACK17-NEXT: movl 84(%esp,%ebp), %eax
+; FALLBACK17-NEXT: movl %eax, %edx
+; FALLBACK17-NEXT: shrdl %cl, %esi, %edx
+; FALLBACK17-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK17-NEXT: movl %esi, %edx
+; FALLBACK17-NEXT: shrdl %cl, %eax, %edi
+; FALLBACK17-NEXT: movl %edi, (%esp) # 4-byte Spill
+; FALLBACK17-NEXT: movl 96(%esp,%ebp), %esi
+; FALLBACK17-NEXT: movl 92(%esp,%ebp), %eax
+; FALLBACK17-NEXT: movl %eax, %edi
+; FALLBACK17-NEXT: shrdl %cl, %esi, %edi
+; FALLBACK17-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK17-NEXT: shrdl %cl, %eax, %edx
+; FALLBACK17-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK17-NEXT: movl 104(%esp,%ebp), %edx
+; FALLBACK17-NEXT: movl 100(%esp,%ebp), %eax
+; FALLBACK17-NEXT: movl %eax, %edi
+; FALLBACK17-NEXT: shrdl %cl, %edx, %edi
+; FALLBACK17-NEXT: shrdl %cl, %eax, %esi
+; FALLBACK17-NEXT: movl 48(%esp,%ebp), %ebx
+; FALLBACK17-NEXT: movl 108(%esp,%ebp), %eax
+; FALLBACK17-NEXT: shrdl %cl, %eax, %edx
+; FALLBACK17-NEXT: movl {{[0-9]+}}(%esp), %ebp
+; FALLBACK17-NEXT: movl %edx, 56(%ebp)
+; FALLBACK17-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK17-NEXT: shrdl %cl, %edx, %ebx
+; FALLBACK17-NEXT: # kill: def $cl killed $cl killed $ecx
+; FALLBACK17-NEXT: sarl %cl, %eax
+; FALLBACK17-NEXT: movl %eax, 60(%ebp)
+; FALLBACK17-NEXT: movl %esi, 48(%ebp)
+; FALLBACK17-NEXT: movl %edi, 52(%ebp)
+; FALLBACK17-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK17-NEXT: movl %eax, 40(%ebp)
+; FALLBACK17-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK17-NEXT: movl %eax, 44(%ebp)
+; FALLBACK17-NEXT: movl (%esp), %eax # 4-byte Reload
+; FALLBACK17-NEXT: movl %eax, 32(%ebp)
+; FALLBACK17-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK17-NEXT: movl %eax, 36(%ebp)
+; FALLBACK17-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK17-NEXT: movl %eax, 24(%ebp)
+; FALLBACK17-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK17-NEXT: movl %eax, 28(%ebp)
+; FALLBACK17-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK17-NEXT: movl %eax, 16(%ebp)
+; FALLBACK17-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK17-NEXT: movl %eax, 20(%ebp)
+; FALLBACK17-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK17-NEXT: movl %eax, 8(%ebp)
+; FALLBACK17-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK17-NEXT: movl %eax, 12(%ebp)
+; FALLBACK17-NEXT: movl %ebx, (%ebp)
+; FALLBACK17-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK17-NEXT: movl %eax, 4(%ebp)
+; FALLBACK17-NEXT: addl $188, %esp
+; FALLBACK17-NEXT: popl %esi
+; FALLBACK17-NEXT: popl %edi
+; FALLBACK17-NEXT: popl %ebx
+; FALLBACK17-NEXT: popl %ebp
+; FALLBACK17-NEXT: retl
+;
+; FALLBACK18-LABEL: ashr_64bytes:
+; FALLBACK18: # %bb.0:
+; FALLBACK18-NEXT: pushl %ebp
+; FALLBACK18-NEXT: pushl %ebx
+; FALLBACK18-NEXT: pushl %edi
+; FALLBACK18-NEXT: pushl %esi
+; FALLBACK18-NEXT: subl $204, %esp
+; FALLBACK18-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK18-NEXT: movl (%eax), %ecx
+; FALLBACK18-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: movl 4(%eax), %ecx
+; FALLBACK18-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: movl 8(%eax), %ecx
+; FALLBACK18-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: movl 12(%eax), %ecx
+; FALLBACK18-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: movl 16(%eax), %ecx
+; FALLBACK18-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: movl 20(%eax), %ecx
+; FALLBACK18-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: movl 24(%eax), %ecx
+; FALLBACK18-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: movl 28(%eax), %ecx
+; FALLBACK18-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: movl 32(%eax), %ecx
+; FALLBACK18-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: movl 36(%eax), %ecx
+; FALLBACK18-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: movl 40(%eax), %ebp
+; FALLBACK18-NEXT: movl 44(%eax), %ebx
+; FALLBACK18-NEXT: movl 48(%eax), %edi
+; FALLBACK18-NEXT: movl 52(%eax), %esi
+; FALLBACK18-NEXT: movl 56(%eax), %edx
+; FALLBACK18-NEXT: movl 60(%eax), %ecx
+; FALLBACK18-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK18-NEXT: movl (%eax), %eax
+; FALLBACK18-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl %ebx, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl %ebp, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK18-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK18-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK18-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK18-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK18-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK18-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK18-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK18-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK18-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK18-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: sarl $31, %ecx
+; FALLBACK18-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl %eax, %ecx
+; FALLBACK18-NEXT: leal (,%eax,8), %edx
+; FALLBACK18-NEXT: andl $24, %edx
+; FALLBACK18-NEXT: andl $60, %ecx
+; FALLBACK18-NEXT: movl 68(%esp,%ecx), %esi
+; FALLBACK18-NEXT: movl 72(%esp,%ecx), %edi
+; FALLBACK18-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: shrxl %edx, %esi, %eax
+; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: movl %edx, %ebx
+; FALLBACK18-NEXT: notb %bl
+; FALLBACK18-NEXT: leal (%edi,%edi), %ebp
+; FALLBACK18-NEXT: shlxl %ebx, %ebp, %eax
+; FALLBACK18-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: shrxl %edx, 64(%esp,%ecx), %edi
+; FALLBACK18-NEXT: addl %esi, %esi
+; FALLBACK18-NEXT: shlxl %ebx, %esi, %eax
+; FALLBACK18-NEXT: orl %edi, %eax
+; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: movl 80(%esp,%ecx), %esi
+; FALLBACK18-NEXT: leal (%esi,%esi), %edi
+; FALLBACK18-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK18-NEXT: movl 76(%esp,%ecx), %edi
+; FALLBACK18-NEXT: shrxl %edx, %edi, %ebp
+; FALLBACK18-NEXT: orl %ebp, %eax
+; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK18-NEXT: addl %edi, %edi
+; FALLBACK18-NEXT: shlxl %ebx, %edi, %edi
+; FALLBACK18-NEXT: orl %eax, %edi
+; FALLBACK18-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: movl 88(%esp,%ecx), %eax
+; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: leal (%eax,%eax), %edi
+; FALLBACK18-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK18-NEXT: movl 84(%esp,%ecx), %edi
+; FALLBACK18-NEXT: shrxl %edx, %edi, %ebp
+; FALLBACK18-NEXT: orl %ebp, %eax
+; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: shrxl %edx, %esi, %esi
+; FALLBACK18-NEXT: addl %edi, %edi
+; FALLBACK18-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK18-NEXT: orl %esi, %eax
+; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: movl 96(%esp,%ecx), %esi
+; FALLBACK18-NEXT: leal (%esi,%esi), %edi
+; FALLBACK18-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK18-NEXT: movl 92(%esp,%ecx), %edi
+; FALLBACK18-NEXT: shrxl %edx, %edi, %ebp
+; FALLBACK18-NEXT: orl %ebp, %eax
+; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK18-NEXT: addl %edi, %edi
+; FALLBACK18-NEXT: shlxl %ebx, %edi, %edi
+; FALLBACK18-NEXT: orl %eax, %edi
+; FALLBACK18-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: movl 104(%esp,%ecx), %eax
+; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: leal (%eax,%eax), %edi
+; FALLBACK18-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK18-NEXT: movl 100(%esp,%ecx), %edi
+; FALLBACK18-NEXT: shrxl %edx, %edi, %ebp
+; FALLBACK18-NEXT: orl %ebp, %eax
+; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: shrxl %edx, %esi, %esi
+; FALLBACK18-NEXT: addl %edi, %edi
+; FALLBACK18-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK18-NEXT: orl %esi, %eax
+; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: movl 112(%esp,%ecx), %eax
+; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: leal (%eax,%eax), %esi
+; FALLBACK18-NEXT: shlxl %ebx, %esi, %eax
+; FALLBACK18-NEXT: movl 108(%esp,%ecx), %esi
+; FALLBACK18-NEXT: movl %ecx, %edi
+; FALLBACK18-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: shrxl %edx, %esi, %ebp
+; FALLBACK18-NEXT: orl %ebp, %eax
+; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; FALLBACK18-NEXT: addl %esi, %esi
+; FALLBACK18-NEXT: shlxl %ebx, %esi, %esi
+; FALLBACK18-NEXT: orl %ecx, %esi
+; FALLBACK18-NEXT: movl 120(%esp,%edi), %ebp
+; FALLBACK18-NEXT: leal (%ebp,%ebp), %ecx
+; FALLBACK18-NEXT: shlxl %ebx, %ecx, %ecx
+; FALLBACK18-NEXT: movl 116(%esp,%edi), %eax
+; FALLBACK18-NEXT: shrxl %edx, %eax, %edi
+; FALLBACK18-NEXT: orl %edi, %ecx
+; FALLBACK18-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; FALLBACK18-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: addl %eax, %eax
+; FALLBACK18-NEXT: shlxl %ebx, %eax, %edi
+; FALLBACK18-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; FALLBACK18-NEXT: shrxl %edx, %ebp, %eax
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; FALLBACK18-NEXT: movl 124(%esp,%ebp), %ebp
+; FALLBACK18-NEXT: sarxl %edx, %ebp, %edx
+; FALLBACK18-NEXT: addl %ebp, %ebp
+; FALLBACK18-NEXT: shlxl %ebx, %ebp, %ebx
+; FALLBACK18-NEXT: orl %eax, %ebx
+; FALLBACK18-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK18-NEXT: movl %edx, 60(%eax)
+; FALLBACK18-NEXT: movl %ebx, 56(%eax)
+; FALLBACK18-NEXT: movl %edi, 48(%eax)
+; FALLBACK18-NEXT: movl %ecx, 52(%eax)
+; FALLBACK18-NEXT: movl %esi, 40(%eax)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK18-NEXT: movl %ecx, 44(%eax)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK18-NEXT: movl %ecx, 32(%eax)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK18-NEXT: movl %ecx, 36(%eax)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK18-NEXT: movl %ecx, 24(%eax)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK18-NEXT: movl %ecx, 28(%eax)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK18-NEXT: movl %ecx, 16(%eax)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK18-NEXT: movl %ecx, 20(%eax)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK18-NEXT: movl %ecx, 8(%eax)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK18-NEXT: movl %ecx, 12(%eax)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK18-NEXT: movl %ecx, (%eax)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK18-NEXT: movl %ecx, 4(%eax)
+; FALLBACK18-NEXT: addl $204, %esp
+; FALLBACK18-NEXT: popl %esi
+; FALLBACK18-NEXT: popl %edi
+; FALLBACK18-NEXT: popl %ebx
+; FALLBACK18-NEXT: popl %ebp
+; FALLBACK18-NEXT: retl
+;
+; FALLBACK19-LABEL: ashr_64bytes:
+; FALLBACK19: # %bb.0:
+; FALLBACK19-NEXT: pushl %ebp
+; FALLBACK19-NEXT: pushl %ebx
+; FALLBACK19-NEXT: pushl %edi
+; FALLBACK19-NEXT: pushl %esi
+; FALLBACK19-NEXT: subl $188, %esp
+; FALLBACK19-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK19-NEXT: movl (%eax), %ecx
+; FALLBACK19-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK19-NEXT: movl 4(%eax), %ecx
+; FALLBACK19-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK19-NEXT: movl 8(%eax), %ecx
+; FALLBACK19-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK19-NEXT: movl 12(%eax), %ecx
+; FALLBACK19-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK19-NEXT: movl 16(%eax), %ecx
+; FALLBACK19-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK19-NEXT: movl 20(%eax), %ecx
+; FALLBACK19-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK19-NEXT: movl 24(%eax), %ecx
+; FALLBACK19-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK19-NEXT: movl 28(%eax), %ecx
+; FALLBACK19-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK19-NEXT: movl 32(%eax), %ecx
+; FALLBACK19-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK19-NEXT: movl 36(%eax), %ecx
+; FALLBACK19-NEXT: movl %ecx, (%esp) # 4-byte Spill
+; FALLBACK19-NEXT: movl 40(%eax), %ebp
+; FALLBACK19-NEXT: movl 44(%eax), %ebx
+; FALLBACK19-NEXT: movl 48(%eax), %edi
+; FALLBACK19-NEXT: movl 52(%eax), %esi
+; FALLBACK19-NEXT: movl 56(%eax), %edx
+; FALLBACK19-NEXT: movl 60(%eax), %eax
+; FALLBACK19-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK19-NEXT: movl (%ecx), %ecx
+; FALLBACK19-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movl %ebx, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movl %ebp, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movl (%esp), %edx # 4-byte Reload
+; FALLBACK19-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK19-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK19-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK19-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK19-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK19-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK19-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK19-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK19-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK19-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: sarl $31, %eax
+; FALLBACK19-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK19-NEXT: movl %ecx, %ebp
+; FALLBACK19-NEXT: andl $60, %ebp
+; FALLBACK19-NEXT: movl 56(%esp,%ebp), %edx
+; FALLBACK19-NEXT: movl 52(%esp,%ebp), %eax
+; FALLBACK19-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK19-NEXT: shll $3, %ecx
+; FALLBACK19-NEXT: andl $24, %ecx
+; FALLBACK19-NEXT: shrdl %cl, %edx, %eax
+; FALLBACK19-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK19-NEXT: movl 64(%esp,%ebp), %edi
+; FALLBACK19-NEXT: movl 60(%esp,%ebp), %eax
+; FALLBACK19-NEXT: movl %eax, %esi
+; FALLBACK19-NEXT: shrdl %cl, %edi, %esi
+; FALLBACK19-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK19-NEXT: shrdl %cl, %eax, %edx
+; FALLBACK19-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK19-NEXT: movl 72(%esp,%ebp), %esi
+; FALLBACK19-NEXT: movl 68(%esp,%ebp), %eax
+; FALLBACK19-NEXT: movl %eax, %edx
+; FALLBACK19-NEXT: shrdl %cl, %esi, %edx
+; FALLBACK19-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK19-NEXT: shrdl %cl, %eax, %edi
+; FALLBACK19-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK19-NEXT: movl 80(%esp,%ebp), %edi
+; FALLBACK19-NEXT: movl 76(%esp,%ebp), %eax
+; FALLBACK19-NEXT: movl %eax, %edx
+; FALLBACK19-NEXT: shrdl %cl, %edi, %edx
+; FALLBACK19-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK19-NEXT: shrdl %cl, %eax, %esi
+; FALLBACK19-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK19-NEXT: movl 88(%esp,%ebp), %ebx
+; FALLBACK19-NEXT: movl 84(%esp,%ebp), %eax
+; FALLBACK19-NEXT: movl %eax, %edx
+; FALLBACK19-NEXT: shrdl %cl, %ebx, %edx
+; FALLBACK19-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK19-NEXT: shrdl %cl, %eax, %edi
+; FALLBACK19-NEXT: movl %edi, (%esp) # 4-byte Spill
+; FALLBACK19-NEXT: movl 96(%esp,%ebp), %esi
+; FALLBACK19-NEXT: movl 92(%esp,%ebp), %eax
+; FALLBACK19-NEXT: movl %eax, %edx
+; FALLBACK19-NEXT: shrdl %cl, %esi, %edx
+; FALLBACK19-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK19-NEXT: shrdl %cl, %eax, %ebx
+; FALLBACK19-NEXT: movl 104(%esp,%ebp), %eax
+; FALLBACK19-NEXT: movl 100(%esp,%ebp), %edi
+; FALLBACK19-NEXT: movl %edi, %edx
+; FALLBACK19-NEXT: shrdl %cl, %eax, %edx
+; FALLBACK19-NEXT: shrdl %cl, %edi, %esi
+; FALLBACK19-NEXT: movl 48(%esp,%ebp), %edi
+; FALLBACK19-NEXT: movl 108(%esp,%ebp), %ebp
+; FALLBACK19-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK19-NEXT: shrdl %cl, %ebp, %eax
+; FALLBACK19-NEXT: movl {{[0-9]+}}(%esp), %ebp
+; FALLBACK19-NEXT: movl %eax, 56(%ebp)
+; FALLBACK19-NEXT: movl %esi, 48(%ebp)
+; FALLBACK19-NEXT: movl %edx, 52(%ebp)
+; FALLBACK19-NEXT: movl %ebx, 40(%ebp)
+; FALLBACK19-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK19-NEXT: movl %eax, 44(%ebp)
+; FALLBACK19-NEXT: movl (%esp), %eax # 4-byte Reload
+; FALLBACK19-NEXT: movl %eax, 32(%ebp)
+; FALLBACK19-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK19-NEXT: movl %eax, 36(%ebp)
+; FALLBACK19-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK19-NEXT: movl %eax, 24(%ebp)
+; FALLBACK19-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK19-NEXT: movl %eax, 28(%ebp)
+; FALLBACK19-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK19-NEXT: movl %eax, 16(%ebp)
+; FALLBACK19-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK19-NEXT: movl %eax, 20(%ebp)
+; FALLBACK19-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK19-NEXT: movl %eax, 8(%ebp)
+; FALLBACK19-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK19-NEXT: movl %eax, 12(%ebp)
+; FALLBACK19-NEXT: sarxl %ecx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK19-NEXT: # kill: def $cl killed $cl killed $ecx
+; FALLBACK19-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK19-NEXT: shrdl %cl, %edx, %edi
+; FALLBACK19-NEXT: movl %edi, (%ebp)
+; FALLBACK19-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK19-NEXT: movl %ecx, 4(%ebp)
+; FALLBACK19-NEXT: movl %eax, 60(%ebp)
+; FALLBACK19-NEXT: addl $188, %esp
+; FALLBACK19-NEXT: popl %esi
+; FALLBACK19-NEXT: popl %edi
+; FALLBACK19-NEXT: popl %ebx
+; FALLBACK19-NEXT: popl %ebp
+; FALLBACK19-NEXT: retl
+;
+; FALLBACK20-LABEL: ashr_64bytes:
+; FALLBACK20: # %bb.0:
+; FALLBACK20-NEXT: pushl %ebp
+; FALLBACK20-NEXT: pushl %ebx
+; FALLBACK20-NEXT: pushl %edi
+; FALLBACK20-NEXT: pushl %esi
+; FALLBACK20-NEXT: subl $204, %esp
+; FALLBACK20-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK20-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK20-NEXT: movups (%ecx), %xmm0
+; FALLBACK20-NEXT: movups 16(%ecx), %xmm1
+; FALLBACK20-NEXT: movups 32(%ecx), %xmm2
+; FALLBACK20-NEXT: movl 48(%ecx), %edx
+; FALLBACK20-NEXT: movl 52(%ecx), %esi
+; FALLBACK20-NEXT: movl 56(%ecx), %edi
+; FALLBACK20-NEXT: movl 60(%ecx), %ecx
+; FALLBACK20-NEXT: movl (%eax), %eax
+; FALLBACK20-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK20-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; FALLBACK20-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; FALLBACK20-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK20-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; FALLBACK20-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; FALLBACK20-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; FALLBACK20-NEXT: sarl $31, %ecx
+; FALLBACK20-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK20-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK20-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK20-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK20-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK20-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK20-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK20-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK20-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK20-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK20-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK20-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK20-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK20-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK20-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK20-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK20-NEXT: movl %eax, %esi
+; FALLBACK20-NEXT: andl $60, %esi
+; FALLBACK20-NEXT: movl 68(%esp,%esi), %edx
+; FALLBACK20-NEXT: shll $3, %eax
+; FALLBACK20-NEXT: andl $24, %eax
+; FALLBACK20-NEXT: movl %edx, %edi
+; FALLBACK20-NEXT: movl %eax, %ecx
+; FALLBACK20-NEXT: shrl %cl, %edi
+; FALLBACK20-NEXT: movl 72(%esp,%esi), %ecx
+; FALLBACK20-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK20-NEXT: leal (%ecx,%ecx), %ebx
+; FALLBACK20-NEXT: movb %al, %ch
+; FALLBACK20-NEXT: notb %ch
+; FALLBACK20-NEXT: movb %ch, %cl
+; FALLBACK20-NEXT: shll %cl, %ebx
+; FALLBACK20-NEXT: orl %edi, %ebx
+; FALLBACK20-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK20-NEXT: movl 64(%esp,%esi), %edi
+; FALLBACK20-NEXT: movb %al, %cl
+; FALLBACK20-NEXT: shrl %cl, %edi
+; FALLBACK20-NEXT: addl %edx, %edx
+; FALLBACK20-NEXT: movb %ch, %cl
+; FALLBACK20-NEXT: shll %cl, %edx
+; FALLBACK20-NEXT: orl %edi, %edx
+; FALLBACK20-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK20-NEXT: movl 76(%esp,%esi), %edx
+; FALLBACK20-NEXT: movl %edx, %ebp
+; FALLBACK20-NEXT: movb %al, %cl
+; FALLBACK20-NEXT: shrl %cl, %ebp
+; FALLBACK20-NEXT: movl 80(%esp,%esi), %edi
+; FALLBACK20-NEXT: leal (%edi,%edi), %ebx
+; FALLBACK20-NEXT: movb %ch, %cl
+; FALLBACK20-NEXT: shll %cl, %ebx
+; FALLBACK20-NEXT: orl %ebp, %ebx
+; FALLBACK20-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK20-NEXT: movb %al, %cl
+; FALLBACK20-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; FALLBACK20-NEXT: shrl %cl, %ebx
+; FALLBACK20-NEXT: addl %edx, %edx
+; FALLBACK20-NEXT: movb %ch, %cl
+; FALLBACK20-NEXT: shll %cl, %edx
+; FALLBACK20-NEXT: orl %ebx, %edx
+; FALLBACK20-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK20-NEXT: movl 84(%esp,%esi), %ebx
+; FALLBACK20-NEXT: movl %ebx, %ebp
+; FALLBACK20-NEXT: movl %eax, %edx
+; FALLBACK20-NEXT: movb %dl, %cl
+; FALLBACK20-NEXT: shrl %cl, %ebp
+; FALLBACK20-NEXT: movl 88(%esp,%esi), %eax
+; FALLBACK20-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK20-NEXT: addl %eax, %eax
+; FALLBACK20-NEXT: movb %ch, %cl
+; FALLBACK20-NEXT: shll %cl, %eax
+; FALLBACK20-NEXT: orl %ebp, %eax
+; FALLBACK20-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK20-NEXT: movb %dl, %cl
+; FALLBACK20-NEXT: shrl %cl, %edi
+; FALLBACK20-NEXT: addl %ebx, %ebx
+; FALLBACK20-NEXT: movb %ch, %cl
+; FALLBACK20-NEXT: movb %ch, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
+; FALLBACK20-NEXT: shll %cl, %ebx
+; FALLBACK20-NEXT: orl %edi, %ebx
+; FALLBACK20-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK20-NEXT: movl 92(%esp,%esi), %ebx
+; FALLBACK20-NEXT: movl %ebx, %ebp
+; FALLBACK20-NEXT: movb %dl, %cl
+; FALLBACK20-NEXT: shrl %cl, %ebp
+; FALLBACK20-NEXT: movl 96(%esp,%esi), %edi
+; FALLBACK20-NEXT: leal (%edi,%edi), %eax
+; FALLBACK20-NEXT: movb %ch, %cl
+; FALLBACK20-NEXT: shll %cl, %eax
+; FALLBACK20-NEXT: orl %ebp, %eax
+; FALLBACK20-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK20-NEXT: movb %dl, %cl
+; FALLBACK20-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK20-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK20-NEXT: shrl %cl, %eax
+; FALLBACK20-NEXT: addl %ebx, %ebx
+; FALLBACK20-NEXT: movb %ch, %cl
+; FALLBACK20-NEXT: shll %cl, %ebx
+; FALLBACK20-NEXT: orl %eax, %ebx
+; FALLBACK20-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK20-NEXT: movl 100(%esp,%esi), %ebx
+; FALLBACK20-NEXT: movl %ebx, %ebp
+; FALLBACK20-NEXT: movb %dl, %cl
+; FALLBACK20-NEXT: shrl %cl, %ebp
+; FALLBACK20-NEXT: movl 104(%esp,%esi), %edx
+; FALLBACK20-NEXT: leal (%edx,%edx), %eax
+; FALLBACK20-NEXT: movb %ch, %cl
+; FALLBACK20-NEXT: shll %cl, %eax
+; FALLBACK20-NEXT: orl %ebp, %eax
+; FALLBACK20-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK20-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK20-NEXT: movb %al, %cl
+; FALLBACK20-NEXT: shrl %cl, %edi
+; FALLBACK20-NEXT: addl %ebx, %ebx
+; FALLBACK20-NEXT: movb %ch, %cl
+; FALLBACK20-NEXT: shll %cl, %ebx
+; FALLBACK20-NEXT: orl %edi, %ebx
+; FALLBACK20-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK20-NEXT: movl 108(%esp,%esi), %edi
+; FALLBACK20-NEXT: movl %edi, %ebp
+; FALLBACK20-NEXT: movl %eax, %ecx
+; FALLBACK20-NEXT: shrl %cl, %ebp
+; FALLBACK20-NEXT: movl 112(%esp,%esi), %ecx
+; FALLBACK20-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK20-NEXT: leal (%ecx,%ecx), %ebx
+; FALLBACK20-NEXT: movb {{[-0-9]+}}(%e{{[sb]}}p), %ch # 1-byte Reload
+; FALLBACK20-NEXT: movb %ch, %cl
+; FALLBACK20-NEXT: shll %cl, %ebx
+; FALLBACK20-NEXT: orl %ebp, %ebx
+; FALLBACK20-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK20-NEXT: movb %al, %cl
+; FALLBACK20-NEXT: shrl %cl, %edx
+; FALLBACK20-NEXT: addl %edi, %edi
+; FALLBACK20-NEXT: movb %ch, %cl
+; FALLBACK20-NEXT: shll %cl, %edi
+; FALLBACK20-NEXT: orl %edx, %edi
+; FALLBACK20-NEXT: movl %esi, %edx
+; FALLBACK20-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK20-NEXT: movl 116(%esp,%esi), %esi
+; FALLBACK20-NEXT: movl %esi, %ebx
+; FALLBACK20-NEXT: movb %al, %cl
+; FALLBACK20-NEXT: shrl %cl, %ebx
+; FALLBACK20-NEXT: movl 120(%esp,%edx), %eax
+; FALLBACK20-NEXT: leal (%eax,%eax), %ebp
+; FALLBACK20-NEXT: movb %ch, %cl
+; FALLBACK20-NEXT: shll %cl, %ebp
+; FALLBACK20-NEXT: orl %ebx, %ebp
+; FALLBACK20-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK20-NEXT: movb %dl, %cl
+; FALLBACK20-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; FALLBACK20-NEXT: shrl %cl, %ebx
+; FALLBACK20-NEXT: addl %esi, %esi
+; FALLBACK20-NEXT: movb %ch, %cl
+; FALLBACK20-NEXT: shll %cl, %esi
+; FALLBACK20-NEXT: orl %ebx, %esi
+; FALLBACK20-NEXT: movb %dl, %cl
+; FALLBACK20-NEXT: shrl %cl, %eax
+; FALLBACK20-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK20-NEXT: movl 124(%esp,%edx), %ebx
+; FALLBACK20-NEXT: leal (%ebx,%ebx), %edx
+; FALLBACK20-NEXT: movb %ch, %cl
+; FALLBACK20-NEXT: shll %cl, %edx
+; FALLBACK20-NEXT: orl %eax, %edx
+; FALLBACK20-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK20-NEXT: # kill: def $cl killed $cl killed $ecx
+; FALLBACK20-NEXT: sarl %cl, %ebx
+; FALLBACK20-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK20-NEXT: movl %ebx, 60(%eax)
+; FALLBACK20-NEXT: movl %edx, 56(%eax)
+; FALLBACK20-NEXT: movl %esi, 48(%eax)
+; FALLBACK20-NEXT: movl %ebp, 52(%eax)
+; FALLBACK20-NEXT: movl %edi, 40(%eax)
+; FALLBACK20-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK20-NEXT: movl %ecx, 44(%eax)
+; FALLBACK20-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK20-NEXT: movl %ecx, 32(%eax)
+; FALLBACK20-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK20-NEXT: movl %ecx, 36(%eax)
+; FALLBACK20-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK20-NEXT: movl %ecx, 24(%eax)
+; FALLBACK20-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK20-NEXT: movl %ecx, 28(%eax)
+; FALLBACK20-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK20-NEXT: movl %ecx, 16(%eax)
+; FALLBACK20-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK20-NEXT: movl %ecx, 20(%eax)
+; FALLBACK20-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK20-NEXT: movl %ecx, 8(%eax)
+; FALLBACK20-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK20-NEXT: movl %ecx, 12(%eax)
+; FALLBACK20-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK20-NEXT: movl %ecx, (%eax)
+; FALLBACK20-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK20-NEXT: movl %ecx, 4(%eax)
+; FALLBACK20-NEXT: addl $204, %esp
+; FALLBACK20-NEXT: popl %esi
+; FALLBACK20-NEXT: popl %edi
+; FALLBACK20-NEXT: popl %ebx
+; FALLBACK20-NEXT: popl %ebp
+; FALLBACK20-NEXT: retl
+;
+; FALLBACK21-LABEL: ashr_64bytes:
+; FALLBACK21: # %bb.0:
+; FALLBACK21-NEXT: pushl %ebp
+; FALLBACK21-NEXT: pushl %ebx
+; FALLBACK21-NEXT: pushl %edi
+; FALLBACK21-NEXT: pushl %esi
+; FALLBACK21-NEXT: subl $188, %esp
+; FALLBACK21-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK21-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK21-NEXT: movups (%eax), %xmm0
+; FALLBACK21-NEXT: movups 16(%eax), %xmm1
+; FALLBACK21-NEXT: movups 32(%eax), %xmm2
+; FALLBACK21-NEXT: movl 48(%eax), %edx
+; FALLBACK21-NEXT: movl 52(%eax), %esi
+; FALLBACK21-NEXT: movl 56(%eax), %edi
+; FALLBACK21-NEXT: movl 60(%eax), %eax
+; FALLBACK21-NEXT: movl (%ecx), %ecx
+; FALLBACK21-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK21-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; FALLBACK21-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; FALLBACK21-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK21-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; FALLBACK21-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; FALLBACK21-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; FALLBACK21-NEXT: sarl $31, %eax
+; FALLBACK21-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK21-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK21-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK21-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK21-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK21-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK21-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK21-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK21-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK21-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK21-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK21-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK21-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK21-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK21-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK21-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK21-NEXT: movl %ecx, %ebp
+; FALLBACK21-NEXT: andl $60, %ebp
+; FALLBACK21-NEXT: movl 56(%esp,%ebp), %edx
+; FALLBACK21-NEXT: movl 52(%esp,%ebp), %eax
+; FALLBACK21-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK21-NEXT: shll $3, %ecx
+; FALLBACK21-NEXT: andl $24, %ecx
+; FALLBACK21-NEXT: shrdl %cl, %edx, %eax
+; FALLBACK21-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK21-NEXT: movl 64(%esp,%ebp), %edi
+; FALLBACK21-NEXT: movl 60(%esp,%ebp), %eax
+; FALLBACK21-NEXT: movl %eax, %esi
+; FALLBACK21-NEXT: shrdl %cl, %edi, %esi
+; FALLBACK21-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK21-NEXT: shrdl %cl, %eax, %edx
+; FALLBACK21-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK21-NEXT: movl 72(%esp,%ebp), %esi
+; FALLBACK21-NEXT: movl 68(%esp,%ebp), %eax
+; FALLBACK21-NEXT: movl %eax, %edx
+; FALLBACK21-NEXT: shrdl %cl, %esi, %edx
+; FALLBACK21-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK21-NEXT: shrdl %cl, %eax, %edi
+; FALLBACK21-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK21-NEXT: movl 80(%esp,%ebp), %edi
+; FALLBACK21-NEXT: movl 76(%esp,%ebp), %eax
+; FALLBACK21-NEXT: movl %eax, %edx
+; FALLBACK21-NEXT: shrdl %cl, %edi, %edx
+; FALLBACK21-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK21-NEXT: shrdl %cl, %eax, %esi
+; FALLBACK21-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK21-NEXT: movl 88(%esp,%ebp), %esi
+; FALLBACK21-NEXT: movl 84(%esp,%ebp), %eax
+; FALLBACK21-NEXT: movl %eax, %edx
+; FALLBACK21-NEXT: shrdl %cl, %esi, %edx
+; FALLBACK21-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK21-NEXT: movl %esi, %edx
+; FALLBACK21-NEXT: shrdl %cl, %eax, %edi
+; FALLBACK21-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK21-NEXT: movl 96(%esp,%ebp), %esi
+; FALLBACK21-NEXT: movl 92(%esp,%ebp), %eax
+; FALLBACK21-NEXT: movl %eax, %edi
+; FALLBACK21-NEXT: shrdl %cl, %esi, %edi
+; FALLBACK21-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK21-NEXT: shrdl %cl, %eax, %edx
+; FALLBACK21-NEXT: movl %edx, (%esp) # 4-byte Spill
+; FALLBACK21-NEXT: movl 104(%esp,%ebp), %edx
+; FALLBACK21-NEXT: movl 100(%esp,%ebp), %eax
+; FALLBACK21-NEXT: movl %eax, %edi
+; FALLBACK21-NEXT: shrdl %cl, %edx, %edi
+; FALLBACK21-NEXT: shrdl %cl, %eax, %esi
+; FALLBACK21-NEXT: movl 48(%esp,%ebp), %ebx
+; FALLBACK21-NEXT: movl 108(%esp,%ebp), %eax
+; FALLBACK21-NEXT: shrdl %cl, %eax, %edx
+; FALLBACK21-NEXT: movl {{[0-9]+}}(%esp), %ebp
+; FALLBACK21-NEXT: movl %edx, 56(%ebp)
+; FALLBACK21-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK21-NEXT: shrdl %cl, %edx, %ebx
+; FALLBACK21-NEXT: # kill: def $cl killed $cl killed $ecx
+; FALLBACK21-NEXT: sarl %cl, %eax
+; FALLBACK21-NEXT: movl %eax, 60(%ebp)
+; FALLBACK21-NEXT: movl %esi, 48(%ebp)
+; FALLBACK21-NEXT: movl %edi, 52(%ebp)
+; FALLBACK21-NEXT: movl (%esp), %eax # 4-byte Reload
+; FALLBACK21-NEXT: movl %eax, 40(%ebp)
+; FALLBACK21-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK21-NEXT: movl %eax, 44(%ebp)
+; FALLBACK21-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK21-NEXT: movl %eax, 32(%ebp)
+; FALLBACK21-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK21-NEXT: movl %eax, 36(%ebp)
+; FALLBACK21-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK21-NEXT: movl %eax, 24(%ebp)
+; FALLBACK21-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK21-NEXT: movl %eax, 28(%ebp)
+; FALLBACK21-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK21-NEXT: movl %eax, 16(%ebp)
+; FALLBACK21-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK21-NEXT: movl %eax, 20(%ebp)
+; FALLBACK21-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK21-NEXT: movl %eax, 8(%ebp)
+; FALLBACK21-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK21-NEXT: movl %eax, 12(%ebp)
+; FALLBACK21-NEXT: movl %ebx, (%ebp)
+; FALLBACK21-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK21-NEXT: movl %eax, 4(%ebp)
+; FALLBACK21-NEXT: addl $188, %esp
+; FALLBACK21-NEXT: popl %esi
+; FALLBACK21-NEXT: popl %edi
+; FALLBACK21-NEXT: popl %ebx
+; FALLBACK21-NEXT: popl %ebp
+; FALLBACK21-NEXT: retl
+;
+; FALLBACK22-LABEL: ashr_64bytes:
+; FALLBACK22: # %bb.0:
+; FALLBACK22-NEXT: pushl %ebp
+; FALLBACK22-NEXT: pushl %ebx
+; FALLBACK22-NEXT: pushl %edi
+; FALLBACK22-NEXT: pushl %esi
+; FALLBACK22-NEXT: subl $204, %esp
+; FALLBACK22-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK22-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK22-NEXT: movups (%ecx), %xmm0
+; FALLBACK22-NEXT: movups 16(%ecx), %xmm1
+; FALLBACK22-NEXT: movups 32(%ecx), %xmm2
+; FALLBACK22-NEXT: movl 48(%ecx), %edx
+; FALLBACK22-NEXT: movl 52(%ecx), %esi
+; FALLBACK22-NEXT: movl 56(%ecx), %edi
+; FALLBACK22-NEXT: movl 60(%ecx), %ecx
+; FALLBACK22-NEXT: movl (%eax), %eax
+; FALLBACK22-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK22-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; FALLBACK22-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; FALLBACK22-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK22-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; FALLBACK22-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; FALLBACK22-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; FALLBACK22-NEXT: sarl $31, %ecx
+; FALLBACK22-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK22-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK22-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK22-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK22-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK22-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK22-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK22-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK22-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK22-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK22-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK22-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK22-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK22-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK22-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK22-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK22-NEXT: movl %eax, %ecx
+; FALLBACK22-NEXT: leal (,%eax,8), %edx
+; FALLBACK22-NEXT: andl $24, %edx
+; FALLBACK22-NEXT: andl $60, %ecx
+; FALLBACK22-NEXT: movl 68(%esp,%ecx), %esi
+; FALLBACK22-NEXT: movl 72(%esp,%ecx), %edi
+; FALLBACK22-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: shrxl %edx, %esi, %eax
+; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: movl %edx, %ebx
+; FALLBACK22-NEXT: notb %bl
+; FALLBACK22-NEXT: leal (%edi,%edi), %ebp
+; FALLBACK22-NEXT: shlxl %ebx, %ebp, %eax
+; FALLBACK22-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: shrxl %edx, 64(%esp,%ecx), %edi
+; FALLBACK22-NEXT: addl %esi, %esi
+; FALLBACK22-NEXT: shlxl %ebx, %esi, %eax
+; FALLBACK22-NEXT: orl %edi, %eax
+; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: movl 80(%esp,%ecx), %esi
+; FALLBACK22-NEXT: leal (%esi,%esi), %edi
+; FALLBACK22-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK22-NEXT: movl 76(%esp,%ecx), %edi
+; FALLBACK22-NEXT: shrxl %edx, %edi, %ebp
+; FALLBACK22-NEXT: orl %ebp, %eax
+; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK22-NEXT: addl %edi, %edi
+; FALLBACK22-NEXT: shlxl %ebx, %edi, %edi
+; FALLBACK22-NEXT: orl %eax, %edi
+; FALLBACK22-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: movl 88(%esp,%ecx), %eax
+; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: leal (%eax,%eax), %edi
+; FALLBACK22-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK22-NEXT: movl 84(%esp,%ecx), %edi
+; FALLBACK22-NEXT: shrxl %edx, %edi, %ebp
+; FALLBACK22-NEXT: orl %ebp, %eax
+; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: shrxl %edx, %esi, %esi
+; FALLBACK22-NEXT: addl %edi, %edi
+; FALLBACK22-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK22-NEXT: orl %esi, %eax
+; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: movl 96(%esp,%ecx), %esi
+; FALLBACK22-NEXT: leal (%esi,%esi), %edi
+; FALLBACK22-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK22-NEXT: movl 92(%esp,%ecx), %edi
+; FALLBACK22-NEXT: shrxl %edx, %edi, %ebp
+; FALLBACK22-NEXT: orl %ebp, %eax
+; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK22-NEXT: addl %edi, %edi
+; FALLBACK22-NEXT: shlxl %ebx, %edi, %edi
+; FALLBACK22-NEXT: orl %eax, %edi
+; FALLBACK22-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: movl 104(%esp,%ecx), %eax
+; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: leal (%eax,%eax), %edi
+; FALLBACK22-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK22-NEXT: movl 100(%esp,%ecx), %edi
+; FALLBACK22-NEXT: shrxl %edx, %edi, %ebp
+; FALLBACK22-NEXT: orl %ebp, %eax
+; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: shrxl %edx, %esi, %esi
+; FALLBACK22-NEXT: addl %edi, %edi
+; FALLBACK22-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK22-NEXT: orl %esi, %eax
+; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: movl 112(%esp,%ecx), %eax
+; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: leal (%eax,%eax), %esi
+; FALLBACK22-NEXT: shlxl %ebx, %esi, %eax
+; FALLBACK22-NEXT: movl 108(%esp,%ecx), %esi
+; FALLBACK22-NEXT: movl %ecx, %edi
+; FALLBACK22-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: shrxl %edx, %esi, %ebp
+; FALLBACK22-NEXT: orl %ebp, %eax
+; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; FALLBACK22-NEXT: addl %esi, %esi
+; FALLBACK22-NEXT: shlxl %ebx, %esi, %esi
+; FALLBACK22-NEXT: orl %ecx, %esi
+; FALLBACK22-NEXT: movl 120(%esp,%edi), %ebp
+; FALLBACK22-NEXT: leal (%ebp,%ebp), %ecx
+; FALLBACK22-NEXT: shlxl %ebx, %ecx, %ecx
+; FALLBACK22-NEXT: movl 116(%esp,%edi), %eax
+; FALLBACK22-NEXT: shrxl %edx, %eax, %edi
+; FALLBACK22-NEXT: orl %edi, %ecx
+; FALLBACK22-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; FALLBACK22-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: addl %eax, %eax
+; FALLBACK22-NEXT: shlxl %ebx, %eax, %edi
+; FALLBACK22-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; FALLBACK22-NEXT: shrxl %edx, %ebp, %eax
+; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; FALLBACK22-NEXT: movl 124(%esp,%ebp), %ebp
+; FALLBACK22-NEXT: sarxl %edx, %ebp, %edx
+; FALLBACK22-NEXT: addl %ebp, %ebp
+; FALLBACK22-NEXT: shlxl %ebx, %ebp, %ebx
+; FALLBACK22-NEXT: orl %eax, %ebx
+; FALLBACK22-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK22-NEXT: movl %edx, 60(%eax)
+; FALLBACK22-NEXT: movl %ebx, 56(%eax)
+; FALLBACK22-NEXT: movl %edi, 48(%eax)
+; FALLBACK22-NEXT: movl %ecx, 52(%eax)
+; FALLBACK22-NEXT: movl %esi, 40(%eax)
+; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK22-NEXT: movl %ecx, 44(%eax)
+; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK22-NEXT: movl %ecx, 32(%eax)
+; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK22-NEXT: movl %ecx, 36(%eax)
+; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK22-NEXT: movl %ecx, 24(%eax)
+; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK22-NEXT: movl %ecx, 28(%eax)
+; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK22-NEXT: movl %ecx, 16(%eax)
+; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK22-NEXT: movl %ecx, 20(%eax)
+; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK22-NEXT: movl %ecx, 8(%eax)
+; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK22-NEXT: movl %ecx, 12(%eax)
+; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK22-NEXT: movl %ecx, (%eax)
+; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK22-NEXT: movl %ecx, 4(%eax)
+; FALLBACK22-NEXT: addl $204, %esp
+; FALLBACK22-NEXT: popl %esi
+; FALLBACK22-NEXT: popl %edi
+; FALLBACK22-NEXT: popl %ebx
+; FALLBACK22-NEXT: popl %ebp
+; FALLBACK22-NEXT: retl
+;
+; FALLBACK23-LABEL: ashr_64bytes:
+; FALLBACK23: # %bb.0:
+; FALLBACK23-NEXT: pushl %ebp
+; FALLBACK23-NEXT: pushl %ebx
+; FALLBACK23-NEXT: pushl %edi
+; FALLBACK23-NEXT: pushl %esi
+; FALLBACK23-NEXT: subl $188, %esp
+; FALLBACK23-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK23-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK23-NEXT: movups (%eax), %xmm0
+; FALLBACK23-NEXT: movups 16(%eax), %xmm1
+; FALLBACK23-NEXT: movups 32(%eax), %xmm2
+; FALLBACK23-NEXT: movl 48(%eax), %edx
+; FALLBACK23-NEXT: movl 52(%eax), %esi
+; FALLBACK23-NEXT: movl 56(%eax), %edi
+; FALLBACK23-NEXT: movl 60(%eax), %eax
+; FALLBACK23-NEXT: movl (%ecx), %ecx
+; FALLBACK23-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK23-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; FALLBACK23-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; FALLBACK23-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK23-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; FALLBACK23-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; FALLBACK23-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; FALLBACK23-NEXT: sarl $31, %eax
+; FALLBACK23-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK23-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK23-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK23-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK23-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK23-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK23-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK23-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK23-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK23-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK23-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK23-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK23-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK23-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK23-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK23-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK23-NEXT: movl %ecx, %ebp
+; FALLBACK23-NEXT: andl $60, %ebp
+; FALLBACK23-NEXT: movl 56(%esp,%ebp), %edx
+; FALLBACK23-NEXT: movl 52(%esp,%ebp), %eax
+; FALLBACK23-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK23-NEXT: shll $3, %ecx
+; FALLBACK23-NEXT: andl $24, %ecx
+; FALLBACK23-NEXT: shrdl %cl, %edx, %eax
+; FALLBACK23-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK23-NEXT: movl 64(%esp,%ebp), %edi
+; FALLBACK23-NEXT: movl 60(%esp,%ebp), %eax
+; FALLBACK23-NEXT: movl %eax, %esi
+; FALLBACK23-NEXT: shrdl %cl, %edi, %esi
+; FALLBACK23-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK23-NEXT: shrdl %cl, %eax, %edx
+; FALLBACK23-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK23-NEXT: movl 72(%esp,%ebp), %esi
+; FALLBACK23-NEXT: movl 68(%esp,%ebp), %eax
+; FALLBACK23-NEXT: movl %eax, %edx
+; FALLBACK23-NEXT: shrdl %cl, %esi, %edx
+; FALLBACK23-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK23-NEXT: shrdl %cl, %eax, %edi
+; FALLBACK23-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK23-NEXT: movl 80(%esp,%ebp), %edi
+; FALLBACK23-NEXT: movl 76(%esp,%ebp), %eax
+; FALLBACK23-NEXT: movl %eax, %edx
+; FALLBACK23-NEXT: shrdl %cl, %edi, %edx
+; FALLBACK23-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK23-NEXT: shrdl %cl, %eax, %esi
+; FALLBACK23-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK23-NEXT: movl 88(%esp,%ebp), %ebx
+; FALLBACK23-NEXT: movl 84(%esp,%ebp), %eax
+; FALLBACK23-NEXT: movl %eax, %edx
+; FALLBACK23-NEXT: shrdl %cl, %ebx, %edx
+; FALLBACK23-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK23-NEXT: shrdl %cl, %eax, %edi
+; FALLBACK23-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK23-NEXT: movl 96(%esp,%ebp), %esi
+; FALLBACK23-NEXT: movl 92(%esp,%ebp), %eax
+; FALLBACK23-NEXT: movl %eax, %edx
+; FALLBACK23-NEXT: shrdl %cl, %esi, %edx
+; FALLBACK23-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK23-NEXT: shrdl %cl, %eax, %ebx
+; FALLBACK23-NEXT: movl 104(%esp,%ebp), %eax
+; FALLBACK23-NEXT: movl 100(%esp,%ebp), %edi
+; FALLBACK23-NEXT: movl %edi, %edx
+; FALLBACK23-NEXT: shrdl %cl, %eax, %edx
+; FALLBACK23-NEXT: shrdl %cl, %edi, %esi
+; FALLBACK23-NEXT: movl 48(%esp,%ebp), %edi
+; FALLBACK23-NEXT: movl 108(%esp,%ebp), %ebp
+; FALLBACK23-NEXT: movl %ebp, (%esp) # 4-byte Spill
+; FALLBACK23-NEXT: shrdl %cl, %ebp, %eax
+; FALLBACK23-NEXT: movl {{[0-9]+}}(%esp), %ebp
+; FALLBACK23-NEXT: movl %eax, 56(%ebp)
+; FALLBACK23-NEXT: movl %esi, 48(%ebp)
+; FALLBACK23-NEXT: movl %edx, 52(%ebp)
+; FALLBACK23-NEXT: movl %ebx, 40(%ebp)
+; FALLBACK23-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK23-NEXT: movl %eax, 44(%ebp)
+; FALLBACK23-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK23-NEXT: movl %eax, 32(%ebp)
+; FALLBACK23-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK23-NEXT: movl %eax, 36(%ebp)
+; FALLBACK23-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK23-NEXT: movl %eax, 24(%ebp)
+; FALLBACK23-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK23-NEXT: movl %eax, 28(%ebp)
+; FALLBACK23-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK23-NEXT: movl %eax, 16(%ebp)
+; FALLBACK23-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK23-NEXT: movl %eax, 20(%ebp)
+; FALLBACK23-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK23-NEXT: movl %eax, 8(%ebp)
+; FALLBACK23-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK23-NEXT: movl %eax, 12(%ebp)
+; FALLBACK23-NEXT: sarxl %ecx, (%esp), %eax # 4-byte Folded Reload
+; FALLBACK23-NEXT: # kill: def $cl killed $cl killed $ecx
+; FALLBACK23-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK23-NEXT: shrdl %cl, %edx, %edi
+; FALLBACK23-NEXT: movl %edi, (%ebp)
+; FALLBACK23-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK23-NEXT: movl %ecx, 4(%ebp)
+; FALLBACK23-NEXT: movl %eax, 60(%ebp)
+; FALLBACK23-NEXT: addl $188, %esp
+; FALLBACK23-NEXT: popl %esi
+; FALLBACK23-NEXT: popl %edi
+; FALLBACK23-NEXT: popl %ebx
+; FALLBACK23-NEXT: popl %ebp
+; FALLBACK23-NEXT: retl
+;
+; FALLBACK24-LABEL: ashr_64bytes:
+; FALLBACK24: # %bb.0:
+; FALLBACK24-NEXT: pushl %ebp
+; FALLBACK24-NEXT: pushl %ebx
+; FALLBACK24-NEXT: pushl %edi
+; FALLBACK24-NEXT: pushl %esi
+; FALLBACK24-NEXT: subl $204, %esp
+; FALLBACK24-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK24-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK24-NEXT: vmovups (%ecx), %ymm0
+; FALLBACK24-NEXT: vmovups 32(%ecx), %xmm1
+; FALLBACK24-NEXT: movl 48(%ecx), %edx
+; FALLBACK24-NEXT: movl 52(%ecx), %esi
+; FALLBACK24-NEXT: movl 56(%ecx), %edi
+; FALLBACK24-NEXT: movl 60(%ecx), %ecx
+; FALLBACK24-NEXT: movl (%eax), %eax
+; FALLBACK24-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK24-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; FALLBACK24-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; FALLBACK24-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK24-NEXT: vmovaps %xmm1, {{[0-9]+}}(%esp)
+; FALLBACK24-NEXT: vmovups %ymm0, {{[0-9]+}}(%esp)
+; FALLBACK24-NEXT: sarl $31, %ecx
+; FALLBACK24-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK24-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK24-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK24-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK24-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK24-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK24-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK24-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK24-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK24-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK24-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK24-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK24-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK24-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK24-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK24-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK24-NEXT: movl %eax, %esi
+; FALLBACK24-NEXT: andl $60, %esi
+; FALLBACK24-NEXT: movl 68(%esp,%esi), %edx
+; FALLBACK24-NEXT: shll $3, %eax
+; FALLBACK24-NEXT: andl $24, %eax
+; FALLBACK24-NEXT: movl %edx, %edi
+; FALLBACK24-NEXT: movl %eax, %ecx
+; FALLBACK24-NEXT: shrl %cl, %edi
+; FALLBACK24-NEXT: movl 72(%esp,%esi), %ecx
+; FALLBACK24-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK24-NEXT: leal (%ecx,%ecx), %ebx
+; FALLBACK24-NEXT: movb %al, %ch
+; FALLBACK24-NEXT: notb %ch
+; FALLBACK24-NEXT: movb %ch, %cl
+; FALLBACK24-NEXT: shll %cl, %ebx
+; FALLBACK24-NEXT: orl %edi, %ebx
+; FALLBACK24-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK24-NEXT: movl 64(%esp,%esi), %edi
+; FALLBACK24-NEXT: movb %al, %cl
+; FALLBACK24-NEXT: shrl %cl, %edi
+; FALLBACK24-NEXT: addl %edx, %edx
+; FALLBACK24-NEXT: movb %ch, %cl
+; FALLBACK24-NEXT: shll %cl, %edx
+; FALLBACK24-NEXT: orl %edi, %edx
+; FALLBACK24-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK24-NEXT: movl 76(%esp,%esi), %edx
+; FALLBACK24-NEXT: movl %edx, %ebp
+; FALLBACK24-NEXT: movb %al, %cl
+; FALLBACK24-NEXT: shrl %cl, %ebp
+; FALLBACK24-NEXT: movl 80(%esp,%esi), %edi
+; FALLBACK24-NEXT: leal (%edi,%edi), %ebx
+; FALLBACK24-NEXT: movb %ch, %cl
+; FALLBACK24-NEXT: shll %cl, %ebx
+; FALLBACK24-NEXT: orl %ebp, %ebx
+; FALLBACK24-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK24-NEXT: movb %al, %cl
+; FALLBACK24-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; FALLBACK24-NEXT: shrl %cl, %ebx
+; FALLBACK24-NEXT: addl %edx, %edx
+; FALLBACK24-NEXT: movb %ch, %cl
+; FALLBACK24-NEXT: shll %cl, %edx
+; FALLBACK24-NEXT: orl %ebx, %edx
+; FALLBACK24-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK24-NEXT: movl 84(%esp,%esi), %ebx
+; FALLBACK24-NEXT: movl %ebx, %ebp
+; FALLBACK24-NEXT: movl %eax, %edx
+; FALLBACK24-NEXT: movb %dl, %cl
+; FALLBACK24-NEXT: shrl %cl, %ebp
+; FALLBACK24-NEXT: movl 88(%esp,%esi), %eax
+; FALLBACK24-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK24-NEXT: addl %eax, %eax
+; FALLBACK24-NEXT: movb %ch, %cl
+; FALLBACK24-NEXT: shll %cl, %eax
+; FALLBACK24-NEXT: orl %ebp, %eax
+; FALLBACK24-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK24-NEXT: movb %dl, %cl
+; FALLBACK24-NEXT: shrl %cl, %edi
+; FALLBACK24-NEXT: addl %ebx, %ebx
+; FALLBACK24-NEXT: movb %ch, %cl
+; FALLBACK24-NEXT: movb %ch, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
+; FALLBACK24-NEXT: shll %cl, %ebx
+; FALLBACK24-NEXT: orl %edi, %ebx
+; FALLBACK24-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK24-NEXT: movl 92(%esp,%esi), %ebx
+; FALLBACK24-NEXT: movl %ebx, %ebp
+; FALLBACK24-NEXT: movb %dl, %cl
+; FALLBACK24-NEXT: shrl %cl, %ebp
+; FALLBACK24-NEXT: movl 96(%esp,%esi), %edi
+; FALLBACK24-NEXT: leal (%edi,%edi), %eax
+; FALLBACK24-NEXT: movb %ch, %cl
+; FALLBACK24-NEXT: shll %cl, %eax
+; FALLBACK24-NEXT: orl %ebp, %eax
+; FALLBACK24-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK24-NEXT: movb %dl, %cl
+; FALLBACK24-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK24-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK24-NEXT: shrl %cl, %eax
+; FALLBACK24-NEXT: addl %ebx, %ebx
+; FALLBACK24-NEXT: movb %ch, %cl
+; FALLBACK24-NEXT: shll %cl, %ebx
+; FALLBACK24-NEXT: orl %eax, %ebx
+; FALLBACK24-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK24-NEXT: movl 100(%esp,%esi), %ebx
+; FALLBACK24-NEXT: movl %ebx, %ebp
+; FALLBACK24-NEXT: movb %dl, %cl
+; FALLBACK24-NEXT: shrl %cl, %ebp
+; FALLBACK24-NEXT: movl 104(%esp,%esi), %edx
+; FALLBACK24-NEXT: leal (%edx,%edx), %eax
+; FALLBACK24-NEXT: movb %ch, %cl
+; FALLBACK24-NEXT: shll %cl, %eax
+; FALLBACK24-NEXT: orl %ebp, %eax
+; FALLBACK24-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK24-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK24-NEXT: movb %al, %cl
+; FALLBACK24-NEXT: shrl %cl, %edi
+; FALLBACK24-NEXT: addl %ebx, %ebx
+; FALLBACK24-NEXT: movb %ch, %cl
+; FALLBACK24-NEXT: shll %cl, %ebx
+; FALLBACK24-NEXT: orl %edi, %ebx
+; FALLBACK24-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK24-NEXT: movl 108(%esp,%esi), %edi
+; FALLBACK24-NEXT: movl %edi, %ebp
+; FALLBACK24-NEXT: movl %eax, %ecx
+; FALLBACK24-NEXT: shrl %cl, %ebp
+; FALLBACK24-NEXT: movl 112(%esp,%esi), %ecx
+; FALLBACK24-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK24-NEXT: leal (%ecx,%ecx), %ebx
+; FALLBACK24-NEXT: movb {{[-0-9]+}}(%e{{[sb]}}p), %ch # 1-byte Reload
+; FALLBACK24-NEXT: movb %ch, %cl
+; FALLBACK24-NEXT: shll %cl, %ebx
+; FALLBACK24-NEXT: orl %ebp, %ebx
+; FALLBACK24-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK24-NEXT: movb %al, %cl
+; FALLBACK24-NEXT: shrl %cl, %edx
+; FALLBACK24-NEXT: addl %edi, %edi
+; FALLBACK24-NEXT: movb %ch, %cl
+; FALLBACK24-NEXT: shll %cl, %edi
+; FALLBACK24-NEXT: orl %edx, %edi
+; FALLBACK24-NEXT: movl %esi, %edx
+; FALLBACK24-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK24-NEXT: movl 116(%esp,%esi), %esi
+; FALLBACK24-NEXT: movl %esi, %ebx
+; FALLBACK24-NEXT: movb %al, %cl
+; FALLBACK24-NEXT: shrl %cl, %ebx
+; FALLBACK24-NEXT: movl 120(%esp,%edx), %eax
+; FALLBACK24-NEXT: leal (%eax,%eax), %ebp
+; FALLBACK24-NEXT: movb %ch, %cl
+; FALLBACK24-NEXT: shll %cl, %ebp
+; FALLBACK24-NEXT: orl %ebx, %ebp
+; FALLBACK24-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK24-NEXT: movb %dl, %cl
+; FALLBACK24-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; FALLBACK24-NEXT: shrl %cl, %ebx
+; FALLBACK24-NEXT: addl %esi, %esi
+; FALLBACK24-NEXT: movb %ch, %cl
+; FALLBACK24-NEXT: shll %cl, %esi
+; FALLBACK24-NEXT: orl %ebx, %esi
+; FALLBACK24-NEXT: movb %dl, %cl
+; FALLBACK24-NEXT: shrl %cl, %eax
+; FALLBACK24-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK24-NEXT: movl 124(%esp,%edx), %ebx
+; FALLBACK24-NEXT: leal (%ebx,%ebx), %edx
+; FALLBACK24-NEXT: movb %ch, %cl
+; FALLBACK24-NEXT: shll %cl, %edx
+; FALLBACK24-NEXT: orl %eax, %edx
+; FALLBACK24-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK24-NEXT: # kill: def $cl killed $cl killed $ecx
+; FALLBACK24-NEXT: sarl %cl, %ebx
+; FALLBACK24-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK24-NEXT: movl %ebx, 60(%eax)
+; FALLBACK24-NEXT: movl %edx, 56(%eax)
+; FALLBACK24-NEXT: movl %esi, 48(%eax)
+; FALLBACK24-NEXT: movl %ebp, 52(%eax)
+; FALLBACK24-NEXT: movl %edi, 40(%eax)
+; FALLBACK24-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK24-NEXT: movl %ecx, 44(%eax)
+; FALLBACK24-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK24-NEXT: movl %ecx, 32(%eax)
+; FALLBACK24-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK24-NEXT: movl %ecx, 36(%eax)
+; FALLBACK24-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK24-NEXT: movl %ecx, 24(%eax)
+; FALLBACK24-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK24-NEXT: movl %ecx, 28(%eax)
+; FALLBACK24-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK24-NEXT: movl %ecx, 16(%eax)
+; FALLBACK24-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK24-NEXT: movl %ecx, 20(%eax)
+; FALLBACK24-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK24-NEXT: movl %ecx, 8(%eax)
+; FALLBACK24-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK24-NEXT: movl %ecx, 12(%eax)
+; FALLBACK24-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK24-NEXT: movl %ecx, (%eax)
+; FALLBACK24-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK24-NEXT: movl %ecx, 4(%eax)
+; FALLBACK24-NEXT: addl $204, %esp
+; FALLBACK24-NEXT: popl %esi
+; FALLBACK24-NEXT: popl %edi
+; FALLBACK24-NEXT: popl %ebx
+; FALLBACK24-NEXT: popl %ebp
+; FALLBACK24-NEXT: vzeroupper
+; FALLBACK24-NEXT: retl
+;
+; FALLBACK25-LABEL: ashr_64bytes:
+; FALLBACK25: # %bb.0:
+; FALLBACK25-NEXT: pushl %ebp
+; FALLBACK25-NEXT: pushl %ebx
+; FALLBACK25-NEXT: pushl %edi
+; FALLBACK25-NEXT: pushl %esi
+; FALLBACK25-NEXT: subl $188, %esp
+; FALLBACK25-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK25-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK25-NEXT: vmovups (%eax), %ymm0
+; FALLBACK25-NEXT: vmovups 32(%eax), %xmm1
+; FALLBACK25-NEXT: movl 48(%eax), %edx
+; FALLBACK25-NEXT: movl 52(%eax), %esi
+; FALLBACK25-NEXT: movl 56(%eax), %edi
+; FALLBACK25-NEXT: movl 60(%eax), %eax
+; FALLBACK25-NEXT: movl (%ecx), %ecx
+; FALLBACK25-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK25-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; FALLBACK25-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; FALLBACK25-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK25-NEXT: vmovaps %xmm1, {{[0-9]+}}(%esp)
+; FALLBACK25-NEXT: vmovups %ymm0, {{[0-9]+}}(%esp)
+; FALLBACK25-NEXT: sarl $31, %eax
+; FALLBACK25-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK25-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK25-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK25-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK25-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK25-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK25-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK25-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK25-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK25-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK25-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK25-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK25-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK25-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK25-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK25-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK25-NEXT: movl %ecx, %ebp
+; FALLBACK25-NEXT: andl $60, %ebp
+; FALLBACK25-NEXT: movl 56(%esp,%ebp), %edx
+; FALLBACK25-NEXT: movl 52(%esp,%ebp), %eax
+; FALLBACK25-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK25-NEXT: shll $3, %ecx
+; FALLBACK25-NEXT: andl $24, %ecx
+; FALLBACK25-NEXT: shrdl %cl, %edx, %eax
+; FALLBACK25-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK25-NEXT: movl 64(%esp,%ebp), %edi
+; FALLBACK25-NEXT: movl 60(%esp,%ebp), %eax
+; FALLBACK25-NEXT: movl %eax, %esi
+; FALLBACK25-NEXT: shrdl %cl, %edi, %esi
+; FALLBACK25-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK25-NEXT: shrdl %cl, %eax, %edx
+; FALLBACK25-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK25-NEXT: movl 72(%esp,%ebp), %esi
+; FALLBACK25-NEXT: movl 68(%esp,%ebp), %eax
+; FALLBACK25-NEXT: movl %eax, %edx
+; FALLBACK25-NEXT: shrdl %cl, %esi, %edx
+; FALLBACK25-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK25-NEXT: shrdl %cl, %eax, %edi
+; FALLBACK25-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK25-NEXT: movl 80(%esp,%ebp), %edi
+; FALLBACK25-NEXT: movl 76(%esp,%ebp), %eax
+; FALLBACK25-NEXT: movl %eax, %edx
+; FALLBACK25-NEXT: shrdl %cl, %edi, %edx
+; FALLBACK25-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK25-NEXT: shrdl %cl, %eax, %esi
+; FALLBACK25-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK25-NEXT: movl 88(%esp,%ebp), %esi
+; FALLBACK25-NEXT: movl 84(%esp,%ebp), %eax
+; FALLBACK25-NEXT: movl %eax, %edx
+; FALLBACK25-NEXT: shrdl %cl, %esi, %edx
+; FALLBACK25-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK25-NEXT: movl %esi, %edx
+; FALLBACK25-NEXT: shrdl %cl, %eax, %edi
+; FALLBACK25-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK25-NEXT: movl 96(%esp,%ebp), %esi
+; FALLBACK25-NEXT: movl 92(%esp,%ebp), %eax
+; FALLBACK25-NEXT: movl %eax, %edi
+; FALLBACK25-NEXT: shrdl %cl, %esi, %edi
+; FALLBACK25-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK25-NEXT: shrdl %cl, %eax, %edx
+; FALLBACK25-NEXT: movl %edx, (%esp) # 4-byte Spill
+; FALLBACK25-NEXT: movl 104(%esp,%ebp), %edx
+; FALLBACK25-NEXT: movl 100(%esp,%ebp), %eax
+; FALLBACK25-NEXT: movl %eax, %edi
+; FALLBACK25-NEXT: shrdl %cl, %edx, %edi
+; FALLBACK25-NEXT: shrdl %cl, %eax, %esi
+; FALLBACK25-NEXT: movl 48(%esp,%ebp), %ebx
+; FALLBACK25-NEXT: movl 108(%esp,%ebp), %eax
+; FALLBACK25-NEXT: shrdl %cl, %eax, %edx
+; FALLBACK25-NEXT: movl {{[0-9]+}}(%esp), %ebp
+; FALLBACK25-NEXT: movl %edx, 56(%ebp)
+; FALLBACK25-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK25-NEXT: shrdl %cl, %edx, %ebx
+; FALLBACK25-NEXT: # kill: def $cl killed $cl killed $ecx
+; FALLBACK25-NEXT: sarl %cl, %eax
+; FALLBACK25-NEXT: movl %eax, 60(%ebp)
+; FALLBACK25-NEXT: movl %esi, 48(%ebp)
+; FALLBACK25-NEXT: movl %edi, 52(%ebp)
+; FALLBACK25-NEXT: movl (%esp), %eax # 4-byte Reload
+; FALLBACK25-NEXT: movl %eax, 40(%ebp)
+; FALLBACK25-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK25-NEXT: movl %eax, 44(%ebp)
+; FALLBACK25-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK25-NEXT: movl %eax, 32(%ebp)
+; FALLBACK25-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK25-NEXT: movl %eax, 36(%ebp)
+; FALLBACK25-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK25-NEXT: movl %eax, 24(%ebp)
+; FALLBACK25-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK25-NEXT: movl %eax, 28(%ebp)
+; FALLBACK25-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK25-NEXT: movl %eax, 16(%ebp)
+; FALLBACK25-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK25-NEXT: movl %eax, 20(%ebp)
+; FALLBACK25-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK25-NEXT: movl %eax, 8(%ebp)
+; FALLBACK25-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK25-NEXT: movl %eax, 12(%ebp)
+; FALLBACK25-NEXT: movl %ebx, (%ebp)
+; FALLBACK25-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK25-NEXT: movl %eax, 4(%ebp)
+; FALLBACK25-NEXT: addl $188, %esp
+; FALLBACK25-NEXT: popl %esi
+; FALLBACK25-NEXT: popl %edi
+; FALLBACK25-NEXT: popl %ebx
+; FALLBACK25-NEXT: popl %ebp
+; FALLBACK25-NEXT: vzeroupper
+; FALLBACK25-NEXT: retl
+;
+; FALLBACK26-LABEL: ashr_64bytes:
+; FALLBACK26: # %bb.0:
+; FALLBACK26-NEXT: pushl %ebp
+; FALLBACK26-NEXT: pushl %ebx
+; FALLBACK26-NEXT: pushl %edi
+; FALLBACK26-NEXT: pushl %esi
+; FALLBACK26-NEXT: subl $204, %esp
+; FALLBACK26-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK26-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK26-NEXT: vmovups (%ecx), %ymm0
+; FALLBACK26-NEXT: vmovups 32(%ecx), %xmm1
+; FALLBACK26-NEXT: movl 48(%ecx), %edx
+; FALLBACK26-NEXT: movl 52(%ecx), %esi
+; FALLBACK26-NEXT: movl 56(%ecx), %edi
+; FALLBACK26-NEXT: movl 60(%ecx), %ecx
+; FALLBACK26-NEXT: movl (%eax), %eax
+; FALLBACK26-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK26-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; FALLBACK26-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; FALLBACK26-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK26-NEXT: vmovaps %xmm1, {{[0-9]+}}(%esp)
+; FALLBACK26-NEXT: vmovups %ymm0, {{[0-9]+}}(%esp)
+; FALLBACK26-NEXT: sarl $31, %ecx
+; FALLBACK26-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK26-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK26-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK26-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK26-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK26-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK26-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK26-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK26-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK26-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK26-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK26-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK26-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK26-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK26-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK26-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK26-NEXT: movl %eax, %ecx
+; FALLBACK26-NEXT: leal (,%eax,8), %edx
+; FALLBACK26-NEXT: andl $24, %edx
+; FALLBACK26-NEXT: andl $60, %ecx
+; FALLBACK26-NEXT: movl 68(%esp,%ecx), %esi
+; FALLBACK26-NEXT: movl 72(%esp,%ecx), %edi
+; FALLBACK26-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: shrxl %edx, %esi, %eax
+; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: movl %edx, %ebx
+; FALLBACK26-NEXT: notb %bl
+; FALLBACK26-NEXT: leal (%edi,%edi), %ebp
+; FALLBACK26-NEXT: shlxl %ebx, %ebp, %eax
+; FALLBACK26-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: shrxl %edx, 64(%esp,%ecx), %edi
+; FALLBACK26-NEXT: addl %esi, %esi
+; FALLBACK26-NEXT: shlxl %ebx, %esi, %eax
+; FALLBACK26-NEXT: orl %edi, %eax
+; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: movl 80(%esp,%ecx), %esi
+; FALLBACK26-NEXT: leal (%esi,%esi), %edi
+; FALLBACK26-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK26-NEXT: movl 76(%esp,%ecx), %edi
+; FALLBACK26-NEXT: shrxl %edx, %edi, %ebp
+; FALLBACK26-NEXT: orl %ebp, %eax
+; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK26-NEXT: addl %edi, %edi
+; FALLBACK26-NEXT: shlxl %ebx, %edi, %edi
+; FALLBACK26-NEXT: orl %eax, %edi
+; FALLBACK26-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: movl 88(%esp,%ecx), %eax
+; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: leal (%eax,%eax), %edi
+; FALLBACK26-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK26-NEXT: movl 84(%esp,%ecx), %edi
+; FALLBACK26-NEXT: shrxl %edx, %edi, %ebp
+; FALLBACK26-NEXT: orl %ebp, %eax
+; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: shrxl %edx, %esi, %esi
+; FALLBACK26-NEXT: addl %edi, %edi
+; FALLBACK26-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK26-NEXT: orl %esi, %eax
+; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: movl 96(%esp,%ecx), %esi
+; FALLBACK26-NEXT: leal (%esi,%esi), %edi
+; FALLBACK26-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK26-NEXT: movl 92(%esp,%ecx), %edi
+; FALLBACK26-NEXT: shrxl %edx, %edi, %ebp
+; FALLBACK26-NEXT: orl %ebp, %eax
+; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK26-NEXT: addl %edi, %edi
+; FALLBACK26-NEXT: shlxl %ebx, %edi, %edi
+; FALLBACK26-NEXT: orl %eax, %edi
+; FALLBACK26-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: movl 104(%esp,%ecx), %eax
+; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: leal (%eax,%eax), %edi
+; FALLBACK26-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK26-NEXT: movl 100(%esp,%ecx), %edi
+; FALLBACK26-NEXT: shrxl %edx, %edi, %ebp
+; FALLBACK26-NEXT: orl %ebp, %eax
+; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: shrxl %edx, %esi, %esi
+; FALLBACK26-NEXT: addl %edi, %edi
+; FALLBACK26-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK26-NEXT: orl %esi, %eax
+; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: movl 112(%esp,%ecx), %eax
+; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: leal (%eax,%eax), %esi
+; FALLBACK26-NEXT: shlxl %ebx, %esi, %eax
+; FALLBACK26-NEXT: movl 108(%esp,%ecx), %esi
+; FALLBACK26-NEXT: movl %ecx, %edi
+; FALLBACK26-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: shrxl %edx, %esi, %ebp
+; FALLBACK26-NEXT: orl %ebp, %eax
+; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; FALLBACK26-NEXT: addl %esi, %esi
+; FALLBACK26-NEXT: shlxl %ebx, %esi, %esi
+; FALLBACK26-NEXT: orl %ecx, %esi
+; FALLBACK26-NEXT: movl 120(%esp,%edi), %ebp
+; FALLBACK26-NEXT: leal (%ebp,%ebp), %ecx
+; FALLBACK26-NEXT: shlxl %ebx, %ecx, %ecx
+; FALLBACK26-NEXT: movl 116(%esp,%edi), %eax
+; FALLBACK26-NEXT: shrxl %edx, %eax, %edi
+; FALLBACK26-NEXT: orl %edi, %ecx
+; FALLBACK26-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; FALLBACK26-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: addl %eax, %eax
+; FALLBACK26-NEXT: shlxl %ebx, %eax, %edi
+; FALLBACK26-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; FALLBACK26-NEXT: shrxl %edx, %ebp, %eax
+; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; FALLBACK26-NEXT: movl 124(%esp,%ebp), %ebp
+; FALLBACK26-NEXT: sarxl %edx, %ebp, %edx
+; FALLBACK26-NEXT: addl %ebp, %ebp
+; FALLBACK26-NEXT: shlxl %ebx, %ebp, %ebx
+; FALLBACK26-NEXT: orl %eax, %ebx
+; FALLBACK26-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK26-NEXT: movl %edx, 60(%eax)
+; FALLBACK26-NEXT: movl %ebx, 56(%eax)
+; FALLBACK26-NEXT: movl %edi, 48(%eax)
+; FALLBACK26-NEXT: movl %ecx, 52(%eax)
+; FALLBACK26-NEXT: movl %esi, 40(%eax)
+; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK26-NEXT: movl %ecx, 44(%eax)
+; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK26-NEXT: movl %ecx, 32(%eax)
+; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK26-NEXT: movl %ecx, 36(%eax)
+; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK26-NEXT: movl %ecx, 24(%eax)
+; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK26-NEXT: movl %ecx, 28(%eax)
+; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK26-NEXT: movl %ecx, 16(%eax)
+; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK26-NEXT: movl %ecx, 20(%eax)
+; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK26-NEXT: movl %ecx, 8(%eax)
+; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK26-NEXT: movl %ecx, 12(%eax)
+; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK26-NEXT: movl %ecx, (%eax)
+; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK26-NEXT: movl %ecx, 4(%eax)
+; FALLBACK26-NEXT: addl $204, %esp
+; FALLBACK26-NEXT: popl %esi
+; FALLBACK26-NEXT: popl %edi
+; FALLBACK26-NEXT: popl %ebx
+; FALLBACK26-NEXT: popl %ebp
+; FALLBACK26-NEXT: vzeroupper
+; FALLBACK26-NEXT: retl
+;
+; FALLBACK27-LABEL: ashr_64bytes:
+; FALLBACK27: # %bb.0:
+; FALLBACK27-NEXT: pushl %ebp
+; FALLBACK27-NEXT: pushl %ebx
+; FALLBACK27-NEXT: pushl %edi
+; FALLBACK27-NEXT: pushl %esi
+; FALLBACK27-NEXT: subl $188, %esp
+; FALLBACK27-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK27-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK27-NEXT: vmovups (%eax), %ymm0
+; FALLBACK27-NEXT: vmovups 32(%eax), %xmm1
+; FALLBACK27-NEXT: movl 48(%eax), %edx
+; FALLBACK27-NEXT: movl 52(%eax), %esi
+; FALLBACK27-NEXT: movl 56(%eax), %edi
+; FALLBACK27-NEXT: movl 60(%eax), %eax
+; FALLBACK27-NEXT: movl (%ecx), %ecx
+; FALLBACK27-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK27-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; FALLBACK27-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; FALLBACK27-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK27-NEXT: vmovaps %xmm1, {{[0-9]+}}(%esp)
+; FALLBACK27-NEXT: vmovups %ymm0, {{[0-9]+}}(%esp)
+; FALLBACK27-NEXT: sarl $31, %eax
+; FALLBACK27-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK27-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK27-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK27-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK27-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK27-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK27-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK27-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK27-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK27-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK27-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK27-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK27-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK27-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK27-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK27-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK27-NEXT: movl %ecx, %ebp
+; FALLBACK27-NEXT: andl $60, %ebp
+; FALLBACK27-NEXT: movl 56(%esp,%ebp), %edx
+; FALLBACK27-NEXT: movl 52(%esp,%ebp), %eax
+; FALLBACK27-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK27-NEXT: shll $3, %ecx
+; FALLBACK27-NEXT: andl $24, %ecx
+; FALLBACK27-NEXT: shrdl %cl, %edx, %eax
+; FALLBACK27-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK27-NEXT: movl 64(%esp,%ebp), %edi
+; FALLBACK27-NEXT: movl 60(%esp,%ebp), %eax
+; FALLBACK27-NEXT: movl %eax, %esi
+; FALLBACK27-NEXT: shrdl %cl, %edi, %esi
+; FALLBACK27-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK27-NEXT: shrdl %cl, %eax, %edx
+; FALLBACK27-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK27-NEXT: movl 72(%esp,%ebp), %esi
+; FALLBACK27-NEXT: movl 68(%esp,%ebp), %eax
+; FALLBACK27-NEXT: movl %eax, %edx
+; FALLBACK27-NEXT: shrdl %cl, %esi, %edx
+; FALLBACK27-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK27-NEXT: shrdl %cl, %eax, %edi
+; FALLBACK27-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK27-NEXT: movl 80(%esp,%ebp), %edi
+; FALLBACK27-NEXT: movl 76(%esp,%ebp), %eax
+; FALLBACK27-NEXT: movl %eax, %edx
+; FALLBACK27-NEXT: shrdl %cl, %edi, %edx
+; FALLBACK27-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK27-NEXT: shrdl %cl, %eax, %esi
+; FALLBACK27-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK27-NEXT: movl 88(%esp,%ebp), %ebx
+; FALLBACK27-NEXT: movl 84(%esp,%ebp), %eax
+; FALLBACK27-NEXT: movl %eax, %edx
+; FALLBACK27-NEXT: shrdl %cl, %ebx, %edx
+; FALLBACK27-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK27-NEXT: shrdl %cl, %eax, %edi
+; FALLBACK27-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK27-NEXT: movl 96(%esp,%ebp), %esi
+; FALLBACK27-NEXT: movl 92(%esp,%ebp), %eax
+; FALLBACK27-NEXT: movl %eax, %edx
+; FALLBACK27-NEXT: shrdl %cl, %esi, %edx
+; FALLBACK27-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK27-NEXT: shrdl %cl, %eax, %ebx
+; FALLBACK27-NEXT: movl 104(%esp,%ebp), %eax
+; FALLBACK27-NEXT: movl 100(%esp,%ebp), %edi
+; FALLBACK27-NEXT: movl %edi, %edx
+; FALLBACK27-NEXT: shrdl %cl, %eax, %edx
+; FALLBACK27-NEXT: shrdl %cl, %edi, %esi
+; FALLBACK27-NEXT: movl 48(%esp,%ebp), %edi
+; FALLBACK27-NEXT: movl 108(%esp,%ebp), %ebp
+; FALLBACK27-NEXT: movl %ebp, (%esp) # 4-byte Spill
+; FALLBACK27-NEXT: shrdl %cl, %ebp, %eax
+; FALLBACK27-NEXT: movl {{[0-9]+}}(%esp), %ebp
+; FALLBACK27-NEXT: movl %eax, 56(%ebp)
+; FALLBACK27-NEXT: movl %esi, 48(%ebp)
+; FALLBACK27-NEXT: movl %edx, 52(%ebp)
+; FALLBACK27-NEXT: movl %ebx, 40(%ebp)
+; FALLBACK27-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK27-NEXT: movl %eax, 44(%ebp)
+; FALLBACK27-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK27-NEXT: movl %eax, 32(%ebp)
+; FALLBACK27-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK27-NEXT: movl %eax, 36(%ebp)
+; FALLBACK27-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK27-NEXT: movl %eax, 24(%ebp)
+; FALLBACK27-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK27-NEXT: movl %eax, 28(%ebp)
+; FALLBACK27-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK27-NEXT: movl %eax, 16(%ebp)
+; FALLBACK27-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK27-NEXT: movl %eax, 20(%ebp)
+; FALLBACK27-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK27-NEXT: movl %eax, 8(%ebp)
+; FALLBACK27-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK27-NEXT: movl %eax, 12(%ebp)
+; FALLBACK27-NEXT: sarxl %ecx, (%esp), %eax # 4-byte Folded Reload
+; FALLBACK27-NEXT: # kill: def $cl killed $cl killed $ecx
+; FALLBACK27-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK27-NEXT: shrdl %cl, %edx, %edi
+; FALLBACK27-NEXT: movl %edi, (%ebp)
+; FALLBACK27-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK27-NEXT: movl %ecx, 4(%ebp)
+; FALLBACK27-NEXT: movl %eax, 60(%ebp)
+; FALLBACK27-NEXT: addl $188, %esp
+; FALLBACK27-NEXT: popl %esi
+; FALLBACK27-NEXT: popl %edi
+; FALLBACK27-NEXT: popl %ebx
+; FALLBACK27-NEXT: popl %ebp
+; FALLBACK27-NEXT: vzeroupper
+; FALLBACK27-NEXT: retl
+;
+; FALLBACK28-LABEL: ashr_64bytes:
+; FALLBACK28: # %bb.0:
+; FALLBACK28-NEXT: pushl %ebp
+; FALLBACK28-NEXT: pushl %ebx
+; FALLBACK28-NEXT: pushl %edi
+; FALLBACK28-NEXT: pushl %esi
+; FALLBACK28-NEXT: subl $204, %esp
+; FALLBACK28-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK28-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK28-NEXT: vmovups (%ecx), %ymm0
+; FALLBACK28-NEXT: vmovups 32(%ecx), %xmm1
+; FALLBACK28-NEXT: movl 48(%ecx), %edx
+; FALLBACK28-NEXT: movl 52(%ecx), %esi
+; FALLBACK28-NEXT: movl 56(%ecx), %edi
+; FALLBACK28-NEXT: movl 60(%ecx), %ecx
+; FALLBACK28-NEXT: movl (%eax), %eax
+; FALLBACK28-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK28-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; FALLBACK28-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; FALLBACK28-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK28-NEXT: vmovaps %xmm1, {{[0-9]+}}(%esp)
+; FALLBACK28-NEXT: vmovups %ymm0, {{[0-9]+}}(%esp)
+; FALLBACK28-NEXT: sarl $31, %ecx
+; FALLBACK28-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK28-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK28-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK28-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK28-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK28-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK28-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK28-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK28-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK28-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK28-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK28-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK28-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK28-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK28-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK28-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK28-NEXT: movl %eax, %esi
+; FALLBACK28-NEXT: andl $60, %esi
+; FALLBACK28-NEXT: movl 68(%esp,%esi), %edx
+; FALLBACK28-NEXT: shll $3, %eax
+; FALLBACK28-NEXT: andl $24, %eax
+; FALLBACK28-NEXT: movl %edx, %edi
+; FALLBACK28-NEXT: movl %eax, %ecx
+; FALLBACK28-NEXT: shrl %cl, %edi
+; FALLBACK28-NEXT: movl 72(%esp,%esi), %ecx
+; FALLBACK28-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK28-NEXT: leal (%ecx,%ecx), %ebx
+; FALLBACK28-NEXT: movb %al, %ch
+; FALLBACK28-NEXT: notb %ch
+; FALLBACK28-NEXT: movb %ch, %cl
+; FALLBACK28-NEXT: shll %cl, %ebx
+; FALLBACK28-NEXT: orl %edi, %ebx
+; FALLBACK28-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK28-NEXT: movl 64(%esp,%esi), %edi
+; FALLBACK28-NEXT: movb %al, %cl
+; FALLBACK28-NEXT: shrl %cl, %edi
+; FALLBACK28-NEXT: addl %edx, %edx
+; FALLBACK28-NEXT: movb %ch, %cl
+; FALLBACK28-NEXT: shll %cl, %edx
+; FALLBACK28-NEXT: orl %edi, %edx
+; FALLBACK28-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK28-NEXT: movl 76(%esp,%esi), %edx
+; FALLBACK28-NEXT: movl %edx, %ebp
+; FALLBACK28-NEXT: movb %al, %cl
+; FALLBACK28-NEXT: shrl %cl, %ebp
+; FALLBACK28-NEXT: movl 80(%esp,%esi), %edi
+; FALLBACK28-NEXT: leal (%edi,%edi), %ebx
+; FALLBACK28-NEXT: movb %ch, %cl
+; FALLBACK28-NEXT: shll %cl, %ebx
+; FALLBACK28-NEXT: orl %ebp, %ebx
+; FALLBACK28-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK28-NEXT: movb %al, %cl
+; FALLBACK28-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; FALLBACK28-NEXT: shrl %cl, %ebx
+; FALLBACK28-NEXT: addl %edx, %edx
+; FALLBACK28-NEXT: movb %ch, %cl
+; FALLBACK28-NEXT: shll %cl, %edx
+; FALLBACK28-NEXT: orl %ebx, %edx
+; FALLBACK28-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK28-NEXT: movl 84(%esp,%esi), %ebx
+; FALLBACK28-NEXT: movl %ebx, %ebp
+; FALLBACK28-NEXT: movl %eax, %edx
+; FALLBACK28-NEXT: movb %dl, %cl
+; FALLBACK28-NEXT: shrl %cl, %ebp
+; FALLBACK28-NEXT: movl 88(%esp,%esi), %eax
+; FALLBACK28-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK28-NEXT: addl %eax, %eax
+; FALLBACK28-NEXT: movb %ch, %cl
+; FALLBACK28-NEXT: shll %cl, %eax
+; FALLBACK28-NEXT: orl %ebp, %eax
+; FALLBACK28-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK28-NEXT: movb %dl, %cl
+; FALLBACK28-NEXT: shrl %cl, %edi
+; FALLBACK28-NEXT: addl %ebx, %ebx
+; FALLBACK28-NEXT: movb %ch, %cl
+; FALLBACK28-NEXT: movb %ch, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
+; FALLBACK28-NEXT: shll %cl, %ebx
+; FALLBACK28-NEXT: orl %edi, %ebx
+; FALLBACK28-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK28-NEXT: movl 92(%esp,%esi), %ebx
+; FALLBACK28-NEXT: movl %ebx, %ebp
+; FALLBACK28-NEXT: movb %dl, %cl
+; FALLBACK28-NEXT: shrl %cl, %ebp
+; FALLBACK28-NEXT: movl 96(%esp,%esi), %edi
+; FALLBACK28-NEXT: leal (%edi,%edi), %eax
+; FALLBACK28-NEXT: movb %ch, %cl
+; FALLBACK28-NEXT: shll %cl, %eax
+; FALLBACK28-NEXT: orl %ebp, %eax
+; FALLBACK28-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK28-NEXT: movb %dl, %cl
+; FALLBACK28-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK28-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK28-NEXT: shrl %cl, %eax
+; FALLBACK28-NEXT: addl %ebx, %ebx
+; FALLBACK28-NEXT: movb %ch, %cl
+; FALLBACK28-NEXT: shll %cl, %ebx
+; FALLBACK28-NEXT: orl %eax, %ebx
+; FALLBACK28-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK28-NEXT: movl 100(%esp,%esi), %ebx
+; FALLBACK28-NEXT: movl %ebx, %ebp
+; FALLBACK28-NEXT: movb %dl, %cl
+; FALLBACK28-NEXT: shrl %cl, %ebp
+; FALLBACK28-NEXT: movl 104(%esp,%esi), %edx
+; FALLBACK28-NEXT: leal (%edx,%edx), %eax
+; FALLBACK28-NEXT: movb %ch, %cl
+; FALLBACK28-NEXT: shll %cl, %eax
+; FALLBACK28-NEXT: orl %ebp, %eax
+; FALLBACK28-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK28-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK28-NEXT: movb %al, %cl
+; FALLBACK28-NEXT: shrl %cl, %edi
+; FALLBACK28-NEXT: addl %ebx, %ebx
+; FALLBACK28-NEXT: movb %ch, %cl
+; FALLBACK28-NEXT: shll %cl, %ebx
+; FALLBACK28-NEXT: orl %edi, %ebx
+; FALLBACK28-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK28-NEXT: movl 108(%esp,%esi), %edi
+; FALLBACK28-NEXT: movl %edi, %ebp
+; FALLBACK28-NEXT: movl %eax, %ecx
+; FALLBACK28-NEXT: shrl %cl, %ebp
+; FALLBACK28-NEXT: movl 112(%esp,%esi), %ecx
+; FALLBACK28-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK28-NEXT: leal (%ecx,%ecx), %ebx
+; FALLBACK28-NEXT: movb {{[-0-9]+}}(%e{{[sb]}}p), %ch # 1-byte Reload
+; FALLBACK28-NEXT: movb %ch, %cl
+; FALLBACK28-NEXT: shll %cl, %ebx
+; FALLBACK28-NEXT: orl %ebp, %ebx
+; FALLBACK28-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK28-NEXT: movb %al, %cl
+; FALLBACK28-NEXT: shrl %cl, %edx
+; FALLBACK28-NEXT: addl %edi, %edi
+; FALLBACK28-NEXT: movb %ch, %cl
+; FALLBACK28-NEXT: shll %cl, %edi
+; FALLBACK28-NEXT: orl %edx, %edi
+; FALLBACK28-NEXT: movl %esi, %edx
+; FALLBACK28-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK28-NEXT: movl 116(%esp,%esi), %esi
+; FALLBACK28-NEXT: movl %esi, %ebx
+; FALLBACK28-NEXT: movb %al, %cl
+; FALLBACK28-NEXT: shrl %cl, %ebx
+; FALLBACK28-NEXT: movl 120(%esp,%edx), %eax
+; FALLBACK28-NEXT: leal (%eax,%eax), %ebp
+; FALLBACK28-NEXT: movb %ch, %cl
+; FALLBACK28-NEXT: shll %cl, %ebp
+; FALLBACK28-NEXT: orl %ebx, %ebp
+; FALLBACK28-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK28-NEXT: movb %dl, %cl
+; FALLBACK28-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; FALLBACK28-NEXT: shrl %cl, %ebx
+; FALLBACK28-NEXT: addl %esi, %esi
+; FALLBACK28-NEXT: movb %ch, %cl
+; FALLBACK28-NEXT: shll %cl, %esi
+; FALLBACK28-NEXT: orl %ebx, %esi
+; FALLBACK28-NEXT: movb %dl, %cl
+; FALLBACK28-NEXT: shrl %cl, %eax
+; FALLBACK28-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK28-NEXT: movl 124(%esp,%edx), %ebx
+; FALLBACK28-NEXT: leal (%ebx,%ebx), %edx
+; FALLBACK28-NEXT: movb %ch, %cl
+; FALLBACK28-NEXT: shll %cl, %edx
+; FALLBACK28-NEXT: orl %eax, %edx
+; FALLBACK28-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK28-NEXT: # kill: def $cl killed $cl killed $ecx
+; FALLBACK28-NEXT: sarl %cl, %ebx
+; FALLBACK28-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK28-NEXT: movl %ebx, 60(%eax)
+; FALLBACK28-NEXT: movl %edx, 56(%eax)
+; FALLBACK28-NEXT: movl %esi, 48(%eax)
+; FALLBACK28-NEXT: movl %ebp, 52(%eax)
+; FALLBACK28-NEXT: movl %edi, 40(%eax)
+; FALLBACK28-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK28-NEXT: movl %ecx, 44(%eax)
+; FALLBACK28-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK28-NEXT: movl %ecx, 32(%eax)
+; FALLBACK28-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK28-NEXT: movl %ecx, 36(%eax)
+; FALLBACK28-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK28-NEXT: movl %ecx, 24(%eax)
+; FALLBACK28-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK28-NEXT: movl %ecx, 28(%eax)
+; FALLBACK28-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK28-NEXT: movl %ecx, 16(%eax)
+; FALLBACK28-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK28-NEXT: movl %ecx, 20(%eax)
+; FALLBACK28-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK28-NEXT: movl %ecx, 8(%eax)
+; FALLBACK28-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK28-NEXT: movl %ecx, 12(%eax)
+; FALLBACK28-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK28-NEXT: movl %ecx, (%eax)
+; FALLBACK28-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK28-NEXT: movl %ecx, 4(%eax)
+; FALLBACK28-NEXT: addl $204, %esp
+; FALLBACK28-NEXT: popl %esi
+; FALLBACK28-NEXT: popl %edi
+; FALLBACK28-NEXT: popl %ebx
+; FALLBACK28-NEXT: popl %ebp
+; FALLBACK28-NEXT: vzeroupper
+; FALLBACK28-NEXT: retl
+;
+; FALLBACK29-LABEL: ashr_64bytes:
+; FALLBACK29: # %bb.0:
+; FALLBACK29-NEXT: pushl %ebp
+; FALLBACK29-NEXT: pushl %ebx
+; FALLBACK29-NEXT: pushl %edi
+; FALLBACK29-NEXT: pushl %esi
+; FALLBACK29-NEXT: subl $188, %esp
+; FALLBACK29-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK29-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK29-NEXT: vmovups (%eax), %ymm0
+; FALLBACK29-NEXT: vmovups 32(%eax), %xmm1
+; FALLBACK29-NEXT: movl 48(%eax), %edx
+; FALLBACK29-NEXT: movl 52(%eax), %esi
+; FALLBACK29-NEXT: movl 56(%eax), %edi
+; FALLBACK29-NEXT: movl 60(%eax), %eax
+; FALLBACK29-NEXT: movl (%ecx), %ecx
+; FALLBACK29-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK29-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; FALLBACK29-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; FALLBACK29-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK29-NEXT: vmovaps %xmm1, {{[0-9]+}}(%esp)
+; FALLBACK29-NEXT: vmovups %ymm0, {{[0-9]+}}(%esp)
+; FALLBACK29-NEXT: sarl $31, %eax
+; FALLBACK29-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK29-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK29-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK29-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK29-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK29-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK29-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK29-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK29-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK29-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK29-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK29-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK29-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK29-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK29-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK29-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK29-NEXT: movl %ecx, %ebp
+; FALLBACK29-NEXT: andl $60, %ebp
+; FALLBACK29-NEXT: movl 56(%esp,%ebp), %edx
+; FALLBACK29-NEXT: movl 52(%esp,%ebp), %eax
+; FALLBACK29-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK29-NEXT: shll $3, %ecx
+; FALLBACK29-NEXT: andl $24, %ecx
+; FALLBACK29-NEXT: shrdl %cl, %edx, %eax
+; FALLBACK29-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK29-NEXT: movl 64(%esp,%ebp), %edi
+; FALLBACK29-NEXT: movl 60(%esp,%ebp), %eax
+; FALLBACK29-NEXT: movl %eax, %esi
+; FALLBACK29-NEXT: shrdl %cl, %edi, %esi
+; FALLBACK29-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK29-NEXT: shrdl %cl, %eax, %edx
+; FALLBACK29-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK29-NEXT: movl 72(%esp,%ebp), %esi
+; FALLBACK29-NEXT: movl 68(%esp,%ebp), %eax
+; FALLBACK29-NEXT: movl %eax, %edx
+; FALLBACK29-NEXT: shrdl %cl, %esi, %edx
+; FALLBACK29-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK29-NEXT: shrdl %cl, %eax, %edi
+; FALLBACK29-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK29-NEXT: movl 80(%esp,%ebp), %edi
+; FALLBACK29-NEXT: movl 76(%esp,%ebp), %eax
+; FALLBACK29-NEXT: movl %eax, %edx
+; FALLBACK29-NEXT: shrdl %cl, %edi, %edx
+; FALLBACK29-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK29-NEXT: shrdl %cl, %eax, %esi
+; FALLBACK29-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK29-NEXT: movl 88(%esp,%ebp), %esi
+; FALLBACK29-NEXT: movl 84(%esp,%ebp), %eax
+; FALLBACK29-NEXT: movl %eax, %edx
+; FALLBACK29-NEXT: shrdl %cl, %esi, %edx
+; FALLBACK29-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK29-NEXT: movl %esi, %edx
+; FALLBACK29-NEXT: shrdl %cl, %eax, %edi
+; FALLBACK29-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK29-NEXT: movl 96(%esp,%ebp), %esi
+; FALLBACK29-NEXT: movl 92(%esp,%ebp), %eax
+; FALLBACK29-NEXT: movl %eax, %edi
+; FALLBACK29-NEXT: shrdl %cl, %esi, %edi
+; FALLBACK29-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK29-NEXT: shrdl %cl, %eax, %edx
+; FALLBACK29-NEXT: movl %edx, (%esp) # 4-byte Spill
+; FALLBACK29-NEXT: movl 104(%esp,%ebp), %edx
+; FALLBACK29-NEXT: movl 100(%esp,%ebp), %eax
+; FALLBACK29-NEXT: movl %eax, %edi
+; FALLBACK29-NEXT: shrdl %cl, %edx, %edi
+; FALLBACK29-NEXT: shrdl %cl, %eax, %esi
+; FALLBACK29-NEXT: movl 48(%esp,%ebp), %ebx
+; FALLBACK29-NEXT: movl 108(%esp,%ebp), %eax
+; FALLBACK29-NEXT: shrdl %cl, %eax, %edx
+; FALLBACK29-NEXT: movl {{[0-9]+}}(%esp), %ebp
+; FALLBACK29-NEXT: movl %edx, 56(%ebp)
+; FALLBACK29-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK29-NEXT: shrdl %cl, %edx, %ebx
+; FALLBACK29-NEXT: # kill: def $cl killed $cl killed $ecx
+; FALLBACK29-NEXT: sarl %cl, %eax
+; FALLBACK29-NEXT: movl %eax, 60(%ebp)
+; FALLBACK29-NEXT: movl %esi, 48(%ebp)
+; FALLBACK29-NEXT: movl %edi, 52(%ebp)
+; FALLBACK29-NEXT: movl (%esp), %eax # 4-byte Reload
+; FALLBACK29-NEXT: movl %eax, 40(%ebp)
+; FALLBACK29-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK29-NEXT: movl %eax, 44(%ebp)
+; FALLBACK29-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK29-NEXT: movl %eax, 32(%ebp)
+; FALLBACK29-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK29-NEXT: movl %eax, 36(%ebp)
+; FALLBACK29-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK29-NEXT: movl %eax, 24(%ebp)
+; FALLBACK29-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK29-NEXT: movl %eax, 28(%ebp)
+; FALLBACK29-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK29-NEXT: movl %eax, 16(%ebp)
+; FALLBACK29-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK29-NEXT: movl %eax, 20(%ebp)
+; FALLBACK29-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK29-NEXT: movl %eax, 8(%ebp)
+; FALLBACK29-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK29-NEXT: movl %eax, 12(%ebp)
+; FALLBACK29-NEXT: movl %ebx, (%ebp)
+; FALLBACK29-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK29-NEXT: movl %eax, 4(%ebp)
+; FALLBACK29-NEXT: addl $188, %esp
+; FALLBACK29-NEXT: popl %esi
+; FALLBACK29-NEXT: popl %edi
+; FALLBACK29-NEXT: popl %ebx
+; FALLBACK29-NEXT: popl %ebp
+; FALLBACK29-NEXT: vzeroupper
+; FALLBACK29-NEXT: retl
+;
+; FALLBACK30-LABEL: ashr_64bytes:
+; FALLBACK30: # %bb.0:
+; FALLBACK30-NEXT: pushl %ebp
+; FALLBACK30-NEXT: pushl %ebx
+; FALLBACK30-NEXT: pushl %edi
+; FALLBACK30-NEXT: pushl %esi
+; FALLBACK30-NEXT: subl $204, %esp
+; FALLBACK30-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK30-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK30-NEXT: vmovups (%ecx), %ymm0
+; FALLBACK30-NEXT: vmovups 32(%ecx), %xmm1
+; FALLBACK30-NEXT: movl 48(%ecx), %edx
+; FALLBACK30-NEXT: movl 52(%ecx), %esi
+; FALLBACK30-NEXT: movl 56(%ecx), %edi
+; FALLBACK30-NEXT: movl 60(%ecx), %ecx
+; FALLBACK30-NEXT: movl (%eax), %eax
+; FALLBACK30-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK30-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; FALLBACK30-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; FALLBACK30-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK30-NEXT: vmovaps %xmm1, {{[0-9]+}}(%esp)
+; FALLBACK30-NEXT: vmovups %ymm0, {{[0-9]+}}(%esp)
+; FALLBACK30-NEXT: sarl $31, %ecx
+; FALLBACK30-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK30-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK30-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK30-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK30-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK30-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK30-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK30-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK30-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK30-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK30-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK30-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK30-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK30-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK30-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK30-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK30-NEXT: movl %eax, %ecx
+; FALLBACK30-NEXT: leal (,%eax,8), %edx
+; FALLBACK30-NEXT: andl $24, %edx
+; FALLBACK30-NEXT: andl $60, %ecx
+; FALLBACK30-NEXT: movl 68(%esp,%ecx), %esi
+; FALLBACK30-NEXT: movl 72(%esp,%ecx), %edi
+; FALLBACK30-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: shrxl %edx, %esi, %eax
+; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: movl %edx, %ebx
+; FALLBACK30-NEXT: notb %bl
+; FALLBACK30-NEXT: leal (%edi,%edi), %ebp
+; FALLBACK30-NEXT: shlxl %ebx, %ebp, %eax
+; FALLBACK30-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: shrxl %edx, 64(%esp,%ecx), %edi
+; FALLBACK30-NEXT: addl %esi, %esi
+; FALLBACK30-NEXT: shlxl %ebx, %esi, %eax
+; FALLBACK30-NEXT: orl %edi, %eax
+; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: movl 80(%esp,%ecx), %esi
+; FALLBACK30-NEXT: leal (%esi,%esi), %edi
+; FALLBACK30-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK30-NEXT: movl 76(%esp,%ecx), %edi
+; FALLBACK30-NEXT: shrxl %edx, %edi, %ebp
+; FALLBACK30-NEXT: orl %ebp, %eax
+; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK30-NEXT: addl %edi, %edi
+; FALLBACK30-NEXT: shlxl %ebx, %edi, %edi
+; FALLBACK30-NEXT: orl %eax, %edi
+; FALLBACK30-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: movl 88(%esp,%ecx), %eax
+; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: leal (%eax,%eax), %edi
+; FALLBACK30-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK30-NEXT: movl 84(%esp,%ecx), %edi
+; FALLBACK30-NEXT: shrxl %edx, %edi, %ebp
+; FALLBACK30-NEXT: orl %ebp, %eax
+; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: shrxl %edx, %esi, %esi
+; FALLBACK30-NEXT: addl %edi, %edi
+; FALLBACK30-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK30-NEXT: orl %esi, %eax
+; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: movl 96(%esp,%ecx), %esi
+; FALLBACK30-NEXT: leal (%esi,%esi), %edi
+; FALLBACK30-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK30-NEXT: movl 92(%esp,%ecx), %edi
+; FALLBACK30-NEXT: shrxl %edx, %edi, %ebp
+; FALLBACK30-NEXT: orl %ebp, %eax
+; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK30-NEXT: addl %edi, %edi
+; FALLBACK30-NEXT: shlxl %ebx, %edi, %edi
+; FALLBACK30-NEXT: orl %eax, %edi
+; FALLBACK30-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: movl 104(%esp,%ecx), %eax
+; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: leal (%eax,%eax), %edi
+; FALLBACK30-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK30-NEXT: movl 100(%esp,%ecx), %edi
+; FALLBACK30-NEXT: shrxl %edx, %edi, %ebp
+; FALLBACK30-NEXT: orl %ebp, %eax
+; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: shrxl %edx, %esi, %esi
+; FALLBACK30-NEXT: addl %edi, %edi
+; FALLBACK30-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK30-NEXT: orl %esi, %eax
+; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: movl 112(%esp,%ecx), %eax
+; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: leal (%eax,%eax), %esi
+; FALLBACK30-NEXT: shlxl %ebx, %esi, %eax
+; FALLBACK30-NEXT: movl 108(%esp,%ecx), %esi
+; FALLBACK30-NEXT: movl %ecx, %edi
+; FALLBACK30-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: shrxl %edx, %esi, %ebp
+; FALLBACK30-NEXT: orl %ebp, %eax
+; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; FALLBACK30-NEXT: addl %esi, %esi
+; FALLBACK30-NEXT: shlxl %ebx, %esi, %esi
+; FALLBACK30-NEXT: orl %ecx, %esi
+; FALLBACK30-NEXT: movl 120(%esp,%edi), %ebp
+; FALLBACK30-NEXT: leal (%ebp,%ebp), %ecx
+; FALLBACK30-NEXT: shlxl %ebx, %ecx, %ecx
+; FALLBACK30-NEXT: movl 116(%esp,%edi), %eax
+; FALLBACK30-NEXT: shrxl %edx, %eax, %edi
+; FALLBACK30-NEXT: orl %edi, %ecx
+; FALLBACK30-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; FALLBACK30-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: addl %eax, %eax
+; FALLBACK30-NEXT: shlxl %ebx, %eax, %edi
+; FALLBACK30-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; FALLBACK30-NEXT: shrxl %edx, %ebp, %eax
+; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; FALLBACK30-NEXT: movl 124(%esp,%ebp), %ebp
+; FALLBACK30-NEXT: sarxl %edx, %ebp, %edx
+; FALLBACK30-NEXT: addl %ebp, %ebp
+; FALLBACK30-NEXT: shlxl %ebx, %ebp, %ebx
+; FALLBACK30-NEXT: orl %eax, %ebx
+; FALLBACK30-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK30-NEXT: movl %edx, 60(%eax)
+; FALLBACK30-NEXT: movl %ebx, 56(%eax)
+; FALLBACK30-NEXT: movl %edi, 48(%eax)
+; FALLBACK30-NEXT: movl %ecx, 52(%eax)
+; FALLBACK30-NEXT: movl %esi, 40(%eax)
+; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK30-NEXT: movl %ecx, 44(%eax)
+; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK30-NEXT: movl %ecx, 32(%eax)
+; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK30-NEXT: movl %ecx, 36(%eax)
+; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK30-NEXT: movl %ecx, 24(%eax)
+; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK30-NEXT: movl %ecx, 28(%eax)
+; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK30-NEXT: movl %ecx, 16(%eax)
+; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK30-NEXT: movl %ecx, 20(%eax)
+; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK30-NEXT: movl %ecx, 8(%eax)
+; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK30-NEXT: movl %ecx, 12(%eax)
+; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK30-NEXT: movl %ecx, (%eax)
+; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK30-NEXT: movl %ecx, 4(%eax)
+; FALLBACK30-NEXT: addl $204, %esp
+; FALLBACK30-NEXT: popl %esi
+; FALLBACK30-NEXT: popl %edi
+; FALLBACK30-NEXT: popl %ebx
+; FALLBACK30-NEXT: popl %ebp
+; FALLBACK30-NEXT: vzeroupper
+; FALLBACK30-NEXT: retl
+;
+; FALLBACK31-LABEL: ashr_64bytes:
+; FALLBACK31: # %bb.0:
+; FALLBACK31-NEXT: pushl %ebp
+; FALLBACK31-NEXT: pushl %ebx
+; FALLBACK31-NEXT: pushl %edi
+; FALLBACK31-NEXT: pushl %esi
+; FALLBACK31-NEXT: subl $188, %esp
+; FALLBACK31-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK31-NEXT: movl {{[0-9]+}}(%esp), %eax
+; FALLBACK31-NEXT: vmovups (%eax), %ymm0
+; FALLBACK31-NEXT: vmovups 32(%eax), %xmm1
+; FALLBACK31-NEXT: movl 48(%eax), %edx
+; FALLBACK31-NEXT: movl 52(%eax), %esi
+; FALLBACK31-NEXT: movl 56(%eax), %edi
+; FALLBACK31-NEXT: movl 60(%eax), %eax
+; FALLBACK31-NEXT: movl (%ecx), %ecx
+; FALLBACK31-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK31-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; FALLBACK31-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; FALLBACK31-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK31-NEXT: vmovaps %xmm1, {{[0-9]+}}(%esp)
+; FALLBACK31-NEXT: vmovups %ymm0, {{[0-9]+}}(%esp)
+; FALLBACK31-NEXT: sarl $31, %eax
+; FALLBACK31-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK31-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK31-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK31-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK31-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK31-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK31-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK31-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK31-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK31-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK31-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK31-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK31-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK31-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK31-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK31-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK31-NEXT: movl %ecx, %ebp
+; FALLBACK31-NEXT: andl $60, %ebp
+; FALLBACK31-NEXT: movl 56(%esp,%ebp), %edx
+; FALLBACK31-NEXT: movl 52(%esp,%ebp), %eax
+; FALLBACK31-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK31-NEXT: shll $3, %ecx
+; FALLBACK31-NEXT: andl $24, %ecx
+; FALLBACK31-NEXT: shrdl %cl, %edx, %eax
+; FALLBACK31-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK31-NEXT: movl 64(%esp,%ebp), %edi
+; FALLBACK31-NEXT: movl 60(%esp,%ebp), %eax
+; FALLBACK31-NEXT: movl %eax, %esi
+; FALLBACK31-NEXT: shrdl %cl, %edi, %esi
+; FALLBACK31-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK31-NEXT: shrdl %cl, %eax, %edx
+; FALLBACK31-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK31-NEXT: movl 72(%esp,%ebp), %esi
+; FALLBACK31-NEXT: movl 68(%esp,%ebp), %eax
+; FALLBACK31-NEXT: movl %eax, %edx
+; FALLBACK31-NEXT: shrdl %cl, %esi, %edx
+; FALLBACK31-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK31-NEXT: shrdl %cl, %eax, %edi
+; FALLBACK31-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK31-NEXT: movl 80(%esp,%ebp), %edi
+; FALLBACK31-NEXT: movl 76(%esp,%ebp), %eax
+; FALLBACK31-NEXT: movl %eax, %edx
+; FALLBACK31-NEXT: shrdl %cl, %edi, %edx
+; FALLBACK31-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK31-NEXT: shrdl %cl, %eax, %esi
+; FALLBACK31-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK31-NEXT: movl 88(%esp,%ebp), %ebx
+; FALLBACK31-NEXT: movl 84(%esp,%ebp), %eax
+; FALLBACK31-NEXT: movl %eax, %edx
+; FALLBACK31-NEXT: shrdl %cl, %ebx, %edx
+; FALLBACK31-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK31-NEXT: shrdl %cl, %eax, %edi
+; FALLBACK31-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK31-NEXT: movl 96(%esp,%ebp), %esi
+; FALLBACK31-NEXT: movl 92(%esp,%ebp), %eax
+; FALLBACK31-NEXT: movl %eax, %edx
+; FALLBACK31-NEXT: shrdl %cl, %esi, %edx
+; FALLBACK31-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK31-NEXT: shrdl %cl, %eax, %ebx
+; FALLBACK31-NEXT: movl 104(%esp,%ebp), %eax
+; FALLBACK31-NEXT: movl 100(%esp,%ebp), %edi
+; FALLBACK31-NEXT: movl %edi, %edx
+; FALLBACK31-NEXT: shrdl %cl, %eax, %edx
+; FALLBACK31-NEXT: shrdl %cl, %edi, %esi
+; FALLBACK31-NEXT: movl 48(%esp,%ebp), %edi
+; FALLBACK31-NEXT: movl 108(%esp,%ebp), %ebp
+; FALLBACK31-NEXT: movl %ebp, (%esp) # 4-byte Spill
+; FALLBACK31-NEXT: shrdl %cl, %ebp, %eax
+; FALLBACK31-NEXT: movl {{[0-9]+}}(%esp), %ebp
+; FALLBACK31-NEXT: movl %eax, 56(%ebp)
+; FALLBACK31-NEXT: movl %esi, 48(%ebp)
+; FALLBACK31-NEXT: movl %edx, 52(%ebp)
+; FALLBACK31-NEXT: movl %ebx, 40(%ebp)
+; FALLBACK31-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK31-NEXT: movl %eax, 44(%ebp)
+; FALLBACK31-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK31-NEXT: movl %eax, 32(%ebp)
+; FALLBACK31-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK31-NEXT: movl %eax, 36(%ebp)
+; FALLBACK31-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK31-NEXT: movl %eax, 24(%ebp)
+; FALLBACK31-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK31-NEXT: movl %eax, 28(%ebp)
+; FALLBACK31-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK31-NEXT: movl %eax, 16(%ebp)
+; FALLBACK31-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK31-NEXT: movl %eax, 20(%ebp)
+; FALLBACK31-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK31-NEXT: movl %eax, 8(%ebp)
+; FALLBACK31-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK31-NEXT: movl %eax, 12(%ebp)
+; FALLBACK31-NEXT: sarxl %ecx, (%esp), %eax # 4-byte Folded Reload
+; FALLBACK31-NEXT: # kill: def $cl killed $cl killed $ecx
+; FALLBACK31-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; FALLBACK31-NEXT: shrdl %cl, %edx, %edi
+; FALLBACK31-NEXT: movl %edi, (%ebp)
+; FALLBACK31-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK31-NEXT: movl %ecx, 4(%ebp)
+; FALLBACK31-NEXT: movl %eax, 60(%ebp)
+; FALLBACK31-NEXT: addl $188, %esp
+; FALLBACK31-NEXT: popl %esi
+; FALLBACK31-NEXT: popl %edi
+; FALLBACK31-NEXT: popl %ebx
+; FALLBACK31-NEXT: popl %ebp
+; FALLBACK31-NEXT: vzeroupper
+; FALLBACK31-NEXT: retl
+ %src = load i512, ptr %src.ptr, align 1
+ %byteOff = load i512, ptr %byteOff.ptr, align 1
+ %bitOff = shl i512 %byteOff, 3
+ %res = ashr i512 %src, %bitOff
+ store i512 %res, ptr %dst, align 1
+ ret void
+}
+
+define void @ashr_64bytes_qwordOff(ptr %src.ptr, ptr %qwordOff.ptr, ptr %dst) nounwind {
+; X64-SSE2-LABEL: ashr_64bytes_qwordOff:
; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: pushq %rbx
; X64-SSE2-NEXT: movq (%rdi), %rax
@@ -2394,15 +24296,15 @@ define void @ashr_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; X64-SSE2-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
; X64-SSE2-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
; X64-SSE2-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
-; X64-SSE2-NEXT: andl $63, %esi
-; X64-SSE2-NEXT: movq -128(%rsp,%rsi), %rax
-; X64-SSE2-NEXT: movq -120(%rsp,%rsi), %rcx
-; X64-SSE2-NEXT: movq -104(%rsp,%rsi), %rdi
-; X64-SSE2-NEXT: movq -112(%rsp,%rsi), %r8
-; X64-SSE2-NEXT: movq -88(%rsp,%rsi), %r9
-; X64-SSE2-NEXT: movq -96(%rsp,%rsi), %r10
-; X64-SSE2-NEXT: movq -72(%rsp,%rsi), %r11
-; X64-SSE2-NEXT: movq -80(%rsp,%rsi), %rsi
+; X64-SSE2-NEXT: andl $7, %esi
+; X64-SSE2-NEXT: movq -128(%rsp,%rsi,8), %rax
+; X64-SSE2-NEXT: movq -120(%rsp,%rsi,8), %rcx
+; X64-SSE2-NEXT: movq -104(%rsp,%rsi,8), %rdi
+; X64-SSE2-NEXT: movq -112(%rsp,%rsi,8), %r8
+; X64-SSE2-NEXT: movq -88(%rsp,%rsi,8), %r9
+; X64-SSE2-NEXT: movq -96(%rsp,%rsi,8), %r10
+; X64-SSE2-NEXT: movq -72(%rsp,%rsi,8), %r11
+; X64-SSE2-NEXT: movq -80(%rsp,%rsi,8), %rsi
; X64-SSE2-NEXT: movq %rsi, 48(%rdx)
; X64-SSE2-NEXT: movq %r11, 56(%rdx)
; X64-SSE2-NEXT: movq %r10, 32(%rdx)
@@ -2414,8 +24316,9 @@ define void @ashr_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; X64-SSE2-NEXT: popq %rbx
; X64-SSE2-NEXT: retq
;
-; X64-SSE42-LABEL: ashr_64bytes:
+; X64-SSE42-LABEL: ashr_64bytes_qwordOff:
; X64-SSE42: # %bb.0:
+; X64-SSE42-NEXT: pushq %rax
; X64-SSE42-NEXT: movups (%rdi), %xmm0
; X64-SSE42-NEXT: movups 16(%rdi), %xmm1
; X64-SSE42-NEXT: movups 32(%rdi), %xmm2
@@ -2424,9 +24327,9 @@ define void @ashr_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; X64-SSE42-NEXT: movl (%rsi), %esi
; X64-SSE42-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
; X64-SSE42-NEXT: movq %rax, -{{[0-9]+}}(%rsp)
-; X64-SSE42-NEXT: movups %xmm2, -{{[0-9]+}}(%rsp)
-; X64-SSE42-NEXT: movups %xmm1, -{{[0-9]+}}(%rsp)
-; X64-SSE42-NEXT: movups %xmm0, -{{[0-9]+}}(%rsp)
+; X64-SSE42-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-SSE42-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; X64-SSE42-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; X64-SSE42-NEXT: sarq $63, %rcx
; X64-SSE42-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
; X64-SSE42-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
@@ -2436,19 +24339,21 @@ define void @ashr_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; X64-SSE42-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
; X64-SSE42-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
; X64-SSE42-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
-; X64-SSE42-NEXT: andl $63, %esi
-; X64-SSE42-NEXT: movups -128(%rsp,%rsi), %xmm0
-; X64-SSE42-NEXT: movups -112(%rsp,%rsi), %xmm1
-; X64-SSE42-NEXT: movups -96(%rsp,%rsi), %xmm2
-; X64-SSE42-NEXT: movups -80(%rsp,%rsi), %xmm3
+; X64-SSE42-NEXT: andl $7, %esi
+; X64-SSE42-NEXT: movups -128(%rsp,%rsi,8), %xmm0
+; X64-SSE42-NEXT: movups -112(%rsp,%rsi,8), %xmm1
+; X64-SSE42-NEXT: movups -96(%rsp,%rsi,8), %xmm2
+; X64-SSE42-NEXT: movups -80(%rsp,%rsi,8), %xmm3
; X64-SSE42-NEXT: movups %xmm3, 48(%rdx)
; X64-SSE42-NEXT: movups %xmm1, 16(%rdx)
; X64-SSE42-NEXT: movups %xmm2, 32(%rdx)
; X64-SSE42-NEXT: movups %xmm0, (%rdx)
+; X64-SSE42-NEXT: popq %rax
; X64-SSE42-NEXT: retq
;
-; X64-AVX-LABEL: ashr_64bytes:
+; X64-AVX-LABEL: ashr_64bytes_qwordOff:
; X64-AVX: # %bb.0:
+; X64-AVX-NEXT: pushq %rax
; X64-AVX-NEXT: vmovups (%rdi), %ymm0
; X64-AVX-NEXT: vmovups 32(%rdi), %xmm1
; X64-AVX-NEXT: movq 48(%rdi), %rax
@@ -2456,7 +24361,7 @@ define void @ashr_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; X64-AVX-NEXT: movl (%rsi), %esi
; X64-AVX-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
; X64-AVX-NEXT: movq %rax, -{{[0-9]+}}(%rsp)
-; X64-AVX-NEXT: vmovups %xmm1, -{{[0-9]+}}(%rsp)
+; X64-AVX-NEXT: vmovaps %xmm1, -{{[0-9]+}}(%rsp)
; X64-AVX-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
; X64-AVX-NEXT: sarq $63, %rcx
; X64-AVX-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
@@ -2467,25 +24372,26 @@ define void @ashr_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; X64-AVX-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
; X64-AVX-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
; X64-AVX-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
-; X64-AVX-NEXT: andl $63, %esi
-; X64-AVX-NEXT: vmovups -128(%rsp,%rsi), %xmm0
-; X64-AVX-NEXT: vmovups -112(%rsp,%rsi), %xmm1
-; X64-AVX-NEXT: vmovups -96(%rsp,%rsi), %xmm2
-; X64-AVX-NEXT: vmovups -80(%rsp,%rsi), %xmm3
+; X64-AVX-NEXT: andl $7, %esi
+; X64-AVX-NEXT: vmovups -128(%rsp,%rsi,8), %xmm0
+; X64-AVX-NEXT: vmovups -112(%rsp,%rsi,8), %xmm1
+; X64-AVX-NEXT: vmovups -96(%rsp,%rsi,8), %xmm2
+; X64-AVX-NEXT: vmovups -80(%rsp,%rsi,8), %xmm3
; X64-AVX-NEXT: vmovups %xmm3, 48(%rdx)
; X64-AVX-NEXT: vmovups %xmm1, 16(%rdx)
; X64-AVX-NEXT: vmovups %xmm2, 32(%rdx)
; X64-AVX-NEXT: vmovups %xmm0, (%rdx)
+; X64-AVX-NEXT: popq %rax
; X64-AVX-NEXT: vzeroupper
; X64-AVX-NEXT: retq
;
-; X86-SSE2-LABEL: ashr_64bytes:
+; X86-SSE2-LABEL: ashr_64bytes_qwordOff:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pushl %ebp
; X86-SSE2-NEXT: pushl %ebx
; X86-SSE2-NEXT: pushl %edi
; X86-SSE2-NEXT: pushl %esi
-; X86-SSE2-NEXT: subl $168, %esp
+; X86-SSE2-NEXT: subl $188, %esp
; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE2-NEXT: movl (%eax), %ecx
; X86-SSE2-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
@@ -2506,7 +24412,7 @@ define void @ashr_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; X86-SSE2-NEXT: movl 32(%eax), %ecx
; X86-SSE2-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-SSE2-NEXT: movl 36(%eax), %ecx
-; X86-SSE2-NEXT: movl %ecx, (%esp) # 4-byte Spill
+; X86-SSE2-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-SSE2-NEXT: movl 40(%eax), %ebp
; X86-SSE2-NEXT: movl 44(%eax), %ebx
; X86-SSE2-NEXT: movl 48(%eax), %edi
@@ -2520,7 +24426,7 @@ define void @ashr_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; X86-SSE2-NEXT: movl %edi, {{[0-9]+}}(%esp)
; X86-SSE2-NEXT: movl %ebx, {{[0-9]+}}(%esp)
; X86-SSE2-NEXT: movl %ebp, {{[0-9]+}}(%esp)
-; X86-SSE2-NEXT: movl (%esp), %edx # 4-byte Reload
+; X86-SSE2-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
; X86-SSE2-NEXT: movl %edx, {{[0-9]+}}(%esp)
; X86-SSE2-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
; X86-SSE2-NEXT: movl %edx, {{[0-9]+}}(%esp)
@@ -2558,33 +24464,33 @@ define void @ashr_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; X86-SSE2-NEXT: movl %ecx, {{[0-9]+}}(%esp)
; X86-SSE2-NEXT: movl %ecx, {{[0-9]+}}(%esp)
; X86-SSE2-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; X86-SSE2-NEXT: andl $63, %eax
-; X86-SSE2-NEXT: movl 40(%esp,%eax), %ecx
+; X86-SSE2-NEXT: andl $7, %eax
+; X86-SSE2-NEXT: movl 48(%esp,%eax,8), %ecx
+; X86-SSE2-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-SSE2-NEXT: movl 52(%esp,%eax,8), %ecx
; X86-SSE2-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-SSE2-NEXT: movl 44(%esp,%eax), %ecx
+; X86-SSE2-NEXT: movl 60(%esp,%eax,8), %ecx
; X86-SSE2-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-SSE2-NEXT: movl 52(%esp,%eax), %ecx
+; X86-SSE2-NEXT: movl 56(%esp,%eax,8), %ecx
; X86-SSE2-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-SSE2-NEXT: movl 48(%esp,%eax), %ecx
+; X86-SSE2-NEXT: movl 68(%esp,%eax,8), %ecx
; X86-SSE2-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-SSE2-NEXT: movl 60(%esp,%eax), %ecx
+; X86-SSE2-NEXT: movl 64(%esp,%eax,8), %ecx
; X86-SSE2-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-SSE2-NEXT: movl 56(%esp,%eax), %ecx
+; X86-SSE2-NEXT: movl 76(%esp,%eax,8), %ecx
; X86-SSE2-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-SSE2-NEXT: movl 68(%esp,%eax), %ecx
+; X86-SSE2-NEXT: movl 72(%esp,%eax,8), %ecx
; X86-SSE2-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-SSE2-NEXT: movl 64(%esp,%eax), %ecx
+; X86-SSE2-NEXT: movl 84(%esp,%eax,8), %ecx
; X86-SSE2-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-SSE2-NEXT: movl 76(%esp,%eax), %ecx
+; X86-SSE2-NEXT: movl 80(%esp,%eax,8), %ecx
; X86-SSE2-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-SSE2-NEXT: movl 72(%esp,%eax), %ecx
-; X86-SSE2-NEXT: movl %ecx, (%esp) # 4-byte Spill
-; X86-SSE2-NEXT: movl 84(%esp,%eax), %ebp
-; X86-SSE2-NEXT: movl 80(%esp,%eax), %ebx
-; X86-SSE2-NEXT: movl 92(%esp,%eax), %edi
-; X86-SSE2-NEXT: movl 88(%esp,%eax), %esi
-; X86-SSE2-NEXT: movl 100(%esp,%eax), %edx
-; X86-SSE2-NEXT: movl 96(%esp,%eax), %ecx
+; X86-SSE2-NEXT: movl 92(%esp,%eax,8), %ebp
+; X86-SSE2-NEXT: movl 88(%esp,%eax,8), %ebx
+; X86-SSE2-NEXT: movl 100(%esp,%eax,8), %edi
+; X86-SSE2-NEXT: movl 96(%esp,%eax,8), %esi
+; X86-SSE2-NEXT: movl 108(%esp,%eax,8), %edx
+; X86-SSE2-NEXT: movl 104(%esp,%eax,8), %ecx
; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE2-NEXT: movl %ecx, 56(%eax)
; X86-SSE2-NEXT: movl %edx, 60(%eax)
@@ -2592,7 +24498,7 @@ define void @ashr_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; X86-SSE2-NEXT: movl %edi, 52(%eax)
; X86-SSE2-NEXT: movl %ebx, 40(%eax)
; X86-SSE2-NEXT: movl %ebp, 44(%eax)
-; X86-SSE2-NEXT: movl (%esp), %ecx # 4-byte Reload
+; X86-SSE2-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-SSE2-NEXT: movl %ecx, 32(%eax)
; X86-SSE2-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-SSE2-NEXT: movl %ecx, 36(%eax)
@@ -2612,14 +24518,14 @@ define void @ashr_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; X86-SSE2-NEXT: movl %ecx, (%eax)
; X86-SSE2-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-SSE2-NEXT: movl %ecx, 4(%eax)
-; X86-SSE2-NEXT: addl $168, %esp
+; X86-SSE2-NEXT: addl $188, %esp
; X86-SSE2-NEXT: popl %esi
; X86-SSE2-NEXT: popl %edi
; X86-SSE2-NEXT: popl %ebx
; X86-SSE2-NEXT: popl %ebp
; X86-SSE2-NEXT: retl
;
-; X86-SSE42-LABEL: ashr_64bytes:
+; X86-SSE42-LABEL: ashr_64bytes_qwordOff:
; X86-SSE42: # %bb.0:
; X86-SSE42-NEXT: pushl %ebx
; X86-SSE42-NEXT: pushl %edi
@@ -2640,9 +24546,9 @@ define void @ashr_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; X86-SSE42-NEXT: movl %ebx, {{[0-9]+}}(%esp)
; X86-SSE42-NEXT: movl %edi, {{[0-9]+}}(%esp)
; X86-SSE42-NEXT: movl %esi, {{[0-9]+}}(%esp)
-; X86-SSE42-NEXT: movups %xmm2, {{[0-9]+}}(%esp)
-; X86-SSE42-NEXT: movups %xmm1, {{[0-9]+}}(%esp)
-; X86-SSE42-NEXT: movups %xmm0, (%esp)
+; X86-SSE42-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-SSE42-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-SSE42-NEXT: movaps %xmm0, (%esp)
; X86-SSE42-NEXT: sarl $31, %edx
; X86-SSE42-NEXT: movl %edx, {{[0-9]+}}(%esp)
; X86-SSE42-NEXT: movl %edx, {{[0-9]+}}(%esp)
@@ -2660,11 +24566,11 @@ define void @ashr_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; X86-SSE42-NEXT: movl %edx, {{[0-9]+}}(%esp)
; X86-SSE42-NEXT: movl %edx, {{[0-9]+}}(%esp)
; X86-SSE42-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; X86-SSE42-NEXT: andl $63, %ecx
-; X86-SSE42-NEXT: movups (%esp,%ecx), %xmm0
-; X86-SSE42-NEXT: movups 16(%esp,%ecx), %xmm1
-; X86-SSE42-NEXT: movups 32(%esp,%ecx), %xmm2
-; X86-SSE42-NEXT: movups 48(%esp,%ecx), %xmm3
+; X86-SSE42-NEXT: andl $7, %ecx
+; X86-SSE42-NEXT: movups (%esp,%ecx,8), %xmm0
+; X86-SSE42-NEXT: movups 16(%esp,%ecx,8), %xmm1
+; X86-SSE42-NEXT: movups 32(%esp,%ecx,8), %xmm2
+; X86-SSE42-NEXT: movups 48(%esp,%ecx,8), %xmm3
; X86-SSE42-NEXT: movups %xmm3, 48(%eax)
; X86-SSE42-NEXT: movups %xmm2, 32(%eax)
; X86-SSE42-NEXT: movups %xmm1, 16(%eax)
@@ -2675,7 +24581,7 @@ define void @ashr_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; X86-SSE42-NEXT: popl %ebx
; X86-SSE42-NEXT: retl
;
-; X86-AVX-LABEL: ashr_64bytes:
+; X86-AVX-LABEL: ashr_64bytes_qwordOff:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: pushl %ebx
; X86-AVX-NEXT: pushl %edi
@@ -2695,7 +24601,7 @@ define void @ashr_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; X86-AVX-NEXT: movl %ebx, {{[0-9]+}}(%esp)
; X86-AVX-NEXT: movl %edi, {{[0-9]+}}(%esp)
; X86-AVX-NEXT: movl %esi, {{[0-9]+}}(%esp)
-; X86-AVX-NEXT: vmovups %xmm1, {{[0-9]+}}(%esp)
+; X86-AVX-NEXT: vmovaps %xmm1, {{[0-9]+}}(%esp)
; X86-AVX-NEXT: vmovups %ymm0, (%esp)
; X86-AVX-NEXT: sarl $31, %edx
; X86-AVX-NEXT: movl %edx, {{[0-9]+}}(%esp)
@@ -2714,11 +24620,11 @@ define void @ashr_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; X86-AVX-NEXT: movl %edx, {{[0-9]+}}(%esp)
; X86-AVX-NEXT: movl %edx, {{[0-9]+}}(%esp)
; X86-AVX-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; X86-AVX-NEXT: andl $63, %ecx
-; X86-AVX-NEXT: vmovups (%esp,%ecx), %xmm0
-; X86-AVX-NEXT: vmovups 16(%esp,%ecx), %xmm1
-; X86-AVX-NEXT: vmovups 32(%esp,%ecx), %xmm2
-; X86-AVX-NEXT: vmovups 48(%esp,%ecx), %xmm3
+; X86-AVX-NEXT: andl $7, %ecx
+; X86-AVX-NEXT: vmovups (%esp,%ecx,8), %xmm0
+; X86-AVX-NEXT: vmovups 16(%esp,%ecx,8), %xmm1
+; X86-AVX-NEXT: vmovups 32(%esp,%ecx,8), %xmm2
+; X86-AVX-NEXT: vmovups 48(%esp,%ecx,8), %xmm3
; X86-AVX-NEXT: vmovups %xmm3, 48(%eax)
; X86-AVX-NEXT: vmovups %xmm2, 32(%eax)
; X86-AVX-NEXT: vmovups %xmm1, 16(%eax)
@@ -2730,45 +24636,14 @@ define void @ashr_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; X86-AVX-NEXT: vzeroupper
; X86-AVX-NEXT: retl
%src = load i512, ptr %src.ptr, align 1
- %byteOff = load i512, ptr %byteOff.ptr, align 1
- %bitOff = shl i512 %byteOff, 3
+ %qwordOff = load i512, ptr %qwordOff.ptr, align 1
+ %bitOff = shl i512 %qwordOff, 6
%res = ashr i512 %src, %bitOff
store i512 %res, ptr %dst, align 1
ret void
}
+
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; ALL: {{.*}}
-; FALLBACK0: {{.*}}
-; FALLBACK1: {{.*}}
-; FALLBACK10: {{.*}}
-; FALLBACK11: {{.*}}
-; FALLBACK12: {{.*}}
-; FALLBACK13: {{.*}}
-; FALLBACK14: {{.*}}
-; FALLBACK15: {{.*}}
-; FALLBACK16: {{.*}}
-; FALLBACK17: {{.*}}
-; FALLBACK18: {{.*}}
-; FALLBACK19: {{.*}}
-; FALLBACK2: {{.*}}
-; FALLBACK20: {{.*}}
-; FALLBACK21: {{.*}}
-; FALLBACK22: {{.*}}
-; FALLBACK23: {{.*}}
-; FALLBACK24: {{.*}}
-; FALLBACK25: {{.*}}
-; FALLBACK26: {{.*}}
-; FALLBACK27: {{.*}}
-; FALLBACK28: {{.*}}
-; FALLBACK29: {{.*}}
-; FALLBACK3: {{.*}}
-; FALLBACK30: {{.*}}
-; FALLBACK31: {{.*}}
-; FALLBACK4: {{.*}}
-; FALLBACK5: {{.*}}
-; FALLBACK6: {{.*}}
-; FALLBACK7: {{.*}}
-; FALLBACK8: {{.*}}
-; FALLBACK9: {{.*}}
; X64: {{.*}}
; X86: {{.*}}
diff --git a/llvm/test/CodeGen/X86/wide-scalar-shift-legalization.ll b/llvm/test/CodeGen/X86/wide-scalar-shift-legalization.ll
index f84131d..8c08734 100644
--- a/llvm/test/CodeGen/X86/wide-scalar-shift-legalization.ll
+++ b/llvm/test/CodeGen/X86/wide-scalar-shift-legalization.ll
@@ -588,61 +588,58 @@ define void @lshr_16bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X86-NO-BMI2-NO-SHLD-NEXT: pushl %ebx
; X86-NO-BMI2-NO-SHLD-NEXT: pushl %edi
; X86-NO-BMI2-NO-SHLD-NEXT: pushl %esi
-; X86-NO-BMI2-NO-SHLD-NEXT: subl $36, %esp
+; X86-NO-BMI2-NO-SHLD-NEXT: subl $44, %esp
; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl (%ecx), %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl (%ecx), %ebx
; X86-NO-BMI2-NO-SHLD-NEXT: movl 4(%ecx), %esi
; X86-NO-BMI2-NO-SHLD-NEXT: movl 8(%ecx), %edi
; X86-NO-BMI2-NO-SHLD-NEXT: movl 12(%ecx), %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: movb (%eax), %ah
+; X86-NO-BMI2-NO-SHLD-NEXT: movb (%eax), %dh
+; X86-NO-BMI2-NO-SHLD-NEXT: xorps %xmm0, %xmm0
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
; X86-NO-BMI2-NO-SHLD-NEXT: movl %edi, {{[0-9]+}}(%esp)
; X86-NO-BMI2-NO-SHLD-NEXT: movl %esi, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-NO-SHLD-NEXT: movb %ah, %al
-; X86-NO-BMI2-NO-SHLD-NEXT: andb $7, %al
-; X86-NO-BMI2-NO-SHLD-NEXT: shrb $3, %ah
-; X86-NO-BMI2-NO-SHLD-NEXT: andb $15, %ah
-; X86-NO-BMI2-NO-SHLD-NEXT: movzbl %ah, %ebp
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 8(%esp,%ebp), %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebx, (%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %dh, %cl
+; X86-NO-BMI2-NO-SHLD-NEXT: shrb $3, %cl
+; X86-NO-BMI2-NO-SHLD-NEXT: andb $12, %cl
+; X86-NO-BMI2-NO-SHLD-NEXT: movzbl %cl, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 4(%esp,%eax), %esi
; X86-NO-BMI2-NO-SHLD-NEXT: movl %esi, %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %dh, %cl
; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %edx
-; X86-NO-BMI2-NO-SHLD-NEXT: notb %dl
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 12(%esp,%ebp), %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, (%esp) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: leal (%ecx,%ecx), %edi
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %dh, %dl
+; X86-NO-BMI2-NO-SHLD-NEXT: andb $31, %dl
+; X86-NO-BMI2-NO-SHLD-NEXT: xorb $31, %dl
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 8(%esp,%eax), %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: leal (%ebp,%ebp), %edi
; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, %ecx
; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %edi
; X86-NO-BMI2-NO-SHLD-NEXT: orl %ebx, %edi
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 4(%esp,%ebp), %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl (%esp,%eax), %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %dh, %cl
; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %ebx
; X86-NO-BMI2-NO-SHLD-NEXT: addl %esi, %esi
; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, %ecx
; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %esi
; X86-NO-BMI2-NO-SHLD-NEXT: orl %ebx, %esi
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, (%esp) # 4-byte Folded Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 16(%esp,%ebp), %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: leal (%ebx,%ebx), %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %dh, %cl
+; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 12(%esp,%eax), %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: leal (%ebx,%ebx), %eax
; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %ebp
-; X86-NO-BMI2-NO-SHLD-NEXT: orl (%esp), %ebp # 4-byte Folded Reload
-; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %ebp, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %dh, %cl
; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebx, 12(%edx)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebp, 8(%edx)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %esi, (%edx)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %edi, 4(%edx)
-; X86-NO-BMI2-NO-SHLD-NEXT: addl $36, %esp
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebx, 12(%ebp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, 8(%ebp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %esi, (%ebp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %edi, 4(%ebp)
+; X86-NO-BMI2-NO-SHLD-NEXT: addl $44, %esp
; X86-NO-BMI2-NO-SHLD-NEXT: popl %esi
; X86-NO-BMI2-NO-SHLD-NEXT: popl %edi
; X86-NO-BMI2-NO-SHLD-NEXT: popl %ebx
@@ -655,50 +652,39 @@ define void @lshr_16bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X86-NO-BMI2-HAVE-SHLD-NEXT: pushl %ebx
; X86-NO-BMI2-HAVE-SHLD-NEXT: pushl %edi
; X86-NO-BMI2-HAVE-SHLD-NEXT: pushl %esi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: subl $32, %esp
+; X86-NO-BMI2-HAVE-SHLD-NEXT: subl $44, %esp
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl (%ecx), %edx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 4(%ecx), %esi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 8(%ecx), %edi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 12(%ecx), %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movb (%eax), %ah
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl (%edx), %esi
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 4(%edx), %edi
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 8(%edx), %ebx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 12(%edx), %edx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movzbl (%ecx), %ecx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: xorps %xmm0, %xmm0
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ebx, {{[0-9]+}}(%esp)
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edi, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %esi, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edx, (%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movb %ah, %al
-; X86-NO-BMI2-HAVE-SHLD-NEXT: andb $7, %al
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shrb $3, %ah
-; X86-NO-BMI2-HAVE-SHLD-NEXT: andb $15, %ah
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movzbl %ah, %ebp
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 4(%esp,%ebp), %esi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %esi, %edx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shrl %cl, %edx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: notb %cl
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 8(%esp,%ebp), %ebx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: leal (%ebx,%ebx), %edi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shll %cl, %edi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: orl %edx, %edi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl (%esp,%ebp), %edx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 12(%esp,%ebp), %ebp
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %ebp, %ebx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ebx, 8(%ecx)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %esi, %edx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shrl %cl, %ebp
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ebp, 12(%eax)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %esi, (%esp)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, %edx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: shrb $3, %dl
+; X86-NO-BMI2-HAVE-SHLD-NEXT: andb $12, %dl
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movzbl %dl, %ebx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 8(%esp,%ebx), %esi
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl (%esp,%ebx), %edx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 4(%esp,%ebx), %ebp
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ebp, %edi
+; X86-NO-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %esi, %edi
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 12(%esp,%ebx), %ebx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %ebx, %esi
+; X86-NO-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %ebp, %edx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: shrl %cl, %ebx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %esi, 8(%eax)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ebx, 12(%eax)
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edx, (%eax)
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edi, 4(%eax)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: addl $32, %esp
+; X86-NO-BMI2-HAVE-SHLD-NEXT: addl $44, %esp
; X86-NO-BMI2-HAVE-SHLD-NEXT: popl %esi
; X86-NO-BMI2-HAVE-SHLD-NEXT: popl %edi
; X86-NO-BMI2-HAVE-SHLD-NEXT: popl %ebx
@@ -711,51 +697,49 @@ define void @lshr_16bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %ebx
; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %edi
; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: subl $32, %esp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: subl $44, %esp
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl (%ecx), %edx
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 4(%ecx), %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 8(%ecx), %edi
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 12(%ecx), %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movzbl (%eax), %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movzbl (%eax), %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: xorps %xmm0, %xmm0
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edi, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, (%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebx, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: andb $7, %al
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrb $3, %bl
-; X86-HAVE-BMI2-NO-SHLD-NEXT: andb $15, %bl
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movzbl %bl, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 4(%esp,%esi), %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 8(%esp,%esi), %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, %edi, %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: notb %dl
-; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%ebx,%ebx), %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %ecx, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebp, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrb $3, %cl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: andb $12, %cl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movzbl %cl, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 4(%esp,%esi), %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 8(%esp,%esi), %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, %ebx, %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: andb $31, %cl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: xorb $31, %cl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%edi,%edi), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %edx, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebp, %edx
; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, (%esp,%esi), %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %edi, %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %edi, %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebp, %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, %ebx, %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %ebx, %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %ebx, %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebp, %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, %edi, %edi
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 12(%esp,%esi), %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, %esi, %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %esi, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %esi, %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebx, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %esi, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edi, %ecx
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 12(%esi)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, 8(%esi)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edi, (%esi)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 4(%esi)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: addl $32, %esp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 8(%esi)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebx, (%esi)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, 4(%esi)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl $44, %esp
; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %edi
; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %ebx
@@ -768,47 +752,40 @@ define void @lshr_16bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: pushl %ebx
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: pushl %edi
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: pushl %esi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: subl $32, %esp
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: subl $44, %esp
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl (%ecx), %edx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 4(%ecx), %esi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 8(%ecx), %edi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 12(%ecx), %ecx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movzbl (%eax), %eax
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl (%edx), %esi
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 4(%edx), %edi
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 8(%edx), %ebx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 12(%edx), %edx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movzbl (%ecx), %ecx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: xorps %xmm0, %xmm0
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ebx, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edi, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %esi, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edx, (%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, %ecx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: andb $7, %cl
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrb $3, %al
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: andb $15, %al
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movzbl %al, %ebx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 4(%esp,%ebx), %eax
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrxl %ecx, %eax, %ebp
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %esi, (%esp)
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, %edx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: notb %dl
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 8(%esp,%ebx), %edi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: leal (%edi,%edi), %esi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shlxl %edx, %esi, %edx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: orl %ebp, %edx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl (%esp,%ebx), %ebp
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 12(%esp,%ebx), %ebx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrb $3, %dl
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: andb $12, %dl
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movzbl %dl, %ebp
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 8(%esp,%ebp), %ebx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl (%esp,%ebp), %edx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 4(%esp,%ebp), %esi
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %esi, %edi
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %ebx, %edi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edi, 8(%esi)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrxl %ecx, %ebx, %edi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edi, 12(%esi)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 12(%esp,%ebp), %ebp
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %ebp, %ebx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ebx, 8(%eax)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrxl %ecx, %ebp, %ebx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ebx, 12(%eax)
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: # kill: def $cl killed $cl killed $ecx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %eax, %ebp
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ebp, (%esi)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edx, 4(%esi)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: addl $32, %esp
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %esi, %edx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edx, (%eax)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edi, 4(%eax)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: addl $44, %esp
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: popl %esi
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: popl %edi
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: popl %ebx
@@ -899,66 +876,62 @@ define void @shl_16bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X86-NO-BMI2-NO-SHLD-NEXT: pushl %ebx
; X86-NO-BMI2-NO-SHLD-NEXT: pushl %edi
; X86-NO-BMI2-NO-SHLD-NEXT: pushl %esi
-; X86-NO-BMI2-NO-SHLD-NEXT: subl $40, %esp
+; X86-NO-BMI2-NO-SHLD-NEXT: subl $60, %esp
; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl (%ecx), %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl (%ecx), %ebx
; X86-NO-BMI2-NO-SHLD-NEXT: movl 4(%ecx), %esi
; X86-NO-BMI2-NO-SHLD-NEXT: movl 8(%ecx), %edi
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 12(%ecx), %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: movzbl (%eax), %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebx, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 12(%ecx), %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: movb (%eax), %dh
+; X86-NO-BMI2-NO-SHLD-NEXT: xorps %xmm0, %xmm0
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
; X86-NO-BMI2-NO-SHLD-NEXT: movl %edi, {{[0-9]+}}(%esp)
; X86-NO-BMI2-NO-SHLD-NEXT: movl %esi, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, %eax
-; X86-NO-BMI2-NO-SHLD-NEXT: andb $7, %al
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebx, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %dh, %cl
; X86-NO-BMI2-NO-SHLD-NEXT: shrb $3, %cl
-; X86-NO-BMI2-NO-SHLD-NEXT: andb $15, %cl
+; X86-NO-BMI2-NO-SHLD-NEXT: andb $12, %cl
; X86-NO-BMI2-NO-SHLD-NEXT: negb %cl
; X86-NO-BMI2-NO-SHLD-NEXT: movsbl %cl, %ebp
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 24(%esp,%ebp), %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, (%esp) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 28(%esp,%ebp), %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 32(%esp,%ebp), %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 36(%esp,%ebp), %esi
; X86-NO-BMI2-NO-SHLD-NEXT: movl %esi, %edi
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %dh, %cl
; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %edi
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %edx
-; X86-NO-BMI2-NO-SHLD-NEXT: notb %dl
-; X86-NO-BMI2-NO-SHLD-NEXT: movl (%esp), %ebx # 4-byte Reload
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %dh, %dl
+; X86-NO-BMI2-NO-SHLD-NEXT: andb $31, %dl
+; X86-NO-BMI2-NO-SHLD-NEXT: xorb $31, %dl
; X86-NO-BMI2-NO-SHLD-NEXT: shrl %ebx
; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, %ecx
; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %ebx
; X86-NO-BMI2-NO-SHLD-NEXT: orl %edi, %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 36(%esp,%ebp), %edi
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %edi
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 32(%esp,%ebp), %edi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 44(%esp,%ebp), %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %dh, %cl
+; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 40(%esp,%ebp), %edi
; X86-NO-BMI2-NO-SHLD-NEXT: movl %edi, %ebp
; X86-NO-BMI2-NO-SHLD-NEXT: shrl %ebp
; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, %ecx
; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %ebp
-; X86-NO-BMI2-NO-SHLD-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %eax, %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %dh, %cl
; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %edi
; X86-NO-BMI2-NO-SHLD-NEXT: shrl %esi
; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, %ecx
; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %esi
; X86-NO-BMI2-NO-SHLD-NEXT: orl %edi, %esi
-; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl (%esp), %eax # 4-byte Reload
-; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %eax
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, (%edx)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %esi, 8(%edx)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebp, 12(%edx)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebx, 4(%edx)
-; X86-NO-BMI2-NO-SHLD-NEXT: addl $40, %esp
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %dh, %cl
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, (%eax)
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %esi, 8(%eax)
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebp, 12(%eax)
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebx, 4(%eax)
+; X86-NO-BMI2-NO-SHLD-NEXT: addl $60, %esp
; X86-NO-BMI2-NO-SHLD-NEXT: popl %esi
; X86-NO-BMI2-NO-SHLD-NEXT: popl %edi
; X86-NO-BMI2-NO-SHLD-NEXT: popl %ebx
@@ -967,58 +940,45 @@ define void @shl_16bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
;
; X86-NO-BMI2-HAVE-SHLD-LABEL: shl_16bytes:
; X86-NO-BMI2-HAVE-SHLD: # %bb.0:
-; X86-NO-BMI2-HAVE-SHLD-NEXT: pushl %ebp
; X86-NO-BMI2-HAVE-SHLD-NEXT: pushl %ebx
; X86-NO-BMI2-HAVE-SHLD-NEXT: pushl %edi
; X86-NO-BMI2-HAVE-SHLD-NEXT: pushl %esi
; X86-NO-BMI2-HAVE-SHLD-NEXT: subl $32, %esp
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl (%ecx), %edx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 4(%ecx), %esi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 8(%ecx), %edi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 12(%ecx), %ebx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movzbl (%eax), %ecx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl (%edx), %esi
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 4(%edx), %edi
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 8(%edx), %ebx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 12(%edx), %edx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movzbl (%ecx), %ecx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: xorps %xmm0, %xmm0
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movaps %xmm0, (%esp)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edx, {{[0-9]+}}(%esp)
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ebx, {{[0-9]+}}(%esp)
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edi, {{[0-9]+}}(%esp)
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %esi, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl $0, (%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, %eax
-; X86-NO-BMI2-HAVE-SHLD-NEXT: andb $7, %al
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shrb $3, %cl
-; X86-NO-BMI2-HAVE-SHLD-NEXT: andb $15, %cl
-; X86-NO-BMI2-HAVE-SHLD-NEXT: negb %cl
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movsbl %cl, %ebp
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 24(%esp,%ebp), %ebx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ebx, %edx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shll %cl, %edx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: notb %cl
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 20(%esp,%ebp), %edi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edi, %esi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shrl %esi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shrl %cl, %esi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: orl %edx, %esi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 16(%esp,%ebp), %edx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 28(%esp,%ebp), %ebp
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shldl %cl, %ebx, %ebp
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ebx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ebp, 12(%ebx)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shldl %cl, %edx, %edi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shll %cl, %edx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edx, (%ebx)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edi, 4(%ebx)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %esi, 8(%ebx)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, %edx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: shrb $3, %dl
+; X86-NO-BMI2-HAVE-SHLD-NEXT: andb $12, %dl
+; X86-NO-BMI2-HAVE-SHLD-NEXT: negb %dl
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movsbl %dl, %edi
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 24(%esp,%edi), %esi
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 28(%esp,%edi), %edx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: shldl %cl, %esi, %edx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 16(%esp,%edi), %ebx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 20(%esp,%edi), %edi
+; X86-NO-BMI2-HAVE-SHLD-NEXT: shldl %cl, %edi, %esi
+; X86-NO-BMI2-HAVE-SHLD-NEXT: shldl %cl, %ebx, %edi
+; X86-NO-BMI2-HAVE-SHLD-NEXT: shll %cl, %ebx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %esi, 8(%eax)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edx, 12(%eax)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ebx, (%eax)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edi, 4(%eax)
; X86-NO-BMI2-HAVE-SHLD-NEXT: addl $32, %esp
; X86-NO-BMI2-HAVE-SHLD-NEXT: popl %esi
; X86-NO-BMI2-HAVE-SHLD-NEXT: popl %edi
; X86-NO-BMI2-HAVE-SHLD-NEXT: popl %ebx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: popl %ebp
; X86-NO-BMI2-HAVE-SHLD-NEXT: retl
;
; X86-HAVE-BMI2-NO-SHLD-LABEL: shl_16bytes:
@@ -1027,34 +987,32 @@ define void @shl_16bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %ebx
; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %edi
; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: subl $32, %esp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: subl $44, %esp
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl (%ecx), %edx
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 4(%ecx), %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 8(%ecx), %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 12(%ecx), %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movzbl (%eax), %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebx, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 12(%ecx), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movzbl (%eax), %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: xorps %xmm0, %xmm0
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm0, (%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edi, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: andb $7, %bl
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl $0, (%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrb $3, %cl
-; X86-HAVE-BMI2-NO-SHLD-NEXT: andb $15, %cl
-; X86-HAVE-BMI2-NO-SHLD-NEXT: negb %cl
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movsbl %cl, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebx, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrb $3, %al
+; X86-HAVE-BMI2-NO-SHLD-NEXT: andb $12, %al
+; X86-HAVE-BMI2-NO-SHLD-NEXT: negb %al
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movsbl %al, %edx
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 16(%esp,%edx), %edi
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 20(%esp,%edx), %ecx
; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebx, %ecx, %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebx, %edi, %ebp
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebx, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: notb %al
+; X86-HAVE-BMI2-NO-SHLD-NEXT: andb $31, %al
+; X86-HAVE-BMI2-NO-SHLD-NEXT: xorb $31, %al
; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl %edi
; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, %edi, %edi
; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %edi
@@ -1072,7 +1030,7 @@ define void @shl_16bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 8(%ecx)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, 12(%ecx)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edi, 4(%ecx)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: addl $32, %esp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl $44, %esp
; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %edi
; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %ebx
@@ -1081,57 +1039,45 @@ define void @shl_16bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
;
; X86-HAVE-BMI2-HAVE-SHLD-LABEL: shl_16bytes:
; X86-HAVE-BMI2-HAVE-SHLD: # %bb.0:
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: pushl %ebp
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: pushl %ebx
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: pushl %edi
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: pushl %esi
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: subl $32, %esp
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl (%ecx), %edx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 4(%ecx), %esi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 8(%ecx), %edi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 12(%ecx), %ecx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movzbl (%eax), %eax
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl (%edx), %esi
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 4(%edx), %edi
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 8(%edx), %ebx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 12(%edx), %edx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movzbl (%ecx), %ecx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: xorps %xmm0, %xmm0
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movaps %xmm0, (%esp)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ebx, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edi, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %esi, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, %ecx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: andb $7, %cl
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl $0, (%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrb $3, %al
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: andb $15, %al
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: negb %al
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movsbl %al, %ebx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 24(%esp,%ebx), %edi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shlxl %ecx, %edi, %ebp
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, %edx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: notb %dl
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 20(%esp,%ebx), %eax
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, %esi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrl %esi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrxl %edx, %esi, %edx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: orl %ebp, %edx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 16(%esp,%ebx), %ebp
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 28(%esp,%ebx), %ebx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shldl %cl, %edi, %ebx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ebx, 12(%esi)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shlxl %ecx, %ebp, %edi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edi, (%esi)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: # kill: def $cl killed $cl killed $ecx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shldl %cl, %ebp, %eax
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, 4(%esi)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edx, 8(%esi)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrb $3, %dl
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: andb $12, %dl
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: negb %dl
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movsbl %dl, %edi
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 24(%esp,%edi), %esi
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 28(%esp,%edi), %edx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shldl %cl, %esi, %edx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 16(%esp,%edi), %ebx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 20(%esp,%edi), %edi
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shldl %cl, %edi, %esi
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shldl %cl, %ebx, %edi
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shlxl %ecx, %ebx, %ecx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %esi, 8(%eax)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edx, 12(%eax)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, (%eax)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edi, 4(%eax)
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: addl $32, %esp
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: popl %esi
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: popl %edi
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: popl %ebx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: popl %ebp
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: retl
%src = load i128, ptr %src.ptr, align 1
%bitOff = load i128, ptr %bitOff.ptr, align 1
@@ -1218,62 +1164,61 @@ define void @ashr_16bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X86-NO-BMI2-NO-SHLD-NEXT: pushl %ebx
; X86-NO-BMI2-NO-SHLD-NEXT: pushl %edi
; X86-NO-BMI2-NO-SHLD-NEXT: pushl %esi
-; X86-NO-BMI2-NO-SHLD-NEXT: subl $36, %esp
+; X86-NO-BMI2-NO-SHLD-NEXT: subl $44, %esp
; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl (%ecx), %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl (%ecx), %ebx
; X86-NO-BMI2-NO-SHLD-NEXT: movl 4(%ecx), %esi
; X86-NO-BMI2-NO-SHLD-NEXT: movl 8(%ecx), %edi
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 12(%ecx), %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: movzbl (%eax), %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebx, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 12(%ecx), %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: movb (%eax), %dh
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
; X86-NO-BMI2-NO-SHLD-NEXT: movl %edi, {{[0-9]+}}(%esp)
; X86-NO-BMI2-NO-SHLD-NEXT: movl %esi, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-NO-SHLD-NEXT: sarl $31, %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebx, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebx, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebx, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebx, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, %eax
-; X86-NO-BMI2-NO-SHLD-NEXT: andb $7, %al
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebx, (%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: sarl $31, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %dh, %cl
; X86-NO-BMI2-NO-SHLD-NEXT: shrb $3, %cl
-; X86-NO-BMI2-NO-SHLD-NEXT: andb $15, %cl
-; X86-NO-BMI2-NO-SHLD-NEXT: movzbl %cl, %ebp
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 8(%esp,%ebp), %esi
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %esi, %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %edx
-; X86-NO-BMI2-NO-SHLD-NEXT: notb %dl
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 12(%esp,%ebp), %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, (%esp) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: leal (%ecx,%ecx), %edi
+; X86-NO-BMI2-NO-SHLD-NEXT: andb $12, %cl
+; X86-NO-BMI2-NO-SHLD-NEXT: movzbl %cl, %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 4(%esp,%ebx), %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %esi, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %dh, %cl
+; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %dh, %dl
+; X86-NO-BMI2-NO-SHLD-NEXT: andb $31, %dl
+; X86-NO-BMI2-NO-SHLD-NEXT: xorb $31, %dl
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 8(%esp,%ebx), %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: leal (%ebp,%ebp), %edi
; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, %ecx
; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %edi
-; X86-NO-BMI2-NO-SHLD-NEXT: orl %ebx, %edi
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 4(%esp,%ebp), %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %eax, %edi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl (%esp,%ebx), %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %dh, %cl
+; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %eax
; X86-NO-BMI2-NO-SHLD-NEXT: addl %esi, %esi
; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, %ecx
; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %esi
-; X86-NO-BMI2-NO-SHLD-NEXT: orl %ebx, %esi
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, (%esp) # 4-byte Folded Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 16(%esp,%ebp), %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: leal (%ebx,%ebx), %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %eax, %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %dh, %cl
+; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 12(%esp,%ebx), %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: leal (%eax,%eax), %ebx
; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %ebp
-; X86-NO-BMI2-NO-SHLD-NEXT: orl (%esp), %ebp # 4-byte Folded Reload
-; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: sarl %cl, %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebx, 12(%edx)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebp, 8(%edx)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %esi, (%edx)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %edi, 4(%edx)
-; X86-NO-BMI2-NO-SHLD-NEXT: addl $36, %esp
+; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %ebp, %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %dh, %cl
+; X86-NO-BMI2-NO-SHLD-NEXT: sarl %cl, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, 12(%ebp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebx, 8(%ebp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %esi, (%ebp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %edi, 4(%ebp)
+; X86-NO-BMI2-NO-SHLD-NEXT: addl $44, %esp
; X86-NO-BMI2-NO-SHLD-NEXT: popl %esi
; X86-NO-BMI2-NO-SHLD-NEXT: popl %edi
; X86-NO-BMI2-NO-SHLD-NEXT: popl %ebx
@@ -1286,51 +1231,42 @@ define void @ashr_16bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X86-NO-BMI2-HAVE-SHLD-NEXT: pushl %ebx
; X86-NO-BMI2-HAVE-SHLD-NEXT: pushl %edi
; X86-NO-BMI2-HAVE-SHLD-NEXT: pushl %esi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: subl $32, %esp
+; X86-NO-BMI2-HAVE-SHLD-NEXT: subl $44, %esp
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl (%ecx), %edx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 4(%ecx), %esi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 8(%ecx), %edi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 12(%ecx), %ebx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movzbl (%eax), %ecx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl (%edx), %esi
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 4(%edx), %edi
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 8(%edx), %ebx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 12(%edx), %edx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movzbl (%ecx), %ecx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edx, {{[0-9]+}}(%esp)
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ebx, {{[0-9]+}}(%esp)
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edi, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %esi, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edx, (%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: sarl $31, %ebx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ebx, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ebx, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ebx, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ebx, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, %eax
-; X86-NO-BMI2-HAVE-SHLD-NEXT: andb $7, %al
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shrb $3, %cl
-; X86-NO-BMI2-HAVE-SHLD-NEXT: andb $15, %cl
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movzbl %cl, %ebp
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 4(%esp,%ebp), %esi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %esi, %edx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shrl %cl, %edx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: notb %cl
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 8(%esp,%ebp), %ebx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: leal (%ebx,%ebx), %edi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shll %cl, %edi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: orl %edx, %edi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl (%esp,%ebp), %edx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 12(%esp,%ebp), %ebp
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %ebp, %ebx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ebx, 8(%ecx)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %esi, %edx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: sarl %cl, %ebp
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ebp, 12(%eax)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %esi, (%esp)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: sarl $31, %edx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, %edx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: shrb $3, %dl
+; X86-NO-BMI2-HAVE-SHLD-NEXT: andb $12, %dl
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movzbl %dl, %ebx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 8(%esp,%ebx), %esi
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl (%esp,%ebx), %edx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 4(%esp,%ebx), %ebp
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ebp, %edi
+; X86-NO-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %esi, %edi
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 12(%esp,%ebx), %ebx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %ebx, %esi
+; X86-NO-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %ebp, %edx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: sarl %cl, %ebx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %esi, 8(%eax)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ebx, 12(%eax)
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edx, (%eax)
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edi, 4(%eax)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: addl $32, %esp
+; X86-NO-BMI2-HAVE-SHLD-NEXT: addl $44, %esp
; X86-NO-BMI2-HAVE-SHLD-NEXT: popl %esi
; X86-NO-BMI2-HAVE-SHLD-NEXT: popl %edi
; X86-NO-BMI2-HAVE-SHLD-NEXT: popl %ebx
@@ -1343,52 +1279,52 @@ define void @ashr_16bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %ebx
; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %edi
; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: subl $32, %esp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: subl $44, %esp
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl (%ecx), %edx
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 4(%ecx), %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 8(%ecx), %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 12(%ecx), %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movzbl (%eax), %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebx, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 12(%ecx), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movzbl (%eax), %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edi, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, (%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: sarl $31, %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebx, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebx, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebx, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebx, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: andb $7, %al
+; X86-HAVE-BMI2-NO-SHLD-NEXT: sarl $31, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
; X86-HAVE-BMI2-NO-SHLD-NEXT: shrb $3, %cl
-; X86-HAVE-BMI2-NO-SHLD-NEXT: andb $15, %cl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: andb $12, %cl
; X86-HAVE-BMI2-NO-SHLD-NEXT: movzbl %cl, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 4(%esp,%esi), %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 8(%esp,%esi), %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, %edi, %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: notb %dl
-; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%ebx,%ebx), %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %ecx, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebp, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 4(%esp,%esi), %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 8(%esp,%esi), %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, %ebx, %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: andb $31, %cl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: xorb $31, %cl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%edi,%edi), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %edx, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebp, %edx
; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, (%esp,%esi), %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %edi, %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %edi, %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebp, %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, %ebx, %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %ebx, %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %ebx, %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebp, %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, %edi, %edi
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 12(%esp,%esi), %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: sarxl %eax, %esi, %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %esi, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %esi, %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebx, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %esi, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edi, %ecx
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 12(%esi)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, 8(%esi)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edi, (%esi)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 4(%esi)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: addl $32, %esp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 8(%esi)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebx, (%esi)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, 4(%esi)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl $44, %esp
; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %edi
; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %ebx
@@ -1401,48 +1337,43 @@ define void @ashr_16bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: pushl %ebx
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: pushl %edi
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: pushl %esi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: subl $32, %esp
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: subl $44, %esp
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl (%ecx), %edx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 4(%ecx), %esi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 8(%ecx), %edi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 12(%ecx), %ecx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movzbl (%eax), %eax
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl (%edx), %esi
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 4(%edx), %edi
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 8(%edx), %ebx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 12(%edx), %edx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movzbl (%ecx), %ecx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ebx, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edi, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %esi, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edx, (%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: sarl $31, %ecx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, %ecx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: andb $7, %cl
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrb $3, %al
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: andb $15, %al
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movzbl %al, %ebx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 4(%esp,%ebx), %eax
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrxl %ecx, %eax, %ebp
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %esi, (%esp)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: sarl $31, %edx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edx, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, %edx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: notb %dl
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 8(%esp,%ebx), %edi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: leal (%edi,%edi), %esi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shlxl %edx, %esi, %edx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: orl %ebp, %edx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl (%esp,%ebx), %ebp
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 12(%esp,%ebx), %ebx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrb $3, %dl
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: andb $12, %dl
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movzbl %dl, %ebp
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 8(%esp,%ebp), %ebx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl (%esp,%ebp), %edx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 4(%esp,%ebp), %esi
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %esi, %edi
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %ebx, %edi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edi, 8(%esi)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: sarxl %ecx, %ebx, %edi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edi, 12(%esi)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 12(%esp,%ebp), %ebp
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %ebp, %ebx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ebx, 8(%eax)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: sarxl %ecx, %ebp, %ebx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ebx, 12(%eax)
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: # kill: def $cl killed $cl killed $ecx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %eax, %ebp
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ebp, (%esi)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edx, 4(%esi)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: addl $32, %esp
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %esi, %edx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edx, (%eax)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edi, 4(%eax)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: addl $44, %esp
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: popl %esi
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: popl %edi
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: popl %ebx
@@ -1459,35 +1390,34 @@ define void @lshr_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X64-NO-BMI2-NO-SHLD-LABEL: lshr_32bytes:
; X64-NO-BMI2-NO-SHLD: # %bb.0:
; X64-NO-BMI2-NO-SHLD-NEXT: pushq %rbx
-; X64-NO-BMI2-NO-SHLD-NEXT: movq (%rdi), %rax
-; X64-NO-BMI2-NO-SHLD-NEXT: movq 8(%rdi), %rcx
-; X64-NO-BMI2-NO-SHLD-NEXT: movq 16(%rdi), %r8
+; X64-NO-BMI2-NO-SHLD-NEXT: movq (%rdi), %rcx
+; X64-NO-BMI2-NO-SHLD-NEXT: movq 8(%rdi), %r8
+; X64-NO-BMI2-NO-SHLD-NEXT: movq 16(%rdi), %r9
; X64-NO-BMI2-NO-SHLD-NEXT: movq 24(%rdi), %rdi
-; X64-NO-BMI2-NO-SHLD-NEXT: movzbl (%rsi), %esi
+; X64-NO-BMI2-NO-SHLD-NEXT: movzbl (%rsi), %eax
+; X64-NO-BMI2-NO-SHLD-NEXT: xorps %xmm0, %xmm0
+; X64-NO-BMI2-NO-SHLD-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-NO-SHLD-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; X64-NO-BMI2-NO-SHLD-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-NO-SHLD-NEXT: movq %r9, -{{[0-9]+}}(%rsp)
; X64-NO-BMI2-NO-SHLD-NEXT: movq %r8, -{{[0-9]+}}(%rsp)
; X64-NO-BMI2-NO-SHLD-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
-; X64-NO-BMI2-NO-SHLD-NEXT: movq %rax, -{{[0-9]+}}(%rsp)
-; X64-NO-BMI2-NO-SHLD-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NO-BMI2-NO-SHLD-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NO-BMI2-NO-SHLD-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NO-BMI2-NO-SHLD-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NO-BMI2-NO-SHLD-NEXT: movl %esi, %eax
-; X64-NO-BMI2-NO-SHLD-NEXT: andb $7, %al
-; X64-NO-BMI2-NO-SHLD-NEXT: shrb $3, %sil
-; X64-NO-BMI2-NO-SHLD-NEXT: movzbl %sil, %r9d
-; X64-NO-BMI2-NO-SHLD-NEXT: movq -64(%rsp,%r9), %r10
-; X64-NO-BMI2-NO-SHLD-NEXT: movq -56(%rsp,%r9), %rdi
+; X64-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
+; X64-NO-BMI2-NO-SHLD-NEXT: shrb $6, %cl
+; X64-NO-BMI2-NO-SHLD-NEXT: movzbl %cl, %r8d
+; X64-NO-BMI2-NO-SHLD-NEXT: movq -64(%rsp,%r8,8), %r10
+; X64-NO-BMI2-NO-SHLD-NEXT: movq -56(%rsp,%r8,8), %rdi
; X64-NO-BMI2-NO-SHLD-NEXT: movq %rdi, %r11
; X64-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
; X64-NO-BMI2-NO-SHLD-NEXT: shrq %cl, %r11
; X64-NO-BMI2-NO-SHLD-NEXT: movl %eax, %esi
-; X64-NO-BMI2-NO-SHLD-NEXT: notb %sil
-; X64-NO-BMI2-NO-SHLD-NEXT: movq -48(%rsp,%r9), %rbx
-; X64-NO-BMI2-NO-SHLD-NEXT: leaq (%rbx,%rbx), %r8
+; X64-NO-BMI2-NO-SHLD-NEXT: andb $63, %sil
+; X64-NO-BMI2-NO-SHLD-NEXT: xorb $63, %sil
+; X64-NO-BMI2-NO-SHLD-NEXT: movq -48(%rsp,%r8,8), %rbx
+; X64-NO-BMI2-NO-SHLD-NEXT: leaq (%rbx,%rbx), %r9
; X64-NO-BMI2-NO-SHLD-NEXT: movl %esi, %ecx
-; X64-NO-BMI2-NO-SHLD-NEXT: shlq %cl, %r8
-; X64-NO-BMI2-NO-SHLD-NEXT: orq %r11, %r8
+; X64-NO-BMI2-NO-SHLD-NEXT: shlq %cl, %r9
+; X64-NO-BMI2-NO-SHLD-NEXT: orq %r11, %r9
; X64-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
; X64-NO-BMI2-NO-SHLD-NEXT: shrq %cl, %r10
; X64-NO-BMI2-NO-SHLD-NEXT: addq %rdi, %rdi
@@ -1496,142 +1426,124 @@ define void @lshr_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X64-NO-BMI2-NO-SHLD-NEXT: orq %r10, %rdi
; X64-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
; X64-NO-BMI2-NO-SHLD-NEXT: shrq %cl, %rbx
-; X64-NO-BMI2-NO-SHLD-NEXT: movq -40(%rsp,%r9), %r9
-; X64-NO-BMI2-NO-SHLD-NEXT: leaq (%r9,%r9), %r10
+; X64-NO-BMI2-NO-SHLD-NEXT: movq -40(%rsp,%r8,8), %r8
+; X64-NO-BMI2-NO-SHLD-NEXT: leaq (%r8,%r8), %r10
; X64-NO-BMI2-NO-SHLD-NEXT: movl %esi, %ecx
; X64-NO-BMI2-NO-SHLD-NEXT: shlq %cl, %r10
; X64-NO-BMI2-NO-SHLD-NEXT: orq %rbx, %r10
; X64-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
-; X64-NO-BMI2-NO-SHLD-NEXT: shrq %cl, %r9
-; X64-NO-BMI2-NO-SHLD-NEXT: movq %r9, 24(%rdx)
+; X64-NO-BMI2-NO-SHLD-NEXT: shrq %cl, %r8
+; X64-NO-BMI2-NO-SHLD-NEXT: movq %r8, 24(%rdx)
; X64-NO-BMI2-NO-SHLD-NEXT: movq %r10, 16(%rdx)
; X64-NO-BMI2-NO-SHLD-NEXT: movq %rdi, (%rdx)
-; X64-NO-BMI2-NO-SHLD-NEXT: movq %r8, 8(%rdx)
+; X64-NO-BMI2-NO-SHLD-NEXT: movq %r9, 8(%rdx)
; X64-NO-BMI2-NO-SHLD-NEXT: popq %rbx
; X64-NO-BMI2-NO-SHLD-NEXT: retq
;
; X64-NO-BMI2-HAVE-SHLD-LABEL: lshr_32bytes:
; X64-NO-BMI2-HAVE-SHLD: # %bb.0:
; X64-NO-BMI2-HAVE-SHLD-NEXT: movq (%rdi), %rax
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq 8(%rdi), %rcx
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq 16(%rdi), %r8
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq 8(%rdi), %r8
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq 16(%rdi), %r9
; X64-NO-BMI2-HAVE-SHLD-NEXT: movq 24(%rdi), %rdi
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movzbl (%rsi), %esi
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movzbl (%rsi), %ecx
+; X64-NO-BMI2-HAVE-SHLD-NEXT: xorps %xmm0, %xmm0
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %r9, -{{[0-9]+}}(%rsp)
; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %r8, -{{[0-9]+}}(%rsp)
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %rax, -{{[0-9]+}}(%rsp)
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movl %esi, %eax
-; X64-NO-BMI2-HAVE-SHLD-NEXT: andb $7, %al
-; X64-NO-BMI2-HAVE-SHLD-NEXT: shrb $3, %sil
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movzbl %sil, %esi
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq -64(%rsp,%rsi), %rdi
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq -56(%rsp,%rsi), %r8
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, %eax
+; X64-NO-BMI2-HAVE-SHLD-NEXT: shrb $6, %al
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movzbl %al, %eax
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq -56(%rsp,%rax,8), %rsi
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq -72(%rsp,%rax,8), %rdi
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq -64(%rsp,%rax,8), %r8
; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %r8, %r9
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, %ecx
-; X64-NO-BMI2-HAVE-SHLD-NEXT: shrq %cl, %r9
-; X64-NO-BMI2-HAVE-SHLD-NEXT: notb %cl
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq -48(%rsp,%rsi), %r10
-; X64-NO-BMI2-HAVE-SHLD-NEXT: leaq (%r10,%r10), %r11
-; X64-NO-BMI2-HAVE-SHLD-NEXT: shlq %cl, %r11
-; X64-NO-BMI2-HAVE-SHLD-NEXT: orq %r9, %r11
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq -40(%rsp,%rsi), %rsi
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, %ecx
-; X64-NO-BMI2-HAVE-SHLD-NEXT: shrdq %cl, %rsi, %r10
+; X64-NO-BMI2-HAVE-SHLD-NEXT: shrdq %cl, %rsi, %r9
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq -48(%rsp,%rax,8), %rax
+; X64-NO-BMI2-HAVE-SHLD-NEXT: shrdq %cl, %rax, %rsi
; X64-NO-BMI2-HAVE-SHLD-NEXT: shrdq %cl, %r8, %rdi
-; X64-NO-BMI2-HAVE-SHLD-NEXT: shrq %cl, %rsi
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %r10, 16(%rdx)
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %rsi, 24(%rdx)
+; X64-NO-BMI2-HAVE-SHLD-NEXT: shrq %cl, %rax
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %rsi, 16(%rdx)
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %rax, 24(%rdx)
; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %rdi, (%rdx)
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %r11, 8(%rdx)
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %r9, 8(%rdx)
; X64-NO-BMI2-HAVE-SHLD-NEXT: retq
;
; X64-HAVE-BMI2-NO-SHLD-LABEL: lshr_32bytes:
; X64-HAVE-BMI2-NO-SHLD: # %bb.0:
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq (%rdi), %rax
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq 8(%rdi), %rcx
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq 16(%rdi), %r8
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq (%rdi), %rcx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq 8(%rdi), %r8
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq 16(%rdi), %r9
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq 24(%rdi), %rdi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movzbl (%rsi), %esi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movzbl (%rsi), %eax
+; X64-HAVE-BMI2-NO-SHLD-NEXT: xorps %xmm0, %xmm0
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %r9, -{{[0-9]+}}(%rsp)
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %r8, -{{[0-9]+}}(%rsp)
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rax, -{{[0-9]+}}(%rsp)
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, %eax
-; X64-HAVE-BMI2-NO-SHLD-NEXT: andb $7, %al
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrb $3, %sil
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movzbl %sil, %ecx
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -56(%rsp,%rcx), %rsi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -48(%rsp,%rcx), %rdi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rax, %rsi, %r8
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rax, -64(%rsp,%rcx), %r9
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrb $6, %cl
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movzbl %cl, %esi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -64(%rsp,%rsi,8), %rcx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -56(%rsp,%rsi,8), %rdi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rax, %rcx, %r8
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rax, -72(%rsp,%rsi,8), %r9
; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rax, %rdi, %r10
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -40(%rsp,%rcx), %rcx
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rax, %rcx, %r11
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -48(%rsp,%rsi,8), %rsi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rax, %rsi, %r11
; X64-HAVE-BMI2-NO-SHLD-NEXT: # kill: def $al killed $al killed $rax def $rax
-; X64-HAVE-BMI2-NO-SHLD-NEXT: notb %al
+; X64-HAVE-BMI2-NO-SHLD-NEXT: andb $63, %al
+; X64-HAVE-BMI2-NO-SHLD-NEXT: xorb $63, %al
; X64-HAVE-BMI2-NO-SHLD-NEXT: addq %rdi, %rdi
; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rax, %rdi, %rdi
; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r8, %rdi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: addq %rsi, %rsi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rax, %rsi, %rsi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r9, %rsi
; X64-HAVE-BMI2-NO-SHLD-NEXT: addq %rcx, %rcx
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rax, %rcx, %rax
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rax, %rcx, %rcx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r9, %rcx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: addq %rsi, %rsi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rax, %rsi, %rax
; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r10, %rax
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %r11, 24(%rdx)
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rax, 16(%rdx)
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rsi, (%rdx)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rcx, (%rdx)
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rdi, 8(%rdx)
; X64-HAVE-BMI2-NO-SHLD-NEXT: retq
;
; X64-HAVE-BMI2-HAVE-SHLD-LABEL: lshr_32bytes:
; X64-HAVE-BMI2-HAVE-SHLD: # %bb.0:
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: pushq %rbx
; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq (%rdi), %rax
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq 8(%rdi), %rcx
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq 16(%rdi), %r8
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq 8(%rdi), %r8
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq 16(%rdi), %r9
; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq 24(%rdi), %rdi
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movzbl (%rsi), %esi
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movzbl (%rsi), %ecx
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: xorps %xmm0, %xmm0
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %r9, -{{[0-9]+}}(%rsp)
; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %r8, -{{[0-9]+}}(%rsp)
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %rax, -{{[0-9]+}}(%rsp)
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movl %esi, %ecx
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: andb $7, %cl
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shrb $3, %sil
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movzbl %sil, %eax
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq -56(%rsp,%rax), %rsi
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shrxq %rcx, %rsi, %rdi
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq -40(%rsp,%rax), %r8
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shrxq %rcx, %r8, %r9
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, %r10d
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: notb %r10b
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq -48(%rsp,%rax), %r11
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: leaq (%r11,%r11), %rbx
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shlxq %r10, %rbx, %r10
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq -64(%rsp,%rax), %rax
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: orq %rdi, %r10
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shrdq %cl, %r8, %r11
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: # kill: def $cl killed $cl killed $rcx
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shrdq %cl, %rsi, %rax
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %r11, 16(%rdx)
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %r9, 24(%rdx)
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %rax, (%rdx)
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %r10, 8(%rdx)
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: popq %rbx
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, %eax
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shrb $6, %al
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movzbl %al, %eax
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq -56(%rsp,%rax,8), %rsi
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq -72(%rsp,%rax,8), %rdi
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq -64(%rsp,%rax,8), %r8
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %r8, %r9
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shrdq %cl, %rsi, %r9
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq -48(%rsp,%rax,8), %rax
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shrdq %cl, %rax, %rsi
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shrdq %cl, %r8, %rdi
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shrxq %rcx, %rax, %rax
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %rsi, 16(%rdx)
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %rax, 24(%rdx)
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %rdi, (%rdx)
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %r9, 8(%rdx)
; X64-HAVE-BMI2-HAVE-SHLD-NEXT: retq
;
; X86-NO-BMI2-NO-SHLD-LABEL: lshr_32bytes:
@@ -1640,127 +1552,120 @@ define void @lshr_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X86-NO-BMI2-NO-SHLD-NEXT: pushl %ebx
; X86-NO-BMI2-NO-SHLD-NEXT: pushl %edi
; X86-NO-BMI2-NO-SHLD-NEXT: pushl %esi
-; X86-NO-BMI2-NO-SHLD-NEXT: subl $88, %esp
+; X86-NO-BMI2-NO-SHLD-NEXT: subl $108, %esp
; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X86-NO-BMI2-NO-SHLD-NEXT: movl (%edi), %eax
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, (%esp) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 4(%edi), %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: movl (%ebp), %eax
; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 8(%edi), %esi
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 12(%edi), %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 16(%edi), %ebp
-; X86-NO-BMI2-NO-SHLD-NEXT: movb (%ecx), %ch
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 20(%edi), %edx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 24(%edi), %eax
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 28(%edi), %edi
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %edi, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 4(%ebp), %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 8(%ebp), %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 12(%ebp), %edi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 16(%ebp), %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: movzbl (%ecx), %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 20(%ebp), %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 24(%ebp), %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 28(%ebp), %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: xorps %xmm0, %xmm0
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebp, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebx, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %edi, {{[0-9]+}}(%esp)
; X86-NO-BMI2-NO-SHLD-NEXT: movl %esi, {{[0-9]+}}(%esp)
; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl (%esp), %eax # 4-byte Reload
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-NO-SHLD-NEXT: movb %ch, %al
-; X86-NO-BMI2-NO-SHLD-NEXT: andb $7, %al
-; X86-NO-BMI2-NO-SHLD-NEXT: shrb $3, %ch
-; X86-NO-BMI2-NO-SHLD-NEXT: movzbl %ch, %edi
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 24(%esp,%edi), %esi
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 28(%esp,%edi), %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebx, (%esp) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: movb %al, %ah
-; X86-NO-BMI2-NO-SHLD-NEXT: notb %ah
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 32(%esp,%edi), %edi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: shrb $5, %al
+; X86-NO-BMI2-NO-SHLD-NEXT: movzbl %al, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 32(%esp,%eax,4), %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 36(%esp,%eax,4), %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %edi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebx, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %dl, %ch
+; X86-NO-BMI2-NO-SHLD-NEXT: andb $31, %ch
+; X86-NO-BMI2-NO-SHLD-NEXT: xorb $31, %ch
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 40(%esp,%edi,4), %edi
; X86-NO-BMI2-NO-SHLD-NEXT: leal (%edi,%edi), %ebp
-; X86-NO-BMI2-NO-SHLD-NEXT: movb %ah, %cl
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %ch, %cl
; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %ebp
-; X86-NO-BMI2-NO-SHLD-NEXT: orl %ebx, %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %eax, %ebp
; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %dl, %cl
; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %esi
-; X86-NO-BMI2-NO-SHLD-NEXT: movl (%esp), %ebx # 4-byte Reload
; X86-NO-BMI2-NO-SHLD-NEXT: addl %ebx, %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: movb %ah, %cl
-; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: orl %esi, %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebx, (%esp) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 36(%esp,%ebx), %ebp
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebp, %esi
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %esi
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 40(%esp,%ebx), %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebx, %edx
-; X86-NO-BMI2-NO-SHLD-NEXT: leal (%ecx,%ecx), %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: movb %ah, %cl
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %ch, %cl
; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %ebx
; X86-NO-BMI2-NO-SHLD-NEXT: orl %esi, %ebx
; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 44(%esp,%esi,4), %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebp, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %dl, %cl
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 48(%esp,%esi,4), %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-NO-SHLD-NEXT: leal (%edx,%edx), %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %ch, %cl
+; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %eax, %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebx, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %bl, %cl
; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %edi
; X86-NO-BMI2-NO-SHLD-NEXT: addl %ebp, %ebp
-; X86-NO-BMI2-NO-SHLD-NEXT: movb %ah, %cl
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %ch, %cl
; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %ebp
; X86-NO-BMI2-NO-SHLD-NEXT: orl %edi, %ebp
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 44(%esp,%edx), %edi
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %edi, %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 48(%esp,%edx), %esi
-; X86-NO-BMI2-NO-SHLD-NEXT: leal (%esi,%esi), %edx
-; X86-NO-BMI2-NO-SHLD-NEXT: movb %ah, %cl
-; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %edx
-; X86-NO-BMI2-NO-SHLD-NEXT: orl %ebx, %edx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: movb %al, %ch
-; X86-NO-BMI2-NO-SHLD-NEXT: movb %al, %cl
-; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
-; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: addl %edi, %edi
-; X86-NO-BMI2-NO-SHLD-NEXT: movb %ah, %cl
-; X86-NO-BMI2-NO-SHLD-NEXT: movb %ah, %dl
-; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %edi
-; X86-NO-BMI2-NO-SHLD-NEXT: orl %ebx, %edi
-; X86-NO-BMI2-NO-SHLD-NEXT: movb %al, %cl
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %esi, %eax
-; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %eax
; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 52(%esp,%esi), %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 52(%esp,%esi,4), %edi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %edi, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %bl, %cl
+; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 56(%esp,%esi,4), %ebx
; X86-NO-BMI2-NO-SHLD-NEXT: leal (%ebx,%ebx), %esi
-; X86-NO-BMI2-NO-SHLD-NEXT: movb %dl, %cl
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %ch, %cl
; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %esi
; X86-NO-BMI2-NO-SHLD-NEXT: orl %eax, %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %dl, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %dl, %cl
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: addl %edi, %edi
; X86-NO-BMI2-NO-SHLD-NEXT: movb %ch, %cl
+; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %edi
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %eax, %edi
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %dl, %cl
; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebx, 28(%eax)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %esi, 24(%eax)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %edi, 16(%eax)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, 20(%eax)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebp, 8(%eax)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, 12(%eax)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl (%esp), %ecx # 4-byte Reload
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, (%eax)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, 4(%eax)
-; X86-NO-BMI2-NO-SHLD-NEXT: addl $88, %esp
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 60(%esp,%eax,4), %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: leal (%eax,%eax), %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %ch, %cl
+; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %ebx, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload
+; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, 28(%ecx)
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, 24(%ecx)
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %edi, 16(%ecx)
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %esi, 20(%ecx)
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebp, 8(%ecx)
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, 12(%ecx)
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, (%ecx)
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, 4(%ecx)
+; X86-NO-BMI2-NO-SHLD-NEXT: addl $108, %esp
; X86-NO-BMI2-NO-SHLD-NEXT: popl %esi
; X86-NO-BMI2-NO-SHLD-NEXT: popl %edi
; X86-NO-BMI2-NO-SHLD-NEXT: popl %ebx
@@ -1775,95 +1680,67 @@ define void @lshr_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X86-NO-BMI2-HAVE-SHLD-NEXT: pushl %esi
; X86-NO-BMI2-HAVE-SHLD-NEXT: subl $92, %esp
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl (%edi), %eax
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ebp
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl (%ebp), %eax
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 4(%edi), %eax
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 4(%ebp), %eax
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, (%esp) # 4-byte Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 8(%edi), %esi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 12(%edi), %ebx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 16(%edi), %ebp
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 8(%ebp), %esi
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 12(%ebp), %edi
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 16(%ebp), %ebx
; X86-NO-BMI2-HAVE-SHLD-NEXT: movzbl (%ecx), %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 20(%edi), %edx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 24(%edi), %eax
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 28(%edi), %edi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 20(%ebp), %edx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 24(%ebp), %eax
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 28(%ebp), %ebp
+; X86-NO-BMI2-HAVE-SHLD-NEXT: xorps %xmm0, %xmm0
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ebp, {{[0-9]+}}(%esp)
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ebp, {{[0-9]+}}(%esp)
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ebx, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edi, {{[0-9]+}}(%esp)
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %esi, {{[0-9]+}}(%esp)
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl (%esp), %eax # 4-byte Reload
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, %eax
-; X86-NO-BMI2-HAVE-SHLD-NEXT: andb $7, %al
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shrb $3, %cl
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movzbl %cl, %ebp
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 32(%esp,%ebp), %esi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shrl %cl, %esi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, %edx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: notb %dl
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 36(%esp,%ebp), %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: leal (%ecx,%ecx), %edi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edx, %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shll %cl, %edi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: orl %esi, %edi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 40(%esp,%ebp), %esi
+; X86-NO-BMI2-HAVE-SHLD-NEXT: shrb $5, %al
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movzbl %al, %ebp
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 24(%esp,%ebp,4), %edx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 20(%esp,%ebp,4), %eax
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, (%esp) # 4-byte Spill
+; X86-NO-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %edx, %eax
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 32(%esp,%ebp,4), %ebx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 28(%esp,%ebp,4), %eax
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, %esi
+; X86-NO-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %ebx, %esi
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shrl %cl, %esi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 44(%esp,%ebp), %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, (%esp) # 4-byte Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: leal (%ecx,%ecx), %edi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edx, %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shll %cl, %edi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: orl %esi, %edi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 48(%esp,%ebp), %edi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shrl %cl, %edi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 52(%esp,%ebp), %ebx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: leal (%ebx,%ebx), %esi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edx, %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shll %cl, %esi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: orl %edi, %esi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %edx, (%esp) # 4-byte Folded Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 28(%esp,%ebp), %edx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 56(%esp,%ebp), %edi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %edi, %ebx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %eax, %edx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 40(%esp,%ebp,4), %edx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 36(%esp,%ebp,4), %eax
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, %edi
+; X86-NO-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %edx, %edi
+; X86-NO-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %eax, %ebx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 16(%esp,%ebp,4), %esi
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 44(%esp,%ebp,4), %eax
+; X86-NO-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %eax, %edx
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ebp
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ebx, 24(%ebp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %ebx, %edx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shrl %cl, %edi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edi, 28(%ebp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl (%esp), %eax # 4-byte Reload
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, 16(%ebp)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edx, 24(%ebp)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl (%esp), %edx # 4-byte Reload
+; X86-NO-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %edx, %esi
+; X86-NO-BMI2-HAVE-SHLD-NEXT: shrl %cl, %eax
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, 28(%ebp)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ebx, 16(%ebp)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edi, 20(%ebp)
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, 8(%ebp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edx, (%ebp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %esi, 20(%ebp)
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, 12(%ebp)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %esi, (%ebp)
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, 4(%ebp)
; X86-NO-BMI2-HAVE-SHLD-NEXT: addl $92, %esp
@@ -1879,103 +1756,95 @@ define void @lshr_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %ebx
; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %edi
; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: subl $84, %esp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl (%edi), %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 4(%edi), %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 8(%edi), %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 12(%edi), %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 16(%edi), %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movzbl (%ecx), %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 20(%edi), %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 24(%edi), %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 28(%edi), %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edi, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: subl $108, %esp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl (%eax), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 4(%eax), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 8(%eax), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 12(%eax), %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 16(%eax), %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 20(%eax), %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 24(%eax), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 28(%eax), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movzbl (%eax), %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: xorps %xmm0, %xmm0
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebp, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebx, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edi, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: andb $7, %cl
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrb $3, %dl
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movzbl %dl, %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 24(%esp,%edi), %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 28(%esp,%edi), %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, %esi, %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: notb %dl
-; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%eax,%eax), %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %ebp, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebx, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, 20(%esp,%edi), %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %esi, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %esi, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebx, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrb $5, %cl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movzbl %cl, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 36(%esp,%esi,4), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 40(%esp,%esi,4), %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, %ecx, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: andb $31, %dl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: xorb $31, %dl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %ebx, %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %ebx, %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edi, %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, 32(%esp,%esi,4), %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %ecx, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %ecx, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edi, %ecx
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 36(%esp,%edi), %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, (%esp) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%ecx,%ecx), %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %ebx, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 32(%esp,%edi), %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, %ebp, %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebx, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 48(%esp,%esi,4), %ecx
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %ebp, %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %ebp, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ecx, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 44(%esp,%edi), %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%ebp,%ebp), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %ecx, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %ecx, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 44(%esp,%esi,4), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, %ecx, %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebp, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %ecx, %ecx
; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %ecx, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 40(%esp,%edi), %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebx, %eax, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebx, (%esp), %esi # 4-byte Folded Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, (%esp) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %eax, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %eax, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl (%esp), %esi # 4-byte Folded Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebx, %ebp, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 48(%esp,%edi), %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebx, %edi, %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edi, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 56(%esp,%esi,4), %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%ebx,%ebx), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %ecx, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 52(%esp,%esi,4), %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, %edi, %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebp, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %edi, %edi
; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %edi, %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %eax, %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebx, 28(%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edi, 24(%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, 16(%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 20(%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 8(%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 12(%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, (%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 4(%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: addl $84, %esp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebp, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, %ebx, %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 60(%esp,%esi,4), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, %esi, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %esi, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %esi, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebx, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 28(%esi)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, 24(%esi)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edi, 16(%esi)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 20(%esi)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 8(%esi)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 12(%esi)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, (%esi)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 4(%esi)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl $108, %esp
; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %edi
; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %ebx
@@ -1988,92 +1857,73 @@ define void @lshr_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: pushl %ebx
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: pushl %edi
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: pushl %esi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: subl $88, %esp
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl (%edi), %ecx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 4(%edi), %ecx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, (%esp) # 4-byte Spill
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 8(%edi), %esi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 12(%edi), %ebx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 16(%edi), %ebp
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movzbl (%eax), %eax
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 20(%edi), %edx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 24(%edi), %ecx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 28(%edi), %edi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edi, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: subl $92, %esp
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl (%ecx), %eax
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 4(%ecx), %eax
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, (%esp) # 4-byte Spill
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 8(%ecx), %esi
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 12(%ecx), %edi
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 16(%ecx), %ebx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 20(%ecx), %ebp
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 24(%ecx), %edx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 28(%ecx), %eax
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movzbl (%ecx), %ecx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: xorps %xmm0, %xmm0
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ebp, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ebx, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edi, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %esi, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl (%esp), %ecx # 4-byte Reload
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, %ecx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: andb $7, %cl
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrb $3, %al
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movzbl %al, %ebx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 28(%esp,%ebx), %eax
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl (%esp), %eax # 4-byte Reload
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, %eax
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrb $5, %al
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movzbl %al, %ebp
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 24(%esp,%ebp,4), %esi
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 20(%esp,%ebp,4), %eax
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrxl %ecx, %eax, %eax
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, %edx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: notb %dl
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 32(%esp,%ebx), %esi
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %esi, %eax
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, (%esp) # 4-byte Spill
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 32(%esp,%ebp,4), %ebx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 28(%esp,%ebp,4), %eax
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, %edx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %ebx, %edx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %eax, %esi
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: addl %esi, %esi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shlxl %edx, %esi, %esi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: orl %eax, %esi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %esi, (%esp) # 4-byte Spill
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 40(%esp,%ebx), %ebp
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: leal (%ebp,%ebp), %eax
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shlxl %edx, %eax, %edi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 36(%esp,%ebx), %eax
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrxl %ecx, %eax, %eax
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: orl %eax, %edi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 48(%esp,%ebx), %esi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: leal (%esi,%esi), %edi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shlxl %edx, %edi, %eax
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 44(%esp,%ebx), %edx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrxl %ecx, %edx, %edi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: orl %edi, %eax
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %eax, %edi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %edx, %ebp
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 24(%esp,%ebx), %edx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 52(%esp,%ebx), %eax
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 40(%esp,%ebp,4), %eax
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 36(%esp,%ebp,4), %edx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edx, %esi
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %eax, %esi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ebx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %esi, 24(%ebx)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrxl %ecx, %eax, %eax
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, 28(%ebx)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ebp, 16(%ebx)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edi, 8(%ebx)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %edx, %ebx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 16(%esp,%ebp,4), %edx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 44(%esp,%ebp,4), %edi
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %edi, %eax
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ebp
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, 24(%ebp)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrxl %ecx, %edi, %eax
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, 28(%ebp)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ebx, 16(%ebp)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %esi, 20(%ebp)
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %eax, %edx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edx, (%ebx)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, 8(%ebp)
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, 20(%ebx)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, 12(%ebp)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: # kill: def $cl killed $cl killed $ecx
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, 12(%ebx)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %eax, %edx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edx, (%ebp)
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl (%esp), %eax # 4-byte Reload
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, 4(%ebx)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: addl $88, %esp
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, 4(%ebp)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: addl $92, %esp
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: popl %esi
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: popl %edi
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: popl %ebx
@@ -2089,31 +1939,31 @@ define void @shl_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X64-NO-BMI2-NO-SHLD-LABEL: shl_32bytes:
; X64-NO-BMI2-NO-SHLD: # %bb.0:
; X64-NO-BMI2-NO-SHLD-NEXT: pushq %rbx
-; X64-NO-BMI2-NO-SHLD-NEXT: movq (%rdi), %rax
-; X64-NO-BMI2-NO-SHLD-NEXT: movq 8(%rdi), %rcx
-; X64-NO-BMI2-NO-SHLD-NEXT: movq 16(%rdi), %r8
+; X64-NO-BMI2-NO-SHLD-NEXT: movq (%rdi), %rcx
+; X64-NO-BMI2-NO-SHLD-NEXT: movq 8(%rdi), %r8
+; X64-NO-BMI2-NO-SHLD-NEXT: movq 16(%rdi), %r9
; X64-NO-BMI2-NO-SHLD-NEXT: movq 24(%rdi), %rdi
-; X64-NO-BMI2-NO-SHLD-NEXT: movzbl (%rsi), %esi
+; X64-NO-BMI2-NO-SHLD-NEXT: movzbl (%rsi), %eax
+; X64-NO-BMI2-NO-SHLD-NEXT: xorps %xmm0, %xmm0
+; X64-NO-BMI2-NO-SHLD-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-NO-SHLD-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; X64-NO-BMI2-NO-SHLD-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-NO-SHLD-NEXT: movq %r9, -{{[0-9]+}}(%rsp)
; X64-NO-BMI2-NO-SHLD-NEXT: movq %r8, -{{[0-9]+}}(%rsp)
; X64-NO-BMI2-NO-SHLD-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
-; X64-NO-BMI2-NO-SHLD-NEXT: movq %rax, -{{[0-9]+}}(%rsp)
-; X64-NO-BMI2-NO-SHLD-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NO-BMI2-NO-SHLD-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NO-BMI2-NO-SHLD-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NO-BMI2-NO-SHLD-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NO-BMI2-NO-SHLD-NEXT: movl %esi, %eax
-; X64-NO-BMI2-NO-SHLD-NEXT: andb $7, %al
-; X64-NO-BMI2-NO-SHLD-NEXT: shrb $3, %sil
-; X64-NO-BMI2-NO-SHLD-NEXT: negb %sil
-; X64-NO-BMI2-NO-SHLD-NEXT: movsbq %sil, %r10
+; X64-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
+; X64-NO-BMI2-NO-SHLD-NEXT: shrb $3, %cl
+; X64-NO-BMI2-NO-SHLD-NEXT: andb $24, %cl
+; X64-NO-BMI2-NO-SHLD-NEXT: negb %cl
+; X64-NO-BMI2-NO-SHLD-NEXT: movsbq %cl, %r10
; X64-NO-BMI2-NO-SHLD-NEXT: movq -32(%rsp,%r10), %r8
; X64-NO-BMI2-NO-SHLD-NEXT: movq -24(%rsp,%r10), %rdi
; X64-NO-BMI2-NO-SHLD-NEXT: movq %rdi, %r11
; X64-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
; X64-NO-BMI2-NO-SHLD-NEXT: shlq %cl, %r11
; X64-NO-BMI2-NO-SHLD-NEXT: movl %eax, %esi
-; X64-NO-BMI2-NO-SHLD-NEXT: notb %sil
+; X64-NO-BMI2-NO-SHLD-NEXT: andb $63, %sil
+; X64-NO-BMI2-NO-SHLD-NEXT: xorb $63, %sil
; X64-NO-BMI2-NO-SHLD-NEXT: movq %r8, %r9
; X64-NO-BMI2-NO-SHLD-NEXT: shrq %r9
; X64-NO-BMI2-NO-SHLD-NEXT: movl %esi, %ecx
@@ -2146,79 +1996,70 @@ define void @shl_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X64-NO-BMI2-HAVE-SHLD-LABEL: shl_32bytes:
; X64-NO-BMI2-HAVE-SHLD: # %bb.0:
; X64-NO-BMI2-HAVE-SHLD-NEXT: movq (%rdi), %rax
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq 8(%rdi), %rcx
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq 16(%rdi), %r8
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq 8(%rdi), %r8
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq 16(%rdi), %r9
; X64-NO-BMI2-HAVE-SHLD-NEXT: movq 24(%rdi), %rdi
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movzbl (%rsi), %esi
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movzbl (%rsi), %ecx
+; X64-NO-BMI2-HAVE-SHLD-NEXT: xorps %xmm0, %xmm0
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %r9, -{{[0-9]+}}(%rsp)
; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %r8, -{{[0-9]+}}(%rsp)
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %rax, -{{[0-9]+}}(%rsp)
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movl %esi, %eax
-; X64-NO-BMI2-HAVE-SHLD-NEXT: andb $7, %al
-; X64-NO-BMI2-HAVE-SHLD-NEXT: shrb $3, %sil
-; X64-NO-BMI2-HAVE-SHLD-NEXT: negb %sil
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movsbq %sil, %rsi
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq -16(%rsp,%rsi), %rdi
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %rdi, %r8
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, %ecx
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, %eax
+; X64-NO-BMI2-HAVE-SHLD-NEXT: shrb $3, %al
+; X64-NO-BMI2-HAVE-SHLD-NEXT: andb $24, %al
+; X64-NO-BMI2-HAVE-SHLD-NEXT: negb %al
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movsbq %al, %rax
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq -24(%rsp,%rax), %rsi
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq -16(%rsp,%rax), %rdi
+; X64-NO-BMI2-HAVE-SHLD-NEXT: shldq %cl, %rsi, %rdi
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq -40(%rsp,%rax), %r8
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq -32(%rsp,%rax), %rax
+; X64-NO-BMI2-HAVE-SHLD-NEXT: shldq %cl, %rax, %rsi
+; X64-NO-BMI2-HAVE-SHLD-NEXT: shldq %cl, %r8, %rax
; X64-NO-BMI2-HAVE-SHLD-NEXT: shlq %cl, %r8
-; X64-NO-BMI2-HAVE-SHLD-NEXT: notb %cl
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq -32(%rsp,%rsi), %r9
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq -24(%rsp,%rsi), %r10
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %r10, %r11
-; X64-NO-BMI2-HAVE-SHLD-NEXT: shrq %r11
-; X64-NO-BMI2-HAVE-SHLD-NEXT: shrq %cl, %r11
-; X64-NO-BMI2-HAVE-SHLD-NEXT: orq %r8, %r11
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq -8(%rsp,%rsi), %rsi
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, %ecx
-; X64-NO-BMI2-HAVE-SHLD-NEXT: shldq %cl, %rdi, %rsi
-; X64-NO-BMI2-HAVE-SHLD-NEXT: shldq %cl, %r9, %r10
-; X64-NO-BMI2-HAVE-SHLD-NEXT: shlq %cl, %r9
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %rsi, 24(%rdx)
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %r9, (%rdx)
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %r10, 8(%rdx)
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %r11, 16(%rdx)
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %rsi, 16(%rdx)
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %rdi, 24(%rdx)
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %r8, (%rdx)
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %rax, 8(%rdx)
; X64-NO-BMI2-HAVE-SHLD-NEXT: retq
;
; X64-HAVE-BMI2-NO-SHLD-LABEL: shl_32bytes:
; X64-HAVE-BMI2-NO-SHLD: # %bb.0:
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq (%rdi), %rax
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq 8(%rdi), %rcx
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq 16(%rdi), %r8
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq (%rdi), %rcx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq 8(%rdi), %r8
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq 16(%rdi), %r9
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq 24(%rdi), %rdi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movzbl (%rsi), %esi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movzbl (%rsi), %eax
+; X64-HAVE-BMI2-NO-SHLD-NEXT: xorps %xmm0, %xmm0
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %r9, -{{[0-9]+}}(%rsp)
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %r8, -{{[0-9]+}}(%rsp)
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rax, -{{[0-9]+}}(%rsp)
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, %eax
-; X64-HAVE-BMI2-NO-SHLD-NEXT: andb $7, %al
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrb $3, %sil
-; X64-HAVE-BMI2-NO-SHLD-NEXT: negb %sil
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movsbq %sil, %rsi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -32(%rsp,%rsi), %rdi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -24(%rsp,%rsi), %rcx
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rax, %rcx, %r8
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rax, -8(%rsp,%rsi), %r9
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -16(%rsp,%rsi), %rsi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rax, %rsi, %r10
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rax, %rdi, %r11
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrb $3, %cl
+; X64-HAVE-BMI2-NO-SHLD-NEXT: andb $24, %cl
+; X64-HAVE-BMI2-NO-SHLD-NEXT: negb %cl
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movsbq %cl, %rdi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -40(%rsp,%rdi), %r8
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -32(%rsp,%rdi), %rcx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rax, %rcx, %rsi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rax, -16(%rsp,%rdi), %r9
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -24(%rsp,%rdi), %rdi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rax, %rdi, %r10
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rax, %r8, %r11
; X64-HAVE-BMI2-NO-SHLD-NEXT: # kill: def $al killed $al killed $rax def $rax
-; X64-HAVE-BMI2-NO-SHLD-NEXT: notb %al
+; X64-HAVE-BMI2-NO-SHLD-NEXT: andb $63, %al
+; X64-HAVE-BMI2-NO-SHLD-NEXT: xorb $63, %al
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrq %r8
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rax, %r8, %r8
+; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %rsi, %r8
; X64-HAVE-BMI2-NO-SHLD-NEXT: shrq %rdi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rax, %rdi, %rdi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r8, %rdi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrq %rsi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rax, %rsi, %rsi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rax, %rdi, %rsi
; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r9, %rsi
; X64-HAVE-BMI2-NO-SHLD-NEXT: shrq %rcx
; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rax, %rcx, %rax
@@ -2226,50 +2067,40 @@ define void @shl_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %r11, (%rdx)
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rax, 16(%rdx)
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rsi, 24(%rdx)
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rdi, 8(%rdx)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %r8, 8(%rdx)
; X64-HAVE-BMI2-NO-SHLD-NEXT: retq
;
; X64-HAVE-BMI2-HAVE-SHLD-LABEL: shl_32bytes:
; X64-HAVE-BMI2-HAVE-SHLD: # %bb.0:
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: pushq %rbx
; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq (%rdi), %rax
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq 8(%rdi), %rcx
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq 16(%rdi), %r8
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq 8(%rdi), %r8
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq 16(%rdi), %r9
; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq 24(%rdi), %rdi
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movzbl (%rsi), %esi
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movzbl (%rsi), %ecx
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: xorps %xmm0, %xmm0
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %r9, -{{[0-9]+}}(%rsp)
; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %r8, -{{[0-9]+}}(%rsp)
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %rax, -{{[0-9]+}}(%rsp)
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movl %esi, %ecx
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: andb $7, %cl
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shrb $3, %sil
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: negb %sil
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movsbq %sil, %rax
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq -16(%rsp,%rax), %rsi
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shlxq %rcx, %rsi, %rdi
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq -32(%rsp,%rax), %r8
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shlxq %rcx, %r8, %r9
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, %r10d
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: notb %r10b
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq -24(%rsp,%rax), %r11
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %r11, %rbx
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shrq %rbx
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shrxq %r10, %rbx, %r10
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: orq %rdi, %r10
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq -8(%rsp,%rax), %rax
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shldq %cl, %rsi, %rax
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: # kill: def $cl killed $cl killed $rcx
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shldq %cl, %r8, %r11
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %rax, 24(%rdx)
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %r9, (%rdx)
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %r11, 8(%rdx)
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %r10, 16(%rdx)
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: popq %rbx
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, %eax
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shrb $3, %al
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: andb $24, %al
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: negb %al
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movsbq %al, %rax
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq -24(%rsp,%rax), %rsi
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq -16(%rsp,%rax), %rdi
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shldq %cl, %rsi, %rdi
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq -40(%rsp,%rax), %r8
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq -32(%rsp,%rax), %rax
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shldq %cl, %rax, %rsi
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shldq %cl, %r8, %rax
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shlxq %rcx, %r8, %rcx
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %rsi, 16(%rdx)
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %rdi, 24(%rdx)
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %rcx, (%rdx)
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %rax, 8(%rdx)
; X64-HAVE-BMI2-HAVE-SHLD-NEXT: retq
;
; X86-NO-BMI2-NO-SHLD-LABEL: shl_32bytes:
@@ -2278,118 +2109,112 @@ define void @shl_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X86-NO-BMI2-NO-SHLD-NEXT: pushl %ebx
; X86-NO-BMI2-NO-SHLD-NEXT: pushl %edi
; X86-NO-BMI2-NO-SHLD-NEXT: pushl %esi
-; X86-NO-BMI2-NO-SHLD-NEXT: subl $88, %esp
+; X86-NO-BMI2-NO-SHLD-NEXT: subl $108, %esp
; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X86-NO-BMI2-NO-SHLD-NEXT: movl (%edi), %eax
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, (%esp) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 4(%edi), %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: movl (%ebp), %eax
; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 8(%edi), %esi
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 12(%edi), %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 16(%edi), %ebp
-; X86-NO-BMI2-NO-SHLD-NEXT: movzbl (%ecx), %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 20(%edi), %edx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 24(%edi), %eax
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 28(%edi), %edi
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 4(%ebp), %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 8(%ebp), %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 12(%ebp), %edi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 16(%ebp), %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: movb (%ecx), %ch
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 20(%ebp), %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 24(%ebp), %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 28(%ebp), %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebp, {{[0-9]+}}(%esp)
; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: xorps %xmm0, %xmm0
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebp, {{[0-9]+}}(%esp)
; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebx, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %edi, {{[0-9]+}}(%esp)
; X86-NO-BMI2-NO-SHLD-NEXT: movl %esi, {{[0-9]+}}(%esp)
; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl (%esp), %eax # 4-byte Reload
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, %eax
-; X86-NO-BMI2-NO-SHLD-NEXT: andb $7, %al
-; X86-NO-BMI2-NO-SHLD-NEXT: shrb $3, %cl
-; X86-NO-BMI2-NO-SHLD-NEXT: negb %cl
-; X86-NO-BMI2-NO-SHLD-NEXT: movsbl %cl, %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 56(%esp,%ecx), %edi
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 60(%esp,%ecx), %edx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, %ebp
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, %esi
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %ch, %al
+; X86-NO-BMI2-NO-SHLD-NEXT: shrb $3, %al
+; X86-NO-BMI2-NO-SHLD-NEXT: andb $28, %al
+; X86-NO-BMI2-NO-SHLD-NEXT: negb %al
+; X86-NO-BMI2-NO-SHLD-NEXT: movsbl %al, %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 64(%esp,%ebx), %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 68(%esp,%ebx), %edi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %edi, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %ch, %cl
+; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %ch, %dl
+; X86-NO-BMI2-NO-SHLD-NEXT: andb $31, %dl
+; X86-NO-BMI2-NO-SHLD-NEXT: xorb $31, %dl
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %esi, %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: shrl %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %dl, %cl
+; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %eax, %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 76(%esp,%ebx), %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %ch, %cl
; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %esi
-; X86-NO-BMI2-NO-SHLD-NEXT: movb %al, %ah
-; X86-NO-BMI2-NO-SHLD-NEXT: notb %ah
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 72(%esp,%ebx), %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: shrl %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %dl, %cl
+; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %esi, %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %ch, %cl
+; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %eax
; X86-NO-BMI2-NO-SHLD-NEXT: shrl %edi
-; X86-NO-BMI2-NO-SHLD-NEXT: movb %ah, %cl
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %dl, %cl
; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %edi
-; X86-NO-BMI2-NO-SHLD-NEXT: orl %esi, %edi
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %eax, %edi
; X86-NO-BMI2-NO-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 68(%esp,%ebp), %edi
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %edi, (%esp) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 84(%esp,%ebx), %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %esi, %edi
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %ch, %cl
; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %edi
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 64(%esp,%ebp), %esi
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %esi, %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: shrl %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: movb %ah, %cl
-; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: orl %edi, %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %esi
-; X86-NO-BMI2-NO-SHLD-NEXT: shrl %edx
-; X86-NO-BMI2-NO-SHLD-NEXT: movb %ah, %cl
-; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %edx
-; X86-NO-BMI2-NO-SHLD-NEXT: orl %esi, %edx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 76(%esp,%ebp), %esi
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %esi, %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 72(%esp,%ebp), %edi
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %edi, %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 80(%esp,%ebx), %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ebp
; X86-NO-BMI2-NO-SHLD-NEXT: shrl %ebp
-; X86-NO-BMI2-NO-SHLD-NEXT: movb %ah, %cl
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %dl, %cl
; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %ebp
-; X86-NO-BMI2-NO-SHLD-NEXT: orl %ebx, %ebp
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %edi
-; X86-NO-BMI2-NO-SHLD-NEXT: movl (%esp), %edx # 4-byte Reload
-; X86-NO-BMI2-NO-SHLD-NEXT: shrl %edx
-; X86-NO-BMI2-NO-SHLD-NEXT: movb %ah, %cl
-; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %edx
-; X86-NO-BMI2-NO-SHLD-NEXT: orl %edi, %edx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, (%esp) # 4-byte Spill
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %edi, %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %ch, %cl
+; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %eax
; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 84(%esp,%edi), %edx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %edx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 80(%esp,%edi), %edi
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %edi, %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: shrl %edi
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %dl, %cl
+; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %edi
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %eax, %edi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 92(%esp,%ebx), %edi
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %ch, %cl
+; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %edi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 88(%esp,%ebx), %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ebx
; X86-NO-BMI2-NO-SHLD-NEXT: shrl %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: movb %ah, %cl
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %dl, %cl
; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: orl %edx, %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %edi
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %edi, %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %ch, %cl
+; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %eax
; X86-NO-BMI2-NO-SHLD-NEXT: shrl %esi
-; X86-NO-BMI2-NO-SHLD-NEXT: movb %ah, %cl
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %dl, %cl
; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %esi
-; X86-NO-BMI2-NO-SHLD-NEXT: orl %edi, %esi
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %eax, %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %ch, %cl
; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %edx
; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, (%eax)
; X86-NO-BMI2-NO-SHLD-NEXT: movl %esi, 24(%eax)
; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebx, 28(%eax)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl (%esp), %ecx # 4-byte Reload
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, 16(%eax)
; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebp, 20(%eax)
; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
@@ -2398,7 +2223,7 @@ define void @shl_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, 12(%eax)
; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, 4(%eax)
-; X86-NO-BMI2-NO-SHLD-NEXT: addl $88, %esp
+; X86-NO-BMI2-NO-SHLD-NEXT: addl $108, %esp
; X86-NO-BMI2-NO-SHLD-NEXT: popl %esi
; X86-NO-BMI2-NO-SHLD-NEXT: popl %edi
; X86-NO-BMI2-NO-SHLD-NEXT: popl %ebx
@@ -2413,99 +2238,70 @@ define void @shl_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X86-NO-BMI2-HAVE-SHLD-NEXT: pushl %esi
; X86-NO-BMI2-HAVE-SHLD-NEXT: subl $92, %esp
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl (%edi), %eax
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ebp
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl (%ebp), %eax
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 4(%edi), %eax
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 4(%ebp), %eax
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, (%esp) # 4-byte Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 8(%edi), %esi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 12(%edi), %ebx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 16(%edi), %ebp
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 8(%ebp), %esi
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 12(%ebp), %edi
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 16(%ebp), %ebx
; X86-NO-BMI2-HAVE-SHLD-NEXT: movzbl (%ecx), %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 20(%edi), %edx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 24(%edi), %eax
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 28(%edi), %edi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 20(%ebp), %edx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 24(%ebp), %eax
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 28(%ebp), %ebp
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ebp, {{[0-9]+}}(%esp)
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: xorps %xmm0, %xmm0
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ebp, {{[0-9]+}}(%esp)
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ebx, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edi, {{[0-9]+}}(%esp)
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %esi, {{[0-9]+}}(%esp)
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl (%esp), %eax # 4-byte Reload
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, %eax
-; X86-NO-BMI2-HAVE-SHLD-NEXT: andb $7, %al
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shrb $3, %cl
-; X86-NO-BMI2-HAVE-SHLD-NEXT: negb %cl
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movsbl %cl, %ebx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 68(%esp,%ebx), %esi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shll %cl, %esi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, %edx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: notb %dl
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 64(%esp,%ebx), %edi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shrl %edi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edx, %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shrl %cl, %edi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: orl %esi, %edi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 76(%esp,%ebx), %esi
+; X86-NO-BMI2-HAVE-SHLD-NEXT: shrb $3, %al
+; X86-NO-BMI2-HAVE-SHLD-NEXT: andb $28, %al
+; X86-NO-BMI2-HAVE-SHLD-NEXT: negb %al
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movsbl %al, %eax
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 56(%esp,%eax), %edx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 60(%esp,%eax), %ebx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ebx, %esi
+; X86-NO-BMI2-HAVE-SHLD-NEXT: shldl %cl, %edx, %esi
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shll %cl, %esi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 72(%esp,%ebx), %ebp
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 52(%esp,%eax), %esi
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %esi, (%esp) # 4-byte Spill
+; X86-NO-BMI2-HAVE-SHLD-NEXT: shldl %cl, %esi, %edx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 64(%esp,%eax), %edi
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 68(%esp,%eax), %ebp
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shrl %ebp
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edx, %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shrl %cl, %ebp
-; X86-NO-BMI2-HAVE-SHLD-NEXT: orl %esi, %ebp
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 84(%esp,%ebx), %esi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shll %cl, %esi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 80(%esp,%ebx), %edi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edi, (%esp) # 4-byte Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shrl %edi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edx, %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shrl %cl, %edi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: orl %esi, %edi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shldl %cl, %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shldl %cl, %edx, (%esp) # 4-byte Folded Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 60(%esp,%ebx), %edx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 88(%esp,%ebx), %esi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shldl %cl, %ebx, %esi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ebx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %esi, 28(%ebx)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl (%esp), %ecx # 4-byte Reload
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, 20(%ebx)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, 12(%ebx)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X86-NO-BMI2-HAVE-SHLD-NEXT: shldl %cl, %edi, %ebp
+; X86-NO-BMI2-HAVE-SHLD-NEXT: shldl %cl, %ebx, %edi
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 48(%esp,%eax), %ebx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 72(%esp,%eax), %edx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 76(%esp,%eax), %esi
; X86-NO-BMI2-HAVE-SHLD-NEXT: shldl %cl, %edx, %esi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shll %cl, %edx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edx, (%ebx)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %esi, 4(%ebx)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edi, 24(%ebx)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ebp, 16(%ebx)
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, 8(%ebx)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: shldl %cl, %eax, %edx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edx, 24(%eax)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %esi, 28(%eax)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edi, 16(%eax)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ebp, 20(%eax)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edx, 8(%eax)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edx, 12(%eax)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl (%esp), %edx # 4-byte Reload
+; X86-NO-BMI2-HAVE-SHLD-NEXT: shldl %cl, %ebx, %edx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: shll %cl, %ebx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ebx, (%eax)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edx, 4(%eax)
; X86-NO-BMI2-HAVE-SHLD-NEXT: addl $92, %esp
; X86-NO-BMI2-HAVE-SHLD-NEXT: popl %esi
; X86-NO-BMI2-HAVE-SHLD-NEXT: popl %edi
@@ -2519,106 +2315,105 @@ define void @shl_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %ebx
; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %edi
; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: subl $88, %esp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl (%edi), %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 4(%edi), %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 8(%edi), %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 12(%edi), %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 16(%edi), %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movzbl (%ecx), %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 20(%edi), %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 24(%edi), %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 28(%edi), %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edi, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: subl $108, %esp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl (%eax), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 4(%eax), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 8(%eax), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 12(%eax), %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 16(%eax), %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 20(%eax), %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 24(%eax), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 28(%eax), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movzbl (%eax), %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: xorps %xmm0, %xmm0
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebp, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebx, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edi, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: andb $7, %dl
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
; X86-HAVE-BMI2-NO-SHLD-NEXT: shrb $3, %cl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: andb $28, %cl
; X86-HAVE-BMI2-NO-SHLD-NEXT: negb %cl
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movsbl %cl, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 56(%esp,%esi), %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movsbl %cl, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 64(%esp,%edx), %ebx
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 60(%esp,%esi), %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, (%esp) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %eax, %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: notb %cl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 68(%esp,%edx), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %eax, %ecx, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: andb $31, %dl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: xorb $31, %dl
; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, %ebx, %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, %ebx, %ebx
; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edi, %ebx
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 64(%esp,%esi), %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 72(%esp,%esi), %ebx
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebx, %edi
; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, %edi, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 68(%esp,%esi), %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %edi, %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, %edi, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 76(%esp,%esi), %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebp, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebp, %edi, %ebp
; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebp, %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %ebx, %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl (%esp), %eax # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, %eax, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebx, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, (%esp) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 72(%esp,%esi), %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %esi, %ebx, %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, %ecx, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebx, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 80(%esp,%ebp), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, %ebx
; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, %ebx, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 76(%esp,%esi), %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %ebx, %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, %ebx, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 84(%esp,%ebp), %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %esi, %ebx, %ebp
; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebp, %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %esi, %ecx, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, %edi, %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %eax, %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, 84(%esp,%esi), %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 80(%esp,%esi), %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %esi, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, %edi, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ecx, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %esi, {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %eax, 92(%esp,%esi), %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 88(%esp,%esi), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %eax, %esi, %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, %esi, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, %esi, %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebp, %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, %ebx, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, %ebx, %edx
; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %eax, %edx
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, (%eax)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, 24(%eax)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, 28(%eax)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edi, 16(%eax)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 20(%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl (%esp), %ecx # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 8(%eax)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 12(%eax)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 4(%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: addl $88, %esp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl $108, %esp
; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %edi
; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %ebx
@@ -2631,95 +2426,75 @@ define void @shl_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: pushl %ebx
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: pushl %edi
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: pushl %esi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: subl $88, %esp
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl (%edi), %ecx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 4(%edi), %ecx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, (%esp) # 4-byte Spill
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 8(%edi), %esi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 12(%edi), %ebx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 16(%edi), %ebp
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movzbl (%eax), %eax
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 20(%edi), %edx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 24(%edi), %ecx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 28(%edi), %edi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edi, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: subl $92, %esp
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl (%ecx), %eax
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 4(%ecx), %eax
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 8(%ecx), %esi
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 12(%ecx), %edi
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 16(%ecx), %ebx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 20(%ecx), %ebp
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 24(%ecx), %edx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 28(%ecx), %eax
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movzbl (%ecx), %ecx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: xorps %xmm0, %xmm0
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ebp, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ebx, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edi, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %esi, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl (%esp), %ecx # 4-byte Reload
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, %ecx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: andb $7, %cl
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, %eax
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrb $3, %al
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: andb $28, %al
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: negb %al
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movsbl %al, %esi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 64(%esp,%esi), %eax
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shlxl %ecx, %eax, %edx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, %eax
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: notb %al
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 60(%esp,%esi), %ebx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ebx, (%esp) # 4-byte Spill
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrl %ebx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrxl %eax, %ebx, %edi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: orl %edx, %edi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 68(%esp,%esi), %ebx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ebx, %edx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrl %edx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrxl %eax, %edx, %edi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 72(%esp,%esi), %edx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movsbl %al, %eax
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 56(%esp,%eax), %edx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 60(%esp,%eax), %esi
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %esi, (%esp) # 4-byte Spill
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shldl %cl, %edx, %esi
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 52(%esp,%eax), %ebx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shldl %cl, %ebx, %edx
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shlxl %ecx, %edx, %edx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: orl %edx, %edi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 76(%esp,%esi), %edi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edi, %edx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrl %edx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrxl %eax, %edx, %eax
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 80(%esp,%esi), %edx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shlxl %ecx, %edx, %ebp
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: orl %ebp, %eax
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shldl %cl, %eax, %ebx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 64(%esp,%eax), %edi
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 68(%esp,%eax), %ebp
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shldl %cl, %edi, %ebp
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl (%esp), %edx # 4-byte Reload
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shldl %cl, %edx, %edi
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 48(%esp,%eax), %edx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edx, (%esp) # 4-byte Spill
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 72(%esp,%eax), %edx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 76(%esp,%eax), %esi
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shldl %cl, %edx, %esi
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shldl %cl, %eax, %edi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 56(%esp,%esi), %ebp
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 84(%esp,%esi), %eax
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shldl %cl, %edx, %eax
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, 28(%esi)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edi, 20(%esi)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ebx, 12(%esi)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shlxl %ecx, %ebp, %eax
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, (%esi)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shldl %cl, %eax, %edx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edx, 24(%eax)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %esi, 28(%eax)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edi, 16(%eax)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ebp, 20(%eax)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edx, 8(%eax)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edx, 12(%eax)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl (%esp), %esi # 4-byte Reload
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shlxl %ecx, %esi, %edx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edx, (%eax)
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: # kill: def $cl killed $cl killed $ecx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl (%esp), %eax # 4-byte Reload
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shldl %cl, %ebp, %eax
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, 4(%esi)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, 24(%esi)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, 16(%esi)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, 8(%esi)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: addl $88, %esp
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shldl %cl, %esi, %ebx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ebx, 4(%eax)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: addl $92, %esp
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: popl %esi
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: popl %edi
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: popl %ebx
@@ -2735,36 +2510,36 @@ define void @ashr_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X64-NO-BMI2-NO-SHLD-LABEL: ashr_32bytes:
; X64-NO-BMI2-NO-SHLD: # %bb.0:
; X64-NO-BMI2-NO-SHLD-NEXT: pushq %rbx
-; X64-NO-BMI2-NO-SHLD-NEXT: movq (%rdi), %rax
-; X64-NO-BMI2-NO-SHLD-NEXT: movq 8(%rdi), %rcx
-; X64-NO-BMI2-NO-SHLD-NEXT: movq 16(%rdi), %r8
+; X64-NO-BMI2-NO-SHLD-NEXT: movq (%rdi), %rcx
+; X64-NO-BMI2-NO-SHLD-NEXT: movq 8(%rdi), %r8
+; X64-NO-BMI2-NO-SHLD-NEXT: movq 16(%rdi), %r9
; X64-NO-BMI2-NO-SHLD-NEXT: movq 24(%rdi), %rdi
-; X64-NO-BMI2-NO-SHLD-NEXT: movzbl (%rsi), %esi
+; X64-NO-BMI2-NO-SHLD-NEXT: movzbl (%rsi), %eax
; X64-NO-BMI2-NO-SHLD-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-NO-SHLD-NEXT: movq %r9, -{{[0-9]+}}(%rsp)
; X64-NO-BMI2-NO-SHLD-NEXT: movq %r8, -{{[0-9]+}}(%rsp)
; X64-NO-BMI2-NO-SHLD-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
-; X64-NO-BMI2-NO-SHLD-NEXT: movq %rax, -{{[0-9]+}}(%rsp)
; X64-NO-BMI2-NO-SHLD-NEXT: sarq $63, %rdi
; X64-NO-BMI2-NO-SHLD-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
; X64-NO-BMI2-NO-SHLD-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
; X64-NO-BMI2-NO-SHLD-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
; X64-NO-BMI2-NO-SHLD-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
-; X64-NO-BMI2-NO-SHLD-NEXT: movl %esi, %eax
-; X64-NO-BMI2-NO-SHLD-NEXT: andb $7, %al
-; X64-NO-BMI2-NO-SHLD-NEXT: shrb $3, %sil
-; X64-NO-BMI2-NO-SHLD-NEXT: movzbl %sil, %r9d
-; X64-NO-BMI2-NO-SHLD-NEXT: movq -64(%rsp,%r9), %r10
-; X64-NO-BMI2-NO-SHLD-NEXT: movq -56(%rsp,%r9), %rdi
+; X64-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
+; X64-NO-BMI2-NO-SHLD-NEXT: shrb $6, %cl
+; X64-NO-BMI2-NO-SHLD-NEXT: movzbl %cl, %r8d
+; X64-NO-BMI2-NO-SHLD-NEXT: movq -64(%rsp,%r8,8), %r10
+; X64-NO-BMI2-NO-SHLD-NEXT: movq -56(%rsp,%r8,8), %rdi
; X64-NO-BMI2-NO-SHLD-NEXT: movq %rdi, %r11
; X64-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
; X64-NO-BMI2-NO-SHLD-NEXT: shrq %cl, %r11
; X64-NO-BMI2-NO-SHLD-NEXT: movl %eax, %esi
-; X64-NO-BMI2-NO-SHLD-NEXT: notb %sil
-; X64-NO-BMI2-NO-SHLD-NEXT: movq -48(%rsp,%r9), %rbx
-; X64-NO-BMI2-NO-SHLD-NEXT: leaq (%rbx,%rbx), %r8
+; X64-NO-BMI2-NO-SHLD-NEXT: andb $63, %sil
+; X64-NO-BMI2-NO-SHLD-NEXT: xorb $63, %sil
+; X64-NO-BMI2-NO-SHLD-NEXT: movq -48(%rsp,%r8,8), %rbx
+; X64-NO-BMI2-NO-SHLD-NEXT: leaq (%rbx,%rbx), %r9
; X64-NO-BMI2-NO-SHLD-NEXT: movl %esi, %ecx
-; X64-NO-BMI2-NO-SHLD-NEXT: shlq %cl, %r8
-; X64-NO-BMI2-NO-SHLD-NEXT: orq %r11, %r8
+; X64-NO-BMI2-NO-SHLD-NEXT: shlq %cl, %r9
+; X64-NO-BMI2-NO-SHLD-NEXT: orq %r11, %r9
; X64-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
; X64-NO-BMI2-NO-SHLD-NEXT: shrq %cl, %r10
; X64-NO-BMI2-NO-SHLD-NEXT: addq %rdi, %rdi
@@ -2773,145 +2548,130 @@ define void @ashr_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X64-NO-BMI2-NO-SHLD-NEXT: orq %r10, %rdi
; X64-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
; X64-NO-BMI2-NO-SHLD-NEXT: shrq %cl, %rbx
-; X64-NO-BMI2-NO-SHLD-NEXT: movq -40(%rsp,%r9), %r9
-; X64-NO-BMI2-NO-SHLD-NEXT: leaq (%r9,%r9), %r10
+; X64-NO-BMI2-NO-SHLD-NEXT: movq -40(%rsp,%r8,8), %r8
+; X64-NO-BMI2-NO-SHLD-NEXT: leaq (%r8,%r8), %r10
; X64-NO-BMI2-NO-SHLD-NEXT: movl %esi, %ecx
; X64-NO-BMI2-NO-SHLD-NEXT: shlq %cl, %r10
; X64-NO-BMI2-NO-SHLD-NEXT: orq %rbx, %r10
; X64-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
-; X64-NO-BMI2-NO-SHLD-NEXT: sarq %cl, %r9
-; X64-NO-BMI2-NO-SHLD-NEXT: movq %r9, 24(%rdx)
+; X64-NO-BMI2-NO-SHLD-NEXT: sarq %cl, %r8
+; X64-NO-BMI2-NO-SHLD-NEXT: movq %r8, 24(%rdx)
; X64-NO-BMI2-NO-SHLD-NEXT: movq %r10, 16(%rdx)
; X64-NO-BMI2-NO-SHLD-NEXT: movq %rdi, (%rdx)
-; X64-NO-BMI2-NO-SHLD-NEXT: movq %r8, 8(%rdx)
+; X64-NO-BMI2-NO-SHLD-NEXT: movq %r9, 8(%rdx)
; X64-NO-BMI2-NO-SHLD-NEXT: popq %rbx
; X64-NO-BMI2-NO-SHLD-NEXT: retq
;
; X64-NO-BMI2-HAVE-SHLD-LABEL: ashr_32bytes:
; X64-NO-BMI2-HAVE-SHLD: # %bb.0:
; X64-NO-BMI2-HAVE-SHLD-NEXT: movq (%rdi), %rax
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq 8(%rdi), %rcx
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq 16(%rdi), %r8
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq 8(%rdi), %r8
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq 16(%rdi), %r9
; X64-NO-BMI2-HAVE-SHLD-NEXT: movq 24(%rdi), %rdi
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movzbl (%rsi), %esi
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movzbl (%rsi), %ecx
; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %r9, -{{[0-9]+}}(%rsp)
; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %r8, -{{[0-9]+}}(%rsp)
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %rax, -{{[0-9]+}}(%rsp)
; X64-NO-BMI2-HAVE-SHLD-NEXT: sarq $63, %rdi
; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movl %esi, %eax
-; X64-NO-BMI2-HAVE-SHLD-NEXT: andb $7, %al
-; X64-NO-BMI2-HAVE-SHLD-NEXT: shrb $3, %sil
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movzbl %sil, %esi
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq -64(%rsp,%rsi), %rdi
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq -56(%rsp,%rsi), %r8
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, %eax
+; X64-NO-BMI2-HAVE-SHLD-NEXT: shrb $6, %al
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movzbl %al, %eax
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq -56(%rsp,%rax,8), %rsi
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq -72(%rsp,%rax,8), %rdi
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq -64(%rsp,%rax,8), %r8
; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %r8, %r9
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, %ecx
-; X64-NO-BMI2-HAVE-SHLD-NEXT: shrq %cl, %r9
-; X64-NO-BMI2-HAVE-SHLD-NEXT: notb %cl
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq -48(%rsp,%rsi), %r10
-; X64-NO-BMI2-HAVE-SHLD-NEXT: leaq (%r10,%r10), %r11
-; X64-NO-BMI2-HAVE-SHLD-NEXT: shlq %cl, %r11
-; X64-NO-BMI2-HAVE-SHLD-NEXT: orq %r9, %r11
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq -40(%rsp,%rsi), %rsi
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, %ecx
-; X64-NO-BMI2-HAVE-SHLD-NEXT: shrdq %cl, %rsi, %r10
+; X64-NO-BMI2-HAVE-SHLD-NEXT: shrdq %cl, %rsi, %r9
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq -48(%rsp,%rax,8), %rax
+; X64-NO-BMI2-HAVE-SHLD-NEXT: shrdq %cl, %rax, %rsi
; X64-NO-BMI2-HAVE-SHLD-NEXT: shrdq %cl, %r8, %rdi
-; X64-NO-BMI2-HAVE-SHLD-NEXT: sarq %cl, %rsi
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %r10, 16(%rdx)
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %rsi, 24(%rdx)
+; X64-NO-BMI2-HAVE-SHLD-NEXT: sarq %cl, %rax
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %rsi, 16(%rdx)
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %rax, 24(%rdx)
; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %rdi, (%rdx)
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %r11, 8(%rdx)
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %r9, 8(%rdx)
; X64-NO-BMI2-HAVE-SHLD-NEXT: retq
;
; X64-HAVE-BMI2-NO-SHLD-LABEL: ashr_32bytes:
; X64-HAVE-BMI2-NO-SHLD: # %bb.0:
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq (%rdi), %rax
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq 8(%rdi), %rcx
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq 16(%rdi), %r8
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq (%rdi), %rcx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq 8(%rdi), %r8
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq 16(%rdi), %r9
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq 24(%rdi), %rdi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movzbl (%rsi), %esi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movzbl (%rsi), %eax
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %r9, -{{[0-9]+}}(%rsp)
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %r8, -{{[0-9]+}}(%rsp)
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rax, -{{[0-9]+}}(%rsp)
; X64-HAVE-BMI2-NO-SHLD-NEXT: sarq $63, %rdi
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, %eax
-; X64-HAVE-BMI2-NO-SHLD-NEXT: andb $7, %al
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrb $3, %sil
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movzbl %sil, %ecx
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -56(%rsp,%rcx), %rsi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -48(%rsp,%rcx), %rdi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rax, %rsi, %r8
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rax, -64(%rsp,%rcx), %r9
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrb $6, %cl
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movzbl %cl, %esi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -64(%rsp,%rsi,8), %rcx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -56(%rsp,%rsi,8), %rdi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rax, %rcx, %r8
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rax, -72(%rsp,%rsi,8), %r9
; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rax, %rdi, %r10
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -40(%rsp,%rcx), %rcx
-; X64-HAVE-BMI2-NO-SHLD-NEXT: sarxq %rax, %rcx, %r11
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -48(%rsp,%rsi,8), %rsi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: sarxq %rax, %rsi, %r11
; X64-HAVE-BMI2-NO-SHLD-NEXT: # kill: def $al killed $al killed $rax def $rax
-; X64-HAVE-BMI2-NO-SHLD-NEXT: notb %al
+; X64-HAVE-BMI2-NO-SHLD-NEXT: andb $63, %al
+; X64-HAVE-BMI2-NO-SHLD-NEXT: xorb $63, %al
; X64-HAVE-BMI2-NO-SHLD-NEXT: addq %rdi, %rdi
; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rax, %rdi, %rdi
; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r8, %rdi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: addq %rsi, %rsi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rax, %rsi, %rsi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r9, %rsi
; X64-HAVE-BMI2-NO-SHLD-NEXT: addq %rcx, %rcx
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rax, %rcx, %rax
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rax, %rcx, %rcx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r9, %rcx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: addq %rsi, %rsi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rax, %rsi, %rax
; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r10, %rax
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %r11, 24(%rdx)
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rax, 16(%rdx)
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rsi, (%rdx)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rcx, (%rdx)
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rdi, 8(%rdx)
; X64-HAVE-BMI2-NO-SHLD-NEXT: retq
;
; X64-HAVE-BMI2-HAVE-SHLD-LABEL: ashr_32bytes:
; X64-HAVE-BMI2-HAVE-SHLD: # %bb.0:
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: pushq %rbx
; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq (%rdi), %rax
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq 8(%rdi), %rcx
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq 16(%rdi), %r8
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq 8(%rdi), %r8
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq 16(%rdi), %r9
; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq 24(%rdi), %rdi
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movzbl (%rsi), %esi
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movzbl (%rsi), %ecx
; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %r9, -{{[0-9]+}}(%rsp)
; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %r8, -{{[0-9]+}}(%rsp)
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %rax, -{{[0-9]+}}(%rsp)
; X64-HAVE-BMI2-HAVE-SHLD-NEXT: sarq $63, %rdi
; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movl %esi, %ecx
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: andb $7, %cl
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shrb $3, %sil
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movzbl %sil, %eax
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq -56(%rsp,%rax), %rsi
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shrxq %rcx, %rsi, %rdi
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq -40(%rsp,%rax), %r8
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: sarxq %rcx, %r8, %r9
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, %r10d
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: notb %r10b
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq -48(%rsp,%rax), %r11
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: leaq (%r11,%r11), %rbx
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shlxq %r10, %rbx, %r10
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq -64(%rsp,%rax), %rax
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: orq %rdi, %r10
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shrdq %cl, %r8, %r11
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: # kill: def $cl killed $cl killed $rcx
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shrdq %cl, %rsi, %rax
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %r11, 16(%rdx)
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %r9, 24(%rdx)
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %rax, (%rdx)
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %r10, 8(%rdx)
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: popq %rbx
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, %eax
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shrb $6, %al
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movzbl %al, %eax
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq -56(%rsp,%rax,8), %rsi
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq -72(%rsp,%rax,8), %rdi
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq -64(%rsp,%rax,8), %r8
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %r8, %r9
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shrdq %cl, %rsi, %r9
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq -48(%rsp,%rax,8), %rax
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shrdq %cl, %rax, %rsi
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shrdq %cl, %r8, %rdi
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: sarxq %rcx, %rax, %rax
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %rsi, 16(%rdx)
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %rax, 24(%rdx)
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %rdi, (%rdx)
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %r9, 8(%rdx)
; X64-HAVE-BMI2-HAVE-SHLD-NEXT: retq
;
; X86-NO-BMI2-NO-SHLD-LABEL: ashr_32bytes:
@@ -2920,17 +2680,17 @@ define void @ashr_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X86-NO-BMI2-NO-SHLD-NEXT: pushl %ebx
; X86-NO-BMI2-NO-SHLD-NEXT: pushl %edi
; X86-NO-BMI2-NO-SHLD-NEXT: pushl %esi
-; X86-NO-BMI2-NO-SHLD-NEXT: subl $88, %esp
+; X86-NO-BMI2-NO-SHLD-NEXT: subl $108, %esp
; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NO-BMI2-NO-SHLD-NEXT: movl (%edx), %eax
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, (%esp) # 4-byte Spill
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NO-BMI2-NO-SHLD-NEXT: movl 4(%edx), %eax
; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NO-BMI2-NO-SHLD-NEXT: movl 8(%edx), %edi
; X86-NO-BMI2-NO-SHLD-NEXT: movl 12(%edx), %ebx
; X86-NO-BMI2-NO-SHLD-NEXT: movl 16(%edx), %ebp
-; X86-NO-BMI2-NO-SHLD-NEXT: movb (%ecx), %ch
+; X86-NO-BMI2-NO-SHLD-NEXT: movzbl (%ecx), %ecx
; X86-NO-BMI2-NO-SHLD-NEXT: movl 20(%edx), %esi
; X86-NO-BMI2-NO-SHLD-NEXT: movl 24(%edx), %eax
; X86-NO-BMI2-NO-SHLD-NEXT: movl 28(%edx), %edx
@@ -2942,7 +2702,7 @@ define void @ashr_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X86-NO-BMI2-NO-SHLD-NEXT: movl %edi, {{[0-9]+}}(%esp)
; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl (%esp), %eax # 4-byte Reload
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
; X86-NO-BMI2-NO-SHLD-NEXT: sarl $31, %edx
; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, {{[0-9]+}}(%esp)
@@ -2953,95 +2713,94 @@ define void @ashr_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, {{[0-9]+}}(%esp)
; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, {{[0-9]+}}(%esp)
; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-NO-SHLD-NEXT: movb %ch, %al
-; X86-NO-BMI2-NO-SHLD-NEXT: andb $7, %al
-; X86-NO-BMI2-NO-SHLD-NEXT: shrb $3, %ch
-; X86-NO-BMI2-NO-SHLD-NEXT: movzbl %ch, %edi
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %cl, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: shrb $5, %al
+; X86-NO-BMI2-NO-SHLD-NEXT: movzbl %al, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 32(%esp,%eax,4), %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 36(%esp,%eax,4), %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %cl, %ch
+; X86-NO-BMI2-NO-SHLD-NEXT: andb $31, %ch
+; X86-NO-BMI2-NO-SHLD-NEXT: xorb $31, %ch
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 40(%esp,%ebp,4), %edi
; X86-NO-BMI2-NO-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 24(%esp,%edi), %esi
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 28(%esp,%edi), %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebx, (%esp) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: movb %al, %ah
-; X86-NO-BMI2-NO-SHLD-NEXT: notb %ah
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 32(%esp,%edi), %edi
-; X86-NO-BMI2-NO-SHLD-NEXT: leal (%edi,%edi), %ebp
-; X86-NO-BMI2-NO-SHLD-NEXT: movb %ah, %cl
-; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %ebp
-; X86-NO-BMI2-NO-SHLD-NEXT: orl %ebx, %ebp
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %esi
-; X86-NO-BMI2-NO-SHLD-NEXT: movl (%esp), %ebx # 4-byte Reload
-; X86-NO-BMI2-NO-SHLD-NEXT: addl %ebx, %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: movb %ah, %cl
+; X86-NO-BMI2-NO-SHLD-NEXT: leal (%edi,%edi), %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %ch, %cl
; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: orl %esi, %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebx, (%esp) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 36(%esp,%ebx), %ebp
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebp, %esi
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %eax, %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-NO-SHLD-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 1-byte Folded Reload
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %bl, %cl
; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %esi
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 40(%esp,%ebx), %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-NO-SHLD-NEXT: addl %edx, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %ch, %cl
+; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %esi, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebp, %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 44(%esp,%ebp,4), %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebp, %eax
; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebx, %edx
-; X86-NO-BMI2-NO-SHLD-NEXT: leal (%ecx,%ecx), %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: movb %ah, %cl
-; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: orl %esi, %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %edi
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %bl, %cl
+; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 48(%esp,%esi,4), %edi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %esi, %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: leal (%edi,%edi), %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %ch, %cl
+; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %eax, %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %dl, %cl
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %eax
; X86-NO-BMI2-NO-SHLD-NEXT: addl %ebp, %ebp
-; X86-NO-BMI2-NO-SHLD-NEXT: movb %ah, %cl
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %ch, %cl
; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %ebp
-; X86-NO-BMI2-NO-SHLD-NEXT: orl %edi, %ebp
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 44(%esp,%edx), %edi
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %edi, %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 48(%esp,%edx), %esi
-; X86-NO-BMI2-NO-SHLD-NEXT: leal (%esi,%esi), %edx
-; X86-NO-BMI2-NO-SHLD-NEXT: movb %ah, %cl
-; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %edx
-; X86-NO-BMI2-NO-SHLD-NEXT: orl %ebx, %edx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: movb %al, %ch
-; X86-NO-BMI2-NO-SHLD-NEXT: movb %al, %cl
-; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
-; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: addl %edi, %edi
-; X86-NO-BMI2-NO-SHLD-NEXT: movb %ah, %cl
-; X86-NO-BMI2-NO-SHLD-NEXT: movb %ah, %dl
-; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %edi
-; X86-NO-BMI2-NO-SHLD-NEXT: orl %ebx, %edi
-; X86-NO-BMI2-NO-SHLD-NEXT: movb %al, %cl
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %esi, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %eax, %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 52(%esp,%ebx,4), %edi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %edi, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %dl, %cl
; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %eax
-; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 52(%esp,%esi), %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 56(%esp,%ebx,4), %ebx
; X86-NO-BMI2-NO-SHLD-NEXT: leal (%ebx,%ebx), %esi
-; X86-NO-BMI2-NO-SHLD-NEXT: movb %dl, %cl
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %ch, %cl
; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %esi
; X86-NO-BMI2-NO-SHLD-NEXT: orl %eax, %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %dl, %cl
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: addl %edi, %edi
; X86-NO-BMI2-NO-SHLD-NEXT: movb %ch, %cl
-; X86-NO-BMI2-NO-SHLD-NEXT: sarl %cl, %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebx, 28(%eax)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %esi, 24(%eax)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %edi, 16(%eax)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, 20(%eax)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebp, 8(%eax)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, 12(%eax)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl (%esp), %ecx # 4-byte Reload
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, (%eax)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, 4(%eax)
-; X86-NO-BMI2-NO-SHLD-NEXT: addl $88, %esp
+; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %edi
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %eax, %edi
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %dl, %cl
+; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 60(%esp,%eax,4), %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: leal (%eax,%eax), %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %ch, %cl
+; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %ebx, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload
+; X86-NO-BMI2-NO-SHLD-NEXT: sarl %cl, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, 28(%ecx)
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, 24(%ecx)
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %edi, 16(%ecx)
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %esi, 20(%ecx)
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebp, 8(%ecx)
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, 12(%ecx)
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, (%ecx)
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, 4(%ecx)
+; X86-NO-BMI2-NO-SHLD-NEXT: addl $108, %esp
; X86-NO-BMI2-NO-SHLD-NEXT: popl %esi
; X86-NO-BMI2-NO-SHLD-NEXT: popl %edi
; X86-NO-BMI2-NO-SHLD-NEXT: popl %ebx
@@ -3088,64 +2847,41 @@ define void @ashr_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edx, {{[0-9]+}}(%esp)
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edx, {{[0-9]+}}(%esp)
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, %eax
-; X86-NO-BMI2-HAVE-SHLD-NEXT: andb $7, %al
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shrb $3, %cl
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movzbl %cl, %ebp
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 32(%esp,%ebp), %esi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shrl %cl, %esi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, %edx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: notb %dl
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 36(%esp,%ebp), %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: leal (%ecx,%ecx), %edi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edx, %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shll %cl, %edi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: orl %esi, %edi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 40(%esp,%ebp), %esi
+; X86-NO-BMI2-HAVE-SHLD-NEXT: shrb $5, %al
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movzbl %al, %ebp
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 24(%esp,%ebp,4), %edx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 20(%esp,%ebp,4), %eax
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, (%esp) # 4-byte Spill
+; X86-NO-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %edx, %eax
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 32(%esp,%ebp,4), %ebx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 28(%esp,%ebp,4), %eax
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, %esi
+; X86-NO-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %ebx, %esi
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shrl %cl, %esi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 44(%esp,%ebp), %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, (%esp) # 4-byte Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: leal (%ecx,%ecx), %edi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edx, %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shll %cl, %edi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: orl %esi, %edi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 48(%esp,%ebp), %edi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shrl %cl, %edi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 52(%esp,%ebp), %ebx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: leal (%ebx,%ebx), %esi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edx, %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shll %cl, %esi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: orl %edi, %esi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %edx, (%esp) # 4-byte Folded Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 28(%esp,%ebp), %edx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 56(%esp,%ebp), %edi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %edi, %ebx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %eax, %edx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 40(%esp,%ebp,4), %edx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 36(%esp,%ebp,4), %eax
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, %edi
+; X86-NO-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %edx, %edi
+; X86-NO-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %eax, %ebx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 16(%esp,%ebp,4), %esi
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 44(%esp,%ebp,4), %eax
+; X86-NO-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %eax, %edx
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ebp
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ebx, 24(%ebp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %ebx, %edx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: sarl %cl, %edi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edi, 28(%ebp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl (%esp), %eax # 4-byte Reload
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, 16(%ebp)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edx, 24(%ebp)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl (%esp), %edx # 4-byte Reload
+; X86-NO-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %edx, %esi
+; X86-NO-BMI2-HAVE-SHLD-NEXT: sarl %cl, %eax
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, 28(%ebp)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ebx, 16(%ebp)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edi, 20(%ebp)
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, 8(%ebp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edx, (%ebp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %esi, 20(%ebp)
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, 12(%ebp)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %esi, (%ebp)
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, 4(%ebp)
; X86-NO-BMI2-HAVE-SHLD-NEXT: addl $92, %esp
@@ -3161,106 +2897,101 @@ define void @ashr_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %ebx
; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %edi
; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: subl $84, %esp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl (%edx), %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, (%esp) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 4(%edx), %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 8(%edx), %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 12(%edx), %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 16(%edx), %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movzbl (%ecx), %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 20(%edx), %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 24(%edx), %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 28(%edx), %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: subl $108, %esp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl (%eax), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 4(%eax), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 8(%eax), %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 12(%eax), %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 16(%eax), %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 20(%eax), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 24(%eax), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 28(%eax), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movzbl (%eax), %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebp, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebx, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edi, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl (%esp), %eax # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: sarl $31, %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: andb $7, %bl
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrb $3, %cl
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movzbl %cl, %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 24(%esp,%edi), %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 28(%esp,%edi), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: sarl $31, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrb $5, %cl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movzbl %cl, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 36(%esp,%esi,4), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 40(%esp,%esi,4), %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, %ecx, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: andb $31, %dl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: xorb $31, %dl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %ebx, %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %ebx, %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edi, %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, 32(%esp,%esi,4), %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %ecx, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %ecx, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edi, %ecx
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebx, %esi, %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, (%esp) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebx, %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: notb %dl
-; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%ecx,%ecx), %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %ebp, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl (%esp), %eax # 4-byte Folded Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, (%esp) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebx, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebx, 20(%esp,%edi), %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %esi, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %esi, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebx, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 36(%esp,%edi), %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%eax,%eax), %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %ebx, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 32(%esp,%edi), %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, %ebp, %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebx, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %ebp, %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %ebp, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ecx, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 44(%esp,%edi), %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%ebp,%ebp), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 48(%esp,%esi,4), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %ecx, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %ecx, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 44(%esp,%esi,4), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, %ecx, %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebp, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %ecx, %ecx
; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %ecx, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 40(%esp,%edi), %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebx, %eax, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebx, {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %eax, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %eax, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebx, %ebp, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 48(%esp,%edi), %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: sarxl %ebx, %edi, %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edi, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 56(%esp,%esi,4), %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%ebx,%ebx), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %ecx, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 52(%esp,%esi,4), %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, %edi, %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebp, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %edi, %edi
; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %edi, %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %eax, %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebx, 28(%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edi, 24(%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, 16(%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 20(%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 8(%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 12(%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, (%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl (%esp), %ecx # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 4(%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: addl $84, %esp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebp, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, %ebx, %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 60(%esp,%esi,4), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: sarxl %eax, %esi, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %esi, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %esi, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebx, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 28(%esi)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, 24(%esi)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edi, 16(%esi)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 20(%esi)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 8(%esi)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 12(%esi)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, (%esi)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 4(%esi)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl $108, %esp
; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %edi
; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %ebx
@@ -3273,93 +3004,79 @@ define void @ashr_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: pushl %ebx
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: pushl %edi
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: pushl %esi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: subl $88, %esp
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: subl $92, %esp
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl (%edx), %ecx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl (%eax), %ecx
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 4(%edx), %ecx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 4(%eax), %ecx
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, (%esp) # 4-byte Spill
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 8(%edx), %edi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 12(%edx), %ebx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 16(%edx), %ebp
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movzbl (%eax), %eax
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 20(%edx), %esi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 24(%edx), %ecx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 28(%edx), %edx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 8(%eax), %edi
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 12(%eax), %ebx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 16(%eax), %ebp
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 20(%eax), %esi
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 24(%eax), %edx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 28(%eax), %eax
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movzbl (%ecx), %ecx
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ebp, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ebx, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edi, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl (%esp), %ecx # 4-byte Reload
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: sarl $31, %edx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl (%esp), %edx # 4-byte Reload
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, %ecx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: andb $7, %cl
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrb $3, %al
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movzbl %al, %ebx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 28(%esp,%ebx), %eax
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: sarl $31, %eax
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, %eax
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrb $5, %al
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movzbl %al, %ebp
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 24(%esp,%ebp,4), %esi
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 20(%esp,%ebp,4), %eax
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrxl %ecx, %eax, %eax
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, %edx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: notb %dl
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 32(%esp,%ebx), %esi
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %esi, %eax
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, (%esp) # 4-byte Spill
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 32(%esp,%ebp,4), %ebx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 28(%esp,%ebp,4), %eax
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, %edx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %ebx, %edx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %eax, %esi
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: addl %esi, %esi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shlxl %edx, %esi, %esi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: orl %eax, %esi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %esi, (%esp) # 4-byte Spill
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 40(%esp,%ebx), %ebp
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: leal (%ebp,%ebp), %eax
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shlxl %edx, %eax, %edi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 36(%esp,%ebx), %eax
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrxl %ecx, %eax, %eax
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: orl %eax, %edi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 48(%esp,%ebx), %esi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: leal (%esi,%esi), %edi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shlxl %edx, %edi, %eax
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 44(%esp,%ebx), %edx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrxl %ecx, %edx, %edi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: orl %edi, %eax
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %eax, %edi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %edx, %ebp
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 24(%esp,%ebx), %edx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 52(%esp,%ebx), %eax
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 40(%esp,%ebp,4), %eax
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 36(%esp,%ebp,4), %edx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edx, %esi
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %eax, %esi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ebx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %esi, 24(%ebx)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: sarxl %ecx, %eax, %eax
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, 28(%ebx)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ebp, 16(%ebx)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edi, 8(%ebx)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %edx, %ebx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 16(%esp,%ebp,4), %edx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 44(%esp,%ebp,4), %edi
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %edi, %eax
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ebp
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, 24(%ebp)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: sarxl %ecx, %edi, %eax
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, 28(%ebp)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ebx, 16(%ebp)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %esi, 20(%ebp)
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %eax, %edx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edx, (%ebx)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, 8(%ebp)
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, 20(%ebx)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, 12(%ebp)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: # kill: def $cl killed $cl killed $ecx
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, 12(%ebx)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %eax, %edx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edx, (%ebp)
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl (%esp), %eax # 4-byte Reload
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, 4(%ebx)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: addl $88, %esp
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, 4(%ebp)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: addl $92, %esp
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: popl %esi
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: popl %edi
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: popl %ebx
@@ -3381,6 +3098,7 @@ define void @lshr_64bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X64-NO-BMI2-NO-SHLD-NEXT: pushq %r13
; X64-NO-BMI2-NO-SHLD-NEXT: pushq %r12
; X64-NO-BMI2-NO-SHLD-NEXT: pushq %rbx
+; X64-NO-BMI2-NO-SHLD-NEXT: pushq %rax
; X64-NO-BMI2-NO-SHLD-NEXT: movq (%rdi), %rax
; X64-NO-BMI2-NO-SHLD-NEXT: movq 8(%rdi), %rcx
; X64-NO-BMI2-NO-SHLD-NEXT: movq 16(%rdi), %r9
@@ -3390,6 +3108,11 @@ define void @lshr_64bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X64-NO-BMI2-NO-SHLD-NEXT: movq 48(%rdi), %r14
; X64-NO-BMI2-NO-SHLD-NEXT: movq 56(%rdi), %rdi
; X64-NO-BMI2-NO-SHLD-NEXT: movl (%rsi), %r8d
+; X64-NO-BMI2-NO-SHLD-NEXT: xorps %xmm0, %xmm0
+; X64-NO-BMI2-NO-SHLD-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-NO-SHLD-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-NO-SHLD-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-NO-SHLD-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; X64-NO-BMI2-NO-SHLD-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
; X64-NO-BMI2-NO-SHLD-NEXT: movq %r14, -{{[0-9]+}}(%rsp)
; X64-NO-BMI2-NO-SHLD-NEXT: movq %rbx, -{{[0-9]+}}(%rsp)
@@ -3398,18 +3121,10 @@ define void @lshr_64bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X64-NO-BMI2-NO-SHLD-NEXT: movq %r9, -{{[0-9]+}}(%rsp)
; X64-NO-BMI2-NO-SHLD-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
; X64-NO-BMI2-NO-SHLD-NEXT: movq %rax, -{{[0-9]+}}(%rsp)
-; X64-NO-BMI2-NO-SHLD-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NO-BMI2-NO-SHLD-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NO-BMI2-NO-SHLD-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NO-BMI2-NO-SHLD-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NO-BMI2-NO-SHLD-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NO-BMI2-NO-SHLD-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NO-BMI2-NO-SHLD-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NO-BMI2-NO-SHLD-NEXT: movq $0, -{{[0-9]+}}(%rsp)
; X64-NO-BMI2-NO-SHLD-NEXT: movl %r8d, %eax
-; X64-NO-BMI2-NO-SHLD-NEXT: andl $7, %eax
+; X64-NO-BMI2-NO-SHLD-NEXT: andl $63, %eax
; X64-NO-BMI2-NO-SHLD-NEXT: shrl $3, %r8d
-; X64-NO-BMI2-NO-SHLD-NEXT: andl $63, %r8d
+; X64-NO-BMI2-NO-SHLD-NEXT: andl $56, %r8d
; X64-NO-BMI2-NO-SHLD-NEXT: movq -128(%rsp,%r8), %r11
; X64-NO-BMI2-NO-SHLD-NEXT: movq -120(%rsp,%r8), %r9
; X64-NO-BMI2-NO-SHLD-NEXT: movq %r9, %rsi
@@ -3417,7 +3132,6 @@ define void @lshr_64bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X64-NO-BMI2-NO-SHLD-NEXT: shrq %cl, %rsi
; X64-NO-BMI2-NO-SHLD-NEXT: movl %eax, %edi
; X64-NO-BMI2-NO-SHLD-NEXT: notl %edi
-; X64-NO-BMI2-NO-SHLD-NEXT: andl $63, %edi
; X64-NO-BMI2-NO-SHLD-NEXT: movq -112(%rsp,%r8), %r14
; X64-NO-BMI2-NO-SHLD-NEXT: leaq (%r14,%r14), %r10
; X64-NO-BMI2-NO-SHLD-NEXT: movl %edi, %ecx
@@ -3426,7 +3140,7 @@ define void @lshr_64bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X64-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
; X64-NO-BMI2-NO-SHLD-NEXT: shrq %cl, %r11
; X64-NO-BMI2-NO-SHLD-NEXT: movl %eax, %esi
-; X64-NO-BMI2-NO-SHLD-NEXT: notb %sil
+; X64-NO-BMI2-NO-SHLD-NEXT: xorb $63, %sil
; X64-NO-BMI2-NO-SHLD-NEXT: addq %r9, %r9
; X64-NO-BMI2-NO-SHLD-NEXT: movl %esi, %ecx
; X64-NO-BMI2-NO-SHLD-NEXT: shlq %cl, %r9
@@ -3478,6 +3192,7 @@ define void @lshr_64bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X64-NO-BMI2-NO-SHLD-NEXT: movq %rbx, 24(%rdx)
; X64-NO-BMI2-NO-SHLD-NEXT: movq %r9, (%rdx)
; X64-NO-BMI2-NO-SHLD-NEXT: movq %r10, 8(%rdx)
+; X64-NO-BMI2-NO-SHLD-NEXT: addq $8, %rsp
; X64-NO-BMI2-NO-SHLD-NEXT: popq %rbx
; X64-NO-BMI2-NO-SHLD-NEXT: popq %r12
; X64-NO-BMI2-NO-SHLD-NEXT: popq %r13
@@ -3488,22 +3203,24 @@ define void @lshr_64bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
;
; X64-NO-BMI2-HAVE-SHLD-LABEL: lshr_64bytes:
; X64-NO-BMI2-HAVE-SHLD: # %bb.0:
-; X64-NO-BMI2-HAVE-SHLD-NEXT: pushq %rbp
; X64-NO-BMI2-HAVE-SHLD-NEXT: pushq %r15
; X64-NO-BMI2-HAVE-SHLD-NEXT: pushq %r14
-; X64-NO-BMI2-HAVE-SHLD-NEXT: pushq %r13
-; X64-NO-BMI2-HAVE-SHLD-NEXT: pushq %r12
; X64-NO-BMI2-HAVE-SHLD-NEXT: pushq %rbx
-; X64-NO-BMI2-HAVE-SHLD-NEXT: pushq %rax
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq (%rdi), %rax
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq 8(%rdi), %rcx
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq 16(%rdi), %r8
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq 24(%rdi), %r9
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq 32(%rdi), %r10
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq 40(%rdi), %r11
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq 48(%rdi), %rbx
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq 56(%rdi), %r14
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movl (%rsi), %edi
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq (%rdi), %rcx
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq 8(%rdi), %r8
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq 16(%rdi), %r9
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq 24(%rdi), %r10
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq 32(%rdi), %r11
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq 40(%rdi), %rbx
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq 48(%rdi), %r14
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq 56(%rdi), %rdi
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movl (%rsi), %eax
+; X64-NO-BMI2-HAVE-SHLD-NEXT: xorps %xmm0, %xmm0
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %r14, -{{[0-9]+}}(%rsp)
; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %rbx, -{{[0-9]+}}(%rsp)
; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %r11, -{{[0-9]+}}(%rsp)
@@ -3511,73 +3228,41 @@ define void @lshr_64bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %r9, -{{[0-9]+}}(%rsp)
; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %r8, -{{[0-9]+}}(%rsp)
; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %rax, -{{[0-9]+}}(%rsp)
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq $0, (%rsp)
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movl %edi, %eax
-; X64-NO-BMI2-HAVE-SHLD-NEXT: andl $7, %eax
-; X64-NO-BMI2-HAVE-SHLD-NEXT: shrl $3, %edi
-; X64-NO-BMI2-HAVE-SHLD-NEXT: andl $63, %edi
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq -120(%rsp,%rdi), %r8
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq -112(%rsp,%rdi), %rbx
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %rbx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, %ecx
-; X64-NO-BMI2-HAVE-SHLD-NEXT: shrq %cl, %rbx
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, %esi
-; X64-NO-BMI2-HAVE-SHLD-NEXT: notl %esi
-; X64-NO-BMI2-HAVE-SHLD-NEXT: andl $63, %esi
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq -104(%rsp,%rdi), %r11
-; X64-NO-BMI2-HAVE-SHLD-NEXT: leaq (%r11,%r11), %r10
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movl %esi, %ecx
-; X64-NO-BMI2-HAVE-SHLD-NEXT: shlq %cl, %r10
-; X64-NO-BMI2-HAVE-SHLD-NEXT: orq %rbx, %r10
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq -96(%rsp,%rdi), %r15
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %r15, %r12
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, %ecx
-; X64-NO-BMI2-HAVE-SHLD-NEXT: shrq %cl, %r12
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq -88(%rsp,%rdi), %r14
-; X64-NO-BMI2-HAVE-SHLD-NEXT: leaq (%r14,%r14), %rbx
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movl %esi, %ecx
-; X64-NO-BMI2-HAVE-SHLD-NEXT: shlq %cl, %rbx
-; X64-NO-BMI2-HAVE-SHLD-NEXT: orq %r12, %rbx
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq -80(%rsp,%rdi), %r12
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %r12, %r13
; X64-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, %ecx
-; X64-NO-BMI2-HAVE-SHLD-NEXT: shrq %cl, %r13
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq -72(%rsp,%rdi), %rbp
-; X64-NO-BMI2-HAVE-SHLD-NEXT: leaq (%rbp,%rbp), %r9
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movl %esi, %ecx
-; X64-NO-BMI2-HAVE-SHLD-NEXT: shlq %cl, %r9
-; X64-NO-BMI2-HAVE-SHLD-NEXT: orq %r13, %r9
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, %ecx
-; X64-NO-BMI2-HAVE-SHLD-NEXT: shrdq %cl, %r15, %r11
-; X64-NO-BMI2-HAVE-SHLD-NEXT: shrdq %cl, %r12, %r14
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq -64(%rsp,%rdi), %rsi
-; X64-NO-BMI2-HAVE-SHLD-NEXT: shrdq %cl, %rsi, %rbp
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload
+; X64-NO-BMI2-HAVE-SHLD-NEXT: andl $63, %ecx
+; X64-NO-BMI2-HAVE-SHLD-NEXT: shrl $3, %eax
+; X64-NO-BMI2-HAVE-SHLD-NEXT: andl $56, %eax
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq -112(%rsp,%rax), %rdi
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq -128(%rsp,%rax), %rsi
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq -120(%rsp,%rax), %r9
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %r9, %r8
; X64-NO-BMI2-HAVE-SHLD-NEXT: shrdq %cl, %rdi, %r8
-; X64-NO-BMI2-HAVE-SHLD-NEXT: shrq %cl, %rsi
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %rbp, 48(%rdx)
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %rsi, 56(%rdx)
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %r14, 32(%rdx)
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %r11, 16(%rdx)
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %r8, (%rdx)
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %r9, 40(%rdx)
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq -96(%rsp,%rax), %r10
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq -104(%rsp,%rax), %r11
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %r11, %rbx
+; X64-NO-BMI2-HAVE-SHLD-NEXT: shrdq %cl, %r10, %rbx
+; X64-NO-BMI2-HAVE-SHLD-NEXT: shrdq %cl, %r11, %rdi
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq -80(%rsp,%rax), %r11
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq -88(%rsp,%rax), %r14
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %r14, %r15
+; X64-NO-BMI2-HAVE-SHLD-NEXT: shrdq %cl, %r11, %r15
+; X64-NO-BMI2-HAVE-SHLD-NEXT: shrdq %cl, %r14, %r10
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq -72(%rsp,%rax), %rax
+; X64-NO-BMI2-HAVE-SHLD-NEXT: shrdq %cl, %rax, %r11
+; X64-NO-BMI2-HAVE-SHLD-NEXT: shrdq %cl, %r9, %rsi
+; X64-NO-BMI2-HAVE-SHLD-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NO-BMI2-HAVE-SHLD-NEXT: shrq %cl, %rax
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %r11, 48(%rdx)
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %rax, 56(%rdx)
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %r10, 32(%rdx)
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %r15, 40(%rdx)
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %rdi, 16(%rdx)
; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %rbx, 24(%rdx)
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %r10, 8(%rdx)
-; X64-NO-BMI2-HAVE-SHLD-NEXT: addq $8, %rsp
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %rsi, (%rdx)
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %r8, 8(%rdx)
; X64-NO-BMI2-HAVE-SHLD-NEXT: popq %rbx
-; X64-NO-BMI2-HAVE-SHLD-NEXT: popq %r12
-; X64-NO-BMI2-HAVE-SHLD-NEXT: popq %r13
; X64-NO-BMI2-HAVE-SHLD-NEXT: popq %r14
; X64-NO-BMI2-HAVE-SHLD-NEXT: popq %r15
-; X64-NO-BMI2-HAVE-SHLD-NEXT: popq %rbp
; X64-NO-BMI2-HAVE-SHLD-NEXT: retq
;
; X64-HAVE-BMI2-NO-SHLD-LABEL: lshr_64bytes:
@@ -3588,6 +3273,7 @@ define void @lshr_64bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X64-HAVE-BMI2-NO-SHLD-NEXT: pushq %r13
; X64-HAVE-BMI2-NO-SHLD-NEXT: pushq %r12
; X64-HAVE-BMI2-NO-SHLD-NEXT: pushq %rbx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: pushq %rax
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq (%rdi), %rcx
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq 8(%rdi), %r8
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq 16(%rdi), %r9
@@ -3597,6 +3283,11 @@ define void @lshr_64bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq 48(%rdi), %r14
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq 56(%rdi), %rdi
; X64-HAVE-BMI2-NO-SHLD-NEXT: movl (%rsi), %eax
+; X64-HAVE-BMI2-NO-SHLD-NEXT: xorps %xmm0, %xmm0
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %r14, -{{[0-9]+}}(%rsp)
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rbx, -{{[0-9]+}}(%rsp)
@@ -3606,52 +3297,43 @@ define void @lshr_64bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %r8, -{{[0-9]+}}(%rsp)
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
; X64-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
-; X64-HAVE-BMI2-NO-SHLD-NEXT: andl $7, %ecx
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq $0, -{{[0-9]+}}(%rsp)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: andl $63, %ecx
; X64-HAVE-BMI2-NO-SHLD-NEXT: shrl $3, %eax
-; X64-HAVE-BMI2-NO-SHLD-NEXT: andl $63, %eax
+; X64-HAVE-BMI2-NO-SHLD-NEXT: andl $56, %eax
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -120(%rsp,%rax), %rdi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -112(%rsp,%rax), %r9
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, %rdi, %rbx
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, -128(%rsp,%rax), %r14
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -112(%rsp,%rax), %r8
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, %rdi, %r15
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, -128(%rsp,%rax), %rbx
; X64-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, %esi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -104(%rsp,%rax), %r8
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, %r8, %r13
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, %r9, %r10
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -104(%rsp,%rax), %r9
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, %r9, %r13
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, %r8, %r10
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -88(%rsp,%rax), %r11
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, %r11, %r15
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, %r11, %r14
; X64-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, %r12d
; X64-HAVE-BMI2-NO-SHLD-NEXT: notl %r12d
-; X64-HAVE-BMI2-NO-SHLD-NEXT: andl $63, %r12d
-; X64-HAVE-BMI2-NO-SHLD-NEXT: addq %r9, %r9
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %r12, %r9, %r9
-; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %rbx, %r9
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -96(%rsp,%rax), %rbx
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, %rbx, %rbp
-; X64-HAVE-BMI2-NO-SHLD-NEXT: notb %sil
+; X64-HAVE-BMI2-NO-SHLD-NEXT: addq %r8, %r8
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %r12, %r8, %r8
+; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r15, %r8
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -96(%rsp,%rax), %r15
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, %r15, %rbp
+; X64-HAVE-BMI2-NO-SHLD-NEXT: xorb $63, %sil
; X64-HAVE-BMI2-NO-SHLD-NEXT: addq %rdi, %rdi
; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rsi, %rdi, %rdi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r14, %rdi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: addq %rbx, %rbx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %rbx, %rdi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: leaq (%r15,%r15), %rbx
; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %r12, %rbx, %rbx
; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r13, %rbx
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -80(%rsp,%rax), %r14
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, %r14, %r13
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -80(%rsp,%rax), %r15
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, %r15, %r13
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -72(%rsp,%rax), %rax
; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, %rax, %rcx
-; X64-HAVE-BMI2-NO-SHLD-NEXT: addq %r8, %r8
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rsi, %r8, %r8
-; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r10, %r8
-; X64-HAVE-BMI2-NO-SHLD-NEXT: leaq (%r14,%r14), %r10
+; X64-HAVE-BMI2-NO-SHLD-NEXT: addq %r9, %r9
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rsi, %r9, %r9
+; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r10, %r9
+; X64-HAVE-BMI2-NO-SHLD-NEXT: leaq (%r15,%r15), %r10
; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %r12, %r10, %r10
-; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r15, %r10
+; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r14, %r10
; X64-HAVE-BMI2-NO-SHLD-NEXT: addq %r11, %r11
; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rsi, %r11, %r11
; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %rbp, %r11
@@ -3662,10 +3344,11 @@ define void @lshr_64bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rax, 48(%rdx)
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %r11, 32(%rdx)
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %r10, 40(%rdx)
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %r8, 16(%rdx)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %r9, 16(%rdx)
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rbx, 24(%rdx)
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rdi, (%rdx)
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %r9, 8(%rdx)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %r8, 8(%rdx)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: addq $8, %rsp
; X64-HAVE-BMI2-NO-SHLD-NEXT: popq %rbx
; X64-HAVE-BMI2-NO-SHLD-NEXT: popq %r12
; X64-HAVE-BMI2-NO-SHLD-NEXT: popq %r13
@@ -3676,11 +3359,8 @@ define void @lshr_64bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
;
; X64-HAVE-BMI2-HAVE-SHLD-LABEL: lshr_64bytes:
; X64-HAVE-BMI2-HAVE-SHLD: # %bb.0:
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: pushq %rbp
; X64-HAVE-BMI2-HAVE-SHLD-NEXT: pushq %r15
; X64-HAVE-BMI2-HAVE-SHLD-NEXT: pushq %r14
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: pushq %r13
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: pushq %r12
; X64-HAVE-BMI2-HAVE-SHLD-NEXT: pushq %rbx
; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq (%rdi), %rcx
; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq 8(%rdi), %r8
@@ -3691,6 +3371,11 @@ define void @lshr_64bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq 48(%rdi), %r14
; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq 56(%rdi), %rdi
; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movl (%rsi), %eax
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: xorps %xmm0, %xmm0
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %r14, -{{[0-9]+}}(%rsp)
; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %rbx, -{{[0-9]+}}(%rsp)
@@ -3700,60 +3385,39 @@ define void @lshr_64bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %r8, -{{[0-9]+}}(%rsp)
; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, %ecx
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: andl $7, %ecx
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq $0, -{{[0-9]+}}(%rsp)
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: andl $63, %ecx
; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shrl $3, %eax
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: andl $63, %eax
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq -120(%rsp,%rax), %rsi
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shrxq %rcx, %rsi, %r10
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq -104(%rsp,%rax), %r8
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shrxq %rcx, %r8, %r15
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq -88(%rsp,%rax), %r11
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shrxq %rcx, %r11, %r14
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, %r12d
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: notl %r12d
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: andl $63, %r12d
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq -112(%rsp,%rax), %r9
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: leaq (%r9,%r9), %rdi
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shlxq %r12, %rdi, %rdi
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: orq %r10, %rdi
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq -96(%rsp,%rax), %rbx
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: leaq (%rbx,%rbx), %r10
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shlxq %r12, %r10, %r10
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: orq %r15, %r10
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq -80(%rsp,%rax), %r13
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: leaq (%r13,%r13), %r15
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shlxq %r12, %r15, %r15
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq -72(%rsp,%rax), %r12
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shrxq %rcx, %r12, %rbp
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq -128(%rsp,%rax), %rax
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: orq %r14, %r15
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shrdq %cl, %r8, %r9
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shrdq %cl, %r11, %rbx
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shrdq %cl, %r12, %r13
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: # kill: def $cl killed $cl killed $rcx
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shrdq %cl, %rsi, %rax
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %r13, 48(%rdx)
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %rbp, 56(%rdx)
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %rbx, 32(%rdx)
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %r9, 16(%rdx)
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %rax, (%rdx)
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: andl $56, %eax
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq -112(%rsp,%rax), %rdi
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq -128(%rsp,%rax), %rsi
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq -120(%rsp,%rax), %r9
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %r9, %r8
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shrdq %cl, %rdi, %r8
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq -96(%rsp,%rax), %r10
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq -104(%rsp,%rax), %r11
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %r11, %rbx
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shrdq %cl, %r10, %rbx
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shrdq %cl, %r11, %rdi
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq -80(%rsp,%rax), %r11
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq -88(%rsp,%rax), %r14
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %r14, %r15
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shrdq %cl, %r11, %r15
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shrdq %cl, %r14, %r10
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq -72(%rsp,%rax), %rax
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shrdq %cl, %rax, %r11
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shrdq %cl, %r9, %rsi
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shrxq %rcx, %rax, %rax
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %r11, 48(%rdx)
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %rax, 56(%rdx)
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %r10, 32(%rdx)
; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %r15, 40(%rdx)
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %r10, 24(%rdx)
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %rdi, 8(%rdx)
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %rdi, 16(%rdx)
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %rbx, 24(%rdx)
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %rsi, (%rdx)
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %r8, 8(%rdx)
; X64-HAVE-BMI2-HAVE-SHLD-NEXT: popq %rbx
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: popq %r12
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: popq %r13
; X64-HAVE-BMI2-HAVE-SHLD-NEXT: popq %r14
; X64-HAVE-BMI2-HAVE-SHLD-NEXT: popq %r15
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: popq %rbp
; X64-HAVE-BMI2-HAVE-SHLD-NEXT: retq
;
; X86-NO-BMI2-NO-SHLD-LABEL: lshr_64bytes:
@@ -3762,40 +3426,44 @@ define void @lshr_64bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X86-NO-BMI2-NO-SHLD-NEXT: pushl %ebx
; X86-NO-BMI2-NO-SHLD-NEXT: pushl %edi
; X86-NO-BMI2-NO-SHLD-NEXT: pushl %esi
-; X86-NO-BMI2-NO-SHLD-NEXT: subl $208, %esp
-; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X86-NO-BMI2-NO-SHLD-NEXT: movl (%esi), %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: subl $204, %esp
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl (%edi), %eax
; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 4(%esi), %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 4(%edi), %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, (%esp) # 4-byte Spill
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 8(%edi), %eax
; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 8(%esi), %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 12(%edi), %eax
; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 12(%esi), %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 16(%edi), %eax
; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 16(%esi), %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 20(%edi), %eax
; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 20(%esi), %eax
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, (%esp) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 24(%esi), %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 24(%edi), %eax
; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 28(%esi), %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 28(%edi), %eax
; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 32(%esi), %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 32(%edi), %eax
; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 36(%esi), %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 36(%edi), %eax
; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 40(%esi), %ebp
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 44(%esi), %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 48(%esi), %edi
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 52(%esi), %edx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 56(%esi), %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 60(%esi), %eax
-; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X86-NO-BMI2-NO-SHLD-NEXT: movl (%esi), %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 40(%edi), %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 44(%edi), %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 48(%edi), %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 52(%edi), %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 56(%edi), %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 60(%edi), %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl (%edi), %edi
+; X86-NO-BMI2-NO-SHLD-NEXT: xorps %xmm0, %xmm0
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %esi, {{[0-9]+}}(%esp)
; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebx, {{[0-9]+}}(%esp)
; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebp, {{[0-9]+}}(%esp)
; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
@@ -3806,8 +3474,7 @@ define void @lshr_64bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl (%esp), %eax # 4-byte Reload
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
@@ -3816,214 +3483,199 @@ define void @lshr_64bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movl (%esp), %eax # 4-byte Reload
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %esi, %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: andl $7, %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: shrl $3, %esi
-; X86-NO-BMI2-NO-SHLD-NEXT: andl $63, %esi
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 84(%esp,%esi), %edi
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %edi
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 88(%esp,%esi), %eax
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: leal (%eax,%eax), %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, %edx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, %eax
-; X86-NO-BMI2-NO-SHLD-NEXT: notl %edx
-; X86-NO-BMI2-NO-SHLD-NEXT: andl $31, %edx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: orl %edi, %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 92(%esp,%esi), %edi
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %edi
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 96(%esp,%esi), %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: leal (%ecx,%ecx), %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: orl %edi, %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 100(%esp,%esi), %edi
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %edi
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 104(%esp,%esi), %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: leal (%ecx,%ecx), %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: orl %edi, %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 108(%esp,%esi), %edi
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %edi
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 112(%esp,%esi), %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: leal (%ecx,%ecx), %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: orl %edi, %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 116(%esp,%esi), %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 120(%esp,%esi), %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: leal (%ecx,%ecx), %edi
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %edi
-; X86-NO-BMI2-NO-SHLD-NEXT: orl %ebx, %edi
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 124(%esp,%esi), %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebx, (%esp) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 128(%esp,%esi), %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: leal (%ecx,%ecx), %edi
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %edi
-; X86-NO-BMI2-NO-SHLD-NEXT: orl %ebx, %edi
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 132(%esp,%esi), %edi
; X86-NO-BMI2-NO-SHLD-NEXT: movl %edi, %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 136(%esp,%esi), %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: andl $31, %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: shrl $3, %edi
+; X86-NO-BMI2-NO-SHLD-NEXT: andl $60, %edi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 68(%esp,%edi), %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebx, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebx, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: notl %ecx
; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: leal (%ecx,%ecx), %ebp
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 72(%esp,%edi), %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: leal (%edx,%edx), %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: # kill: def $cl killed $cl killed $ecx
; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %ebp
-; X86-NO-BMI2-NO-SHLD-NEXT: orl %ebx, %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %esi, %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 64(%esp,%edi), %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebx, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebx, %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: xorb $31, %cl
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %cl, (%esp) # 1-byte Spill
+; X86-NO-BMI2-NO-SHLD-NEXT: addl %eax, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %esi, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 76(%esp,%edi), %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebp, %eax
; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 80(%esp,%esi), %ebx
; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %edx
-; X86-NO-BMI2-NO-SHLD-NEXT: notb %dl
-; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
-; X86-NO-BMI2-NO-SHLD-NEXT: addl %ebp, %ebp
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 80(%esp,%edi), %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: leal (%esi,%esi), %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NO-BMI2-NO-SHLD-NEXT: # kill: def $cl killed $cl killed $ecx
; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %ebp
; X86-NO-BMI2-NO-SHLD-NEXT: orl %ebx, %ebp
; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-NO-BMI2-NO-SHLD-NEXT: addl %eax, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movzbl (%esp), %ecx # 1-byte Folded Reload
+; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %edx, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 84(%esp,%edi), %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebp, %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, %ecx
; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
-; X86-NO-BMI2-NO-SHLD-NEXT: addl %ebp, %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 88(%esp,%edi), %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-NO-SHLD-NEXT: addl %eax, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NO-BMI2-NO-SHLD-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %ebx, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: addl %ebp, %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: movzbl (%esp), %ecx # 1-byte Folded Reload
; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %ebp
-; X86-NO-BMI2-NO-SHLD-NEXT: orl %ebx, %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %esi, %ebp
; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
-; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
-; X86-NO-BMI2-NO-SHLD-NEXT: addl %ebp, %ebp
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 92(%esp,%edi), %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebx, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 96(%esp,%edi), %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-NO-SHLD-NEXT: leal (%eax,%eax), %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NO-BMI2-NO-SHLD-NEXT: # kill: def $cl killed $cl killed $ecx
; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %ebp
-; X86-NO-BMI2-NO-SHLD-NEXT: orl %ebx, %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %esi, %ebp
; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebx, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: addl %edx, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: movzbl (%esp), %ecx # 1-byte Folded Reload
+; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %eax, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 100(%esp,%edi), %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebp, %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebx, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 104(%esp,%edi), %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: leal (%edx,%edx), %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NO-BMI2-NO-SHLD-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %esi, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebx, %eax
; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
-; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %esi
; X86-NO-BMI2-NO-SHLD-NEXT: addl %ebp, %ebp
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: movzbl (%esp), %ecx # 1-byte Folded Reload
; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %ebp
-; X86-NO-BMI2-NO-SHLD-NEXT: orl %ebx, %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %esi, %ebp
; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 108(%esp,%edi), %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebx, %esi
; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
-; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
-; X86-NO-BMI2-NO-SHLD-NEXT: addl %ebp, %ebp
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 112(%esp,%edi), %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-NO-SHLD-NEXT: leal (%ecx,%ecx), %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NO-BMI2-NO-SHLD-NEXT: # kill: def $cl killed $cl killed $ecx
; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %ebp
-; X86-NO-BMI2-NO-SHLD-NEXT: orl %ebx, %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %esi, %ebp
; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
-; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl (%esp), %ebp # 4-byte Reload
-; X86-NO-BMI2-NO-SHLD-NEXT: addl %ebp, %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: addl %ebx, %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: movzbl (%esp), %ecx # 1-byte Folded Reload
+; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %edx, %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 116(%esp,%edi), %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %esi, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebp, %edx
; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 120(%esp,%edi), %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-NO-SHLD-NEXT: leal (%ecx,%ecx), %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NO-BMI2-NO-SHLD-NEXT: # kill: def $cl killed $cl killed $ecx
; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %ebp
-; X86-NO-BMI2-NO-SHLD-NEXT: orl %ebx, %ebp
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebp, (%esp) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
-; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: addl %edi, %edi
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %edi
-; X86-NO-BMI2-NO-SHLD-NEXT: orl %ebx, %edi
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %eax, %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, %eax
; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: addl %esi, %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: movb (%esp), %ch # 1-byte Reload
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %ch, %cl
+; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %edx, %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %al, %cl
; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %eax
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 140(%esp,%esi), %esi
-; X86-NO-BMI2-NO-SHLD-NEXT: leal (%esi,%esi), %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: orl %eax, %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebp, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 124(%esp,%edi), %edi
+; X86-NO-BMI2-NO-SHLD-NEXT: leal (%edi,%edi), %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %ch, %cl
+; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %eax, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NO-BMI2-NO-SHLD-NEXT: # kill: def $cl killed $cl killed $ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %edi
; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %esi, 60(%eax)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebx, 56(%eax)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %edi, 48(%eax)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl (%esp), %ecx # 4-byte Reload
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, 40(%eax)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, 32(%eax)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, 24(%eax)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, 16(%eax)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, 8(%eax)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, (%eax)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, 52(%eax)
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %edi, 60(%eax)
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, 56(%eax)
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %esi, 48(%eax)
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebp, 52(%eax)
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebx, 40(%eax)
; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, 44(%eax)
; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, 32(%eax)
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, 36(%eax)
; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, 24(%eax)
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, 28(%eax)
; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, 16(%eax)
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, 20(%eax)
; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, 8(%eax)
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, 12(%eax)
; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, (%eax)
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, 4(%eax)
-; X86-NO-BMI2-NO-SHLD-NEXT: addl $208, %esp
+; X86-NO-BMI2-NO-SHLD-NEXT: addl $204, %esp
; X86-NO-BMI2-NO-SHLD-NEXT: popl %esi
; X86-NO-BMI2-NO-SHLD-NEXT: popl %edi
; X86-NO-BMI2-NO-SHLD-NEXT: popl %ebx
@@ -4036,209 +3688,153 @@ define void @lshr_64bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X86-NO-BMI2-HAVE-SHLD-NEXT: pushl %ebx
; X86-NO-BMI2-HAVE-SHLD-NEXT: pushl %edi
; X86-NO-BMI2-HAVE-SHLD-NEXT: pushl %esi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: subl $204, %esp
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl (%esi), %eax
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 4(%esi), %eax
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 8(%esi), %eax
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 12(%esi), %eax
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 16(%esi), %eax
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 20(%esi), %eax
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, (%esp) # 4-byte Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 24(%esi), %eax
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 28(%esi), %eax
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 32(%esi), %eax
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 36(%esi), %eax
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 40(%esi), %ebp
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 44(%esi), %ebx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 48(%esi), %edi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 52(%esi), %edx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 56(%esi), %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 60(%esi), %eax
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl (%esi), %esi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: subl $188, %esp
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl (%eax), %ecx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 4(%eax), %ecx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 8(%eax), %ecx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 12(%eax), %ecx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 16(%eax), %ecx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 20(%eax), %ecx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 24(%eax), %ecx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 28(%eax), %ecx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 32(%eax), %ecx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 36(%eax), %ecx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, (%esp) # 4-byte Spill
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 40(%eax), %ebp
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 44(%eax), %ebx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 48(%eax), %edi
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 52(%eax), %esi
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 56(%eax), %edx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 60(%eax), %ecx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl (%eax), %eax
+; X86-NO-BMI2-HAVE-SHLD-NEXT: xorps %xmm0, %xmm0
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %esi, {{[0-9]+}}(%esp)
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edi, {{[0-9]+}}(%esp)
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ebx, {{[0-9]+}}(%esp)
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ebp, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl (%esp), %eax # 4-byte Reload
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %esi, %eax
-; X86-NO-BMI2-HAVE-SHLD-NEXT: andl $7, %eax
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shrl $3, %esi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: andl $63, %esi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 80(%esp,%esi), %edi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shrl %cl, %edi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 84(%esp,%esi), %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: leal (%ecx,%ecx), %ebx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, %edx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: notl %edx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: andl $31, %edx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edx, %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shll %cl, %ebx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: orl %edi, %ebx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 88(%esp,%esi), %edi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shrl %cl, %edi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 92(%esp,%esi), %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: leal (%ecx,%ecx), %ebx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edx, %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shll %cl, %ebx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: orl %edi, %ebx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 96(%esp,%esi), %edi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shrl %cl, %edi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 100(%esp,%esi), %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: leal (%ecx,%ecx), %ebx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edx, %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shll %cl, %ebx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: orl %edi, %ebx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 104(%esp,%esi), %edi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl (%esp), %ecx # 4-byte Reload
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shrl %cl, %edi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 108(%esp,%esi), %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: leal (%ecx,%ecx), %ebx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edx, %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shll %cl, %ebx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: orl %edi, %ebx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 112(%esp,%esi), %edi
+; X86-NO-BMI2-HAVE-SHLD-NEXT: andl $31, %ecx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: shrl $3, %eax
+; X86-NO-BMI2-HAVE-SHLD-NEXT: andl $60, %eax
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 56(%esp,%eax), %esi
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 52(%esp,%eax), %edx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %esi, %edx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 64(%esp,%eax), %ebx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 60(%esp,%eax), %edx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edx, %edi
+; X86-NO-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %ebx, %edi
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shrl %cl, %edi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 116(%esp,%esi), %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: leal (%ecx,%ecx), %ebx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edx, %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shll %cl, %ebx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: orl %edi, %ebx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %edx, %esi
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 72(%esp,%eax), %edi
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 68(%esp,%eax), %edx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edx, %esi
+; X86-NO-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %edi, %esi
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %edx, %ebx
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 120(%esp,%esi), %edi
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 80(%esp,%eax), %ebx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 76(%esp,%eax), %edx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edx, %esi
+; X86-NO-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %ebx, %esi
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %edx, %edi
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shrl %cl, %edi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 124(%esp,%esi), %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, (%esp) # 4-byte Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: leal (%ecx,%ecx), %ebx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edx, %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shll %cl, %ebx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: orl %edi, %ebx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 88(%esp,%eax), %edi
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 84(%esp,%eax), %edx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edx, %esi
+; X86-NO-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %edi, %esi
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edi, %esi
+; X86-NO-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %edx, %ebx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ebx, (%esp) # 4-byte Spill
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 96(%esp,%eax), %edi
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 92(%esp,%eax), %edx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edx, %ebx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %edi, %ebx
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 128(%esp,%esi), %edi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shrl %cl, %edi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 132(%esp,%esi), %ebx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: leal (%ebx,%ebx), %ebp
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edx, %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shll %cl, %ebp
-; X86-NO-BMI2-HAVE-SHLD-NEXT: orl %edi, %ebp
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %edx, (%esp) # 4-byte Folded Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 76(%esp,%esi), %edi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 136(%esp,%esi), %esi
+; X86-NO-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %edx, %esi
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 104(%esp,%eax), %esi
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 100(%esp,%eax), %edx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edx, %ebx
; X86-NO-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %esi, %ebx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ebx, 56(%edx)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %ebx, %edi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shrl %cl, %esi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %esi, 60(%edx)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl (%esp), %eax # 4-byte Reload
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, 48(%edx)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, 40(%edx)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, 32(%edx)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, 24(%edx)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, 16(%edx)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, 8(%edx)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edi, (%edx)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ebp, 52(%edx)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, 44(%edx)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, 36(%edx)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, 28(%edx)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, 20(%edx)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, 12(%edx)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, 4(%edx)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: addl $204, %esp
+; X86-NO-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %edx, %edi
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 48(%esp,%eax), %ebp
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 108(%esp,%eax), %edx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %edx, %esi
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %esi, 56(%eax)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X86-NO-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %esi, %ebp
+; X86-NO-BMI2-HAVE-SHLD-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: shrl %cl, %edx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edx, 60(%eax)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edi, 48(%eax)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ebx, 52(%eax)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, 40(%eax)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, 44(%eax)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl (%esp), %ecx # 4-byte Reload
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, 32(%eax)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, 36(%eax)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, 24(%eax)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, 28(%eax)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, 16(%eax)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, 20(%eax)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, 8(%eax)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, 12(%eax)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ebp, (%eax)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, 4(%eax)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: addl $188, %esp
; X86-NO-BMI2-HAVE-SHLD-NEXT: popl %esi
; X86-NO-BMI2-HAVE-SHLD-NEXT: popl %edi
; X86-NO-BMI2-HAVE-SHLD-NEXT: popl %ebx
@@ -4252,42 +3848,46 @@ define void @lshr_64bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %edi
; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: subl $204, %esp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl (%edx), %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 4(%edx), %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 8(%edx), %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 12(%edx), %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 16(%edx), %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 20(%edx), %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 24(%edx), %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 28(%edx), %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 32(%edx), %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 36(%edx), %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, (%esp) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 40(%edx), %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 44(%edx), %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 48(%edx), %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 52(%edx), %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 56(%edx), %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 60(%edx), %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl (%edx), %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl (%eax), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 4(%eax), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 8(%eax), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 12(%eax), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 16(%eax), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 20(%eax), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 24(%eax), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 28(%eax), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 32(%eax), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 36(%eax), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 40(%eax), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 44(%eax), %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 48(%eax), %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 52(%eax), %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 56(%eax), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 60(%eax), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl (%eax), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: xorps %xmm0, %xmm0
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edi, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebx, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebp, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl (%esp), %eax # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
@@ -4297,6 +3897,7 @@ define void @lshr_64bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
@@ -4307,163 +3908,141 @@ define void @lshr_64bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: andl $7, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl $3, %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: andl $63, %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 80(%esp,%edx), %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 84(%esp,%edx), %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, %ecx, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: notl %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: andl $31, %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %esi, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edi, %esi, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ecx, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 92(%esp,%edx), %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, (%esp) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %ecx, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edi, %ecx, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 88(%esp,%edx), %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, %ecx, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ecx, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 100(%esp,%edx), %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %ecx, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edi, %ecx, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 96(%esp,%edx), %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, %ecx, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ecx, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: andl $31, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl $3, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: andl $60, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 68(%esp,%ecx), %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 72(%esp,%ecx), %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 108(%esp,%edx), %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %ecx, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edi, %ecx, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 104(%esp,%edx), %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, %ecx, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ecx, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, %edi, %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: notl %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%esi,%esi), %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %eax, %ebp, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebx, %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 116(%esp,%edx), %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %ecx, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edi, %ecx, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 112(%esp,%edx), %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, %ecx, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ecx, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: xorb $31, %bl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %edi, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebx, %edi, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, 64(%esp,%ecx), %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edi, %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 124(%esp,%edx), %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %ecx, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edi, %ecx, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 120(%esp,%edx), %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, %ebx, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ecx, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 80(%esp,%ecx), %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 132(%esp,%edx), %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %ecx, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edi, %ecx, %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 128(%esp,%edx), %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, %esi, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ecx, %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: notb %cl
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %edi, %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %edi, %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, 76(%esp,%edx), %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edi, %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %ebp, %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %ebp, %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edi, %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, (%esp), %edi # 4-byte Folded Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %ebp, %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %ebp, %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edi, %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebp, (%esp) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%esi,%esi), %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %eax, %ebp, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 76(%esp,%ecx), %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, %ebp, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %ebp, %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %ebp, %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edi, %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebx, %ebp, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 88(%esp,%ecx), %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%eax,%eax), %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edi, %ebp, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 84(%esp,%ecx), %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, %ebp, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %ebp, %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %ebp, %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edi, %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %ebx, %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %ebx, %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edi, %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebx, %ebp, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 96(%esp,%ecx), %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%eax,%eax), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edi, %esi, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 92(%esp,%ecx), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, %esi, %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebp, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %esi, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %esi, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edi, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebx, %esi, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebp, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 104(%esp,%ecx), %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%eax,%eax), %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edi, %ebp, %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 136(%esp,%edx), %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, %edx, %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%edx,%edx), %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %eax, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 100(%esp,%ecx), %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, %ebp, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %ebp, %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebx, %ebp, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 112(%esp,%ecx), %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%eax,%eax), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edi, %esi, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 108(%esp,%ecx), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, %esi, %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebp, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %esi, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebx, %esi, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebp, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 120(%esp,%ecx), %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%eax,%eax), %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %esi, %edi, %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 116(%esp,%ecx), %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, %edi, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %edi, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebx, %edi, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, %eax, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 124(%esp,%ecx), %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, %eax, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %eax, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebx, %eax, %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %ebx
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edi, 60(%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 56(%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, 48(%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebx, 40(%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebp, 32(%eax)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, 60(%eax)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebx, 56(%eax)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edi, 48(%eax)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebp, 52(%eax)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 24(%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl (%esp), %ecx # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 16(%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 8(%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, (%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 52(%eax)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 40(%eax)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 44(%eax)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 32(%eax)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 36(%eax)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 24(%eax)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 28(%eax)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 16(%eax)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 20(%eax)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 8(%eax)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 12(%eax)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, (%eax)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 4(%eax)
; X86-HAVE-BMI2-NO-SHLD-NEXT: addl $204, %esp
; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %esi
@@ -4478,7 +4057,7 @@ define void @lshr_64bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: pushl %ebx
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: pushl %edi
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: pushl %esi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: subl $200, %esp
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: subl $188, %esp
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl (%eax), %ecx
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
@@ -4489,7 +4068,7 @@ define void @lshr_64bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 12(%eax), %ecx
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 16(%eax), %ecx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, (%esp) # 4-byte Spill
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 20(%eax), %ecx
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 24(%eax), %ecx
@@ -4499,7 +4078,7 @@ define void @lshr_64bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 32(%eax), %ecx
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 36(%eax), %ecx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, (%esp) # 4-byte Spill
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 40(%eax), %ebp
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 44(%eax), %ebx
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 48(%eax), %edi
@@ -4508,13 +4087,17 @@ define void @lshr_64bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 60(%eax), %ecx
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl (%eax), %eax
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: xorps %xmm0, %xmm0
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edx, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %esi, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edi, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ebx, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ebp, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl (%esp), %ecx # 4-byte Reload
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
@@ -4522,9 +4105,10 @@ define void @lshr_64bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl (%esp), %ecx # 4-byte Reload
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
@@ -4534,138 +4118,90 @@ define void @lshr_64bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, %edx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: andl $7, %edx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrl $3, %eax
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: andl $63, %eax
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 76(%esp,%eax), %ecx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrxl %edx, %ecx, %esi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edx, %ecx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: notl %ecx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, %ecx
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: andl $31, %ecx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 80(%esp,%eax), %edi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: addl %edi, %edi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shlxl %ecx, %edi, %edi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: orl %esi, %edi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 88(%esp,%eax), %esi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: addl %esi, %esi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shlxl %ecx, %esi, %edi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 84(%esp,%eax), %esi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrxl %edx, %esi, %esi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: orl %esi, %edi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 96(%esp,%eax), %esi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: addl %esi, %esi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shlxl %ecx, %esi, %edi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 92(%esp,%eax), %esi
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrl $3, %eax
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: andl $60, %eax
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 56(%esp,%eax), %edi
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 52(%esp,%eax), %edx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %edi, %edx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 64(%esp,%eax), %ebx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 60(%esp,%eax), %edx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edx, %esi
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %ebx, %esi
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrxl %edx, %esi, %esi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: orl %esi, %edi
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %edx, %edi
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 104(%esp,%eax), %esi
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 72(%esp,%eax), %edi
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 68(%esp,%eax), %edx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edx, %esi
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %edi, %esi
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: addl %esi, %esi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shlxl %ecx, %esi, %edi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 100(%esp,%eax), %esi
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %edx, %ebx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 80(%esp,%eax), %ebx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 76(%esp,%eax), %edx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edx, %esi
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %ebx, %esi
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrxl %edx, %esi, %esi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: orl %esi, %edi
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %edx, %edi
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 112(%esp,%eax), %esi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %esi, (%esp) # 4-byte Spill
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: addl %esi, %esi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shlxl %ecx, %esi, %edi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 108(%esp,%eax), %esi
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 88(%esp,%eax), %ebp
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 84(%esp,%eax), %edx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edx, %esi
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %ebp, %esi
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrxl %edx, %esi, %esi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: orl %esi, %edi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 120(%esp,%eax), %ebp
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: leal (%ebp,%ebp), %esi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shlxl %ecx, %esi, %edi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 116(%esp,%eax), %esi
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %edx, %ebx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ebx, (%esp) # 4-byte Spill
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 96(%esp,%eax), %edi
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 92(%esp,%eax), %edx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edx, %esi
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %edi, %esi
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrxl %edx, %esi, %esi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: orl %esi, %edi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 128(%esp,%eax), %ebx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: leal (%ebx,%ebx), %esi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shlxl %ecx, %esi, %edi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 124(%esp,%eax), %esi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrxl %edx, %esi, %ecx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: orl %ecx, %edi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edx, %ecx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %edi, (%esp) # 4-byte Folded Spill
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %esi, %ebp
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 72(%esp,%eax), %esi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 132(%esp,%eax), %edi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %edi, %ebx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %edx, %ebp
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 104(%esp,%eax), %edx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 100(%esp,%eax), %ebx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ebx, %esi
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %edx, %esi
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %ebx, %edi
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 48(%esp,%eax), %ebx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 108(%esp,%eax), %eax
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %eax, %edx
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ebx, 56(%eax)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ebp, 48(%eax)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl (%esp), %ecx # 4-byte Reload
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, 40(%eax)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, 32(%eax)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, 24(%eax)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, 16(%eax)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, 8(%eax)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrxl %edx, %edi, %edi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edx, %ecx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edx, 56(%eax)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edi, 48(%eax)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %esi, 52(%eax)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ebp, 40(%eax)
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %edx, %esi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %esi, (%eax)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edi, 60(%eax)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, 52(%eax)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, 44(%eax)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, 36(%eax)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, 28(%eax)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, 20(%eax)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, 12(%eax)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edx, 44(%eax)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl (%esp), %edx # 4-byte Reload
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edx, 32(%eax)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edx, 36(%eax)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edx, 24(%eax)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edx, 28(%eax)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edx, 16(%eax)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edx, 20(%eax)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edx, 8(%eax)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edx, 12(%eax)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrxl %ecx, {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %esi, %ebx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ebx, (%eax)
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, 4(%eax)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: addl $200, %esp
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edx, 60(%eax)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: addl $188, %esp
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: popl %esi
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: popl %edi
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: popl %ebx
@@ -4680,7 +4216,6 @@ define void @lshr_64bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
define void @shl_64bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X64-NO-BMI2-NO-SHLD-LABEL: shl_64bytes:
; X64-NO-BMI2-NO-SHLD: # %bb.0:
-; X64-NO-BMI2-NO-SHLD-NEXT: pushq %rbp
; X64-NO-BMI2-NO-SHLD-NEXT: pushq %r15
; X64-NO-BMI2-NO-SHLD-NEXT: pushq %r14
; X64-NO-BMI2-NO-SHLD-NEXT: pushq %r13
@@ -4695,6 +4230,11 @@ define void @shl_64bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X64-NO-BMI2-NO-SHLD-NEXT: movq 48(%rdi), %rbx
; X64-NO-BMI2-NO-SHLD-NEXT: movq 56(%rdi), %rdi
; X64-NO-BMI2-NO-SHLD-NEXT: movl (%rsi), %esi
+; X64-NO-BMI2-NO-SHLD-NEXT: xorps %xmm0, %xmm0
+; X64-NO-BMI2-NO-SHLD-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-NO-SHLD-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-NO-SHLD-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-NO-SHLD-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; X64-NO-BMI2-NO-SHLD-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
; X64-NO-BMI2-NO-SHLD-NEXT: movq %rbx, -{{[0-9]+}}(%rsp)
; X64-NO-BMI2-NO-SHLD-NEXT: movq %r11, -{{[0-9]+}}(%rsp)
@@ -4703,107 +4243,91 @@ define void @shl_64bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X64-NO-BMI2-NO-SHLD-NEXT: movq %r8, -{{[0-9]+}}(%rsp)
; X64-NO-BMI2-NO-SHLD-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
; X64-NO-BMI2-NO-SHLD-NEXT: movq %rax, -{{[0-9]+}}(%rsp)
-; X64-NO-BMI2-NO-SHLD-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NO-BMI2-NO-SHLD-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NO-BMI2-NO-SHLD-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NO-BMI2-NO-SHLD-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NO-BMI2-NO-SHLD-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NO-BMI2-NO-SHLD-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NO-BMI2-NO-SHLD-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NO-BMI2-NO-SHLD-NEXT: movq $0, -{{[0-9]+}}(%rsp)
; X64-NO-BMI2-NO-SHLD-NEXT: movl %esi, %eax
-; X64-NO-BMI2-NO-SHLD-NEXT: andl $7, %eax
+; X64-NO-BMI2-NO-SHLD-NEXT: andl $63, %eax
; X64-NO-BMI2-NO-SHLD-NEXT: shrl $3, %esi
-; X64-NO-BMI2-NO-SHLD-NEXT: andl $63, %esi
+; X64-NO-BMI2-NO-SHLD-NEXT: andl $56, %esi
; X64-NO-BMI2-NO-SHLD-NEXT: negl %esi
-; X64-NO-BMI2-NO-SHLD-NEXT: movslq %esi, %r14
-; X64-NO-BMI2-NO-SHLD-NEXT: movq -64(%rsp,%r14), %r9
-; X64-NO-BMI2-NO-SHLD-NEXT: movq -56(%rsp,%r14), %r8
-; X64-NO-BMI2-NO-SHLD-NEXT: movq %r8, %rdi
+; X64-NO-BMI2-NO-SHLD-NEXT: movslq %esi, %rbx
+; X64-NO-BMI2-NO-SHLD-NEXT: movq -64(%rsp,%rbx), %r8
+; X64-NO-BMI2-NO-SHLD-NEXT: movq -56(%rsp,%rbx), %rdi
+; X64-NO-BMI2-NO-SHLD-NEXT: movq %rdi, %r10
; X64-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
-; X64-NO-BMI2-NO-SHLD-NEXT: shlq %cl, %rdi
+; X64-NO-BMI2-NO-SHLD-NEXT: shlq %cl, %r10
; X64-NO-BMI2-NO-SHLD-NEXT: movl %eax, %esi
-; X64-NO-BMI2-NO-SHLD-NEXT: notb %sil
-; X64-NO-BMI2-NO-SHLD-NEXT: movq %r9, %r10
-; X64-NO-BMI2-NO-SHLD-NEXT: shrq %r10
+; X64-NO-BMI2-NO-SHLD-NEXT: xorb $63, %sil
+; X64-NO-BMI2-NO-SHLD-NEXT: movq %r8, %r9
+; X64-NO-BMI2-NO-SHLD-NEXT: shrq %r9
; X64-NO-BMI2-NO-SHLD-NEXT: movl %esi, %ecx
-; X64-NO-BMI2-NO-SHLD-NEXT: shrq %cl, %r10
-; X64-NO-BMI2-NO-SHLD-NEXT: orq %rdi, %r10
-; X64-NO-BMI2-NO-SHLD-NEXT: movq -40(%rsp,%r14), %r11
-; X64-NO-BMI2-NO-SHLD-NEXT: movq %r11, %rdi
+; X64-NO-BMI2-NO-SHLD-NEXT: shrq %cl, %r9
+; X64-NO-BMI2-NO-SHLD-NEXT: orq %r10, %r9
+; X64-NO-BMI2-NO-SHLD-NEXT: movq -40(%rsp,%rbx), %r10
+; X64-NO-BMI2-NO-SHLD-NEXT: movq %r10, %r14
; X64-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
-; X64-NO-BMI2-NO-SHLD-NEXT: shlq %cl, %rdi
-; X64-NO-BMI2-NO-SHLD-NEXT: movq -48(%rsp,%r14), %r15
-; X64-NO-BMI2-NO-SHLD-NEXT: movq %r15, %rbx
-; X64-NO-BMI2-NO-SHLD-NEXT: shrq %rbx
+; X64-NO-BMI2-NO-SHLD-NEXT: shlq %cl, %r14
+; X64-NO-BMI2-NO-SHLD-NEXT: movq -48(%rsp,%rbx), %r15
+; X64-NO-BMI2-NO-SHLD-NEXT: movq %r15, %r11
+; X64-NO-BMI2-NO-SHLD-NEXT: shrq %r11
; X64-NO-BMI2-NO-SHLD-NEXT: movl %esi, %ecx
-; X64-NO-BMI2-NO-SHLD-NEXT: shrq %cl, %rbx
-; X64-NO-BMI2-NO-SHLD-NEXT: orq %rdi, %rbx
+; X64-NO-BMI2-NO-SHLD-NEXT: shrq %cl, %r11
+; X64-NO-BMI2-NO-SHLD-NEXT: orq %r14, %r11
; X64-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
; X64-NO-BMI2-NO-SHLD-NEXT: shlq %cl, %r15
-; X64-NO-BMI2-NO-SHLD-NEXT: movl %eax, %edi
-; X64-NO-BMI2-NO-SHLD-NEXT: notl %edi
-; X64-NO-BMI2-NO-SHLD-NEXT: andl $63, %edi
-; X64-NO-BMI2-NO-SHLD-NEXT: shrq %r8
-; X64-NO-BMI2-NO-SHLD-NEXT: movl %edi, %ecx
-; X64-NO-BMI2-NO-SHLD-NEXT: shrq %cl, %r8
-; X64-NO-BMI2-NO-SHLD-NEXT: orq %r15, %r8
-; X64-NO-BMI2-NO-SHLD-NEXT: movq -24(%rsp,%r14), %r15
-; X64-NO-BMI2-NO-SHLD-NEXT: movq %r15, %r13
-; X64-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
-; X64-NO-BMI2-NO-SHLD-NEXT: shlq %cl, %r13
-; X64-NO-BMI2-NO-SHLD-NEXT: movq -32(%rsp,%r14), %rbp
-; X64-NO-BMI2-NO-SHLD-NEXT: movq %rbp, %r12
-; X64-NO-BMI2-NO-SHLD-NEXT: shrq %r12
+; X64-NO-BMI2-NO-SHLD-NEXT: shrq %rdi
; X64-NO-BMI2-NO-SHLD-NEXT: movl %esi, %ecx
-; X64-NO-BMI2-NO-SHLD-NEXT: shrq %cl, %r12
-; X64-NO-BMI2-NO-SHLD-NEXT: orq %r13, %r12
+; X64-NO-BMI2-NO-SHLD-NEXT: shrq %cl, %rdi
+; X64-NO-BMI2-NO-SHLD-NEXT: orq %r15, %rdi
+; X64-NO-BMI2-NO-SHLD-NEXT: movq -24(%rsp,%rbx), %r14
+; X64-NO-BMI2-NO-SHLD-NEXT: movq %r14, %r12
; X64-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
-; X64-NO-BMI2-NO-SHLD-NEXT: shlq %cl, %rbp
-; X64-NO-BMI2-NO-SHLD-NEXT: shrq %r11
-; X64-NO-BMI2-NO-SHLD-NEXT: movl %edi, %ecx
-; X64-NO-BMI2-NO-SHLD-NEXT: shrq %cl, %r11
-; X64-NO-BMI2-NO-SHLD-NEXT: orq %rbp, %r11
-; X64-NO-BMI2-NO-SHLD-NEXT: movq -8(%rsp,%r14), %r13
+; X64-NO-BMI2-NO-SHLD-NEXT: shlq %cl, %r12
+; X64-NO-BMI2-NO-SHLD-NEXT: movq -32(%rsp,%rbx), %r13
+; X64-NO-BMI2-NO-SHLD-NEXT: movq %r13, %r15
+; X64-NO-BMI2-NO-SHLD-NEXT: shrq %r15
+; X64-NO-BMI2-NO-SHLD-NEXT: movl %esi, %ecx
+; X64-NO-BMI2-NO-SHLD-NEXT: shrq %cl, %r15
+; X64-NO-BMI2-NO-SHLD-NEXT: orq %r12, %r15
; X64-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
; X64-NO-BMI2-NO-SHLD-NEXT: shlq %cl, %r13
-; X64-NO-BMI2-NO-SHLD-NEXT: movq -16(%rsp,%r14), %r14
-; X64-NO-BMI2-NO-SHLD-NEXT: movq %r14, %rbp
-; X64-NO-BMI2-NO-SHLD-NEXT: shrq %rbp
+; X64-NO-BMI2-NO-SHLD-NEXT: shrq %r10
; X64-NO-BMI2-NO-SHLD-NEXT: movl %esi, %ecx
-; X64-NO-BMI2-NO-SHLD-NEXT: shrq %cl, %rbp
-; X64-NO-BMI2-NO-SHLD-NEXT: orq %r13, %rbp
+; X64-NO-BMI2-NO-SHLD-NEXT: shrq %cl, %r10
+; X64-NO-BMI2-NO-SHLD-NEXT: orq %r13, %r10
+; X64-NO-BMI2-NO-SHLD-NEXT: movq -8(%rsp,%rbx), %r12
; X64-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
-; X64-NO-BMI2-NO-SHLD-NEXT: shlq %cl, %r14
-; X64-NO-BMI2-NO-SHLD-NEXT: shrq %r15
-; X64-NO-BMI2-NO-SHLD-NEXT: movl %edi, %ecx
-; X64-NO-BMI2-NO-SHLD-NEXT: shrq %cl, %r15
-; X64-NO-BMI2-NO-SHLD-NEXT: orq %r14, %r15
+; X64-NO-BMI2-NO-SHLD-NEXT: shlq %cl, %r12
+; X64-NO-BMI2-NO-SHLD-NEXT: movq -16(%rsp,%rbx), %rbx
+; X64-NO-BMI2-NO-SHLD-NEXT: movq %rbx, %r13
+; X64-NO-BMI2-NO-SHLD-NEXT: shrq %r13
+; X64-NO-BMI2-NO-SHLD-NEXT: movl %esi, %ecx
+; X64-NO-BMI2-NO-SHLD-NEXT: shrq %cl, %r13
+; X64-NO-BMI2-NO-SHLD-NEXT: orq %r12, %r13
; X64-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
-; X64-NO-BMI2-NO-SHLD-NEXT: shlq %cl, %r9
-; X64-NO-BMI2-NO-SHLD-NEXT: movq %r9, (%rdx)
-; X64-NO-BMI2-NO-SHLD-NEXT: movq %r15, 48(%rdx)
-; X64-NO-BMI2-NO-SHLD-NEXT: movq %rbp, 56(%rdx)
-; X64-NO-BMI2-NO-SHLD-NEXT: movq %r11, 32(%rdx)
-; X64-NO-BMI2-NO-SHLD-NEXT: movq %r12, 40(%rdx)
-; X64-NO-BMI2-NO-SHLD-NEXT: movq %r8, 16(%rdx)
-; X64-NO-BMI2-NO-SHLD-NEXT: movq %rbx, 24(%rdx)
-; X64-NO-BMI2-NO-SHLD-NEXT: movq %r10, 8(%rdx)
+; X64-NO-BMI2-NO-SHLD-NEXT: shlq %cl, %rbx
+; X64-NO-BMI2-NO-SHLD-NEXT: shrq %r14
+; X64-NO-BMI2-NO-SHLD-NEXT: movl %esi, %ecx
+; X64-NO-BMI2-NO-SHLD-NEXT: shrq %cl, %r14
+; X64-NO-BMI2-NO-SHLD-NEXT: orq %rbx, %r14
+; X64-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
+; X64-NO-BMI2-NO-SHLD-NEXT: shlq %cl, %r8
+; X64-NO-BMI2-NO-SHLD-NEXT: movq %r8, (%rdx)
+; X64-NO-BMI2-NO-SHLD-NEXT: movq %r14, 48(%rdx)
+; X64-NO-BMI2-NO-SHLD-NEXT: movq %r13, 56(%rdx)
+; X64-NO-BMI2-NO-SHLD-NEXT: movq %r10, 32(%rdx)
+; X64-NO-BMI2-NO-SHLD-NEXT: movq %r15, 40(%rdx)
+; X64-NO-BMI2-NO-SHLD-NEXT: movq %rdi, 16(%rdx)
+; X64-NO-BMI2-NO-SHLD-NEXT: movq %r11, 24(%rdx)
+; X64-NO-BMI2-NO-SHLD-NEXT: movq %r9, 8(%rdx)
; X64-NO-BMI2-NO-SHLD-NEXT: popq %rbx
; X64-NO-BMI2-NO-SHLD-NEXT: popq %r12
; X64-NO-BMI2-NO-SHLD-NEXT: popq %r13
; X64-NO-BMI2-NO-SHLD-NEXT: popq %r14
; X64-NO-BMI2-NO-SHLD-NEXT: popq %r15
-; X64-NO-BMI2-NO-SHLD-NEXT: popq %rbp
; X64-NO-BMI2-NO-SHLD-NEXT: retq
;
; X64-NO-BMI2-HAVE-SHLD-LABEL: shl_64bytes:
; X64-NO-BMI2-HAVE-SHLD: # %bb.0:
-; X64-NO-BMI2-HAVE-SHLD-NEXT: pushq %rbp
-; X64-NO-BMI2-HAVE-SHLD-NEXT: pushq %r15
; X64-NO-BMI2-HAVE-SHLD-NEXT: pushq %r14
-; X64-NO-BMI2-HAVE-SHLD-NEXT: pushq %r13
-; X64-NO-BMI2-HAVE-SHLD-NEXT: pushq %r12
; X64-NO-BMI2-HAVE-SHLD-NEXT: pushq %rbx
; X64-NO-BMI2-HAVE-SHLD-NEXT: pushq %rax
; X64-NO-BMI2-HAVE-SHLD-NEXT: movq (%rdi), %rax
@@ -4815,7 +4339,12 @@ define void @shl_64bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X64-NO-BMI2-HAVE-SHLD-NEXT: movq 48(%rdi), %rbx
; X64-NO-BMI2-HAVE-SHLD-NEXT: movq 56(%rdi), %rdi
; X64-NO-BMI2-HAVE-SHLD-NEXT: movl (%rsi), %esi
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %rdi, (%rsp)
+; X64-NO-BMI2-HAVE-SHLD-NEXT: xorps %xmm0, %xmm0
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %rbx, -{{[0-9]+}}(%rsp)
; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %r11, -{{[0-9]+}}(%rsp)
; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %r10, -{{[0-9]+}}(%rsp)
@@ -4823,77 +4352,42 @@ define void @shl_64bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %r8, -{{[0-9]+}}(%rsp)
; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %rax, -{{[0-9]+}}(%rsp)
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movl %esi, %eax
-; X64-NO-BMI2-HAVE-SHLD-NEXT: andl $7, %eax
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movl %esi, %ecx
+; X64-NO-BMI2-HAVE-SHLD-NEXT: andl $63, %ecx
; X64-NO-BMI2-HAVE-SHLD-NEXT: shrl $3, %esi
-; X64-NO-BMI2-HAVE-SHLD-NEXT: andl $63, %esi
+; X64-NO-BMI2-HAVE-SHLD-NEXT: andl $56, %esi
; X64-NO-BMI2-HAVE-SHLD-NEXT: negl %esi
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movslq %esi, %r10
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq -40(%rsp,%r10), %rbx
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %rbx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, %ecx
-; X64-NO-BMI2-HAVE-SHLD-NEXT: shlq %cl, %rbx
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, %esi
-; X64-NO-BMI2-HAVE-SHLD-NEXT: notl %esi
-; X64-NO-BMI2-HAVE-SHLD-NEXT: andl $63, %esi
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq -56(%rsp,%r10), %r9
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq -48(%rsp,%r10), %r8
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %r8, %rdi
-; X64-NO-BMI2-HAVE-SHLD-NEXT: shrq %rdi
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movl %esi, %ecx
-; X64-NO-BMI2-HAVE-SHLD-NEXT: shrq %cl, %rdi
-; X64-NO-BMI2-HAVE-SHLD-NEXT: orq %rbx, %rdi
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq -24(%rsp,%r10), %r15
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %r15, %r12
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, %ecx
-; X64-NO-BMI2-HAVE-SHLD-NEXT: shlq %cl, %r12
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq -32(%rsp,%r10), %r14
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %r14, %rbx
-; X64-NO-BMI2-HAVE-SHLD-NEXT: shrq %rbx
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movl %esi, %ecx
-; X64-NO-BMI2-HAVE-SHLD-NEXT: shrq %cl, %rbx
-; X64-NO-BMI2-HAVE-SHLD-NEXT: orq %r12, %rbx
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq -8(%rsp,%r10), %r12
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %r12, %r13
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, %ecx
-; X64-NO-BMI2-HAVE-SHLD-NEXT: shlq %cl, %r13
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq -16(%rsp,%r10), %r11
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %r11, %rbp
-; X64-NO-BMI2-HAVE-SHLD-NEXT: shrq %rbp
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movl %esi, %ecx
-; X64-NO-BMI2-HAVE-SHLD-NEXT: shrq %cl, %rbp
-; X64-NO-BMI2-HAVE-SHLD-NEXT: orq %r13, %rbp
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, %ecx
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Reload
-; X64-NO-BMI2-HAVE-SHLD-NEXT: shldq %cl, %rsi, %r14
-; X64-NO-BMI2-HAVE-SHLD-NEXT: shldq %cl, %r15, %r11
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq (%rsp,%r10), %rsi
-; X64-NO-BMI2-HAVE-SHLD-NEXT: shldq %cl, %r12, %rsi
-; X64-NO-BMI2-HAVE-SHLD-NEXT: shldq %cl, %r9, %r8
-; X64-NO-BMI2-HAVE-SHLD-NEXT: shlq %cl, %r9
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %rsi, 56(%rdx)
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %r11, 40(%rdx)
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %r14, 24(%rdx)
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %r9, (%rdx)
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %r8, 8(%rdx)
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %rbp, 48(%rdx)
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %rbx, 32(%rdx)
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %rdi, 16(%rdx)
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movslq %esi, %r9
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq -48(%rsp,%r9), %rax
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq -40(%rsp,%r9), %r10
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %r10, %rsi
+; X64-NO-BMI2-HAVE-SHLD-NEXT: shldq %cl, %rax, %rsi
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq -64(%rsp,%r9), %r8
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq -56(%rsp,%r9), %rdi
+; X64-NO-BMI2-HAVE-SHLD-NEXT: shldq %cl, %rdi, %rax
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq -32(%rsp,%r9), %r11
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq -24(%rsp,%r9), %rbx
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %rbx, %r14
+; X64-NO-BMI2-HAVE-SHLD-NEXT: shldq %cl, %r11, %r14
+; X64-NO-BMI2-HAVE-SHLD-NEXT: shldq %cl, %r10, %r11
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq -16(%rsp,%r9), %r10
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq -8(%rsp,%r9), %r9
+; X64-NO-BMI2-HAVE-SHLD-NEXT: shldq %cl, %r10, %r9
+; X64-NO-BMI2-HAVE-SHLD-NEXT: shldq %cl, %rbx, %r10
+; X64-NO-BMI2-HAVE-SHLD-NEXT: shldq %cl, %r8, %rdi
+; X64-NO-BMI2-HAVE-SHLD-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NO-BMI2-HAVE-SHLD-NEXT: shlq %cl, %r8
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %r10, 48(%rdx)
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %r9, 56(%rdx)
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %r11, 32(%rdx)
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %r14, 40(%rdx)
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %rax, 16(%rdx)
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %rsi, 24(%rdx)
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %r8, (%rdx)
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %rdi, 8(%rdx)
; X64-NO-BMI2-HAVE-SHLD-NEXT: addq $8, %rsp
; X64-NO-BMI2-HAVE-SHLD-NEXT: popq %rbx
-; X64-NO-BMI2-HAVE-SHLD-NEXT: popq %r12
-; X64-NO-BMI2-HAVE-SHLD-NEXT: popq %r13
; X64-NO-BMI2-HAVE-SHLD-NEXT: popq %r14
-; X64-NO-BMI2-HAVE-SHLD-NEXT: popq %r15
-; X64-NO-BMI2-HAVE-SHLD-NEXT: popq %rbp
; X64-NO-BMI2-HAVE-SHLD-NEXT: retq
;
; X64-HAVE-BMI2-NO-SHLD-LABEL: shl_64bytes:
@@ -4904,6 +4398,7 @@ define void @shl_64bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X64-HAVE-BMI2-NO-SHLD-NEXT: pushq %r13
; X64-HAVE-BMI2-NO-SHLD-NEXT: pushq %r12
; X64-HAVE-BMI2-NO-SHLD-NEXT: pushq %rbx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: pushq %rax
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq (%rdi), %rax
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq 8(%rdi), %rcx
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq 16(%rdi), %r8
@@ -4913,6 +4408,11 @@ define void @shl_64bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq 48(%rdi), %rbx
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq 56(%rdi), %rdi
; X64-HAVE-BMI2-NO-SHLD-NEXT: movl (%rsi), %esi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: xorps %xmm0, %xmm0
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rbx, -{{[0-9]+}}(%rsp)
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %r11, -{{[0-9]+}}(%rsp)
@@ -4922,68 +4422,58 @@ define void @shl_64bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rax, -{{[0-9]+}}(%rsp)
; X64-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, %eax
-; X64-HAVE-BMI2-NO-SHLD-NEXT: andl $7, %eax
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq $0, -{{[0-9]+}}(%rsp)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: andl $63, %eax
; X64-HAVE-BMI2-NO-SHLD-NEXT: shrl $3, %esi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: andl $63, %esi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: andl $56, %esi
; X64-HAVE-BMI2-NO-SHLD-NEXT: negl %esi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movslq %esi, %rcx
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -64(%rsp,%rcx), %r10
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -56(%rsp,%rcx), %rsi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rax, %rsi, %r9
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -40(%rsp,%rcx), %rdi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rax, %rdi, %rbx
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -48(%rsp,%rcx), %r15
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rax, %r15, %r14
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %r8d
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -24(%rsp,%rcx), %r11
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rax, %r11, %r12
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rax, %r10, %r13
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %ebp
-; X64-HAVE-BMI2-NO-SHLD-NEXT: notb %bpl
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movslq %esi, %rsi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -64(%rsp,%rsi), %r10
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -56(%rsp,%rsi), %rcx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rax, %rcx, %r9
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -40(%rsp,%rsi), %rdi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rax, %rdi, %r11
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -48(%rsp,%rsi), %r14
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rax, %r14, %rbx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -24(%rsp,%rsi), %r8
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rax, %r8, %r15
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rax, %r10, %r12
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %r13d
+; X64-HAVE-BMI2-NO-SHLD-NEXT: xorb $63, %r13b
; X64-HAVE-BMI2-NO-SHLD-NEXT: shrq %r10
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rbp, %r10, %r10
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %r13, %r10, %r10
; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r9, %r10
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrq %r15
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rbp, %r15, %r9
-; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %rbx, %r9
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -32(%rsp,%rcx), %rbx
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rax, %rbx, %r15
-; X64-HAVE-BMI2-NO-SHLD-NEXT: notl %r8d
-; X64-HAVE-BMI2-NO-SHLD-NEXT: andl $63, %r8d
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrq %rsi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %r8, %rsi, %rsi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r14, %rsi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rax, -8(%rsp,%rcx), %r14
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -16(%rsp,%rcx), %rcx
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rax, %rcx, %rax
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrq %rbx
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rbp, %rbx, %rbx
-; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r12, %rbx
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrq %rdi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %r8, %rdi, %rdi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r15, %rdi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -32(%rsp,%rsi), %r9
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rax, %r9, %rbp
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrq %r14
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %r13, %r14, %r14
+; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r11, %r14
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rax, -8(%rsp,%rsi), %r11
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -16(%rsp,%rsi), %rsi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rax, %rsi, %rax
; X64-HAVE-BMI2-NO-SHLD-NEXT: shrq %rcx
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rbp, %rcx, %rcx
-; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r14, %rcx
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrq %r11
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %r8, %r11, %r8
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %r13, %rcx, %rcx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %rbx, %rcx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrq %r9
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %r13, %r9, %r9
+; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r15, %r9
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrq %rdi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %r13, %rdi, %rdi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %rbp, %rdi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrq %rsi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %r13, %rsi, %rsi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r11, %rsi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrq %r8
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %r13, %r8, %r8
; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %rax, %r8
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %r13, (%rdx)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %r12, (%rdx)
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %r8, 48(%rdx)
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rcx, 56(%rdx)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rsi, 56(%rdx)
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rdi, 32(%rdx)
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rbx, 40(%rdx)
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rsi, 16(%rdx)
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %r9, 24(%rdx)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %r9, 40(%rdx)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rcx, 16(%rdx)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %r14, 24(%rdx)
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %r10, 8(%rdx)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: addq $8, %rsp
; X64-HAVE-BMI2-NO-SHLD-NEXT: popq %rbx
; X64-HAVE-BMI2-NO-SHLD-NEXT: popq %r12
; X64-HAVE-BMI2-NO-SHLD-NEXT: popq %r13
@@ -4994,12 +4484,9 @@ define void @shl_64bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
;
; X64-HAVE-BMI2-HAVE-SHLD-LABEL: shl_64bytes:
; X64-HAVE-BMI2-HAVE-SHLD: # %bb.0:
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: pushq %rbp
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: pushq %r15
; X64-HAVE-BMI2-HAVE-SHLD-NEXT: pushq %r14
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: pushq %r13
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: pushq %r12
; X64-HAVE-BMI2-HAVE-SHLD-NEXT: pushq %rbx
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: pushq %rax
; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq (%rdi), %rax
; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq 8(%rdi), %rcx
; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq 16(%rdi), %r8
@@ -5009,6 +4496,11 @@ define void @shl_64bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq 48(%rdi), %rbx
; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq 56(%rdi), %rdi
; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movl (%rsi), %esi
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: xorps %xmm0, %xmm0
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %rbx, -{{[0-9]+}}(%rsp)
; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %r11, -{{[0-9]+}}(%rsp)
@@ -5018,65 +4510,40 @@ define void @shl_64bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %rax, -{{[0-9]+}}(%rsp)
; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movl %esi, %ecx
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: andl $7, %ecx
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq $0, -{{[0-9]+}}(%rsp)
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: andl $63, %ecx
; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shrl $3, %esi
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: andl $63, %esi
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: andl $56, %esi
; X64-HAVE-BMI2-HAVE-SHLD-NEXT: negl %esi
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movslq %esi, %rax
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq -48(%rsp,%rax), %rsi
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shlxq %rcx, %rsi, %rbx
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq -32(%rsp,%rax), %rdi
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shlxq %rcx, %rdi, %r12
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq -16(%rsp,%rax), %r10
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shlxq %rcx, %r10, %r15
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, %ebp
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: notl %ebp
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: andl $63, %ebp
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq -64(%rsp,%rax), %r11
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq -56(%rsp,%rax), %r9
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %r9, %r8
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shrq %r8
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shrxq %rbp, %r8, %r8
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: orq %rbx, %r8
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq -40(%rsp,%rax), %r14
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %r14, %rbx
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shrq %rbx
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shrxq %rbp, %rbx, %rbx
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: orq %r12, %rbx
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq -24(%rsp,%rax), %r13
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %r13, %r12
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shrq %r12
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shrxq %rbp, %r12, %r12
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shlxq %rcx, %r11, %rbp
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: orq %r15, %r12
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shldq %cl, %rsi, %r14
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shldq %cl, %rdi, %r13
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq -8(%rsp,%rax), %rax
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shldq %cl, %r10, %rax
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: # kill: def $cl killed $cl killed $rcx
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shldq %cl, %r11, %r9
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %rax, 56(%rdx)
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %r13, 40(%rdx)
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %r14, 24(%rdx)
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %rbp, (%rdx)
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %r9, 8(%rdx)
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %r12, 48(%rdx)
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %rbx, 32(%rdx)
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %r8, 16(%rdx)
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movslq %esi, %r8
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq -48(%rsp,%r8), %rax
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq -40(%rsp,%r8), %r9
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %r9, %rsi
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shldq %cl, %rax, %rsi
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq -64(%rsp,%r8), %r10
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq -56(%rsp,%r8), %rdi
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shldq %cl, %rdi, %rax
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq -32(%rsp,%r8), %r11
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq -24(%rsp,%r8), %rbx
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %rbx, %r14
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shldq %cl, %r11, %r14
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shldq %cl, %r9, %r11
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq -16(%rsp,%r8), %r9
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq -8(%rsp,%r8), %r8
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shldq %cl, %r9, %r8
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shldq %cl, %rbx, %r9
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shldq %cl, %r10, %rdi
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shlxq %rcx, %r10, %rcx
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %r9, 48(%rdx)
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %r8, 56(%rdx)
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %r11, 32(%rdx)
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %r14, 40(%rdx)
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %rax, 16(%rdx)
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %rsi, 24(%rdx)
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %rcx, (%rdx)
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %rdi, 8(%rdx)
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: addq $8, %rsp
; X64-HAVE-BMI2-HAVE-SHLD-NEXT: popq %rbx
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: popq %r12
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: popq %r13
; X64-HAVE-BMI2-HAVE-SHLD-NEXT: popq %r14
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: popq %r15
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: popq %rbp
; X64-HAVE-BMI2-HAVE-SHLD-NEXT: retq
;
; X86-NO-BMI2-NO-SHLD-LABEL: shl_64bytes:
@@ -5085,42 +4552,44 @@ define void @shl_64bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X86-NO-BMI2-NO-SHLD-NEXT: pushl %ebx
; X86-NO-BMI2-NO-SHLD-NEXT: pushl %edi
; X86-NO-BMI2-NO-SHLD-NEXT: pushl %esi
-; X86-NO-BMI2-NO-SHLD-NEXT: subl $192, %esp
-; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl (%ebx), %eax
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 4(%ebx), %eax
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, (%esp) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 8(%ebx), %eax
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 12(%ebx), %eax
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 16(%ebx), %eax
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 20(%ebx), %eax
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 24(%ebx), %eax
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 28(%ebx), %eax
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 32(%ebx), %eax
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 36(%ebx), %eax
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 40(%ebx), %ebp
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 44(%ebx), %edi
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 48(%ebx), %esi
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 52(%ebx), %edx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 56(%ebx), %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 60(%ebx), %eax
-; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl (%ebx), %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: subl $204, %esp
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movl (%eax), %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 4(%eax), %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 8(%eax), %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 12(%eax), %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 16(%eax), %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 20(%eax), %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 24(%eax), %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 28(%eax), %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 32(%eax), %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 36(%eax), %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 40(%eax), %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 44(%eax), %edi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 48(%eax), %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 52(%eax), %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 56(%eax), %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 60(%eax), %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: movl (%ebp), %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: xorps %xmm0, %xmm0
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, {{[0-9]+}}(%esp)
; X86-NO-BMI2-NO-SHLD-NEXT: movl %esi, {{[0-9]+}}(%esp)
; X86-NO-BMI2-NO-SHLD-NEXT: movl %edi, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebp, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebx, {{[0-9]+}}(%esp)
; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
@@ -5129,6 +4598,9 @@ define void @shl_64bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
@@ -5137,200 +4609,179 @@ define void @shl_64bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl (%esp), %eax # 4-byte Reload
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebx, %eax
-; X86-NO-BMI2-NO-SHLD-NEXT: shrl $3, %eax
-; X86-NO-BMI2-NO-SHLD-NEXT: andl $63, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebp, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebp, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: shrl $3, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: andl $60, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-NO-SHLD-NEXT: leal {{[0-9]+}}(%esp), %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: subl %ecx, %eax
; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: leal {{[0-9]+}}(%esp), %ebp
-; X86-NO-BMI2-NO-SHLD-NEXT: subl %eax, %ebp
-; X86-NO-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl (%ebp), %edx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 4(%ebp), %edi
-; X86-NO-BMI2-NO-SHLD-NEXT: andl $7, %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl (%eax), %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 4(%eax), %edi
+; X86-NO-BMI2-NO-SHLD-NEXT: andl $31, %edx
; X86-NO-BMI2-NO-SHLD-NEXT: movl %edi, %eax
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebx, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, %ecx
; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %eax
-; X86-NO-BMI2-NO-SHLD-NEXT: shrl %edx
-; X86-NO-BMI2-NO-SHLD-NEXT: movb %bl, %ch
-; X86-NO-BMI2-NO-SHLD-NEXT: notb %ch
+; X86-NO-BMI2-NO-SHLD-NEXT: shrl %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %dl, %ch
+; X86-NO-BMI2-NO-SHLD-NEXT: xorb $31, %ch
; X86-NO-BMI2-NO-SHLD-NEXT: movb %ch, %cl
-; X86-NO-BMI2-NO-SHLD-NEXT: movb %ch, (%esp) # 1-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %edx
-; X86-NO-BMI2-NO-SHLD-NEXT: orl %eax, %edx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 12(%ebp), %edx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: movb %bl, %cl
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebx, %esi
-; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %edx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 8(%ebp), %eax
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %ch, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
+; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %eax, %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 12(%ebx), %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %dl, %cl
+; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 8(%ebx), %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebx, %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %esi, %ebx
; X86-NO-BMI2-NO-SHLD-NEXT: shrl %ebx
; X86-NO-BMI2-NO-SHLD-NEXT: movb %ch, %cl
; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: orl %edx, %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %eax, %ebx
; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %esi, %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebx, %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %bl, %cl
+; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %esi
; X86-NO-BMI2-NO-SHLD-NEXT: shrl %edi
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %esi, %edx
-; X86-NO-BMI2-NO-SHLD-NEXT: notl %edx
-; X86-NO-BMI2-NO-SHLD-NEXT: andl $31, %edx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %ch, %cl
; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %edi
-; X86-NO-BMI2-NO-SHLD-NEXT: orl %eax, %edi
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %esi, %edi
; X86-NO-BMI2-NO-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 20(%ebp), %edx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebx, %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %edx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 16(%ebp), %eax
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: shrl %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: movzbl (%esp), %ecx # 1-byte Folded Reload
-; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: orl %edx, %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %esi, %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebx, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 20(%ebp), %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %bl, %cl
; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 16(%ebp), %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %esi, %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: shrl %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %ch, %cl
+; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %eax, %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %bl, %cl
+; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %esi
; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
; X86-NO-BMI2-NO-SHLD-NEXT: shrl %edi
-; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NO-BMI2-NO-SHLD-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %ch, %cl
; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %edi
-; X86-NO-BMI2-NO-SHLD-NEXT: orl %eax, %edi
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %esi, %edi
; X86-NO-BMI2-NO-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
; X86-NO-BMI2-NO-SHLD-NEXT: movl 28(%ebp), %edi
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %edi, %esi
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebx, %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %esi
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 24(%ebp), %edx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, %eax
-; X86-NO-BMI2-NO-SHLD-NEXT: shrl %eax
-; X86-NO-BMI2-NO-SHLD-NEXT: movzbl (%esp), %ecx # 1-byte Folded Reload
-; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %eax
-; X86-NO-BMI2-NO-SHLD-NEXT: orl %esi, %eax
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebx, %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %edx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NO-BMI2-NO-SHLD-NEXT: shrl %eax
-; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NO-BMI2-NO-SHLD-NEXT: # kill: def $cl killed $cl killed $ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %eax
-; X86-NO-BMI2-NO-SHLD-NEXT: orl %edx, %eax
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 36(%ebp), %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %edi, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %bl, %cl
+; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 24(%ebp), %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %esi, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: shrl %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %ch, %cl
+; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %eax, %edx
; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebx, %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebx, %esi
-; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %edx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 32(%ebp), %eax
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebx, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %dl, %cl
+; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
; X86-NO-BMI2-NO-SHLD-NEXT: shrl %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: movzbl (%esp), %ecx # 1-byte Folded Reload
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %ch, %cl
; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: orl %edx, %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %esi, %ebx
; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %esi, %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebx, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 36(%ebp), %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %dl, %cl
; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 32(%ebp), %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %esi, %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: shrl %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %ch, %cl
+; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %eax, %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %dl, %cl
+; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %esi
; X86-NO-BMI2-NO-SHLD-NEXT: shrl %edi
-; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NO-BMI2-NO-SHLD-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %ch, %cl
; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %edi
-; X86-NO-BMI2-NO-SHLD-NEXT: orl %eax, %edi
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %esi, %edi
; X86-NO-BMI2-NO-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 44(%ebp), %edx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebx, %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %edx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 40(%ebp), %eax
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %edi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 44(%ebp), %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %esi, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %dl, %cl
+; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 40(%ebp), %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebx, %edi
; X86-NO-BMI2-NO-SHLD-NEXT: shrl %edi
-; X86-NO-BMI2-NO-SHLD-NEXT: movzbl (%esp), %ecx # 1-byte Folded Reload
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %ch, %cl
; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %edi
-; X86-NO-BMI2-NO-SHLD-NEXT: orl %edx, %edi
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %eax, %edi
; X86-NO-BMI2-NO-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebx, %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %eax
-; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; X86-NO-BMI2-NO-SHLD-NEXT: shrl %esi
-; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NO-BMI2-NO-SHLD-NEXT: # kill: def $cl killed $cl killed $ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %esi
-; X86-NO-BMI2-NO-SHLD-NEXT: orl %eax, %esi
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 52(%ebp), %eax
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %edi
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebx, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %dl, %cl
+; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-NO-BMI2-NO-SHLD-NEXT: shrl %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %ch, %cl
+; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %ebx, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 52(%eax), %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebp, %edi
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %dl, %cl
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %edi
-; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NO-BMI2-NO-SHLD-NEXT: negl %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 176(%esp,%ecx), %edx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-NO-BMI2-NO-SHLD-NEXT: negl %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 176(%esp,%eax), %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebx, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: shrl %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %ch, %cl
+; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %edi, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %dl, %cl
+; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %ebx
; X86-NO-BMI2-NO-SHLD-NEXT: shrl %esi
-; X86-NO-BMI2-NO-SHLD-NEXT: movzbl (%esp), %ecx # 1-byte Folded Reload
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %ch, %cl
; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %esi
-; X86-NO-BMI2-NO-SHLD-NEXT: orl %edi, %esi
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebx, %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %ebx, %esi
; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; X86-NO-BMI2-NO-SHLD-NEXT: shrl %edi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 60(%edi), %edx
; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NO-BMI2-NO-SHLD-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 56(%edi), %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebx, %edi
+; X86-NO-BMI2-NO-SHLD-NEXT: shrl %edi
+; X86-NO-BMI2-NO-SHLD-NEXT: movb {{[-0-9]+}}(%e{{[sb]}}p), %ch # 1-byte Reload
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %ch, %cl
; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %edi
; X86-NO-BMI2-NO-SHLD-NEXT: orl %edx, %edi
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 60(%ebp), %edi
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebx, %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %edi
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 56(%ebp), %edx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %dl, %cl
+; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %ebx
; X86-NO-BMI2-NO-SHLD-NEXT: shrl %ebp
-; X86-NO-BMI2-NO-SHLD-NEXT: movzbl (%esp), %ecx # 1-byte Folded Reload
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %ch, %cl
; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %ebp
-; X86-NO-BMI2-NO-SHLD-NEXT: orl %edi, %ebp
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebx, %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %edx
-; X86-NO-BMI2-NO-SHLD-NEXT: shrl %eax
-; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NO-BMI2-NO-SHLD-NEXT: # kill: def $cl killed $cl killed $ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %eax
-; X86-NO-BMI2-NO-SHLD-NEXT: orl %edx, %eax
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebx, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %ebx, %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, %ecx
; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %edx
; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, (%ecx)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, 56(%ecx)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebp, 60(%ecx)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, 48(%ecx)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %esi, 52(%ecx)
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebp, 56(%ecx)
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %edi, 60(%ecx)
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %esi, 48(%ecx)
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, 52(%ecx)
; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, 40(%ecx)
; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
@@ -5353,7 +4804,7 @@ define void @shl_64bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, 12(%ecx)
; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, 4(%ecx)
-; X86-NO-BMI2-NO-SHLD-NEXT: addl $192, %esp
+; X86-NO-BMI2-NO-SHLD-NEXT: addl $204, %esp
; X86-NO-BMI2-NO-SHLD-NEXT: popl %esi
; X86-NO-BMI2-NO-SHLD-NEXT: popl %edi
; X86-NO-BMI2-NO-SHLD-NEXT: popl %ebx
@@ -5366,213 +4817,153 @@ define void @shl_64bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X86-NO-BMI2-HAVE-SHLD-NEXT: pushl %ebx
; X86-NO-BMI2-HAVE-SHLD-NEXT: pushl %edi
; X86-NO-BMI2-HAVE-SHLD-NEXT: pushl %esi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: subl $204, %esp
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl (%eax), %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 4(%eax), %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 8(%eax), %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 12(%eax), %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 16(%eax), %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, (%esp) # 4-byte Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 20(%eax), %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 24(%eax), %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 28(%eax), %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 32(%eax), %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 36(%eax), %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 40(%eax), %ebp
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 44(%eax), %ebx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 48(%eax), %edi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 52(%eax), %esi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 56(%eax), %edx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 60(%eax), %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl (%eax), %eax
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: subl $188, %esp
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl (%ecx), %eax
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 4(%ecx), %eax
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 8(%ecx), %eax
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 12(%ecx), %eax
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 16(%ecx), %eax
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 20(%ecx), %eax
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 24(%ecx), %eax
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 28(%ecx), %eax
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 32(%ecx), %eax
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 36(%ecx), %eax
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, (%esp) # 4-byte Spill
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 40(%ecx), %ebp
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 44(%ecx), %ebx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 48(%ecx), %edi
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 52(%ecx), %esi
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 56(%ecx), %edx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 60(%ecx), %eax
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl (%ecx), %ecx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: xorps %xmm0, %xmm0
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edx, {{[0-9]+}}(%esp)
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %esi, {{[0-9]+}}(%esp)
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edi, {{[0-9]+}}(%esp)
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ebx, {{[0-9]+}}(%esp)
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ebp, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl (%esp), %ecx # 4-byte Reload
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, %esi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shrl $3, %esi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: andl $63, %esi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: leal {{[0-9]+}}(%esp), %edi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: subl %esi, %edi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 8(%edi), %ebx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: andl $7, %eax
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shll %cl, %ebx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 4(%edi), %ebp
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shrl %ebp
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, %edx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: notl %edx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: andl $31, %edx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edx, %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shrl %cl, %ebp
-; X86-NO-BMI2-HAVE-SHLD-NEXT: orl %ebx, %ebp
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 16(%edi), %ebx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shll %cl, %ebx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 12(%edi), %ebp
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shrl %ebp
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edx, %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shrl %cl, %ebp
-; X86-NO-BMI2-HAVE-SHLD-NEXT: orl %ebx, %ebp
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 24(%edi), %ebx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shll %cl, %ebx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 20(%edi), %ebp
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shrl %ebp
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edx, %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shrl %cl, %ebp
-; X86-NO-BMI2-HAVE-SHLD-NEXT: orl %ebx, %ebp
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 32(%edi), %ebx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shll %cl, %ebx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 28(%edi), %ebp
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shrl %ebp
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edx, %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shrl %cl, %ebp
-; X86-NO-BMI2-HAVE-SHLD-NEXT: orl %ebx, %ebp
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 40(%edi), %ebx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl (%esp), %eax # 4-byte Reload
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, %ebp
+; X86-NO-BMI2-HAVE-SHLD-NEXT: shrl $3, %ebp
+; X86-NO-BMI2-HAVE-SHLD-NEXT: andl $60, %ebp
+; X86-NO-BMI2-HAVE-SHLD-NEXT: leal {{[0-9]+}}(%esp), %eax
+; X86-NO-BMI2-HAVE-SHLD-NEXT: subl %ebp, %eax
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 8(%eax), %esi
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 12(%eax), %edx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: andl $31, %ecx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edx, %edi
+; X86-NO-BMI2-HAVE-SHLD-NEXT: shldl %cl, %esi, %edi
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 4(%eax), %edi
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-HAVE-SHLD-NEXT: shldl %cl, %edi, %esi
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 16(%eax), %edi
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 20(%eax), %esi
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %esi, %ebx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: shldl %cl, %edi, %ebx
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shll %cl, %ebx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 36(%edi), %ebp
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shrl %ebp
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edx, %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shrl %cl, %ebp
-; X86-NO-BMI2-HAVE-SHLD-NEXT: orl %ebx, %ebp
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: negl %esi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 188(%esp,%esi), %ebx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: shldl %cl, %edx, %edi
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 24(%eax), %edi
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 28(%eax), %edx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edx, %ebx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: shldl %cl, %edi, %ebx
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shll %cl, %ebx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 44(%edi), %esi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %esi, (%esp) # 4-byte Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shrl %esi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edx, %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shrl %cl, %esi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: orl %ebx, %esi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 56(%edi), %esi
+; X86-NO-BMI2-HAVE-SHLD-NEXT: shldl %cl, %esi, %edi
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 32(%eax), %edi
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 36(%eax), %esi
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %esi, %ebx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shll %cl, %ebx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 52(%edi), %ebp
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shrl %ebp
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edx, %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shrl %cl, %ebp
-; X86-NO-BMI2-HAVE-SHLD-NEXT: orl %ebx, %ebp
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shldl %cl, %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shldl %cl, %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shldl %cl, %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shldl %cl, %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X86-NO-BMI2-HAVE-SHLD-NEXT: shldl %cl, %edi, %ebx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-HAVE-SHLD-NEXT: shldl %cl, %edx, %edi
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 40(%eax), %edx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 44(%eax), %edi
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-HAVE-SHLD-NEXT: shldl %cl, %edx, %edi
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edi, (%esp) # 4-byte Spill
+; X86-NO-BMI2-HAVE-SHLD-NEXT: shldl %cl, %esi, %edx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 56(%eax), %edx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 60(%eax), %edi
+; X86-NO-BMI2-HAVE-SHLD-NEXT: shldl %cl, %edx, %edi
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl (%eax), %ebx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 52(%eax), %esi
+; X86-NO-BMI2-HAVE-SHLD-NEXT: shldl %cl, %esi, %edx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: negl %ebp
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 160(%esp,%ebp), %eax
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ebp
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edx, 56(%ebp)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edi, 60(%ebp)
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shldl %cl, %edx, (%esp) # 4-byte Folded Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl (%edi), %ebx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 60(%edi), %edi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shldl %cl, %esi, %edi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edi, 60(%edx)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shldl %cl, %ebx, %edi
+; X86-NO-BMI2-HAVE-SHLD-NEXT: shldl %cl, %ebx, %edx
; X86-NO-BMI2-HAVE-SHLD-NEXT: shll %cl, %ebx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X86-NO-BMI2-HAVE-SHLD-NEXT: shldl %cl, %eax, %esi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %esi, 52(%edx)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl (%esp), %eax # 4-byte Reload
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, 44(%edx)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, 36(%edx)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X86-NO-BMI2-HAVE-SHLD-NEXT: shldl %cl, %edi, %eax
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, 48(%ebp)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %esi, 52(%ebp)
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, 28(%edx)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, 40(%ebp)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl (%esp), %eax # 4-byte Reload
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, 44(%ebp)
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, 20(%edx)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, 32(%ebp)
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, 12(%edx)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ebx, (%edx)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edi, 4(%edx)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ebp, 56(%edx)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, 36(%ebp)
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, 48(%edx)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, 24(%ebp)
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, 40(%edx)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, 28(%ebp)
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, 32(%edx)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, 16(%ebp)
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, 24(%edx)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, 20(%ebp)
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, 16(%edx)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, 8(%ebp)
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, 8(%edx)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: addl $204, %esp
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, 12(%ebp)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ebx, (%ebp)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edx, 4(%ebp)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: addl $188, %esp
; X86-NO-BMI2-HAVE-SHLD-NEXT: popl %esi
; X86-NO-BMI2-HAVE-SHLD-NEXT: popl %edi
; X86-NO-BMI2-HAVE-SHLD-NEXT: popl %ebx
@@ -5585,50 +4976,55 @@ define void @shl_64bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %ebx
; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %edi
; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: subl $216, %esp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl (%edx), %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 4(%edx), %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: subl $204, %esp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl (%ebp), %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 8(%edx), %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 4(%ebp), %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 12(%edx), %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 8(%ebp), %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 16(%edx), %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 12(%ebp), %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 20(%edx), %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 16(%ebp), %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 24(%edx), %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 20(%ebp), %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 28(%edx), %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 24(%ebp), %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 32(%edx), %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 28(%ebp), %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 36(%edx), %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 32(%ebp), %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, (%esp) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 36(%ebp), %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 40(%edx), %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 44(%edx), %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 48(%edx), %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 52(%edx), %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 56(%edx), %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 60(%edx), %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl (%edx), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 40(%ebp), %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 44(%ebp), %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 48(%ebp), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 52(%ebp), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 56(%ebp), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 60(%ebp), %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl (%ebp), %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: xorps %xmm0, %xmm0
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edi, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebx, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebp, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl (%esp), %eax # 4-byte Reload
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
@@ -5641,179 +5037,150 @@ define void @shl_64bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: andl $7, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl $3, %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: andl $63, %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebp, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: andl $31, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl $3, %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: andl $60, %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-HAVE-BMI2-NO-SHLD-NEXT: leal {{[0-9]+}}(%esp), %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: subl %edx, %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 8(%edi), %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: notl %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: andl $31, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 4(%edi), %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, (%esp) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, %esi, %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %eax, %ebp, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 12(%edi), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: subl %ebp, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl (%edi), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 4(%edi), %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: xorb $31, %bl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebx, %ecx, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %eax, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ecx, %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, %esi, %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 16(%edi), %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %eax, %edx, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 8(%edi), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebx, %ecx, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 12(%edi), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %ecx, %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebp, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %esi, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebx, %eax, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 16(%edi), %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebx, %eax, %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 20(%edi), %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, %esi, %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 24(%edi), %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %eax, %edx, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 28(%edi), %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, %esi, %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 32(%edi), %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %eax, %edx, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 36(%edi), %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, %esi, %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 40(%edi), %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %eax, %esi, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 44(%edi), %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, %esi, %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 48(%edi), %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %eax, %esi, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 52(%edi), %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, %ebx, %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 56(%edi), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %esi, %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebp, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebx, %ecx, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %eax, %ecx
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %eax, %ecx, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ecx, %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %eax, (%esp), %ebx # 4-byte Folded Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %eax, {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 24(%edi), %ecx
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, (%esp) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %eax, {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %eax, {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %eax, {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %eax, {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %eax, %esi, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebx, %ecx, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 28(%edi), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %ecx, %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebp, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, (%esp), %eax # 4-byte Folded Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebx, %esi, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %eax, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, (%esp) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 32(%edi), %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebx, %eax, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 36(%edi), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %esi, %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebp, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebx, %ecx, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %eax, %ecx
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: negl %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %eax, 212(%esp,%ecx), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 40(%edi), %ecx
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl (%edi), %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %eax, %ecx, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: notb %dl
; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, %ecx, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebx, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebx, %ecx, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 44(%edi), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %ecx, %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebp, %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, %ebp, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl (%esp), %eax # 4-byte Folded Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, (%esp) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebx, %esi, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %eax, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 48(%edi), %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebp, %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, %eax, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebx, %eax, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 52(%edi), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %ecx, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %ebp, %ebp
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, %eax, %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, %eax, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, %eax, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebx, %eax, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebp, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: negl %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, 188(%esp,%ecx), %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 56(%edi), %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %edi, %edx
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, %eax, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, %ebp, %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edx, %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebx, (%edx)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebp, 60(%edx)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 52(%edx)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 44(%edx)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, 36(%edx)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edi, 28(%edx)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 20(%edx)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl (%esp), %eax # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 12(%edx)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 4(%edx)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 56(%edx)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 48(%edx)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 40(%edx)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 32(%edx)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 24(%edx)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 16(%edx)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 8(%edx)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: addl $216, %esp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebx, %eax, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edx, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebx, %edi, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %eax, %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, (%eax)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 56(%eax)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebp, 60(%eax)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, 48(%eax)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 52(%eax)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 40(%eax)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 44(%eax)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 32(%eax)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 36(%eax)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl (%esp), %ecx # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 24(%eax)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 28(%eax)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 16(%eax)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 20(%eax)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 8(%eax)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 12(%eax)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 4(%eax)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl $204, %esp
; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %edi
; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %ebx
@@ -5827,42 +5194,44 @@ define void @shl_64bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: pushl %edi
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: pushl %esi
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: subl $204, %esp
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl (%edi), %eax
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ebx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl (%ebx), %eax
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 4(%edi), %eax
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 4(%ebx), %eax
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 8(%edi), %eax
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 8(%ebx), %eax
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 12(%edi), %eax
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 12(%ebx), %eax
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 16(%edi), %eax
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 16(%ebx), %eax
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 20(%edi), %eax
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 20(%ebx), %eax
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 24(%edi), %eax
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 24(%ebx), %eax
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 28(%edi), %eax
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 28(%ebx), %eax
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 32(%edi), %eax
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 32(%ebx), %eax
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 36(%edi), %eax
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, (%esp) # 4-byte Spill
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 40(%edi), %ebp
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 44(%edi), %ebx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 48(%edi), %esi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 52(%edi), %edx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 56(%edi), %ecx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 60(%edi), %eax
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl (%edi), %edi
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 36(%ebx), %eax
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 40(%ebx), %ebp
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 44(%ebx), %edi
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 48(%ebx), %esi
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 52(%ebx), %edx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 56(%ebx), %ecx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 60(%ebx), %eax
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ebx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl (%ebx), %ebx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: xorps %xmm0, %xmm0
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edx, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %esi, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ebx, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edi, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ebp, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl (%esp), %eax # 4-byte Reload
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
@@ -5870,6 +5239,9 @@ define void @shl_64bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
@@ -5882,148 +5254,93 @@ define void @shl_64bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edi, %eax
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: andl $7, %eax
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrl $3, %edi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: andl $63, %edi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: leal {{[0-9]+}}(%esp), %edx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: subl %edi, %edx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 8(%edx), %ebp
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ebp, (%esp) # 4-byte Spill
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, %ecx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: notl %ecx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ebx, %ecx
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: andl $31, %ecx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 4(%edx), %esi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrl %esi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrxl %ecx, %esi, %ebx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shlxl %eax, %ebp, %esi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: orl %esi, %ebx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 12(%edx), %esi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrl %esi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrxl %ecx, %esi, %ebx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 16(%edx), %esi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shlxl %eax, %esi, %esi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: orl %esi, %ebx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 20(%edx), %esi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrl %esi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrxl %ecx, %esi, %ebx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 24(%edx), %esi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shlxl %eax, %esi, %esi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: orl %esi, %ebx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 28(%edx), %esi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrl %esi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrxl %ecx, %esi, %ebx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 32(%edx), %esi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shlxl %eax, %esi, %esi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: orl %esi, %ebx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 36(%edx), %esi
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrl $3, %ebx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: andl $60, %ebx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: leal {{[0-9]+}}(%esp), %eax
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: subl %ebx, %eax
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 4(%eax), %esi
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrl %esi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrxl %ecx, %esi, %ebx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 40(%edx), %esi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shlxl %eax, %esi, %esi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: orl %esi, %ebx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 52(%edx), %ebp
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ebp, %esi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrl %esi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrxl %ecx, %esi, %ebx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 56(%edx), %esi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shlxl %eax, %esi, %esi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: orl %esi, %ebx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 44(%edx), %ebx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ebx, %esi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrl %esi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrxl %ecx, %esi, %ecx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: negl %esi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 188(%esp,%esi), %esi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shlxl %eax, %esi, %esi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: orl %ecx, %esi
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 8(%eax), %edi
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 12(%eax), %edx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edx, %ebp
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shldl %cl, %edi, %ebp
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shldl %cl, %esi, %edi
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 16(%eax), %edi
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 20(%eax), %esi
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %esi, %ebp
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shldl %cl, %edi, %ebp
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shldl %cl, %edx, %edi
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 24(%eax), %edi
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 28(%eax), %edx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edx, %ebp
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shldl %cl, %edi, %ebp
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shldl %cl, %esi, %edi
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 32(%eax), %edi
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 36(%eax), %esi
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %esi, %ebp
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shldl %cl, %edi, %ebp
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shldl %cl, %edx, %edi
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 40(%eax), %ebp
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 44(%eax), %edx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shldl %cl, %ebp, %edx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shldl %cl, %esi, %ebp
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 56(%eax), %edx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 60(%eax), %edi
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shldl %cl, %edx, %edi
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl (%eax), %esi
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, %ecx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl (%esp), %esi # 4-byte Reload
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shldl %cl, %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shldl %cl, %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shldl %cl, %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shldl %cl, %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shldl %cl, %esi, %ebx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl (%edx), %edi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 60(%edx), %esi
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 52(%eax), %esi
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shldl %cl, %esi, %edx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: negl %ebx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 176(%esp,%ebx), %ebx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edx, 56(%eax)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edi, 60(%eax)
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shldl %cl, %edx, %esi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %esi, 60(%edx)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shlxl %eax, %edi, %ecx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, (%esp) # 4-byte Spill
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, %ecx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shldl %cl, %edi, %esi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shldl %cl, %eax, %ebp
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ebp, 52(%edx)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ebx, 44(%edx)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, 36(%edx)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, 28(%edx)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, 20(%edx)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, 12(%edx)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl (%esp), %eax # 4-byte Reload
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, (%edx)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %esi, 4(%edx)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, 56(%edx)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, 48(%edx)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, 40(%edx)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, 32(%edx)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, 24(%edx)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, 16(%edx)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, 8(%edx)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shlxl %ecx, %edx, %edi
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shldl %cl, %edx, %edi
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shldl %cl, %ebx, %esi
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shldl %cl, %edx, %ebx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ebx, 48(%eax)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %esi, 52(%eax)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ebp, 40(%eax)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, 44(%eax)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, 32(%eax)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, 36(%eax)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, 24(%eax)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, 28(%eax)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, 16(%eax)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, 20(%eax)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, 8(%eax)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, 12(%eax)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, (%eax)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edi, 4(%eax)
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: addl $204, %esp
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: popl %esi
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: popl %edi
@@ -6045,6 +5362,7 @@ define void @ashr_64bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X64-NO-BMI2-NO-SHLD-NEXT: pushq %r13
; X64-NO-BMI2-NO-SHLD-NEXT: pushq %r12
; X64-NO-BMI2-NO-SHLD-NEXT: pushq %rbx
+; X64-NO-BMI2-NO-SHLD-NEXT: pushq %rax
; X64-NO-BMI2-NO-SHLD-NEXT: movq (%rdi), %rax
; X64-NO-BMI2-NO-SHLD-NEXT: movq 8(%rdi), %rcx
; X64-NO-BMI2-NO-SHLD-NEXT: movq 16(%rdi), %r9
@@ -6072,9 +5390,9 @@ define void @ashr_64bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X64-NO-BMI2-NO-SHLD-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
; X64-NO-BMI2-NO-SHLD-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
; X64-NO-BMI2-NO-SHLD-NEXT: movl %r8d, %eax
-; X64-NO-BMI2-NO-SHLD-NEXT: andl $7, %eax
+; X64-NO-BMI2-NO-SHLD-NEXT: andl $63, %eax
; X64-NO-BMI2-NO-SHLD-NEXT: shrl $3, %r8d
-; X64-NO-BMI2-NO-SHLD-NEXT: andl $63, %r8d
+; X64-NO-BMI2-NO-SHLD-NEXT: andl $56, %r8d
; X64-NO-BMI2-NO-SHLD-NEXT: movq -128(%rsp,%r8), %r11
; X64-NO-BMI2-NO-SHLD-NEXT: movq -120(%rsp,%r8), %r9
; X64-NO-BMI2-NO-SHLD-NEXT: movq %r9, %rsi
@@ -6082,7 +5400,6 @@ define void @ashr_64bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X64-NO-BMI2-NO-SHLD-NEXT: shrq %cl, %rsi
; X64-NO-BMI2-NO-SHLD-NEXT: movl %eax, %edi
; X64-NO-BMI2-NO-SHLD-NEXT: notl %edi
-; X64-NO-BMI2-NO-SHLD-NEXT: andl $63, %edi
; X64-NO-BMI2-NO-SHLD-NEXT: movq -112(%rsp,%r8), %r14
; X64-NO-BMI2-NO-SHLD-NEXT: leaq (%r14,%r14), %r10
; X64-NO-BMI2-NO-SHLD-NEXT: movl %edi, %ecx
@@ -6091,7 +5408,7 @@ define void @ashr_64bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X64-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
; X64-NO-BMI2-NO-SHLD-NEXT: shrq %cl, %r11
; X64-NO-BMI2-NO-SHLD-NEXT: movl %eax, %esi
-; X64-NO-BMI2-NO-SHLD-NEXT: notb %sil
+; X64-NO-BMI2-NO-SHLD-NEXT: xorb $63, %sil
; X64-NO-BMI2-NO-SHLD-NEXT: addq %r9, %r9
; X64-NO-BMI2-NO-SHLD-NEXT: movl %esi, %ecx
; X64-NO-BMI2-NO-SHLD-NEXT: shlq %cl, %r9
@@ -6143,6 +5460,7 @@ define void @ashr_64bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X64-NO-BMI2-NO-SHLD-NEXT: movq %rbx, 24(%rdx)
; X64-NO-BMI2-NO-SHLD-NEXT: movq %r9, (%rdx)
; X64-NO-BMI2-NO-SHLD-NEXT: movq %r10, 8(%rdx)
+; X64-NO-BMI2-NO-SHLD-NEXT: addq $8, %rsp
; X64-NO-BMI2-NO-SHLD-NEXT: popq %rbx
; X64-NO-BMI2-NO-SHLD-NEXT: popq %r12
; X64-NO-BMI2-NO-SHLD-NEXT: popq %r13
@@ -6153,22 +5471,19 @@ define void @ashr_64bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
;
; X64-NO-BMI2-HAVE-SHLD-LABEL: ashr_64bytes:
; X64-NO-BMI2-HAVE-SHLD: # %bb.0:
-; X64-NO-BMI2-HAVE-SHLD-NEXT: pushq %rbp
; X64-NO-BMI2-HAVE-SHLD-NEXT: pushq %r15
; X64-NO-BMI2-HAVE-SHLD-NEXT: pushq %r14
-; X64-NO-BMI2-HAVE-SHLD-NEXT: pushq %r13
-; X64-NO-BMI2-HAVE-SHLD-NEXT: pushq %r12
; X64-NO-BMI2-HAVE-SHLD-NEXT: pushq %rbx
-; X64-NO-BMI2-HAVE-SHLD-NEXT: pushq %rax
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq (%rdi), %rax
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq 8(%rdi), %rcx
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq 16(%rdi), %r8
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq 24(%rdi), %r9
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq 32(%rdi), %r10
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq 40(%rdi), %r11
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq 48(%rdi), %rbx
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq 56(%rdi), %r14
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movl (%rsi), %edi
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq (%rdi), %rcx
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq 8(%rdi), %r8
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq 16(%rdi), %r9
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq 24(%rdi), %r10
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq 32(%rdi), %r11
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq 40(%rdi), %rbx
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq 48(%rdi), %r14
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq 56(%rdi), %rdi
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movl (%rsi), %eax
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %r14, -{{[0-9]+}}(%rsp)
; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %rbx, -{{[0-9]+}}(%rsp)
; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %r11, -{{[0-9]+}}(%rsp)
@@ -6176,74 +5491,50 @@ define void @ashr_64bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %r9, -{{[0-9]+}}(%rsp)
; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %r8, -{{[0-9]+}}(%rsp)
; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %rax, -{{[0-9]+}}(%rsp)
-; X64-NO-BMI2-HAVE-SHLD-NEXT: sarq $63, %r14
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %r14, (%rsp)
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %r14, -{{[0-9]+}}(%rsp)
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %r14, -{{[0-9]+}}(%rsp)
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %r14, -{{[0-9]+}}(%rsp)
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %r14, -{{[0-9]+}}(%rsp)
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %r14, -{{[0-9]+}}(%rsp)
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %r14, -{{[0-9]+}}(%rsp)
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %r14, -{{[0-9]+}}(%rsp)
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movl %edi, %eax
-; X64-NO-BMI2-HAVE-SHLD-NEXT: andl $7, %eax
-; X64-NO-BMI2-HAVE-SHLD-NEXT: shrl $3, %edi
-; X64-NO-BMI2-HAVE-SHLD-NEXT: andl $63, %edi
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq -120(%rsp,%rdi), %r8
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq -112(%rsp,%rdi), %rbx
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %rbx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, %ecx
-; X64-NO-BMI2-HAVE-SHLD-NEXT: shrq %cl, %rbx
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, %esi
-; X64-NO-BMI2-HAVE-SHLD-NEXT: notl %esi
-; X64-NO-BMI2-HAVE-SHLD-NEXT: andl $63, %esi
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq -104(%rsp,%rdi), %r11
-; X64-NO-BMI2-HAVE-SHLD-NEXT: leaq (%r11,%r11), %r10
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movl %esi, %ecx
-; X64-NO-BMI2-HAVE-SHLD-NEXT: shlq %cl, %r10
-; X64-NO-BMI2-HAVE-SHLD-NEXT: orq %rbx, %r10
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq -96(%rsp,%rdi), %r15
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %r15, %r12
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, %ecx
-; X64-NO-BMI2-HAVE-SHLD-NEXT: shrq %cl, %r12
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq -88(%rsp,%rdi), %r14
-; X64-NO-BMI2-HAVE-SHLD-NEXT: leaq (%r14,%r14), %rbx
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movl %esi, %ecx
-; X64-NO-BMI2-HAVE-SHLD-NEXT: shlq %cl, %rbx
-; X64-NO-BMI2-HAVE-SHLD-NEXT: orq %r12, %rbx
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq -80(%rsp,%rdi), %r12
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %r12, %r13
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, %ecx
-; X64-NO-BMI2-HAVE-SHLD-NEXT: shrq %cl, %r13
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq -72(%rsp,%rdi), %rbp
-; X64-NO-BMI2-HAVE-SHLD-NEXT: leaq (%rbp,%rbp), %r9
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movl %esi, %ecx
-; X64-NO-BMI2-HAVE-SHLD-NEXT: shlq %cl, %r9
-; X64-NO-BMI2-HAVE-SHLD-NEXT: orq %r13, %r9
+; X64-NO-BMI2-HAVE-SHLD-NEXT: sarq $63, %rdi
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
; X64-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, %ecx
-; X64-NO-BMI2-HAVE-SHLD-NEXT: shrdq %cl, %r15, %r11
-; X64-NO-BMI2-HAVE-SHLD-NEXT: shrdq %cl, %r12, %r14
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq -64(%rsp,%rdi), %rsi
-; X64-NO-BMI2-HAVE-SHLD-NEXT: shrdq %cl, %rsi, %rbp
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload
+; X64-NO-BMI2-HAVE-SHLD-NEXT: andl $63, %ecx
+; X64-NO-BMI2-HAVE-SHLD-NEXT: shrl $3, %eax
+; X64-NO-BMI2-HAVE-SHLD-NEXT: andl $56, %eax
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq -112(%rsp,%rax), %rdi
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq -128(%rsp,%rax), %rsi
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq -120(%rsp,%rax), %r9
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %r9, %r8
; X64-NO-BMI2-HAVE-SHLD-NEXT: shrdq %cl, %rdi, %r8
-; X64-NO-BMI2-HAVE-SHLD-NEXT: sarq %cl, %rsi
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %rbp, 48(%rdx)
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %rsi, 56(%rdx)
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %r14, 32(%rdx)
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %r11, 16(%rdx)
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %r8, (%rdx)
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %r9, 40(%rdx)
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq -96(%rsp,%rax), %r10
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq -104(%rsp,%rax), %r11
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %r11, %rbx
+; X64-NO-BMI2-HAVE-SHLD-NEXT: shrdq %cl, %r10, %rbx
+; X64-NO-BMI2-HAVE-SHLD-NEXT: shrdq %cl, %r11, %rdi
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq -80(%rsp,%rax), %r11
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq -88(%rsp,%rax), %r14
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %r14, %r15
+; X64-NO-BMI2-HAVE-SHLD-NEXT: shrdq %cl, %r11, %r15
+; X64-NO-BMI2-HAVE-SHLD-NEXT: shrdq %cl, %r14, %r10
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq -72(%rsp,%rax), %rax
+; X64-NO-BMI2-HAVE-SHLD-NEXT: shrdq %cl, %rax, %r11
+; X64-NO-BMI2-HAVE-SHLD-NEXT: shrdq %cl, %r9, %rsi
+; X64-NO-BMI2-HAVE-SHLD-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NO-BMI2-HAVE-SHLD-NEXT: sarq %cl, %rax
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %r11, 48(%rdx)
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %rax, 56(%rdx)
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %r10, 32(%rdx)
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %r15, 40(%rdx)
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %rdi, 16(%rdx)
; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %rbx, 24(%rdx)
-; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %r10, 8(%rdx)
-; X64-NO-BMI2-HAVE-SHLD-NEXT: addq $8, %rsp
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %rsi, (%rdx)
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %r8, 8(%rdx)
; X64-NO-BMI2-HAVE-SHLD-NEXT: popq %rbx
-; X64-NO-BMI2-HAVE-SHLD-NEXT: popq %r12
-; X64-NO-BMI2-HAVE-SHLD-NEXT: popq %r13
; X64-NO-BMI2-HAVE-SHLD-NEXT: popq %r14
; X64-NO-BMI2-HAVE-SHLD-NEXT: popq %r15
-; X64-NO-BMI2-HAVE-SHLD-NEXT: popq %rbp
; X64-NO-BMI2-HAVE-SHLD-NEXT: retq
;
; X64-HAVE-BMI2-NO-SHLD-LABEL: ashr_64bytes:
@@ -6254,6 +5545,7 @@ define void @ashr_64bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X64-HAVE-BMI2-NO-SHLD-NEXT: pushq %r13
; X64-HAVE-BMI2-NO-SHLD-NEXT: pushq %r12
; X64-HAVE-BMI2-NO-SHLD-NEXT: pushq %rbx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: pushq %rax
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq (%rdi), %rcx
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq 8(%rdi), %r8
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq 16(%rdi), %r9
@@ -6281,44 +5573,43 @@ define void @ashr_64bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
; X64-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
-; X64-HAVE-BMI2-NO-SHLD-NEXT: andl $7, %ecx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: andl $63, %ecx
; X64-HAVE-BMI2-NO-SHLD-NEXT: shrl $3, %eax
-; X64-HAVE-BMI2-NO-SHLD-NEXT: andl $63, %eax
+; X64-HAVE-BMI2-NO-SHLD-NEXT: andl $56, %eax
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -120(%rsp,%rax), %rdi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -112(%rsp,%rax), %r9
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, %rdi, %rbx
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, -128(%rsp,%rax), %r14
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -112(%rsp,%rax), %r8
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, %rdi, %r15
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, -128(%rsp,%rax), %rbx
; X64-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, %esi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -104(%rsp,%rax), %r8
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, %r8, %r13
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, %r9, %r10
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -104(%rsp,%rax), %r9
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, %r9, %r13
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, %r8, %r10
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -88(%rsp,%rax), %r11
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, %r11, %r15
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, %r11, %r14
; X64-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, %r12d
; X64-HAVE-BMI2-NO-SHLD-NEXT: notl %r12d
-; X64-HAVE-BMI2-NO-SHLD-NEXT: andl $63, %r12d
-; X64-HAVE-BMI2-NO-SHLD-NEXT: addq %r9, %r9
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %r12, %r9, %r9
-; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %rbx, %r9
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -96(%rsp,%rax), %rbx
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, %rbx, %rbp
-; X64-HAVE-BMI2-NO-SHLD-NEXT: notb %sil
+; X64-HAVE-BMI2-NO-SHLD-NEXT: addq %r8, %r8
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %r12, %r8, %r8
+; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r15, %r8
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -96(%rsp,%rax), %r15
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, %r15, %rbp
+; X64-HAVE-BMI2-NO-SHLD-NEXT: xorb $63, %sil
; X64-HAVE-BMI2-NO-SHLD-NEXT: addq %rdi, %rdi
; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rsi, %rdi, %rdi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r14, %rdi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: addq %rbx, %rbx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %rbx, %rdi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: leaq (%r15,%r15), %rbx
; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %r12, %rbx, %rbx
; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r13, %rbx
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -80(%rsp,%rax), %r14
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, %r14, %r13
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -80(%rsp,%rax), %r15
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, %r15, %r13
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -72(%rsp,%rax), %rax
; X64-HAVE-BMI2-NO-SHLD-NEXT: sarxq %rcx, %rax, %rcx
-; X64-HAVE-BMI2-NO-SHLD-NEXT: addq %r8, %r8
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rsi, %r8, %r8
-; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r10, %r8
-; X64-HAVE-BMI2-NO-SHLD-NEXT: leaq (%r14,%r14), %r10
+; X64-HAVE-BMI2-NO-SHLD-NEXT: addq %r9, %r9
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rsi, %r9, %r9
+; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r10, %r9
+; X64-HAVE-BMI2-NO-SHLD-NEXT: leaq (%r15,%r15), %r10
; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %r12, %r10, %r10
-; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r15, %r10
+; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r14, %r10
; X64-HAVE-BMI2-NO-SHLD-NEXT: addq %r11, %r11
; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rsi, %r11, %r11
; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %rbp, %r11
@@ -6329,10 +5620,11 @@ define void @ashr_64bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rax, 48(%rdx)
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %r11, 32(%rdx)
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %r10, 40(%rdx)
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %r8, 16(%rdx)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %r9, 16(%rdx)
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rbx, 24(%rdx)
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rdi, (%rdx)
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %r9, 8(%rdx)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %r8, 8(%rdx)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: addq $8, %rsp
; X64-HAVE-BMI2-NO-SHLD-NEXT: popq %rbx
; X64-HAVE-BMI2-NO-SHLD-NEXT: popq %r12
; X64-HAVE-BMI2-NO-SHLD-NEXT: popq %r13
@@ -6343,11 +5635,8 @@ define void @ashr_64bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
;
; X64-HAVE-BMI2-HAVE-SHLD-LABEL: ashr_64bytes:
; X64-HAVE-BMI2-HAVE-SHLD: # %bb.0:
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: pushq %rbp
; X64-HAVE-BMI2-HAVE-SHLD-NEXT: pushq %r15
; X64-HAVE-BMI2-HAVE-SHLD-NEXT: pushq %r14
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: pushq %r13
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: pushq %r12
; X64-HAVE-BMI2-HAVE-SHLD-NEXT: pushq %rbx
; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq (%rdi), %rcx
; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq 8(%rdi), %r8
@@ -6376,52 +5665,39 @@ define void @ashr_64bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, %ecx
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: andl $7, %ecx
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: andl $63, %ecx
; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shrl $3, %eax
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: andl $63, %eax
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq -120(%rsp,%rax), %rsi
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shrxq %rcx, %rsi, %r10
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq -104(%rsp,%rax), %r8
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shrxq %rcx, %r8, %r15
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq -88(%rsp,%rax), %r11
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shrxq %rcx, %r11, %r14
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, %r12d
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: notl %r12d
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: andl $63, %r12d
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq -112(%rsp,%rax), %r9
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: leaq (%r9,%r9), %rdi
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shlxq %r12, %rdi, %rdi
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: orq %r10, %rdi
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq -96(%rsp,%rax), %rbx
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: leaq (%rbx,%rbx), %r10
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shlxq %r12, %r10, %r10
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: orq %r15, %r10
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq -80(%rsp,%rax), %r13
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: leaq (%r13,%r13), %r15
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shlxq %r12, %r15, %r15
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq -72(%rsp,%rax), %r12
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: sarxq %rcx, %r12, %rbp
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq -128(%rsp,%rax), %rax
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: orq %r14, %r15
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shrdq %cl, %r8, %r9
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shrdq %cl, %r11, %rbx
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shrdq %cl, %r12, %r13
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: # kill: def $cl killed $cl killed $rcx
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shrdq %cl, %rsi, %rax
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %r13, 48(%rdx)
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %rbp, 56(%rdx)
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %rbx, 32(%rdx)
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %r9, 16(%rdx)
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %rax, (%rdx)
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: andl $56, %eax
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq -112(%rsp,%rax), %rdi
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq -128(%rsp,%rax), %rsi
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq -120(%rsp,%rax), %r9
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %r9, %r8
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shrdq %cl, %rdi, %r8
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq -96(%rsp,%rax), %r10
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq -104(%rsp,%rax), %r11
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %r11, %rbx
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shrdq %cl, %r10, %rbx
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shrdq %cl, %r11, %rdi
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq -80(%rsp,%rax), %r11
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq -88(%rsp,%rax), %r14
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %r14, %r15
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shrdq %cl, %r11, %r15
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shrdq %cl, %r14, %r10
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq -72(%rsp,%rax), %rax
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shrdq %cl, %rax, %r11
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shrdq %cl, %r9, %rsi
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: sarxq %rcx, %rax, %rax
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %r11, 48(%rdx)
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %rax, 56(%rdx)
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %r10, 32(%rdx)
; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %r15, 40(%rdx)
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %r10, 24(%rdx)
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %rdi, 8(%rdx)
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %rdi, 16(%rdx)
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %rbx, 24(%rdx)
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %rsi, (%rdx)
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %r8, 8(%rdx)
; X64-HAVE-BMI2-HAVE-SHLD-NEXT: popq %rbx
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: popq %r12
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: popq %r13
; X64-HAVE-BMI2-HAVE-SHLD-NEXT: popq %r14
; X64-HAVE-BMI2-HAVE-SHLD-NEXT: popq %r15
-; X64-HAVE-BMI2-HAVE-SHLD-NEXT: popq %rbp
; X64-HAVE-BMI2-HAVE-SHLD-NEXT: retq
;
; X86-NO-BMI2-NO-SHLD-LABEL: ashr_64bytes:
@@ -6430,12 +5706,12 @@ define void @ashr_64bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X86-NO-BMI2-NO-SHLD-NEXT: pushl %ebx
; X86-NO-BMI2-NO-SHLD-NEXT: pushl %edi
; X86-NO-BMI2-NO-SHLD-NEXT: pushl %esi
-; X86-NO-BMI2-NO-SHLD-NEXT: subl $208, %esp
+; X86-NO-BMI2-NO-SHLD-NEXT: subl $204, %esp
; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NO-BMI2-NO-SHLD-NEXT: movl (%eax), %ecx
; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NO-BMI2-NO-SHLD-NEXT: movl 4(%eax), %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, (%esp) # 4-byte Spill
; X86-NO-BMI2-NO-SHLD-NEXT: movl 8(%eax), %ecx
; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NO-BMI2-NO-SHLD-NEXT: movl 12(%eax), %ecx
@@ -6443,7 +5719,7 @@ define void @ashr_64bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X86-NO-BMI2-NO-SHLD-NEXT: movl 16(%eax), %ecx
; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NO-BMI2-NO-SHLD-NEXT: movl 20(%eax), %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, (%esp) # 4-byte Spill
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NO-BMI2-NO-SHLD-NEXT: movl 24(%eax), %ecx
; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NO-BMI2-NO-SHLD-NEXT: movl 28(%eax), %ecx
@@ -6452,19 +5728,19 @@ define void @ashr_64bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NO-BMI2-NO-SHLD-NEXT: movl 36(%eax), %ecx
; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 40(%eax), %ebp
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 44(%eax), %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 48(%eax), %edi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 40(%eax), %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 44(%eax), %edi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 48(%eax), %esi
; X86-NO-BMI2-NO-SHLD-NEXT: movl 52(%eax), %edx
; X86-NO-BMI2-NO-SHLD-NEXT: movl 56(%eax), %ecx
; X86-NO-BMI2-NO-SHLD-NEXT: movl 60(%eax), %eax
-; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X86-NO-BMI2-NO-SHLD-NEXT: movl (%esi), %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: movl (%ebp), %ebp
; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %esi, {{[0-9]+}}(%esp)
; X86-NO-BMI2-NO-SHLD-NEXT: movl %edi, {{[0-9]+}}(%esp)
; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebx, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebp, {{[0-9]+}}(%esp)
; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
@@ -6473,7 +5749,7 @@ define void @ashr_64bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl (%esp), %ecx # 4-byte Reload
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
@@ -6482,7 +5758,7 @@ define void @ashr_64bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NO-BMI2-NO-SHLD-NEXT: movl (%esp), %ecx # 4-byte Reload
; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
@@ -6503,196 +5779,195 @@ define void @ashr_64bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %esi, %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: andl $7, %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: shrl $3, %esi
-; X86-NO-BMI2-NO-SHLD-NEXT: andl $63, %esi
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 84(%esp,%esi), %edi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebp, %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: andl $31, %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: shrl $3, %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: andl $60, %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 68(%esp,%ebp), %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebx, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebx, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: notl %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 72(%esp,%ebp), %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: leal (%edx,%edx), %edi
+; X86-NO-BMI2-NO-SHLD-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %edi
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %esi, %edi
; X86-NO-BMI2-NO-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %edi
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 88(%esp,%esi), %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 64(%esp,%ebp), %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebx, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: xorb $31, %cl
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %cl, (%esp) # 1-byte Spill
+; X86-NO-BMI2-NO-SHLD-NEXT: addl %eax, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %esi, %eax
; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: leal (%eax,%eax), %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, %edx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, %eax
-; X86-NO-BMI2-NO-SHLD-NEXT: notl %edx
-; X86-NO-BMI2-NO-SHLD-NEXT: andl $31, %edx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: orl %edi, %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 92(%esp,%esi), %edi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 76(%esp,%ebp), %edi
; X86-NO-BMI2-NO-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %edi
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 96(%esp,%esi), %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: leal (%ecx,%ecx), %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: orl %edi, %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebx, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebx, %eax
; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 100(%esp,%esi), %edi
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %edi
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 104(%esp,%esi), %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: leal (%ecx,%ecx), %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 80(%esp,%ebp), %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: leal (%esi,%esi), %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NO-BMI2-NO-SHLD-NEXT: # kill: def $cl killed $cl killed $ecx
; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %ebx
; X86-NO-BMI2-NO-SHLD-NEXT: orl %edi, %ebx
; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 108(%esp,%esi), %edi
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-NO-BMI2-NO-SHLD-NEXT: addl %eax, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movzbl (%esp), %ecx # 1-byte Folded Reload
+; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %edx, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 84(%esp,%ebp), %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebx, %edi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, %ecx
; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %edi
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 112(%esp,%esi), %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: leal (%ecx,%ecx), %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 88(%esp,%ebp), %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-NO-SHLD-NEXT: addl %eax, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NO-BMI2-NO-SHLD-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %edi, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: addl %ebx, %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: movzbl (%esp), %ecx # 1-byte Folded Reload
; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: orl %edi, %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 116(%esp,%esi), %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %esi, %ebx
; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 92(%esp,%ebp), %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, %esi
; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 120(%esp,%esi), %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: leal (%ecx,%ecx), %edi
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 96(%esp,%ebp), %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-NO-SHLD-NEXT: leal (%eax,%eax), %edi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NO-BMI2-NO-SHLD-NEXT: # kill: def $cl killed $cl killed $ecx
; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %edi
-; X86-NO-BMI2-NO-SHLD-NEXT: orl %ebx, %edi
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %esi, %edi
; X86-NO-BMI2-NO-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 124(%esp,%esi), %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebx, (%esp) # 4-byte Spill
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebx, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: addl %edx, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: movzbl (%esp), %ecx # 1-byte Folded Reload
+; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %eax, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 100(%esp,%ebp), %edi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %edi, %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebx, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 104(%esp,%ebp), %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: leal (%edx,%edx), %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NO-BMI2-NO-SHLD-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %esi, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebx, %eax
; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 128(%esp,%esi), %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: leal (%ecx,%ecx), %edi
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: addl %edi, %edi
+; X86-NO-BMI2-NO-SHLD-NEXT: movzbl (%esp), %ecx # 1-byte Folded Reload
; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %edi
-; X86-NO-BMI2-NO-SHLD-NEXT: orl %ebx, %edi
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %esi, %edi
; X86-NO-BMI2-NO-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 132(%esp,%esi), %edi
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %edi, %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 108(%esp,%ebp), %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebx, %esi
; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 136(%esp,%esi), %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 112(%esp,%ebp), %ecx
; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: leal (%ecx,%ecx), %ebp
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %ebp
-; X86-NO-BMI2-NO-SHLD-NEXT: orl %ebx, %ebp
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 80(%esp,%esi), %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %edx
-; X86-NO-BMI2-NO-SHLD-NEXT: notb %dl
-; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
-; X86-NO-BMI2-NO-SHLD-NEXT: addl %ebp, %ebp
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %ebp
-; X86-NO-BMI2-NO-SHLD-NEXT: orl %ebx, %ebp
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
-; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
-; X86-NO-BMI2-NO-SHLD-NEXT: addl %ebp, %ebp
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %ebp
-; X86-NO-BMI2-NO-SHLD-NEXT: orl %ebx, %ebp
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
-; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
-; X86-NO-BMI2-NO-SHLD-NEXT: addl %ebp, %ebp
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %ebp
-; X86-NO-BMI2-NO-SHLD-NEXT: orl %ebx, %ebp
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
-; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
-; X86-NO-BMI2-NO-SHLD-NEXT: addl %ebp, %ebp
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %ebp
-; X86-NO-BMI2-NO-SHLD-NEXT: orl %ebx, %ebp
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
-; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
-; X86-NO-BMI2-NO-SHLD-NEXT: addl %ebp, %ebp
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %ebp
-; X86-NO-BMI2-NO-SHLD-NEXT: orl %ebx, %ebp
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
-; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl (%esp), %ebp # 4-byte Reload
-; X86-NO-BMI2-NO-SHLD-NEXT: addl %ebp, %ebp
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %ebp
-; X86-NO-BMI2-NO-SHLD-NEXT: orl %ebx, %ebp
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebp, (%esp) # 4-byte Spill
+; X86-NO-BMI2-NO-SHLD-NEXT: leal (%ecx,%ecx), %edi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NO-BMI2-NO-SHLD-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %edi
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %esi, %edi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
-; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: addl %edi, %edi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %edi
+; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: addl %ebx, %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: movzbl (%esp), %ecx # 1-byte Folded Reload
+; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %edx, %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 116(%esp,%ebp), %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %esi, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %edi, %edx
; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 120(%esp,%ebp), %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-NO-SHLD-NEXT: leal (%ecx,%ecx), %edi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NO-BMI2-NO-SHLD-NEXT: # kill: def $cl killed $cl killed $ecx
; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %edi
-; X86-NO-BMI2-NO-SHLD-NEXT: orl %ebx, %edi
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %eax, %edi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, %eax
; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: addl %esi, %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: movb (%esp), %ch # 1-byte Reload
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %ch, %cl
+; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %edx, %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %al, %cl
; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %eax
-; X86-NO-BMI2-NO-SHLD-NEXT: movl 140(%esp,%esi), %esi
-; X86-NO-BMI2-NO-SHLD-NEXT: leal (%esi,%esi), %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: orl %eax, %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebp, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 124(%esp,%ebp), %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: leal (%ebp,%ebp), %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %ch, %cl
+; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %eax, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NO-BMI2-NO-SHLD-NEXT: # kill: def $cl killed $cl killed $ecx
-; X86-NO-BMI2-NO-SHLD-NEXT: sarl %cl, %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: sarl %cl, %ebp
; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %esi, 60(%eax)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebx, 56(%eax)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %edi, 48(%eax)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl (%esp), %ecx # 4-byte Reload
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, 40(%eax)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, 32(%eax)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, 24(%eax)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, 16(%eax)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, 8(%eax)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, (%eax)
-; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, 52(%eax)
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebp, 60(%eax)
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, 56(%eax)
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %esi, 48(%eax)
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %edi, 52(%eax)
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebx, 40(%eax)
; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, 44(%eax)
; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, 32(%eax)
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, 36(%eax)
; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, 24(%eax)
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, 28(%eax)
; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, 16(%eax)
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, 20(%eax)
; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, 8(%eax)
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, 12(%eax)
; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, (%eax)
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, 4(%eax)
-; X86-NO-BMI2-NO-SHLD-NEXT: addl $208, %esp
+; X86-NO-BMI2-NO-SHLD-NEXT: addl $204, %esp
; X86-NO-BMI2-NO-SHLD-NEXT: popl %esi
; X86-NO-BMI2-NO-SHLD-NEXT: popl %edi
; X86-NO-BMI2-NO-SHLD-NEXT: popl %ebx
@@ -6705,7 +5980,7 @@ define void @ashr_64bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X86-NO-BMI2-HAVE-SHLD-NEXT: pushl %ebx
; X86-NO-BMI2-HAVE-SHLD-NEXT: pushl %edi
; X86-NO-BMI2-HAVE-SHLD-NEXT: pushl %esi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: subl $204, %esp
+; X86-NO-BMI2-HAVE-SHLD-NEXT: subl $188, %esp
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl (%eax), %ecx
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
@@ -6718,7 +5993,7 @@ define void @ashr_64bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 16(%eax), %ecx
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 20(%eax), %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, (%esp) # 4-byte Spill
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 24(%eax), %ecx
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 28(%eax), %ecx
@@ -6726,189 +6001,144 @@ define void @ashr_64bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 32(%eax), %ecx
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 36(%eax), %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, (%esp) # 4-byte Spill
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 40(%eax), %ebp
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 44(%eax), %ebx
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 48(%eax), %edi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 52(%eax), %edx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 56(%eax), %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 60(%eax), %eax
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl (%esi), %esi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 52(%eax), %esi
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 56(%eax), %edx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 60(%eax), %ecx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl (%eax), %eax
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %esi, {{[0-9]+}}(%esp)
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edi, {{[0-9]+}}(%esp)
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ebx, {{[0-9]+}}(%esp)
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ebp, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl (%esp), %edx # 4-byte Reload
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: sarl $31, %ecx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl (%esp), %ecx # 4-byte Reload
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: sarl $31, %eax
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %esi, %eax
-; X86-NO-BMI2-HAVE-SHLD-NEXT: andl $7, %eax
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shrl $3, %esi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: andl $63, %esi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 80(%esp,%esi), %edi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shrl %cl, %edi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 84(%esp,%esi), %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: leal (%ecx,%ecx), %ebx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, %edx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: notl %edx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: andl $31, %edx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edx, %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shll %cl, %ebx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: orl %edi, %ebx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 88(%esp,%esi), %edi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shrl %cl, %edi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 92(%esp,%esi), %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: leal (%ecx,%ecx), %ebx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edx, %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shll %cl, %ebx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: orl %edi, %ebx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 96(%esp,%esi), %edi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shrl %cl, %edi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 100(%esp,%esi), %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: leal (%ecx,%ecx), %ebx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edx, %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shll %cl, %ebx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: orl %edi, %ebx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 104(%esp,%esi), %edi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shrl %cl, %edi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 108(%esp,%esi), %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: leal (%ecx,%ecx), %ebx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edx, %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shll %cl, %ebx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: orl %edi, %ebx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 112(%esp,%esi), %edi
+; X86-NO-BMI2-HAVE-SHLD-NEXT: andl $31, %ecx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: shrl $3, %eax
+; X86-NO-BMI2-HAVE-SHLD-NEXT: andl $60, %eax
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 56(%esp,%eax), %esi
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 52(%esp,%eax), %edx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %esi, %edx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 64(%esp,%eax), %ebx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 60(%esp,%eax), %edx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edx, %edi
+; X86-NO-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %ebx, %edi
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shrl %cl, %edi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 116(%esp,%esi), %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: leal (%ecx,%ecx), %ebx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edx, %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shll %cl, %ebx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: orl %edi, %ebx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %edx, %esi
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 72(%esp,%eax), %edi
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 68(%esp,%eax), %edx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edx, %esi
+; X86-NO-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %edi, %esi
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %edx, %ebx
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 120(%esp,%esi), %edi
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 80(%esp,%eax), %ebx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 76(%esp,%eax), %edx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edx, %esi
+; X86-NO-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %ebx, %esi
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %edx, %edi
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shrl %cl, %edi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 124(%esp,%esi), %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, (%esp) # 4-byte Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: leal (%ecx,%ecx), %ebx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edx, %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shll %cl, %ebx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: orl %edi, %ebx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 88(%esp,%eax), %edi
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 84(%esp,%eax), %edx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edx, %esi
+; X86-NO-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %edi, %esi
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edi, %esi
+; X86-NO-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %edx, %ebx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ebx, (%esp) # 4-byte Spill
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 96(%esp,%eax), %edi
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 92(%esp,%eax), %edx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edx, %ebx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %edi, %ebx
; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 128(%esp,%esi), %edi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shrl %cl, %edi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 132(%esp,%esi), %ebx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: leal (%ebx,%ebx), %ebp
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edx, %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shll %cl, %ebp
-; X86-NO-BMI2-HAVE-SHLD-NEXT: orl %edi, %ebp
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, %ecx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %edx, (%esp) # 4-byte Folded Spill
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 76(%esp,%esi), %edi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 136(%esp,%esi), %esi
+; X86-NO-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %edx, %esi
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 104(%esp,%eax), %esi
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 100(%esp,%eax), %edx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edx, %ebx
; X86-NO-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %esi, %ebx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ebx, 56(%edx)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
-; X86-NO-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %ebx, %edi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: sarl %cl, %esi
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %esi, 60(%edx)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl (%esp), %eax # 4-byte Reload
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, 48(%edx)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, 40(%edx)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, 32(%edx)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, 24(%edx)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, 16(%edx)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, 8(%edx)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edi, (%edx)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ebp, 52(%edx)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, 44(%edx)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, 36(%edx)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, 28(%edx)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, 20(%edx)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, 12(%edx)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, 4(%edx)
-; X86-NO-BMI2-HAVE-SHLD-NEXT: addl $204, %esp
+; X86-NO-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %edx, %edi
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 48(%esp,%eax), %ebp
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl 108(%esp,%eax), %edx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %edx, %esi
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %esi, 56(%eax)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X86-NO-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %esi, %ebp
+; X86-NO-BMI2-HAVE-SHLD-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: sarl %cl, %edx
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edx, 60(%eax)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %edi, 48(%eax)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ebx, 52(%eax)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, 40(%eax)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, 44(%eax)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl (%esp), %ecx # 4-byte Reload
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, 32(%eax)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, 36(%eax)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, 24(%eax)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, 28(%eax)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, 16(%eax)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, 20(%eax)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, 8(%eax)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, 12(%eax)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ebp, (%eax)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NO-BMI2-HAVE-SHLD-NEXT: movl %ecx, 4(%eax)
+; X86-NO-BMI2-HAVE-SHLD-NEXT: addl $188, %esp
; X86-NO-BMI2-HAVE-SHLD-NEXT: popl %esi
; X86-NO-BMI2-HAVE-SHLD-NEXT: popl %edi
; X86-NO-BMI2-HAVE-SHLD-NEXT: popl %ebx
@@ -6942,199 +6172,199 @@ define void @ashr_64bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 32(%eax), %ecx
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 36(%eax), %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, (%esp) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 40(%eax), %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 44(%eax), %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 40(%eax), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 44(%eax), %ebp
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 48(%eax), %edi
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 52(%eax), %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 56(%eax), %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 60(%eax), %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl (%edx), %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 56(%eax), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 60(%eax), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl (%eax), %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edi, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebx, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebp, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl (%esp), %ecx # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: sarl $31, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: andl $7, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl $3, %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: andl $63, %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 80(%esp,%edx), %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 84(%esp,%edx), %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, %ecx, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: notl %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: andl $31, %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %esi, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edi, %esi, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ecx, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 92(%esp,%edx), %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, (%esp) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %ecx, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edi, %ecx, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 88(%esp,%edx), %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, %ecx, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ecx, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 100(%esp,%edx), %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %ecx, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edi, %ecx, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 96(%esp,%edx), %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, %ecx, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ecx, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 108(%esp,%edx), %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %ecx, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edi, %ecx, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 104(%esp,%edx), %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, %ecx, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ecx, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 116(%esp,%edx), %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %ecx, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edi, %ecx, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 112(%esp,%edx), %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, %ecx, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ecx, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: sarl $31, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebx, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: andl $31, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl $3, %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: andl $60, %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 68(%esp,%ebx), %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 72(%esp,%ebx), %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 124(%esp,%edx), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, %edi, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: notl %ecx
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %ecx, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edi, %ecx, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 120(%esp,%edx), %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, %ebx, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ecx, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%esi,%esi), %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %ebp, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %eax, %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 132(%esp,%edx), %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %ecx, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edi, %ecx, %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 128(%esp,%edx), %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, %esi, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ecx, %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: notb %cl
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: xorb $31, %al
; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %edi, %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %edi, %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, 76(%esp,%edx), %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edi, %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %ebp, %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %ebp, %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edi, %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, (%esp), %edi # 4-byte Folded Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %eax, %edi, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, 64(%esp,%ebx), %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edi, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 80(%esp,%ebx), %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%eax,%eax), %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %eax, %ebp, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 76(%esp,%ebx), %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, %ebp, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %ebp, %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %ebp, %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edi, %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebp, (%esp) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %ebp, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 88(%esp,%ebx), %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%eax,%eax), %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %ebp, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 84(%esp,%ebx), %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, %ebp, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %ebp, %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %ebp, %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edi, %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edi, %ebp, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 96(%esp,%ebx), %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%eax,%eax), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %esi, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 92(%esp,%ebx), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, %esi, %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebp, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %esi, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edi, %esi, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebp, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 104(%esp,%ebx), %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%eax,%eax), %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %ebp, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 100(%esp,%ebx), %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, %ebp, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %ebp, %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %ebp, %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edi, %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %ebx, %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %ebx, %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edi, %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %ebp, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 112(%esp,%ebx), %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%eax,%eax), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edi, %esi, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 108(%esp,%ebx), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, %esi, %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebp, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %esi, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %esi, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edi, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 136(%esp,%edx), %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: sarxl %eax, %edx, %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%edx,%edx), %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %eax, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %esi, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebp, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 120(%esp,%ebx), %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%eax,%eax), %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %esi, %edi, %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 116(%esp,%ebx), %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, %edi, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %edi, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %edi, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, %eax, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 124(%esp,%ebx), %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: sarxl %edx, %eax, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %eax, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %eax, %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %ebx
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edi, 60(%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 56(%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, 48(%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebx, 40(%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebp, 32(%eax)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, 60(%eax)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebx, 56(%eax)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edi, 48(%eax)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebp, 52(%eax)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 24(%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl (%esp), %ecx # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 16(%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 8(%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, (%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 52(%eax)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 40(%eax)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 44(%eax)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 32(%eax)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 36(%eax)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 24(%eax)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 28(%eax)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 16(%eax)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 20(%eax)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 8(%eax)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 12(%eax)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, (%eax)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 4(%eax)
; X86-HAVE-BMI2-NO-SHLD-NEXT: addl $204, %esp
; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %esi
@@ -7149,7 +6379,7 @@ define void @ashr_64bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: pushl %ebx
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: pushl %edi
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: pushl %esi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: subl $200, %esp
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: subl $188, %esp
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl (%eax), %ecx
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
@@ -7158,7 +6388,7 @@ define void @ashr_64bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 8(%eax), %ecx
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 12(%eax), %ecx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, (%esp) # 4-byte Spill
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 16(%eax), %ecx
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 20(%eax), %ecx
@@ -7170,173 +6400,142 @@ define void @ashr_64bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 32(%eax), %ecx
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 36(%eax), %ecx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, (%esp) # 4-byte Spill
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 40(%eax), %ebp
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 44(%eax), %ebx
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 48(%eax), %edi
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 52(%eax), %esi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 56(%eax), %ecx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 60(%eax), %eax
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl (%edx), %edx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 56(%eax), %edx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 60(%eax), %ecx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl (%eax), %eax
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edx, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %esi, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edi, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ebx, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ebp, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl (%esp), %edx # 4-byte Reload
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edx, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: sarl $31, %ecx
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl (%esp), %ecx # 4-byte Reload
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: sarl $31, %eax
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edx, %ecx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: andl $7, %ecx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrl $3, %edx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: andl $63, %edx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 76(%esp,%edx), %eax
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrxl %ecx, %eax, %esi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, %eax
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: notl %eax
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: andl $31, %eax
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 80(%esp,%edx), %edi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: addl %edi, %edi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shlxl %eax, %edi, %edi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: orl %esi, %edi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 88(%esp,%edx), %esi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: addl %esi, %esi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shlxl %eax, %esi, %edi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 84(%esp,%edx), %esi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrxl %ecx, %esi, %esi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: orl %esi, %edi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 96(%esp,%edx), %esi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: addl %esi, %esi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shlxl %eax, %esi, %edi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 92(%esp,%edx), %esi
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, %ecx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: andl $31, %ecx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrl $3, %eax
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: andl $60, %eax
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 56(%esp,%eax), %edi
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 52(%esp,%eax), %edx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %edi, %edx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 64(%esp,%eax), %ebx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 60(%esp,%eax), %edx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edx, %esi
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %ebx, %esi
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrxl %ecx, %esi, %esi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: orl %esi, %edi
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %edx, %edi
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 104(%esp,%edx), %esi
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 72(%esp,%eax), %edi
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 68(%esp,%eax), %edx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edx, %esi
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %edi, %esi
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: addl %esi, %esi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shlxl %eax, %esi, %edi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 100(%esp,%edx), %esi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrxl %ecx, %esi, %esi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: orl %esi, %edi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 112(%esp,%edx), %esi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %esi, (%esp) # 4-byte Spill
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: addl %esi, %esi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shlxl %eax, %esi, %edi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 108(%esp,%edx), %esi
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %edx, %ebx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 80(%esp,%eax), %ebx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 76(%esp,%eax), %edx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edx, %esi
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %ebx, %esi
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrxl %ecx, %esi, %esi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: orl %esi, %edi
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %edx, %edi
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 120(%esp,%edx), %ebx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: leal (%ebx,%ebx), %esi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shlxl %eax, %esi, %edi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 116(%esp,%edx), %esi
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 88(%esp,%eax), %ebp
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 84(%esp,%eax), %edx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edx, %esi
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %ebp, %esi
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrxl %ecx, %esi, %esi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: orl %esi, %edi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 128(%esp,%edx), %edi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: leal (%edi,%edi), %ebp
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shlxl %eax, %ebp, %esi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 124(%esp,%edx), %eax
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrxl %ecx, %eax, %ebp
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: orl %ebp, %esi
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %edx, %ebx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ebx, (%esp) # 4-byte Spill
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 96(%esp,%eax), %edi
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 92(%esp,%eax), %edx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edx, %esi
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %edi, %esi
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %ebp, %esi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %ebp, (%esp) # 4-byte Folded Spill
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %eax, %ebx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 72(%esp,%edx), %eax
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 132(%esp,%edx), %ebp
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %ebp, %edi
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edi, 56(%edx)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ebx, 48(%edx)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl (%esp), %edi # 4-byte Reload
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edi, 40(%edx)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %esi, 32(%edx)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %esi, 24(%edx)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %esi, 16(%edx)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %esi, 8(%edx)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: sarxl %ecx, %ebp, %esi
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %edx, %ebp
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 104(%esp,%eax), %edx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 100(%esp,%eax), %ebx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ebx, %esi
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %edx, %esi
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %ebx, %edi
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 48(%esp,%eax), %ebx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl 108(%esp,%eax), %eax
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %eax, %edx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edx, 56(%eax)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edi, 48(%eax)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %esi, 52(%eax)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ebp, 40(%eax)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edx, 44(%eax)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl (%esp), %edx # 4-byte Reload
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edx, 32(%eax)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edx, 36(%eax)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edx, 24(%eax)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edx, 28(%eax)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edx, 16(%eax)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edx, 20(%eax)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edx, 8(%eax)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edx, 12(%eax)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: sarxl %ecx, {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: # kill: def $cl killed $cl killed $ecx
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %edi, %eax
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, (%edx)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %esi, 60(%edx)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, 52(%edx)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, 44(%edx)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, 36(%edx)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, 28(%edx)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, 20(%edx)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, 12(%edx)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, 4(%edx)
-; X86-HAVE-BMI2-HAVE-SHLD-NEXT: addl $200, %esp
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: shrdl %cl, %esi, %ebx
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ebx, (%eax)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, 4(%eax)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: movl %edx, 60(%eax)
+; X86-HAVE-BMI2-HAVE-SHLD-NEXT: addl $188, %esp
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: popl %esi
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: popl %edi
; X86-HAVE-BMI2-HAVE-SHLD-NEXT: popl %ebx
diff --git a/llvm/test/CodeGen/X86/widen-load-of-small-alloca-with-zero-upper-half.ll b/llvm/test/CodeGen/X86/widen-load-of-small-alloca-with-zero-upper-half.ll
index 9ae1f27..044be12 100644
--- a/llvm/test/CodeGen/X86/widen-load-of-small-alloca-with-zero-upper-half.ll
+++ b/llvm/test/CodeGen/X86/widen-load-of-small-alloca-with-zero-upper-half.ll
@@ -432,30 +432,89 @@ define void @load_1byte_chunk_of_16byte_alloca_with_zero_upper_half(ptr %src, i6
; X64-HAVE-BMI2-NO-SHLD-NEXT: movb %cl, (%rdx)
; X64-HAVE-BMI2-NO-SHLD-NEXT: retq
;
-; X86-LABEL: load_1byte_chunk_of_16byte_alloca_with_zero_upper_half:
-; X86: # %bb.0:
-; X86-NEXT: subl $32, %esp
-; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X86-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; X86-NEXT: shll $3, %ecx
-; X86-NEXT: movss %xmm0, (%esp)
-; X86-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
-; X86-NEXT: movss %xmm0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: shrb $3, %cl
-; X86-NEXT: andb $15, %cl
-; X86-NEXT: movzbl %cl, %ecx
-; X86-NEXT: movzbl (%esp,%ecx), %ecx
-; X86-NEXT: movb %cl, (%eax)
-; X86-NEXT: addl $32, %esp
-; X86-NEXT: retl
+; X86-NO-BMI2-NO-SHLD-LABEL: load_1byte_chunk_of_16byte_alloca_with_zero_upper_half:
+; X86-NO-BMI2-NO-SHLD: # %bb.0:
+; X86-NO-BMI2-NO-SHLD-NEXT: pushl %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: subl $40, %esp
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
+; X86-NO-BMI2-NO-SHLD-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero
+; X86-NO-BMI2-NO-SHLD-NEXT: shll $3, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: xorps %xmm1, %xmm1
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movdqa %xmm0, (%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: shrb $3, %dl
+; X86-NO-BMI2-NO-SHLD-NEXT: andb $12, %dl
+; X86-NO-BMI2-NO-SHLD-NEXT: movzbl %dl, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl (%esp,%edx), %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 4(%esp,%edx), %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: notb %cl
+; X86-NO-BMI2-NO-SHLD-NEXT: addl %edx, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %esi, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %dl, (%eax)
+; X86-NO-BMI2-NO-SHLD-NEXT: addl $40, %esp
+; X86-NO-BMI2-NO-SHLD-NEXT: popl %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: retl
+;
+; X86-SHLD-LABEL: load_1byte_chunk_of_16byte_alloca_with_zero_upper_half:
+; X86-SHLD: # %bb.0:
+; X86-SHLD-NEXT: pushl %ebx
+; X86-SHLD-NEXT: subl $40, %esp
+; X86-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-SHLD-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
+; X86-SHLD-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero
+; X86-SHLD-NEXT: shll $3, %ecx
+; X86-SHLD-NEXT: xorps %xmm1, %xmm1
+; X86-SHLD-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-SHLD-NEXT: movdqa %xmm0, (%esp)
+; X86-SHLD-NEXT: movl %ecx, %edx
+; X86-SHLD-NEXT: shrb $3, %dl
+; X86-SHLD-NEXT: andb $12, %dl
+; X86-SHLD-NEXT: movzbl %dl, %edx
+; X86-SHLD-NEXT: movl (%esp,%edx), %ebx
+; X86-SHLD-NEXT: movl 4(%esp,%edx), %edx
+; X86-SHLD-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-SHLD-NEXT: shrdl %cl, %edx, %ebx
+; X86-SHLD-NEXT: movb %bl, (%eax)
+; X86-SHLD-NEXT: addl $40, %esp
+; X86-SHLD-NEXT: popl %ebx
+; X86-SHLD-NEXT: retl
+;
+; X86-HAVE-BMI2-NO-SHLD-LABEL: load_1byte_chunk_of_16byte_alloca_with_zero_upper_half:
+; X86-HAVE-BMI2-NO-SHLD: # %bb.0:
+; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: subl $40, %esp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shll $3, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: xorps %xmm1, %xmm1
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movdqa %xmm0, (%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrb $3, %dl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: andb $12, %dl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movzbl %dl, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, (%esp,%edx), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: notb %cl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 4(%esp,%edx), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %edx, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %edx, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movb %cl, (%eax)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl $40, %esp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: retl
%init = load <8 x i8>, ptr %src, align 1
%intermediate.sroa.0.0.vec.expand = shufflevector <8 x i8> %init, <8 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
%intermediate.sroa.0.0.vecblend = shufflevector <16 x i8> %intermediate.sroa.0.0.vec.expand, <16 x i8> <i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
@@ -505,30 +564,89 @@ define void @load_2byte_chunk_of_16byte_alloca_with_zero_upper_half(ptr %src, i6
; X64-HAVE-BMI2-NO-SHLD-NEXT: movw %cx, (%rdx)
; X64-HAVE-BMI2-NO-SHLD-NEXT: retq
;
-; X86-LABEL: load_2byte_chunk_of_16byte_alloca_with_zero_upper_half:
-; X86: # %bb.0:
-; X86-NEXT: subl $32, %esp
-; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X86-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; X86-NEXT: shll $3, %ecx
-; X86-NEXT: movss %xmm0, (%esp)
-; X86-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
-; X86-NEXT: movss %xmm0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: shrb $3, %cl
-; X86-NEXT: andb $15, %cl
-; X86-NEXT: movzbl %cl, %ecx
-; X86-NEXT: movl (%esp,%ecx), %ecx
-; X86-NEXT: movw %cx, (%eax)
-; X86-NEXT: addl $32, %esp
-; X86-NEXT: retl
+; X86-NO-BMI2-NO-SHLD-LABEL: load_2byte_chunk_of_16byte_alloca_with_zero_upper_half:
+; X86-NO-BMI2-NO-SHLD: # %bb.0:
+; X86-NO-BMI2-NO-SHLD-NEXT: pushl %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: subl $40, %esp
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
+; X86-NO-BMI2-NO-SHLD-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero
+; X86-NO-BMI2-NO-SHLD-NEXT: shll $3, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: xorps %xmm1, %xmm1
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movdqa %xmm0, (%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: shrb $3, %dl
+; X86-NO-BMI2-NO-SHLD-NEXT: andb $12, %dl
+; X86-NO-BMI2-NO-SHLD-NEXT: movzbl %dl, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl (%esp,%edx), %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 4(%esp,%edx), %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: notb %cl
+; X86-NO-BMI2-NO-SHLD-NEXT: addl %edx, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %esi, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: movw %dx, (%eax)
+; X86-NO-BMI2-NO-SHLD-NEXT: addl $40, %esp
+; X86-NO-BMI2-NO-SHLD-NEXT: popl %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: retl
+;
+; X86-SHLD-LABEL: load_2byte_chunk_of_16byte_alloca_with_zero_upper_half:
+; X86-SHLD: # %bb.0:
+; X86-SHLD-NEXT: pushl %esi
+; X86-SHLD-NEXT: subl $40, %esp
+; X86-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-SHLD-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
+; X86-SHLD-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero
+; X86-SHLD-NEXT: shll $3, %ecx
+; X86-SHLD-NEXT: xorps %xmm1, %xmm1
+; X86-SHLD-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-SHLD-NEXT: movdqa %xmm0, (%esp)
+; X86-SHLD-NEXT: movl %ecx, %edx
+; X86-SHLD-NEXT: shrb $3, %dl
+; X86-SHLD-NEXT: andb $12, %dl
+; X86-SHLD-NEXT: movzbl %dl, %edx
+; X86-SHLD-NEXT: movl (%esp,%edx), %esi
+; X86-SHLD-NEXT: movl 4(%esp,%edx), %edx
+; X86-SHLD-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-SHLD-NEXT: shrdl %cl, %edx, %esi
+; X86-SHLD-NEXT: movw %si, (%eax)
+; X86-SHLD-NEXT: addl $40, %esp
+; X86-SHLD-NEXT: popl %esi
+; X86-SHLD-NEXT: retl
+;
+; X86-HAVE-BMI2-NO-SHLD-LABEL: load_2byte_chunk_of_16byte_alloca_with_zero_upper_half:
+; X86-HAVE-BMI2-NO-SHLD: # %bb.0:
+; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: subl $40, %esp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shll $3, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: xorps %xmm1, %xmm1
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movdqa %xmm0, (%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrb $3, %dl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: andb $12, %dl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movzbl %dl, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, (%esp,%edx), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: notb %cl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 4(%esp,%edx), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %edx, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %edx, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movw %cx, (%eax)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl $40, %esp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: retl
%init = load <8 x i8>, ptr %src, align 1
%intermediate.sroa.0.0.vec.expand = shufflevector <8 x i8> %init, <8 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
%intermediate.sroa.0.0.vecblend = shufflevector <16 x i8> %intermediate.sroa.0.0.vec.expand, <16 x i8> <i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
@@ -577,30 +695,89 @@ define void @load_4byte_chunk_of_16byte_alloca_with_zero_upper_half(ptr %src, i6
; X64-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, (%rdx)
; X64-HAVE-BMI2-NO-SHLD-NEXT: retq
;
-; X86-LABEL: load_4byte_chunk_of_16byte_alloca_with_zero_upper_half:
-; X86: # %bb.0:
-; X86-NEXT: subl $32, %esp
-; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X86-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; X86-NEXT: shll $3, %ecx
-; X86-NEXT: movss %xmm0, (%esp)
-; X86-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
-; X86-NEXT: movss %xmm0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: shrb $3, %cl
-; X86-NEXT: andb $15, %cl
-; X86-NEXT: movzbl %cl, %ecx
-; X86-NEXT: movl (%esp,%ecx), %ecx
-; X86-NEXT: movl %ecx, (%eax)
-; X86-NEXT: addl $32, %esp
-; X86-NEXT: retl
+; X86-NO-BMI2-NO-SHLD-LABEL: load_4byte_chunk_of_16byte_alloca_with_zero_upper_half:
+; X86-NO-BMI2-NO-SHLD: # %bb.0:
+; X86-NO-BMI2-NO-SHLD-NEXT: pushl %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: subl $40, %esp
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
+; X86-NO-BMI2-NO-SHLD-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero
+; X86-NO-BMI2-NO-SHLD-NEXT: shll $3, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: xorps %xmm1, %xmm1
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movdqa %xmm0, (%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: shrb $3, %dl
+; X86-NO-BMI2-NO-SHLD-NEXT: andb $12, %dl
+; X86-NO-BMI2-NO-SHLD-NEXT: movzbl %dl, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl (%esp,%edx), %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 4(%esp,%edx), %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: notb %cl
+; X86-NO-BMI2-NO-SHLD-NEXT: addl %edx, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %esi, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, (%eax)
+; X86-NO-BMI2-NO-SHLD-NEXT: addl $40, %esp
+; X86-NO-BMI2-NO-SHLD-NEXT: popl %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: retl
+;
+; X86-SHLD-LABEL: load_4byte_chunk_of_16byte_alloca_with_zero_upper_half:
+; X86-SHLD: # %bb.0:
+; X86-SHLD-NEXT: pushl %esi
+; X86-SHLD-NEXT: subl $40, %esp
+; X86-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-SHLD-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
+; X86-SHLD-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero
+; X86-SHLD-NEXT: shll $3, %ecx
+; X86-SHLD-NEXT: xorps %xmm1, %xmm1
+; X86-SHLD-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-SHLD-NEXT: movdqa %xmm0, (%esp)
+; X86-SHLD-NEXT: movl %ecx, %edx
+; X86-SHLD-NEXT: shrb $3, %dl
+; X86-SHLD-NEXT: andb $12, %dl
+; X86-SHLD-NEXT: movzbl %dl, %edx
+; X86-SHLD-NEXT: movl (%esp,%edx), %esi
+; X86-SHLD-NEXT: movl 4(%esp,%edx), %edx
+; X86-SHLD-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-SHLD-NEXT: shrdl %cl, %edx, %esi
+; X86-SHLD-NEXT: movl %esi, (%eax)
+; X86-SHLD-NEXT: addl $40, %esp
+; X86-SHLD-NEXT: popl %esi
+; X86-SHLD-NEXT: retl
+;
+; X86-HAVE-BMI2-NO-SHLD-LABEL: load_4byte_chunk_of_16byte_alloca_with_zero_upper_half:
+; X86-HAVE-BMI2-NO-SHLD: # %bb.0:
+; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: subl $40, %esp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shll $3, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: xorps %xmm1, %xmm1
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movdqa %xmm0, (%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrb $3, %dl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: andb $12, %dl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movzbl %dl, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, (%esp,%edx), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: notb %cl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 4(%esp,%edx), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %edx, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %edx, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, (%eax)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl $40, %esp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: retl
%init = load <8 x i8>, ptr %src, align 1
%intermediate.sroa.0.0.vec.expand = shufflevector <8 x i8> %init, <8 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
%intermediate.sroa.0.0.vecblend = shufflevector <16 x i8> %intermediate.sroa.0.0.vec.expand, <16 x i8> <i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
@@ -649,32 +826,128 @@ define void @load_8byte_chunk_of_16byte_alloca_with_zero_upper_half(ptr %src, i6
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rcx, (%rdx)
; X64-HAVE-BMI2-NO-SHLD-NEXT: retq
;
-; X86-LABEL: load_8byte_chunk_of_16byte_alloca_with_zero_upper_half:
-; X86: # %bb.0:
-; X86-NEXT: subl $32, %esp
-; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X86-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; X86-NEXT: shll $3, %ecx
-; X86-NEXT: movss %xmm0, (%esp)
-; X86-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
-; X86-NEXT: movss %xmm0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: shrb $3, %cl
-; X86-NEXT: andb $15, %cl
-; X86-NEXT: movzbl %cl, %ecx
-; X86-NEXT: movl (%esp,%ecx), %edx
-; X86-NEXT: movl 4(%esp,%ecx), %ecx
-; X86-NEXT: movl %ecx, 4(%eax)
-; X86-NEXT: movl %edx, (%eax)
-; X86-NEXT: addl $32, %esp
-; X86-NEXT: retl
+; X86-NO-BMI2-NO-SHLD-LABEL: load_8byte_chunk_of_16byte_alloca_with_zero_upper_half:
+; X86-NO-BMI2-NO-SHLD: # %bb.0:
+; X86-NO-BMI2-NO-SHLD-NEXT: pushl %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: pushl %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: pushl %edi
+; X86-NO-BMI2-NO-SHLD-NEXT: pushl %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: subl $44, %esp
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
+; X86-NO-BMI2-NO-SHLD-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero
+; X86-NO-BMI2-NO-SHLD-NEXT: shll $3, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: xorps %xmm1, %xmm1
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movdqa %xmm0, (%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: shrb $3, %cl
+; X86-NO-BMI2-NO-SHLD-NEXT: andb $12, %cl
+; X86-NO-BMI2-NO-SHLD-NEXT: movzbl %cl, %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl (%esp,%ebx), %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 4(%esp,%ebx), %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: notb %cl
+; X86-NO-BMI2-NO-SHLD-NEXT: leal (%esi,%esi), %edi
+; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %edi
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %ebp, %edi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: andb $24, %al
+; X86-NO-BMI2-NO-SHLD-NEXT: notb %al
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 8(%esp,%ebx), %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: addl %ebx, %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %esi, %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebx, 4(%edx)
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %edi, (%edx)
+; X86-NO-BMI2-NO-SHLD-NEXT: addl $44, %esp
+; X86-NO-BMI2-NO-SHLD-NEXT: popl %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: popl %edi
+; X86-NO-BMI2-NO-SHLD-NEXT: popl %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: popl %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: retl
+;
+; X86-SHLD-LABEL: load_8byte_chunk_of_16byte_alloca_with_zero_upper_half:
+; X86-SHLD: # %bb.0:
+; X86-SHLD-NEXT: pushl %ebx
+; X86-SHLD-NEXT: pushl %edi
+; X86-SHLD-NEXT: pushl %esi
+; X86-SHLD-NEXT: subl $32, %esp
+; X86-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-SHLD-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
+; X86-SHLD-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero
+; X86-SHLD-NEXT: shll $3, %ecx
+; X86-SHLD-NEXT: xorps %xmm1, %xmm1
+; X86-SHLD-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-SHLD-NEXT: movdqa %xmm0, (%esp)
+; X86-SHLD-NEXT: movl %ecx, %edx
+; X86-SHLD-NEXT: shrb $3, %dl
+; X86-SHLD-NEXT: andb $12, %dl
+; X86-SHLD-NEXT: movzbl %dl, %edx
+; X86-SHLD-NEXT: movl 8(%esp,%edx), %esi
+; X86-SHLD-NEXT: movl (%esp,%edx), %edi
+; X86-SHLD-NEXT: movl 4(%esp,%edx), %edx
+; X86-SHLD-NEXT: movl %edx, %ebx
+; X86-SHLD-NEXT: shrdl %cl, %esi, %ebx
+; X86-SHLD-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-SHLD-NEXT: shrdl %cl, %edx, %edi
+; X86-SHLD-NEXT: movl %ebx, 4(%eax)
+; X86-SHLD-NEXT: movl %edi, (%eax)
+; X86-SHLD-NEXT: addl $32, %esp
+; X86-SHLD-NEXT: popl %esi
+; X86-SHLD-NEXT: popl %edi
+; X86-SHLD-NEXT: popl %ebx
+; X86-SHLD-NEXT: retl
+;
+; X86-HAVE-BMI2-NO-SHLD-LABEL: load_8byte_chunk_of_16byte_alloca_with_zero_upper_half:
+; X86-HAVE-BMI2-NO-SHLD: # %bb.0:
+; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: subl $44, %esp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shll $3, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: xorps %xmm1, %xmm1
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movdqa %xmm0, (%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrb $3, %dl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: andb $12, %dl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movzbl %dl, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, (%esp,%edx), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: notb %bl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 4(%esp,%edx), %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 8(%esp,%edx), %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%edi,%edi), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebx, %edx, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, %edi, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: andb $24, %cl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: notb %cl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %ebp, %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %ebp, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 4(%eax)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, (%eax)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl $44, %esp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: retl
%init = load <8 x i8>, ptr %src, align 1
%intermediate.sroa.0.0.vec.expand = shufflevector <8 x i8> %init, <8 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
%intermediate.sroa.0.0.vecblend = shufflevector <16 x i8> %intermediate.sroa.0.0.vec.expand, <16 x i8> <i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
@@ -689,58 +962,123 @@ define void @load_8byte_chunk_of_16byte_alloca_with_zero_upper_half(ptr %src, i6
}
define void @load_1byte_chunk_of_32byte_alloca_with_zero_upper_half(ptr %src, i64 %byteOff, ptr %dst) nounwind {
-; X64-LABEL: load_1byte_chunk_of_32byte_alloca_with_zero_upper_half:
-; X64: # %bb.0:
-; X64-NEXT: movdqu (%rdi), %xmm0
-; X64-NEXT: shll $3, %esi
-; X64-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; X64-NEXT: movq %xmm0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq %xmm1, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: shrb $3, %sil
-; X64-NEXT: movzbl %sil, %eax
-; X64-NEXT: movzbl -64(%rsp,%rax), %eax
-; X64-NEXT: movb %al, (%rdx)
-; X64-NEXT: retq
-;
-; X86-LABEL: load_1byte_chunk_of_32byte_alloca_with_zero_upper_half:
-; X86: # %bb.0:
-; X86-NEXT: subl $64, %esp
-; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X86-NEXT: movdqu (%edx), %xmm0
-; X86-NEXT: shll $3, %ecx
-; X86-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; X86-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
-; X86-NEXT: pshufd {{.*#+}} xmm3 = xmm0[3,3,3,3]
-; X86-NEXT: movd %xmm0, (%esp)
-; X86-NEXT: movd %xmm3, {{[0-9]+}}(%esp)
-; X86-NEXT: movd %xmm2, {{[0-9]+}}(%esp)
-; X86-NEXT: movd %xmm1, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: shrb $3, %cl
-; X86-NEXT: movzbl %cl, %ecx
-; X86-NEXT: movzbl (%esp,%ecx), %ecx
-; X86-NEXT: movb %cl, (%eax)
-; X86-NEXT: addl $64, %esp
-; X86-NEXT: retl
+; X64-NO-BMI2-LABEL: load_1byte_chunk_of_32byte_alloca_with_zero_upper_half:
+; X64-NO-BMI2: # %bb.0:
+; X64-NO-BMI2-NEXT: movups (%rdi), %xmm0
+; X64-NO-BMI2-NEXT: xorps %xmm1, %xmm1
+; X64-NO-BMI2-NEXT: leal (,%rsi,8), %ecx
+; X64-NO-BMI2-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-NEXT: movl %ecx, %eax
+; X64-NO-BMI2-NEXT: shrb $6, %al
+; X64-NO-BMI2-NEXT: movzbl %al, %eax
+; X64-NO-BMI2-NEXT: movq -72(%rsp,%rax,8), %rax
+; X64-NO-BMI2-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NO-BMI2-NEXT: shrq %cl, %rax
+; X64-NO-BMI2-NEXT: movb %al, (%rdx)
+; X64-NO-BMI2-NEXT: retq
+;
+; X64-BMI2-LABEL: load_1byte_chunk_of_32byte_alloca_with_zero_upper_half:
+; X64-BMI2: # %bb.0:
+; X64-BMI2-NEXT: movups (%rdi), %xmm0
+; X64-BMI2-NEXT: xorps %xmm1, %xmm1
+; X64-BMI2-NEXT: shll $3, %esi
+; X64-BMI2-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; X64-BMI2-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; X64-BMI2-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; X64-BMI2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; X64-BMI2-NEXT: movl %esi, %eax
+; X64-BMI2-NEXT: shrb $6, %al
+; X64-BMI2-NEXT: movzbl %al, %eax
+; X64-BMI2-NEXT: shrxq %rsi, -72(%rsp,%rax,8), %rax
+; X64-BMI2-NEXT: movb %al, (%rdx)
+; X64-BMI2-NEXT: retq
+;
+; X86-NO-BMI2-NO-SHLD-LABEL: load_1byte_chunk_of_32byte_alloca_with_zero_upper_half:
+; X86-NO-BMI2-NO-SHLD: # %bb.0:
+; X86-NO-BMI2-NO-SHLD-NEXT: pushl %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: subl $72, %esp
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: movups (%edx), %xmm0
+; X86-NO-BMI2-NO-SHLD-NEXT: xorps %xmm1, %xmm1
+; X86-NO-BMI2-NO-SHLD-NEXT: shll $3, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm0, (%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: shrb $5, %dl
+; X86-NO-BMI2-NO-SHLD-NEXT: movzbl %dl, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl (%esp,%edx,4), %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 4(%esp,%edx,4), %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: notb %cl
+; X86-NO-BMI2-NO-SHLD-NEXT: addl %edx, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %esi, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %dl, (%eax)
+; X86-NO-BMI2-NO-SHLD-NEXT: addl $72, %esp
+; X86-NO-BMI2-NO-SHLD-NEXT: popl %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: retl
+;
+; X86-SHLD-LABEL: load_1byte_chunk_of_32byte_alloca_with_zero_upper_half:
+; X86-SHLD: # %bb.0:
+; X86-SHLD-NEXT: pushl %ebx
+; X86-SHLD-NEXT: subl $72, %esp
+; X86-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-SHLD-NEXT: movups (%edx), %xmm0
+; X86-SHLD-NEXT: xorps %xmm1, %xmm1
+; X86-SHLD-NEXT: shll $3, %ecx
+; X86-SHLD-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-SHLD-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-SHLD-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-SHLD-NEXT: movaps %xmm0, (%esp)
+; X86-SHLD-NEXT: movl %ecx, %edx
+; X86-SHLD-NEXT: shrb $5, %dl
+; X86-SHLD-NEXT: movzbl %dl, %edx
+; X86-SHLD-NEXT: movl (%esp,%edx,4), %ebx
+; X86-SHLD-NEXT: movl 4(%esp,%edx,4), %edx
+; X86-SHLD-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-SHLD-NEXT: shrdl %cl, %edx, %ebx
+; X86-SHLD-NEXT: movb %bl, (%eax)
+; X86-SHLD-NEXT: addl $72, %esp
+; X86-SHLD-NEXT: popl %ebx
+; X86-SHLD-NEXT: retl
+;
+; X86-HAVE-BMI2-NO-SHLD-LABEL: load_1byte_chunk_of_32byte_alloca_with_zero_upper_half:
+; X86-HAVE-BMI2-NO-SHLD: # %bb.0:
+; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: subl $72, %esp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movups (%edx), %xmm0
+; X86-HAVE-BMI2-NO-SHLD-NEXT: xorps %xmm1, %xmm1
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shll $3, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm0, (%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrb $5, %dl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movzbl %dl, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, (%esp,%edx,4), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: notb %cl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 4(%esp,%edx,4), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %edx, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %edx, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movb %cl, (%eax)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl $72, %esp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: retl
%init = load <16 x i8>, ptr %src, align 1
%intermediate.sroa.0.0.vec.expand = shufflevector <16 x i8> %init, <16 x i8> poison, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
%intermediate.sroa.0.0.vecblend = shufflevector <32 x i8> %intermediate.sroa.0.0.vec.expand, <32 x i8> <i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
@@ -756,58 +1094,136 @@ define void @load_1byte_chunk_of_32byte_alloca_with_zero_upper_half(ptr %src, i6
}
define void @load_2byte_chunk_of_32byte_alloca_with_zero_upper_half(ptr %src, i64 %byteOff, ptr %dst) nounwind {
-; X64-LABEL: load_2byte_chunk_of_32byte_alloca_with_zero_upper_half:
-; X64: # %bb.0:
-; X64-NEXT: movdqu (%rdi), %xmm0
-; X64-NEXT: shll $3, %esi
-; X64-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; X64-NEXT: movq %xmm0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq %xmm1, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: shrb $3, %sil
-; X64-NEXT: movzbl %sil, %eax
-; X64-NEXT: movq -64(%rsp,%rax), %rax
-; X64-NEXT: movw %ax, (%rdx)
-; X64-NEXT: retq
-;
-; X86-LABEL: load_2byte_chunk_of_32byte_alloca_with_zero_upper_half:
-; X86: # %bb.0:
-; X86-NEXT: subl $64, %esp
-; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X86-NEXT: movdqu (%edx), %xmm0
-; X86-NEXT: shll $3, %ecx
-; X86-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; X86-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
-; X86-NEXT: pshufd {{.*#+}} xmm3 = xmm0[3,3,3,3]
-; X86-NEXT: movd %xmm0, (%esp)
-; X86-NEXT: movd %xmm3, {{[0-9]+}}(%esp)
-; X86-NEXT: movd %xmm2, {{[0-9]+}}(%esp)
-; X86-NEXT: movd %xmm1, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: shrb $3, %cl
-; X86-NEXT: movzbl %cl, %ecx
-; X86-NEXT: movl (%esp,%ecx), %ecx
-; X86-NEXT: movw %cx, (%eax)
-; X86-NEXT: addl $64, %esp
-; X86-NEXT: retl
+; X64-NO-BMI2-LABEL: load_2byte_chunk_of_32byte_alloca_with_zero_upper_half:
+; X64-NO-BMI2: # %bb.0:
+; X64-NO-BMI2-NEXT: movups (%rdi), %xmm0
+; X64-NO-BMI2-NEXT: xorps %xmm1, %xmm1
+; X64-NO-BMI2-NEXT: leal (,%rsi,8), %ecx
+; X64-NO-BMI2-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-NEXT: movl %ecx, %eax
+; X64-NO-BMI2-NEXT: shrb $6, %al
+; X64-NO-BMI2-NEXT: movzbl %al, %eax
+; X64-NO-BMI2-NEXT: movq -72(%rsp,%rax,8), %rsi
+; X64-NO-BMI2-NEXT: shrq %cl, %rsi
+; X64-NO-BMI2-NEXT: movl -64(%rsp,%rax,8), %eax
+; X64-NO-BMI2-NEXT: addl %eax, %eax
+; X64-NO-BMI2-NEXT: andb $56, %cl
+; X64-NO-BMI2-NEXT: notb %cl
+; X64-NO-BMI2-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NO-BMI2-NEXT: shlq %cl, %rax
+; X64-NO-BMI2-NEXT: orl %esi, %eax
+; X64-NO-BMI2-NEXT: movw %ax, (%rdx)
+; X64-NO-BMI2-NEXT: retq
+;
+; X64-BMI2-LABEL: load_2byte_chunk_of_32byte_alloca_with_zero_upper_half:
+; X64-BMI2: # %bb.0:
+; X64-BMI2-NEXT: movups (%rdi), %xmm0
+; X64-BMI2-NEXT: xorps %xmm1, %xmm1
+; X64-BMI2-NEXT: shll $3, %esi
+; X64-BMI2-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; X64-BMI2-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; X64-BMI2-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; X64-BMI2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; X64-BMI2-NEXT: movl %esi, %eax
+; X64-BMI2-NEXT: shrb $6, %al
+; X64-BMI2-NEXT: movzbl %al, %eax
+; X64-BMI2-NEXT: shrxq %rsi, -72(%rsp,%rax,8), %rcx
+; X64-BMI2-NEXT: # kill: def $sil killed $sil killed $rsi def $rsi
+; X64-BMI2-NEXT: andb $56, %sil
+; X64-BMI2-NEXT: notb %sil
+; X64-BMI2-NEXT: movl -64(%rsp,%rax,8), %eax
+; X64-BMI2-NEXT: addl %eax, %eax
+; X64-BMI2-NEXT: shlxq %rsi, %rax, %rax
+; X64-BMI2-NEXT: orl %eax, %ecx
+; X64-BMI2-NEXT: movw %cx, (%rdx)
+; X64-BMI2-NEXT: retq
+;
+; X86-NO-BMI2-NO-SHLD-LABEL: load_2byte_chunk_of_32byte_alloca_with_zero_upper_half:
+; X86-NO-BMI2-NO-SHLD: # %bb.0:
+; X86-NO-BMI2-NO-SHLD-NEXT: pushl %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: subl $72, %esp
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: movups (%edx), %xmm0
+; X86-NO-BMI2-NO-SHLD-NEXT: xorps %xmm1, %xmm1
+; X86-NO-BMI2-NO-SHLD-NEXT: shll $3, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm0, (%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: shrb $5, %dl
+; X86-NO-BMI2-NO-SHLD-NEXT: movzbl %dl, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl (%esp,%edx,4), %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 4(%esp,%edx,4), %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: notb %cl
+; X86-NO-BMI2-NO-SHLD-NEXT: addl %edx, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %esi, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: movw %dx, (%eax)
+; X86-NO-BMI2-NO-SHLD-NEXT: addl $72, %esp
+; X86-NO-BMI2-NO-SHLD-NEXT: popl %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: retl
+;
+; X86-SHLD-LABEL: load_2byte_chunk_of_32byte_alloca_with_zero_upper_half:
+; X86-SHLD: # %bb.0:
+; X86-SHLD-NEXT: pushl %esi
+; X86-SHLD-NEXT: subl $72, %esp
+; X86-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-SHLD-NEXT: movups (%edx), %xmm0
+; X86-SHLD-NEXT: xorps %xmm1, %xmm1
+; X86-SHLD-NEXT: shll $3, %ecx
+; X86-SHLD-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-SHLD-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-SHLD-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-SHLD-NEXT: movaps %xmm0, (%esp)
+; X86-SHLD-NEXT: movl %ecx, %edx
+; X86-SHLD-NEXT: shrb $5, %dl
+; X86-SHLD-NEXT: movzbl %dl, %edx
+; X86-SHLD-NEXT: movl (%esp,%edx,4), %esi
+; X86-SHLD-NEXT: movl 4(%esp,%edx,4), %edx
+; X86-SHLD-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-SHLD-NEXT: shrdl %cl, %edx, %esi
+; X86-SHLD-NEXT: movw %si, (%eax)
+; X86-SHLD-NEXT: addl $72, %esp
+; X86-SHLD-NEXT: popl %esi
+; X86-SHLD-NEXT: retl
+;
+; X86-HAVE-BMI2-NO-SHLD-LABEL: load_2byte_chunk_of_32byte_alloca_with_zero_upper_half:
+; X86-HAVE-BMI2-NO-SHLD: # %bb.0:
+; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: subl $72, %esp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movups (%edx), %xmm0
+; X86-HAVE-BMI2-NO-SHLD-NEXT: xorps %xmm1, %xmm1
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shll $3, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm0, (%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrb $5, %dl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movzbl %dl, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, (%esp,%edx,4), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: notb %cl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 4(%esp,%edx,4), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %edx, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %edx, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movw %cx, (%eax)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl $72, %esp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: retl
%init = load <16 x i8>, ptr %src, align 1
%intermediate.sroa.0.0.vec.expand = shufflevector <16 x i8> %init, <16 x i8> poison, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
%intermediate.sroa.0.0.vecblend = shufflevector <32 x i8> %intermediate.sroa.0.0.vec.expand, <32 x i8> <i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
@@ -822,58 +1238,136 @@ define void @load_2byte_chunk_of_32byte_alloca_with_zero_upper_half(ptr %src, i6
}
define void @load_4byte_chunk_of_32byte_alloca_with_zero_upper_half(ptr %src, i64 %byteOff, ptr %dst) nounwind {
-; X64-LABEL: load_4byte_chunk_of_32byte_alloca_with_zero_upper_half:
-; X64: # %bb.0:
-; X64-NEXT: movdqu (%rdi), %xmm0
-; X64-NEXT: shll $3, %esi
-; X64-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; X64-NEXT: movq %xmm0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq %xmm1, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: shrb $3, %sil
-; X64-NEXT: movzbl %sil, %eax
-; X64-NEXT: movl -64(%rsp,%rax), %eax
-; X64-NEXT: movl %eax, (%rdx)
-; X64-NEXT: retq
-;
-; X86-LABEL: load_4byte_chunk_of_32byte_alloca_with_zero_upper_half:
-; X86: # %bb.0:
-; X86-NEXT: subl $64, %esp
-; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X86-NEXT: movdqu (%edx), %xmm0
-; X86-NEXT: shll $3, %ecx
-; X86-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; X86-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
-; X86-NEXT: pshufd {{.*#+}} xmm3 = xmm0[3,3,3,3]
-; X86-NEXT: movd %xmm0, (%esp)
-; X86-NEXT: movd %xmm3, {{[0-9]+}}(%esp)
-; X86-NEXT: movd %xmm2, {{[0-9]+}}(%esp)
-; X86-NEXT: movd %xmm1, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: shrb $3, %cl
-; X86-NEXT: movzbl %cl, %ecx
-; X86-NEXT: movl (%esp,%ecx), %ecx
-; X86-NEXT: movl %ecx, (%eax)
-; X86-NEXT: addl $64, %esp
-; X86-NEXT: retl
+; X64-NO-BMI2-LABEL: load_4byte_chunk_of_32byte_alloca_with_zero_upper_half:
+; X64-NO-BMI2: # %bb.0:
+; X64-NO-BMI2-NEXT: movups (%rdi), %xmm0
+; X64-NO-BMI2-NEXT: xorps %xmm1, %xmm1
+; X64-NO-BMI2-NEXT: leal (,%rsi,8), %ecx
+; X64-NO-BMI2-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-NEXT: movl %ecx, %eax
+; X64-NO-BMI2-NEXT: shrb $6, %al
+; X64-NO-BMI2-NEXT: movzbl %al, %eax
+; X64-NO-BMI2-NEXT: movq -72(%rsp,%rax,8), %rsi
+; X64-NO-BMI2-NEXT: shrq %cl, %rsi
+; X64-NO-BMI2-NEXT: movl -64(%rsp,%rax,8), %eax
+; X64-NO-BMI2-NEXT: addl %eax, %eax
+; X64-NO-BMI2-NEXT: andb $56, %cl
+; X64-NO-BMI2-NEXT: notb %cl
+; X64-NO-BMI2-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NO-BMI2-NEXT: shlq %cl, %rax
+; X64-NO-BMI2-NEXT: orl %esi, %eax
+; X64-NO-BMI2-NEXT: movl %eax, (%rdx)
+; X64-NO-BMI2-NEXT: retq
+;
+; X64-BMI2-LABEL: load_4byte_chunk_of_32byte_alloca_with_zero_upper_half:
+; X64-BMI2: # %bb.0:
+; X64-BMI2-NEXT: movups (%rdi), %xmm0
+; X64-BMI2-NEXT: xorps %xmm1, %xmm1
+; X64-BMI2-NEXT: shll $3, %esi
+; X64-BMI2-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; X64-BMI2-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; X64-BMI2-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; X64-BMI2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; X64-BMI2-NEXT: movl %esi, %eax
+; X64-BMI2-NEXT: shrb $6, %al
+; X64-BMI2-NEXT: movzbl %al, %eax
+; X64-BMI2-NEXT: shrxq %rsi, -72(%rsp,%rax,8), %rcx
+; X64-BMI2-NEXT: # kill: def $sil killed $sil killed $rsi def $rsi
+; X64-BMI2-NEXT: andb $56, %sil
+; X64-BMI2-NEXT: notb %sil
+; X64-BMI2-NEXT: movl -64(%rsp,%rax,8), %eax
+; X64-BMI2-NEXT: addl %eax, %eax
+; X64-BMI2-NEXT: shlxq %rsi, %rax, %rax
+; X64-BMI2-NEXT: orl %eax, %ecx
+; X64-BMI2-NEXT: movl %ecx, (%rdx)
+; X64-BMI2-NEXT: retq
+;
+; X86-NO-BMI2-NO-SHLD-LABEL: load_4byte_chunk_of_32byte_alloca_with_zero_upper_half:
+; X86-NO-BMI2-NO-SHLD: # %bb.0:
+; X86-NO-BMI2-NO-SHLD-NEXT: pushl %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: subl $72, %esp
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: movups (%edx), %xmm0
+; X86-NO-BMI2-NO-SHLD-NEXT: xorps %xmm1, %xmm1
+; X86-NO-BMI2-NO-SHLD-NEXT: shll $3, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm0, (%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: shrb $5, %dl
+; X86-NO-BMI2-NO-SHLD-NEXT: movzbl %dl, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl (%esp,%edx,4), %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 4(%esp,%edx,4), %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: notb %cl
+; X86-NO-BMI2-NO-SHLD-NEXT: addl %edx, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %esi, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, (%eax)
+; X86-NO-BMI2-NO-SHLD-NEXT: addl $72, %esp
+; X86-NO-BMI2-NO-SHLD-NEXT: popl %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: retl
+;
+; X86-SHLD-LABEL: load_4byte_chunk_of_32byte_alloca_with_zero_upper_half:
+; X86-SHLD: # %bb.0:
+; X86-SHLD-NEXT: pushl %esi
+; X86-SHLD-NEXT: subl $72, %esp
+; X86-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-SHLD-NEXT: movups (%edx), %xmm0
+; X86-SHLD-NEXT: xorps %xmm1, %xmm1
+; X86-SHLD-NEXT: shll $3, %ecx
+; X86-SHLD-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-SHLD-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-SHLD-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-SHLD-NEXT: movaps %xmm0, (%esp)
+; X86-SHLD-NEXT: movl %ecx, %edx
+; X86-SHLD-NEXT: shrb $5, %dl
+; X86-SHLD-NEXT: movzbl %dl, %edx
+; X86-SHLD-NEXT: movl (%esp,%edx,4), %esi
+; X86-SHLD-NEXT: movl 4(%esp,%edx,4), %edx
+; X86-SHLD-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-SHLD-NEXT: shrdl %cl, %edx, %esi
+; X86-SHLD-NEXT: movl %esi, (%eax)
+; X86-SHLD-NEXT: addl $72, %esp
+; X86-SHLD-NEXT: popl %esi
+; X86-SHLD-NEXT: retl
+;
+; X86-HAVE-BMI2-NO-SHLD-LABEL: load_4byte_chunk_of_32byte_alloca_with_zero_upper_half:
+; X86-HAVE-BMI2-NO-SHLD: # %bb.0:
+; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: subl $72, %esp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movups (%edx), %xmm0
+; X86-HAVE-BMI2-NO-SHLD-NEXT: xorps %xmm1, %xmm1
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shll $3, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm0, (%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrb $5, %dl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movzbl %dl, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, (%esp,%edx,4), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: notb %cl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 4(%esp,%edx,4), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %edx, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %edx, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, (%eax)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl $72, %esp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: retl
%init = load <16 x i8>, ptr %src, align 1
%intermediate.sroa.0.0.vec.expand = shufflevector <16 x i8> %init, <16 x i8> poison, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
%intermediate.sroa.0.0.vecblend = shufflevector <32 x i8> %intermediate.sroa.0.0.vec.expand, <32 x i8> <i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
@@ -888,60 +1382,191 @@ define void @load_4byte_chunk_of_32byte_alloca_with_zero_upper_half(ptr %src, i6
}
define void @load_8byte_chunk_of_32byte_alloca_with_zero_upper_half(ptr %src, i64 %byteOff, ptr %dst) nounwind {
-; X64-LABEL: load_8byte_chunk_of_32byte_alloca_with_zero_upper_half:
-; X64: # %bb.0:
-; X64-NEXT: movdqu (%rdi), %xmm0
-; X64-NEXT: shll $3, %esi
-; X64-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; X64-NEXT: movq %xmm0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq %xmm1, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: shrb $3, %sil
-; X64-NEXT: movzbl %sil, %eax
-; X64-NEXT: movq -64(%rsp,%rax), %rax
-; X64-NEXT: movq %rax, (%rdx)
-; X64-NEXT: retq
-;
-; X86-LABEL: load_8byte_chunk_of_32byte_alloca_with_zero_upper_half:
-; X86: # %bb.0:
-; X86-NEXT: subl $64, %esp
-; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X86-NEXT: movdqu (%edx), %xmm0
-; X86-NEXT: shll $3, %ecx
-; X86-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; X86-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
-; X86-NEXT: pshufd {{.*#+}} xmm3 = xmm0[3,3,3,3]
-; X86-NEXT: movd %xmm0, (%esp)
-; X86-NEXT: movd %xmm3, {{[0-9]+}}(%esp)
-; X86-NEXT: movd %xmm2, {{[0-9]+}}(%esp)
-; X86-NEXT: movd %xmm1, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: shrb $3, %cl
-; X86-NEXT: movzbl %cl, %ecx
-; X86-NEXT: movl (%esp,%ecx), %edx
-; X86-NEXT: movl 4(%esp,%ecx), %ecx
-; X86-NEXT: movl %ecx, 4(%eax)
-; X86-NEXT: movl %edx, (%eax)
-; X86-NEXT: addl $64, %esp
-; X86-NEXT: retl
+; X64-NO-BMI2-NO-SHLD-LABEL: load_8byte_chunk_of_32byte_alloca_with_zero_upper_half:
+; X64-NO-BMI2-NO-SHLD: # %bb.0:
+; X64-NO-BMI2-NO-SHLD-NEXT: movups (%rdi), %xmm0
+; X64-NO-BMI2-NO-SHLD-NEXT: xorps %xmm1, %xmm1
+; X64-NO-BMI2-NO-SHLD-NEXT: leal (,%rsi,8), %ecx
+; X64-NO-BMI2-NO-SHLD-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-NO-SHLD-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-NO-SHLD-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-NO-SHLD-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-NO-SHLD-NEXT: movl %ecx, %eax
+; X64-NO-BMI2-NO-SHLD-NEXT: shrb $6, %al
+; X64-NO-BMI2-NO-SHLD-NEXT: movzbl %al, %eax
+; X64-NO-BMI2-NO-SHLD-NEXT: movq -72(%rsp,%rax,8), %rsi
+; X64-NO-BMI2-NO-SHLD-NEXT: movq -64(%rsp,%rax,8), %rax
+; X64-NO-BMI2-NO-SHLD-NEXT: shrq %cl, %rsi
+; X64-NO-BMI2-NO-SHLD-NEXT: notb %cl
+; X64-NO-BMI2-NO-SHLD-NEXT: addq %rax, %rax
+; X64-NO-BMI2-NO-SHLD-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NO-BMI2-NO-SHLD-NEXT: shlq %cl, %rax
+; X64-NO-BMI2-NO-SHLD-NEXT: orq %rsi, %rax
+; X64-NO-BMI2-NO-SHLD-NEXT: movq %rax, (%rdx)
+; X64-NO-BMI2-NO-SHLD-NEXT: retq
+;
+; X64-SHLD-LABEL: load_8byte_chunk_of_32byte_alloca_with_zero_upper_half:
+; X64-SHLD: # %bb.0:
+; X64-SHLD-NEXT: movups (%rdi), %xmm0
+; X64-SHLD-NEXT: xorps %xmm1, %xmm1
+; X64-SHLD-NEXT: leal (,%rsi,8), %ecx
+; X64-SHLD-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; X64-SHLD-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; X64-SHLD-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; X64-SHLD-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; X64-SHLD-NEXT: movl %ecx, %eax
+; X64-SHLD-NEXT: shrb $6, %al
+; X64-SHLD-NEXT: movzbl %al, %eax
+; X64-SHLD-NEXT: movq -72(%rsp,%rax,8), %rsi
+; X64-SHLD-NEXT: movq -64(%rsp,%rax,8), %rax
+; X64-SHLD-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-SHLD-NEXT: shrdq %cl, %rax, %rsi
+; X64-SHLD-NEXT: movq %rsi, (%rdx)
+; X64-SHLD-NEXT: retq
+;
+; X64-HAVE-BMI2-NO-SHLD-LABEL: load_8byte_chunk_of_32byte_alloca_with_zero_upper_half:
+; X64-HAVE-BMI2-NO-SHLD: # %bb.0:
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movups (%rdi), %xmm0
+; X64-HAVE-BMI2-NO-SHLD-NEXT: xorps %xmm1, %xmm1
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shll $3, %esi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, %eax
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrb $6, %al
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movzbl %al, %eax
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rsi, -72(%rsp,%rax,8), %rcx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: notb %sil
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -64(%rsp,%rax,8), %rax
+; X64-HAVE-BMI2-NO-SHLD-NEXT: addq %rax, %rax
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rsi, %rax, %rax
+; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %rcx, %rax
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rax, (%rdx)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: retq
+;
+; X86-NO-BMI2-NO-SHLD-LABEL: load_8byte_chunk_of_32byte_alloca_with_zero_upper_half:
+; X86-NO-BMI2-NO-SHLD: # %bb.0:
+; X86-NO-BMI2-NO-SHLD-NEXT: pushl %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: pushl %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: pushl %edi
+; X86-NO-BMI2-NO-SHLD-NEXT: pushl %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: subl $76, %esp
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: movups (%ecx), %xmm0
+; X86-NO-BMI2-NO-SHLD-NEXT: xorps %xmm1, %xmm1
+; X86-NO-BMI2-NO-SHLD-NEXT: shll $3, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm0, (%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: shrb $5, %cl
+; X86-NO-BMI2-NO-SHLD-NEXT: movzbl %cl, %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl (%esp,%ebx,4), %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 4(%esp,%ebx,4), %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: notb %cl
+; X86-NO-BMI2-NO-SHLD-NEXT: leal (%esi,%esi), %edi
+; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %edi
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %ebp, %edi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: andb $24, %al
+; X86-NO-BMI2-NO-SHLD-NEXT: notb %al
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 8(%esp,%ebx,4), %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: addl %ebx, %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %esi, %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebx, 4(%edx)
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %edi, (%edx)
+; X86-NO-BMI2-NO-SHLD-NEXT: addl $76, %esp
+; X86-NO-BMI2-NO-SHLD-NEXT: popl %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: popl %edi
+; X86-NO-BMI2-NO-SHLD-NEXT: popl %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: popl %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: retl
+;
+; X86-SHLD-LABEL: load_8byte_chunk_of_32byte_alloca_with_zero_upper_half:
+; X86-SHLD: # %bb.0:
+; X86-SHLD-NEXT: pushl %ebx
+; X86-SHLD-NEXT: pushl %edi
+; X86-SHLD-NEXT: pushl %esi
+; X86-SHLD-NEXT: subl $64, %esp
+; X86-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-SHLD-NEXT: movups (%edx), %xmm0
+; X86-SHLD-NEXT: xorps %xmm1, %xmm1
+; X86-SHLD-NEXT: shll $3, %ecx
+; X86-SHLD-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-SHLD-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-SHLD-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-SHLD-NEXT: movaps %xmm0, (%esp)
+; X86-SHLD-NEXT: movl %ecx, %edx
+; X86-SHLD-NEXT: shrb $5, %dl
+; X86-SHLD-NEXT: movzbl %dl, %edx
+; X86-SHLD-NEXT: movl 8(%esp,%edx,4), %esi
+; X86-SHLD-NEXT: movl (%esp,%edx,4), %edi
+; X86-SHLD-NEXT: movl 4(%esp,%edx,4), %edx
+; X86-SHLD-NEXT: movl %edx, %ebx
+; X86-SHLD-NEXT: shrdl %cl, %esi, %ebx
+; X86-SHLD-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-SHLD-NEXT: shrdl %cl, %edx, %edi
+; X86-SHLD-NEXT: movl %ebx, 4(%eax)
+; X86-SHLD-NEXT: movl %edi, (%eax)
+; X86-SHLD-NEXT: addl $64, %esp
+; X86-SHLD-NEXT: popl %esi
+; X86-SHLD-NEXT: popl %edi
+; X86-SHLD-NEXT: popl %ebx
+; X86-SHLD-NEXT: retl
+;
+; X86-HAVE-BMI2-NO-SHLD-LABEL: load_8byte_chunk_of_32byte_alloca_with_zero_upper_half:
+; X86-HAVE-BMI2-NO-SHLD: # %bb.0:
+; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: subl $76, %esp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movups (%edx), %xmm0
+; X86-HAVE-BMI2-NO-SHLD-NEXT: xorps %xmm1, %xmm1
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shll $3, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm0, (%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrb $5, %dl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movzbl %dl, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, (%esp,%edx,4), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: notb %bl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 4(%esp,%edx,4), %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 8(%esp,%edx,4), %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%edi,%edi), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebx, %edx, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, %edi, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: andb $24, %cl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: notb %cl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %ebp, %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %ebp, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 4(%eax)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, (%eax)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl $76, %esp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: retl
%init = load <16 x i8>, ptr %src, align 1
%intermediate.sroa.0.0.vec.expand = shufflevector <16 x i8> %init, <16 x i8> poison, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
%intermediate.sroa.0.0.vecblend = shufflevector <32 x i8> %intermediate.sroa.0.0.vec.expand, <32 x i8> <i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
@@ -956,70 +1581,288 @@ define void @load_8byte_chunk_of_32byte_alloca_with_zero_upper_half(ptr %src, i6
}
define void @load_16byte_chunk_of_32byte_alloca_with_zero_upper_half(ptr %src, i64 %byteOff, ptr %dst) nounwind {
-; X64-LABEL: load_16byte_chunk_of_32byte_alloca_with_zero_upper_half:
-; X64: # %bb.0:
-; X64-NEXT: movdqu (%rdi), %xmm0
-; X64-NEXT: shll $3, %esi
-; X64-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; X64-NEXT: movq %xmm0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq %xmm1, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: shrb $3, %sil
-; X64-NEXT: movzbl %sil, %eax
-; X64-NEXT: movq -64(%rsp,%rax), %rcx
-; X64-NEXT: movq -56(%rsp,%rax), %rax
-; X64-NEXT: movq %rax, 8(%rdx)
-; X64-NEXT: movq %rcx, (%rdx)
-; X64-NEXT: retq
-;
-; X86-LABEL: load_16byte_chunk_of_32byte_alloca_with_zero_upper_half:
-; X86: # %bb.0:
-; X86-NEXT: pushl %edi
-; X86-NEXT: pushl %esi
-; X86-NEXT: subl $64, %esp
-; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X86-NEXT: movdqu (%edx), %xmm0
-; X86-NEXT: shll $3, %ecx
-; X86-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; X86-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
-; X86-NEXT: pshufd {{.*#+}} xmm3 = xmm0[3,3,3,3]
-; X86-NEXT: movd %xmm0, (%esp)
-; X86-NEXT: movd %xmm3, {{[0-9]+}}(%esp)
-; X86-NEXT: movd %xmm2, {{[0-9]+}}(%esp)
-; X86-NEXT: movd %xmm1, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: shrb $3, %cl
-; X86-NEXT: movzbl %cl, %ecx
-; X86-NEXT: movl (%esp,%ecx), %edx
-; X86-NEXT: movl 4(%esp,%ecx), %esi
-; X86-NEXT: movl 8(%esp,%ecx), %edi
-; X86-NEXT: movl 12(%esp,%ecx), %ecx
-; X86-NEXT: movl %ecx, 12(%eax)
-; X86-NEXT: movl %edi, 8(%eax)
-; X86-NEXT: movl %esi, 4(%eax)
-; X86-NEXT: movl %edx, (%eax)
-; X86-NEXT: addl $64, %esp
-; X86-NEXT: popl %esi
-; X86-NEXT: popl %edi
-; X86-NEXT: retl
+; X64-NO-BMI2-NO-SHLD-LABEL: load_16byte_chunk_of_32byte_alloca_with_zero_upper_half:
+; X64-NO-BMI2-NO-SHLD: # %bb.0:
+; X64-NO-BMI2-NO-SHLD-NEXT: movups (%rdi), %xmm0
+; X64-NO-BMI2-NO-SHLD-NEXT: xorps %xmm1, %xmm1
+; X64-NO-BMI2-NO-SHLD-NEXT: leal (,%rsi,8), %eax
+; X64-NO-BMI2-NO-SHLD-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-NO-SHLD-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-NO-SHLD-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-NO-SHLD-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
+; X64-NO-BMI2-NO-SHLD-NEXT: shrb $6, %cl
+; X64-NO-BMI2-NO-SHLD-NEXT: movzbl %cl, %edi
+; X64-NO-BMI2-NO-SHLD-NEXT: movq -72(%rsp,%rdi,8), %r8
+; X64-NO-BMI2-NO-SHLD-NEXT: movq -64(%rsp,%rdi,8), %r9
+; X64-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
+; X64-NO-BMI2-NO-SHLD-NEXT: shrq %cl, %r8
+; X64-NO-BMI2-NO-SHLD-NEXT: movl %eax, %esi
+; X64-NO-BMI2-NO-SHLD-NEXT: notb %sil
+; X64-NO-BMI2-NO-SHLD-NEXT: leaq (%r9,%r9), %r10
+; X64-NO-BMI2-NO-SHLD-NEXT: movl %esi, %ecx
+; X64-NO-BMI2-NO-SHLD-NEXT: shlq %cl, %r10
+; X64-NO-BMI2-NO-SHLD-NEXT: orq %r8, %r10
+; X64-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
+; X64-NO-BMI2-NO-SHLD-NEXT: shrq %cl, %r9
+; X64-NO-BMI2-NO-SHLD-NEXT: movq -56(%rsp,%rdi,8), %rax
+; X64-NO-BMI2-NO-SHLD-NEXT: addq %rax, %rax
+; X64-NO-BMI2-NO-SHLD-NEXT: movl %esi, %ecx
+; X64-NO-BMI2-NO-SHLD-NEXT: shlq %cl, %rax
+; X64-NO-BMI2-NO-SHLD-NEXT: orq %r9, %rax
+; X64-NO-BMI2-NO-SHLD-NEXT: movq %rax, 8(%rdx)
+; X64-NO-BMI2-NO-SHLD-NEXT: movq %r10, (%rdx)
+; X64-NO-BMI2-NO-SHLD-NEXT: retq
+;
+; X64-NO-BMI2-HAVE-SHLD-LABEL: load_16byte_chunk_of_32byte_alloca_with_zero_upper_half:
+; X64-NO-BMI2-HAVE-SHLD: # %bb.0:
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movups (%rdi), %xmm0
+; X64-NO-BMI2-HAVE-SHLD-NEXT: xorps %xmm1, %xmm1
+; X64-NO-BMI2-HAVE-SHLD-NEXT: leal (,%rsi,8), %eax
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, %ecx
+; X64-NO-BMI2-HAVE-SHLD-NEXT: shrb $6, %cl
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movzbl %cl, %esi
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq -72(%rsp,%rsi,8), %rdi
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq -64(%rsp,%rsi,8), %r8
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %r8, %r9
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, %ecx
+; X64-NO-BMI2-HAVE-SHLD-NEXT: shrq %cl, %r9
+; X64-NO-BMI2-HAVE-SHLD-NEXT: notb %cl
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq -56(%rsp,%rsi,8), %rsi
+; X64-NO-BMI2-HAVE-SHLD-NEXT: addq %rsi, %rsi
+; X64-NO-BMI2-HAVE-SHLD-NEXT: shlq %cl, %rsi
+; X64-NO-BMI2-HAVE-SHLD-NEXT: orq %r9, %rsi
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, %ecx
+; X64-NO-BMI2-HAVE-SHLD-NEXT: shrdq %cl, %r8, %rdi
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %rdi, (%rdx)
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %rsi, 8(%rdx)
+; X64-NO-BMI2-HAVE-SHLD-NEXT: retq
+;
+; X64-HAVE-BMI2-NO-SHLD-LABEL: load_16byte_chunk_of_32byte_alloca_with_zero_upper_half:
+; X64-HAVE-BMI2-NO-SHLD: # %bb.0:
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movups (%rdi), %xmm0
+; X64-HAVE-BMI2-NO-SHLD-NEXT: xorps %xmm1, %xmm1
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shll $3, %esi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, %eax
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrb $6, %al
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movzbl %al, %eax
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rsi, -72(%rsp,%rax,8), %rcx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -64(%rsp,%rax,8), %rdi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rsi, %rdi, %r8
+; X64-HAVE-BMI2-NO-SHLD-NEXT: # kill: def $sil killed $sil killed $rsi def $rsi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: notb %sil
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -56(%rsp,%rax,8), %rax
+; X64-HAVE-BMI2-NO-SHLD-NEXT: addq %rdi, %rdi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rsi, %rdi, %rdi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %rcx, %rdi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: addq %rax, %rax
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rsi, %rax, %rax
+; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r8, %rax
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rax, 8(%rdx)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rdi, (%rdx)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: retq
+;
+; X64-HAVE-BMI2-HAVE-SHLD-LABEL: load_16byte_chunk_of_32byte_alloca_with_zero_upper_half:
+; X64-HAVE-BMI2-HAVE-SHLD: # %bb.0:
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movups (%rdi), %xmm0
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: xorps %xmm1, %xmm1
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: leal (,%rsi,8), %ecx
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, %eax
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shrb $6, %al
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movzbl %al, %eax
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq -72(%rsp,%rax,8), %rsi
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq -64(%rsp,%rax,8), %rdi
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shrxq %rcx, %rdi, %r8
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, %r9d
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: notb %r9b
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq -56(%rsp,%rax,8), %rax
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: addq %rax, %rax
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shlxq %r9, %rax, %rax
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: orq %r8, %rax
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: # kill: def $cl killed $cl killed $rcx
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shrdq %cl, %rdi, %rsi
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %rsi, (%rdx)
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %rax, 8(%rdx)
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: retq
+;
+; X86-NO-BMI2-NO-SHLD-LABEL: load_16byte_chunk_of_32byte_alloca_with_zero_upper_half:
+; X86-NO-BMI2-NO-SHLD: # %bb.0:
+; X86-NO-BMI2-NO-SHLD-NEXT: pushl %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: pushl %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: pushl %edi
+; X86-NO-BMI2-NO-SHLD-NEXT: pushl %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: subl $92, %esp
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: movups (%ecx), %xmm0
+; X86-NO-BMI2-NO-SHLD-NEXT: xorps %xmm1, %xmm1
+; X86-NO-BMI2-NO-SHLD-NEXT: shll $3, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: shrb $5, %cl
+; X86-NO-BMI2-NO-SHLD-NEXT: movzbl %cl, %edi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 16(%esp,%edi,4), %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 20(%esp,%edi,4), %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: notb %cl
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %cl, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
+; X86-NO-BMI2-NO-SHLD-NEXT: leal (%ebp,%ebp), %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %edx, %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %al, %ch
+; X86-NO-BMI2-NO-SHLD-NEXT: andb $24, %ch
+; X86-NO-BMI2-NO-SHLD-NEXT: xorb $31, %ch
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 24(%esp,%edi,4), %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: leal (%edx,%edx), %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %ch, %cl
+; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %ebp, %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %al, %cl
+; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 28(%esp,%edi,4), %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: leal (%esi,%esi), %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: movb {{[-0-9]+}}(%e{{[sb]}}p), %cl # 1-byte Reload
+; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %edx, %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %al, %cl
+; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 32(%esp,%edi,4), %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: addl %eax, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %ch, %cl
+; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %esi, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, 12(%ecx)
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebp, 8(%ecx)
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebx, 4(%ecx)
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, (%ecx)
+; X86-NO-BMI2-NO-SHLD-NEXT: addl $92, %esp
+; X86-NO-BMI2-NO-SHLD-NEXT: popl %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: popl %edi
+; X86-NO-BMI2-NO-SHLD-NEXT: popl %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: popl %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: retl
+;
+; X86-SHLD-LABEL: load_16byte_chunk_of_32byte_alloca_with_zero_upper_half:
+; X86-SHLD: # %bb.0:
+; X86-SHLD-NEXT: pushl %ebp
+; X86-SHLD-NEXT: pushl %ebx
+; X86-SHLD-NEXT: pushl %edi
+; X86-SHLD-NEXT: pushl %esi
+; X86-SHLD-NEXT: subl $92, %esp
+; X86-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SHLD-NEXT: movups (%eax), %xmm0
+; X86-SHLD-NEXT: xorps %xmm1, %xmm1
+; X86-SHLD-NEXT: shll $3, %ecx
+; X86-SHLD-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-SHLD-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-SHLD-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-SHLD-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; X86-SHLD-NEXT: movl %ecx, %eax
+; X86-SHLD-NEXT: shrb $5, %al
+; X86-SHLD-NEXT: movzbl %al, %ebx
+; X86-SHLD-NEXT: movl 24(%esp,%ebx,4), %esi
+; X86-SHLD-NEXT: movl 16(%esp,%ebx,4), %eax
+; X86-SHLD-NEXT: movl 20(%esp,%ebx,4), %edi
+; X86-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-SHLD-NEXT: shrdl %cl, %esi, %edi
+; X86-SHLD-NEXT: movl 28(%esp,%ebx,4), %ebp
+; X86-SHLD-NEXT: shrdl %cl, %ebp, %esi
+; X86-SHLD-NEXT: movl 32(%esp,%ebx,4), %ebx
+; X86-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-SHLD-NEXT: shrdl %cl, %ebx, %ebp
+; X86-SHLD-NEXT: movl %ebp, 12(%edx)
+; X86-SHLD-NEXT: movl %esi, 8(%edx)
+; X86-SHLD-NEXT: movl %edi, 4(%edx)
+; X86-SHLD-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X86-SHLD-NEXT: shrdl %cl, %esi, %eax
+; X86-SHLD-NEXT: movl %eax, (%edx)
+; X86-SHLD-NEXT: addl $92, %esp
+; X86-SHLD-NEXT: popl %esi
+; X86-SHLD-NEXT: popl %edi
+; X86-SHLD-NEXT: popl %ebx
+; X86-SHLD-NEXT: popl %ebp
+; X86-SHLD-NEXT: retl
+;
+; X86-HAVE-BMI2-NO-SHLD-LABEL: load_16byte_chunk_of_32byte_alloca_with_zero_upper_half:
+; X86-HAVE-BMI2-NO-SHLD: # %bb.0:
+; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: subl $92, %esp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movups (%ecx), %xmm0
+; X86-HAVE-BMI2-NO-SHLD-NEXT: xorps %xmm1, %xmm1
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shll $3, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrb $5, %cl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movzbl %cl, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, 16(%esp,%ecx,4), %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 20(%esp,%ecx,4), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, %esi, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: notb %dl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %esi, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %esi, %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edi, %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 24(%esp,%ecx,4), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: andb $24, %bl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: xorb $31, %bl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%esi,%esi), %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebx, %edi, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, %esi, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 28(%esp,%ecx,4), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, %esi, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %esi, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %esi, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 32(%esp,%ecx,4), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %ecx, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebx, %ecx, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %eax, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 12(%eax)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, 8(%eax)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edi, 4(%eax)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebp, (%eax)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl $92, %esp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: retl
%init = load <16 x i8>, ptr %src, align 1
%intermediate.sroa.0.0.vec.expand = shufflevector <16 x i8> %init, <16 x i8> poison, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
%intermediate.sroa.0.0.vecblend = shufflevector <32 x i8> %intermediate.sroa.0.0.vec.expand, <32 x i8> <i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
@@ -1034,84 +1877,155 @@ define void @load_16byte_chunk_of_32byte_alloca_with_zero_upper_half(ptr %src, i
}
define void @load_1byte_chunk_of_64byte_alloca_with_zero_upper_half(ptr %src, i64 %byteOff, ptr %dst) nounwind {
-; X64-LABEL: load_1byte_chunk_of_64byte_alloca_with_zero_upper_half:
-; X64: # %bb.0:
-; X64-NEXT: movdqu (%rdi), %xmm0
-; X64-NEXT: movdqu 16(%rdi), %xmm1
-; X64-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
-; X64-NEXT: pshufd {{.*#+}} xmm3 = xmm1[2,3,2,3]
-; X64-NEXT: movq %xmm1, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq %xmm0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq %xmm3, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq %xmm2, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: andl $63, %esi
-; X64-NEXT: movzbl -128(%rsp,%rsi), %eax
-; X64-NEXT: movb %al, (%rdx)
-; X64-NEXT: retq
-;
-; X86-LABEL: load_1byte_chunk_of_64byte_alloca_with_zero_upper_half:
-; X86: # %bb.0:
-; X86-NEXT: subl $128, %esp
-; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X86-NEXT: movdqu (%edx), %xmm0
-; X86-NEXT: movdqu 16(%edx), %xmm1
-; X86-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,1,1]
-; X86-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,2,3]
-; X86-NEXT: pshufd {{.*#+}} xmm4 = xmm0[3,3,3,3]
-; X86-NEXT: pshufd {{.*#+}} xmm5 = xmm1[1,1,1,1]
-; X86-NEXT: pshufd {{.*#+}} xmm6 = xmm1[2,3,2,3]
-; X86-NEXT: pshufd {{.*#+}} xmm7 = xmm1[3,3,3,3]
-; X86-NEXT: movd %xmm1, {{[0-9]+}}(%esp)
-; X86-NEXT: movd %xmm0, (%esp)
-; X86-NEXT: movd %xmm7, {{[0-9]+}}(%esp)
-; X86-NEXT: movd %xmm6, {{[0-9]+}}(%esp)
-; X86-NEXT: movd %xmm5, {{[0-9]+}}(%esp)
-; X86-NEXT: movd %xmm4, {{[0-9]+}}(%esp)
-; X86-NEXT: movd %xmm3, {{[0-9]+}}(%esp)
-; X86-NEXT: movd %xmm2, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: andl $63, %ecx
-; X86-NEXT: movzbl (%esp,%ecx), %ecx
-; X86-NEXT: movb %cl, (%eax)
-; X86-NEXT: addl $128, %esp
-; X86-NEXT: retl
+; X64-NO-BMI2-LABEL: load_1byte_chunk_of_64byte_alloca_with_zero_upper_half:
+; X64-NO-BMI2: # %bb.0:
+; X64-NO-BMI2-NEXT: pushq %rax
+; X64-NO-BMI2-NEXT: movups (%rdi), %xmm0
+; X64-NO-BMI2-NEXT: movups 16(%rdi), %xmm1
+; X64-NO-BMI2-NEXT: xorps %xmm2, %xmm2
+; X64-NO-BMI2-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-NEXT: leal (,%rsi,8), %ecx
+; X64-NO-BMI2-NEXT: andl $56, %ecx
+; X64-NO-BMI2-NEXT: andl $56, %esi
+; X64-NO-BMI2-NEXT: movq -128(%rsp,%rsi), %rax
+; X64-NO-BMI2-NEXT: shrq %cl, %rax
+; X64-NO-BMI2-NEXT: movl -120(%rsp,%rsi), %esi
+; X64-NO-BMI2-NEXT: addl %esi, %esi
+; X64-NO-BMI2-NEXT: notl %ecx
+; X64-NO-BMI2-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NO-BMI2-NEXT: shlq %cl, %rsi
+; X64-NO-BMI2-NEXT: orl %eax, %esi
+; X64-NO-BMI2-NEXT: movb %sil, (%rdx)
+; X64-NO-BMI2-NEXT: popq %rax
+; X64-NO-BMI2-NEXT: retq
+;
+; X64-BMI2-LABEL: load_1byte_chunk_of_64byte_alloca_with_zero_upper_half:
+; X64-BMI2: # %bb.0:
+; X64-BMI2-NEXT: pushq %rax
+; X64-BMI2-NEXT: movups (%rdi), %xmm0
+; X64-BMI2-NEXT: movups 16(%rdi), %xmm1
+; X64-BMI2-NEXT: xorps %xmm2, %xmm2
+; X64-BMI2-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-BMI2-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-BMI2-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-BMI2-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-BMI2-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-BMI2-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-BMI2-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; X64-BMI2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; X64-BMI2-NEXT: leal (,%rsi,8), %eax
+; X64-BMI2-NEXT: andl $56, %eax
+; X64-BMI2-NEXT: andl $56, %esi
+; X64-BMI2-NEXT: shrxq %rax, -128(%rsp,%rsi), %rcx
+; X64-BMI2-NEXT: # kill: def $eax killed $eax killed $rax def $rax
+; X64-BMI2-NEXT: notl %eax
+; X64-BMI2-NEXT: movl -120(%rsp,%rsi), %esi
+; X64-BMI2-NEXT: addl %esi, %esi
+; X64-BMI2-NEXT: shlxq %rax, %rsi, %rax
+; X64-BMI2-NEXT: orl %eax, %ecx
+; X64-BMI2-NEXT: movb %cl, (%rdx)
+; X64-BMI2-NEXT: popq %rax
+; X64-BMI2-NEXT: retq
+;
+; X86-NO-BMI2-NO-SHLD-LABEL: load_1byte_chunk_of_64byte_alloca_with_zero_upper_half:
+; X86-NO-BMI2-NO-SHLD: # %bb.0:
+; X86-NO-BMI2-NO-SHLD-NEXT: pushl %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: subl $136, %esp
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: movups (%ecx), %xmm0
+; X86-NO-BMI2-NO-SHLD-NEXT: movups 16(%ecx), %xmm1
+; X86-NO-BMI2-NO-SHLD-NEXT: xorps %xmm2, %xmm2
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm0, (%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: leal (,%edx,8), %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: andl $60, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl (%esp,%edx), %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 4(%esp,%edx), %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: notb %cl
+; X86-NO-BMI2-NO-SHLD-NEXT: addl %edx, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %esi, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %dl, (%eax)
+; X86-NO-BMI2-NO-SHLD-NEXT: addl $136, %esp
+; X86-NO-BMI2-NO-SHLD-NEXT: popl %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: retl
+;
+; X86-SHLD-LABEL: load_1byte_chunk_of_64byte_alloca_with_zero_upper_half:
+; X86-SHLD: # %bb.0:
+; X86-SHLD-NEXT: pushl %ebx
+; X86-SHLD-NEXT: subl $136, %esp
+; X86-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-SHLD-NEXT: movups (%ecx), %xmm0
+; X86-SHLD-NEXT: movups 16(%ecx), %xmm1
+; X86-SHLD-NEXT: xorps %xmm2, %xmm2
+; X86-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-SHLD-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-SHLD-NEXT: movaps %xmm0, (%esp)
+; X86-SHLD-NEXT: leal (,%edx,8), %ecx
+; X86-SHLD-NEXT: andl $60, %edx
+; X86-SHLD-NEXT: movl (%esp,%edx), %ebx
+; X86-SHLD-NEXT: movl 4(%esp,%edx), %edx
+; X86-SHLD-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-SHLD-NEXT: shrdl %cl, %edx, %ebx
+; X86-SHLD-NEXT: movb %bl, (%eax)
+; X86-SHLD-NEXT: addl $136, %esp
+; X86-SHLD-NEXT: popl %ebx
+; X86-SHLD-NEXT: retl
+;
+; X86-HAVE-BMI2-NO-SHLD-LABEL: load_1byte_chunk_of_64byte_alloca_with_zero_upper_half:
+; X86-HAVE-BMI2-NO-SHLD: # %bb.0:
+; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: subl $136, %esp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movups (%edx), %xmm0
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movups 16(%edx), %xmm1
+; X86-HAVE-BMI2-NO-SHLD-NEXT: xorps %xmm2, %xmm2
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm0, (%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (,%ecx,8), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: andl $60, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, (%esp,%ecx), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: notb %dl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 4(%esp,%ecx), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %ecx, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %ecx, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movb %cl, (%eax)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl $136, %esp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: retl
%init = load <32 x i8>, ptr %src, align 1
%intermediate.sroa.0.0.vec.expand = shufflevector <32 x i8> %init, <32 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
%intermediate.sroa.0.0.vecblend = shufflevector <64 x i8> %intermediate.sroa.0.0.vec.expand, <64 x i8> <i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 96, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 104, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111, i32 112, i32 113, i32 114, i32 115, i32 116, i32 117, i32 118, i32 119, i32 120, i32 121, i32 122, i32 123, i32 124, i32 125, i32 126, i32 127>
@@ -1127,84 +2041,155 @@ define void @load_1byte_chunk_of_64byte_alloca_with_zero_upper_half(ptr %src, i6
}
define void @load_2byte_chunk_of_64byte_alloca_with_zero_upper_half(ptr %src, i64 %byteOff, ptr %dst) nounwind {
-; X64-LABEL: load_2byte_chunk_of_64byte_alloca_with_zero_upper_half:
-; X64: # %bb.0:
-; X64-NEXT: movdqu (%rdi), %xmm0
-; X64-NEXT: movdqu 16(%rdi), %xmm1
-; X64-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
-; X64-NEXT: pshufd {{.*#+}} xmm3 = xmm1[2,3,2,3]
-; X64-NEXT: movq %xmm1, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq %xmm0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq %xmm3, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq %xmm2, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: andl $63, %esi
-; X64-NEXT: movq -128(%rsp,%rsi), %rax
-; X64-NEXT: movw %ax, (%rdx)
-; X64-NEXT: retq
-;
-; X86-LABEL: load_2byte_chunk_of_64byte_alloca_with_zero_upper_half:
-; X86: # %bb.0:
-; X86-NEXT: subl $128, %esp
-; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X86-NEXT: movdqu (%edx), %xmm0
-; X86-NEXT: movdqu 16(%edx), %xmm1
-; X86-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,1,1]
-; X86-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,2,3]
-; X86-NEXT: pshufd {{.*#+}} xmm4 = xmm0[3,3,3,3]
-; X86-NEXT: pshufd {{.*#+}} xmm5 = xmm1[1,1,1,1]
-; X86-NEXT: pshufd {{.*#+}} xmm6 = xmm1[2,3,2,3]
-; X86-NEXT: pshufd {{.*#+}} xmm7 = xmm1[3,3,3,3]
-; X86-NEXT: movd %xmm1, {{[0-9]+}}(%esp)
-; X86-NEXT: movd %xmm0, (%esp)
-; X86-NEXT: movd %xmm7, {{[0-9]+}}(%esp)
-; X86-NEXT: movd %xmm6, {{[0-9]+}}(%esp)
-; X86-NEXT: movd %xmm5, {{[0-9]+}}(%esp)
-; X86-NEXT: movd %xmm4, {{[0-9]+}}(%esp)
-; X86-NEXT: movd %xmm3, {{[0-9]+}}(%esp)
-; X86-NEXT: movd %xmm2, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: andl $63, %ecx
-; X86-NEXT: movl (%esp,%ecx), %ecx
-; X86-NEXT: movw %cx, (%eax)
-; X86-NEXT: addl $128, %esp
-; X86-NEXT: retl
+; X64-NO-BMI2-LABEL: load_2byte_chunk_of_64byte_alloca_with_zero_upper_half:
+; X64-NO-BMI2: # %bb.0:
+; X64-NO-BMI2-NEXT: pushq %rax
+; X64-NO-BMI2-NEXT: movups (%rdi), %xmm0
+; X64-NO-BMI2-NEXT: movups 16(%rdi), %xmm1
+; X64-NO-BMI2-NEXT: xorps %xmm2, %xmm2
+; X64-NO-BMI2-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-NEXT: leal (,%rsi,8), %ecx
+; X64-NO-BMI2-NEXT: andl $56, %ecx
+; X64-NO-BMI2-NEXT: andl $56, %esi
+; X64-NO-BMI2-NEXT: movq -128(%rsp,%rsi), %rax
+; X64-NO-BMI2-NEXT: shrq %cl, %rax
+; X64-NO-BMI2-NEXT: movl -120(%rsp,%rsi), %esi
+; X64-NO-BMI2-NEXT: addl %esi, %esi
+; X64-NO-BMI2-NEXT: notl %ecx
+; X64-NO-BMI2-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NO-BMI2-NEXT: shlq %cl, %rsi
+; X64-NO-BMI2-NEXT: orl %eax, %esi
+; X64-NO-BMI2-NEXT: movw %si, (%rdx)
+; X64-NO-BMI2-NEXT: popq %rax
+; X64-NO-BMI2-NEXT: retq
+;
+; X64-BMI2-LABEL: load_2byte_chunk_of_64byte_alloca_with_zero_upper_half:
+; X64-BMI2: # %bb.0:
+; X64-BMI2-NEXT: pushq %rax
+; X64-BMI2-NEXT: movups (%rdi), %xmm0
+; X64-BMI2-NEXT: movups 16(%rdi), %xmm1
+; X64-BMI2-NEXT: xorps %xmm2, %xmm2
+; X64-BMI2-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-BMI2-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-BMI2-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-BMI2-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-BMI2-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-BMI2-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-BMI2-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; X64-BMI2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; X64-BMI2-NEXT: leal (,%rsi,8), %eax
+; X64-BMI2-NEXT: andl $56, %eax
+; X64-BMI2-NEXT: andl $56, %esi
+; X64-BMI2-NEXT: shrxq %rax, -128(%rsp,%rsi), %rcx
+; X64-BMI2-NEXT: # kill: def $eax killed $eax killed $rax def $rax
+; X64-BMI2-NEXT: notl %eax
+; X64-BMI2-NEXT: movl -120(%rsp,%rsi), %esi
+; X64-BMI2-NEXT: addl %esi, %esi
+; X64-BMI2-NEXT: shlxq %rax, %rsi, %rax
+; X64-BMI2-NEXT: orl %eax, %ecx
+; X64-BMI2-NEXT: movw %cx, (%rdx)
+; X64-BMI2-NEXT: popq %rax
+; X64-BMI2-NEXT: retq
+;
+; X86-NO-BMI2-NO-SHLD-LABEL: load_2byte_chunk_of_64byte_alloca_with_zero_upper_half:
+; X86-NO-BMI2-NO-SHLD: # %bb.0:
+; X86-NO-BMI2-NO-SHLD-NEXT: pushl %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: subl $136, %esp
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: movups (%ecx), %xmm0
+; X86-NO-BMI2-NO-SHLD-NEXT: movups 16(%ecx), %xmm1
+; X86-NO-BMI2-NO-SHLD-NEXT: xorps %xmm2, %xmm2
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm0, (%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: leal (,%edx,8), %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: andl $60, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl (%esp,%edx), %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 4(%esp,%edx), %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: notb %cl
+; X86-NO-BMI2-NO-SHLD-NEXT: addl %edx, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %esi, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: movw %dx, (%eax)
+; X86-NO-BMI2-NO-SHLD-NEXT: addl $136, %esp
+; X86-NO-BMI2-NO-SHLD-NEXT: popl %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: retl
+;
+; X86-SHLD-LABEL: load_2byte_chunk_of_64byte_alloca_with_zero_upper_half:
+; X86-SHLD: # %bb.0:
+; X86-SHLD-NEXT: pushl %esi
+; X86-SHLD-NEXT: subl $136, %esp
+; X86-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-SHLD-NEXT: movups (%ecx), %xmm0
+; X86-SHLD-NEXT: movups 16(%ecx), %xmm1
+; X86-SHLD-NEXT: xorps %xmm2, %xmm2
+; X86-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-SHLD-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-SHLD-NEXT: movaps %xmm0, (%esp)
+; X86-SHLD-NEXT: leal (,%edx,8), %ecx
+; X86-SHLD-NEXT: andl $60, %edx
+; X86-SHLD-NEXT: movl (%esp,%edx), %esi
+; X86-SHLD-NEXT: movl 4(%esp,%edx), %edx
+; X86-SHLD-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-SHLD-NEXT: shrdl %cl, %edx, %esi
+; X86-SHLD-NEXT: movw %si, (%eax)
+; X86-SHLD-NEXT: addl $136, %esp
+; X86-SHLD-NEXT: popl %esi
+; X86-SHLD-NEXT: retl
+;
+; X86-HAVE-BMI2-NO-SHLD-LABEL: load_2byte_chunk_of_64byte_alloca_with_zero_upper_half:
+; X86-HAVE-BMI2-NO-SHLD: # %bb.0:
+; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: subl $136, %esp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movups (%edx), %xmm0
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movups 16(%edx), %xmm1
+; X86-HAVE-BMI2-NO-SHLD-NEXT: xorps %xmm2, %xmm2
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm0, (%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (,%ecx,8), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: andl $60, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, (%esp,%ecx), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: notb %dl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 4(%esp,%ecx), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %ecx, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %ecx, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movw %cx, (%eax)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl $136, %esp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: retl
%init = load <32 x i8>, ptr %src, align 1
%intermediate.sroa.0.0.vec.expand = shufflevector <32 x i8> %init, <32 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
%intermediate.sroa.0.0.vecblend = shufflevector <64 x i8> %intermediate.sroa.0.0.vec.expand, <64 x i8> <i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 96, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 104, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111, i32 112, i32 113, i32 114, i32 115, i32 116, i32 117, i32 118, i32 119, i32 120, i32 121, i32 122, i32 123, i32 124, i32 125, i32 126, i32 127>
@@ -1219,84 +2204,155 @@ define void @load_2byte_chunk_of_64byte_alloca_with_zero_upper_half(ptr %src, i6
}
define void @load_4byte_chunk_of_64byte_alloca_with_zero_upper_half(ptr %src, i64 %byteOff, ptr %dst) nounwind {
-; X64-LABEL: load_4byte_chunk_of_64byte_alloca_with_zero_upper_half:
-; X64: # %bb.0:
-; X64-NEXT: movdqu (%rdi), %xmm0
-; X64-NEXT: movdqu 16(%rdi), %xmm1
-; X64-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
-; X64-NEXT: pshufd {{.*#+}} xmm3 = xmm1[2,3,2,3]
-; X64-NEXT: movq %xmm1, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq %xmm0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq %xmm3, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq %xmm2, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: andl $63, %esi
-; X64-NEXT: movl -128(%rsp,%rsi), %eax
-; X64-NEXT: movl %eax, (%rdx)
-; X64-NEXT: retq
-;
-; X86-LABEL: load_4byte_chunk_of_64byte_alloca_with_zero_upper_half:
-; X86: # %bb.0:
-; X86-NEXT: subl $128, %esp
-; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X86-NEXT: movdqu (%edx), %xmm0
-; X86-NEXT: movdqu 16(%edx), %xmm1
-; X86-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,1,1]
-; X86-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,2,3]
-; X86-NEXT: pshufd {{.*#+}} xmm4 = xmm0[3,3,3,3]
-; X86-NEXT: pshufd {{.*#+}} xmm5 = xmm1[1,1,1,1]
-; X86-NEXT: pshufd {{.*#+}} xmm6 = xmm1[2,3,2,3]
-; X86-NEXT: pshufd {{.*#+}} xmm7 = xmm1[3,3,3,3]
-; X86-NEXT: movd %xmm1, {{[0-9]+}}(%esp)
-; X86-NEXT: movd %xmm0, (%esp)
-; X86-NEXT: movd %xmm7, {{[0-9]+}}(%esp)
-; X86-NEXT: movd %xmm6, {{[0-9]+}}(%esp)
-; X86-NEXT: movd %xmm5, {{[0-9]+}}(%esp)
-; X86-NEXT: movd %xmm4, {{[0-9]+}}(%esp)
-; X86-NEXT: movd %xmm3, {{[0-9]+}}(%esp)
-; X86-NEXT: movd %xmm2, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: andl $63, %ecx
-; X86-NEXT: movl (%esp,%ecx), %ecx
-; X86-NEXT: movl %ecx, (%eax)
-; X86-NEXT: addl $128, %esp
-; X86-NEXT: retl
+; X64-NO-BMI2-LABEL: load_4byte_chunk_of_64byte_alloca_with_zero_upper_half:
+; X64-NO-BMI2: # %bb.0:
+; X64-NO-BMI2-NEXT: pushq %rax
+; X64-NO-BMI2-NEXT: movups (%rdi), %xmm0
+; X64-NO-BMI2-NEXT: movups 16(%rdi), %xmm1
+; X64-NO-BMI2-NEXT: xorps %xmm2, %xmm2
+; X64-NO-BMI2-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-NEXT: leal (,%rsi,8), %ecx
+; X64-NO-BMI2-NEXT: andl $56, %ecx
+; X64-NO-BMI2-NEXT: andl $56, %esi
+; X64-NO-BMI2-NEXT: movq -128(%rsp,%rsi), %rax
+; X64-NO-BMI2-NEXT: shrq %cl, %rax
+; X64-NO-BMI2-NEXT: movl -120(%rsp,%rsi), %esi
+; X64-NO-BMI2-NEXT: addl %esi, %esi
+; X64-NO-BMI2-NEXT: notl %ecx
+; X64-NO-BMI2-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NO-BMI2-NEXT: shlq %cl, %rsi
+; X64-NO-BMI2-NEXT: orl %eax, %esi
+; X64-NO-BMI2-NEXT: movl %esi, (%rdx)
+; X64-NO-BMI2-NEXT: popq %rax
+; X64-NO-BMI2-NEXT: retq
+;
+; X64-BMI2-LABEL: load_4byte_chunk_of_64byte_alloca_with_zero_upper_half:
+; X64-BMI2: # %bb.0:
+; X64-BMI2-NEXT: pushq %rax
+; X64-BMI2-NEXT: movups (%rdi), %xmm0
+; X64-BMI2-NEXT: movups 16(%rdi), %xmm1
+; X64-BMI2-NEXT: xorps %xmm2, %xmm2
+; X64-BMI2-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-BMI2-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-BMI2-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-BMI2-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-BMI2-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-BMI2-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-BMI2-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; X64-BMI2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; X64-BMI2-NEXT: leal (,%rsi,8), %eax
+; X64-BMI2-NEXT: andl $56, %eax
+; X64-BMI2-NEXT: andl $56, %esi
+; X64-BMI2-NEXT: shrxq %rax, -128(%rsp,%rsi), %rcx
+; X64-BMI2-NEXT: # kill: def $eax killed $eax killed $rax def $rax
+; X64-BMI2-NEXT: notl %eax
+; X64-BMI2-NEXT: movl -120(%rsp,%rsi), %esi
+; X64-BMI2-NEXT: addl %esi, %esi
+; X64-BMI2-NEXT: shlxq %rax, %rsi, %rax
+; X64-BMI2-NEXT: orl %eax, %ecx
+; X64-BMI2-NEXT: movl %ecx, (%rdx)
+; X64-BMI2-NEXT: popq %rax
+; X64-BMI2-NEXT: retq
+;
+; X86-NO-BMI2-NO-SHLD-LABEL: load_4byte_chunk_of_64byte_alloca_with_zero_upper_half:
+; X86-NO-BMI2-NO-SHLD: # %bb.0:
+; X86-NO-BMI2-NO-SHLD-NEXT: pushl %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: subl $136, %esp
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: movups (%ecx), %xmm0
+; X86-NO-BMI2-NO-SHLD-NEXT: movups 16(%ecx), %xmm1
+; X86-NO-BMI2-NO-SHLD-NEXT: xorps %xmm2, %xmm2
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm0, (%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: leal (,%edx,8), %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: andl $60, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl (%esp,%edx), %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 4(%esp,%edx), %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: notb %cl
+; X86-NO-BMI2-NO-SHLD-NEXT: addl %edx, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %esi, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, (%eax)
+; X86-NO-BMI2-NO-SHLD-NEXT: addl $136, %esp
+; X86-NO-BMI2-NO-SHLD-NEXT: popl %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: retl
+;
+; X86-SHLD-LABEL: load_4byte_chunk_of_64byte_alloca_with_zero_upper_half:
+; X86-SHLD: # %bb.0:
+; X86-SHLD-NEXT: pushl %esi
+; X86-SHLD-NEXT: subl $136, %esp
+; X86-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-SHLD-NEXT: movups (%ecx), %xmm0
+; X86-SHLD-NEXT: movups 16(%ecx), %xmm1
+; X86-SHLD-NEXT: xorps %xmm2, %xmm2
+; X86-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-SHLD-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-SHLD-NEXT: movaps %xmm0, (%esp)
+; X86-SHLD-NEXT: leal (,%edx,8), %ecx
+; X86-SHLD-NEXT: andl $60, %edx
+; X86-SHLD-NEXT: movl (%esp,%edx), %esi
+; X86-SHLD-NEXT: movl 4(%esp,%edx), %edx
+; X86-SHLD-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-SHLD-NEXT: shrdl %cl, %edx, %esi
+; X86-SHLD-NEXT: movl %esi, (%eax)
+; X86-SHLD-NEXT: addl $136, %esp
+; X86-SHLD-NEXT: popl %esi
+; X86-SHLD-NEXT: retl
+;
+; X86-HAVE-BMI2-NO-SHLD-LABEL: load_4byte_chunk_of_64byte_alloca_with_zero_upper_half:
+; X86-HAVE-BMI2-NO-SHLD: # %bb.0:
+; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: subl $136, %esp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movups (%edx), %xmm0
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movups 16(%edx), %xmm1
+; X86-HAVE-BMI2-NO-SHLD-NEXT: xorps %xmm2, %xmm2
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm0, (%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (,%ecx,8), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: andl $60, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, (%esp,%ecx), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: notb %dl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 4(%esp,%ecx), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %ecx, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %ecx, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, (%eax)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl $136, %esp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: retl
%init = load <32 x i8>, ptr %src, align 1
%intermediate.sroa.0.0.vec.expand = shufflevector <32 x i8> %init, <32 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
%intermediate.sroa.0.0.vecblend = shufflevector <64 x i8> %intermediate.sroa.0.0.vec.expand, <64 x i8> <i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 96, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 104, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111, i32 112, i32 113, i32 114, i32 115, i32 116, i32 117, i32 118, i32 119, i32 120, i32 121, i32 122, i32 123, i32 124, i32 125, i32 126, i32 127>
@@ -1311,86 +2367,216 @@ define void @load_4byte_chunk_of_64byte_alloca_with_zero_upper_half(ptr %src, i6
}
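; Note on the 8-byte variant below, derived from the diff itself: the old
; combined X64/X86 output indexed the zero-padded 64-byte stack slot at byte
; granularity (andl $63 plus a single unaligned reload), while the new
; per-feature outputs work at bit granularity: leal (,%rsi,8) forms the bit
; offset, andl $56 (or andl $60 on i386) aligns the qword/dword load, and the
; two halves are recombined with shr/shl/or, shrd, or shrx/shlx depending on
; which of SHLD and BMI2 the CHECK prefix assumes.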
define void @load_8byte_chunk_of_64byte_alloca_with_zero_upper_half(ptr %src, i64 %byteOff, ptr %dst) nounwind {
-; X64-LABEL: load_8byte_chunk_of_64byte_alloca_with_zero_upper_half:
-; X64: # %bb.0:
-; X64-NEXT: movdqu (%rdi), %xmm0
-; X64-NEXT: movdqu 16(%rdi), %xmm1
-; X64-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
-; X64-NEXT: pshufd {{.*#+}} xmm3 = xmm1[2,3,2,3]
-; X64-NEXT: movq %xmm1, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq %xmm0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq %xmm3, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq %xmm2, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: andl $63, %esi
-; X64-NEXT: movq -128(%rsp,%rsi), %rax
-; X64-NEXT: movq %rax, (%rdx)
-; X64-NEXT: retq
-;
-; X86-LABEL: load_8byte_chunk_of_64byte_alloca_with_zero_upper_half:
-; X86: # %bb.0:
-; X86-NEXT: subl $128, %esp
-; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X86-NEXT: movdqu (%edx), %xmm0
-; X86-NEXT: movdqu 16(%edx), %xmm1
-; X86-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,1,1]
-; X86-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,2,3]
-; X86-NEXT: pshufd {{.*#+}} xmm4 = xmm0[3,3,3,3]
-; X86-NEXT: pshufd {{.*#+}} xmm5 = xmm1[1,1,1,1]
-; X86-NEXT: pshufd {{.*#+}} xmm6 = xmm1[2,3,2,3]
-; X86-NEXT: pshufd {{.*#+}} xmm7 = xmm1[3,3,3,3]
-; X86-NEXT: movd %xmm1, {{[0-9]+}}(%esp)
-; X86-NEXT: movd %xmm0, (%esp)
-; X86-NEXT: movd %xmm7, {{[0-9]+}}(%esp)
-; X86-NEXT: movd %xmm6, {{[0-9]+}}(%esp)
-; X86-NEXT: movd %xmm5, {{[0-9]+}}(%esp)
-; X86-NEXT: movd %xmm4, {{[0-9]+}}(%esp)
-; X86-NEXT: movd %xmm3, {{[0-9]+}}(%esp)
-; X86-NEXT: movd %xmm2, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: andl $63, %ecx
-; X86-NEXT: movl (%esp,%ecx), %edx
-; X86-NEXT: movl 4(%esp,%ecx), %ecx
-; X86-NEXT: movl %ecx, 4(%eax)
-; X86-NEXT: movl %edx, (%eax)
-; X86-NEXT: addl $128, %esp
-; X86-NEXT: retl
+; X64-NO-BMI2-NO-SHLD-LABEL: load_8byte_chunk_of_64byte_alloca_with_zero_upper_half:
+; X64-NO-BMI2-NO-SHLD: # %bb.0:
+; X64-NO-BMI2-NO-SHLD-NEXT: pushq %rax
+; X64-NO-BMI2-NO-SHLD-NEXT: movups (%rdi), %xmm0
+; X64-NO-BMI2-NO-SHLD-NEXT: movups 16(%rdi), %xmm1
+; X64-NO-BMI2-NO-SHLD-NEXT: xorps %xmm2, %xmm2
+; X64-NO-BMI2-NO-SHLD-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-NO-SHLD-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-NO-SHLD-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-NO-SHLD-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-NO-SHLD-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-NO-SHLD-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-NO-SHLD-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-NO-SHLD-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-NO-SHLD-NEXT: leal (,%rsi,8), %ecx
+; X64-NO-BMI2-NO-SHLD-NEXT: andl $56, %esi
+; X64-NO-BMI2-NO-SHLD-NEXT: movq -128(%rsp,%rsi), %rax
+; X64-NO-BMI2-NO-SHLD-NEXT: movq -120(%rsp,%rsi), %rsi
+; X64-NO-BMI2-NO-SHLD-NEXT: shrq %cl, %rax
+; X64-NO-BMI2-NO-SHLD-NEXT: notb %cl
+; X64-NO-BMI2-NO-SHLD-NEXT: addq %rsi, %rsi
+; X64-NO-BMI2-NO-SHLD-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NO-BMI2-NO-SHLD-NEXT: shlq %cl, %rsi
+; X64-NO-BMI2-NO-SHLD-NEXT: orq %rax, %rsi
+; X64-NO-BMI2-NO-SHLD-NEXT: movq %rsi, (%rdx)
+; X64-NO-BMI2-NO-SHLD-NEXT: popq %rax
+; X64-NO-BMI2-NO-SHLD-NEXT: retq
+;
+; X64-SHLD-LABEL: load_8byte_chunk_of_64byte_alloca_with_zero_upper_half:
+; X64-SHLD: # %bb.0:
+; X64-SHLD-NEXT: pushq %rax
+; X64-SHLD-NEXT: movups (%rdi), %xmm0
+; X64-SHLD-NEXT: movups 16(%rdi), %xmm1
+; X64-SHLD-NEXT: xorps %xmm2, %xmm2
+; X64-SHLD-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-SHLD-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-SHLD-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-SHLD-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-SHLD-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-SHLD-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-SHLD-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; X64-SHLD-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; X64-SHLD-NEXT: leal (,%rsi,8), %ecx
+; X64-SHLD-NEXT: andl $56, %esi
+; X64-SHLD-NEXT: movq -128(%rsp,%rsi), %rax
+; X64-SHLD-NEXT: movq -120(%rsp,%rsi), %rsi
+; X64-SHLD-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-SHLD-NEXT: shrdq %cl, %rsi, %rax
+; X64-SHLD-NEXT: movq %rax, (%rdx)
+; X64-SHLD-NEXT: popq %rax
+; X64-SHLD-NEXT: retq
+;
+; X64-HAVE-BMI2-NO-SHLD-LABEL: load_8byte_chunk_of_64byte_alloca_with_zero_upper_half:
+; X64-HAVE-BMI2-NO-SHLD: # %bb.0:
+; X64-HAVE-BMI2-NO-SHLD-NEXT: pushq %rax
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movups (%rdi), %xmm0
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movups 16(%rdi), %xmm1
+; X64-HAVE-BMI2-NO-SHLD-NEXT: xorps %xmm2, %xmm2
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: leal (,%rsi,8), %eax
+; X64-HAVE-BMI2-NO-SHLD-NEXT: andl $56, %esi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rax, -128(%rsp,%rsi), %rcx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: notb %al
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -120(%rsp,%rsi), %rsi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: addq %rsi, %rsi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rax, %rsi, %rax
+; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %rcx, %rax
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rax, (%rdx)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: popq %rax
+; X64-HAVE-BMI2-NO-SHLD-NEXT: retq
+;
+; X86-NO-BMI2-NO-SHLD-LABEL: load_8byte_chunk_of_64byte_alloca_with_zero_upper_half:
+; X86-NO-BMI2-NO-SHLD: # %bb.0:
+; X86-NO-BMI2-NO-SHLD-NEXT: pushl %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: pushl %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: pushl %edi
+; X86-NO-BMI2-NO-SHLD-NEXT: pushl %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: subl $140, %esp
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: movups (%ecx), %xmm0
+; X86-NO-BMI2-NO-SHLD-NEXT: movups 16(%ecx), %xmm1
+; X86-NO-BMI2-NO-SHLD-NEXT: xorps %xmm2, %xmm2
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm0, (%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: andl $60, %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl (%esp,%ebx), %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 4(%esp,%ebx), %edi
+; X86-NO-BMI2-NO-SHLD-NEXT: shll $3, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: andl $24, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: leal (%edi,%edi), %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: notb %dl
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %esi, %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %edi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 8(%esp,%ebx), %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: addl %eax, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %edi, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, 4(%ecx)
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebp, (%ecx)
+; X86-NO-BMI2-NO-SHLD-NEXT: addl $140, %esp
+; X86-NO-BMI2-NO-SHLD-NEXT: popl %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: popl %edi
+; X86-NO-BMI2-NO-SHLD-NEXT: popl %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: popl %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: retl
+;
+; X86-SHLD-LABEL: load_8byte_chunk_of_64byte_alloca_with_zero_upper_half:
+; X86-SHLD: # %bb.0:
+; X86-SHLD-NEXT: pushl %ebx
+; X86-SHLD-NEXT: pushl %edi
+; X86-SHLD-NEXT: pushl %esi
+; X86-SHLD-NEXT: subl $128, %esp
+; X86-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-SHLD-NEXT: movups (%edx), %xmm0
+; X86-SHLD-NEXT: movups 16(%edx), %xmm1
+; X86-SHLD-NEXT: xorps %xmm2, %xmm2
+; X86-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-SHLD-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-SHLD-NEXT: movaps %xmm0, (%esp)
+; X86-SHLD-NEXT: movl %ecx, %esi
+; X86-SHLD-NEXT: andl $60, %esi
+; X86-SHLD-NEXT: movl 8(%esp,%esi), %edi
+; X86-SHLD-NEXT: movl (%esp,%esi), %edx
+; X86-SHLD-NEXT: movl 4(%esp,%esi), %esi
+; X86-SHLD-NEXT: shll $3, %ecx
+; X86-SHLD-NEXT: andl $24, %ecx
+; X86-SHLD-NEXT: movl %esi, %ebx
+; X86-SHLD-NEXT: shrdl %cl, %edi, %ebx
+; X86-SHLD-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-SHLD-NEXT: shrdl %cl, %esi, %edx
+; X86-SHLD-NEXT: movl %ebx, 4(%eax)
+; X86-SHLD-NEXT: movl %edx, (%eax)
+; X86-SHLD-NEXT: addl $128, %esp
+; X86-SHLD-NEXT: popl %esi
+; X86-SHLD-NEXT: popl %edi
+; X86-SHLD-NEXT: popl %ebx
+; X86-SHLD-NEXT: retl
+;
+; X86-HAVE-BMI2-NO-SHLD-LABEL: load_8byte_chunk_of_64byte_alloca_with_zero_upper_half:
+; X86-HAVE-BMI2-NO-SHLD: # %bb.0:
+; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: subl $128, %esp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movups (%edx), %xmm0
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movups 16(%edx), %xmm1
+; X86-HAVE-BMI2-NO-SHLD-NEXT: xorps %xmm2, %xmm2
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm0, (%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (,%ecx,8), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: andl $24, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: andl $60, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, (%esp,%ecx), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 4(%esp,%ecx), %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, %edi, %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: # kill: def $dl killed $dl killed $edx def $edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: notb %dl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 8(%esp,%ecx), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %edi, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %edi, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %ecx, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %ecx, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebx, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 4(%eax)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edi, (%eax)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl $128, %esp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: retl
%init = load <32 x i8>, ptr %src, align 1
%intermediate.sroa.0.0.vec.expand = shufflevector <32 x i8> %init, <32 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
%intermediate.sroa.0.0.vecblend = shufflevector <64 x i8> %intermediate.sroa.0.0.vec.expand, <64 x i8> <i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 96, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 104, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111, i32 112, i32 113, i32 114, i32 115, i32 116, i32 117, i32 118, i32 119, i32 120, i32 121, i32 122, i32 123, i32 124, i32 125, i32 126, i32 127>
@@ -1405,96 +2591,326 @@ define void @load_8byte_chunk_of_64byte_alloca_with_zero_upper_half(ptr %src, i6
}
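; For the 16-byte chunk each of the four X64 feature combinations gets its own
; prefix (NO-BMI2/HAVE-BMI2 crossed with NO-SHLD/HAVE-SHLD): two adjacent
; 8-byte pieces are assembled and stored to (%rdx) and 8(%rdx), so the shift
; amount is needed both directly and inverted (notb/notl) to line up the low
; bits of the following qword.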
define void @load_16byte_chunk_of_64byte_alloca_with_zero_upper_half(ptr %src, i64 %byteOff, ptr %dst) nounwind {
-; X64-LABEL: load_16byte_chunk_of_64byte_alloca_with_zero_upper_half:
-; X64: # %bb.0:
-; X64-NEXT: movdqu (%rdi), %xmm0
-; X64-NEXT: movdqu 16(%rdi), %xmm1
-; X64-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
-; X64-NEXT: pshufd {{.*#+}} xmm3 = xmm1[2,3,2,3]
-; X64-NEXT: movq %xmm1, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq %xmm0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq %xmm3, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq %xmm2, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: andl $63, %esi
-; X64-NEXT: movq -128(%rsp,%rsi), %rax
-; X64-NEXT: movq -120(%rsp,%rsi), %rcx
-; X64-NEXT: movq %rcx, 8(%rdx)
-; X64-NEXT: movq %rax, (%rdx)
-; X64-NEXT: retq
-;
-; X86-LABEL: load_16byte_chunk_of_64byte_alloca_with_zero_upper_half:
-; X86: # %bb.0:
-; X86-NEXT: pushl %edi
-; X86-NEXT: pushl %esi
-; X86-NEXT: subl $128, %esp
-; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X86-NEXT: movdqu (%edx), %xmm0
-; X86-NEXT: movdqu 16(%edx), %xmm1
-; X86-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,1,1]
-; X86-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,2,3]
-; X86-NEXT: pshufd {{.*#+}} xmm4 = xmm0[3,3,3,3]
-; X86-NEXT: pshufd {{.*#+}} xmm5 = xmm1[1,1,1,1]
-; X86-NEXT: pshufd {{.*#+}} xmm6 = xmm1[2,3,2,3]
-; X86-NEXT: pshufd {{.*#+}} xmm7 = xmm1[3,3,3,3]
-; X86-NEXT: movd %xmm1, {{[0-9]+}}(%esp)
-; X86-NEXT: movd %xmm0, (%esp)
-; X86-NEXT: movd %xmm7, {{[0-9]+}}(%esp)
-; X86-NEXT: movd %xmm6, {{[0-9]+}}(%esp)
-; X86-NEXT: movd %xmm5, {{[0-9]+}}(%esp)
-; X86-NEXT: movd %xmm4, {{[0-9]+}}(%esp)
-; X86-NEXT: movd %xmm3, {{[0-9]+}}(%esp)
-; X86-NEXT: movd %xmm2, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: andl $63, %ecx
-; X86-NEXT: movl (%esp,%ecx), %edx
-; X86-NEXT: movl 4(%esp,%ecx), %esi
-; X86-NEXT: movl 8(%esp,%ecx), %edi
-; X86-NEXT: movl 12(%esp,%ecx), %ecx
-; X86-NEXT: movl %ecx, 12(%eax)
-; X86-NEXT: movl %edi, 8(%eax)
-; X86-NEXT: movl %esi, 4(%eax)
-; X86-NEXT: movl %edx, (%eax)
-; X86-NEXT: addl $128, %esp
-; X86-NEXT: popl %esi
-; X86-NEXT: popl %edi
-; X86-NEXT: retl
+; X64-NO-BMI2-NO-SHLD-LABEL: load_16byte_chunk_of_64byte_alloca_with_zero_upper_half:
+; X64-NO-BMI2-NO-SHLD: # %bb.0:
+; X64-NO-BMI2-NO-SHLD-NEXT: pushq %rax
+; X64-NO-BMI2-NO-SHLD-NEXT: movups (%rdi), %xmm0
+; X64-NO-BMI2-NO-SHLD-NEXT: movups 16(%rdi), %xmm1
+; X64-NO-BMI2-NO-SHLD-NEXT: xorps %xmm2, %xmm2
+; X64-NO-BMI2-NO-SHLD-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-NO-SHLD-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-NO-SHLD-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-NO-SHLD-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-NO-SHLD-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-NO-SHLD-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-NO-SHLD-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-NO-SHLD-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-NO-SHLD-NEXT: leal (,%rsi,8), %eax
+; X64-NO-BMI2-NO-SHLD-NEXT: movl %eax, %edi
+; X64-NO-BMI2-NO-SHLD-NEXT: andl $56, %edi
+; X64-NO-BMI2-NO-SHLD-NEXT: andl $56, %esi
+; X64-NO-BMI2-NO-SHLD-NEXT: movq -128(%rsp,%rsi), %r8
+; X64-NO-BMI2-NO-SHLD-NEXT: movq -120(%rsp,%rsi), %r9
+; X64-NO-BMI2-NO-SHLD-NEXT: movl %edi, %ecx
+; X64-NO-BMI2-NO-SHLD-NEXT: shrq %cl, %r8
+; X64-NO-BMI2-NO-SHLD-NEXT: notb %cl
+; X64-NO-BMI2-NO-SHLD-NEXT: leaq (%r9,%r9), %r10
+; X64-NO-BMI2-NO-SHLD-NEXT: shlq %cl, %r10
+; X64-NO-BMI2-NO-SHLD-NEXT: orq %r8, %r10
+; X64-NO-BMI2-NO-SHLD-NEXT: movl %edi, %ecx
+; X64-NO-BMI2-NO-SHLD-NEXT: shrq %cl, %r9
+; X64-NO-BMI2-NO-SHLD-NEXT: notl %eax
+; X64-NO-BMI2-NO-SHLD-NEXT: movq -112(%rsp,%rsi), %rsi
+; X64-NO-BMI2-NO-SHLD-NEXT: addq %rsi, %rsi
+; X64-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
+; X64-NO-BMI2-NO-SHLD-NEXT: shlq %cl, %rsi
+; X64-NO-BMI2-NO-SHLD-NEXT: orq %r9, %rsi
+; X64-NO-BMI2-NO-SHLD-NEXT: movq %rsi, 8(%rdx)
+; X64-NO-BMI2-NO-SHLD-NEXT: movq %r10, (%rdx)
+; X64-NO-BMI2-NO-SHLD-NEXT: popq %rax
+; X64-NO-BMI2-NO-SHLD-NEXT: retq
+;
+; X64-NO-BMI2-HAVE-SHLD-LABEL: load_16byte_chunk_of_64byte_alloca_with_zero_upper_half:
+; X64-NO-BMI2-HAVE-SHLD: # %bb.0:
+; X64-NO-BMI2-HAVE-SHLD-NEXT: pushq %rax
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movups (%rdi), %xmm0
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movups 16(%rdi), %xmm1
+; X64-NO-BMI2-HAVE-SHLD-NEXT: xorps %xmm2, %xmm2
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-HAVE-SHLD-NEXT: leal (,%rsi,8), %eax
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, %edi
+; X64-NO-BMI2-HAVE-SHLD-NEXT: andl $56, %edi
+; X64-NO-BMI2-HAVE-SHLD-NEXT: andl $56, %esi
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq -128(%rsp,%rsi), %r8
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq -120(%rsp,%rsi), %r9
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %r9, %r10
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movl %edi, %ecx
+; X64-NO-BMI2-HAVE-SHLD-NEXT: shrq %cl, %r10
+; X64-NO-BMI2-HAVE-SHLD-NEXT: notl %eax
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq -112(%rsp,%rsi), %rsi
+; X64-NO-BMI2-HAVE-SHLD-NEXT: addq %rsi, %rsi
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, %ecx
+; X64-NO-BMI2-HAVE-SHLD-NEXT: shlq %cl, %rsi
+; X64-NO-BMI2-HAVE-SHLD-NEXT: orq %r10, %rsi
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movl %edi, %ecx
+; X64-NO-BMI2-HAVE-SHLD-NEXT: shrdq %cl, %r9, %r8
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %r8, (%rdx)
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %rsi, 8(%rdx)
+; X64-NO-BMI2-HAVE-SHLD-NEXT: popq %rax
+; X64-NO-BMI2-HAVE-SHLD-NEXT: retq
+;
+; X64-HAVE-BMI2-NO-SHLD-LABEL: load_16byte_chunk_of_64byte_alloca_with_zero_upper_half:
+; X64-HAVE-BMI2-NO-SHLD: # %bb.0:
+; X64-HAVE-BMI2-NO-SHLD-NEXT: pushq %rax
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movups (%rdi), %xmm0
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movups 16(%rdi), %xmm1
+; X64-HAVE-BMI2-NO-SHLD-NEXT: xorps %xmm2, %xmm2
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: leal (,%rsi,8), %eax
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: andl $56, %ecx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: andl $56, %esi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, -128(%rsp,%rsi), %rdi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -120(%rsp,%rsi), %r8
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, %r8, %r9
+; X64-HAVE-BMI2-NO-SHLD-NEXT: # kill: def $cl killed $cl killed $rcx def $rcx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: notb %cl
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -112(%rsp,%rsi), %rsi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: addq %r8, %r8
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rcx, %r8, %rcx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %rdi, %rcx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: notl %eax
+; X64-HAVE-BMI2-NO-SHLD-NEXT: addq %rsi, %rsi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rax, %rsi, %rax
+; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %rax, %r9
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %r9, 8(%rdx)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rcx, (%rdx)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: popq %rax
+; X64-HAVE-BMI2-NO-SHLD-NEXT: retq
+;
+; X64-HAVE-BMI2-HAVE-SHLD-LABEL: load_16byte_chunk_of_64byte_alloca_with_zero_upper_half:
+; X64-HAVE-BMI2-HAVE-SHLD: # %bb.0:
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: pushq %rax
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movups (%rdi), %xmm0
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movups 16(%rdi), %xmm1
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: xorps %xmm2, %xmm2
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: leal (,%rsi,8), %ecx
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, %eax
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: notl %eax
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: andl $56, %esi
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq -112(%rsp,%rsi), %rdi
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: addq %rdi, %rdi
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shlxq %rax, %rdi, %rax
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: andl $56, %ecx
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq -128(%rsp,%rsi), %rdi
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq -120(%rsp,%rsi), %rsi
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shrxq %rcx, %rsi, %r8
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: orq %rax, %r8
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: # kill: def $cl killed $cl killed $rcx
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shrdq %cl, %rsi, %rdi
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %rdi, (%rdx)
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %r8, 8(%rdx)
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: popq %rax
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: retq
+;
+; X86-NO-BMI2-NO-SHLD-LABEL: load_16byte_chunk_of_64byte_alloca_with_zero_upper_half:
+; X86-NO-BMI2-NO-SHLD: # %bb.0:
+; X86-NO-BMI2-NO-SHLD-NEXT: pushl %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: pushl %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: pushl %edi
+; X86-NO-BMI2-NO-SHLD-NEXT: pushl %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: subl $156, %esp
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: movups (%ecx), %xmm0
+; X86-NO-BMI2-NO-SHLD-NEXT: movups 16(%ecx), %xmm1
+; X86-NO-BMI2-NO-SHLD-NEXT: xorps %xmm2, %xmm2
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: andl $60, %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 16(%esp,%esi), %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 20(%esp,%esi), %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: shll $3, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: andl $24, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: leal (%ebp,%ebp), %edi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: notb %dl
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %edi
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %ebx, %edi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 24(%esp,%esi), %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: leal (%ebx,%ebx), %edi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %edi
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %ebp, %edi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 28(%esp,%esi), %edi
+; X86-NO-BMI2-NO-SHLD-NEXT: leal (%edi,%edi), %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %ebx, %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %edi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 32(%esp,%esi), %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: addl %eax, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %edi, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, 12(%ecx)
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebp, 8(%ecx)
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, 4(%ecx)
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, (%ecx)
+; X86-NO-BMI2-NO-SHLD-NEXT: addl $156, %esp
+; X86-NO-BMI2-NO-SHLD-NEXT: popl %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: popl %edi
+; X86-NO-BMI2-NO-SHLD-NEXT: popl %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: popl %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: retl
+;
+; X86-SHLD-LABEL: load_16byte_chunk_of_64byte_alloca_with_zero_upper_half:
+; X86-SHLD: # %bb.0:
+; X86-SHLD-NEXT: pushl %ebp
+; X86-SHLD-NEXT: pushl %ebx
+; X86-SHLD-NEXT: pushl %edi
+; X86-SHLD-NEXT: pushl %esi
+; X86-SHLD-NEXT: subl $156, %esp
+; X86-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SHLD-NEXT: movups (%eax), %xmm0
+; X86-SHLD-NEXT: movups 16(%eax), %xmm1
+; X86-SHLD-NEXT: xorps %xmm2, %xmm2
+; X86-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-SHLD-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-SHLD-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; X86-SHLD-NEXT: movl %ecx, %edi
+; X86-SHLD-NEXT: andl $60, %edi
+; X86-SHLD-NEXT: movl 24(%esp,%edi), %esi
+; X86-SHLD-NEXT: movl 16(%esp,%edi), %eax
+; X86-SHLD-NEXT: movl 20(%esp,%edi), %ebx
+; X86-SHLD-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-SHLD-NEXT: shll $3, %ecx
+; X86-SHLD-NEXT: andl $24, %ecx
+; X86-SHLD-NEXT: shrdl %cl, %esi, %ebx
+; X86-SHLD-NEXT: movl 28(%esp,%edi), %ebp
+; X86-SHLD-NEXT: shrdl %cl, %ebp, %esi
+; X86-SHLD-NEXT: movl 32(%esp,%edi), %edi
+; X86-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-SHLD-NEXT: shrdl %cl, %edi, %ebp
+; X86-SHLD-NEXT: movl %ebp, 12(%edx)
+; X86-SHLD-NEXT: movl %esi, 8(%edx)
+; X86-SHLD-NEXT: movl %ebx, 4(%edx)
+; X86-SHLD-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X86-SHLD-NEXT: shrdl %cl, %esi, %eax
+; X86-SHLD-NEXT: movl %eax, (%edx)
+; X86-SHLD-NEXT: addl $156, %esp
+; X86-SHLD-NEXT: popl %esi
+; X86-SHLD-NEXT: popl %edi
+; X86-SHLD-NEXT: popl %ebx
+; X86-SHLD-NEXT: popl %ebp
+; X86-SHLD-NEXT: retl
+;
+; X86-HAVE-BMI2-NO-SHLD-LABEL: load_16byte_chunk_of_64byte_alloca_with_zero_upper_half:
+; X86-HAVE-BMI2-NO-SHLD: # %bb.0:
+; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: subl $156, %esp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movups (%ecx), %xmm0
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movups 16(%ecx), %xmm1
+; X86-HAVE-BMI2-NO-SHLD-NEXT: xorps %xmm2, %xmm2
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (,%eax,8), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: andl $24, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: andl $60, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, 16(%esp,%eax), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 20(%esp,%eax), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, %edx, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: notb %bl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 24(%esp,%eax), %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %edx, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebx, %edx, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, %ebp, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%ebp,%ebp), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebx, %esi, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 28(%esp,%eax), %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, %ebp, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %ebp, %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebx, %ebp, %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edi, %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 32(%esp,%eax), %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %eax, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebx, %eax, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ecx, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 12(%ecx)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebp, 8(%ecx)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, 4(%ecx)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, (%ecx)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl $156, %esp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: retl
%init = load <32 x i8>, ptr %src, align 1
%intermediate.sroa.0.0.vec.expand = shufflevector <32 x i8> %init, <32 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
%intermediate.sroa.0.0.vecblend = shufflevector <64 x i8> %intermediate.sroa.0.0.vec.expand, <64 x i8> <i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 96, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 104, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111, i32 112, i32 113, i32 114, i32 115, i32 116, i32 117, i32 118, i32 119, i32 120, i32 121, i32 122, i32 123, i32 124, i32 125, i32 126, i32 127>
@@ -1509,116 +2925,484 @@ define void @load_16byte_chunk_of_64byte_alloca_with_zero_upper_half(ptr %src, i
}
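; The 32-byte chunk repeats the same recombination over four qwords (x86-64)
; or eight dwords (i386); the i386 non-SHLD variants additionally spill
; intermediate results to the stack (the 4-byte Spill/Reload comments below),
; and the x86-64 output applies an explicit andl $63 to the inverted bit
; offset before the 64-bit shlq.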
define void @load_32byte_chunk_of_64byte_alloca_with_zero_upper_half(ptr %src, i64 %byteOff, ptr %dst) nounwind {
-; X64-LABEL: load_32byte_chunk_of_64byte_alloca_with_zero_upper_half:
-; X64: # %bb.0:
-; X64-NEXT: movdqu (%rdi), %xmm0
-; X64-NEXT: movdqu 16(%rdi), %xmm1
-; X64-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
-; X64-NEXT: pshufd {{.*#+}} xmm3 = xmm1[2,3,2,3]
-; X64-NEXT: movq %xmm1, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq %xmm0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq %xmm3, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq %xmm2, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: andl $63, %esi
-; X64-NEXT: movq -128(%rsp,%rsi), %rax
-; X64-NEXT: movq -120(%rsp,%rsi), %rcx
-; X64-NEXT: movq -112(%rsp,%rsi), %rdi
-; X64-NEXT: movq -104(%rsp,%rsi), %rsi
-; X64-NEXT: movq %rsi, 24(%rdx)
-; X64-NEXT: movq %rdi, 16(%rdx)
-; X64-NEXT: movq %rcx, 8(%rdx)
-; X64-NEXT: movq %rax, (%rdx)
-; X64-NEXT: retq
-;
-; X86-LABEL: load_32byte_chunk_of_64byte_alloca_with_zero_upper_half:
-; X86: # %bb.0:
-; X86-NEXT: pushl %ebp
-; X86-NEXT: pushl %ebx
-; X86-NEXT: pushl %edi
-; X86-NEXT: pushl %esi
-; X86-NEXT: subl $136, %esp
-; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: movdqu (%ecx), %xmm0
-; X86-NEXT: movdqu 16(%ecx), %xmm1
-; X86-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,1,1]
-; X86-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,2,3]
-; X86-NEXT: pshufd {{.*#+}} xmm4 = xmm0[3,3,3,3]
-; X86-NEXT: pshufd {{.*#+}} xmm5 = xmm1[1,1,1,1]
-; X86-NEXT: pshufd {{.*#+}} xmm6 = xmm1[2,3,2,3]
-; X86-NEXT: pshufd {{.*#+}} xmm7 = xmm1[3,3,3,3]
-; X86-NEXT: movd %xmm1, {{[0-9]+}}(%esp)
-; X86-NEXT: movd %xmm0, {{[0-9]+}}(%esp)
-; X86-NEXT: movd %xmm7, {{[0-9]+}}(%esp)
-; X86-NEXT: movd %xmm6, {{[0-9]+}}(%esp)
-; X86-NEXT: movd %xmm5, {{[0-9]+}}(%esp)
-; X86-NEXT: movd %xmm4, {{[0-9]+}}(%esp)
-; X86-NEXT: movd %xmm3, {{[0-9]+}}(%esp)
-; X86-NEXT: movd %xmm2, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: andl $63, %eax
-; X86-NEXT: movl 8(%esp,%eax), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 12(%esp,%eax), %ecx
-; X86-NEXT: movl %ecx, (%esp) # 4-byte Spill
-; X86-NEXT: movl 16(%esp,%eax), %esi
-; X86-NEXT: movl 20(%esp,%eax), %edi
-; X86-NEXT: movl 24(%esp,%eax), %ebx
-; X86-NEXT: movl 28(%esp,%eax), %ebp
-; X86-NEXT: movl 32(%esp,%eax), %edx
-; X86-NEXT: movl 36(%esp,%eax), %ecx
-; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: movl %ecx, 28(%eax)
-; X86-NEXT: movl %edx, 24(%eax)
-; X86-NEXT: movl %ebp, 20(%eax)
-; X86-NEXT: movl %ebx, 16(%eax)
-; X86-NEXT: movl %edi, 12(%eax)
-; X86-NEXT: movl %esi, 8(%eax)
-; X86-NEXT: movl (%esp), %ecx # 4-byte Reload
-; X86-NEXT: movl %ecx, 4(%eax)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: movl %ecx, (%eax)
-; X86-NEXT: addl $136, %esp
-; X86-NEXT: popl %esi
-; X86-NEXT: popl %edi
-; X86-NEXT: popl %ebx
-; X86-NEXT: popl %ebp
-; X86-NEXT: retl
+; X64-NO-BMI2-NO-SHLD-LABEL: load_32byte_chunk_of_64byte_alloca_with_zero_upper_half:
+; X64-NO-BMI2-NO-SHLD: # %bb.0:
+; X64-NO-BMI2-NO-SHLD-NEXT: pushq %r14
+; X64-NO-BMI2-NO-SHLD-NEXT: pushq %rbx
+; X64-NO-BMI2-NO-SHLD-NEXT: pushq %rax
+; X64-NO-BMI2-NO-SHLD-NEXT: movups (%rdi), %xmm0
+; X64-NO-BMI2-NO-SHLD-NEXT: movups 16(%rdi), %xmm1
+; X64-NO-BMI2-NO-SHLD-NEXT: xorps %xmm2, %xmm2
+; X64-NO-BMI2-NO-SHLD-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-NO-SHLD-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-NO-SHLD-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-NO-SHLD-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-NO-SHLD-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-NO-SHLD-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-NO-SHLD-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-NO-SHLD-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-NO-SHLD-NEXT: leal (,%rsi,8), %eax
+; X64-NO-BMI2-NO-SHLD-NEXT: movl %eax, %edi
+; X64-NO-BMI2-NO-SHLD-NEXT: andl $56, %edi
+; X64-NO-BMI2-NO-SHLD-NEXT: andl $56, %esi
+; X64-NO-BMI2-NO-SHLD-NEXT: movq -128(%rsp,%rsi), %r10
+; X64-NO-BMI2-NO-SHLD-NEXT: movq -120(%rsp,%rsi), %r11
+; X64-NO-BMI2-NO-SHLD-NEXT: movl %edi, %ecx
+; X64-NO-BMI2-NO-SHLD-NEXT: shrq %cl, %r10
+; X64-NO-BMI2-NO-SHLD-NEXT: movl %edi, %r8d
+; X64-NO-BMI2-NO-SHLD-NEXT: notb %r8b
+; X64-NO-BMI2-NO-SHLD-NEXT: leaq (%r11,%r11), %r9
+; X64-NO-BMI2-NO-SHLD-NEXT: movl %r8d, %ecx
+; X64-NO-BMI2-NO-SHLD-NEXT: shlq %cl, %r9
+; X64-NO-BMI2-NO-SHLD-NEXT: orq %r10, %r9
+; X64-NO-BMI2-NO-SHLD-NEXT: movl %edi, %ecx
+; X64-NO-BMI2-NO-SHLD-NEXT: shrq %cl, %r11
+; X64-NO-BMI2-NO-SHLD-NEXT: notl %eax
+; X64-NO-BMI2-NO-SHLD-NEXT: andl $63, %eax
+; X64-NO-BMI2-NO-SHLD-NEXT: movq -112(%rsp,%rsi), %r10
+; X64-NO-BMI2-NO-SHLD-NEXT: leaq (%r10,%r10), %rbx
+; X64-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
+; X64-NO-BMI2-NO-SHLD-NEXT: shlq %cl, %rbx
+; X64-NO-BMI2-NO-SHLD-NEXT: orq %r11, %rbx
+; X64-NO-BMI2-NO-SHLD-NEXT: movl %edi, %ecx
+; X64-NO-BMI2-NO-SHLD-NEXT: shrq %cl, %r10
+; X64-NO-BMI2-NO-SHLD-NEXT: movq -104(%rsp,%rsi), %r11
+; X64-NO-BMI2-NO-SHLD-NEXT: leaq (%r11,%r11), %r14
+; X64-NO-BMI2-NO-SHLD-NEXT: movl %r8d, %ecx
+; X64-NO-BMI2-NO-SHLD-NEXT: shlq %cl, %r14
+; X64-NO-BMI2-NO-SHLD-NEXT: orq %r10, %r14
+; X64-NO-BMI2-NO-SHLD-NEXT: movl %edi, %ecx
+; X64-NO-BMI2-NO-SHLD-NEXT: shrq %cl, %r11
+; X64-NO-BMI2-NO-SHLD-NEXT: movq -96(%rsp,%rsi), %rsi
+; X64-NO-BMI2-NO-SHLD-NEXT: addq %rsi, %rsi
+; X64-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
+; X64-NO-BMI2-NO-SHLD-NEXT: shlq %cl, %rsi
+; X64-NO-BMI2-NO-SHLD-NEXT: orq %r11, %rsi
+; X64-NO-BMI2-NO-SHLD-NEXT: movq %rsi, 24(%rdx)
+; X64-NO-BMI2-NO-SHLD-NEXT: movq %r14, 16(%rdx)
+; X64-NO-BMI2-NO-SHLD-NEXT: movq %rbx, 8(%rdx)
+; X64-NO-BMI2-NO-SHLD-NEXT: movq %r9, (%rdx)
+; X64-NO-BMI2-NO-SHLD-NEXT: addq $8, %rsp
+; X64-NO-BMI2-NO-SHLD-NEXT: popq %rbx
+; X64-NO-BMI2-NO-SHLD-NEXT: popq %r14
+; X64-NO-BMI2-NO-SHLD-NEXT: retq
+;
+; X64-NO-BMI2-HAVE-SHLD-LABEL: load_32byte_chunk_of_64byte_alloca_with_zero_upper_half:
+; X64-NO-BMI2-HAVE-SHLD: # %bb.0:
+; X64-NO-BMI2-HAVE-SHLD-NEXT: pushq %r14
+; X64-NO-BMI2-HAVE-SHLD-NEXT: pushq %rbx
+; X64-NO-BMI2-HAVE-SHLD-NEXT: pushq %rax
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movups (%rdi), %xmm0
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movups 16(%rdi), %xmm1
+; X64-NO-BMI2-HAVE-SHLD-NEXT: xorps %xmm2, %xmm2
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-HAVE-SHLD-NEXT: leal (,%rsi,8), %edi
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movl %edi, %eax
+; X64-NO-BMI2-HAVE-SHLD-NEXT: andl $56, %eax
+; X64-NO-BMI2-HAVE-SHLD-NEXT: andl $56, %esi
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq -128(%rsp,%rsi), %r8
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq -120(%rsp,%rsi), %r9
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %r9, %r10
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, %ecx
+; X64-NO-BMI2-HAVE-SHLD-NEXT: shrq %cl, %r10
+; X64-NO-BMI2-HAVE-SHLD-NEXT: notl %edi
+; X64-NO-BMI2-HAVE-SHLD-NEXT: andl $63, %edi
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq -112(%rsp,%rsi), %r11
+; X64-NO-BMI2-HAVE-SHLD-NEXT: leaq (%r11,%r11), %rbx
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movl %edi, %ecx
+; X64-NO-BMI2-HAVE-SHLD-NEXT: shlq %cl, %rbx
+; X64-NO-BMI2-HAVE-SHLD-NEXT: orq %r10, %rbx
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq -104(%rsp,%rsi), %r10
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %r10, %r14
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, %ecx
+; X64-NO-BMI2-HAVE-SHLD-NEXT: shrq %cl, %r14
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq -96(%rsp,%rsi), %rsi
+; X64-NO-BMI2-HAVE-SHLD-NEXT: addq %rsi, %rsi
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movl %edi, %ecx
+; X64-NO-BMI2-HAVE-SHLD-NEXT: shlq %cl, %rsi
+; X64-NO-BMI2-HAVE-SHLD-NEXT: orq %r14, %rsi
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, %ecx
+; X64-NO-BMI2-HAVE-SHLD-NEXT: shrdq %cl, %r10, %r11
+; X64-NO-BMI2-HAVE-SHLD-NEXT: shrdq %cl, %r9, %r8
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %r11, 16(%rdx)
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %r8, (%rdx)
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %rsi, 24(%rdx)
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %rbx, 8(%rdx)
+; X64-NO-BMI2-HAVE-SHLD-NEXT: addq $8, %rsp
+; X64-NO-BMI2-HAVE-SHLD-NEXT: popq %rbx
+; X64-NO-BMI2-HAVE-SHLD-NEXT: popq %r14
+; X64-NO-BMI2-HAVE-SHLD-NEXT: retq
+;
+; X64-HAVE-BMI2-NO-SHLD-LABEL: load_32byte_chunk_of_64byte_alloca_with_zero_upper_half:
+; X64-HAVE-BMI2-NO-SHLD: # %bb.0:
+; X64-HAVE-BMI2-NO-SHLD-NEXT: pushq %r14
+; X64-HAVE-BMI2-NO-SHLD-NEXT: pushq %rbx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: pushq %rax
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movups (%rdi), %xmm0
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movups 16(%rdi), %xmm1
+; X64-HAVE-BMI2-NO-SHLD-NEXT: xorps %xmm2, %xmm2
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: leal (,%rsi,8), %eax
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: andl $56, %ecx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: andl $56, %esi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, -128(%rsp,%rsi), %rdi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -120(%rsp,%rsi), %r8
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -112(%rsp,%rsi), %r9
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, %r8, %r10
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, %r9, %r11
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -104(%rsp,%rsi), %rbx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, %rbx, %r14
+; X64-HAVE-BMI2-NO-SHLD-NEXT: # kill: def $cl killed $cl killed $rcx def $rcx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: notb %cl
+; X64-HAVE-BMI2-NO-SHLD-NEXT: addq %r8, %r8
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rcx, %r8, %r8
+; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %rdi, %r8
+; X64-HAVE-BMI2-NO-SHLD-NEXT: notl %eax
+; X64-HAVE-BMI2-NO-SHLD-NEXT: andl $63, %eax
+; X64-HAVE-BMI2-NO-SHLD-NEXT: leaq (%r9,%r9), %rdi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rax, %rdi, %rdi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r10, %rdi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: leaq (%rbx,%rbx), %r9
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rcx, %r9, %rcx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r11, %rcx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -96(%rsp,%rsi), %rsi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: addq %rsi, %rsi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rax, %rsi, %rax
+; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r14, %rax
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rax, 24(%rdx)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rcx, 16(%rdx)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rdi, 8(%rdx)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %r8, (%rdx)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: addq $8, %rsp
+; X64-HAVE-BMI2-NO-SHLD-NEXT: popq %rbx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: popq %r14
+; X64-HAVE-BMI2-NO-SHLD-NEXT: retq
+;
+; X64-HAVE-BMI2-HAVE-SHLD-LABEL: load_32byte_chunk_of_64byte_alloca_with_zero_upper_half:
+; X64-HAVE-BMI2-HAVE-SHLD: # %bb.0:
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: pushq %rbx
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movups (%rdi), %xmm0
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movups 16(%rdi), %xmm1
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: xorps %xmm2, %xmm2
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: leal (,%rsi,8), %eax
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movl %eax, %ecx
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: andl $56, %ecx
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: andl $56, %esi
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq -128(%rsp,%rsi), %rdi
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq -120(%rsp,%rsi), %r8
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shrxq %rcx, %r8, %r9
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: notl %eax
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: andl $63, %eax
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq -112(%rsp,%rsi), %r10
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: leaq (%r10,%r10), %r11
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shlxq %rax, %r11, %r11
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: orq %r9, %r11
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq -104(%rsp,%rsi), %r9
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shrxq %rcx, %r9, %rbx
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq -96(%rsp,%rsi), %rsi
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: addq %rsi, %rsi
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shlxq %rax, %rsi, %rax
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: orq %rbx, %rax
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shrdq %cl, %r9, %r10
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: # kill: def $cl killed $cl killed $rcx
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shrdq %cl, %r8, %rdi
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %r10, 16(%rdx)
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %rdi, (%rdx)
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %rax, 24(%rdx)
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %r11, 8(%rdx)
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: popq %rbx
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: retq
+;
+; X86-NO-BMI2-NO-SHLD-LABEL: load_32byte_chunk_of_64byte_alloca_with_zero_upper_half:
+; X86-NO-BMI2-NO-SHLD: # %bb.0:
+; X86-NO-BMI2-NO-SHLD-NEXT: pushl %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: pushl %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: pushl %edi
+; X86-NO-BMI2-NO-SHLD-NEXT: pushl %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: subl $172, %esp
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: movups (%ecx), %xmm0
+; X86-NO-BMI2-NO-SHLD-NEXT: movups 16(%ecx), %xmm1
+; X86-NO-BMI2-NO-SHLD-NEXT: xorps %xmm2, %xmm2
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %edi
+; X86-NO-BMI2-NO-SHLD-NEXT: andl $60, %edi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 32(%esp,%edi), %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 36(%esp,%edi), %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: shll $3, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: andl $24, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: leal (%esi,%esi), %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: notb %dl
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %ebx, %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 40(%esp,%edi), %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: leal (%ebx,%ebx), %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %esi, %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 44(%esp,%edi), %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: leal (%esi,%esi), %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %ebx, %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 48(%esp,%edi), %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: leal (%ebx,%ebx), %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %esi, %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 52(%esp,%edi), %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: leal (%esi,%esi), %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %ebx, %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 56(%esp,%edi), %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: leal (%ebp,%ebp), %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %esi, %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 60(%esp,%edi), %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: leal (%ebx,%ebx), %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %ebp, %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 64(%esp,%edi), %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: addl %eax, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %ebx, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, 28(%ecx)
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %esi, 24(%ecx)
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, 20(%ecx)
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, 16(%ecx)
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, 12(%ecx)
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, 8(%ecx)
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, 4(%ecx)
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, (%ecx)
+; X86-NO-BMI2-NO-SHLD-NEXT: addl $172, %esp
+; X86-NO-BMI2-NO-SHLD-NEXT: popl %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: popl %edi
+; X86-NO-BMI2-NO-SHLD-NEXT: popl %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: popl %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: retl
+;
+; X86-SHLD-LABEL: load_32byte_chunk_of_64byte_alloca_with_zero_upper_half:
+; X86-SHLD: # %bb.0:
+; X86-SHLD-NEXT: pushl %ebp
+; X86-SHLD-NEXT: pushl %ebx
+; X86-SHLD-NEXT: pushl %edi
+; X86-SHLD-NEXT: pushl %esi
+; X86-SHLD-NEXT: subl $156, %esp
+; X86-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SHLD-NEXT: movups (%eax), %xmm0
+; X86-SHLD-NEXT: movups 16(%eax), %xmm1
+; X86-SHLD-NEXT: xorps %xmm2, %xmm2
+; X86-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-SHLD-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-SHLD-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; X86-SHLD-NEXT: movl %ecx, %edi
+; X86-SHLD-NEXT: andl $60, %edi
+; X86-SHLD-NEXT: movl 24(%esp,%edi), %edx
+; X86-SHLD-NEXT: movl 20(%esp,%edi), %eax
+; X86-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-SHLD-NEXT: shll $3, %ecx
+; X86-SHLD-NEXT: andl $24, %ecx
+; X86-SHLD-NEXT: movl %eax, %esi
+; X86-SHLD-NEXT: movl %edx, %eax
+; X86-SHLD-NEXT: shrdl %cl, %edx, %esi
+; X86-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-SHLD-NEXT: movl 28(%esp,%edi), %edx
+; X86-SHLD-NEXT: shrdl %cl, %edx, %eax
+; X86-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-SHLD-NEXT: movl 32(%esp,%edi), %ebp
+; X86-SHLD-NEXT: shrdl %cl, %ebp, %edx
+; X86-SHLD-NEXT: movl %edx, (%esp) # 4-byte Spill
+; X86-SHLD-NEXT: movl 36(%esp,%edi), %esi
+; X86-SHLD-NEXT: shrdl %cl, %esi, %ebp
+; X86-SHLD-NEXT: movl 40(%esp,%edi), %edx
+; X86-SHLD-NEXT: shrdl %cl, %edx, %esi
+; X86-SHLD-NEXT: movl 44(%esp,%edi), %eax
+; X86-SHLD-NEXT: shrdl %cl, %eax, %edx
+; X86-SHLD-NEXT: movl 16(%esp,%edi), %ebx
+; X86-SHLD-NEXT: movl 48(%esp,%edi), %edi
+; X86-SHLD-NEXT: shrdl %cl, %edi, %eax
+; X86-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X86-SHLD-NEXT: movl %eax, 28(%edi)
+; X86-SHLD-NEXT: movl %edx, 24(%edi)
+; X86-SHLD-NEXT: movl %esi, 20(%edi)
+; X86-SHLD-NEXT: movl %ebp, 16(%edi)
+; X86-SHLD-NEXT: movl (%esp), %eax # 4-byte Reload
+; X86-SHLD-NEXT: movl %eax, 12(%edi)
+; X86-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-SHLD-NEXT: movl %eax, 8(%edi)
+; X86-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-SHLD-NEXT: movl %eax, 4(%edi)
+; X86-SHLD-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-SHLD-NEXT: shrdl %cl, %eax, %ebx
+; X86-SHLD-NEXT: movl %ebx, (%edi)
+; X86-SHLD-NEXT: addl $156, %esp
+; X86-SHLD-NEXT: popl %esi
+; X86-SHLD-NEXT: popl %edi
+; X86-SHLD-NEXT: popl %ebx
+; X86-SHLD-NEXT: popl %ebp
+; X86-SHLD-NEXT: retl
+;
+; X86-HAVE-BMI2-NO-SHLD-LABEL: load_32byte_chunk_of_64byte_alloca_with_zero_upper_half:
+; X86-HAVE-BMI2-NO-SHLD: # %bb.0:
+; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: subl $156, %esp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movups (%ecx), %xmm0
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movups 16(%ecx), %xmm1
+; X86-HAVE-BMI2-NO-SHLD-NEXT: xorps %xmm2, %xmm2
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (,%eax,8), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: andl $24, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: andl $60, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, 16(%esp,%eax), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 20(%esp,%eax), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, %esi, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: notb %bl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 24(%esp,%eax), %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %esi, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebx, %esi, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edx, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, %ebp, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%ebp,%ebp), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebx, %esi, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edi, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 28(%esp,%eax), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%esi,%esi), %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebx, %edi, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edx, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, %esi, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 32(%esp,%eax), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%esi,%esi), %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebx, %ebp, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edx, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edi, (%esp) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, %esi, %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 36(%esp,%eax), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%edx,%edx), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebx, %esi, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebp, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, %edx, %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 40(%esp,%eax), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%edx,%edx), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebx, %esi, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebp, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, %edx, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 44(%esp,%eax), %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, %ebp, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %ebp, %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebx, %ebp, %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edx, %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 48(%esp,%eax), %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %eax, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebx, %eax, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ecx, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 28(%ecx)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebp, 24(%ecx)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, 20(%ecx)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edi, 16(%ecx)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl (%esp), %eax # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 12(%ecx)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 8(%ecx)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 4(%ecx)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, (%ecx)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl $156, %esp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: retl
%init = load <32 x i8>, ptr %src, align 1
%intermediate.sroa.0.0.vec.expand = shufflevector <32 x i8> %init, <32 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
%intermediate.sroa.0.0.vecblend = shufflevector <64 x i8> %intermediate.sroa.0.0.vec.expand, <64 x i8> <i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 96, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 104, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111, i32 112, i32 113, i32 114, i32 115, i32 116, i32 117, i32 118, i32 119, i32 120, i32 121, i32 122, i32 123, i32 124, i32 125, i32 126, i32 127>
@@ -1633,9 +3417,9 @@ define void @load_32byte_chunk_of_64byte_alloca_with_zero_upper_half(ptr %src, i
}
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; ALL: {{.*}}
-; X64-HAVE-BMI2-HAVE-SHLD: {{.*}}
-; X64-NO-BMI2-HAVE-SHLD: {{.*}}
+; X64: {{.*}}
; X64-NO-SHLD: {{.*}}
+; X86: {{.*}}
; X86-HAVE-BMI2-HAVE-SHLD: {{.*}}
; X86-NO-BMI2-HAVE-SHLD: {{.*}}
; X86-NO-SHLD: {{.*}}
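
For orientation before the next file's diff: the load_Nbyte_chunk_of_Mbyte_alloca tests being regenerated here all exercise one lowering pattern, visible in the checks — the source vector is spilled to a zero-padded stack slot (movups, then xorps/movaps of the zero register), and a small chunk is read back at a variable byte offset. A rough C model of that pattern, offered purely as orientation (the helper name is invented, and the 4-of-16-byte sizes are just one instance; this is not code from the patch):

    #include <stdint.h>
    #include <string.h>

    /* Illustrative model only: copy 16 source bytes into a zero-padded
       32-byte stack buffer (the movups + xorps/movaps spill in the
       checks), then read a 4-byte chunk at a variable offset. byteOff
       is assumed in bounds, as the tests' shift semantics guarantee. */
    static void load_4byte_chunk_of_16byte_alloca_model(const void *src,
                                                        uint64_t byteOff,
                                                        void *dst) {
      unsigned char buf[32] = {0};
      memcpy(buf, src, 16);
      memcpy(dst, buf + byteOff, 4);
    }
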
diff --git a/llvm/test/CodeGen/X86/widen-load-of-small-alloca.ll b/llvm/test/CodeGen/X86/widen-load-of-small-alloca.ll
index 4a47e76..ff13f4b 100644
--- a/llvm/test/CodeGen/X86/widen-load-of-small-alloca.ll
+++ b/llvm/test/CodeGen/X86/widen-load-of-small-alloca.ll
@@ -603,32 +603,86 @@ define void @load_1byte_chunk_of_16byte_alloca(ptr %src, i64 %byteOff, ptr %dst)
; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movb %sil, (%rdx)
; X64-HAVE-BMI2-HAVE-SHLD-NEXT: retq
;
-; X86-LABEL: load_1byte_chunk_of_16byte_alloca:
-; X86: # %bb.0:
-; X86-NEXT: subl $32, %esp
-; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X86-NEXT: movdqu (%edx), %xmm0
-; X86-NEXT: shll $3, %ecx
-; X86-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; X86-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
-; X86-NEXT: pshufd {{.*#+}} xmm3 = xmm0[3,3,3,3]
-; X86-NEXT: movd %xmm0, (%esp)
-; X86-NEXT: movd %xmm3, {{[0-9]+}}(%esp)
-; X86-NEXT: movd %xmm2, {{[0-9]+}}(%esp)
-; X86-NEXT: movd %xmm1, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: shrb $3, %cl
-; X86-NEXT: andb $15, %cl
-; X86-NEXT: movzbl %cl, %ecx
-; X86-NEXT: movzbl (%esp,%ecx), %ecx
-; X86-NEXT: movb %cl, (%eax)
-; X86-NEXT: addl $32, %esp
-; X86-NEXT: retl
+; X86-NO-BMI2-NO-SHLD-LABEL: load_1byte_chunk_of_16byte_alloca:
+; X86-NO-BMI2-NO-SHLD: # %bb.0:
+; X86-NO-BMI2-NO-SHLD-NEXT: pushl %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: subl $40, %esp
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: movups (%edx), %xmm0
+; X86-NO-BMI2-NO-SHLD-NEXT: shll $3, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: xorps %xmm1, %xmm1
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm0, (%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: shrb $3, %dl
+; X86-NO-BMI2-NO-SHLD-NEXT: andb $12, %dl
+; X86-NO-BMI2-NO-SHLD-NEXT: movzbl %dl, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl (%esp,%edx), %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 4(%esp,%edx), %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: notb %cl
+; X86-NO-BMI2-NO-SHLD-NEXT: addl %edx, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %esi, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %dl, (%eax)
+; X86-NO-BMI2-NO-SHLD-NEXT: addl $40, %esp
+; X86-NO-BMI2-NO-SHLD-NEXT: popl %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: retl
+;
+; X86-SHLD-LABEL: load_1byte_chunk_of_16byte_alloca:
+; X86-SHLD: # %bb.0:
+; X86-SHLD-NEXT: pushl %ebx
+; X86-SHLD-NEXT: subl $40, %esp
+; X86-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-SHLD-NEXT: movups (%edx), %xmm0
+; X86-SHLD-NEXT: shll $3, %ecx
+; X86-SHLD-NEXT: xorps %xmm1, %xmm1
+; X86-SHLD-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-SHLD-NEXT: movaps %xmm0, (%esp)
+; X86-SHLD-NEXT: movl %ecx, %edx
+; X86-SHLD-NEXT: shrb $3, %dl
+; X86-SHLD-NEXT: andb $12, %dl
+; X86-SHLD-NEXT: movzbl %dl, %edx
+; X86-SHLD-NEXT: movl (%esp,%edx), %ebx
+; X86-SHLD-NEXT: movl 4(%esp,%edx), %edx
+; X86-SHLD-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-SHLD-NEXT: shrdl %cl, %edx, %ebx
+; X86-SHLD-NEXT: movb %bl, (%eax)
+; X86-SHLD-NEXT: addl $40, %esp
+; X86-SHLD-NEXT: popl %ebx
+; X86-SHLD-NEXT: retl
+;
+; X86-HAVE-BMI2-NO-SHLD-LABEL: load_1byte_chunk_of_16byte_alloca:
+; X86-HAVE-BMI2-NO-SHLD: # %bb.0:
+; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: subl $40, %esp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movups (%edx), %xmm0
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shll $3, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: xorps %xmm1, %xmm1
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm0, (%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrb $3, %dl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: andb $12, %dl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movzbl %dl, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, (%esp,%edx), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: notb %cl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 4(%esp,%edx), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %edx, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %edx, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movb %cl, (%eax)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl $40, %esp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: retl
%init = load <16 x i8>, ptr %src, align 1
%byteOff.numbits = shl nuw nsw i64 %byteOff, 3
%intermediate.val.frozen = freeze <16 x i8> %init
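
The three X86 prefixes in the hunks above differ only in how they perform the word-straddling right shift once the chunk's two 32-bit words are loaded: X86-NO-BMI2-NO-SHLD composes it from shrl/notb/shll/orl through %cl, X86-SHLD collapses it into a single shrdl, and X86-HAVE-BMI2-NO-SHLD uses the BMI2 shrxl/shlxl forms, which take the count in an arbitrary register. A minimal C sketch of that double-word shift, as a hedged illustration rather than code from the patch:

    #include <stdint.h>

    /* What shrdl %cl, hi, lo computes, and what the NO-SHLD checks
       spell out as shrl/notb/shll/orl. The asm avoids the c == 0
       branch by pre-doubling hi (leal (hi,hi)) and shifting it left
       by (~c) & 31, which totals 32 - c and degenerates to 0. */
    static uint32_t shr_across_words(uint32_t lo, uint32_t hi,
                                     unsigned cnt) {
      unsigned c = cnt & 31;
      if (c == 0)
        return lo;            /* guard: shifting a 32-bit int by 32 is UB in C */
      return (lo >> c) | (hi << (32 - c));
    }
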
@@ -711,32 +765,86 @@ define void @load_2byte_chunk_of_16byte_alloca(ptr %src, i64 %byteOff, ptr %dst)
; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movw %si, (%rdx)
; X64-HAVE-BMI2-HAVE-SHLD-NEXT: retq
;
-; X86-LABEL: load_2byte_chunk_of_16byte_alloca:
-; X86: # %bb.0:
-; X86-NEXT: subl $32, %esp
-; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X86-NEXT: movdqu (%edx), %xmm0
-; X86-NEXT: shll $3, %ecx
-; X86-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; X86-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
-; X86-NEXT: pshufd {{.*#+}} xmm3 = xmm0[3,3,3,3]
-; X86-NEXT: movd %xmm0, (%esp)
-; X86-NEXT: movd %xmm3, {{[0-9]+}}(%esp)
-; X86-NEXT: movd %xmm2, {{[0-9]+}}(%esp)
-; X86-NEXT: movd %xmm1, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: shrb $3, %cl
-; X86-NEXT: andb $15, %cl
-; X86-NEXT: movzbl %cl, %ecx
-; X86-NEXT: movl (%esp,%ecx), %ecx
-; X86-NEXT: movw %cx, (%eax)
-; X86-NEXT: addl $32, %esp
-; X86-NEXT: retl
+; X86-NO-BMI2-NO-SHLD-LABEL: load_2byte_chunk_of_16byte_alloca:
+; X86-NO-BMI2-NO-SHLD: # %bb.0:
+; X86-NO-BMI2-NO-SHLD-NEXT: pushl %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: subl $40, %esp
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: movups (%edx), %xmm0
+; X86-NO-BMI2-NO-SHLD-NEXT: shll $3, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: xorps %xmm1, %xmm1
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm0, (%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: shrb $3, %dl
+; X86-NO-BMI2-NO-SHLD-NEXT: andb $12, %dl
+; X86-NO-BMI2-NO-SHLD-NEXT: movzbl %dl, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl (%esp,%edx), %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 4(%esp,%edx), %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: notb %cl
+; X86-NO-BMI2-NO-SHLD-NEXT: addl %edx, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %esi, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: movw %dx, (%eax)
+; X86-NO-BMI2-NO-SHLD-NEXT: addl $40, %esp
+; X86-NO-BMI2-NO-SHLD-NEXT: popl %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: retl
+;
+; X86-SHLD-LABEL: load_2byte_chunk_of_16byte_alloca:
+; X86-SHLD: # %bb.0:
+; X86-SHLD-NEXT: pushl %esi
+; X86-SHLD-NEXT: subl $40, %esp
+; X86-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-SHLD-NEXT: movups (%edx), %xmm0
+; X86-SHLD-NEXT: shll $3, %ecx
+; X86-SHLD-NEXT: xorps %xmm1, %xmm1
+; X86-SHLD-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-SHLD-NEXT: movaps %xmm0, (%esp)
+; X86-SHLD-NEXT: movl %ecx, %edx
+; X86-SHLD-NEXT: shrb $3, %dl
+; X86-SHLD-NEXT: andb $12, %dl
+; X86-SHLD-NEXT: movzbl %dl, %edx
+; X86-SHLD-NEXT: movl (%esp,%edx), %esi
+; X86-SHLD-NEXT: movl 4(%esp,%edx), %edx
+; X86-SHLD-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-SHLD-NEXT: shrdl %cl, %edx, %esi
+; X86-SHLD-NEXT: movw %si, (%eax)
+; X86-SHLD-NEXT: addl $40, %esp
+; X86-SHLD-NEXT: popl %esi
+; X86-SHLD-NEXT: retl
+;
+; X86-HAVE-BMI2-NO-SHLD-LABEL: load_2byte_chunk_of_16byte_alloca:
+; X86-HAVE-BMI2-NO-SHLD: # %bb.0:
+; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: subl $40, %esp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movups (%edx), %xmm0
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shll $3, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: xorps %xmm1, %xmm1
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm0, (%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrb $3, %dl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: andb $12, %dl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movzbl %dl, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, (%esp,%edx), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: notb %cl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 4(%esp,%edx), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %edx, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %edx, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movw %cx, (%eax)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl $40, %esp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: retl
%init = load <16 x i8>, ptr %src, align 1
%byteOff.numbits = shl nuw nsw i64 %byteOff, 3
%intermediate.val.frozen = freeze <16 x i8> %init
@@ -818,32 +926,86 @@ define void @load_4byte_chunk_of_16byte_alloca(ptr %src, i64 %byteOff, ptr %dst)
; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movl %esi, (%rdx)
; X64-HAVE-BMI2-HAVE-SHLD-NEXT: retq
;
-; X86-LABEL: load_4byte_chunk_of_16byte_alloca:
-; X86: # %bb.0:
-; X86-NEXT: subl $32, %esp
-; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X86-NEXT: movdqu (%edx), %xmm0
-; X86-NEXT: shll $3, %ecx
-; X86-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; X86-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
-; X86-NEXT: pshufd {{.*#+}} xmm3 = xmm0[3,3,3,3]
-; X86-NEXT: movd %xmm0, (%esp)
-; X86-NEXT: movd %xmm3, {{[0-9]+}}(%esp)
-; X86-NEXT: movd %xmm2, {{[0-9]+}}(%esp)
-; X86-NEXT: movd %xmm1, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: shrb $3, %cl
-; X86-NEXT: andb $15, %cl
-; X86-NEXT: movzbl %cl, %ecx
-; X86-NEXT: movl (%esp,%ecx), %ecx
-; X86-NEXT: movl %ecx, (%eax)
-; X86-NEXT: addl $32, %esp
-; X86-NEXT: retl
+; X86-NO-BMI2-NO-SHLD-LABEL: load_4byte_chunk_of_16byte_alloca:
+; X86-NO-BMI2-NO-SHLD: # %bb.0:
+; X86-NO-BMI2-NO-SHLD-NEXT: pushl %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: subl $40, %esp
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: movups (%edx), %xmm0
+; X86-NO-BMI2-NO-SHLD-NEXT: shll $3, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: xorps %xmm1, %xmm1
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm0, (%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: shrb $3, %dl
+; X86-NO-BMI2-NO-SHLD-NEXT: andb $12, %dl
+; X86-NO-BMI2-NO-SHLD-NEXT: movzbl %dl, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl (%esp,%edx), %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 4(%esp,%edx), %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: notb %cl
+; X86-NO-BMI2-NO-SHLD-NEXT: addl %edx, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %esi, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, (%eax)
+; X86-NO-BMI2-NO-SHLD-NEXT: addl $40, %esp
+; X86-NO-BMI2-NO-SHLD-NEXT: popl %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: retl
+;
+; X86-SHLD-LABEL: load_4byte_chunk_of_16byte_alloca:
+; X86-SHLD: # %bb.0:
+; X86-SHLD-NEXT: pushl %esi
+; X86-SHLD-NEXT: subl $40, %esp
+; X86-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-SHLD-NEXT: movups (%edx), %xmm0
+; X86-SHLD-NEXT: shll $3, %ecx
+; X86-SHLD-NEXT: xorps %xmm1, %xmm1
+; X86-SHLD-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-SHLD-NEXT: movaps %xmm0, (%esp)
+; X86-SHLD-NEXT: movl %ecx, %edx
+; X86-SHLD-NEXT: shrb $3, %dl
+; X86-SHLD-NEXT: andb $12, %dl
+; X86-SHLD-NEXT: movzbl %dl, %edx
+; X86-SHLD-NEXT: movl (%esp,%edx), %esi
+; X86-SHLD-NEXT: movl 4(%esp,%edx), %edx
+; X86-SHLD-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-SHLD-NEXT: shrdl %cl, %edx, %esi
+; X86-SHLD-NEXT: movl %esi, (%eax)
+; X86-SHLD-NEXT: addl $40, %esp
+; X86-SHLD-NEXT: popl %esi
+; X86-SHLD-NEXT: retl
+;
+; X86-HAVE-BMI2-NO-SHLD-LABEL: load_4byte_chunk_of_16byte_alloca:
+; X86-HAVE-BMI2-NO-SHLD: # %bb.0:
+; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: subl $40, %esp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movups (%edx), %xmm0
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shll $3, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: xorps %xmm1, %xmm1
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm0, (%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrb $3, %dl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: andb $12, %dl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movzbl %dl, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, (%esp,%edx), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: notb %cl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 4(%esp,%edx), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %edx, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %edx, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, (%eax)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl $40, %esp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: retl
%init = load <16 x i8>, ptr %src, align 1
%byteOff.numbits = shl nuw nsw i64 %byteOff, 3
%intermediate.val.frozen = freeze <16 x i8> %init
@@ -925,34 +1087,125 @@ define void @load_8byte_chunk_of_16byte_alloca(ptr %src, i64 %byteOff, ptr %dst)
; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %rsi, (%rdx)
; X64-HAVE-BMI2-HAVE-SHLD-NEXT: retq
;
-; X86-LABEL: load_8byte_chunk_of_16byte_alloca:
-; X86: # %bb.0:
-; X86-NEXT: subl $32, %esp
-; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X86-NEXT: movdqu (%edx), %xmm0
-; X86-NEXT: shll $3, %ecx
-; X86-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; X86-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
-; X86-NEXT: pshufd {{.*#+}} xmm3 = xmm0[3,3,3,3]
-; X86-NEXT: movd %xmm0, (%esp)
-; X86-NEXT: movd %xmm3, {{[0-9]+}}(%esp)
-; X86-NEXT: movd %xmm2, {{[0-9]+}}(%esp)
-; X86-NEXT: movd %xmm1, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: shrb $3, %cl
-; X86-NEXT: andb $15, %cl
-; X86-NEXT: movzbl %cl, %ecx
-; X86-NEXT: movl (%esp,%ecx), %edx
-; X86-NEXT: movl 4(%esp,%ecx), %ecx
-; X86-NEXT: movl %ecx, 4(%eax)
-; X86-NEXT: movl %edx, (%eax)
-; X86-NEXT: addl $32, %esp
-; X86-NEXT: retl
+; X86-NO-BMI2-NO-SHLD-LABEL: load_8byte_chunk_of_16byte_alloca:
+; X86-NO-BMI2-NO-SHLD: # %bb.0:
+; X86-NO-BMI2-NO-SHLD-NEXT: pushl %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: pushl %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: pushl %edi
+; X86-NO-BMI2-NO-SHLD-NEXT: pushl %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: subl $44, %esp
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: movups (%ecx), %xmm0
+; X86-NO-BMI2-NO-SHLD-NEXT: shll $3, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: xorps %xmm1, %xmm1
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm0, (%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: shrb $3, %cl
+; X86-NO-BMI2-NO-SHLD-NEXT: andb $12, %cl
+; X86-NO-BMI2-NO-SHLD-NEXT: movzbl %cl, %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl (%esp,%ebx), %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 4(%esp,%ebx), %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: notb %cl
+; X86-NO-BMI2-NO-SHLD-NEXT: leal (%esi,%esi), %edi
+; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %edi
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %ebp, %edi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: andb $24, %al
+; X86-NO-BMI2-NO-SHLD-NEXT: notb %al
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 8(%esp,%ebx), %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: addl %ebx, %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %esi, %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebx, 4(%edx)
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %edi, (%edx)
+; X86-NO-BMI2-NO-SHLD-NEXT: addl $44, %esp
+; X86-NO-BMI2-NO-SHLD-NEXT: popl %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: popl %edi
+; X86-NO-BMI2-NO-SHLD-NEXT: popl %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: popl %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: retl
+;
+; X86-SHLD-LABEL: load_8byte_chunk_of_16byte_alloca:
+; X86-SHLD: # %bb.0:
+; X86-SHLD-NEXT: pushl %ebx
+; X86-SHLD-NEXT: pushl %edi
+; X86-SHLD-NEXT: pushl %esi
+; X86-SHLD-NEXT: subl $32, %esp
+; X86-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-SHLD-NEXT: movups (%edx), %xmm0
+; X86-SHLD-NEXT: shll $3, %ecx
+; X86-SHLD-NEXT: xorps %xmm1, %xmm1
+; X86-SHLD-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-SHLD-NEXT: movaps %xmm0, (%esp)
+; X86-SHLD-NEXT: movl %ecx, %edx
+; X86-SHLD-NEXT: shrb $3, %dl
+; X86-SHLD-NEXT: andb $12, %dl
+; X86-SHLD-NEXT: movzbl %dl, %edx
+; X86-SHLD-NEXT: movl 8(%esp,%edx), %esi
+; X86-SHLD-NEXT: movl (%esp,%edx), %edi
+; X86-SHLD-NEXT: movl 4(%esp,%edx), %edx
+; X86-SHLD-NEXT: movl %edx, %ebx
+; X86-SHLD-NEXT: shrdl %cl, %esi, %ebx
+; X86-SHLD-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-SHLD-NEXT: shrdl %cl, %edx, %edi
+; X86-SHLD-NEXT: movl %ebx, 4(%eax)
+; X86-SHLD-NEXT: movl %edi, (%eax)
+; X86-SHLD-NEXT: addl $32, %esp
+; X86-SHLD-NEXT: popl %esi
+; X86-SHLD-NEXT: popl %edi
+; X86-SHLD-NEXT: popl %ebx
+; X86-SHLD-NEXT: retl
+;
+; X86-HAVE-BMI2-NO-SHLD-LABEL: load_8byte_chunk_of_16byte_alloca:
+; X86-HAVE-BMI2-NO-SHLD: # %bb.0:
+; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: subl $44, %esp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movups (%edx), %xmm0
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shll $3, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: xorps %xmm1, %xmm1
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm0, (%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrb $3, %dl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: andb $12, %dl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movzbl %dl, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, (%esp,%edx), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: notb %bl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 4(%esp,%edx), %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 8(%esp,%edx), %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%edi,%edi), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebx, %edx, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, %edi, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: andb $24, %cl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: notb %cl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %ebp, %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %ebp, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 4(%eax)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, (%eax)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl $44, %esp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: retl
%init = load <16 x i8>, ptr %src, align 1
%byteOff.numbits = shl nuw nsw i64 %byteOff, 3
%intermediate.val.frozen = freeze <16 x i8> %init
@@ -967,64 +1220,128 @@ define void @load_8byte_chunk_of_16byte_alloca(ptr %src, i64 %byteOff, ptr %dst)
; no @load_16byte_chunk_of_16byte_alloca
define void @load_1byte_chunk_of_32byte_alloca(ptr %src, i64 %byteOff, ptr %dst) nounwind {
-; X64-LABEL: load_1byte_chunk_of_32byte_alloca:
-; X64: # %bb.0:
-; X64-NEXT: movdqu (%rdi), %xmm0
-; X64-NEXT: movdqu 16(%rdi), %xmm1
-; X64-NEXT: shll $3, %esi
-; X64-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
-; X64-NEXT: pshufd {{.*#+}} xmm3 = xmm1[2,3,2,3]
-; X64-NEXT: movq %xmm1, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq %xmm0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq %xmm3, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq %xmm2, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: shrb $3, %sil
-; X64-NEXT: movzbl %sil, %eax
-; X64-NEXT: movzbl -64(%rsp,%rax), %eax
-; X64-NEXT: movb %al, (%rdx)
-; X64-NEXT: retq
-;
-; X86-LABEL: load_1byte_chunk_of_32byte_alloca:
-; X86: # %bb.0:
-; X86-NEXT: subl $64, %esp
-; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X86-NEXT: movdqu (%edx), %xmm0
-; X86-NEXT: movdqu 16(%edx), %xmm1
-; X86-NEXT: shll $3, %ecx
-; X86-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,1,1]
-; X86-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,2,3]
-; X86-NEXT: pshufd {{.*#+}} xmm4 = xmm0[3,3,3,3]
-; X86-NEXT: pshufd {{.*#+}} xmm5 = xmm1[1,1,1,1]
-; X86-NEXT: pshufd {{.*#+}} xmm6 = xmm1[2,3,2,3]
-; X86-NEXT: pshufd {{.*#+}} xmm7 = xmm1[3,3,3,3]
-; X86-NEXT: movd %xmm1, {{[0-9]+}}(%esp)
-; X86-NEXT: movd %xmm0, (%esp)
-; X86-NEXT: movd %xmm7, {{[0-9]+}}(%esp)
-; X86-NEXT: movd %xmm6, {{[0-9]+}}(%esp)
-; X86-NEXT: movd %xmm5, {{[0-9]+}}(%esp)
-; X86-NEXT: movd %xmm4, {{[0-9]+}}(%esp)
-; X86-NEXT: movd %xmm3, {{[0-9]+}}(%esp)
-; X86-NEXT: movd %xmm2, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: shrb $3, %cl
-; X86-NEXT: movzbl %cl, %ecx
-; X86-NEXT: movzbl (%esp,%ecx), %ecx
-; X86-NEXT: movb %cl, (%eax)
-; X86-NEXT: addl $64, %esp
-; X86-NEXT: retl
+; X64-NO-BMI2-LABEL: load_1byte_chunk_of_32byte_alloca:
+; X64-NO-BMI2: # %bb.0:
+; X64-NO-BMI2-NEXT: movups (%rdi), %xmm0
+; X64-NO-BMI2-NEXT: movups 16(%rdi), %xmm1
+; X64-NO-BMI2-NEXT: leal (,%rsi,8), %ecx
+; X64-NO-BMI2-NEXT: xorps %xmm2, %xmm2
+; X64-NO-BMI2-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-NEXT: movl %ecx, %eax
+; X64-NO-BMI2-NEXT: shrb $6, %al
+; X64-NO-BMI2-NEXT: movzbl %al, %eax
+; X64-NO-BMI2-NEXT: movq -72(%rsp,%rax,8), %rax
+; X64-NO-BMI2-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NO-BMI2-NEXT: shrq %cl, %rax
+; X64-NO-BMI2-NEXT: movb %al, (%rdx)
+; X64-NO-BMI2-NEXT: retq
+;
+; X64-BMI2-LABEL: load_1byte_chunk_of_32byte_alloca:
+; X64-BMI2: # %bb.0:
+; X64-BMI2-NEXT: movups (%rdi), %xmm0
+; X64-BMI2-NEXT: movups 16(%rdi), %xmm1
+; X64-BMI2-NEXT: shll $3, %esi
+; X64-BMI2-NEXT: xorps %xmm2, %xmm2
+; X64-BMI2-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-BMI2-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-BMI2-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; X64-BMI2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; X64-BMI2-NEXT: movl %esi, %eax
+; X64-BMI2-NEXT: shrb $6, %al
+; X64-BMI2-NEXT: movzbl %al, %eax
+; X64-BMI2-NEXT: shrxq %rsi, -72(%rsp,%rax,8), %rax
+; X64-BMI2-NEXT: movb %al, (%rdx)
+; X64-BMI2-NEXT: retq
+;
+; X86-NO-BMI2-NO-SHLD-LABEL: load_1byte_chunk_of_32byte_alloca:
+; X86-NO-BMI2-NO-SHLD: # %bb.0:
+; X86-NO-BMI2-NO-SHLD-NEXT: pushl %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: subl $72, %esp
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: movups (%edx), %xmm0
+; X86-NO-BMI2-NO-SHLD-NEXT: movups 16(%edx), %xmm1
+; X86-NO-BMI2-NO-SHLD-NEXT: shll $3, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: xorps %xmm2, %xmm2
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm0, (%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: shrb $5, %dl
+; X86-NO-BMI2-NO-SHLD-NEXT: movzbl %dl, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl (%esp,%edx,4), %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 4(%esp,%edx,4), %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: notb %cl
+; X86-NO-BMI2-NO-SHLD-NEXT: addl %edx, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %esi, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %dl, (%eax)
+; X86-NO-BMI2-NO-SHLD-NEXT: addl $72, %esp
+; X86-NO-BMI2-NO-SHLD-NEXT: popl %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: retl
+;
+; X86-SHLD-LABEL: load_1byte_chunk_of_32byte_alloca:
+; X86-SHLD: # %bb.0:
+; X86-SHLD-NEXT: pushl %ebx
+; X86-SHLD-NEXT: subl $72, %esp
+; X86-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-SHLD-NEXT: movups (%edx), %xmm0
+; X86-SHLD-NEXT: movups 16(%edx), %xmm1
+; X86-SHLD-NEXT: shll $3, %ecx
+; X86-SHLD-NEXT: xorps %xmm2, %xmm2
+; X86-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-SHLD-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-SHLD-NEXT: movaps %xmm0, (%esp)
+; X86-SHLD-NEXT: movl %ecx, %edx
+; X86-SHLD-NEXT: shrb $5, %dl
+; X86-SHLD-NEXT: movzbl %dl, %edx
+; X86-SHLD-NEXT: movl (%esp,%edx,4), %ebx
+; X86-SHLD-NEXT: movl 4(%esp,%edx,4), %edx
+; X86-SHLD-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-SHLD-NEXT: shrdl %cl, %edx, %ebx
+; X86-SHLD-NEXT: movb %bl, (%eax)
+; X86-SHLD-NEXT: addl $72, %esp
+; X86-SHLD-NEXT: popl %ebx
+; X86-SHLD-NEXT: retl
+;
+; X86-HAVE-BMI2-NO-SHLD-LABEL: load_1byte_chunk_of_32byte_alloca:
+; X86-HAVE-BMI2-NO-SHLD: # %bb.0:
+; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: subl $72, %esp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movups (%edx), %xmm0
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movups 16(%edx), %xmm1
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shll $3, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: xorps %xmm2, %xmm2
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm0, (%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrb $5, %dl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movzbl %dl, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, (%esp,%edx,4), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: notb %cl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 4(%esp,%edx,4), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %edx, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %edx, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movb %cl, (%eax)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl $72, %esp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: retl
%init = load <32 x i8>, ptr %src, align 1
%byteOff.numbits = shl nuw nsw i64 %byteOff, 3
%intermediate.val.frozen = freeze <32 x i8> %init
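
For the 32-byte allocas above, the checks also split the scaled bit offset into a stack-slot word index and a residual in-word count: shll $3 converts bytes to bits, shrb $5 extracts the 32-bit word index from the low byte (the 64-bit checks use shrb $6, since their word is 64 bits), and the low five bits stay in %cl for the shift itself. A small C sketch of that split, again illustrative only:

    #include <stdint.h>

    /* Hypothetical helper mirroring the shll $3 / shrb $5 / movzbl
       sequence in the 32-bit checks: byte offset -> bit offset,
       32-bit word index, and in-word shift count. */
    static void split_bit_offset(uint64_t byteOff,
                                 unsigned *wordIdx, unsigned *bitCnt) {
      unsigned bits = (unsigned)byteOff * 8; /* shll $3 */
      *wordIdx = (bits >> 5) & 7;            /* shrb $5 on the low byte */
      *bitCnt  = bits & 31;                  /* consumed via %cl */
    }
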
@@ -1038,64 +1355,141 @@ define void @load_1byte_chunk_of_32byte_alloca(ptr %src, i64 %byteOff, ptr %dst)
}
define void @load_2byte_chunk_of_32byte_alloca(ptr %src, i64 %byteOff, ptr %dst) nounwind {
-; X64-LABEL: load_2byte_chunk_of_32byte_alloca:
-; X64: # %bb.0:
-; X64-NEXT: movdqu (%rdi), %xmm0
-; X64-NEXT: movdqu 16(%rdi), %xmm1
-; X64-NEXT: shll $3, %esi
-; X64-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
-; X64-NEXT: pshufd {{.*#+}} xmm3 = xmm1[2,3,2,3]
-; X64-NEXT: movq %xmm1, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq %xmm0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq %xmm3, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq %xmm2, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: shrb $3, %sil
-; X64-NEXT: movzbl %sil, %eax
-; X64-NEXT: movq -64(%rsp,%rax), %rax
-; X64-NEXT: movw %ax, (%rdx)
-; X64-NEXT: retq
-;
-; X86-LABEL: load_2byte_chunk_of_32byte_alloca:
-; X86: # %bb.0:
-; X86-NEXT: subl $64, %esp
-; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X86-NEXT: movdqu (%edx), %xmm0
-; X86-NEXT: movdqu 16(%edx), %xmm1
-; X86-NEXT: shll $3, %ecx
-; X86-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,1,1]
-; X86-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,2,3]
-; X86-NEXT: pshufd {{.*#+}} xmm4 = xmm0[3,3,3,3]
-; X86-NEXT: pshufd {{.*#+}} xmm5 = xmm1[1,1,1,1]
-; X86-NEXT: pshufd {{.*#+}} xmm6 = xmm1[2,3,2,3]
-; X86-NEXT: pshufd {{.*#+}} xmm7 = xmm1[3,3,3,3]
-; X86-NEXT: movd %xmm1, {{[0-9]+}}(%esp)
-; X86-NEXT: movd %xmm0, (%esp)
-; X86-NEXT: movd %xmm7, {{[0-9]+}}(%esp)
-; X86-NEXT: movd %xmm6, {{[0-9]+}}(%esp)
-; X86-NEXT: movd %xmm5, {{[0-9]+}}(%esp)
-; X86-NEXT: movd %xmm4, {{[0-9]+}}(%esp)
-; X86-NEXT: movd %xmm3, {{[0-9]+}}(%esp)
-; X86-NEXT: movd %xmm2, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: shrb $3, %cl
-; X86-NEXT: movzbl %cl, %ecx
-; X86-NEXT: movl (%esp,%ecx), %ecx
-; X86-NEXT: movw %cx, (%eax)
-; X86-NEXT: addl $64, %esp
-; X86-NEXT: retl
+; X64-NO-BMI2-LABEL: load_2byte_chunk_of_32byte_alloca:
+; X64-NO-BMI2: # %bb.0:
+; X64-NO-BMI2-NEXT: movups (%rdi), %xmm0
+; X64-NO-BMI2-NEXT: movups 16(%rdi), %xmm1
+; X64-NO-BMI2-NEXT: leal (,%rsi,8), %ecx
+; X64-NO-BMI2-NEXT: xorps %xmm2, %xmm2
+; X64-NO-BMI2-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-NEXT: movl %ecx, %eax
+; X64-NO-BMI2-NEXT: shrb $6, %al
+; X64-NO-BMI2-NEXT: movzbl %al, %eax
+; X64-NO-BMI2-NEXT: movq -72(%rsp,%rax,8), %rsi
+; X64-NO-BMI2-NEXT: shrq %cl, %rsi
+; X64-NO-BMI2-NEXT: movl -64(%rsp,%rax,8), %eax
+; X64-NO-BMI2-NEXT: addl %eax, %eax
+; X64-NO-BMI2-NEXT: andb $56, %cl
+; X64-NO-BMI2-NEXT: notb %cl
+; X64-NO-BMI2-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NO-BMI2-NEXT: shlq %cl, %rax
+; X64-NO-BMI2-NEXT: orl %esi, %eax
+; X64-NO-BMI2-NEXT: movw %ax, (%rdx)
+; X64-NO-BMI2-NEXT: retq
+;
+; X64-BMI2-LABEL: load_2byte_chunk_of_32byte_alloca:
+; X64-BMI2: # %bb.0:
+; X64-BMI2-NEXT: movups (%rdi), %xmm0
+; X64-BMI2-NEXT: movups 16(%rdi), %xmm1
+; X64-BMI2-NEXT: shll $3, %esi
+; X64-BMI2-NEXT: xorps %xmm2, %xmm2
+; X64-BMI2-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-BMI2-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-BMI2-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; X64-BMI2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; X64-BMI2-NEXT: movl %esi, %eax
+; X64-BMI2-NEXT: shrb $6, %al
+; X64-BMI2-NEXT: movzbl %al, %eax
+; X64-BMI2-NEXT: shrxq %rsi, -72(%rsp,%rax,8), %rcx
+; X64-BMI2-NEXT: # kill: def $sil killed $sil killed $rsi def $rsi
+; X64-BMI2-NEXT: andb $56, %sil
+; X64-BMI2-NEXT: notb %sil
+; X64-BMI2-NEXT: movl -64(%rsp,%rax,8), %eax
+; X64-BMI2-NEXT: addl %eax, %eax
+; X64-BMI2-NEXT: shlxq %rsi, %rax, %rax
+; X64-BMI2-NEXT: orl %eax, %ecx
+; X64-BMI2-NEXT: movw %cx, (%rdx)
+; X64-BMI2-NEXT: retq
+;
+; X86-NO-BMI2-NO-SHLD-LABEL: load_2byte_chunk_of_32byte_alloca:
+; X86-NO-BMI2-NO-SHLD: # %bb.0:
+; X86-NO-BMI2-NO-SHLD-NEXT: pushl %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: subl $72, %esp
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: movups (%edx), %xmm0
+; X86-NO-BMI2-NO-SHLD-NEXT: movups 16(%edx), %xmm1
+; X86-NO-BMI2-NO-SHLD-NEXT: shll $3, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: xorps %xmm2, %xmm2
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm0, (%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: shrb $5, %dl
+; X86-NO-BMI2-NO-SHLD-NEXT: movzbl %dl, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl (%esp,%edx,4), %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 4(%esp,%edx,4), %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: notb %cl
+; X86-NO-BMI2-NO-SHLD-NEXT: addl %edx, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %esi, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: movw %dx, (%eax)
+; X86-NO-BMI2-NO-SHLD-NEXT: addl $72, %esp
+; X86-NO-BMI2-NO-SHLD-NEXT: popl %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: retl
+;
+; X86-SHLD-LABEL: load_2byte_chunk_of_32byte_alloca:
+; X86-SHLD: # %bb.0:
+; X86-SHLD-NEXT: pushl %esi
+; X86-SHLD-NEXT: subl $72, %esp
+; X86-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-SHLD-NEXT: movups (%edx), %xmm0
+; X86-SHLD-NEXT: movups 16(%edx), %xmm1
+; X86-SHLD-NEXT: shll $3, %ecx
+; X86-SHLD-NEXT: xorps %xmm2, %xmm2
+; X86-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-SHLD-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-SHLD-NEXT: movaps %xmm0, (%esp)
+; X86-SHLD-NEXT: movl %ecx, %edx
+; X86-SHLD-NEXT: shrb $5, %dl
+; X86-SHLD-NEXT: movzbl %dl, %edx
+; X86-SHLD-NEXT: movl (%esp,%edx,4), %esi
+; X86-SHLD-NEXT: movl 4(%esp,%edx,4), %edx
+; X86-SHLD-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-SHLD-NEXT: shrdl %cl, %edx, %esi
+; X86-SHLD-NEXT: movw %si, (%eax)
+; X86-SHLD-NEXT: addl $72, %esp
+; X86-SHLD-NEXT: popl %esi
+; X86-SHLD-NEXT: retl
+;
+; X86-HAVE-BMI2-NO-SHLD-LABEL: load_2byte_chunk_of_32byte_alloca:
+; X86-HAVE-BMI2-NO-SHLD: # %bb.0:
+; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: subl $72, %esp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movups (%edx), %xmm0
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movups 16(%edx), %xmm1
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shll $3, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: xorps %xmm2, %xmm2
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm0, (%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrb $5, %dl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movzbl %dl, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, (%esp,%edx,4), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: notb %cl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 4(%esp,%edx,4), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %edx, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %edx, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movw %cx, (%eax)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl $72, %esp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: retl
%init = load <32 x i8>, ptr %src, align 1
%byteOff.numbits = shl nuw nsw i64 %byteOff, 3
%intermediate.val.frozen = freeze <32 x i8> %init
@@ -1108,64 +1502,141 @@ define void @load_2byte_chunk_of_32byte_alloca(ptr %src, i64 %byteOff, ptr %dst)
}
define void @load_4byte_chunk_of_32byte_alloca(ptr %src, i64 %byteOff, ptr %dst) nounwind {
-; X64-LABEL: load_4byte_chunk_of_32byte_alloca:
-; X64: # %bb.0:
-; X64-NEXT: movdqu (%rdi), %xmm0
-; X64-NEXT: movdqu 16(%rdi), %xmm1
-; X64-NEXT: shll $3, %esi
-; X64-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
-; X64-NEXT: pshufd {{.*#+}} xmm3 = xmm1[2,3,2,3]
-; X64-NEXT: movq %xmm1, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq %xmm0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq %xmm3, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq %xmm2, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: shrb $3, %sil
-; X64-NEXT: movzbl %sil, %eax
-; X64-NEXT: movl -64(%rsp,%rax), %eax
-; X64-NEXT: movl %eax, (%rdx)
-; X64-NEXT: retq
-;
-; X86-LABEL: load_4byte_chunk_of_32byte_alloca:
-; X86: # %bb.0:
-; X86-NEXT: subl $64, %esp
-; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X86-NEXT: movdqu (%edx), %xmm0
-; X86-NEXT: movdqu 16(%edx), %xmm1
-; X86-NEXT: shll $3, %ecx
-; X86-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,1,1]
-; X86-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,2,3]
-; X86-NEXT: pshufd {{.*#+}} xmm4 = xmm0[3,3,3,3]
-; X86-NEXT: pshufd {{.*#+}} xmm5 = xmm1[1,1,1,1]
-; X86-NEXT: pshufd {{.*#+}} xmm6 = xmm1[2,3,2,3]
-; X86-NEXT: pshufd {{.*#+}} xmm7 = xmm1[3,3,3,3]
-; X86-NEXT: movd %xmm1, {{[0-9]+}}(%esp)
-; X86-NEXT: movd %xmm0, (%esp)
-; X86-NEXT: movd %xmm7, {{[0-9]+}}(%esp)
-; X86-NEXT: movd %xmm6, {{[0-9]+}}(%esp)
-; X86-NEXT: movd %xmm5, {{[0-9]+}}(%esp)
-; X86-NEXT: movd %xmm4, {{[0-9]+}}(%esp)
-; X86-NEXT: movd %xmm3, {{[0-9]+}}(%esp)
-; X86-NEXT: movd %xmm2, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: shrb $3, %cl
-; X86-NEXT: movzbl %cl, %ecx
-; X86-NEXT: movl (%esp,%ecx), %ecx
-; X86-NEXT: movl %ecx, (%eax)
-; X86-NEXT: addl $64, %esp
-; X86-NEXT: retl
+; X64-NO-BMI2-LABEL: load_4byte_chunk_of_32byte_alloca:
+; X64-NO-BMI2: # %bb.0:
+; X64-NO-BMI2-NEXT: movups (%rdi), %xmm0
+; X64-NO-BMI2-NEXT: movups 16(%rdi), %xmm1
+; X64-NO-BMI2-NEXT: leal (,%rsi,8), %ecx
+; X64-NO-BMI2-NEXT: xorps %xmm2, %xmm2
+; X64-NO-BMI2-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-NEXT: movl %ecx, %eax
+; X64-NO-BMI2-NEXT: shrb $6, %al
+; X64-NO-BMI2-NEXT: movzbl %al, %eax
+; X64-NO-BMI2-NEXT: movq -72(%rsp,%rax,8), %rsi
+; X64-NO-BMI2-NEXT: shrq %cl, %rsi
+; X64-NO-BMI2-NEXT: movl -64(%rsp,%rax,8), %eax
+; X64-NO-BMI2-NEXT: addl %eax, %eax
+; X64-NO-BMI2-NEXT: andb $56, %cl
+; X64-NO-BMI2-NEXT: notb %cl
+; X64-NO-BMI2-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NO-BMI2-NEXT: shlq %cl, %rax
+; X64-NO-BMI2-NEXT: orl %esi, %eax
+; X64-NO-BMI2-NEXT: movl %eax, (%rdx)
+; X64-NO-BMI2-NEXT: retq
+;
+; X64-BMI2-LABEL: load_4byte_chunk_of_32byte_alloca:
+; X64-BMI2: # %bb.0:
+; X64-BMI2-NEXT: movups (%rdi), %xmm0
+; X64-BMI2-NEXT: movups 16(%rdi), %xmm1
+; X64-BMI2-NEXT: shll $3, %esi
+; X64-BMI2-NEXT: xorps %xmm2, %xmm2
+; X64-BMI2-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-BMI2-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-BMI2-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; X64-BMI2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; X64-BMI2-NEXT: movl %esi, %eax
+; X64-BMI2-NEXT: shrb $6, %al
+; X64-BMI2-NEXT: movzbl %al, %eax
+; X64-BMI2-NEXT: shrxq %rsi, -72(%rsp,%rax,8), %rcx
+; X64-BMI2-NEXT: # kill: def $sil killed $sil killed $rsi def $rsi
+; X64-BMI2-NEXT: andb $56, %sil
+; X64-BMI2-NEXT: notb %sil
+; X64-BMI2-NEXT: movl -64(%rsp,%rax,8), %eax
+; X64-BMI2-NEXT: addl %eax, %eax
+; X64-BMI2-NEXT: shlxq %rsi, %rax, %rax
+; X64-BMI2-NEXT: orl %eax, %ecx
+; X64-BMI2-NEXT: movl %ecx, (%rdx)
+; X64-BMI2-NEXT: retq
+;
+; X86-NO-BMI2-NO-SHLD-LABEL: load_4byte_chunk_of_32byte_alloca:
+; X86-NO-BMI2-NO-SHLD: # %bb.0:
+; X86-NO-BMI2-NO-SHLD-NEXT: pushl %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: subl $72, %esp
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: movups (%edx), %xmm0
+; X86-NO-BMI2-NO-SHLD-NEXT: movups 16(%edx), %xmm1
+; X86-NO-BMI2-NO-SHLD-NEXT: shll $3, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: xorps %xmm2, %xmm2
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm0, (%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ecx, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: shrb $5, %dl
+; X86-NO-BMI2-NO-SHLD-NEXT: movzbl %dl, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl (%esp,%edx,4), %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 4(%esp,%edx,4), %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: notb %cl
+; X86-NO-BMI2-NO-SHLD-NEXT: addl %edx, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %esi, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %edx, (%eax)
+; X86-NO-BMI2-NO-SHLD-NEXT: addl $72, %esp
+; X86-NO-BMI2-NO-SHLD-NEXT: popl %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: retl
+;
+; X86-SHLD-LABEL: load_4byte_chunk_of_32byte_alloca:
+; X86-SHLD: # %bb.0:
+; X86-SHLD-NEXT: pushl %esi
+; X86-SHLD-NEXT: subl $72, %esp
+; X86-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-SHLD-NEXT: movups (%edx), %xmm0
+; X86-SHLD-NEXT: movups 16(%edx), %xmm1
+; X86-SHLD-NEXT: shll $3, %ecx
+; X86-SHLD-NEXT: xorps %xmm2, %xmm2
+; X86-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-SHLD-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-SHLD-NEXT: movaps %xmm0, (%esp)
+; X86-SHLD-NEXT: movl %ecx, %edx
+; X86-SHLD-NEXT: shrb $5, %dl
+; X86-SHLD-NEXT: movzbl %dl, %edx
+; X86-SHLD-NEXT: movl (%esp,%edx,4), %esi
+; X86-SHLD-NEXT: movl 4(%esp,%edx,4), %edx
+; X86-SHLD-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-SHLD-NEXT: shrdl %cl, %edx, %esi
+; X86-SHLD-NEXT: movl %esi, (%eax)
+; X86-SHLD-NEXT: addl $72, %esp
+; X86-SHLD-NEXT: popl %esi
+; X86-SHLD-NEXT: retl
+;
+; X86-HAVE-BMI2-NO-SHLD-LABEL: load_4byte_chunk_of_32byte_alloca:
+; X86-HAVE-BMI2-NO-SHLD: # %bb.0:
+; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: subl $72, %esp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movups (%edx), %xmm0
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movups 16(%edx), %xmm1
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shll $3, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: xorps %xmm2, %xmm2
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm0, (%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrb $5, %dl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movzbl %dl, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, (%esp,%edx,4), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: notb %cl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 4(%esp,%edx,4), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %edx, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %edx, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, (%eax)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl $72, %esp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: retl
%init = load <32 x i8>, ptr %src, align 1
%byteOff.numbits = shl nuw nsw i64 %byteOff, 3
%intermediate.val.frozen = freeze <32 x i8> %init
@@ -1178,66 +1649,197 @@ define void @load_4byte_chunk_of_32byte_alloca(ptr %src, i64 %byteOff, ptr %dst)
}
define void @load_8byte_chunk_of_32byte_alloca(ptr %src, i64 %byteOff, ptr %dst) nounwind {
-; X64-LABEL: load_8byte_chunk_of_32byte_alloca:
-; X64: # %bb.0:
-; X64-NEXT: movdqu (%rdi), %xmm0
-; X64-NEXT: movdqu 16(%rdi), %xmm1
-; X64-NEXT: shll $3, %esi
-; X64-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
-; X64-NEXT: pshufd {{.*#+}} xmm3 = xmm1[2,3,2,3]
-; X64-NEXT: movq %xmm1, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq %xmm0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq %xmm3, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq %xmm2, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: shrb $3, %sil
-; X64-NEXT: movzbl %sil, %eax
-; X64-NEXT: movq -64(%rsp,%rax), %rax
-; X64-NEXT: movq %rax, (%rdx)
-; X64-NEXT: retq
-;
-; X86-LABEL: load_8byte_chunk_of_32byte_alloca:
-; X86: # %bb.0:
-; X86-NEXT: subl $64, %esp
-; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X86-NEXT: movdqu (%edx), %xmm0
-; X86-NEXT: movdqu 16(%edx), %xmm1
-; X86-NEXT: shll $3, %ecx
-; X86-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,1,1]
-; X86-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,2,3]
-; X86-NEXT: pshufd {{.*#+}} xmm4 = xmm0[3,3,3,3]
-; X86-NEXT: pshufd {{.*#+}} xmm5 = xmm1[1,1,1,1]
-; X86-NEXT: pshufd {{.*#+}} xmm6 = xmm1[2,3,2,3]
-; X86-NEXT: pshufd {{.*#+}} xmm7 = xmm1[3,3,3,3]
-; X86-NEXT: movd %xmm1, {{[0-9]+}}(%esp)
-; X86-NEXT: movd %xmm0, (%esp)
-; X86-NEXT: movd %xmm7, {{[0-9]+}}(%esp)
-; X86-NEXT: movd %xmm6, {{[0-9]+}}(%esp)
-; X86-NEXT: movd %xmm5, {{[0-9]+}}(%esp)
-; X86-NEXT: movd %xmm4, {{[0-9]+}}(%esp)
-; X86-NEXT: movd %xmm3, {{[0-9]+}}(%esp)
-; X86-NEXT: movd %xmm2, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: shrb $3, %cl
-; X86-NEXT: movzbl %cl, %ecx
-; X86-NEXT: movl (%esp,%ecx), %edx
-; X86-NEXT: movl 4(%esp,%ecx), %ecx
-; X86-NEXT: movl %ecx, 4(%eax)
-; X86-NEXT: movl %edx, (%eax)
-; X86-NEXT: addl $64, %esp
-; X86-NEXT: retl
+; X64-NO-BMI2-NO-SHLD-LABEL: load_8byte_chunk_of_32byte_alloca:
+; X64-NO-BMI2-NO-SHLD: # %bb.0:
+; X64-NO-BMI2-NO-SHLD-NEXT: movups (%rdi), %xmm0
+; X64-NO-BMI2-NO-SHLD-NEXT: movups 16(%rdi), %xmm1
+; X64-NO-BMI2-NO-SHLD-NEXT: leal (,%rsi,8), %ecx
+; X64-NO-BMI2-NO-SHLD-NEXT: xorps %xmm2, %xmm2
+; X64-NO-BMI2-NO-SHLD-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-NO-SHLD-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-NO-SHLD-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-NO-SHLD-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-NO-SHLD-NEXT: movl %ecx, %eax
+; X64-NO-BMI2-NO-SHLD-NEXT: shrb $6, %al
+; X64-NO-BMI2-NO-SHLD-NEXT: movzbl %al, %eax
+; X64-NO-BMI2-NO-SHLD-NEXT: movq -72(%rsp,%rax,8), %rsi
+; X64-NO-BMI2-NO-SHLD-NEXT: movq -64(%rsp,%rax,8), %rax
+; X64-NO-BMI2-NO-SHLD-NEXT: shrq %cl, %rsi
+; X64-NO-BMI2-NO-SHLD-NEXT: notb %cl
+; X64-NO-BMI2-NO-SHLD-NEXT: addq %rax, %rax
+; X64-NO-BMI2-NO-SHLD-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NO-BMI2-NO-SHLD-NEXT: shlq %cl, %rax
+; X64-NO-BMI2-NO-SHLD-NEXT: orq %rsi, %rax
+; X64-NO-BMI2-NO-SHLD-NEXT: movq %rax, (%rdx)
+; X64-NO-BMI2-NO-SHLD-NEXT: retq
+;
+; X64-SHLD-LABEL: load_8byte_chunk_of_32byte_alloca:
+; X64-SHLD: # %bb.0:
+; X64-SHLD-NEXT: movups (%rdi), %xmm0
+; X64-SHLD-NEXT: movups 16(%rdi), %xmm1
+; X64-SHLD-NEXT: leal (,%rsi,8), %ecx
+; X64-SHLD-NEXT: xorps %xmm2, %xmm2
+; X64-SHLD-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-SHLD-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-SHLD-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; X64-SHLD-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; X64-SHLD-NEXT: movl %ecx, %eax
+; X64-SHLD-NEXT: shrb $6, %al
+; X64-SHLD-NEXT: movzbl %al, %eax
+; X64-SHLD-NEXT: movq -72(%rsp,%rax,8), %rsi
+; X64-SHLD-NEXT: movq -64(%rsp,%rax,8), %rax
+; X64-SHLD-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-SHLD-NEXT: shrdq %cl, %rax, %rsi
+; X64-SHLD-NEXT: movq %rsi, (%rdx)
+; X64-SHLD-NEXT: retq
+;
+; X64-HAVE-BMI2-NO-SHLD-LABEL: load_8byte_chunk_of_32byte_alloca:
+; X64-HAVE-BMI2-NO-SHLD: # %bb.0:
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movups (%rdi), %xmm0
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movups 16(%rdi), %xmm1
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shll $3, %esi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: xorps %xmm2, %xmm2
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, %eax
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrb $6, %al
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movzbl %al, %eax
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rsi, -72(%rsp,%rax,8), %rcx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: notb %sil
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -64(%rsp,%rax,8), %rax
+; X64-HAVE-BMI2-NO-SHLD-NEXT: addq %rax, %rax
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rsi, %rax, %rax
+; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %rcx, %rax
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rax, (%rdx)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: retq
+;
+; X86-NO-BMI2-NO-SHLD-LABEL: load_8byte_chunk_of_32byte_alloca:
+; X86-NO-BMI2-NO-SHLD: # %bb.0:
+; X86-NO-BMI2-NO-SHLD-NEXT: pushl %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: pushl %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: pushl %edi
+; X86-NO-BMI2-NO-SHLD-NEXT: pushl %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: subl $76, %esp
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: movups (%ecx), %xmm0
+; X86-NO-BMI2-NO-SHLD-NEXT: movups 16(%ecx), %xmm1
+; X86-NO-BMI2-NO-SHLD-NEXT: shll $3, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: xorps %xmm2, %xmm2
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm0, (%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: shrb $5, %cl
+; X86-NO-BMI2-NO-SHLD-NEXT: movzbl %cl, %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl (%esp,%ebx,4), %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 4(%esp,%ebx,4), %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: notb %cl
+; X86-NO-BMI2-NO-SHLD-NEXT: leal (%esi,%esi), %edi
+; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %edi
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %ebp, %edi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: andb $24, %al
+; X86-NO-BMI2-NO-SHLD-NEXT: notb %al
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 8(%esp,%ebx,4), %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: addl %ebx, %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %esi, %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebx, 4(%edx)
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %edi, (%edx)
+; X86-NO-BMI2-NO-SHLD-NEXT: addl $76, %esp
+; X86-NO-BMI2-NO-SHLD-NEXT: popl %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: popl %edi
+; X86-NO-BMI2-NO-SHLD-NEXT: popl %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: popl %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: retl
+;
+; X86-SHLD-LABEL: load_8byte_chunk_of_32byte_alloca:
+; X86-SHLD: # %bb.0:
+; X86-SHLD-NEXT: pushl %ebx
+; X86-SHLD-NEXT: pushl %edi
+; X86-SHLD-NEXT: pushl %esi
+; X86-SHLD-NEXT: subl $64, %esp
+; X86-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-SHLD-NEXT: movups (%edx), %xmm0
+; X86-SHLD-NEXT: movups 16(%edx), %xmm1
+; X86-SHLD-NEXT: shll $3, %ecx
+; X86-SHLD-NEXT: xorps %xmm2, %xmm2
+; X86-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-SHLD-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-SHLD-NEXT: movaps %xmm0, (%esp)
+; X86-SHLD-NEXT: movl %ecx, %edx
+; X86-SHLD-NEXT: shrb $5, %dl
+; X86-SHLD-NEXT: movzbl %dl, %edx
+; X86-SHLD-NEXT: movl 8(%esp,%edx,4), %esi
+; X86-SHLD-NEXT: movl (%esp,%edx,4), %edi
+; X86-SHLD-NEXT: movl 4(%esp,%edx,4), %edx
+; X86-SHLD-NEXT: movl %edx, %ebx
+; X86-SHLD-NEXT: shrdl %cl, %esi, %ebx
+; X86-SHLD-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-SHLD-NEXT: shrdl %cl, %edx, %edi
+; X86-SHLD-NEXT: movl %ebx, 4(%eax)
+; X86-SHLD-NEXT: movl %edi, (%eax)
+; X86-SHLD-NEXT: addl $64, %esp
+; X86-SHLD-NEXT: popl %esi
+; X86-SHLD-NEXT: popl %edi
+; X86-SHLD-NEXT: popl %ebx
+; X86-SHLD-NEXT: retl
+;
+; X86-HAVE-BMI2-NO-SHLD-LABEL: load_8byte_chunk_of_32byte_alloca:
+; X86-HAVE-BMI2-NO-SHLD: # %bb.0:
+; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: subl $76, %esp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movups (%edx), %xmm0
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movups 16(%edx), %xmm1
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shll $3, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: xorps %xmm2, %xmm2
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm0, (%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrb $5, %dl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movzbl %dl, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, (%esp,%edx,4), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: notb %bl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 4(%esp,%edx,4), %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 8(%esp,%edx,4), %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%edi,%edi), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebx, %edx, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, %edi, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: andb $24, %cl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: notb %cl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %ebp, %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %ebp, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 4(%eax)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, (%eax)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl $76, %esp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: retl
%init = load <32 x i8>, ptr %src, align 1
%byteOff.numbits = shl nuw nsw i64 %byteOff, 3
%intermediate.val.frozen = freeze <32 x i8> %init
@@ -1250,76 +1852,295 @@ define void @load_8byte_chunk_of_32byte_alloca(ptr %src, i64 %byteOff, ptr %dst)
}
define void @load_16byte_chunk_of_32byte_alloca(ptr %src, i64 %byteOff, ptr %dst) nounwind {
-; X64-LABEL: load_16byte_chunk_of_32byte_alloca:
-; X64: # %bb.0:
-; X64-NEXT: movdqu (%rdi), %xmm0
-; X64-NEXT: movdqu 16(%rdi), %xmm1
-; X64-NEXT: shll $3, %esi
-; X64-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
-; X64-NEXT: pshufd {{.*#+}} xmm3 = xmm1[2,3,2,3]
-; X64-NEXT: movq %xmm1, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq %xmm0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq %xmm3, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq %xmm2, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: shrb $3, %sil
-; X64-NEXT: movzbl %sil, %eax
-; X64-NEXT: movq -64(%rsp,%rax), %rcx
-; X64-NEXT: movq -56(%rsp,%rax), %rax
-; X64-NEXT: movq %rax, 8(%rdx)
-; X64-NEXT: movq %rcx, (%rdx)
-; X64-NEXT: retq
-;
-; X86-LABEL: load_16byte_chunk_of_32byte_alloca:
-; X86: # %bb.0:
-; X86-NEXT: pushl %edi
-; X86-NEXT: pushl %esi
-; X86-NEXT: subl $64, %esp
-; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X86-NEXT: movdqu (%edx), %xmm0
-; X86-NEXT: movdqu 16(%edx), %xmm1
-; X86-NEXT: shll $3, %ecx
-; X86-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,1,1]
-; X86-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,2,3]
-; X86-NEXT: pshufd {{.*#+}} xmm4 = xmm0[3,3,3,3]
-; X86-NEXT: pshufd {{.*#+}} xmm5 = xmm1[1,1,1,1]
-; X86-NEXT: pshufd {{.*#+}} xmm6 = xmm1[2,3,2,3]
-; X86-NEXT: pshufd {{.*#+}} xmm7 = xmm1[3,3,3,3]
-; X86-NEXT: movd %xmm1, {{[0-9]+}}(%esp)
-; X86-NEXT: movd %xmm0, (%esp)
-; X86-NEXT: movd %xmm7, {{[0-9]+}}(%esp)
-; X86-NEXT: movd %xmm6, {{[0-9]+}}(%esp)
-; X86-NEXT: movd %xmm5, {{[0-9]+}}(%esp)
-; X86-NEXT: movd %xmm4, {{[0-9]+}}(%esp)
-; X86-NEXT: movd %xmm3, {{[0-9]+}}(%esp)
-; X86-NEXT: movd %xmm2, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: shrb $3, %cl
-; X86-NEXT: movzbl %cl, %ecx
-; X86-NEXT: movl (%esp,%ecx), %edx
-; X86-NEXT: movl 4(%esp,%ecx), %esi
-; X86-NEXT: movl 8(%esp,%ecx), %edi
-; X86-NEXT: movl 12(%esp,%ecx), %ecx
-; X86-NEXT: movl %ecx, 12(%eax)
-; X86-NEXT: movl %edi, 8(%eax)
-; X86-NEXT: movl %esi, 4(%eax)
-; X86-NEXT: movl %edx, (%eax)
-; X86-NEXT: addl $64, %esp
-; X86-NEXT: popl %esi
-; X86-NEXT: popl %edi
-; X86-NEXT: retl
+; X64-NO-BMI2-NO-SHLD-LABEL: load_16byte_chunk_of_32byte_alloca:
+; X64-NO-BMI2-NO-SHLD: # %bb.0:
+; X64-NO-BMI2-NO-SHLD-NEXT: movups (%rdi), %xmm0
+; X64-NO-BMI2-NO-SHLD-NEXT: movups 16(%rdi), %xmm1
+; X64-NO-BMI2-NO-SHLD-NEXT: leal (,%rsi,8), %eax
+; X64-NO-BMI2-NO-SHLD-NEXT: xorps %xmm2, %xmm2
+; X64-NO-BMI2-NO-SHLD-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-NO-SHLD-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-NO-SHLD-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-NO-SHLD-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
+; X64-NO-BMI2-NO-SHLD-NEXT: shrb $6, %cl
+; X64-NO-BMI2-NO-SHLD-NEXT: movzbl %cl, %edi
+; X64-NO-BMI2-NO-SHLD-NEXT: movq -72(%rsp,%rdi,8), %r8
+; X64-NO-BMI2-NO-SHLD-NEXT: movq -64(%rsp,%rdi,8), %r9
+; X64-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
+; X64-NO-BMI2-NO-SHLD-NEXT: shrq %cl, %r8
+; X64-NO-BMI2-NO-SHLD-NEXT: movl %eax, %esi
+; X64-NO-BMI2-NO-SHLD-NEXT: notb %sil
+; X64-NO-BMI2-NO-SHLD-NEXT: leaq (%r9,%r9), %r10
+; X64-NO-BMI2-NO-SHLD-NEXT: movl %esi, %ecx
+; X64-NO-BMI2-NO-SHLD-NEXT: shlq %cl, %r10
+; X64-NO-BMI2-NO-SHLD-NEXT: orq %r8, %r10
+; X64-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
+; X64-NO-BMI2-NO-SHLD-NEXT: shrq %cl, %r9
+; X64-NO-BMI2-NO-SHLD-NEXT: movq -56(%rsp,%rdi,8), %rax
+; X64-NO-BMI2-NO-SHLD-NEXT: addq %rax, %rax
+; X64-NO-BMI2-NO-SHLD-NEXT: movl %esi, %ecx
+; X64-NO-BMI2-NO-SHLD-NEXT: shlq %cl, %rax
+; X64-NO-BMI2-NO-SHLD-NEXT: orq %r9, %rax
+; X64-NO-BMI2-NO-SHLD-NEXT: movq %rax, 8(%rdx)
+; X64-NO-BMI2-NO-SHLD-NEXT: movq %r10, (%rdx)
+; X64-NO-BMI2-NO-SHLD-NEXT: retq
+;
+; X64-NO-BMI2-HAVE-SHLD-LABEL: load_16byte_chunk_of_32byte_alloca:
+; X64-NO-BMI2-HAVE-SHLD: # %bb.0:
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movups (%rdi), %xmm0
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movups 16(%rdi), %xmm1
+; X64-NO-BMI2-HAVE-SHLD-NEXT: leal (,%rsi,8), %eax
+; X64-NO-BMI2-HAVE-SHLD-NEXT: xorps %xmm2, %xmm2
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, %ecx
+; X64-NO-BMI2-HAVE-SHLD-NEXT: shrb $6, %cl
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movzbl %cl, %esi
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq -72(%rsp,%rsi,8), %rdi
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq -64(%rsp,%rsi,8), %r8
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %r8, %r9
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, %ecx
+; X64-NO-BMI2-HAVE-SHLD-NEXT: shrq %cl, %r9
+; X64-NO-BMI2-HAVE-SHLD-NEXT: notb %cl
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq -56(%rsp,%rsi,8), %rsi
+; X64-NO-BMI2-HAVE-SHLD-NEXT: addq %rsi, %rsi
+; X64-NO-BMI2-HAVE-SHLD-NEXT: shlq %cl, %rsi
+; X64-NO-BMI2-HAVE-SHLD-NEXT: orq %r9, %rsi
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movl %eax, %ecx
+; X64-NO-BMI2-HAVE-SHLD-NEXT: shrdq %cl, %r8, %rdi
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %rdi, (%rdx)
+; X64-NO-BMI2-HAVE-SHLD-NEXT: movq %rsi, 8(%rdx)
+; X64-NO-BMI2-HAVE-SHLD-NEXT: retq
+;
+; X64-HAVE-BMI2-NO-SHLD-LABEL: load_16byte_chunk_of_32byte_alloca:
+; X64-HAVE-BMI2-NO-SHLD: # %bb.0:
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movups (%rdi), %xmm0
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movups 16(%rdi), %xmm1
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shll $3, %esi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: xorps %xmm2, %xmm2
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, %eax
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrb $6, %al
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movzbl %al, %eax
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rsi, -72(%rsp,%rax,8), %rcx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -64(%rsp,%rax,8), %rdi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rsi, %rdi, %r8
+; X64-HAVE-BMI2-NO-SHLD-NEXT: # kill: def $sil killed $sil killed $rsi def $rsi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: notb %sil
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -56(%rsp,%rax,8), %rax
+; X64-HAVE-BMI2-NO-SHLD-NEXT: addq %rdi, %rdi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rsi, %rdi, %rdi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %rcx, %rdi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: addq %rax, %rax
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rsi, %rax, %rax
+; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r8, %rax
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rax, 8(%rdx)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rdi, (%rdx)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: retq
+;
+; X64-HAVE-BMI2-HAVE-SHLD-LABEL: load_16byte_chunk_of_32byte_alloca:
+; X64-HAVE-BMI2-HAVE-SHLD: # %bb.0:
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movups (%rdi), %xmm0
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movups 16(%rdi), %xmm1
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: leal (,%rsi,8), %ecx
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: xorps %xmm2, %xmm2
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, %eax
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shrb $6, %al
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movzbl %al, %eax
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq -72(%rsp,%rax,8), %rsi
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq -64(%rsp,%rax,8), %rdi
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shrxq %rcx, %rdi, %r8
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movl %ecx, %r9d
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: notb %r9b
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq -56(%rsp,%rax,8), %rax
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: addq %rax, %rax
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shlxq %r9, %rax, %rax
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: orq %r8, %rax
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: # kill: def $cl killed $cl killed $rcx
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: shrdq %cl, %rdi, %rsi
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %rsi, (%rdx)
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: movq %rax, 8(%rdx)
+; X64-HAVE-BMI2-HAVE-SHLD-NEXT: retq
+;
+; X86-NO-BMI2-NO-SHLD-LABEL: load_16byte_chunk_of_32byte_alloca:
+; X86-NO-BMI2-NO-SHLD: # %bb.0:
+; X86-NO-BMI2-NO-SHLD-NEXT: pushl %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: pushl %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: pushl %edi
+; X86-NO-BMI2-NO-SHLD-NEXT: pushl %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: subl $92, %esp
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: movups (%ecx), %xmm0
+; X86-NO-BMI2-NO-SHLD-NEXT: movups 16(%ecx), %xmm1
+; X86-NO-BMI2-NO-SHLD-NEXT: shll $3, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: xorps %xmm2, %xmm2
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: shrb $5, %cl
+; X86-NO-BMI2-NO-SHLD-NEXT: movzbl %cl, %edi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 16(%esp,%edi,4), %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 20(%esp,%edi,4), %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: notb %cl
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %cl, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
+; X86-NO-BMI2-NO-SHLD-NEXT: leal (%ebp,%ebp), %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %edx, %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %al, %ch
+; X86-NO-BMI2-NO-SHLD-NEXT: andb $24, %ch
+; X86-NO-BMI2-NO-SHLD-NEXT: xorb $31, %ch
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 24(%esp,%edi,4), %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: leal (%edx,%edx), %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %ch, %cl
+; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %ebp, %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %al, %cl
+; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 28(%esp,%edi,4), %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: leal (%esi,%esi), %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: movb {{[-0-9]+}}(%e{{[sb]}}p), %cl # 1-byte Reload
+; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %edx, %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %al, %cl
+; X86-NO-BMI2-NO-SHLD-NEXT: shrl %cl, %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: movl 32(%esp,%edi,4), %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: addl %eax, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movb %ch, %cl
+; X86-NO-BMI2-NO-SHLD-NEXT: shll %cl, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: orl %esi, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, 12(%ecx)
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebp, 8(%ecx)
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %ebx, 4(%ecx)
+; X86-NO-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-NO-BMI2-NO-SHLD-NEXT: movl %eax, (%ecx)
+; X86-NO-BMI2-NO-SHLD-NEXT: addl $92, %esp
+; X86-NO-BMI2-NO-SHLD-NEXT: popl %esi
+; X86-NO-BMI2-NO-SHLD-NEXT: popl %edi
+; X86-NO-BMI2-NO-SHLD-NEXT: popl %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT: popl %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT: retl
+;
+; X86-SHLD-LABEL: load_16byte_chunk_of_32byte_alloca:
+; X86-SHLD: # %bb.0:
+; X86-SHLD-NEXT: pushl %ebp
+; X86-SHLD-NEXT: pushl %ebx
+; X86-SHLD-NEXT: pushl %edi
+; X86-SHLD-NEXT: pushl %esi
+; X86-SHLD-NEXT: subl $92, %esp
+; X86-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SHLD-NEXT: movups (%eax), %xmm0
+; X86-SHLD-NEXT: movups 16(%eax), %xmm1
+; X86-SHLD-NEXT: shll $3, %ecx
+; X86-SHLD-NEXT: xorps %xmm2, %xmm2
+; X86-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-SHLD-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-SHLD-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; X86-SHLD-NEXT: movl %ecx, %eax
+; X86-SHLD-NEXT: shrb $5, %al
+; X86-SHLD-NEXT: movzbl %al, %ebx
+; X86-SHLD-NEXT: movl 24(%esp,%ebx,4), %esi
+; X86-SHLD-NEXT: movl 16(%esp,%ebx,4), %eax
+; X86-SHLD-NEXT: movl 20(%esp,%ebx,4), %edi
+; X86-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-SHLD-NEXT: shrdl %cl, %esi, %edi
+; X86-SHLD-NEXT: movl 28(%esp,%ebx,4), %ebp
+; X86-SHLD-NEXT: shrdl %cl, %ebp, %esi
+; X86-SHLD-NEXT: movl 32(%esp,%ebx,4), %ebx
+; X86-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-SHLD-NEXT: shrdl %cl, %ebx, %ebp
+; X86-SHLD-NEXT: movl %ebp, 12(%edx)
+; X86-SHLD-NEXT: movl %esi, 8(%edx)
+; X86-SHLD-NEXT: movl %edi, 4(%edx)
+; X86-SHLD-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X86-SHLD-NEXT: shrdl %cl, %esi, %eax
+; X86-SHLD-NEXT: movl %eax, (%edx)
+; X86-SHLD-NEXT: addl $92, %esp
+; X86-SHLD-NEXT: popl %esi
+; X86-SHLD-NEXT: popl %edi
+; X86-SHLD-NEXT: popl %ebx
+; X86-SHLD-NEXT: popl %ebp
+; X86-SHLD-NEXT: retl
+;
+; X86-HAVE-BMI2-NO-SHLD-LABEL: load_16byte_chunk_of_32byte_alloca:
+; X86-HAVE-BMI2-NO-SHLD: # %bb.0:
+; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: subl $92, %esp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movups (%ecx), %xmm0
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movups 16(%ecx), %xmm1
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shll $3, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: xorps %xmm2, %xmm2
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrb $5, %cl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movzbl %cl, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, 16(%esp,%ecx,4), %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 20(%esp,%ecx,4), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, %esi, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: notb %dl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %esi, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %esi, %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edi, %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 24(%esp,%ecx,4), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: andb $24, %bl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: xorb $31, %bl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%esi,%esi), %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebx, %edi, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, %esi, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 28(%esp,%ecx,4), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, %esi, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %esi, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %esi, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 32(%esp,%ecx,4), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %ecx, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebx, %ecx, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %eax, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 12(%eax)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, 8(%eax)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edi, 4(%eax)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebp, (%eax)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl $92, %esp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: retl
%init = load <32 x i8>, ptr %src, align 1
%byteOff.numbits = shl nuw nsw i64 %byteOff, 3
%intermediate.val.frozen = freeze <32 x i8> %init
@@ -1334,7 +2155,7 @@ define void @load_16byte_chunk_of_32byte_alloca(ptr %src, i64 %byteOff, ptr %dst
; no @load_32byte_chunk_of_32byte_alloca
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; ALL: {{.*}}
+; X64: {{.*}}
; X64-NO-SHLD: {{.*}}
-; X64-SHLD: {{.*}}
+; X86: {{.*}}
; X86-NO-SHLD: {{.*}}
-; X86-SHLD: {{.*}}
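
The regenerated CHECK blocks above all verify one lowering shape: the 32-byte source is spilled into a zero-padded stack window, the byte offset is scaled to a bit offset (shll $3), the top bits of that offset select a word in the window (shrb $5 for 32-bit words, shrb $6 for 64-bit words), and the chunk is rebuilt from two adjacent words, either with a funnel shift (shrdl/shrdq) or with a shr/shl/or sequence (shrxl/shlxl when BMI2 is available). A minimal C model of the 4-byte case, assuming the window size seen in the asm; all names are illustrative, not taken from the test:

#include <stdint.h>
#include <string.h>

/* Model of the lowering checked above: copy the 32-byte value into a
   zero-padded window, pick the 32-bit word holding the chunk, and
   funnel-shift it out of the adjacent word pair. */
uint32_t load_4byte_chunk(const uint8_t *src, uint64_t byte_off) {
  uint32_t window[16] = {0};      /* 64-byte window, upper half zeroed */
  memcpy(window, src, 32);
  unsigned bits = (unsigned)(byte_off * 8); /* shll $3 */
  unsigned idx = bits >> 5;                 /* shrb $5: word index */
  unsigned sh = bits & 31;                  /* in-word shift amount */
  uint64_t pair = ((uint64_t)window[idx + 1] << 32) | window[idx];
  return (uint32_t)(pair >> sh);            /* shrdl, or shr/shl/or */
}

The & 31 mirrors how x86 masks 32-bit shift counts, which is why the generated code can pass the whole bit offset in %cl without clearing its upper bits.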
diff --git a/llvm/test/DebugInfo/Generic/debug-ranges-duplication.ll b/llvm/test/DebugInfo/Generic/debug-ranges-duplication.ll
index e9c2310..b31469e 100644
--- a/llvm/test/DebugInfo/Generic/debug-ranges-duplication.ll
+++ b/llvm/test/DebugInfo/Generic/debug-ranges-duplication.ll
@@ -1,5 +1,5 @@
; AIX doesn't currently support DWARF 5 section .debug_rnglists
-; XFAIL: target={{.*}}-aix{{.*}}
+; XFAIL: target={{.*}}-zos{{.*}}, target={{.*}}-aix{{.*}}
; RUN: %llc_dwarf -O0 -filetype=obj < %s | llvm-dwarfdump -debug-info - | FileCheck %s
;
diff --git a/llvm/test/DebugInfo/NVPTX/debug-info.ll b/llvm/test/DebugInfo/NVPTX/debug-info.ll
index 9948925..922a420 100644
--- a/llvm/test/DebugInfo/NVPTX/debug-info.ll
+++ b/llvm/test/DebugInfo/NVPTX/debug-info.ll
@@ -25,6 +25,10 @@
; CHECK-DAG: .reg .b64 %rd<8>;
; CHECK: .loc [[DEBUG_INFO_CU:[0-9]+]] 5 0
; CHECK: ld.param.u32 %r{{.+}}, [{{.+}}];
+; CHECK: ld.param.u64 %rd{{.+}}, [{{.+}}];
+; CHECK: cvta.to.global.u64 %rd{{.+}}, %rd{{.+}};
+; CHECK: ld.param.u64 %rd{{.+}}, [{{.+}}];
+; CHECK: cvta.to.global.u64 %rd{{.+}}, %rd{{.+}};
; CHECK: .loc [[BUILTUIN_VARS_H:[0-9]+]] 78 180
; CHECK: mov.u32 %r{{.+}}, %ctaid.x;
; CHECK: .loc [[BUILTUIN_VARS_H]] 89 180
@@ -38,10 +42,6 @@
; CHECK: .loc [[DEBUG_INFO_CU]] 7 7
; CHECK: @%p{{.+}} bra [[BB:\$L__.+]];
; CHECK: ld.param.f32 %f{{.+}}, [{{.+}}];
-; CHECK: ld.param.u64 %rd{{.+}}, [{{.+}}];
-; CHECK: cvta.to.global.u64 %rd{{.+}}, %rd{{.+}};
-; CHECK: ld.param.u64 %rd{{.+}}, [{{.+}}];
-; CHECK: cvta.to.global.u64 %rd{{.+}}, %rd{{.+}};
; CHECK: .loc [[DEBUG_INFO_CU]] 8 13
; CHECK: mul.wide.u32 %rd{{.+}}, %r{{.+}}, 4;
; CHECK: add.s64 %rd{{.+}}, %rd{{.+}}, %rd{{.+}};
@@ -2661,22 +2661,22 @@ if.end: ; preds = %if.then, %entry
; CHECK-NEXT:.b32 4579 // DW_AT_type
; CHECK-NEXT:.b8 25 // Abbrev [25] 0x8aa:0x18 DW_TAG_inlined_subroutine
; CHECK-NEXT:.b32 707 // DW_AT_abstract_origin
-; CHECK-NEXT:.b64 $L__tmp0 // DW_AT_low_pc
-; CHECK-NEXT:.b64 $L__tmp1 // DW_AT_high_pc
+; CHECK-NEXT:.b64 $L__tmp1 // DW_AT_low_pc
+; CHECK-NEXT:.b64 $L__tmp2 // DW_AT_high_pc
; CHECK-NEXT:.b8 1 // DW_AT_call_file
; CHECK-NEXT:.b8 6 // DW_AT_call_line
; CHECK-NEXT:.b8 11 // DW_AT_call_column
; CHECK-NEXT:.b8 25 // Abbrev [25] 0x8c2:0x18 DW_TAG_inlined_subroutine
; CHECK-NEXT:.b32 1466 // DW_AT_abstract_origin
-; CHECK-NEXT:.b64 $L__tmp1 // DW_AT_low_pc
-; CHECK-NEXT:.b64 $L__tmp2 // DW_AT_high_pc
+; CHECK-NEXT:.b64 $L__tmp2 // DW_AT_low_pc
+; CHECK-NEXT:.b64 $L__tmp3 // DW_AT_high_pc
; CHECK-NEXT:.b8 1 // DW_AT_call_file
; CHECK-NEXT:.b8 6 // DW_AT_call_line
; CHECK-NEXT:.b8 24 // DW_AT_call_column
; CHECK-NEXT:.b8 25 // Abbrev [25] 0x8da:0x18 DW_TAG_inlined_subroutine
; CHECK-NEXT:.b32 2060 // DW_AT_abstract_origin
-; CHECK-NEXT:.b64 $L__tmp2 // DW_AT_low_pc
-; CHECK-NEXT:.b64 $L__tmp3 // DW_AT_high_pc
+; CHECK-NEXT:.b64 $L__tmp3 // DW_AT_low_pc
+; CHECK-NEXT:.b64 $L__tmp4 // DW_AT_high_pc
; CHECK-NEXT:.b8 1 // DW_AT_call_file
; CHECK-NEXT:.b8 6 // DW_AT_call_line
; CHECK-NEXT:.b8 37 // DW_AT_call_column
diff --git a/llvm/test/ExecutionEngine/JITLink/x86-64/ELF_R_X86_64_PC8.s b/llvm/test/ExecutionEngine/JITLink/x86-64/ELF_R_X86_64_PC.s
index 46b851a..d88875e 100644
--- a/llvm/test/ExecutionEngine/JITLink/x86-64/ELF_R_X86_64_PC8.s
+++ b/llvm/test/ExecutionEngine/JITLink/x86-64/ELF_R_X86_64_PC.s
@@ -2,7 +2,7 @@
# RUN: -filetype=obj -o %t.o %s
# RUN: llvm-jitlink -noexec %t.o
#
-# Check R_X86_64_PC8 handling.
+# Check R_X86_64_PC* handling.
.text
.globl main
@@ -14,3 +14,6 @@ main:
.rodata
.byte main-. # Generate R_X86_64_PC8 relocation.
+ .short main-. # Generate R_X86_64_PC16 relocation.
+ .long main-. # Generate R_X86_64_PC32 relocation.
+ .quad main-. # Generate R_X86_64_PC64 relocation.
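
Each added directive stores the signed distance main - . at a different width, so the renamed test now exercises R_X86_64_PC8/16/32/64 together. For reference, a JIT linker resolves all four forms the same way: value = S + A - P, written at the field width, with an overflow check on the narrow forms. A hedged sketch of that fixup logic; the helper and its signature are illustrative, not JITLink's actual API:

#include <stdbool.h>
#include <stdint.h>

/* Apply a PC-relative data fixup of 1, 2, 4, or 8 bytes:
   value = S + A - P, range-checked for the sub-64-bit widths and
   written little-endian at the fixup address. */
static bool apply_pc_rel(uint8_t *fixup, uint64_t P, uint64_t S,
                         int64_t A, unsigned width_bytes) {
  int64_t value = (int64_t)(S + (uint64_t)A - P);
  if (width_bytes < 8) {
    int64_t lo = -(1LL << (width_bytes * 8 - 1));
    int64_t hi = (1LL << (width_bytes * 8 - 1)) - 1;
    if (value < lo || value > hi)
      return false;               /* fixup value out of range */
  }
  for (unsigned i = 0; i < width_bytes; ++i)
    fixup[i] = (uint8_t)((uint64_t)value >> (8 * i));
  return true;
}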
diff --git a/llvm/test/MC/AMDGPU/amdhsa-kd-kernarg-preload.s b/llvm/test/MC/AMDGPU/amdhsa-kd-kernarg-preload.s
new file mode 100644
index 0000000..f4ae23f
--- /dev/null
+++ b/llvm/test/MC/AMDGPU/amdhsa-kd-kernarg-preload.s
@@ -0,0 +1,21 @@
+// RUN: llvm-mc -triple amdgcn-amd-amdhsa -mcpu=gfx940 -filetype=obj < %s -o - | llvm-objdump -s -j .rodata - | FileCheck --check-prefix=OBJDUMP %s
+
+.amdgcn_target "amdgcn-amd-amdhsa--gfx940"
+
+.rodata
+
+// Account for preload kernarg SGPRs in KD field GRANULATED_WAVEFRONT_SGPR_COUNT.
+
+// OBJDUMP: Contents of section .rodata:
+// OBJDUMP-NEXT: 0000 00000000 00000000 00000000 00000000 ................
+// OBJDUMP-NEXT: 0010 00000000 00000000 00000000 00000000 ................
+// OBJDUMP-NEXT: 0020 00000000 00000000 00000000 00000000 ................
+// OBJDUMP-NOT: 0030 0000ac00 92000000 00000900 00000000 ................
+// OBJDUMP-NEXT: 0030 4000ac00 92000000 00000900 00000000 @...............
+
+.amdhsa_kernel amdhsa_kd_kernarg
+ .amdhsa_user_sgpr_kernarg_preload_length 9
+ .amdhsa_next_free_sgpr 0
+ .amdhsa_next_free_vgpr 0
+ .amdhsa_accum_offset 4
+.end_amdhsa_kernel
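
The expected 0x40 in the fourth .rodata row is the point of the test: with nine preloaded kernarg SGPRs, GRANULATED_WAVEFRONT_SGPR_COUNT in the kernel descriptor must come out nonzero even though .amdhsa_next_free_sgpr is 0. A minimal sketch of the granule arithmetic such fields encode; the granule size and exactly which SGPRs are counted are target-specific and treated as assumptions here:

/* Granulated register fields encode (granules - 1), where the used
   count is rounded up to the allocation granule. Inputs are assumed,
   not the assembler's definitive rule. */
unsigned granulated_count(unsigned regs_used, unsigned granule) {
  if (regs_used == 0)
    return 0;
  unsigned granules = (regs_used + granule - 1) / granule;
  return granules - 1;
}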
diff --git a/llvm/test/MC/AMDGPU/flat-global.s b/llvm/test/MC/AMDGPU/flat-global.s
index e81fae8..2ce613b 100644
--- a/llvm/test/MC/AMDGPU/flat-global.s
+++ b/llvm/test/MC/AMDGPU/flat-global.s
@@ -210,10 +210,9 @@ global_store_dword v3, v1, s[2:3] offset:-8
// GFX9: global_store_dword v3, v1, s[2:3] offset:-8 ; encoding: [0xf8,0x9f,0x70,0xdc,0x03,0x01,0x02,0x00]
// VI-ERR: :[[@LINE-3]]:1: error: instruction not supported on this GPU
-// XXX: Is this valid?
global_store_dword v3, v1, exec
-// GFX10: encoding: [0x00,0x80,0x70,0xdc,0x03,0x01,0x7e,0x00]
-// GFX9: global_store_dword v3, v1, exec ; encoding: [0x00,0x80,0x70,0xdc,0x03,0x01,0x7e,0x00]
+// GFX10-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
+// GFX9-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
// VI-ERR: :[[@LINE-3]]:{{[0-9]+}}: error: instruction not supported on this GPU
global_load_dword v1, v[3:4], s2
diff --git a/llvm/test/MC/AMDGPU/gfx10_flat_instructions_err.s b/llvm/test/MC/AMDGPU/gfx10_flat_instructions_err.s
new file mode 100644
index 0000000..193e91e
--- /dev/null
+++ b/llvm/test/MC/AMDGPU/gfx10_flat_instructions_err.s
@@ -0,0 +1,268 @@
+// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1010 %s 2>&1 | FileCheck --check-prefixes=GFX1010,GFX10 --implicit-check-not=error: %s
+// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1030 %s 2>&1 | FileCheck --check-prefixes=GFX1030,GFX10 --implicit-check-not=error: %s
+
+global_atomic_add v2, v4, null
+// GFX10: :[[@LINE-1]]:27: error: invalid operand for instruction
+
+global_atomic_add v0, v2, v4, null glc
+// GFX10: :[[@LINE-1]]:31: error: invalid operand for instruction
+
+global_atomic_add_x2 v2, v[4:5], null
+// GFX10: :[[@LINE-1]]:34: error: invalid operand for instruction
+
+global_atomic_add_x2 v[0:1], v2, v[4:5], null
+// GFX10: :[[@LINE-1]]:42: error: invalid operand for instruction
+
+global_atomic_and v2, v4, null
+// GFX10: :[[@LINE-1]]:27: error: invalid operand for instruction
+
+global_atomic_and v0, v2, v4, null
+// GFX10: :[[@LINE-1]]:31: error: invalid operand for instruction
+
+global_atomic_and_x2 v2, v[4:5], null
+// GFX10: :[[@LINE-1]]:34: error: invalid operand for instruction
+
+global_atomic_and_x2 v0, v2, v[4:5], null
+// GFX10: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
+
+global_atomic_cmpswap v2, v[4:5], null
+// GFX10: :[[@LINE-1]]:35: error: invalid operand for instruction
+
+global_atomic_cmpswap v0, v2, v[4:5], null
+// GFX10: :[[@LINE-1]]:39: error: invalid operand for instruction
+
+global_atomic_cmpswap_x2 v2, v[4:7], null
+// GFX10: :[[@LINE-1]]:38: error: invalid operand for instruction
+
+global_atomic_cmpswap_x2 v[0:1], v2, v[4:7], null
+// GFX10: :[[@LINE-1]]:46: error: invalid operand for instruction
+
+global_atomic_csub v2, v4, null
+// GFX1010: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// GFX1030: :[[@LINE-2]]:28: error: invalid operand for instruction
+
+global_atomic_csub v0, v2, v4, null
+// GFX1010: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+// GFX1030: :[[@LINE-2]]:32: error: invalid operand for instruction
+
+global_atomic_dec v2, v4, null
+// GFX10: :[[@LINE-1]]:27: error: invalid operand for instruction
+
+global_atomic_dec v0, v2, v4, null
+// GFX10: :[[@LINE-1]]:31: error: invalid operand for instruction
+
+global_atomic_dec_x2 v2, v[4:5], null
+// GFX10: :[[@LINE-1]]:34: error: invalid operand for instruction
+
+global_atomic_dec_x2 v[0:1], v2, v[4:5], null
+// GFX10: :[[@LINE-1]]:42: error: invalid operand for instruction
+
+global_atomic_fcmpswap v2, v[4:5], null
+// GFX10: :[[@LINE-1]]:36: error: invalid operand for instruction
+
+global_atomic_fcmpswap v0, v2, v[4:5], null
+// GFX10: :[[@LINE-1]]:40: error: invalid operand for instruction
+
+global_atomic_fcmpswap_x2 v2, v[4:7], null
+// GFX10: :[[@LINE-1]]:39: error: invalid operand for instruction
+
+global_atomic_fcmpswap_x2 v[0:1], v2, v[4:7], null
+// GFX10: :[[@LINE-1]]:47: error: invalid operand for instruction
+
+global_atomic_fmax v2, v4, null
+// GFX10: :[[@LINE-1]]:28: error: invalid operand for instruction
+
+global_atomic_fmax v0, v2, v4, null
+// GFX10: :[[@LINE-1]]:32: error: invalid operand for instruction
+
+global_atomic_fmax_x2 v2, v[4:5], null
+// GFX10: :[[@LINE-1]]:35: error: invalid operand for instruction
+
+global_atomic_fmax_x2 v[0:1], v2, v[4:5], null
+// GFX10: :[[@LINE-1]]:43: error: invalid operand for instruction
+
+global_atomic_fmin v2, v4, null
+// GFX10: :[[@LINE-1]]:28: error: invalid operand for instruction
+
+global_atomic_fmin v0, v2, v4, null
+// GFX10: :[[@LINE-1]]:32: error: invalid operand for instruction
+
+global_atomic_fmin_x2 v2, v[4:5], null
+// GFX10: :[[@LINE-1]]:35: error: invalid operand for instruction
+
+global_atomic_fmin_x2 v[0:1], v2, v[4:5], null
+// GFX10: :[[@LINE-1]]:43: error: invalid operand for instruction
+
+global_atomic_inc v2, v4, null
+// GFX10: :[[@LINE-1]]:27: error: invalid operand for instruction
+
+global_atomic_inc v0, v2, v4, null
+// GFX10: :[[@LINE-1]]:31: error: invalid operand for instruction
+
+global_atomic_inc_x2 v2, v[4:5], null
+// GFX10: :[[@LINE-1]]:34: error: invalid operand for instruction
+
+global_atomic_inc_x2 v[0:1], v2, v[4:5], null
+// GFX10: :[[@LINE-1]]:42: error: invalid operand for instruction
+
+global_atomic_or v2, v4, null
+// GFX10: :[[@LINE-1]]:26: error: invalid operand for instruction
+
+global_atomic_or v0, v2, v4, null
+// GFX10: :[[@LINE-1]]:30: error: invalid operand for instruction
+
+global_atomic_or_x2 v2, v[4:5], null
+// GFX10: :[[@LINE-1]]:33: error: invalid operand for instruction
+
+global_atomic_or_x2 v[0:1], v2, v[4:5], null
+// GFX10: :[[@LINE-1]]:41: error: invalid operand for instruction
+
+global_atomic_smax v2, v4, null
+// GFX10: :[[@LINE-1]]:28: error: invalid operand for instruction
+
+global_atomic_smax v0, v2, v4, null
+// GFX10: :[[@LINE-1]]:32: error: invalid operand for instruction
+
+global_atomic_smax_x2 v2, v[4:5], null
+// GFX10: :[[@LINE-1]]:35: error: invalid operand for instruction
+
+global_atomic_smax_x2 v[0:1], v2, v[4:5], null
+// GFX10: :[[@LINE-1]]:43: error: invalid operand for instruction
+
+global_atomic_smin v2, v4, null
+// GFX10: :[[@LINE-1]]:28: error: invalid operand for instruction
+
+global_atomic_smin v0, v2, v4, null
+// GFX10: :[[@LINE-1]]:32: error: invalid operand for instruction
+
+global_atomic_smin_x2 v2, v[4:5], null
+// GFX10: :[[@LINE-1]]:35: error: invalid operand for instruction
+
+global_atomic_smin_x2 v[0:1], v2, v[4:5], null
+// GFX10: :[[@LINE-1]]:43: error: invalid operand for instruction
+
+global_atomic_sub v2, v4, null
+// GFX10: :[[@LINE-1]]:27: error: invalid operand for instruction
+
+global_atomic_sub v0, v2, v4, null
+// GFX10: :[[@LINE-1]]:31: error: invalid operand for instruction
+
+global_atomic_sub_x2 v2, v[4:5], null
+// GFX10: :[[@LINE-1]]:34: error: invalid operand for instruction
+
+global_atomic_sub_x2 v[0:1], v2, v[4:5], null
+// GFX10: :[[@LINE-1]]:42: error: invalid operand for instruction
+
+global_atomic_swap v2, v4, null
+// GFX10: :[[@LINE-1]]:28: error: invalid operand for instruction
+
+global_atomic_swap v0, v2, v4, null
+// GFX10: :[[@LINE-1]]:32: error: invalid operand for instruction
+
+global_atomic_swap_x2 v2, v[4:5], null
+// GFX10: :[[@LINE-1]]:35: error: invalid operand for instruction
+
+global_atomic_swap_x2 v[0:1], v2, v[4:5], null
+// GFX10: :[[@LINE-1]]:43: error: invalid operand for instruction
+
+global_atomic_umax v2, v4, null
+// GFX10: :[[@LINE-1]]:28: error: invalid operand for instruction
+
+global_atomic_umax v0, v2, v4, null
+// GFX10: :[[@LINE-1]]:32: error: invalid operand for instruction
+
+global_atomic_umax_x2 v2, v[4:5], null
+// GFX10: :[[@LINE-1]]:35: error: invalid operand for instruction
+
+global_atomic_umax_x2 v[0:1], v2, v[4:5], null
+// GFX10: :[[@LINE-1]]:43: error: invalid operand for instruction
+
+global_atomic_umin v2, v4, null
+// GFX10: :[[@LINE-1]]:28: error: invalid operand for instruction
+
+global_atomic_umin v0, v2, v4, null
+// GFX10: :[[@LINE-1]]:32: error: invalid operand for instruction
+
+global_atomic_umin_x2 v2, v[4:5], null
+// GFX10: :[[@LINE-1]]:35: error: invalid operand for instruction
+
+global_atomic_umin_x2 v[0:1], v2, v[4:5], null
+// GFX10: :[[@LINE-1]]:43: error: invalid operand for instruction
+
+global_atomic_xor v2, v4, null
+// GFX10: :[[@LINE-1]]:27: error: invalid operand for instruction
+
+global_atomic_xor v0, v2, v4, null
+// GFX10: :[[@LINE-1]]:31: error: invalid operand for instruction
+
+global_atomic_xor_x2 v2, v[4:5], null
+// GFX10: :[[@LINE-1]]:34: error: invalid operand for instruction
+
+global_atomic_xor_x2 v[0:1], v2, v[4:5], null
+// GFX10: :[[@LINE-1]]:42: error: invalid operand for instruction
+
+global_load_dword v0, v4, null
+// GFX10: :[[@LINE-1]]:27: error: invalid operand for instruction
+
+global_load_dwordx2 v[0:1], v4, null
+// GFX10: :[[@LINE-1]]:33: error: invalid operand for instruction
+
+global_load_dwordx3 v[0:2], v4, null
+// GFX10: :[[@LINE-1]]:33: error: invalid operand for instruction
+
+global_load_dwordx4 v[0:3], v4, null
+// GFX10: :[[@LINE-1]]:33: error: invalid operand for instruction
+
+global_load_sbyte v0, v2, null
+// GFX10: :[[@LINE-1]]:27: error: invalid operand for instruction
+
+global_load_sbyte_d16 v0, v2, null
+// GFX10: :[[@LINE-1]]:31: error: invalid operand for instruction
+
+global_load_sbyte_d16_hi v0, v2, null
+// GFX10: :[[@LINE-1]]:34: error: invalid operand for instruction
+
+global_load_short_d16 v0, v2, null
+// GFX10: :[[@LINE-1]]:31: error: invalid operand for instruction
+
+global_load_short_d16_hi v0, v2, null
+// GFX10: :[[@LINE-1]]:34: error: invalid operand for instruction
+
+global_load_sshort v0, v2, null
+// GFX10: :[[@LINE-1]]:28: error: invalid operand for instruction
+
+global_load_ubyte v0, v2, null
+// GFX10: :[[@LINE-1]]:27: error: invalid operand for instruction
+
+global_load_ubyte_d16 v0, v2, null
+// GFX10: :[[@LINE-1]]:31: error: invalid operand for instruction
+
+global_load_ubyte_d16_hi v0, v2, null
+// GFX10: :[[@LINE-1]]:34: error: invalid operand for instruction
+
+global_load_ushort v0, v2, null
+// GFX10: :[[@LINE-1]]:28: error: invalid operand for instruction
+
+global_store_byte v0, v2, null
+// GFX10: :[[@LINE-1]]:27: error: invalid operand for instruction
+
+global_store_byte_d16_hi v0, v2, null
+// GFX10: :[[@LINE-1]]:34: error: invalid operand for instruction
+
+global_store_dword v0, v2, null
+// GFX10: :[[@LINE-1]]:28: error: invalid operand for instruction
+
+global_store_dwordx2 v0, v[2:3], null
+// GFX10: :[[@LINE-1]]:34: error: invalid operand for instruction
+
+global_store_dwordx3 v0, v[2:4], null
+// GFX10: :[[@LINE-1]]:34: error: invalid operand for instruction
+
+global_store_dwordx4 v0, v[2:5], null
+// GFX10: :[[@LINE-1]]:34: error: invalid operand for instruction
+
+global_store_short v0, v2, null
+// GFX10: :[[@LINE-1]]:28: error: invalid operand for instruction
+
+global_store_short_d16_hi v0, v2, null
+// GFX10: :[[@LINE-1]]:35: error: invalid operand for instruction
diff --git a/llvm/test/MC/AMDGPU/gfx10_unsupported.s b/llvm/test/MC/AMDGPU/gfx10_unsupported.s
index 1374417..5a9f382 100644
--- a/llvm/test/MC/AMDGPU/gfx10_unsupported.s
+++ b/llvm/test/MC/AMDGPU/gfx10_unsupported.s
@@ -215,6 +215,9 @@ buffer_store_d16_hi_format_x v1, off, s[12:15], -1 offset:4095
buffer_store_lds_dword s[4:7], -1 offset:4095 lds
// CHECK: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+buffer_wbinvl1
+// CHECK: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
buffer_wbinvl1_vol
// CHECK: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
diff --git a/llvm/test/MC/AMDGPU/gfx1150_asm_sopp.s b/llvm/test/MC/AMDGPU/gfx1150_asm_sopp.s
deleted file mode 100644
index 044ce48..0000000
--- a/llvm/test/MC/AMDGPU/gfx1150_asm_sopp.s
+++ /dev/null
@@ -1,10 +0,0 @@
-// RUN: llvm-mc -triple=amdgcn -mcpu=gfx1150 -show-encoding %s | FileCheck --check-prefixes=GFX1150 %s
-
-s_singleuse_vdst 0x0000
-// GFX1150: encoding: [0x00,0x00,0x93,0xbf]
-
-s_singleuse_vdst 0xffff
-// GFX1150: encoding: [0xff,0xff,0x93,0xbf]
-
-s_singleuse_vdst 0x1234
-// GFX1150: encoding: [0x34,0x12,0x93,0xbf]
diff --git a/llvm/test/MC/AMDGPU/gfx11_asm_vop3-fake16.s b/llvm/test/MC/AMDGPU/gfx11_asm_vop3-fake16.s
new file mode 100644
index 0000000..d78673d
--- /dev/null
+++ b/llvm/test/MC/AMDGPU/gfx11_asm_vop3-fake16.s
@@ -0,0 +1,6199 @@
+// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1100 -mattr=+wavefrontsize32,-real-true16 -show-encoding %s | FileCheck --check-prefixes=GFX11,W32 %s
+// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1100 -mattr=+wavefrontsize64,-real-true16 -show-encoding %s | FileCheck --check-prefixes=GFX11,W64 %s
+// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1100 -mattr=+wavefrontsize32,-real-true16 %s 2>&1 | FileCheck --check-prefix=W32-ERR --implicit-check-not=error: %s
+// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1100 -mattr=+wavefrontsize64,-real-true16 %s 2>&1 | FileCheck --check-prefix=W64-ERR --implicit-check-not=error: %s
+
+v_add3_u32 v5, v1, v2, s3
+// GFX11: encoding: [0x05,0x00,0x55,0xd6,0x01,0x05,0x0e,0x00]
+
+v_add3_u32 v5, v255, s2, s105
+// GFX11: encoding: [0x05,0x00,0x55,0xd6,0xff,0x05,0xa4,0x01]
+
+v_add3_u32 v5, s1, v255, exec_hi
+// GFX11: encoding: [0x05,0x00,0x55,0xd6,0x01,0xfe,0xff,0x01]
+
+v_add3_u32 v5, s105, s105, exec_lo
+// GFX11: encoding: [0x05,0x00,0x55,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_add3_u32 v5, vcc_lo, ttmp15, v3
+// GFX11: encoding: [0x05,0x00,0x55,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_add3_u32 v5, vcc_hi, 0xaf123456, v255
+// GFX11: encoding: [0x05,0x00,0x55,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+v_add3_u32 v5, ttmp15, src_scc, ttmp15
+// GFX11: encoding: [0x05,0x00,0x55,0xd6,0x7b,0xfa,0xed,0x01]
+
+v_add3_u32 v5, m0, 0.5, m0
+// GFX11: encoding: [0x05,0x00,0x55,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_add3_u32 v5, exec_lo, -1, vcc_hi
+// GFX11: encoding: [0x05,0x00,0x55,0xd6,0x7e,0x82,0xad,0x01]
+
+v_add3_u32 v5, exec_hi, null, vcc_lo
+// GFX11: encoding: [0x05,0x00,0x55,0xd6,0x7f,0xf8,0xa8,0x01]
+
+v_add3_u32 v5, null, exec_lo, 0xaf123456
+// GFX11: encoding: [0x05,0x00,0x55,0xd6,0x7c,0xfc,0xfc,0x03,0x56,0x34,0x12,0xaf]
+
+v_add3_u32 v5, -1, exec_hi, src_scc
+// GFX11: encoding: [0x05,0x00,0x55,0xd6,0xc1,0xfe,0xf4,0x03]
+
+v_add3_u32 v5, 0.5, m0, 0.5
+// GFX11: encoding: [0x05,0x00,0x55,0xd6,0xf0,0xfa,0xc0,0x03]
+
+v_add3_u32 v5, src_scc, vcc_lo, -1
+// GFX11: encoding: [0x05,0x00,0x55,0xd6,0xfd,0xd4,0x04,0x03]
+
+v_add3_u32 v255, 0xaf123456, vcc_hi, null
+// GFX11: encoding: [0xff,0x00,0x55,0xd6,0xff,0xd6,0xf0,0x01,0x56,0x34,0x12,0xaf]
+
+v_add_co_u32 v5, s6, v1, v2
+// W32: encoding: [0x05,0x06,0x00,0xd7,0x01,0x05,0x02,0x00]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32 v5, s6, v255, v255
+// W32: encoding: [0x05,0x06,0x00,0xd7,0xff,0xff,0x03,0x00]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32 v5, s6, s1, s2
+// W32: encoding: [0x05,0x06,0x00,0xd7,0x01,0x04,0x00,0x00]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32 v5, s6, s105, s105
+// W32: encoding: [0x05,0x06,0x00,0xd7,0x69,0xd2,0x00,0x00]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32 v5, s6, vcc_lo, ttmp15
+// W32: encoding: [0x05,0x06,0x00,0xd7,0x6a,0xf6,0x00,0x00]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32 v5, s6, vcc_hi, 0xaf123456
+// W32: encoding: [0x05,0x06,0x00,0xd7,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32 v5, s6, ttmp15, src_scc
+// W32: encoding: [0x05,0x06,0x00,0xd7,0x7b,0xfa,0x01,0x00]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32 v5, s6, m0, 0.5
+// W32: encoding: [0x05,0x06,0x00,0xd7,0x7d,0xe0,0x01,0x00]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32 v5, s6, exec_lo, -1
+// W32: encoding: [0x05,0x06,0x00,0xd7,0x7e,0x82,0x01,0x00]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32 v5, s6, exec_hi, null
+// W32: encoding: [0x05,0x06,0x00,0xd7,0x7f,0xf8,0x00,0x00]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32 v5, s105, null, exec_lo
+// W32: encoding: [0x05,0x69,0x00,0xd7,0x7c,0xfc,0x00,0x00]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32 v5, vcc_lo, -1, exec_hi
+// W32: encoding: [0x05,0x6a,0x00,0xd7,0xc1,0xfe,0x00,0x00]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32 v5, vcc_hi, 0.5, m0
+// W32: encoding: [0x05,0x6b,0x00,0xd7,0xf0,0xfa,0x00,0x00]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32 v5, ttmp15, src_scc, vcc_lo
+// W32: encoding: [0x05,0x7b,0x00,0xd7,0xfd,0xd4,0x00,0x00]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32 v5, s[12:13], v1, v2
+// W64: encoding: [0x05,0x0c,0x00,0xd7,0x01,0x05,0x02,0x00]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32 v5, s[12:13], v255, v255
+// W64: encoding: [0x05,0x0c,0x00,0xd7,0xff,0xff,0x03,0x00]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32 v5, s[12:13], s1, s2
+// W64: encoding: [0x05,0x0c,0x00,0xd7,0x01,0x04,0x00,0x00]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32 v5, s[12:13], s105, s105
+// W64: encoding: [0x05,0x0c,0x00,0xd7,0x69,0xd2,0x00,0x00]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32 v5, s[12:13], vcc_lo, ttmp15
+// W64: encoding: [0x05,0x0c,0x00,0xd7,0x6a,0xf6,0x00,0x00]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32 v5, s[12:13], vcc_hi, 0xaf123456
+// W64: encoding: [0x05,0x0c,0x00,0xd7,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32 v5, s[12:13], ttmp15, src_scc
+// W64: encoding: [0x05,0x0c,0x00,0xd7,0x7b,0xfa,0x01,0x00]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32 v5, s[12:13], m0, 0.5
+// W64: encoding: [0x05,0x0c,0x00,0xd7,0x7d,0xe0,0x01,0x00]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32 v5, s[12:13], exec_lo, -1
+// W64: encoding: [0x05,0x0c,0x00,0xd7,0x7e,0x82,0x01,0x00]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32 v5, s[12:13], exec_hi, null
+// W64: encoding: [0x05,0x0c,0x00,0xd7,0x7f,0xf8,0x00,0x00]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32 v5, s[12:13], null, exec_lo
+// W64: encoding: [0x05,0x0c,0x00,0xd7,0x7c,0xfc,0x00,0x00]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32 v5, s[104:105], -1, exec_hi
+// W64: encoding: [0x05,0x68,0x00,0xd7,0xc1,0xfe,0x00,0x00]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32 v5, vcc, 0.5, m0
+// W64: encoding: [0x05,0x6a,0x00,0xd7,0xf0,0xfa,0x00,0x00]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: operands are not valid for this GPU or mode
+
+v_add_co_u32 v5, ttmp[14:15], src_scc, vcc_lo
+// W64: encoding: [0x05,0x7a,0x00,0xd7,0xfd,0xd4,0x00,0x00]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32 v255, null, 0xaf123456, vcc_hi clamp
+// GFX11: encoding: [0xff,0xfc,0x00,0xd7,0xff,0xd6,0x00,0x00,0x56,0x34,0x12,0xaf]
+
+v_add_f64 v[5:6], v[1:2], v[2:3]
+// GFX11: encoding: [0x05,0x00,0x27,0xd7,0x01,0x05,0x02,0x00]
+
+v_add_f64 v[5:6], v[254:255], v[254:255]
+// GFX11: encoding: [0x05,0x00,0x27,0xd7,0xfe,0xfd,0x03,0x00]
+
+v_add_f64 v[5:6], s[2:3], s[4:5]
+// GFX11: encoding: [0x05,0x00,0x27,0xd7,0x02,0x08,0x00,0x00]
+
+v_add_f64 v[5:6], s[104:105], s[104:105]
+// GFX11: encoding: [0x05,0x00,0x27,0xd7,0x68,0xd0,0x00,0x00]
+
+v_add_f64 v[5:6], vcc, ttmp[14:15]
+// GFX11: encoding: [0x05,0x00,0x27,0xd7,0x6a,0xf4,0x00,0x00]
+
+v_add_f64 v[5:6], ttmp[14:15], 0xaf123456
+// GFX11: encoding: [0x05,0x00,0x27,0xd7,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_add_f64 v[5:6], -|exec|, src_scc
+// GFX11: encoding: [0x05,0x01,0x27,0xd7,0x7e,0xfa,0x01,0x20]
+
+v_add_f64 v[5:6], null, 0.5
+// GFX11: encoding: [0x05,0x00,0x27,0xd7,0x7c,0xe0,0x01,0x00]
+
+v_add_f64 v[5:6], -1, -1
+// GFX11: encoding: [0x05,0x00,0x27,0xd7,0xc1,0x82,0x01,0x00]
+
+v_add_f64 v[5:6], 0.5, null mul:2
+// GFX11: encoding: [0x05,0x00,0x27,0xd7,0xf0,0xf8,0x00,0x08]
+
+v_add_f64 v[5:6], -|src_scc|, -|exec| mul:4
+// GFX11: encoding: [0x05,0x03,0x27,0xd7,0xfd,0xfc,0x00,0x70]
+
+v_add_f64 v[254:255], 0xaf123456, -|vcc| clamp div:2
+// GFX11: encoding: [0xfe,0x82,0x27,0xd7,0xff,0xd4,0x00,0x58,0x56,0x34,0x12,0xaf]
+
+v_add_lshl_u32 v5, v1, v2, s3
+// GFX11: encoding: [0x05,0x00,0x47,0xd6,0x01,0x05,0x0e,0x00]
+
+v_add_lshl_u32 v5, v255, s2, s105
+// GFX11: encoding: [0x05,0x00,0x47,0xd6,0xff,0x05,0xa4,0x01]
+
+v_add_lshl_u32 v5, s1, v255, exec_hi
+// GFX11: encoding: [0x05,0x00,0x47,0xd6,0x01,0xfe,0xff,0x01]
+
+v_add_lshl_u32 v5, s105, s105, exec_lo
+// GFX11: encoding: [0x05,0x00,0x47,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_add_lshl_u32 v5, vcc_lo, ttmp15, v3
+// GFX11: encoding: [0x05,0x00,0x47,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_add_lshl_u32 v5, vcc_hi, 0xaf123456, v255
+// GFX11: encoding: [0x05,0x00,0x47,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+v_add_lshl_u32 v5, ttmp15, src_scc, ttmp15
+// GFX11: encoding: [0x05,0x00,0x47,0xd6,0x7b,0xfa,0xed,0x01]
+
+v_add_lshl_u32 v5, m0, 0.5, m0
+// GFX11: encoding: [0x05,0x00,0x47,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_add_lshl_u32 v5, exec_lo, -1, vcc_hi
+// GFX11: encoding: [0x05,0x00,0x47,0xd6,0x7e,0x82,0xad,0x01]
+
+v_add_lshl_u32 v5, exec_hi, null, vcc_lo
+// GFX11: encoding: [0x05,0x00,0x47,0xd6,0x7f,0xf8,0xa8,0x01]
+
+v_add_lshl_u32 v5, null, exec_lo, 0xaf123456
+// GFX11: encoding: [0x05,0x00,0x47,0xd6,0x7c,0xfc,0xfc,0x03,0x56,0x34,0x12,0xaf]
+
+v_add_lshl_u32 v5, -1, exec_hi, src_scc
+// GFX11: encoding: [0x05,0x00,0x47,0xd6,0xc1,0xfe,0xf4,0x03]
+
+v_add_lshl_u32 v5, 0.5, m0, 0.5
+// GFX11: encoding: [0x05,0x00,0x47,0xd6,0xf0,0xfa,0xc0,0x03]
+
+v_add_lshl_u32 v5, src_scc, vcc_lo, -1
+// GFX11: encoding: [0x05,0x00,0x47,0xd6,0xfd,0xd4,0x04,0x03]
+
+v_add_lshl_u32 v255, 0xaf123456, vcc_hi, null
+// GFX11: encoding: [0xff,0x00,0x47,0xd6,0xff,0xd6,0xf0,0x01,0x56,0x34,0x12,0xaf]
+
+v_add_nc_i16 v5, v1, v2
+// GFX11: encoding: [0x05,0x00,0x0d,0xd7,0x01,0x05,0x02,0x00]
+
+v_add_nc_i16 v5, v255, v255
+// GFX11: encoding: [0x05,0x00,0x0d,0xd7,0xff,0xff,0x03,0x00]
+
+v_add_nc_i16 v5, s1, s2
+// GFX11: encoding: [0x05,0x00,0x0d,0xd7,0x01,0x04,0x00,0x00]
+
+v_add_nc_i16 v5, s105, s105
+// GFX11: encoding: [0x05,0x00,0x0d,0xd7,0x69,0xd2,0x00,0x00]
+
+v_add_nc_i16 v5, vcc_lo, ttmp15
+// GFX11: encoding: [0x05,0x00,0x0d,0xd7,0x6a,0xf6,0x00,0x00]
+
+v_add_nc_i16 v5, vcc_hi, 0xfe0b
+// GFX11: encoding: [0x05,0x00,0x0d,0xd7,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+v_add_nc_i16 v5, ttmp15, src_scc
+// GFX11: encoding: [0x05,0x00,0x0d,0xd7,0x7b,0xfa,0x01,0x00]
+
+v_add_nc_i16 v5, m0, 0.5
+// GFX11: encoding: [0x05,0x00,0x0d,0xd7,0x7d,0xe0,0x01,0x00]
+
+v_add_nc_i16 v5, exec_lo, -1
+// GFX11: encoding: [0x05,0x00,0x0d,0xd7,0x7e,0x82,0x01,0x00]
+
+v_add_nc_i16 v5, exec_hi, null
+// GFX11: encoding: [0x05,0x00,0x0d,0xd7,0x7f,0xf8,0x00,0x00]
+
+v_add_nc_i16 v5, null, exec_lo op_sel:[1,1,1]
+// GFX11: encoding: [0x05,0x58,0x0d,0xd7,0x7c,0xfc,0x00,0x00]
+
+v_add_nc_i16 v5, -1, exec_hi op_sel:[0,0,0]
+// GFX11: encoding: [0x05,0x00,0x0d,0xd7,0xc1,0xfe,0x00,0x00]
+
+v_add_nc_i16 v5, 0.5, m0 op_sel:[1,0,0]
+// GFX11: encoding: [0x05,0x08,0x0d,0xd7,0xf0,0xfa,0x00,0x00]
+
+v_add_nc_i16 v5, src_scc, vcc_lo op_sel:[0,1,0]
+// GFX11: encoding: [0x05,0x10,0x0d,0xd7,0xfd,0xd4,0x00,0x00]
+
+v_add_nc_i16 v255, 0xfe0b, vcc_hi op_sel:[0,0,1] clamp
+// GFX11: encoding: [0xff,0xc0,0x0d,0xd7,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
+
+v_add_nc_i32 v5, v1, v2
+// GFX11: encoding: [0x05,0x00,0x26,0xd7,0x01,0x05,0x02,0x00]
+
+v_add_nc_i32 v5, v255, v255
+// GFX11: encoding: [0x05,0x00,0x26,0xd7,0xff,0xff,0x03,0x00]
+
+v_add_nc_i32 v5, s1, s2
+// GFX11: encoding: [0x05,0x00,0x26,0xd7,0x01,0x04,0x00,0x00]
+
+v_add_nc_i32 v5, s105, s105
+// GFX11: encoding: [0x05,0x00,0x26,0xd7,0x69,0xd2,0x00,0x00]
+
+v_add_nc_i32 v5, vcc_lo, ttmp15
+// GFX11: encoding: [0x05,0x00,0x26,0xd7,0x6a,0xf6,0x00,0x00]
+
+v_add_nc_i32 v5, vcc_hi, 0xaf123456
+// GFX11: encoding: [0x05,0x00,0x26,0xd7,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_add_nc_i32 v5, ttmp15, src_scc
+// GFX11: encoding: [0x05,0x00,0x26,0xd7,0x7b,0xfa,0x01,0x00]
+
+v_add_nc_i32 v5, m0, 0.5
+// GFX11: encoding: [0x05,0x00,0x26,0xd7,0x7d,0xe0,0x01,0x00]
+
+v_add_nc_i32 v5, exec_lo, -1
+// GFX11: encoding: [0x05,0x00,0x26,0xd7,0x7e,0x82,0x01,0x00]
+
+v_add_nc_i32 v5, exec_hi, null
+// GFX11: encoding: [0x05,0x00,0x26,0xd7,0x7f,0xf8,0x00,0x00]
+
+v_add_nc_i32 v5, null, exec_lo
+// GFX11: encoding: [0x05,0x00,0x26,0xd7,0x7c,0xfc,0x00,0x00]
+
+v_add_nc_i32 v5, -1, exec_hi
+// GFX11: encoding: [0x05,0x00,0x26,0xd7,0xc1,0xfe,0x00,0x00]
+
+v_add_nc_i32 v5, 0.5, m0
+// GFX11: encoding: [0x05,0x00,0x26,0xd7,0xf0,0xfa,0x00,0x00]
+
+v_add_nc_i32 v5, src_scc, vcc_lo
+// GFX11: encoding: [0x05,0x00,0x26,0xd7,0xfd,0xd4,0x00,0x00]
+
+v_add_nc_i32 v255, 0xaf123456, vcc_hi clamp
+// GFX11: encoding: [0xff,0x80,0x26,0xd7,0xff,0xd6,0x00,0x00,0x56,0x34,0x12,0xaf]
+
+v_add_nc_u16 v5, v1, v2
+// GFX11: encoding: [0x05,0x00,0x03,0xd7,0x01,0x05,0x02,0x00]
+
+v_add_nc_u16 v5, v255, v255
+// GFX11: encoding: [0x05,0x00,0x03,0xd7,0xff,0xff,0x03,0x00]
+
+v_add_nc_u16 v5, s1, s2
+// GFX11: encoding: [0x05,0x00,0x03,0xd7,0x01,0x04,0x00,0x00]
+
+v_add_nc_u16 v5, s105, s105
+// GFX11: encoding: [0x05,0x00,0x03,0xd7,0x69,0xd2,0x00,0x00]
+
+v_add_nc_u16 v5, vcc_lo, ttmp15
+// GFX11: encoding: [0x05,0x00,0x03,0xd7,0x6a,0xf6,0x00,0x00]
+
+v_add_nc_u16 v5, vcc_hi, 0xfe0b
+// GFX11: encoding: [0x05,0x00,0x03,0xd7,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+v_add_nc_u16 v5, ttmp15, src_scc
+// GFX11: encoding: [0x05,0x00,0x03,0xd7,0x7b,0xfa,0x01,0x00]
+
+v_add_nc_u16 v5, m0, 0.5
+// GFX11: encoding: [0x05,0x00,0x03,0xd7,0x7d,0xe0,0x01,0x00]
+
+v_add_nc_u16 v5, exec_lo, -1
+// GFX11: encoding: [0x05,0x00,0x03,0xd7,0x7e,0x82,0x01,0x00]
+
+v_add_nc_u16 v5, exec_hi, null
+// GFX11: encoding: [0x05,0x00,0x03,0xd7,0x7f,0xf8,0x00,0x00]
+
+v_add_nc_u16 v5, null, exec_lo op_sel:[1,1,1]
+// GFX11: encoding: [0x05,0x58,0x03,0xd7,0x7c,0xfc,0x00,0x00]
+
+v_add_nc_u16 v5, -1, exec_hi op_sel:[0,0,0]
+// GFX11: encoding: [0x05,0x00,0x03,0xd7,0xc1,0xfe,0x00,0x00]
+
+v_add_nc_u16 v5, 0.5, m0 op_sel:[1,0,0]
+// GFX11: encoding: [0x05,0x08,0x03,0xd7,0xf0,0xfa,0x00,0x00]
+
+v_add_nc_u16 v5, src_scc, vcc_lo op_sel:[0,1,0]
+// GFX11: encoding: [0x05,0x10,0x03,0xd7,0xfd,0xd4,0x00,0x00]
+
+v_add_nc_u16 v255, 0xfe0b, vcc_hi op_sel:[0,0,1] clamp
+// GFX11: encoding: [0xff,0xc0,0x03,0xd7,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
+
+v_alignbit_b32 v5, v1, v2, s3
+// GFX11: encoding: [0x05,0x00,0x16,0xd6,0x01,0x05,0x0e,0x00]
+
+v_alignbit_b32 v5, v255, s2, s3
+// GFX11: encoding: [0x05,0x00,0x16,0xd6,0xff,0x05,0x0c,0x00]
+
+v_alignbit_b32 v5, s1, v255, s3
+// GFX11: encoding: [0x05,0x00,0x16,0xd6,0x01,0xfe,0x0f,0x00]
+
+v_alignbit_b32 v5, s105, s105, s105
+// GFX11: encoding: [0x05,0x00,0x16,0xd6,0x69,0xd2,0xa4,0x01]
+
+v_alignbit_b32 v5, vcc_lo, ttmp15, v3
+// GFX11: encoding: [0x05,0x00,0x16,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_alignbit_b32 v5, vcc_hi, 0xaf123456, v255
+// GFX11: encoding: [0x05,0x00,0x16,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+v_alignbit_b32 v5, ttmp15, src_scc, ttmp15
+// GFX11: encoding: [0x05,0x00,0x16,0xd6,0x7b,0xfa,0xed,0x01]
+
+v_alignbit_b32 v5, m0, 0.5, exec_lo
+// GFX11: encoding: [0x05,0x00,0x16,0xd6,0x7d,0xe0,0xf9,0x01]
+
+v_alignbit_b32 v5, exec_lo, -1, m0
+// GFX11: encoding: [0x05,0x00,0x16,0xd6,0x7e,0x82,0xf5,0x01]
+
+v_alignbit_b32 v5, exec_hi, null, vcc_hi
+// GFX11: encoding: [0x05,0x00,0x16,0xd6,0x7f,0xf8,0xac,0x01]
+
+v_alignbit_b32 v5, null, exec_lo, vcc_lo
+// GFX11: encoding: [0x05,0x00,0x16,0xd6,0x7c,0xfc,0xa8,0x01]
+
+v_alignbit_b32 v5, -1, exec_hi, src_scc
+// GFX11: encoding: [0x05,0x00,0x16,0xd6,0xc1,0xfe,0xf4,0x03]
+
+v_alignbit_b32 v5, 0.5, m0, exec_hi
+// GFX11: encoding: [0x05,0x00,0x16,0xd6,0xf0,0xfa,0xfc,0x01]
+
+v_alignbit_b32 v5, src_scc, vcc_lo, -1
+// GFX11: encoding: [0x05,0x00,0x16,0xd6,0xfd,0xd4,0x04,0x03]
+
+v_alignbit_b32 v255, 0xaf123456, vcc_hi, null
+// GFX11: encoding: [0xff,0x00,0x16,0xd6,0xff,0xd6,0xf0,0x01,0x56,0x34,0x12,0xaf]
+
+v_alignbyte_b32 v5, v1, v2, s3
+// GFX11: encoding: [0x05,0x00,0x17,0xd6,0x01,0x05,0x0e,0x00]
+
+v_alignbyte_b32 v5, v255, s2, s3
+// GFX11: encoding: [0x05,0x00,0x17,0xd6,0xff,0x05,0x0c,0x00]
+
+v_alignbyte_b32 v5, s1, v255, s3
+// GFX11: encoding: [0x05,0x00,0x17,0xd6,0x01,0xfe,0x0f,0x00]
+
+v_alignbyte_b32 v5, s105, s105, s105
+// GFX11: encoding: [0x05,0x00,0x17,0xd6,0x69,0xd2,0xa4,0x01]
+
+v_alignbyte_b32 v5, vcc_lo, ttmp15, v3
+// GFX11: encoding: [0x05,0x00,0x17,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_alignbyte_b32 v5, vcc_hi, 0xaf123456, v255
+// GFX11: encoding: [0x05,0x00,0x17,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+v_alignbyte_b32 v5, ttmp15, src_scc, ttmp15
+// GFX11: encoding: [0x05,0x00,0x17,0xd6,0x7b,0xfa,0xed,0x01]
+
+v_alignbyte_b32 v5, m0, 0.5, exec_lo
+// GFX11: encoding: [0x05,0x00,0x17,0xd6,0x7d,0xe0,0xf9,0x01]
+
+v_alignbyte_b32 v5, exec_lo, -1, m0
+// GFX11: encoding: [0x05,0x00,0x17,0xd6,0x7e,0x82,0xf5,0x01]
+
+v_alignbyte_b32 v5, exec_hi, null, vcc_hi
+// GFX11: encoding: [0x05,0x00,0x17,0xd6,0x7f,0xf8,0xac,0x01]
+
+v_alignbyte_b32 v5, null, exec_lo, vcc_lo
+// GFX11: encoding: [0x05,0x00,0x17,0xd6,0x7c,0xfc,0xa8,0x01]
+
+v_alignbyte_b32 v5, -1, exec_hi, src_scc
+// GFX11: encoding: [0x05,0x00,0x17,0xd6,0xc1,0xfe,0xf4,0x03]
+
+v_alignbyte_b32 v5, 0.5, m0, exec_hi
+// GFX11: encoding: [0x05,0x00,0x17,0xd6,0xf0,0xfa,0xfc,0x01]
+
+v_alignbyte_b32 v5, src_scc, vcc_lo, -1
+// GFX11: encoding: [0x05,0x00,0x17,0xd6,0xfd,0xd4,0x04,0x03]
+
+v_alignbyte_b32 v255, 0xaf123456, vcc_hi, null
+// GFX11: encoding: [0xff,0x00,0x17,0xd6,0xff,0xd6,0xf0,0x01,0x56,0x34,0x12,0xaf]
+
+v_and_b16 v5, v1, v2
+// GFX11: encoding: [0x05,0x00,0x62,0xd7,0x01,0x05,0x02,0x00]
+
+v_and_b16 v5, v255, v255
+// GFX11: encoding: [0x05,0x00,0x62,0xd7,0xff,0xff,0x03,0x00]
+
+v_and_b16 v5, s1, s2
+// GFX11: encoding: [0x05,0x00,0x62,0xd7,0x01,0x04,0x00,0x00]
+
+v_and_b16 v5, s105, s105
+// GFX11: encoding: [0x05,0x00,0x62,0xd7,0x69,0xd2,0x00,0x00]
+
+v_and_b16 v5, vcc_lo, ttmp15
+// GFX11: encoding: [0x05,0x00,0x62,0xd7,0x6a,0xf6,0x00,0x00]
+
+v_and_b16 v5, vcc_hi, 0xfe0b
+// GFX11: encoding: [0x05,0x00,0x62,0xd7,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+v_and_b16 v5, ttmp15, src_scc
+// GFX11: encoding: [0x05,0x00,0x62,0xd7,0x7b,0xfa,0x01,0x00]
+
+v_and_b16 v5, m0, 0.5
+// GFX11: encoding: [0x05,0x00,0x62,0xd7,0x7d,0xe0,0x01,0x00]
+
+v_and_b16 v5, exec_lo, -1
+// GFX11: encoding: [0x05,0x00,0x62,0xd7,0x7e,0x82,0x01,0x00]
+
+v_and_b16 v5, exec_hi, null
+// GFX11: encoding: [0x05,0x00,0x62,0xd7,0x7f,0xf8,0x00,0x00]
+
+v_and_b16 v5, null, exec_lo
+// GFX11: encoding: [0x05,0x00,0x62,0xd7,0x7c,0xfc,0x00,0x00]
+
+v_and_b16 v5, -1, exec_hi
+// GFX11: encoding: [0x05,0x00,0x62,0xd7,0xc1,0xfe,0x00,0x00]
+
+v_and_b16 v5, 0.5, m0
+// GFX11: encoding: [0x05,0x00,0x62,0xd7,0xf0,0xfa,0x00,0x00]
+
+v_and_b16 v5, src_scc, vcc_lo
+// GFX11: encoding: [0x05,0x00,0x62,0xd7,0xfd,0xd4,0x00,0x00]
+
+v_and_b16 v255, 0xfe0b, vcc_hi
+// GFX11: encoding: [0xff,0x00,0x62,0xd7,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
+
+v_and_or_b32 v5, v1, v2, s3
+// GFX11: encoding: [0x05,0x00,0x57,0xd6,0x01,0x05,0x0e,0x00]
+
+v_and_or_b32 v5, v255, s2, s105
+// GFX11: encoding: [0x05,0x00,0x57,0xd6,0xff,0x05,0xa4,0x01]
+
+v_and_or_b32 v5, s1, v255, exec_hi
+// GFX11: encoding: [0x05,0x00,0x57,0xd6,0x01,0xfe,0xff,0x01]
+
+v_and_or_b32 v5, s105, s105, exec_lo
+// GFX11: encoding: [0x05,0x00,0x57,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_and_or_b32 v5, vcc_lo, ttmp15, v3
+// GFX11: encoding: [0x05,0x00,0x57,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_and_or_b32 v5, vcc_hi, 0xaf123456, v255
+// GFX11: encoding: [0x05,0x00,0x57,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+v_and_or_b32 v5, ttmp15, src_scc, ttmp15
+// GFX11: encoding: [0x05,0x00,0x57,0xd6,0x7b,0xfa,0xed,0x01]
+
+v_and_or_b32 v5, m0, 0.5, m0
+// GFX11: encoding: [0x05,0x00,0x57,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_and_or_b32 v5, exec_lo, -1, vcc_hi
+// GFX11: encoding: [0x05,0x00,0x57,0xd6,0x7e,0x82,0xad,0x01]
+
+v_and_or_b32 v5, exec_hi, null, vcc_lo
+// GFX11: encoding: [0x05,0x00,0x57,0xd6,0x7f,0xf8,0xa8,0x01]
+
+v_and_or_b32 v5, null, exec_lo, 0xaf123456
+// GFX11: encoding: [0x05,0x00,0x57,0xd6,0x7c,0xfc,0xfc,0x03,0x56,0x34,0x12,0xaf]
+
+v_and_or_b32 v5, -1, exec_hi, src_scc
+// GFX11: encoding: [0x05,0x00,0x57,0xd6,0xc1,0xfe,0xf4,0x03]
+
+v_and_or_b32 v5, 0.5, m0, 0.5
+// GFX11: encoding: [0x05,0x00,0x57,0xd6,0xf0,0xfa,0xc0,0x03]
+
+v_and_or_b32 v5, src_scc, vcc_lo, -1
+// GFX11: encoding: [0x05,0x00,0x57,0xd6,0xfd,0xd4,0x04,0x03]
+
+v_and_or_b32 v255, 0xaf123456, vcc_hi, null
+// GFX11: encoding: [0xff,0x00,0x57,0xd6,0xff,0xd6,0xf0,0x01,0x56,0x34,0x12,0xaf]
+
+v_ashrrev_i16 v5, v1, v2
+// GFX11: encoding: [0x05,0x00,0x3a,0xd7,0x01,0x05,0x02,0x00]
+
+v_ashrrev_i16 v5, v255, v255
+// GFX11: encoding: [0x05,0x00,0x3a,0xd7,0xff,0xff,0x03,0x00]
+
+v_ashrrev_i16 v5, s1, s2
+// GFX11: encoding: [0x05,0x00,0x3a,0xd7,0x01,0x04,0x00,0x00]
+
+v_ashrrev_i16 v5, s105, s105
+// GFX11: encoding: [0x05,0x00,0x3a,0xd7,0x69,0xd2,0x00,0x00]
+
+v_ashrrev_i16 v5, vcc_lo, ttmp15
+// GFX11: encoding: [0x05,0x00,0x3a,0xd7,0x6a,0xf6,0x00,0x00]
+
+v_ashrrev_i16 v5, vcc_hi, 0xfe0b
+// GFX11: encoding: [0x05,0x00,0x3a,0xd7,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+v_ashrrev_i16 v5, ttmp15, src_scc
+// GFX11: encoding: [0x05,0x00,0x3a,0xd7,0x7b,0xfa,0x01,0x00]
+
+v_ashrrev_i16 v5, m0, 0.5
+// GFX11: encoding: [0x05,0x00,0x3a,0xd7,0x7d,0xe0,0x01,0x00]
+
+v_ashrrev_i16 v5, exec_lo, -1
+// GFX11: encoding: [0x05,0x00,0x3a,0xd7,0x7e,0x82,0x01,0x00]
+
+v_ashrrev_i16 v5, exec_hi, null
+// GFX11: encoding: [0x05,0x00,0x3a,0xd7,0x7f,0xf8,0x00,0x00]
+
+v_ashrrev_i16 v5, null, exec_lo
+// GFX11: encoding: [0x05,0x00,0x3a,0xd7,0x7c,0xfc,0x00,0x00]
+
+v_ashrrev_i16 v5, -1, exec_hi
+// GFX11: encoding: [0x05,0x00,0x3a,0xd7,0xc1,0xfe,0x00,0x00]
+
+v_ashrrev_i16 v5, 0.5, m0
+// GFX11: encoding: [0x05,0x00,0x3a,0xd7,0xf0,0xfa,0x00,0x00]
+
+v_ashrrev_i16 v5, src_scc, vcc_lo
+// GFX11: encoding: [0x05,0x00,0x3a,0xd7,0xfd,0xd4,0x00,0x00]
+
+v_ashrrev_i16 v255, 0xfe0b, vcc_hi
+// GFX11: encoding: [0xff,0x00,0x3a,0xd7,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
+
+v_ashrrev_i64 v[5:6], v1, vcc
+// GFX11: encoding: [0x05,0x00,0x3e,0xd7,0x01,0xd5,0x00,0x00]
+
+v_ashrrev_i64 v[5:6], v255, exec
+// GFX11: encoding: [0x05,0x00,0x3e,0xd7,0xff,0xfd,0x00,0x00]
+
+v_ashrrev_i64 v[5:6], exec_lo, v[2:3]
+// GFX11: encoding: [0x05,0x00,0x3e,0xd7,0x7e,0x04,0x02,0x00]
+
+v_ashrrev_i64 v[5:6], exec_hi, v[254:255]
+// GFX11: encoding: [0x05,0x00,0x3e,0xd7,0x7f,0xfc,0x03,0x00]
+
+v_ashrrev_i64 v[5:6], null, null
+// GFX11: encoding: [0x05,0x00,0x3e,0xd7,0x7c,0xf8,0x00,0x00]
+
+v_ashrrev_i64 v[5:6], -1, -1
+// GFX11: encoding: [0x05,0x00,0x3e,0xd7,0xc1,0x82,0x01,0x00]
+
+v_ashrrev_i64 v[5:6], 0.5, 0xaf123456
+// GFX11: encoding: [0x05,0x00,0x3e,0xd7,0xf0,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_ashrrev_i64 v[5:6], src_scc, src_scc
+// GFX11: encoding: [0x05,0x00,0x3e,0xd7,0xfd,0xfa,0x01,0x00]
+
+v_ashrrev_i64 v[254:255], 0xaf123456, 0.5
+// GFX11: encoding: [0xfe,0x00,0x3e,0xd7,0xff,0xe0,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_bcnt_u32_b32 v5, v1, v2
+// GFX11: encoding: [0x05,0x00,0x1e,0xd7,0x01,0x05,0x02,0x00]
+
+v_bcnt_u32_b32 v5, v255, v255
+// GFX11: encoding: [0x05,0x00,0x1e,0xd7,0xff,0xff,0x03,0x00]
+
+v_bcnt_u32_b32 v5, s1, s2
+// GFX11: encoding: [0x05,0x00,0x1e,0xd7,0x01,0x04,0x00,0x00]
+
+v_bcnt_u32_b32 v5, s105, s105
+// GFX11: encoding: [0x05,0x00,0x1e,0xd7,0x69,0xd2,0x00,0x00]
+
+v_bcnt_u32_b32 v5, vcc_lo, ttmp15
+// GFX11: encoding: [0x05,0x00,0x1e,0xd7,0x6a,0xf6,0x00,0x00]
+
+v_bcnt_u32_b32 v5, vcc_hi, 0xaf123456
+// GFX11: encoding: [0x05,0x00,0x1e,0xd7,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_bcnt_u32_b32 v5, ttmp15, src_scc
+// GFX11: encoding: [0x05,0x00,0x1e,0xd7,0x7b,0xfa,0x01,0x00]
+
+v_bcnt_u32_b32 v5, m0, 0.5
+// GFX11: encoding: [0x05,0x00,0x1e,0xd7,0x7d,0xe0,0x01,0x00]
+
+v_bcnt_u32_b32 v5, exec_lo, -1
+// GFX11: encoding: [0x05,0x00,0x1e,0xd7,0x7e,0x82,0x01,0x00]
+
+v_bcnt_u32_b32 v5, exec_hi, null
+// GFX11: encoding: [0x05,0x00,0x1e,0xd7,0x7f,0xf8,0x00,0x00]
+
+v_bcnt_u32_b32 v5, null, exec_lo
+// GFX11: encoding: [0x05,0x00,0x1e,0xd7,0x7c,0xfc,0x00,0x00]
+
+v_bcnt_u32_b32 v5, -1, exec_hi
+// GFX11: encoding: [0x05,0x00,0x1e,0xd7,0xc1,0xfe,0x00,0x00]
+
+v_bcnt_u32_b32 v5, 0.5, m0
+// GFX11: encoding: [0x05,0x00,0x1e,0xd7,0xf0,0xfa,0x00,0x00]
+
+v_bcnt_u32_b32 v5, src_scc, vcc_lo
+// GFX11: encoding: [0x05,0x00,0x1e,0xd7,0xfd,0xd4,0x00,0x00]
+
+v_bcnt_u32_b32 v255, 0xaf123456, vcc_hi
+// GFX11: encoding: [0xff,0x00,0x1e,0xd7,0xff,0xd6,0x00,0x00,0x56,0x34,0x12,0xaf]
+
+v_bfe_i32 v5, v1, v2, s3
+// GFX11: encoding: [0x05,0x00,0x11,0xd6,0x01,0x05,0x0e,0x00]
+
+v_bfe_i32 v5, v255, s2, s105
+// GFX11: encoding: [0x05,0x00,0x11,0xd6,0xff,0x05,0xa4,0x01]
+
+v_bfe_i32 v5, s1, v255, exec_hi
+// GFX11: encoding: [0x05,0x00,0x11,0xd6,0x01,0xfe,0xff,0x01]
+
+v_bfe_i32 v5, s105, s105, exec_lo
+// GFX11: encoding: [0x05,0x00,0x11,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_bfe_i32 v5, vcc_lo, ttmp15, v3
+// GFX11: encoding: [0x05,0x00,0x11,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_bfe_i32 v5, vcc_hi, 0xaf123456, v255
+// GFX11: encoding: [0x05,0x00,0x11,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+v_bfe_i32 v5, ttmp15, src_scc, ttmp15
+// GFX11: encoding: [0x05,0x00,0x11,0xd6,0x7b,0xfa,0xed,0x01]
+
+v_bfe_i32 v5, m0, 0.5, m0
+// GFX11: encoding: [0x05,0x00,0x11,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_bfe_i32 v5, exec_lo, -1, vcc_hi
+// GFX11: encoding: [0x05,0x00,0x11,0xd6,0x7e,0x82,0xad,0x01]
+
+v_bfe_i32 v5, exec_hi, null, vcc_lo
+// GFX11: encoding: [0x05,0x00,0x11,0xd6,0x7f,0xf8,0xa8,0x01]
+
+v_bfe_i32 v5, null, exec_lo, 0xaf123456
+// GFX11: encoding: [0x05,0x00,0x11,0xd6,0x7c,0xfc,0xfc,0x03,0x56,0x34,0x12,0xaf]
+
+v_bfe_i32 v5, -1, exec_hi, src_scc
+// GFX11: encoding: [0x05,0x00,0x11,0xd6,0xc1,0xfe,0xf4,0x03]
+
+v_bfe_i32 v5, 0.5, m0, 0.5
+// GFX11: encoding: [0x05,0x00,0x11,0xd6,0xf0,0xfa,0xc0,0x03]
+
+v_bfe_i32 v5, src_scc, vcc_lo, -1
+// GFX11: encoding: [0x05,0x00,0x11,0xd6,0xfd,0xd4,0x04,0x03]
+
+v_bfe_i32 v255, 0xaf123456, vcc_hi, null
+// GFX11: encoding: [0xff,0x00,0x11,0xd6,0xff,0xd6,0xf0,0x01,0x56,0x34,0x12,0xaf]
+
+v_bfe_u32 v5, v1, v2, s3
+// GFX11: encoding: [0x05,0x00,0x10,0xd6,0x01,0x05,0x0e,0x00]
+
+v_bfe_u32 v5, v255, s2, s105
+// GFX11: encoding: [0x05,0x00,0x10,0xd6,0xff,0x05,0xa4,0x01]
+
+v_bfe_u32 v5, s1, v255, exec_hi
+// GFX11: encoding: [0x05,0x00,0x10,0xd6,0x01,0xfe,0xff,0x01]
+
+v_bfe_u32 v5, s105, s105, exec_lo
+// GFX11: encoding: [0x05,0x00,0x10,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_bfe_u32 v5, vcc_lo, ttmp15, v3
+// GFX11: encoding: [0x05,0x00,0x10,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_bfe_u32 v5, vcc_hi, 0xaf123456, v255
+// GFX11: encoding: [0x05,0x00,0x10,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+v_bfe_u32 v5, ttmp15, src_scc, ttmp15
+// GFX11: encoding: [0x05,0x00,0x10,0xd6,0x7b,0xfa,0xed,0x01]
+
+v_bfe_u32 v5, m0, 0.5, m0
+// GFX11: encoding: [0x05,0x00,0x10,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_bfe_u32 v5, exec_lo, -1, vcc_hi
+// GFX11: encoding: [0x05,0x00,0x10,0xd6,0x7e,0x82,0xad,0x01]
+
+v_bfe_u32 v5, exec_hi, null, vcc_lo
+// GFX11: encoding: [0x05,0x00,0x10,0xd6,0x7f,0xf8,0xa8,0x01]
+
+v_bfe_u32 v5, null, exec_lo, 0xaf123456
+// GFX11: encoding: [0x05,0x00,0x10,0xd6,0x7c,0xfc,0xfc,0x03,0x56,0x34,0x12,0xaf]
+
+v_bfe_u32 v5, -1, exec_hi, src_scc
+// GFX11: encoding: [0x05,0x00,0x10,0xd6,0xc1,0xfe,0xf4,0x03]
+
+v_bfe_u32 v5, 0.5, m0, 0.5
+// GFX11: encoding: [0x05,0x00,0x10,0xd6,0xf0,0xfa,0xc0,0x03]
+
+v_bfe_u32 v5, src_scc, vcc_lo, -1
+// GFX11: encoding: [0x05,0x00,0x10,0xd6,0xfd,0xd4,0x04,0x03]
+
+v_bfe_u32 v255, 0xaf123456, vcc_hi, null
+// GFX11: encoding: [0xff,0x00,0x10,0xd6,0xff,0xd6,0xf0,0x01,0x56,0x34,0x12,0xaf]
+
+v_bfi_b32 v5, v1, v2, s3
+// GFX11: encoding: [0x05,0x00,0x12,0xd6,0x01,0x05,0x0e,0x00]
+
+v_bfi_b32 v5, v255, s2, s105
+// GFX11: encoding: [0x05,0x00,0x12,0xd6,0xff,0x05,0xa4,0x01]
+
+v_bfi_b32 v5, s1, v255, exec_hi
+// GFX11: encoding: [0x05,0x00,0x12,0xd6,0x01,0xfe,0xff,0x01]
+
+v_bfi_b32 v5, s105, s105, exec_lo
+// GFX11: encoding: [0x05,0x00,0x12,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_bfi_b32 v5, vcc_lo, ttmp15, v3
+// GFX11: encoding: [0x05,0x00,0x12,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_bfi_b32 v5, vcc_hi, 0xaf123456, v255
+// GFX11: encoding: [0x05,0x00,0x12,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+v_bfi_b32 v5, ttmp15, src_scc, ttmp15
+// GFX11: encoding: [0x05,0x00,0x12,0xd6,0x7b,0xfa,0xed,0x01]
+
+v_bfi_b32 v5, m0, 0.5, m0
+// GFX11: encoding: [0x05,0x00,0x12,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_bfi_b32 v5, exec_lo, -1, vcc_hi
+// GFX11: encoding: [0x05,0x00,0x12,0xd6,0x7e,0x82,0xad,0x01]
+
+v_bfi_b32 v5, exec_hi, null, vcc_lo
+// GFX11: encoding: [0x05,0x00,0x12,0xd6,0x7f,0xf8,0xa8,0x01]
+
+v_bfi_b32 v5, null, exec_lo, 0xaf123456
+// GFX11: encoding: [0x05,0x00,0x12,0xd6,0x7c,0xfc,0xfc,0x03,0x56,0x34,0x12,0xaf]
+
+v_bfi_b32 v5, -1, exec_hi, src_scc
+// GFX11: encoding: [0x05,0x00,0x12,0xd6,0xc1,0xfe,0xf4,0x03]
+
+v_bfi_b32 v5, 0.5, m0, 0.5
+// GFX11: encoding: [0x05,0x00,0x12,0xd6,0xf0,0xfa,0xc0,0x03]
+
+v_bfi_b32 v5, src_scc, vcc_lo, -1
+// GFX11: encoding: [0x05,0x00,0x12,0xd6,0xfd,0xd4,0x04,0x03]
+
+v_bfi_b32 v255, 0xaf123456, vcc_hi, null
+// GFX11: encoding: [0xff,0x00,0x12,0xd6,0xff,0xd6,0xf0,0x01,0x56,0x34,0x12,0xaf]
+
+v_bfm_b32 v5, v1, v2
+// GFX11: encoding: [0x05,0x00,0x1d,0xd7,0x01,0x05,0x02,0x00]
+
+v_bfm_b32 v5, v255, v255
+// GFX11: encoding: [0x05,0x00,0x1d,0xd7,0xff,0xff,0x03,0x00]
+
+v_bfm_b32 v5, s1, s2
+// GFX11: encoding: [0x05,0x00,0x1d,0xd7,0x01,0x04,0x00,0x00]
+
+v_bfm_b32 v5, s105, s105
+// GFX11: encoding: [0x05,0x00,0x1d,0xd7,0x69,0xd2,0x00,0x00]
+
+v_bfm_b32 v5, vcc_lo, ttmp15
+// GFX11: encoding: [0x05,0x00,0x1d,0xd7,0x6a,0xf6,0x00,0x00]
+
+v_bfm_b32 v5, vcc_hi, 0xaf123456
+// GFX11: encoding: [0x05,0x00,0x1d,0xd7,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_bfm_b32 v5, ttmp15, src_scc
+// GFX11: encoding: [0x05,0x00,0x1d,0xd7,0x7b,0xfa,0x01,0x00]
+
+v_bfm_b32 v5, m0, 0.5
+// GFX11: encoding: [0x05,0x00,0x1d,0xd7,0x7d,0xe0,0x01,0x00]
+
+v_bfm_b32 v5, exec_lo, -1
+// GFX11: encoding: [0x05,0x00,0x1d,0xd7,0x7e,0x82,0x01,0x00]
+
+v_bfm_b32 v5, exec_hi, null
+// GFX11: encoding: [0x05,0x00,0x1d,0xd7,0x7f,0xf8,0x00,0x00]
+
+v_bfm_b32 v5, null, exec_lo
+// GFX11: encoding: [0x05,0x00,0x1d,0xd7,0x7c,0xfc,0x00,0x00]
+
+v_bfm_b32 v5, -1, exec_hi
+// GFX11: encoding: [0x05,0x00,0x1d,0xd7,0xc1,0xfe,0x00,0x00]
+
+v_bfm_b32 v5, 0.5, m0
+// GFX11: encoding: [0x05,0x00,0x1d,0xd7,0xf0,0xfa,0x00,0x00]
+
+v_bfm_b32 v5, src_scc, vcc_lo
+// GFX11: encoding: [0x05,0x00,0x1d,0xd7,0xfd,0xd4,0x00,0x00]
+
+v_bfm_b32 v255, 0xaf123456, vcc_hi
+// GFX11: encoding: [0xff,0x00,0x1d,0xd7,0xff,0xd6,0x00,0x00,0x56,0x34,0x12,0xaf]
+
+v_cndmask_b16 v5, v1, src_scc, s3
+// W32: encoding: [0x05,0x00,0x5d,0xd6,0x01,0xfb,0x0d,0x00]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16 v5, v255, 0.5, s3
+// W32: encoding: [0x05,0x00,0x5d,0xd6,0xff,0xe1,0x0d,0x00]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16 v5, s105, s105, s3
+// W32: encoding: [0x05,0x00,0x5d,0xd6,0x69,0xd2,0x0c,0x00]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16 v5, vcc_hi, v2, s3
+// W32: encoding: [0x05,0x00,0x5d,0xd6,0x6b,0x04,0x0e,0x00]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16 v5, ttmp15, ttmp15, s3
+// W32: encoding: [0x05,0x00,0x5d,0xd6,0x7b,0xf6,0x0c,0x00]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16 v5, m0, v255, s3
+// W32: encoding: [0x05,0x00,0x5d,0xd6,0x7d,0xfe,0x0f,0x00]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16 v5, exec_lo, exec_lo, s3
+// W32: encoding: [0x05,0x00,0x5d,0xd6,0x7e,0xfc,0x0c,0x00]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16 v5, exec_hi, exec_hi, s3
+// W32: encoding: [0x05,0x00,0x5d,0xd6,0x7f,0xfe,0x0c,0x00]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16 v5, null, m0, s105
+// W32: encoding: [0x05,0x00,0x5d,0xd6,0x7c,0xfa,0xa4,0x01]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16 v5, -1, -|vcc_lo|, vcc_lo
+// W32: encoding: [0x05,0x02,0x5d,0xd6,0xc1,0xd4,0xa8,0x41]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16 v5, 0.5, -1, vcc_hi
+// W32: encoding: [0x05,0x00,0x5d,0xd6,0xf0,0x82,0xad,0x01]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16 v5, -|src_scc|, null, ttmp15
+// W32: encoding: [0x05,0x01,0x5d,0xd6,0xfd,0xf8,0xec,0x21]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16 v5, v1, src_scc, s[6:7]
+// W64: encoding: [0x05,0x00,0x5d,0xd6,0x01,0xfb,0x19,0x00]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16 v5, v255, 0.5, s[6:7]
+// W64: encoding: [0x05,0x00,0x5d,0xd6,0xff,0xe1,0x19,0x00]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16 v5, s105, s105, s[6:7]
+// W64: encoding: [0x05,0x00,0x5d,0xd6,0x69,0xd2,0x18,0x00]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16 v5, vcc_hi, v2, s[6:7]
+// W64: encoding: [0x05,0x00,0x5d,0xd6,0x6b,0x04,0x1a,0x00]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16 v5, ttmp15, ttmp15, s[6:7]
+// W64: encoding: [0x05,0x00,0x5d,0xd6,0x7b,0xf6,0x18,0x00]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16 v5, m0, v255, s[6:7]
+// W64: encoding: [0x05,0x00,0x5d,0xd6,0x7d,0xfe,0x1b,0x00]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16 v5, exec_lo, exec_lo, s[6:7]
+// W64: encoding: [0x05,0x00,0x5d,0xd6,0x7e,0xfc,0x18,0x00]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16 v5, exec_hi, exec_hi, s[6:7]
+// W64: encoding: [0x05,0x00,0x5d,0xd6,0x7f,0xfe,0x18,0x00]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16 v5, null, m0, s[6:7]
+// W64: encoding: [0x05,0x00,0x5d,0xd6,0x7c,0xfa,0x18,0x00]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16 v5, -1, -|vcc_lo|, s[104:105]
+// W64: encoding: [0x05,0x02,0x5d,0xd6,0xc1,0xd4,0xa0,0x41]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16 v5, 0.5, -1, vcc
+// W64: encoding: [0x05,0x00,0x5d,0xd6,0xf0,0x82,0xa9,0x01]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16 v5, -|src_scc|, null, ttmp[14:15]
+// W64: encoding: [0x05,0x01,0x5d,0xd6,0xfd,0xf8,0xe8,0x21]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16 v255, -|0xfe0b|, -|vcc_hi|, null
+// GFX11: encoding: [0xff,0x03,0x5d,0xd6,0xff,0xd6,0xf0,0x61,0x0b,0xfe,0x00,0x00]
+
+v_cubeid_f32 v5, v1, v2, s3
+// GFX11: encoding: [0x05,0x00,0x0c,0xd6,0x01,0x05,0x0e,0x00]
+
+v_cubeid_f32 v5, v255, s2, s105
+// GFX11: encoding: [0x05,0x00,0x0c,0xd6,0xff,0x05,0xa4,0x01]
+
+v_cubeid_f32 v5, s1, v255, exec_hi
+// GFX11: encoding: [0x05,0x00,0x0c,0xd6,0x01,0xfe,0xff,0x01]
+
+v_cubeid_f32 v5, s105, s105, exec_lo
+// GFX11: encoding: [0x05,0x00,0x0c,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_cubeid_f32 v5, vcc_lo, ttmp15, v3
+// GFX11: encoding: [0x05,0x00,0x0c,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_cubeid_f32 v5, vcc_hi, 0xaf123456, v255
+// GFX11: encoding: [0x05,0x00,0x0c,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+v_cubeid_f32 v5, -|ttmp15|, -|src_scc|, -|ttmp15|
+// GFX11: encoding: [0x05,0x07,0x0c,0xd6,0x7b,0xfa,0xed,0xe1]
+
+v_cubeid_f32 v5, m0, 0.5, m0
+// GFX11: encoding: [0x05,0x00,0x0c,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_cubeid_f32 v5, |exec_lo|, -1, vcc_hi
+// GFX11: encoding: [0x05,0x01,0x0c,0xd6,0x7e,0x82,0xad,0x01]
+
+v_cubeid_f32 v5, -|exec_hi|, null, -|vcc_lo|
+// GFX11: encoding: [0x05,0x05,0x0c,0xd6,0x7f,0xf8,0xa8,0xa1]
+
+v_cubeid_f32 v5, null, exec_lo, -|0xaf123456|
+// GFX11: encoding: [0x05,0x04,0x0c,0xd6,0x7c,0xfc,0xfc,0x83,0x56,0x34,0x12,0xaf]
+
+v_cubeid_f32 v5, -1, -|exec_hi|, -|src_scc|
+// GFX11: encoding: [0x05,0x06,0x0c,0xd6,0xc1,0xfe,0xf4,0xc3]
+
+v_cubeid_f32 v5, 0.5, -m0, 0.5 mul:2
+// GFX11: encoding: [0x05,0x00,0x0c,0xd6,0xf0,0xfa,0xc0,0x4b]
+
+v_cubeid_f32 v5, -src_scc, |vcc_lo|, -1 mul:4
+// GFX11: encoding: [0x05,0x02,0x0c,0xd6,0xfd,0xd4,0x04,0x33]
+
+v_cubeid_f32 v255, -|0xaf123456|, -|vcc_hi|, null clamp div:2
+// GFX11: encoding: [0xff,0x83,0x0c,0xd6,0xff,0xd6,0xf0,0x79,0x56,0x34,0x12,0xaf]
+
+v_cubema_f32 v5, v1, v2, s3
+// GFX11: encoding: [0x05,0x00,0x0f,0xd6,0x01,0x05,0x0e,0x00]
+
+v_cubema_f32 v5, v255, s2, s105
+// GFX11: encoding: [0x05,0x00,0x0f,0xd6,0xff,0x05,0xa4,0x01]
+
+v_cubema_f32 v5, s1, v255, exec_hi
+// GFX11: encoding: [0x05,0x00,0x0f,0xd6,0x01,0xfe,0xff,0x01]
+
+v_cubema_f32 v5, s105, s105, exec_lo
+// GFX11: encoding: [0x05,0x00,0x0f,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_cubema_f32 v5, vcc_lo, ttmp15, v3
+// GFX11: encoding: [0x05,0x00,0x0f,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_cubema_f32 v5, vcc_hi, 0xaf123456, v255
+// GFX11: encoding: [0x05,0x00,0x0f,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+v_cubema_f32 v5, -|ttmp15|, -|src_scc|, -|ttmp15|
+// GFX11: encoding: [0x05,0x07,0x0f,0xd6,0x7b,0xfa,0xed,0xe1]
+
+v_cubema_f32 v5, m0, 0.5, m0
+// GFX11: encoding: [0x05,0x00,0x0f,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_cubema_f32 v5, |exec_lo|, -1, vcc_hi
+// GFX11: encoding: [0x05,0x01,0x0f,0xd6,0x7e,0x82,0xad,0x01]
+
+v_cubema_f32 v5, -|exec_hi|, null, -|vcc_lo|
+// GFX11: encoding: [0x05,0x05,0x0f,0xd6,0x7f,0xf8,0xa8,0xa1]
+
+v_cubema_f32 v5, null, exec_lo, -|0xaf123456|
+// GFX11: encoding: [0x05,0x04,0x0f,0xd6,0x7c,0xfc,0xfc,0x83,0x56,0x34,0x12,0xaf]
+
+v_cubema_f32 v5, -1, -|exec_hi|, -|src_scc|
+// GFX11: encoding: [0x05,0x06,0x0f,0xd6,0xc1,0xfe,0xf4,0xc3]
+
+v_cubema_f32 v5, 0.5, -m0, 0.5 mul:2
+// GFX11: encoding: [0x05,0x00,0x0f,0xd6,0xf0,0xfa,0xc0,0x4b]
+
+v_cubema_f32 v5, -src_scc, |vcc_lo|, -1 mul:4
+// GFX11: encoding: [0x05,0x02,0x0f,0xd6,0xfd,0xd4,0x04,0x33]
+
+v_cubema_f32 v255, -|0xaf123456|, -|vcc_hi|, null clamp div:2
+// GFX11: encoding: [0xff,0x83,0x0f,0xd6,0xff,0xd6,0xf0,0x79,0x56,0x34,0x12,0xaf]
+
+v_cubesc_f32 v5, v1, v2, s3
+// GFX11: encoding: [0x05,0x00,0x0d,0xd6,0x01,0x05,0x0e,0x00]
+
+v_cubesc_f32 v5, v255, s2, s105
+// GFX11: encoding: [0x05,0x00,0x0d,0xd6,0xff,0x05,0xa4,0x01]
+
+v_cubesc_f32 v5, s1, v255, exec_hi
+// GFX11: encoding: [0x05,0x00,0x0d,0xd6,0x01,0xfe,0xff,0x01]
+
+v_cubesc_f32 v5, s105, s105, exec_lo
+// GFX11: encoding: [0x05,0x00,0x0d,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_cubesc_f32 v5, vcc_lo, ttmp15, v3
+// GFX11: encoding: [0x05,0x00,0x0d,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_cubesc_f32 v5, vcc_hi, 0xaf123456, v255
+// GFX11: encoding: [0x05,0x00,0x0d,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+v_cubesc_f32 v5, -|ttmp15|, -|src_scc|, -|ttmp15|
+// GFX11: encoding: [0x05,0x07,0x0d,0xd6,0x7b,0xfa,0xed,0xe1]
+
+v_cubesc_f32 v5, m0, 0.5, m0
+// GFX11: encoding: [0x05,0x00,0x0d,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_cubesc_f32 v5, |exec_lo|, -1, vcc_hi
+// GFX11: encoding: [0x05,0x01,0x0d,0xd6,0x7e,0x82,0xad,0x01]
+
+v_cubesc_f32 v5, -|exec_hi|, null, -|vcc_lo|
+// GFX11: encoding: [0x05,0x05,0x0d,0xd6,0x7f,0xf8,0xa8,0xa1]
+
+v_cubesc_f32 v5, null, exec_lo, -|0xaf123456|
+// GFX11: encoding: [0x05,0x04,0x0d,0xd6,0x7c,0xfc,0xfc,0x83,0x56,0x34,0x12,0xaf]
+
+v_cubesc_f32 v5, -1, -|exec_hi|, -|src_scc|
+// GFX11: encoding: [0x05,0x06,0x0d,0xd6,0xc1,0xfe,0xf4,0xc3]
+
+v_cubesc_f32 v5, 0.5, -m0, 0.5 mul:2
+// GFX11: encoding: [0x05,0x00,0x0d,0xd6,0xf0,0xfa,0xc0,0x4b]
+
+v_cubesc_f32 v5, -src_scc, |vcc_lo|, -1 mul:4
+// GFX11: encoding: [0x05,0x02,0x0d,0xd6,0xfd,0xd4,0x04,0x33]
+
+v_cubesc_f32 v255, -|0xaf123456|, -|vcc_hi|, null clamp div:2
+// GFX11: encoding: [0xff,0x83,0x0d,0xd6,0xff,0xd6,0xf0,0x79,0x56,0x34,0x12,0xaf]
+
+v_cubetc_f32 v5, v1, v2, s3
+// GFX11: encoding: [0x05,0x00,0x0e,0xd6,0x01,0x05,0x0e,0x00]
+
+v_cubetc_f32 v5, v255, s2, s105
+// GFX11: encoding: [0x05,0x00,0x0e,0xd6,0xff,0x05,0xa4,0x01]
+
+v_cubetc_f32 v5, s1, v255, exec_hi
+// GFX11: encoding: [0x05,0x00,0x0e,0xd6,0x01,0xfe,0xff,0x01]
+
+v_cubetc_f32 v5, s105, s105, exec_lo
+// GFX11: encoding: [0x05,0x00,0x0e,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_cubetc_f32 v5, vcc_lo, ttmp15, v3
+// GFX11: encoding: [0x05,0x00,0x0e,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_cubetc_f32 v5, vcc_hi, 0xaf123456, v255
+// GFX11: encoding: [0x05,0x00,0x0e,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+v_cubetc_f32 v5, -|ttmp15|, -|src_scc|, -|ttmp15|
+// GFX11: encoding: [0x05,0x07,0x0e,0xd6,0x7b,0xfa,0xed,0xe1]
+
+v_cubetc_f32 v5, m0, 0.5, m0
+// GFX11: encoding: [0x05,0x00,0x0e,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_cubetc_f32 v5, |exec_lo|, -1, vcc_hi
+// GFX11: encoding: [0x05,0x01,0x0e,0xd6,0x7e,0x82,0xad,0x01]
+
+v_cubetc_f32 v5, -|exec_hi|, null, -|vcc_lo|
+// GFX11: encoding: [0x05,0x05,0x0e,0xd6,0x7f,0xf8,0xa8,0xa1]
+
+v_cubetc_f32 v5, null, exec_lo, -|0xaf123456|
+// GFX11: encoding: [0x05,0x04,0x0e,0xd6,0x7c,0xfc,0xfc,0x83,0x56,0x34,0x12,0xaf]
+
+v_cubetc_f32 v5, -1, -|exec_hi|, -|src_scc|
+// GFX11: encoding: [0x05,0x06,0x0e,0xd6,0xc1,0xfe,0xf4,0xc3]
+
+v_cubetc_f32 v5, 0.5, -m0, 0.5 mul:2
+// GFX11: encoding: [0x05,0x00,0x0e,0xd6,0xf0,0xfa,0xc0,0x4b]
+
+v_cubetc_f32 v5, -src_scc, |vcc_lo|, -1 mul:4
+// GFX11: encoding: [0x05,0x02,0x0e,0xd6,0xfd,0xd4,0x04,0x33]
+
+v_cubetc_f32 v255, -|0xaf123456|, -|vcc_hi|, null clamp div:2
+// GFX11: encoding: [0xff,0x83,0x0e,0xd6,0xff,0xd6,0xf0,0x79,0x56,0x34,0x12,0xaf]
+
+v_cvt_pk_i16_f32 v5, v1, v2
+// GFX11: encoding: [0x05,0x00,0x06,0xd7,0x01,0x05,0x02,0x00]
+
+v_cvt_pk_i16_f32 v5, v255, v255
+// GFX11: encoding: [0x05,0x00,0x06,0xd7,0xff,0xff,0x03,0x00]
+
+v_cvt_pk_i16_f32 v5, s1, s2
+// GFX11: encoding: [0x05,0x00,0x06,0xd7,0x01,0x04,0x00,0x00]
+
+v_cvt_pk_i16_f32 v5, s105, s105
+// GFX11: encoding: [0x05,0x00,0x06,0xd7,0x69,0xd2,0x00,0x00]
+
+v_cvt_pk_i16_f32 v5, vcc_lo, ttmp15
+// GFX11: encoding: [0x05,0x00,0x06,0xd7,0x6a,0xf6,0x00,0x00]
+
+v_cvt_pk_i16_f32 v5, vcc_hi, 0xaf123456
+// GFX11: encoding: [0x05,0x00,0x06,0xd7,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_cvt_pk_i16_f32 v5, ttmp15, src_scc
+// GFX11: encoding: [0x05,0x00,0x06,0xd7,0x7b,0xfa,0x01,0x00]
+
+v_cvt_pk_i16_f32 v5, m0, 0.5
+// GFX11: encoding: [0x05,0x00,0x06,0xd7,0x7d,0xe0,0x01,0x00]
+
+v_cvt_pk_i16_f32 v5, exec_lo, -1
+// GFX11: encoding: [0x05,0x00,0x06,0xd7,0x7e,0x82,0x01,0x00]
+
+v_cvt_pk_i16_f32 v5, |exec_hi|, null
+// GFX11: encoding: [0x05,0x01,0x06,0xd7,0x7f,0xf8,0x00,0x00]
+
+v_cvt_pk_i16_f32 v5, null, exec_lo
+// GFX11: encoding: [0x05,0x00,0x06,0xd7,0x7c,0xfc,0x00,0x00]
+
+v_cvt_pk_i16_f32 v5, -1, exec_hi
+// GFX11: encoding: [0x05,0x00,0x06,0xd7,0xc1,0xfe,0x00,0x00]
+
+v_cvt_pk_i16_f32 v5, 0.5, -m0
+// GFX11: encoding: [0x05,0x00,0x06,0xd7,0xf0,0xfa,0x00,0x40]
+
+v_cvt_pk_i16_f32 v5, -src_scc, |vcc_lo|
+// GFX11: encoding: [0x05,0x02,0x06,0xd7,0xfd,0xd4,0x00,0x20]
+
+v_cvt_pk_i16_f32 v255, -|0xaf123456|, -|vcc_hi|
+// GFX11: encoding: [0xff,0x03,0x06,0xd7,0xff,0xd6,0x00,0x60,0x56,0x34,0x12,0xaf]
+
+v_cvt_pk_i16_i32 v5, v1, v2
+// GFX11: encoding: [0x05,0x00,0x24,0xd7,0x01,0x05,0x02,0x00]
+
+v_cvt_pk_i16_i32 v5, v255, v255
+// GFX11: encoding: [0x05,0x00,0x24,0xd7,0xff,0xff,0x03,0x00]
+
+v_cvt_pk_i16_i32 v5, s1, s2
+// GFX11: encoding: [0x05,0x00,0x24,0xd7,0x01,0x04,0x00,0x00]
+
+v_cvt_pk_i16_i32 v5, s105, s105
+// GFX11: encoding: [0x05,0x00,0x24,0xd7,0x69,0xd2,0x00,0x00]
+
+v_cvt_pk_i16_i32 v5, vcc_lo, ttmp15
+// GFX11: encoding: [0x05,0x00,0x24,0xd7,0x6a,0xf6,0x00,0x00]
+
+v_cvt_pk_i16_i32 v5, vcc_hi, 0xaf123456
+// GFX11: encoding: [0x05,0x00,0x24,0xd7,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_cvt_pk_i16_i32 v5, ttmp15, src_scc
+// GFX11: encoding: [0x05,0x00,0x24,0xd7,0x7b,0xfa,0x01,0x00]
+
+v_cvt_pk_i16_i32 v5, m0, 0.5
+// GFX11: encoding: [0x05,0x00,0x24,0xd7,0x7d,0xe0,0x01,0x00]
+
+v_cvt_pk_i16_i32 v5, exec_lo, -1
+// GFX11: encoding: [0x05,0x00,0x24,0xd7,0x7e,0x82,0x01,0x00]
+
+v_cvt_pk_i16_i32 v5, exec_hi, null
+// GFX11: encoding: [0x05,0x00,0x24,0xd7,0x7f,0xf8,0x00,0x00]
+
+v_cvt_pk_i16_i32 v5, null, exec_lo
+// GFX11: encoding: [0x05,0x00,0x24,0xd7,0x7c,0xfc,0x00,0x00]
+
+v_cvt_pk_i16_i32 v5, -1, exec_hi
+// GFX11: encoding: [0x05,0x00,0x24,0xd7,0xc1,0xfe,0x00,0x00]
+
+v_cvt_pk_i16_i32 v5, 0.5, m0
+// GFX11: encoding: [0x05,0x00,0x24,0xd7,0xf0,0xfa,0x00,0x00]
+
+v_cvt_pk_i16_i32 v5, src_scc, vcc_lo
+// GFX11: encoding: [0x05,0x00,0x24,0xd7,0xfd,0xd4,0x00,0x00]
+
+v_cvt_pk_i16_i32 v255, 0xaf123456, vcc_hi
+// GFX11: encoding: [0xff,0x00,0x24,0xd7,0xff,0xd6,0x00,0x00,0x56,0x34,0x12,0xaf]
+
+v_cvt_pk_norm_i16_f16 v5, v1, v2
+// GFX11: encoding: [0x05,0x00,0x12,0xd7,0x01,0x05,0x02,0x00]
+
+v_cvt_pk_norm_i16_f16 v5, v255, v255
+// GFX11: encoding: [0x05,0x00,0x12,0xd7,0xff,0xff,0x03,0x00]
+
+v_cvt_pk_norm_i16_f16 v5, s1, s2
+// GFX11: encoding: [0x05,0x00,0x12,0xd7,0x01,0x04,0x00,0x00]
+
+v_cvt_pk_norm_i16_f16 v5, s105, s105
+// GFX11: encoding: [0x05,0x00,0x12,0xd7,0x69,0xd2,0x00,0x00]
+
+v_cvt_pk_norm_i16_f16 v5, vcc_lo, ttmp15
+// GFX11: encoding: [0x05,0x00,0x12,0xd7,0x6a,0xf6,0x00,0x00]
+
+v_cvt_pk_norm_i16_f16 v5, vcc_hi, 0xfe0b
+// GFX11: encoding: [0x05,0x00,0x12,0xd7,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+v_cvt_pk_norm_i16_f16 v5, ttmp15, src_scc
+// GFX11: encoding: [0x05,0x00,0x12,0xd7,0x7b,0xfa,0x01,0x00]
+
+v_cvt_pk_norm_i16_f16 v5, m0, 0.5
+// GFX11: encoding: [0x05,0x00,0x12,0xd7,0x7d,0xe0,0x01,0x00]
+
+v_cvt_pk_norm_i16_f16 v5, exec_lo, -1
+// GFX11: encoding: [0x05,0x00,0x12,0xd7,0x7e,0x82,0x01,0x00]
+
+v_cvt_pk_norm_i16_f16 v5, |exec_hi|, null
+// GFX11: encoding: [0x05,0x01,0x12,0xd7,0x7f,0xf8,0x00,0x00]
+
+v_cvt_pk_norm_i16_f16 v5, null, exec_lo
+// GFX11: encoding: [0x05,0x00,0x12,0xd7,0x7c,0xfc,0x00,0x00]
+
+v_cvt_pk_norm_i16_f16 v5, -1, exec_hi
+// GFX11: encoding: [0x05,0x00,0x12,0xd7,0xc1,0xfe,0x00,0x00]
+
+v_cvt_pk_norm_i16_f16 v5, 0.5, -m0 op_sel:[0,0,0]
+// GFX11: encoding: [0x05,0x00,0x12,0xd7,0xf0,0xfa,0x00,0x40]
+
+v_cvt_pk_norm_i16_f16 v5, -src_scc, |vcc_lo| op_sel:[1,0,0]
+// GFX11: encoding: [0x05,0x0a,0x12,0xd7,0xfd,0xd4,0x00,0x20]
+
+v_cvt_pk_norm_i16_f16 v255, -|0xfe0b|, -|vcc_hi| op_sel:[0,1,0]
+// GFX11: encoding: [0xff,0x13,0x12,0xd7,0xff,0xd6,0x00,0x60,0x0b,0xfe,0x00,0x00]
+
+v_cvt_pk_norm_u16_f16 v5, v1, v2
+// GFX11: encoding: [0x05,0x00,0x13,0xd7,0x01,0x05,0x02,0x00]
+
+v_cvt_pk_norm_u16_f16 v5, v255, v255
+// GFX11: encoding: [0x05,0x00,0x13,0xd7,0xff,0xff,0x03,0x00]
+
+v_cvt_pk_norm_u16_f16 v5, s1, s2
+// GFX11: encoding: [0x05,0x00,0x13,0xd7,0x01,0x04,0x00,0x00]
+
+v_cvt_pk_norm_u16_f16 v5, s105, s105
+// GFX11: encoding: [0x05,0x00,0x13,0xd7,0x69,0xd2,0x00,0x00]
+
+v_cvt_pk_norm_u16_f16 v5, vcc_lo, ttmp15
+// GFX11: encoding: [0x05,0x00,0x13,0xd7,0x6a,0xf6,0x00,0x00]
+
+v_cvt_pk_norm_u16_f16 v5, vcc_hi, 0xfe0b
+// GFX11: encoding: [0x05,0x00,0x13,0xd7,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+v_cvt_pk_norm_u16_f16 v5, ttmp15, src_scc
+// GFX11: encoding: [0x05,0x00,0x13,0xd7,0x7b,0xfa,0x01,0x00]
+
+v_cvt_pk_norm_u16_f16 v5, m0, 0.5
+// GFX11: encoding: [0x05,0x00,0x13,0xd7,0x7d,0xe0,0x01,0x00]
+
+v_cvt_pk_norm_u16_f16 v5, exec_lo, -1
+// GFX11: encoding: [0x05,0x00,0x13,0xd7,0x7e,0x82,0x01,0x00]
+
+v_cvt_pk_norm_u16_f16 v5, |exec_hi|, null
+// GFX11: encoding: [0x05,0x01,0x13,0xd7,0x7f,0xf8,0x00,0x00]
+
+v_cvt_pk_norm_u16_f16 v5, null, exec_lo
+// GFX11: encoding: [0x05,0x00,0x13,0xd7,0x7c,0xfc,0x00,0x00]
+
+v_cvt_pk_norm_u16_f16 v5, -1, exec_hi
+// GFX11: encoding: [0x05,0x00,0x13,0xd7,0xc1,0xfe,0x00,0x00]
+
+v_cvt_pk_norm_u16_f16 v5, 0.5, -m0 op_sel:[0,0,0]
+// GFX11: encoding: [0x05,0x00,0x13,0xd7,0xf0,0xfa,0x00,0x40]
+
+v_cvt_pk_norm_u16_f16 v5, -src_scc, |vcc_lo| op_sel:[1,0,0]
+// GFX11: encoding: [0x05,0x0a,0x13,0xd7,0xfd,0xd4,0x00,0x20]
+
+v_cvt_pk_norm_u16_f16 v255, -|0xfe0b|, -|vcc_hi| op_sel:[0,1,0]
+// GFX11: encoding: [0xff,0x13,0x13,0xd7,0xff,0xd6,0x00,0x60,0x0b,0xfe,0x00,0x00]
+
+v_cvt_pk_u16_f32 v5, v1, v2
+// GFX11: encoding: [0x05,0x00,0x07,0xd7,0x01,0x05,0x02,0x00]
+
+v_cvt_pk_u16_f32 v5, v255, v255
+// GFX11: encoding: [0x05,0x00,0x07,0xd7,0xff,0xff,0x03,0x00]
+
+v_cvt_pk_u16_f32 v5, s1, s2
+// GFX11: encoding: [0x05,0x00,0x07,0xd7,0x01,0x04,0x00,0x00]
+
+v_cvt_pk_u16_f32 v5, s105, s105
+// GFX11: encoding: [0x05,0x00,0x07,0xd7,0x69,0xd2,0x00,0x00]
+
+v_cvt_pk_u16_f32 v5, vcc_lo, ttmp15
+// GFX11: encoding: [0x05,0x00,0x07,0xd7,0x6a,0xf6,0x00,0x00]
+
+v_cvt_pk_u16_f32 v5, vcc_hi, 0xaf123456
+// GFX11: encoding: [0x05,0x00,0x07,0xd7,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_cvt_pk_u16_f32 v5, ttmp15, src_scc
+// GFX11: encoding: [0x05,0x00,0x07,0xd7,0x7b,0xfa,0x01,0x00]
+
+v_cvt_pk_u16_f32 v5, m0, 0.5
+// GFX11: encoding: [0x05,0x00,0x07,0xd7,0x7d,0xe0,0x01,0x00]
+
+v_cvt_pk_u16_f32 v5, exec_lo, -1
+// GFX11: encoding: [0x05,0x00,0x07,0xd7,0x7e,0x82,0x01,0x00]
+
+v_cvt_pk_u16_f32 v5, |exec_hi|, null
+// GFX11: encoding: [0x05,0x01,0x07,0xd7,0x7f,0xf8,0x00,0x00]
+
+v_cvt_pk_u16_f32 v5, null, exec_lo
+// GFX11: encoding: [0x05,0x00,0x07,0xd7,0x7c,0xfc,0x00,0x00]
+
+v_cvt_pk_u16_f32 v5, -1, exec_hi
+// GFX11: encoding: [0x05,0x00,0x07,0xd7,0xc1,0xfe,0x00,0x00]
+
+v_cvt_pk_u16_f32 v5, 0.5, -m0
+// GFX11: encoding: [0x05,0x00,0x07,0xd7,0xf0,0xfa,0x00,0x40]
+
+v_cvt_pk_u16_f32 v5, -src_scc, |vcc_lo|
+// GFX11: encoding: [0x05,0x02,0x07,0xd7,0xfd,0xd4,0x00,0x20]
+
+v_cvt_pk_u16_f32 v255, -|0xaf123456|, -|vcc_hi|
+// GFX11: encoding: [0xff,0x03,0x07,0xd7,0xff,0xd6,0x00,0x60,0x56,0x34,0x12,0xaf]
+
+v_cvt_pk_u16_u32 v5, v1, v2
+// GFX11: encoding: [0x05,0x00,0x23,0xd7,0x01,0x05,0x02,0x00]
+
+v_cvt_pk_u16_u32 v5, v255, v255
+// GFX11: encoding: [0x05,0x00,0x23,0xd7,0xff,0xff,0x03,0x00]
+
+v_cvt_pk_u16_u32 v5, s1, s2
+// GFX11: encoding: [0x05,0x00,0x23,0xd7,0x01,0x04,0x00,0x00]
+
+v_cvt_pk_u16_u32 v5, s105, s105
+// GFX11: encoding: [0x05,0x00,0x23,0xd7,0x69,0xd2,0x00,0x00]
+
+v_cvt_pk_u16_u32 v5, vcc_lo, ttmp15
+// GFX11: encoding: [0x05,0x00,0x23,0xd7,0x6a,0xf6,0x00,0x00]
+
+v_cvt_pk_u16_u32 v5, vcc_hi, 0xaf123456
+// GFX11: encoding: [0x05,0x00,0x23,0xd7,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_cvt_pk_u16_u32 v5, ttmp15, src_scc
+// GFX11: encoding: [0x05,0x00,0x23,0xd7,0x7b,0xfa,0x01,0x00]
+
+v_cvt_pk_u16_u32 v5, m0, 0.5
+// GFX11: encoding: [0x05,0x00,0x23,0xd7,0x7d,0xe0,0x01,0x00]
+
+v_cvt_pk_u16_u32 v5, exec_lo, -1
+// GFX11: encoding: [0x05,0x00,0x23,0xd7,0x7e,0x82,0x01,0x00]
+
+v_cvt_pk_u16_u32 v5, exec_hi, null
+// GFX11: encoding: [0x05,0x00,0x23,0xd7,0x7f,0xf8,0x00,0x00]
+
+v_cvt_pk_u16_u32 v5, null, exec_lo
+// GFX11: encoding: [0x05,0x00,0x23,0xd7,0x7c,0xfc,0x00,0x00]
+
+v_cvt_pk_u16_u32 v5, -1, exec_hi
+// GFX11: encoding: [0x05,0x00,0x23,0xd7,0xc1,0xfe,0x00,0x00]
+
+v_cvt_pk_u16_u32 v5, 0.5, m0
+// GFX11: encoding: [0x05,0x00,0x23,0xd7,0xf0,0xfa,0x00,0x00]
+
+v_cvt_pk_u16_u32 v5, src_scc, vcc_lo
+// GFX11: encoding: [0x05,0x00,0x23,0xd7,0xfd,0xd4,0x00,0x00]
+
+v_cvt_pk_u16_u32 v255, 0xaf123456, vcc_hi
+// GFX11: encoding: [0xff,0x00,0x23,0xd7,0xff,0xd6,0x00,0x00,0x56,0x34,0x12,0xaf]
+
+v_cvt_pk_u8_f32 v5, v1, v2, s3
+// GFX11: encoding: [0x05,0x00,0x26,0xd6,0x01,0x05,0x0e,0x00]
+
+v_cvt_pk_u8_f32 v5, v255, s2, s105
+// GFX11: encoding: [0x05,0x00,0x26,0xd6,0xff,0x05,0xa4,0x01]
+
+v_cvt_pk_u8_f32 v5, s1, v255, exec_hi
+// GFX11: encoding: [0x05,0x00,0x26,0xd6,0x01,0xfe,0xff,0x01]
+
+v_cvt_pk_u8_f32 v5, s105, s105, exec_lo
+// GFX11: encoding: [0x05,0x00,0x26,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_cvt_pk_u8_f32 v5, vcc_lo, ttmp15, v3
+// GFX11: encoding: [0x05,0x00,0x26,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_cvt_pk_u8_f32 v5, vcc_hi, 0xaf123456, v255
+// GFX11: encoding: [0x05,0x00,0x26,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+v_cvt_pk_u8_f32 v5, ttmp15, src_scc, ttmp15
+// GFX11: encoding: [0x05,0x00,0x26,0xd6,0x7b,0xfa,0xed,0x01]
+
+v_cvt_pk_u8_f32 v5, m0, 0.5, m0
+// GFX11: encoding: [0x05,0x00,0x26,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_cvt_pk_u8_f32 v5, exec_lo, -1, vcc_hi
+// GFX11: encoding: [0x05,0x00,0x26,0xd6,0x7e,0x82,0xad,0x01]
+
+v_cvt_pk_u8_f32 v5, exec_hi, null, vcc_lo
+// GFX11: encoding: [0x05,0x00,0x26,0xd6,0x7f,0xf8,0xa8,0x01]
+
+v_cvt_pk_u8_f32 v5, null, exec_lo, 0xaf123456
+// GFX11: encoding: [0x05,0x00,0x26,0xd6,0x7c,0xfc,0xfc,0x03,0x56,0x34,0x12,0xaf]
+
+v_cvt_pk_u8_f32 v5, -1, exec_hi, src_scc
+// GFX11: encoding: [0x05,0x00,0x26,0xd6,0xc1,0xfe,0xf4,0x03]
+
+v_cvt_pk_u8_f32 v5, 0.5, m0, 0.5
+// GFX11: encoding: [0x05,0x00,0x26,0xd6,0xf0,0xfa,0xc0,0x03]
+
+v_cvt_pk_u8_f32 v5, src_scc, vcc_lo, -1
+// GFX11: encoding: [0x05,0x00,0x26,0xd6,0xfd,0xd4,0x04,0x03]
+
+v_cvt_pk_u8_f32 v255, -|0xaf123456|, vcc_hi, null
+// GFX11: encoding: [0xff,0x01,0x26,0xd6,0xff,0xd6,0xf0,0x21,0x56,0x34,0x12,0xaf]
+
+v_cvt_pknorm_i16_f16 v5, v1, v2
+// GFX11: encoding: [0x05,0x00,0x12,0xd7,0x01,0x05,0x02,0x00]
+
+v_cvt_pknorm_i16_f16 v5, v255, v255
+// GFX11: encoding: [0x05,0x00,0x12,0xd7,0xff,0xff,0x03,0x00]
+
+v_cvt_pknorm_i16_f16 v5, s1, s2
+// GFX11: encoding: [0x05,0x00,0x12,0xd7,0x01,0x04,0x00,0x00]
+
+v_cvt_pknorm_i16_f16 v5, s105, s105
+// GFX11: encoding: [0x05,0x00,0x12,0xd7,0x69,0xd2,0x00,0x00]
+
+v_cvt_pknorm_i16_f16 v5, vcc_lo, ttmp15
+// GFX11: encoding: [0x05,0x00,0x12,0xd7,0x6a,0xf6,0x00,0x00]
+
+v_cvt_pknorm_i16_f16 v5, vcc_hi, 0xfe0b
+// GFX11: encoding: [0x05,0x00,0x12,0xd7,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+v_cvt_pknorm_i16_f16 v5, ttmp15, src_scc
+// GFX11: encoding: [0x05,0x00,0x12,0xd7,0x7b,0xfa,0x01,0x00]
+
+v_cvt_pknorm_i16_f16 v5, m0, 0.5
+// GFX11: encoding: [0x05,0x00,0x12,0xd7,0x7d,0xe0,0x01,0x00]
+
+v_cvt_pknorm_i16_f16 v5, exec_lo, -1
+// GFX11: encoding: [0x05,0x00,0x12,0xd7,0x7e,0x82,0x01,0x00]
+
+v_cvt_pknorm_i16_f16 v5, |exec_hi|, null
+// GFX11: encoding: [0x05,0x01,0x12,0xd7,0x7f,0xf8,0x00,0x00]
+
+v_cvt_pknorm_i16_f16 v5, null, exec_lo
+// GFX11: encoding: [0x05,0x00,0x12,0xd7,0x7c,0xfc,0x00,0x00]
+
+v_cvt_pknorm_i16_f16 v5, -1, exec_hi
+// GFX11: encoding: [0x05,0x00,0x12,0xd7,0xc1,0xfe,0x00,0x00]
+
+v_cvt_pknorm_i16_f16 v5, 0.5, -m0 op_sel:[0,0,0]
+// GFX11: encoding: [0x05,0x00,0x12,0xd7,0xf0,0xfa,0x00,0x40]
+
+v_cvt_pknorm_i16_f16 v5, -src_scc, |vcc_lo| op_sel:[1,0,0]
+// GFX11: encoding: [0x05,0x0a,0x12,0xd7,0xfd,0xd4,0x00,0x20]
+
+v_cvt_pknorm_i16_f16 v255, -|0xfe0b|, -|vcc_hi| op_sel:[0,1,0]
+// GFX11: encoding: [0xff,0x13,0x12,0xd7,0xff,0xd6,0x00,0x60,0x0b,0xfe,0x00,0x00]
+
+v_cvt_pknorm_i16_f32 v5, v1, v2
+// GFX11: encoding: [0x05,0x00,0x21,0xd7,0x01,0x05,0x02,0x00]
+
+v_cvt_pknorm_i16_f32 v5, v255, v255
+// GFX11: encoding: [0x05,0x00,0x21,0xd7,0xff,0xff,0x03,0x00]
+
+v_cvt_pknorm_i16_f32 v5, s1, s2
+// GFX11: encoding: [0x05,0x00,0x21,0xd7,0x01,0x04,0x00,0x00]
+
+v_cvt_pknorm_i16_f32 v5, s105, s105
+// GFX11: encoding: [0x05,0x00,0x21,0xd7,0x69,0xd2,0x00,0x00]
+
+v_cvt_pknorm_i16_f32 v5, vcc_lo, ttmp15
+// GFX11: encoding: [0x05,0x00,0x21,0xd7,0x6a,0xf6,0x00,0x00]
+
+v_cvt_pknorm_i16_f32 v5, vcc_hi, 0xaf123456
+// GFX11: encoding: [0x05,0x00,0x21,0xd7,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_cvt_pknorm_i16_f32 v5, ttmp15, src_scc
+// GFX11: encoding: [0x05,0x00,0x21,0xd7,0x7b,0xfa,0x01,0x00]
+
+v_cvt_pknorm_i16_f32 v5, m0, 0.5
+// GFX11: encoding: [0x05,0x00,0x21,0xd7,0x7d,0xe0,0x01,0x00]
+
+v_cvt_pknorm_i16_f32 v5, exec_lo, -1
+// GFX11: encoding: [0x05,0x00,0x21,0xd7,0x7e,0x82,0x01,0x00]
+
+v_cvt_pknorm_i16_f32 v5, |exec_hi|, null
+// GFX11: encoding: [0x05,0x01,0x21,0xd7,0x7f,0xf8,0x00,0x00]
+
+v_cvt_pknorm_i16_f32 v5, null, exec_lo
+// GFX11: encoding: [0x05,0x00,0x21,0xd7,0x7c,0xfc,0x00,0x00]
+
+v_cvt_pknorm_i16_f32 v5, -1, exec_hi
+// GFX11: encoding: [0x05,0x00,0x21,0xd7,0xc1,0xfe,0x00,0x00]
+
+v_cvt_pknorm_i16_f32 v5, 0.5, -m0
+// GFX11: encoding: [0x05,0x00,0x21,0xd7,0xf0,0xfa,0x00,0x40]
+
+v_cvt_pknorm_i16_f32 v5, -src_scc, |vcc_lo|
+// GFX11: encoding: [0x05,0x02,0x21,0xd7,0xfd,0xd4,0x00,0x20]
+
+v_cvt_pknorm_i16_f32 v255, -|0xaf123456|, -|vcc_hi|
+// GFX11: encoding: [0xff,0x03,0x21,0xd7,0xff,0xd6,0x00,0x60,0x56,0x34,0x12,0xaf]
+
+v_cvt_pknorm_u16_f16 v5, v1, v2
+// GFX11: encoding: [0x05,0x00,0x13,0xd7,0x01,0x05,0x02,0x00]
+
+v_cvt_pknorm_u16_f16 v5, v255, v255
+// GFX11: encoding: [0x05,0x00,0x13,0xd7,0xff,0xff,0x03,0x00]
+
+v_cvt_pknorm_u16_f16 v5, s1, s2
+// GFX11: encoding: [0x05,0x00,0x13,0xd7,0x01,0x04,0x00,0x00]
+
+v_cvt_pknorm_u16_f16 v5, s105, s105
+// GFX11: encoding: [0x05,0x00,0x13,0xd7,0x69,0xd2,0x00,0x00]
+
+v_cvt_pknorm_u16_f16 v5, vcc_lo, ttmp15
+// GFX11: encoding: [0x05,0x00,0x13,0xd7,0x6a,0xf6,0x00,0x00]
+
+v_cvt_pknorm_u16_f16 v5, vcc_hi, 0xfe0b
+// GFX11: encoding: [0x05,0x00,0x13,0xd7,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+v_cvt_pknorm_u16_f16 v5, ttmp15, src_scc
+// GFX11: encoding: [0x05,0x00,0x13,0xd7,0x7b,0xfa,0x01,0x00]
+
+v_cvt_pknorm_u16_f16 v5, m0, 0.5
+// GFX11: encoding: [0x05,0x00,0x13,0xd7,0x7d,0xe0,0x01,0x00]
+
+v_cvt_pknorm_u16_f16 v5, exec_lo, -1
+// GFX11: encoding: [0x05,0x00,0x13,0xd7,0x7e,0x82,0x01,0x00]
+
+v_cvt_pknorm_u16_f16 v5, |exec_hi|, null
+// GFX11: encoding: [0x05,0x01,0x13,0xd7,0x7f,0xf8,0x00,0x00]
+
+v_cvt_pknorm_u16_f16 v5, null, exec_lo
+// GFX11: encoding: [0x05,0x00,0x13,0xd7,0x7c,0xfc,0x00,0x00]
+
+v_cvt_pknorm_u16_f16 v5, -1, exec_hi
+// GFX11: encoding: [0x05,0x00,0x13,0xd7,0xc1,0xfe,0x00,0x00]
+
+v_cvt_pknorm_u16_f16 v5, 0.5, -m0 op_sel:[0,0,0]
+// GFX11: encoding: [0x05,0x00,0x13,0xd7,0xf0,0xfa,0x00,0x40]
+
+v_cvt_pknorm_u16_f16 v5, -src_scc, |vcc_lo| op_sel:[1,0,0]
+// GFX11: encoding: [0x05,0x0a,0x13,0xd7,0xfd,0xd4,0x00,0x20]
+
+v_cvt_pknorm_u16_f16 v255, -|0xfe0b|, -|vcc_hi| op_sel:[0,1,0]
+// GFX11: encoding: [0xff,0x13,0x13,0xd7,0xff,0xd6,0x00,0x60,0x0b,0xfe,0x00,0x00]
+
+v_cvt_pknorm_u16_f32 v5, v1, v2
+// GFX11: encoding: [0x05,0x00,0x22,0xd7,0x01,0x05,0x02,0x00]
+
+v_cvt_pknorm_u16_f32 v5, v255, v255
+// GFX11: encoding: [0x05,0x00,0x22,0xd7,0xff,0xff,0x03,0x00]
+
+v_cvt_pknorm_u16_f32 v5, s1, s2
+// GFX11: encoding: [0x05,0x00,0x22,0xd7,0x01,0x04,0x00,0x00]
+
+v_cvt_pknorm_u16_f32 v5, s105, s105
+// GFX11: encoding: [0x05,0x00,0x22,0xd7,0x69,0xd2,0x00,0x00]
+
+v_cvt_pknorm_u16_f32 v5, vcc_lo, ttmp15
+// GFX11: encoding: [0x05,0x00,0x22,0xd7,0x6a,0xf6,0x00,0x00]
+
+v_cvt_pknorm_u16_f32 v5, vcc_hi, 0xaf123456
+// GFX11: encoding: [0x05,0x00,0x22,0xd7,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_cvt_pknorm_u16_f32 v5, ttmp15, src_scc
+// GFX11: encoding: [0x05,0x00,0x22,0xd7,0x7b,0xfa,0x01,0x00]
+
+v_cvt_pknorm_u16_f32 v5, m0, 0.5
+// GFX11: encoding: [0x05,0x00,0x22,0xd7,0x7d,0xe0,0x01,0x00]
+
+v_cvt_pknorm_u16_f32 v5, exec_lo, -1
+// GFX11: encoding: [0x05,0x00,0x22,0xd7,0x7e,0x82,0x01,0x00]
+
+v_cvt_pknorm_u16_f32 v5, |exec_hi|, null
+// GFX11: encoding: [0x05,0x01,0x22,0xd7,0x7f,0xf8,0x00,0x00]
+
+v_cvt_pknorm_u16_f32 v5, null, exec_lo
+// GFX11: encoding: [0x05,0x00,0x22,0xd7,0x7c,0xfc,0x00,0x00]
+
+v_cvt_pknorm_u16_f32 v5, -1, exec_hi
+// GFX11: encoding: [0x05,0x00,0x22,0xd7,0xc1,0xfe,0x00,0x00]
+
+v_cvt_pknorm_u16_f32 v5, 0.5, -m0
+// GFX11: encoding: [0x05,0x00,0x22,0xd7,0xf0,0xfa,0x00,0x40]
+
+v_cvt_pknorm_u16_f32 v5, -src_scc, |vcc_lo|
+// GFX11: encoding: [0x05,0x02,0x22,0xd7,0xfd,0xd4,0x00,0x20]
+
+v_cvt_pknorm_u16_f32 v255, -|0xaf123456|, -|vcc_hi|
+// GFX11: encoding: [0xff,0x03,0x22,0xd7,0xff,0xd6,0x00,0x60,0x56,0x34,0x12,0xaf]
+
+v_div_fixup_f16 v5, v1, v2, s3
+// GFX11: encoding: [0x05,0x00,0x54,0xd6,0x01,0x05,0x0e,0x00]
+
+v_div_fixup_f16 v5, v255, s2, s105
+// GFX11: encoding: [0x05,0x00,0x54,0xd6,0xff,0x05,0xa4,0x01]
+
+v_div_fixup_f16 v5, s1, v255, exec_hi
+// GFX11: encoding: [0x05,0x00,0x54,0xd6,0x01,0xfe,0xff,0x01]
+
+v_div_fixup_f16 v5, s105, s105, exec_lo
+// GFX11: encoding: [0x05,0x00,0x54,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_div_fixup_f16 v5, vcc_lo, ttmp15, v3
+// GFX11: encoding: [0x05,0x00,0x54,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_div_fixup_f16 v5, vcc_hi, 0xfe0b, v255
+// GFX11: encoding: [0x05,0x00,0x54,0xd6,0x6b,0xfe,0xfd,0x07,0x0b,0xfe,0x00,0x00]
+
+v_div_fixup_f16 v5, -|ttmp15|, -|src_scc|, -|ttmp15|
+// GFX11: encoding: [0x05,0x07,0x54,0xd6,0x7b,0xfa,0xed,0xe1]
+
+v_div_fixup_f16 v5, m0, 0.5, m0
+// GFX11: encoding: [0x05,0x00,0x54,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_div_fixup_f16 v5, |exec_lo|, -1, vcc_hi
+// GFX11: encoding: [0x05,0x01,0x54,0xd6,0x7e,0x82,0xad,0x01]
+
+v_div_fixup_f16 v5, -|exec_hi|, null, -|vcc_lo| op_sel:[1,1,1,1]
+// GFX11: encoding: [0x05,0x7d,0x54,0xd6,0x7f,0xf8,0xa8,0xa1]
+
+v_div_fixup_f16 v5, null, exec_lo, -|0xfe0b| op_sel:[0,0,0,0]
+// GFX11: encoding: [0x05,0x04,0x54,0xd6,0x7c,0xfc,0xfc,0x83,0x0b,0xfe,0x00,0x00]
+
+v_div_fixup_f16 v5, -1, -|exec_hi|, -|src_scc| op_sel:[1,0,0,0]
+// GFX11: encoding: [0x05,0x0e,0x54,0xd6,0xc1,0xfe,0xf4,0xc3]
+
+v_div_fixup_f16 v5, 0.5, -m0, 0.5 op_sel:[0,1,0,0]
+// GFX11: encoding: [0x05,0x10,0x54,0xd6,0xf0,0xfa,0xc0,0x43]
+
+v_div_fixup_f16 v5, -src_scc, |vcc_lo|, -1 op_sel:[0,0,1,0]
+// GFX11: encoding: [0x05,0x22,0x54,0xd6,0xfd,0xd4,0x04,0x23]
+
+v_div_fixup_f16 v255, -|0xfe0b|, -|vcc_hi|, null op_sel:[0,0,0,1] clamp
+// GFX11: encoding: [0xff,0xc3,0x54,0xd6,0xff,0xd6,0xf0,0x61,0x0b,0xfe,0x00,0x00]
+
+v_div_fixup_f16 v5, 0.5, -m0, 0.5 op_sel:[0,1,0,0] mul:2
+// GFX11: encoding: [0x05,0x10,0x54,0xd6,0xf0,0xfa,0xc0,0x4b]
+
+v_div_fixup_f32 v5, v1, v2, s3
+// GFX11: encoding: [0x05,0x00,0x27,0xd6,0x01,0x05,0x0e,0x00]
+
+v_div_fixup_f32 v5, v255, s2, s105
+// GFX11: encoding: [0x05,0x00,0x27,0xd6,0xff,0x05,0xa4,0x01]
+
+v_div_fixup_f32 v5, s1, v255, exec_hi
+// GFX11: encoding: [0x05,0x00,0x27,0xd6,0x01,0xfe,0xff,0x01]
+
+v_div_fixup_f32 v5, s105, s105, exec_lo
+// GFX11: encoding: [0x05,0x00,0x27,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_div_fixup_f32 v5, vcc_lo, ttmp15, v3
+// GFX11: encoding: [0x05,0x00,0x27,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_div_fixup_f32 v5, vcc_hi, 0xaf123456, v255
+// GFX11: encoding: [0x05,0x00,0x27,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+v_div_fixup_f32 v5, -|ttmp15|, -|src_scc|, -|ttmp15|
+// GFX11: encoding: [0x05,0x07,0x27,0xd6,0x7b,0xfa,0xed,0xe1]
+
+v_div_fixup_f32 v5, m0, 0.5, m0
+// GFX11: encoding: [0x05,0x00,0x27,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_div_fixup_f32 v5, |exec_lo|, -1, vcc_hi
+// GFX11: encoding: [0x05,0x01,0x27,0xd6,0x7e,0x82,0xad,0x01]
+
+v_div_fixup_f32 v5, -|exec_hi|, null, -|vcc_lo|
+// GFX11: encoding: [0x05,0x05,0x27,0xd6,0x7f,0xf8,0xa8,0xa1]
+
+v_div_fixup_f32 v5, null, exec_lo, -|0xaf123456|
+// GFX11: encoding: [0x05,0x04,0x27,0xd6,0x7c,0xfc,0xfc,0x83,0x56,0x34,0x12,0xaf]
+
+v_div_fixup_f32 v5, -1, -|exec_hi|, -|src_scc|
+// GFX11: encoding: [0x05,0x06,0x27,0xd6,0xc1,0xfe,0xf4,0xc3]
+
+v_div_fixup_f32 v5, 0.5, -m0, 0.5 mul:2
+// GFX11: encoding: [0x05,0x00,0x27,0xd6,0xf0,0xfa,0xc0,0x4b]
+
+v_div_fixup_f32 v5, -src_scc, |vcc_lo|, -1 mul:4
+// GFX11: encoding: [0x05,0x02,0x27,0xd6,0xfd,0xd4,0x04,0x33]
+
+v_div_fixup_f32 v255, -|0xaf123456|, -|vcc_hi|, null clamp div:2
+// GFX11: encoding: [0xff,0x83,0x27,0xd6,0xff,0xd6,0xf0,0x79,0x56,0x34,0x12,0xaf]
+
+v_div_fixup_f64 v[5:6], v[1:2], v[2:3], v[3:4]
+// GFX11: encoding: [0x05,0x00,0x28,0xd6,0x01,0x05,0x0e,0x04]
+
+v_div_fixup_f64 v[5:6], v[254:255], v[254:255], s[6:7]
+// GFX11: encoding: [0x05,0x00,0x28,0xd6,0xfe,0xfd,0x1b,0x00]
+
+v_div_fixup_f64 v[5:6], s[2:3], s[4:5], v[254:255]
+// GFX11: encoding: [0x05,0x00,0x28,0xd6,0x02,0x08,0xf8,0x07]
+
+v_div_fixup_f64 v[5:6], -|s[104:105]|, s[104:105], -|s[104:105]|
+// GFX11: encoding: [0x05,0x05,0x28,0xd6,0x68,0xd0,0xa0,0xa1]
+
+v_div_fixup_f64 v[5:6], vcc, -|ttmp[14:15]|, -|ttmp[14:15]|
+// GFX11: encoding: [0x05,0x06,0x28,0xd6,0x6a,0xf4,0xe8,0xc1]
+
+v_div_fixup_f64 v[5:6], -|ttmp[14:15]|, 0xaf123456, null
+// GFX11: encoding: [0x05,0x01,0x28,0xd6,0x7a,0xfe,0xf1,0x21,0x56,0x34,0x12,0xaf]
+
+v_div_fixup_f64 v[5:6], -|exec|, -|src_scc|, -|exec|
+// GFX11: encoding: [0x05,0x07,0x28,0xd6,0x7e,0xfa,0xf9,0xe1]
+
+v_div_fixup_f64 v[5:6], null, 0.5, vcc
+// GFX11: encoding: [0x05,0x00,0x28,0xd6,0x7c,0xe0,0xa9,0x01]
+
+v_div_fixup_f64 v[5:6], -1, -1, 0xaf123456
+// GFX11: encoding: [0x05,0x00,0x28,0xd6,0xc1,0x82,0xfd,0x03,0x56,0x34,0x12,0xaf]
+
+v_div_fixup_f64 v[5:6], 0.5, null, -|src_scc| mul:2
+// GFX11: encoding: [0x05,0x04,0x28,0xd6,0xf0,0xf8,0xf4,0x8b]
+
+v_div_fixup_f64 v[5:6], -|src_scc|, -|exec|, 0.5 mul:4
+// GFX11: encoding: [0x05,0x03,0x28,0xd6,0xfd,0xfc,0xc0,0x73]
+
+v_div_fixup_f64 v[254:255], 0xaf123456, -|vcc|, -1 clamp div:2
+// GFX11: encoding: [0xfe,0x82,0x28,0xd6,0xff,0xd4,0x04,0x5b,0x56,0x34,0x12,0xaf]
+
+v_div_fmas_f32 v5, vcc_lo, v2, vcc_lo
+// W32: encoding: [0x05,0x00,0x37,0xd6,0x6a,0x04,0xaa,0x01]
+
+v_div_fmas_f32 v5, ttmp15, ttmp15, ttmp15
+// W32: encoding: [0x05,0x00,0x37,0xd6,0x7b,0xf6,0xec,0x01]
+
+v_div_fmas_f32 v5, -|m0|, -|v255|, v3
+// W32: encoding: [0x05,0x03,0x37,0xd6,0x7d,0xfe,0x0f,0x64]
+
+v_div_fmas_f32 v5, -|exec_lo|, -|exec_lo|, -|exec_lo|
+// W32: encoding: [0x05,0x07,0x37,0xd6,0x7e,0xfc,0xf8,0xe1]
+
+v_div_fmas_f32 v5, -|exec_hi|, 0.5, -|v255|
+// W32: encoding: [0x05,0x05,0x37,0xd6,0x7f,0xe0,0xfd,0xa7]
+
+v_div_fmas_f32 v5, null, exec_hi, -|exec_hi|
+// W32: encoding: [0x05,0x04,0x37,0xd6,0x7c,0xfe,0xfc,0x81]
+
+v_div_fmas_f32 v5, -1, -|m0|, -|m0|
+// W32: encoding: [0x05,0x06,0x37,0xd6,0xc1,0xfa,0xf4,0xc1]
+
+v_div_fmas_f32 v5, 0.5, -|vcc_lo|, 0.5 mul:2
+// W32: encoding: [0x05,0x02,0x37,0xd6,0xf0,0xd4,0xc0,0x4b]
+
+v_div_fmas_f32 v5, vcc_lo, v2, v3
+// W64: encoding: [0x05,0x00,0x37,0xd6,0x6a,0x04,0x0e,0x04]
+
+v_div_fmas_f32 v5, vcc_hi, v255, vcc_hi
+// W64: encoding: [0x05,0x00,0x37,0xd6,0x6b,0xfe,0xaf,0x01]
+
+v_div_fmas_f32 v5, -|ttmp15|, -|ttmp15|, ttmp15
+// W64: encoding: [0x05,0x03,0x37,0xd6,0x7b,0xf6,0xec,0x61]
+
+v_div_fmas_f32 v5, m0, 0.5, v255
+// W64: encoding: [0x05,0x00,0x37,0xd6,0x7d,0xe0,0xfd,0x07]
+
+v_div_fmas_f32 v5, -|exec_lo|, exec_lo, -|exec_lo|
+// W64: encoding: [0x05,0x05,0x37,0xd6,0x7e,0xfc,0xf8,0xa1]
+
+v_div_fmas_f32 v5, -|exec_hi|, -|exec_hi|, -|exec_hi|
+// W64: encoding: [0x05,0x07,0x37,0xd6,0x7f,0xfe,0xfc,0xe1]
+
+v_div_fmas_f32 v5, null, m0, -|m0|
+// W64: encoding: [0x05,0x04,0x37,0xd6,0x7c,0xfa,0xf4,0x81]
+
+v_div_fmas_f32 v5, -1, -|vcc_lo|, -|vcc_lo|
+// W64: encoding: [0x05,0x06,0x37,0xd6,0xc1,0xd4,0xa8,0xc1]
+
+v_div_fmas_f32 v5, 0.5, -|vcc_hi|, 0.5 mul:2
+// W64: encoding: [0x05,0x02,0x37,0xd6,0xf0,0xd6,0xc0,0x4b]
+
+v_div_fmas_f32 v5, v1, 0xaf123456, 0xaf123456
+// GFX11: encoding: [0x05,0x00,0x37,0xd6,0x01,0xff,0xfd,0x03,0x56,0x34,0x12,0xaf]
+
+v_div_fmas_f32 v5, v255, src_scc, src_scc
+// GFX11: encoding: [0x05,0x00,0x37,0xd6,0xff,0xfb,0xf5,0x03]
+
+v_div_fmas_f32 v5, s105, s105, s105
+// GFX11: encoding: [0x05,0x00,0x37,0xd6,0x69,0xd2,0xa4,0x01]
+
+v_div_fmas_f32 v5, src_scc, -1, -1 mul:4
+// GFX11: encoding: [0x05,0x00,0x37,0xd6,0xfd,0x82,0x05,0x13]
+
+v_div_fmas_f32 v255, -|0xaf123456|, null, null clamp div:2
+// GFX11: encoding: [0xff,0x81,0x37,0xd6,0xff,0xf8,0xf0,0x39,0x56,0x34,0x12,0xaf]
+
+v_div_fmas_f64 v[5:6], v[1:2], 0xaf123456, 0xaf123456
+// GFX11: encoding: [0x05,0x00,0x38,0xd6,0x01,0xff,0xfd,0x03,0x56,0x34,0x12,0xaf]
+
+v_div_fmas_f64 v[5:6], v[254:255], src_scc, v[3:4]
+// GFX11: encoding: [0x05,0x00,0x38,0xd6,0xfe,0xfb,0x0d,0x04]
+
+v_div_fmas_f64 v[5:6], s[104:105], |s[104:105]|, s[104:105]
+// GFX11: encoding: [0x05,0x02,0x38,0xd6,0x68,0xd0,0xa0,0x01]
+
+v_div_fmas_f64 v[5:6], -|vcc|, v[2:3], -|v[254:255]|
+// GFX11: encoding: [0x05,0x05,0x38,0xd6,0x6a,0x04,0xfa,0xa7]
+
+v_div_fmas_f64 v[5:6], -|ttmp[14:15]|, -|ttmp[14:15]|, -|ttmp[14:15]|
+// GFX11: encoding: [0x05,0x07,0x38,0xd6,0x7a,0xf4,0xe8,0xe1]
+
+v_div_fmas_f64 v[5:6], -|exec|, -|v[254:255]|, null
+// GFX11: encoding: [0x05,0x03,0x38,0xd6,0x7e,0xfc,0xf3,0x61]
+
+v_div_fmas_f64 v[5:6], null, 0.5, -src_scc
+// GFX11: encoding: [0x05,0x00,0x38,0xd6,0x7c,0xe0,0xf5,0x83]
+
+v_div_fmas_f64 v[5:6], -1, -exec, |exec|
+// GFX11: encoding: [0x05,0x04,0x38,0xd6,0xc1,0xfc,0xf8,0x41]
+
+v_div_fmas_f64 v[5:6], 0.5, -|vcc|, -|vcc| mul:2
+// GFX11: encoding: [0x05,0x06,0x38,0xd6,0xf0,0xd4,0xa8,0xc9]
+
+v_div_fmas_f64 v[5:6], -|src_scc|, -1, 0.5 mul:4
+// GFX11: encoding: [0x05,0x01,0x38,0xd6,0xfd,0x82,0xc1,0x33]
+
+v_div_fmas_f64 v[254:255], 0xaf123456, null, -1 clamp div:2
+// GFX11: encoding: [0xfe,0x80,0x38,0xd6,0xff,0xf8,0x04,0x1b,0x56,0x34,0x12,0xaf]
+
+v_div_scale_f32 v5, vcc_lo, v1, v2, s3
+// W32: encoding: [0x05,0x6a,0xfc,0xd6,0x01,0x05,0x0e,0x00]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_div_scale_f32 v5, vcc_lo, v255, s2, s105
+// W32: encoding: [0x05,0x6a,0xfc,0xd6,0xff,0x05,0xa4,0x01]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_div_scale_f32 v5, vcc_lo, s1, v255, exec_hi
+// W32: encoding: [0x05,0x6a,0xfc,0xd6,0x01,0xfe,0xff,0x01]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_div_scale_f32 v5, vcc_lo, s105, s105, exec_lo
+// W32: encoding: [0x05,0x6a,0xfc,0xd6,0x69,0xd2,0xf8,0x01]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_div_scale_f32 v5, vcc_lo, vcc_lo, ttmp15, v3
+// W32: encoding: [0x05,0x6a,0xfc,0xd6,0x6a,0xf6,0x0c,0x04]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_div_scale_f32 v5, vcc_lo, vcc_hi, 0xaf123456, v255
+// W32: encoding: [0x05,0x6a,0xfc,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_div_scale_f32 v5, vcc_lo, -ttmp15, -src_scc, -ttmp15
+// W32: encoding: [0x05,0x6a,0xfc,0xd6,0x7b,0xfa,0xed,0xe1]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_div_scale_f32 v5, vcc_lo, m0, 0.5, m0
+// W32: encoding: [0x05,0x6a,0xfc,0xd6,0x7d,0xe0,0xf5,0x01]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_div_scale_f32 v5, vcc_lo, exec_lo, -1, vcc_hi
+// W32: encoding: [0x05,0x6a,0xfc,0xd6,0x7e,0x82,0xad,0x01]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_div_scale_f32 v5, vcc_lo, -exec_hi, null, -vcc_lo
+// W32: encoding: [0x05,0x6a,0xfc,0xd6,0x7f,0xf8,0xa8,0xa1]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_div_scale_f32 v5, vcc_lo, null, exec_lo, neg(0xaf123456)
+// W32: encoding: [0x05,0x6a,0xfc,0xd6,0x7c,0xfc,0xfc,0x83,0x56,0x34,0x12,0xaf]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_div_scale_f32 v5, vcc_lo, -1, -exec_hi, -src_scc
+// W32: encoding: [0x05,0x6a,0xfc,0xd6,0xc1,0xfe,0xf4,0xc3]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_div_scale_f32 v5, vcc_lo, 0.5, -m0, 0.5 mul:2
+// W32: encoding: [0x05,0x6a,0xfc,0xd6,0xf0,0xfa,0xc0,0x4b]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_div_scale_f32 v5, vcc_lo, -src_scc, vcc_lo, -1 mul:4
+// W32: encoding: [0x05,0x6a,0xfc,0xd6,0xfd,0xd4,0x04,0x33]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_div_scale_f32 v255, vcc_lo, neg(0xaf123456), -vcc_hi, null clamp div:2
+// W32: encoding: [0xff,0xea,0xfc,0xd6,0xff,0xd6,0xf0,0x79,0x56,0x34,0x12,0xaf]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_div_scale_f32 v5, vcc, v1, v2, s3
+// W64: encoding: [0x05,0x6a,0xfc,0xd6,0x01,0x05,0x0e,0x00]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_div_scale_f32 v5, vcc, v255, s2, s105
+// W64: encoding: [0x05,0x6a,0xfc,0xd6,0xff,0x05,0xa4,0x01]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_div_scale_f32 v5, vcc, s1, v255, exec_hi
+// W64: encoding: [0x05,0x6a,0xfc,0xd6,0x01,0xfe,0xff,0x01]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_div_scale_f32 v5, vcc, s105, s105, exec_lo
+// W64: encoding: [0x05,0x6a,0xfc,0xd6,0x69,0xd2,0xf8,0x01]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_div_scale_f32 v5, vcc, vcc_lo, ttmp15, v3
+// W64: encoding: [0x05,0x6a,0xfc,0xd6,0x6a,0xf6,0x0c,0x04]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_div_scale_f32 v5, vcc, vcc_hi, 0xaf123456, v255
+// W64: encoding: [0x05,0x6a,0xfc,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_div_scale_f32 v5, vcc, -ttmp15, -src_scc, -ttmp15
+// W64: encoding: [0x05,0x6a,0xfc,0xd6,0x7b,0xfa,0xed,0xe1]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_div_scale_f32 v5, vcc, m0, 0.5, m0
+// W64: encoding: [0x05,0x6a,0xfc,0xd6,0x7d,0xe0,0xf5,0x01]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_div_scale_f32 v5, vcc, exec_lo, -1, vcc_hi
+// W64: encoding: [0x05,0x6a,0xfc,0xd6,0x7e,0x82,0xad,0x01]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_div_scale_f32 v5, vcc, -exec_hi, null, -vcc_lo
+// W64: encoding: [0x05,0x6a,0xfc,0xd6,0x7f,0xf8,0xa8,0xa1]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_div_scale_f32 v5, vcc, null, exec_lo, neg(0xaf123456)
+// W64: encoding: [0x05,0x6a,0xfc,0xd6,0x7c,0xfc,0xfc,0x83,0x56,0x34,0x12,0xaf]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_div_scale_f32 v5, vcc, -1, -exec_hi, -src_scc
+// W64: encoding: [0x05,0x6a,0xfc,0xd6,0xc1,0xfe,0xf4,0xc3]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_div_scale_f32 v5, vcc, 0.5, -m0, 0.5 mul:2
+// W64: encoding: [0x05,0x6a,0xfc,0xd6,0xf0,0xfa,0xc0,0x4b]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_div_scale_f32 v5, vcc, -src_scc, vcc_lo, -1 mul:4
+// W64: encoding: [0x05,0x6a,0xfc,0xd6,0xfd,0xd4,0x04,0x33]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_div_scale_f32 v255, vcc, neg(0xaf123456), -vcc_hi, null clamp div:2
+// W64: encoding: [0xff,0xea,0xfc,0xd6,0xff,0xd6,0xf0,0x79,0x56,0x34,0x12,0xaf]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_div_scale_f64 v[5:6], vcc_lo, v[1:2], v[2:3], v[3:4]
+// W32: encoding: [0x05,0x6a,0xfd,0xd6,0x01,0x05,0x0e,0x04]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_div_scale_f64 v[5:6], vcc_lo, v[254:255], v[254:255], s[6:7]
+// W32: encoding: [0x05,0x6a,0xfd,0xd6,0xfe,0xfd,0x1b,0x00]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_div_scale_f64 v[5:6], vcc_lo, s[2:3], s[4:5], v[254:255]
+// W32: encoding: [0x05,0x6a,0xfd,0xd6,0x02,0x08,0xf8,0x07]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_div_scale_f64 v[5:6], vcc_lo, -s[104:105], s[104:105], -s[104:105]
+// W32: encoding: [0x05,0x6a,0xfd,0xd6,0x68,0xd0,0xa0,0xa1]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_div_scale_f64 v[5:6], vcc_lo, vcc, -ttmp[14:15], -ttmp[14:15]
+// W32: encoding: [0x05,0x6a,0xfd,0xd6,0x6a,0xf4,0xe8,0xc1]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_div_scale_f64 v[5:6], vcc_lo, -ttmp[14:15], 0xaf123456, null
+// W32: encoding: [0x05,0x6a,0xfd,0xd6,0x7a,0xfe,0xf1,0x21,0x56,0x34,0x12,0xaf]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_div_scale_f64 v[5:6], vcc_lo, -exec, -src_scc, -exec
+// W32: encoding: [0x05,0x6a,0xfd,0xd6,0x7e,0xfa,0xf9,0xe1]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_div_scale_f64 v[5:6], vcc_lo, null, 0.5, vcc
+// W32: encoding: [0x05,0x6a,0xfd,0xd6,0x7c,0xe0,0xa9,0x01]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_div_scale_f64 v[5:6], vcc_lo, -1, -1, 0xaf123456
+// W32: encoding: [0x05,0x6a,0xfd,0xd6,0xc1,0x82,0xfd,0x03,0x56,0x34,0x12,0xaf]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_div_scale_f64 v[5:6], vcc_lo, 0.5, null, -src_scc mul:2
+// W32: encoding: [0x05,0x6a,0xfd,0xd6,0xf0,0xf8,0xf4,0x8b]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_div_scale_f64 v[5:6], vcc_lo, -src_scc, -exec, 0.5 mul:4
+// W32: encoding: [0x05,0x6a,0xfd,0xd6,0xfd,0xfc,0xc0,0x73]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_div_scale_f64 v[254:255], vcc_lo, 0xaf123456, -vcc, -1 clamp div:2
+// W32: encoding: [0xfe,0xea,0xfd,0xd6,0xff,0xd4,0x04,0x5b,0x56,0x34,0x12,0xaf]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_div_scale_f64 v[5:6], vcc, v[1:2], v[2:3], v[3:4]
+// W64: encoding: [0x05,0x6a,0xfd,0xd6,0x01,0x05,0x0e,0x04]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_div_scale_f64 v[5:6], vcc, v[254:255], v[254:255], s[6:7]
+// W64: encoding: [0x05,0x6a,0xfd,0xd6,0xfe,0xfd,0x1b,0x00]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_div_scale_f64 v[5:6], vcc, s[2:3], s[4:5], v[254:255]
+// W64: encoding: [0x05,0x6a,0xfd,0xd6,0x02,0x08,0xf8,0x07]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_div_scale_f64 v[5:6], vcc, -s[104:105], s[104:105], -s[104:105]
+// W64: encoding: [0x05,0x6a,0xfd,0xd6,0x68,0xd0,0xa0,0xa1]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_div_scale_f64 v[5:6], vcc, vcc, -ttmp[14:15], -ttmp[14:15]
+// W64: encoding: [0x05,0x6a,0xfd,0xd6,0x6a,0xf4,0xe8,0xc1]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_div_scale_f64 v[5:6], vcc, -ttmp[14:15], 0xaf123456, null
+// W64: encoding: [0x05,0x6a,0xfd,0xd6,0x7a,0xfe,0xf1,0x21,0x56,0x34,0x12,0xaf]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_div_scale_f64 v[5:6], vcc, -exec, -src_scc, -exec
+// W64: encoding: [0x05,0x6a,0xfd,0xd6,0x7e,0xfa,0xf9,0xe1]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_div_scale_f64 v[5:6], vcc, null, 0.5, vcc
+// W64: encoding: [0x05,0x6a,0xfd,0xd6,0x7c,0xe0,0xa9,0x01]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_div_scale_f64 v[5:6], vcc, -1, -1, 0xaf123456
+// W64: encoding: [0x05,0x6a,0xfd,0xd6,0xc1,0x82,0xfd,0x03,0x56,0x34,0x12,0xaf]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_div_scale_f64 v[5:6], vcc, 0.5, null, -src_scc mul:2
+// W64: encoding: [0x05,0x6a,0xfd,0xd6,0xf0,0xf8,0xf4,0x8b]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_div_scale_f64 v[5:6], vcc, -src_scc, -exec, 0.5 mul:4
+// W64: encoding: [0x05,0x6a,0xfd,0xd6,0xfd,0xfc,0xc0,0x73]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_div_scale_f64 v[254:255], vcc, 0xaf123456, -vcc, -1 clamp div:2
+// W64: encoding: [0xfe,0xea,0xfd,0xd6,0xff,0xd4,0x04,0x5b,0x56,0x34,0x12,0xaf]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_dot2_bf16_bf16 v5, v1, v2, s3
+// GFX11: encoding: [0x05,0x00,0x67,0xd6,0x01,0x05,0x0e,0x00]
+
+v_dot2_bf16_bf16 v5, v255, v255, s105
+// GFX11: encoding: [0x05,0x00,0x67,0xd6,0xff,0xff,0xa7,0x01]
+
+v_dot2_bf16_bf16 v5, s1, s2, v3
+// GFX11: encoding: [0x05,0x00,0x67,0xd6,0x01,0x04,0x0c,0x04]
+
+v_dot2_bf16_bf16 v5, s105, s105, m0
+// GFX11: encoding: [0x05,0x00,0x67,0xd6,0x69,0xd2,0xf4,0x01]
+
+v_dot2_bf16_bf16 v5, vcc_lo, ttmp15, v255
+// GFX11: encoding: [0x05,0x00,0x67,0xd6,0x6a,0xf6,0xfc,0x07]
+
+v_dot2_bf16_bf16 v5, vcc_hi, 0xfe0b, vcc_hi
+// GFX11: encoding: [0x05,0x00,0x67,0xd6,0x6b,0xfe,0xad,0x01,0x0b,0xfe,0x00,0x00]
+
+v_dot2_bf16_bf16 v5, ttmp15, src_scc, ttmp15
+// GFX11: encoding: [0x05,0x00,0x67,0xd6,0x7b,0xfa,0xed,0x01]
+
+v_dot2_bf16_bf16 v5, |m0|, -1, -vcc_lo
+// GFX11: encoding: [0x05,0x01,0x67,0xd6,0x7d,0x82,0xa9,0x81]
+
+v_dot2_bf16_bf16 v5, -|exec_lo|, null, -|0xfe0b|
+// GFX11: encoding: [0x05,0x05,0x67,0xd6,0x7e,0xf8,0xfc,0xa3,0x0b,0xfe,0x00,0x00]
+
+v_dot2_bf16_bf16 v5, -|exec_hi|, -|exec_lo|, -|exec_lo|
+// GFX11: encoding: [0x05,0x07,0x67,0xd6,0x7f,0xfc,0xf8,0xe1]
+
+v_dot2_bf16_bf16 v5, null, -exec_hi, |src_scc|
+// GFX11: encoding: [0x05,0x04,0x67,0xd6,0x7c,0xfe,0xf4,0x43]
+
+v_dot2_bf16_bf16 v5, -1, -|m0|, -|exec_hi| op_sel:[0,0,0,0]
+// GFX11: encoding: [0x05,0x06,0x67,0xd6,0xc1,0xfa,0xfc,0xc1]
+
+v_dot2_bf16_bf16 v5, -src_scc, |vcc_lo|, -1 op_sel:[0,0,1,0]
+// GFX11: encoding: [0x05,0x22,0x67,0xd6,0xfd,0xd4,0x04,0x23]
+
+v_dot2_bf16_bf16 v255, -|0xfe0b|, -|vcc_hi|, null op_sel:[0,0,0,1]
+// GFX11: encoding: [0xff,0x43,0x67,0xd6,0xff,0xd6,0xf0,0x61,0x0b,0xfe,0x00,0x00]
+
+v_dot2_bf16_bf16 v2, v0, 0x20004000, v2
+// GFX11: v_dot2_bf16_bf16 v2, v0, 0x20004000, v2 ; encoding: [0x02,0x00,0x67,0xd6,0x00,0xff,0x09,0x04,0x00,0x40,0x00,0x20]
+
+v_dot2_bf16_bf16 v2, 0x20004000, v0, v2
+// GFX11: v_dot2_bf16_bf16 v2, 0x20004000, v0, v2 ; encoding: [0x02,0x00,0x67,0xd6,0xff,0x00,0x0a,0x04,0x00,0x40,0x00,0x20]
+
+v_dot2_f16_f16 v5, v1, v2, s3
+// GFX11: encoding: [0x05,0x00,0x66,0xd6,0x01,0x05,0x0e,0x00]
+
+v_dot2_f16_f16 v5, v255, s2, s105
+// GFX11: encoding: [0x05,0x00,0x66,0xd6,0xff,0x05,0xa4,0x01]
+
+v_dot2_f16_f16 v5, s1, v255, exec_hi
+// GFX11: encoding: [0x05,0x00,0x66,0xd6,0x01,0xfe,0xff,0x01]
+
+v_dot2_f16_f16 v5, s105, s105, exec_lo
+// GFX11: encoding: [0x05,0x00,0x66,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_dot2_f16_f16 v5, vcc_lo, ttmp15, v3
+// GFX11: encoding: [0x05,0x00,0x66,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_dot2_f16_f16 v5, vcc_hi, 0xfe0b, v255
+// GFX11: encoding: [0x05,0x00,0x66,0xd6,0x6b,0xfe,0xfd,0x07,0x0b,0xfe,0x00,0x00]
+
+v_dot2_f16_f16 v5, -|ttmp15|, -|src_scc|, -|ttmp15|
+// GFX11: encoding: [0x05,0x07,0x66,0xd6,0x7b,0xfa,0xed,0xe1]
+
+v_dot2_f16_f16 v5, m0, 0.5, m0
+// GFX11: encoding: [0x05,0x00,0x66,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_dot2_f16_f16 v5, |exec_lo|, -1, vcc_hi
+// GFX11: encoding: [0x05,0x01,0x66,0xd6,0x7e,0x82,0xad,0x01]
+
+v_dot2_f16_f16 v5, -|exec_hi|, null, -|vcc_lo|
+// GFX11: encoding: [0x05,0x05,0x66,0xd6,0x7f,0xf8,0xa8,0xa1]
+
+v_dot2_f16_f16 v5, null, exec_lo, -|0xfe0b|
+// GFX11: encoding: [0x05,0x04,0x66,0xd6,0x7c,0xfc,0xfc,0x83,0x0b,0xfe,0x00,0x00]
+
+v_dot2_f16_f16 v5, -1, -|exec_hi|, -|src_scc|
+// GFX11: encoding: [0x05,0x06,0x66,0xd6,0xc1,0xfe,0xf4,0xc3]
+
+v_dot2_f16_f16 v5, 0.5, -m0, 0.5 op_sel:[0,0,0,0]
+// GFX11: encoding: [0x05,0x00,0x66,0xd6,0xf0,0xfa,0xc0,0x43]
+
+v_dot2_f16_f16 v5, -src_scc, |vcc_lo|, -1 op_sel:[0,0,1,0]
+// GFX11: encoding: [0x05,0x22,0x66,0xd6,0xfd,0xd4,0x04,0x23]
+
+v_dot2_f16_f16 v255, -|0xfe0b|, -|vcc_hi|, null op_sel:[0,0,0,1]
+// GFX11: encoding: [0xff,0x43,0x66,0xd6,0xff,0xd6,0xf0,0x61,0x0b,0xfe,0x00,0x00]
+
+v_dot2_f16_f16 v2, v0, 0x20004000, v2
+// GFX11: v_dot2_f16_f16 v2, v0, 0x20004000, v2 ; encoding: [0x02,0x00,0x66,0xd6,0x00,0xff,0x09,0x04,0x00,0x40,0x00,0x20]
+
+v_dot2_f16_f16 v2, 0x20004000, v0, v2
+// GFX11: v_dot2_f16_f16 v2, 0x20004000, v0, v2 ; encoding: [0x02,0x00,0x66,0xd6,0xff,0x00,0x0a,0x04,0x00,0x40,0x00,0x20]
+
+v_fma_dx9_zero_f32 v5, v1, v2, s3
+// GFX11: encoding: [0x05,0x00,0x09,0xd6,0x01,0x05,0x0e,0x00]
+
+v_fma_dx9_zero_f32 v5, v255, s2, s105
+// GFX11: encoding: [0x05,0x00,0x09,0xd6,0xff,0x05,0xa4,0x01]
+
+v_fma_dx9_zero_f32 v5, s1, v255, exec_hi
+// GFX11: encoding: [0x05,0x00,0x09,0xd6,0x01,0xfe,0xff,0x01]
+
+v_fma_dx9_zero_f32 v5, s105, s105, exec_lo
+// GFX11: encoding: [0x05,0x00,0x09,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_fma_dx9_zero_f32 v5, vcc_lo, ttmp15, v3
+// GFX11: encoding: [0x05,0x00,0x09,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_fma_dx9_zero_f32 v5, vcc_hi, 0xaf123456, v255
+// GFX11: encoding: [0x05,0x00,0x09,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+v_fma_dx9_zero_f32 v5, -|ttmp15|, -|src_scc|, -|ttmp15|
+// GFX11: encoding: [0x05,0x07,0x09,0xd6,0x7b,0xfa,0xed,0xe1]
+
+v_fma_dx9_zero_f32 v5, m0, 0.5, m0
+// GFX11: encoding: [0x05,0x00,0x09,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_fma_dx9_zero_f32 v5, |exec_lo|, -1, vcc_hi
+// GFX11: encoding: [0x05,0x01,0x09,0xd6,0x7e,0x82,0xad,0x01]
+
+v_fma_dx9_zero_f32 v5, -|exec_hi|, null, -|vcc_lo|
+// GFX11: encoding: [0x05,0x05,0x09,0xd6,0x7f,0xf8,0xa8,0xa1]
+
+v_fma_dx9_zero_f32 v5, null, exec_lo, -|0xaf123456|
+// GFX11: encoding: [0x05,0x04,0x09,0xd6,0x7c,0xfc,0xfc,0x83,0x56,0x34,0x12,0xaf]
+
+v_fma_dx9_zero_f32 v5, -1, -|exec_hi|, -|src_scc|
+// GFX11: encoding: [0x05,0x06,0x09,0xd6,0xc1,0xfe,0xf4,0xc3]
+
+v_fma_dx9_zero_f32 v5, 0.5, -m0, 0.5 mul:2
+// GFX11: encoding: [0x05,0x00,0x09,0xd6,0xf0,0xfa,0xc0,0x4b]
+
+v_fma_dx9_zero_f32 v5, -src_scc, |vcc_lo|, -1 mul:4
+// GFX11: encoding: [0x05,0x02,0x09,0xd6,0xfd,0xd4,0x04,0x33]
+
+v_fma_dx9_zero_f32 v255, -|0xaf123456|, -|vcc_hi|, null clamp div:2
+// GFX11: encoding: [0xff,0x83,0x09,0xd6,0xff,0xd6,0xf0,0x79,0x56,0x34,0x12,0xaf]
+
+v_fma_f16 v5, v1, v2, s3
+// GFX11: encoding: [0x05,0x00,0x48,0xd6,0x01,0x05,0x0e,0x00]
+
+v_fma_f16 v5, v255, s2, s105
+// GFX11: encoding: [0x05,0x00,0x48,0xd6,0xff,0x05,0xa4,0x01]
+
+v_fma_f16 v5, s1, v255, exec_hi
+// GFX11: encoding: [0x05,0x00,0x48,0xd6,0x01,0xfe,0xff,0x01]
+
+v_fma_f16 v5, s105, s105, exec_lo
+// GFX11: encoding: [0x05,0x00,0x48,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_fma_f16 v5, vcc_lo, ttmp15, v3
+// GFX11: encoding: [0x05,0x00,0x48,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_fma_f16 v5, vcc_hi, 0xfe0b, v255
+// GFX11: encoding: [0x05,0x00,0x48,0xd6,0x6b,0xfe,0xfd,0x07,0x0b,0xfe,0x00,0x00]
+
+v_fma_f16 v5, -|ttmp15|, -|src_scc|, -|ttmp15|
+// GFX11: encoding: [0x05,0x07,0x48,0xd6,0x7b,0xfa,0xed,0xe1]
+
+v_fma_f16 v5, m0, 0.5, m0
+// GFX11: encoding: [0x05,0x00,0x48,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_fma_f16 v5, |exec_lo|, -1, vcc_hi
+// GFX11: encoding: [0x05,0x01,0x48,0xd6,0x7e,0x82,0xad,0x01]
+
+v_fma_f16 v5, -|exec_hi|, null, -|vcc_lo| op_sel:[1,1,1,1]
+// GFX11: encoding: [0x05,0x7d,0x48,0xd6,0x7f,0xf8,0xa8,0xa1]
+
+v_fma_f16 v5, null, exec_lo, -|0xfe0b| op_sel:[0,0,0,0]
+// GFX11: encoding: [0x05,0x04,0x48,0xd6,0x7c,0xfc,0xfc,0x83,0x0b,0xfe,0x00,0x00]
+
+v_fma_f16 v5, -1, -|exec_hi|, -|src_scc| op_sel:[1,0,0,0]
+// GFX11: encoding: [0x05,0x0e,0x48,0xd6,0xc1,0xfe,0xf4,0xc3]
+
+v_fma_f16 v5, 0.5, -m0, 0.5 op_sel:[0,1,0,0]
+// GFX11: encoding: [0x05,0x10,0x48,0xd6,0xf0,0xfa,0xc0,0x43]
+
+v_fma_f16 v5, -src_scc, |vcc_lo|, -1 op_sel:[0,0,1,0]
+// GFX11: encoding: [0x05,0x22,0x48,0xd6,0xfd,0xd4,0x04,0x23]
+
+v_fma_f16 v255, -|0xfe0b|, -|vcc_hi|, null op_sel:[0,0,0,1] clamp
+// GFX11: encoding: [0xff,0xc3,0x48,0xd6,0xff,0xd6,0xf0,0x61,0x0b,0xfe,0x00,0x00]
+
+v_fma_f16 v255, -|0xfe0b|, -|vcc_hi|, null op_sel:[0,0,0,1] clamp div:2
+// GFX11: encoding: [0xff,0xc3,0x48,0xd6,0xff,0xd6,0xf0,0x79,0x0b,0xfe,0x00,0x00]
+
+v_fma_f32 v5, v1, v2, s3
+// GFX11: encoding: [0x05,0x00,0x13,0xd6,0x01,0x05,0x0e,0x00]
+
+v_fma_f32 v5, v255, s2, s105
+// GFX11: encoding: [0x05,0x00,0x13,0xd6,0xff,0x05,0xa4,0x01]
+
+v_fma_f32 v5, s1, v255, exec_hi
+// GFX11: encoding: [0x05,0x00,0x13,0xd6,0x01,0xfe,0xff,0x01]
+
+v_fma_f32 v5, s105, s105, exec_lo
+// GFX11: encoding: [0x05,0x00,0x13,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_fma_f32 v5, vcc_lo, ttmp15, v3
+// GFX11: encoding: [0x05,0x00,0x13,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_fma_f32 v5, vcc_hi, 0xaf123456, v255
+// GFX11: encoding: [0x05,0x00,0x13,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+v_fma_f32 v5, -|ttmp15|, -|src_scc|, -|ttmp15|
+// GFX11: encoding: [0x05,0x07,0x13,0xd6,0x7b,0xfa,0xed,0xe1]
+
+v_fma_f32 v5, m0, 0.5, m0
+// GFX11: encoding: [0x05,0x00,0x13,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_fma_f32 v5, |exec_lo|, -1, vcc_hi
+// GFX11: encoding: [0x05,0x01,0x13,0xd6,0x7e,0x82,0xad,0x01]
+
+v_fma_f32 v5, -|exec_hi|, null, -|vcc_lo|
+// GFX11: encoding: [0x05,0x05,0x13,0xd6,0x7f,0xf8,0xa8,0xa1]
+
+v_fma_f32 v5, null, exec_lo, -|0xaf123456|
+// GFX11: encoding: [0x05,0x04,0x13,0xd6,0x7c,0xfc,0xfc,0x83,0x56,0x34,0x12,0xaf]
+
+v_fma_f32 v5, -1, -|exec_hi|, -|src_scc|
+// GFX11: encoding: [0x05,0x06,0x13,0xd6,0xc1,0xfe,0xf4,0xc3]
+
+v_fma_f32 v5, 0.5, -m0, 0.5 mul:2
+// GFX11: encoding: [0x05,0x00,0x13,0xd6,0xf0,0xfa,0xc0,0x4b]
+
+v_fma_f32 v5, -src_scc, |vcc_lo|, -1 mul:4
+// GFX11: encoding: [0x05,0x02,0x13,0xd6,0xfd,0xd4,0x04,0x33]
+
+v_fma_f32 v255, -|0xaf123456|, -|vcc_hi|, null clamp div:2
+// GFX11: encoding: [0xff,0x83,0x13,0xd6,0xff,0xd6,0xf0,0x79,0x56,0x34,0x12,0xaf]
+
+v_fma_f64 v[5:6], v[1:2], v[2:3], v[3:4]
+// GFX11: encoding: [0x05,0x00,0x14,0xd6,0x01,0x05,0x0e,0x04]
+
+v_fma_f64 v[5:6], v[254:255], v[254:255], s[6:7]
+// GFX11: encoding: [0x05,0x00,0x14,0xd6,0xfe,0xfd,0x1b,0x00]
+
+v_fma_f64 v[5:6], s[2:3], s[4:5], v[254:255]
+// GFX11: encoding: [0x05,0x00,0x14,0xd6,0x02,0x08,0xf8,0x07]
+
+v_fma_f64 v[5:6], -|s[104:105]|, s[104:105], -|s[104:105]|
+// GFX11: encoding: [0x05,0x05,0x14,0xd6,0x68,0xd0,0xa0,0xa1]
+
+v_fma_f64 v[5:6], vcc, -|ttmp[14:15]|, -|ttmp[14:15]|
+// GFX11: encoding: [0x05,0x06,0x14,0xd6,0x6a,0xf4,0xe8,0xc1]
+
+v_fma_f64 v[5:6], -|ttmp[14:15]|, 0xaf123456, null
+// GFX11: encoding: [0x05,0x01,0x14,0xd6,0x7a,0xfe,0xf1,0x21,0x56,0x34,0x12,0xaf]
+
+v_fma_f64 v[5:6], -|exec|, -|src_scc|, -|exec|
+// GFX11: encoding: [0x05,0x07,0x14,0xd6,0x7e,0xfa,0xf9,0xe1]
+
+v_fma_f64 v[5:6], null, 0.5, vcc
+// GFX11: encoding: [0x05,0x00,0x14,0xd6,0x7c,0xe0,0xa9,0x01]
+
+v_fma_f64 v[5:6], -1, -1, 0xaf123456
+// GFX11: encoding: [0x05,0x00,0x14,0xd6,0xc1,0x82,0xfd,0x03,0x56,0x34,0x12,0xaf]
+
+v_fma_f64 v[5:6], 0.5, null, -|src_scc| mul:2
+// GFX11: encoding: [0x05,0x04,0x14,0xd6,0xf0,0xf8,0xf4,0x8b]
+
+v_fma_f64 v[5:6], -|src_scc|, -|exec|, 0.5 mul:4
+// GFX11: encoding: [0x05,0x03,0x14,0xd6,0xfd,0xfc,0xc0,0x73]
+
+v_fma_f64 v[254:255], 0xaf123456, -|vcc|, -1 clamp div:2
+// GFX11: encoding: [0xfe,0x82,0x14,0xd6,0xff,0xd4,0x04,0x5b,0x56,0x34,0x12,0xaf]
+
+v_fma_legacy_f32 v5, v1, v2, s3
+// GFX11: encoding: [0x05,0x00,0x09,0xd6,0x01,0x05,0x0e,0x00]
+
+v_fma_legacy_f32 v5, v255, s2, s105
+// GFX11: encoding: [0x05,0x00,0x09,0xd6,0xff,0x05,0xa4,0x01]
+
+v_fma_legacy_f32 v5, s1, v255, exec_hi
+// GFX11: encoding: [0x05,0x00,0x09,0xd6,0x01,0xfe,0xff,0x01]
+
+v_fma_legacy_f32 v5, s105, s105, exec_lo
+// GFX11: encoding: [0x05,0x00,0x09,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_fma_legacy_f32 v5, vcc_lo, ttmp15, v3
+// GFX11: encoding: [0x05,0x00,0x09,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_fma_legacy_f32 v5, vcc_hi, 0xaf123456, v255
+// GFX11: encoding: [0x05,0x00,0x09,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+v_fma_legacy_f32 v5, -|ttmp15|, -|src_scc|, -|ttmp15|
+// GFX11: encoding: [0x05,0x07,0x09,0xd6,0x7b,0xfa,0xed,0xe1]
+
+v_fma_legacy_f32 v5, m0, 0.5, m0
+// GFX11: encoding: [0x05,0x00,0x09,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_fma_legacy_f32 v5, |exec_lo|, -1, vcc_hi
+// GFX11: encoding: [0x05,0x01,0x09,0xd6,0x7e,0x82,0xad,0x01]
+
+v_fma_legacy_f32 v5, -|exec_hi|, null, -|vcc_lo|
+// GFX11: encoding: [0x05,0x05,0x09,0xd6,0x7f,0xf8,0xa8,0xa1]
+
+v_fma_legacy_f32 v5, null, exec_lo, -|0xaf123456|
+// GFX11: encoding: [0x05,0x04,0x09,0xd6,0x7c,0xfc,0xfc,0x83,0x56,0x34,0x12,0xaf]
+
+v_fma_legacy_f32 v5, -1, -|exec_hi|, -|src_scc|
+// GFX11: encoding: [0x05,0x06,0x09,0xd6,0xc1,0xfe,0xf4,0xc3]
+
+v_fma_legacy_f32 v5, 0.5, -m0, 0.5 mul:2
+// GFX11: encoding: [0x05,0x00,0x09,0xd6,0xf0,0xfa,0xc0,0x4b]
+
+v_fma_legacy_f32 v5, -src_scc, |vcc_lo|, -1 mul:4
+// GFX11: encoding: [0x05,0x02,0x09,0xd6,0xfd,0xd4,0x04,0x33]
+
+v_fma_legacy_f32 v255, -|0xaf123456|, -|vcc_hi|, null clamp div:2
+// GFX11: encoding: [0xff,0x83,0x09,0xd6,0xff,0xd6,0xf0,0x79,0x56,0x34,0x12,0xaf]
+
+v_ldexp_f32 v5, v1, v2
+// GFX11: encoding: [0x05,0x00,0x1c,0xd7,0x01,0x05,0x02,0x00]
+
+v_ldexp_f32 v5, v255, v255
+// GFX11: encoding: [0x05,0x00,0x1c,0xd7,0xff,0xff,0x03,0x00]
+
+v_ldexp_f32 v5, s1, s2
+// GFX11: encoding: [0x05,0x00,0x1c,0xd7,0x01,0x04,0x00,0x00]
+
+v_ldexp_f32 v5, s105, s105
+// GFX11: encoding: [0x05,0x00,0x1c,0xd7,0x69,0xd2,0x00,0x00]
+
+v_ldexp_f32 v5, vcc_lo, ttmp15
+// GFX11: encoding: [0x05,0x00,0x1c,0xd7,0x6a,0xf6,0x00,0x00]
+
+v_ldexp_f32 v5, vcc_hi, 0xaf123456
+// GFX11: encoding: [0x05,0x00,0x1c,0xd7,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_ldexp_f32 v5, ttmp15, src_scc
+// GFX11: encoding: [0x05,0x00,0x1c,0xd7,0x7b,0xfa,0x01,0x00]
+
+v_ldexp_f32 v5, m0, 0.5
+// GFX11: encoding: [0x05,0x00,0x1c,0xd7,0x7d,0xe0,0x01,0x00]
+
+v_ldexp_f32 v5, exec_lo, -1
+// GFX11: encoding: [0x05,0x00,0x1c,0xd7,0x7e,0x82,0x01,0x00]
+
+v_ldexp_f32 v5, exec_hi, null
+// GFX11: encoding: [0x05,0x00,0x1c,0xd7,0x7f,0xf8,0x00,0x00]
+
+v_ldexp_f32 v5, null, exec_lo
+// GFX11: encoding: [0x05,0x00,0x1c,0xd7,0x7c,0xfc,0x00,0x00]
+
+v_ldexp_f32 v5, -1, exec_hi
+// GFX11: encoding: [0x05,0x00,0x1c,0xd7,0xc1,0xfe,0x00,0x00]
+
+v_ldexp_f32 v5, 0.5, m0 mul:2
+// GFX11: encoding: [0x05,0x00,0x1c,0xd7,0xf0,0xfa,0x00,0x08]
+
+v_ldexp_f32 v5, src_scc, vcc_lo mul:4
+// GFX11: encoding: [0x05,0x00,0x1c,0xd7,0xfd,0xd4,0x00,0x10]
+
+v_ldexp_f32 v255, -|0xaf123456|, vcc_hi clamp div:2
+// GFX11: encoding: [0xff,0x81,0x1c,0xd7,0xff,0xd6,0x00,0x38,0x56,0x34,0x12,0xaf]
+
+v_ldexp_f64 v[5:6], v[1:2], v2
+// GFX11: encoding: [0x05,0x00,0x2b,0xd7,0x01,0x05,0x02,0x00]
+
+v_ldexp_f64 v[5:6], v[1:2], v255
+// GFX11: encoding: [0x05,0x00,0x2b,0xd7,0x01,0xff,0x03,0x00]
+
+v_ldexp_f64 v[5:6], v[1:2], s2
+// GFX11: encoding: [0x05,0x00,0x2b,0xd7,0x01,0x05,0x00,0x00]
+
+v_ldexp_f64 v[5:6], v[1:2], s105
+// GFX11: encoding: [0x05,0x00,0x2b,0xd7,0x01,0xd3,0x00,0x00]
+
+v_ldexp_f64 v[5:6], v[254:255], ttmp15
+// GFX11: encoding: [0x05,0x00,0x2b,0xd7,0xfe,0xf7,0x00,0x00]
+
+v_ldexp_f64 v[5:6], s[2:3], vcc_hi
+// GFX11: encoding: [0x05,0x00,0x2b,0xd7,0x02,0xd6,0x00,0x00]
+
+v_ldexp_f64 v[5:6], s[104:105], vcc_lo
+// GFX11: encoding: [0x05,0x00,0x2b,0xd7,0x68,0xd4,0x00,0x00]
+
+v_ldexp_f64 v[5:6], vcc, m0
+// GFX11: encoding: [0x05,0x00,0x2b,0xd7,0x6a,0xfa,0x00,0x00]
+
+v_ldexp_f64 v[5:6], ttmp[14:15], exec_hi
+// GFX11: encoding: [0x05,0x00,0x2b,0xd7,0x7a,0xfe,0x00,0x00]
+
+v_ldexp_f64 v[5:6], exec, exec_lo
+// GFX11: encoding: [0x05,0x00,0x2b,0xd7,0x7e,0xfc,0x00,0x00]
+
+v_ldexp_f64 v[5:6], null, null
+// GFX11: encoding: [0x05,0x00,0x2b,0xd7,0x7c,0xf8,0x00,0x00]
+
+v_ldexp_f64 v[5:6], -1, -1
+// GFX11: encoding: [0x05,0x00,0x2b,0xd7,0xc1,0x82,0x01,0x00]
+
+v_ldexp_f64 v[5:6], 0.5, 0.5 mul:2
+// GFX11: encoding: [0x05,0x00,0x2b,0xd7,0xf0,0xe0,0x01,0x08]
+
+v_ldexp_f64 v[5:6], -|src_scc|, src_scc mul:4
+// GFX11: encoding: [0x05,0x01,0x2b,0xd7,0xfd,0xfa,0x01,0x30]
+
+v_ldexp_f64 v[254:255], 0xaf123456, 0xaf123456 clamp div:2
+// GFX11: encoding: [0xfe,0x80,0x2b,0xd7,0xff,0xfe,0x01,0x18,0x56,0x34,0x12,0xaf]
+
+v_lerp_u8 v5, v1, v2, s3
+// GFX11: encoding: [0x05,0x00,0x15,0xd6,0x01,0x05,0x0e,0x00]
+
+v_lerp_u8 v5, v255, s2, s105
+// GFX11: encoding: [0x05,0x00,0x15,0xd6,0xff,0x05,0xa4,0x01]
+
+v_lerp_u8 v5, s1, v255, exec_hi
+// GFX11: encoding: [0x05,0x00,0x15,0xd6,0x01,0xfe,0xff,0x01]
+
+v_lerp_u8 v5, s105, s105, exec_lo
+// GFX11: encoding: [0x05,0x00,0x15,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_lerp_u8 v5, vcc_lo, ttmp15, v3
+// GFX11: encoding: [0x05,0x00,0x15,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_lerp_u8 v5, vcc_hi, 0xaf123456, v255
+// GFX11: encoding: [0x05,0x00,0x15,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+v_lerp_u8 v5, ttmp15, src_scc, ttmp15
+// GFX11: encoding: [0x05,0x00,0x15,0xd6,0x7b,0xfa,0xed,0x01]
+
+v_lerp_u8 v5, m0, 0.5, m0
+// GFX11: encoding: [0x05,0x00,0x15,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_lerp_u8 v5, exec_lo, -1, vcc_hi
+// GFX11: encoding: [0x05,0x00,0x15,0xd6,0x7e,0x82,0xad,0x01]
+
+v_lerp_u8 v5, exec_hi, null, vcc_lo
+// GFX11: encoding: [0x05,0x00,0x15,0xd6,0x7f,0xf8,0xa8,0x01]
+
+v_lerp_u8 v5, null, exec_lo, 0xaf123456
+// GFX11: encoding: [0x05,0x00,0x15,0xd6,0x7c,0xfc,0xfc,0x03,0x56,0x34,0x12,0xaf]
+
+v_lerp_u8 v5, -1, exec_hi, src_scc
+// GFX11: encoding: [0x05,0x00,0x15,0xd6,0xc1,0xfe,0xf4,0x03]
+
+v_lerp_u8 v5, 0.5, m0, 0.5
+// GFX11: encoding: [0x05,0x00,0x15,0xd6,0xf0,0xfa,0xc0,0x03]
+
+v_lerp_u8 v5, src_scc, vcc_lo, -1
+// GFX11: encoding: [0x05,0x00,0x15,0xd6,0xfd,0xd4,0x04,0x03]
+
+v_lerp_u8 v255, 0xaf123456, vcc_hi, null
+// GFX11: encoding: [0xff,0x00,0x15,0xd6,0xff,0xd6,0xf0,0x01,0x56,0x34,0x12,0xaf]
+
+v_lshl_add_u32 v5, v1, v2, s3
+// GFX11: encoding: [0x05,0x00,0x46,0xd6,0x01,0x05,0x0e,0x00]
+
+v_lshl_add_u32 v5, v255, s2, s105
+// GFX11: encoding: [0x05,0x00,0x46,0xd6,0xff,0x05,0xa4,0x01]
+
+v_lshl_add_u32 v5, s1, v255, exec_hi
+// GFX11: encoding: [0x05,0x00,0x46,0xd6,0x01,0xfe,0xff,0x01]
+
+v_lshl_add_u32 v5, s105, s105, exec_lo
+// GFX11: encoding: [0x05,0x00,0x46,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_lshl_add_u32 v5, vcc_lo, ttmp15, v3
+// GFX11: encoding: [0x05,0x00,0x46,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_lshl_add_u32 v5, vcc_hi, 0xaf123456, v255
+// GFX11: encoding: [0x05,0x00,0x46,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+v_lshl_add_u32 v5, ttmp15, src_scc, ttmp15
+// GFX11: encoding: [0x05,0x00,0x46,0xd6,0x7b,0xfa,0xed,0x01]
+
+v_lshl_add_u32 v5, m0, 0.5, m0
+// GFX11: encoding: [0x05,0x00,0x46,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_lshl_add_u32 v5, exec_lo, -1, vcc_hi
+// GFX11: encoding: [0x05,0x00,0x46,0xd6,0x7e,0x82,0xad,0x01]
+
+v_lshl_add_u32 v5, exec_hi, null, vcc_lo
+// GFX11: encoding: [0x05,0x00,0x46,0xd6,0x7f,0xf8,0xa8,0x01]
+
+v_lshl_add_u32 v5, null, exec_lo, 0xaf123456
+// GFX11: encoding: [0x05,0x00,0x46,0xd6,0x7c,0xfc,0xfc,0x03,0x56,0x34,0x12,0xaf]
+
+v_lshl_add_u32 v5, -1, exec_hi, src_scc
+// GFX11: encoding: [0x05,0x00,0x46,0xd6,0xc1,0xfe,0xf4,0x03]
+
+v_lshl_add_u32 v5, 0.5, m0, 0.5
+// GFX11: encoding: [0x05,0x00,0x46,0xd6,0xf0,0xfa,0xc0,0x03]
+
+v_lshl_add_u32 v5, src_scc, vcc_lo, -1
+// GFX11: encoding: [0x05,0x00,0x46,0xd6,0xfd,0xd4,0x04,0x03]
+
+v_lshl_add_u32 v255, 0xaf123456, vcc_hi, null
+// GFX11: encoding: [0xff,0x00,0x46,0xd6,0xff,0xd6,0xf0,0x01,0x56,0x34,0x12,0xaf]
+
+v_lshl_or_b32 v5, v1, v2, s3
+// GFX11: encoding: [0x05,0x00,0x56,0xd6,0x01,0x05,0x0e,0x00]
+
+v_lshl_or_b32 v5, v255, s2, s105
+// GFX11: encoding: [0x05,0x00,0x56,0xd6,0xff,0x05,0xa4,0x01]
+
+v_lshl_or_b32 v5, s1, v255, exec_hi
+// GFX11: encoding: [0x05,0x00,0x56,0xd6,0x01,0xfe,0xff,0x01]
+
+v_lshl_or_b32 v5, s105, s105, exec_lo
+// GFX11: encoding: [0x05,0x00,0x56,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_lshl_or_b32 v5, vcc_lo, ttmp15, v3
+// GFX11: encoding: [0x05,0x00,0x56,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_lshl_or_b32 v5, vcc_hi, 0xaf123456, v255
+// GFX11: encoding: [0x05,0x00,0x56,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+v_lshl_or_b32 v5, ttmp15, src_scc, ttmp15
+// GFX11: encoding: [0x05,0x00,0x56,0xd6,0x7b,0xfa,0xed,0x01]
+
+v_lshl_or_b32 v5, m0, 0.5, m0
+// GFX11: encoding: [0x05,0x00,0x56,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_lshl_or_b32 v5, exec_lo, -1, vcc_hi
+// GFX11: encoding: [0x05,0x00,0x56,0xd6,0x7e,0x82,0xad,0x01]
+
+v_lshl_or_b32 v5, exec_hi, null, vcc_lo
+// GFX11: encoding: [0x05,0x00,0x56,0xd6,0x7f,0xf8,0xa8,0x01]
+
+v_lshl_or_b32 v5, null, exec_lo, 0xaf123456
+// GFX11: encoding: [0x05,0x00,0x56,0xd6,0x7c,0xfc,0xfc,0x03,0x56,0x34,0x12,0xaf]
+
+v_lshl_or_b32 v5, -1, exec_hi, src_scc
+// GFX11: encoding: [0x05,0x00,0x56,0xd6,0xc1,0xfe,0xf4,0x03]
+
+v_lshl_or_b32 v5, 0.5, m0, 0.5
+// GFX11: encoding: [0x05,0x00,0x56,0xd6,0xf0,0xfa,0xc0,0x03]
+
+v_lshl_or_b32 v5, src_scc, vcc_lo, -1
+// GFX11: encoding: [0x05,0x00,0x56,0xd6,0xfd,0xd4,0x04,0x03]
+
+v_lshl_or_b32 v255, 0xaf123456, vcc_hi, null
+// GFX11: encoding: [0xff,0x00,0x56,0xd6,0xff,0xd6,0xf0,0x01,0x56,0x34,0x12,0xaf]
+
+v_lshlrev_b16 v5, v1, v2
+// GFX11: encoding: [0x05,0x00,0x38,0xd7,0x01,0x05,0x02,0x00]
+
+v_lshlrev_b16 v5, v255, v255
+// GFX11: encoding: [0x05,0x00,0x38,0xd7,0xff,0xff,0x03,0x00]
+
+v_lshlrev_b16 v5, s1, s2
+// GFX11: encoding: [0x05,0x00,0x38,0xd7,0x01,0x04,0x00,0x00]
+
+v_lshlrev_b16 v5, s105, s105
+// GFX11: encoding: [0x05,0x00,0x38,0xd7,0x69,0xd2,0x00,0x00]
+
+v_lshlrev_b16 v5, vcc_lo, ttmp15
+// GFX11: encoding: [0x05,0x00,0x38,0xd7,0x6a,0xf6,0x00,0x00]
+
+v_lshlrev_b16 v5, vcc_hi, 0xfe0b
+// GFX11: encoding: [0x05,0x00,0x38,0xd7,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+v_lshlrev_b16 v5, ttmp15, src_scc
+// GFX11: encoding: [0x05,0x00,0x38,0xd7,0x7b,0xfa,0x01,0x00]
+
+v_lshlrev_b16 v5, m0, 0.5
+// GFX11: encoding: [0x05,0x00,0x38,0xd7,0x7d,0xe0,0x01,0x00]
+
+v_lshlrev_b16 v5, exec_lo, -1
+// GFX11: encoding: [0x05,0x00,0x38,0xd7,0x7e,0x82,0x01,0x00]
+
+v_lshlrev_b16 v5, exec_hi, null
+// GFX11: encoding: [0x05,0x00,0x38,0xd7,0x7f,0xf8,0x00,0x00]
+
+v_lshlrev_b16 v5, null, exec_lo
+// GFX11: encoding: [0x05,0x00,0x38,0xd7,0x7c,0xfc,0x00,0x00]
+
+v_lshlrev_b16 v5, -1, exec_hi
+// GFX11: encoding: [0x05,0x00,0x38,0xd7,0xc1,0xfe,0x00,0x00]
+
+v_lshlrev_b16 v5, 0.5, m0
+// GFX11: encoding: [0x05,0x00,0x38,0xd7,0xf0,0xfa,0x00,0x00]
+
+v_lshlrev_b16 v5, src_scc, vcc_lo
+// GFX11: encoding: [0x05,0x00,0x38,0xd7,0xfd,0xd4,0x00,0x00]
+
+v_lshlrev_b16 v255, 0xfe0b, vcc_hi
+// GFX11: encoding: [0xff,0x00,0x38,0xd7,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
+
+v_lshlrev_b64 v[5:6], v1, vcc
+// GFX11: encoding: [0x05,0x00,0x3c,0xd7,0x01,0xd5,0x00,0x00]
+
+v_lshlrev_b64 v[5:6], v255, exec
+// GFX11: encoding: [0x05,0x00,0x3c,0xd7,0xff,0xfd,0x00,0x00]
+
+v_lshlrev_b64 v[5:6], exec_lo, v[2:3]
+// GFX11: encoding: [0x05,0x00,0x3c,0xd7,0x7e,0x04,0x02,0x00]
+
+v_lshlrev_b64 v[5:6], exec_hi, v[254:255]
+// GFX11: encoding: [0x05,0x00,0x3c,0xd7,0x7f,0xfc,0x03,0x00]
+
+v_lshlrev_b64 v[5:6], null, null
+// GFX11: encoding: [0x05,0x00,0x3c,0xd7,0x7c,0xf8,0x00,0x00]
+
+v_lshlrev_b64 v[5:6], -1, -1
+// GFX11: encoding: [0x05,0x00,0x3c,0xd7,0xc1,0x82,0x01,0x00]
+
+v_lshlrev_b64 v[5:6], 0.5, 0xaf123456
+// GFX11: encoding: [0x05,0x00,0x3c,0xd7,0xf0,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_lshlrev_b64 v[5:6], src_scc, src_scc
+// GFX11: encoding: [0x05,0x00,0x3c,0xd7,0xfd,0xfa,0x01,0x00]
+
+v_lshlrev_b64 v[254:255], 0xaf123456, 0.5
+// GFX11: encoding: [0xfe,0x00,0x3c,0xd7,0xff,0xe0,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_lshrrev_b16 v5, v1, v2
+// GFX11: encoding: [0x05,0x00,0x39,0xd7,0x01,0x05,0x02,0x00]
+
+v_lshrrev_b16 v5, v255, v255
+// GFX11: encoding: [0x05,0x00,0x39,0xd7,0xff,0xff,0x03,0x00]
+
+v_lshrrev_b16 v5, s1, s2
+// GFX11: encoding: [0x05,0x00,0x39,0xd7,0x01,0x04,0x00,0x00]
+
+v_lshrrev_b16 v5, s105, s105
+// GFX11: encoding: [0x05,0x00,0x39,0xd7,0x69,0xd2,0x00,0x00]
+
+v_lshrrev_b16 v5, vcc_lo, ttmp15
+// GFX11: encoding: [0x05,0x00,0x39,0xd7,0x6a,0xf6,0x00,0x00]
+
+v_lshrrev_b16 v5, vcc_hi, 0xfe0b
+// GFX11: encoding: [0x05,0x00,0x39,0xd7,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+v_lshrrev_b16 v5, ttmp15, src_scc
+// GFX11: encoding: [0x05,0x00,0x39,0xd7,0x7b,0xfa,0x01,0x00]
+
+v_lshrrev_b16 v5, m0, 0.5
+// GFX11: encoding: [0x05,0x00,0x39,0xd7,0x7d,0xe0,0x01,0x00]
+
+v_lshrrev_b16 v5, exec_lo, -1
+// GFX11: encoding: [0x05,0x00,0x39,0xd7,0x7e,0x82,0x01,0x00]
+
+v_lshrrev_b16 v5, exec_hi, null
+// GFX11: encoding: [0x05,0x00,0x39,0xd7,0x7f,0xf8,0x00,0x00]
+
+v_lshrrev_b16 v5, null, exec_lo
+// GFX11: encoding: [0x05,0x00,0x39,0xd7,0x7c,0xfc,0x00,0x00]
+
+v_lshrrev_b16 v5, -1, exec_hi
+// GFX11: encoding: [0x05,0x00,0x39,0xd7,0xc1,0xfe,0x00,0x00]
+
+v_lshrrev_b16 v5, 0.5, m0
+// GFX11: encoding: [0x05,0x00,0x39,0xd7,0xf0,0xfa,0x00,0x00]
+
+v_lshrrev_b16 v5, src_scc, vcc_lo
+// GFX11: encoding: [0x05,0x00,0x39,0xd7,0xfd,0xd4,0x00,0x00]
+
+v_lshrrev_b16 v255, 0xfe0b, vcc_hi
+// GFX11: encoding: [0xff,0x00,0x39,0xd7,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
+
+v_lshrrev_b64 v[5:6], v1, vcc
+// GFX11: encoding: [0x05,0x00,0x3d,0xd7,0x01,0xd5,0x00,0x00]
+
+v_lshrrev_b64 v[5:6], v255, exec
+// GFX11: encoding: [0x05,0x00,0x3d,0xd7,0xff,0xfd,0x00,0x00]
+
+v_lshrrev_b64 v[5:6], exec_lo, v[2:3]
+// GFX11: encoding: [0x05,0x00,0x3d,0xd7,0x7e,0x04,0x02,0x00]
+
+v_lshrrev_b64 v[5:6], exec_hi, v[254:255]
+// GFX11: encoding: [0x05,0x00,0x3d,0xd7,0x7f,0xfc,0x03,0x00]
+
+v_lshrrev_b64 v[5:6], null, null
+// GFX11: encoding: [0x05,0x00,0x3d,0xd7,0x7c,0xf8,0x00,0x00]
+
+v_lshrrev_b64 v[5:6], -1, -1
+// GFX11: encoding: [0x05,0x00,0x3d,0xd7,0xc1,0x82,0x01,0x00]
+
+v_lshrrev_b64 v[5:6], 0.5, 0xaf123456
+// GFX11: encoding: [0x05,0x00,0x3d,0xd7,0xf0,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_lshrrev_b64 v[5:6], src_scc, src_scc
+// GFX11: encoding: [0x05,0x00,0x3d,0xd7,0xfd,0xfa,0x01,0x00]
+
+v_lshrrev_b64 v[254:255], 0xaf123456, 0.5
+// GFX11: encoding: [0xfe,0x00,0x3d,0xd7,0xff,0xe0,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_mad_i16 v5, v1, v2, s3
+// GFX11: encoding: [0x05,0x00,0x53,0xd6,0x01,0x05,0x0e,0x00]
+
+v_mad_i16 v5, v255, s2, s105
+// GFX11: encoding: [0x05,0x00,0x53,0xd6,0xff,0x05,0xa4,0x01]
+
+v_mad_i16 v5, s1, v255, exec_hi
+// GFX11: encoding: [0x05,0x00,0x53,0xd6,0x01,0xfe,0xff,0x01]
+
+v_mad_i16 v5, s105, s105, exec_lo
+// GFX11: encoding: [0x05,0x00,0x53,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_mad_i16 v5, vcc_lo, ttmp15, v3
+// GFX11: encoding: [0x05,0x00,0x53,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_mad_i16 v5, vcc_hi, 0xfe0b, v255
+// GFX11: encoding: [0x05,0x00,0x53,0xd6,0x6b,0xfe,0xfd,0x07,0x0b,0xfe,0x00,0x00]
+
+v_mad_i16 v5, ttmp15, src_scc, ttmp15
+// GFX11: encoding: [0x05,0x00,0x53,0xd6,0x7b,0xfa,0xed,0x01]
+
+v_mad_i16 v5, m0, 0.5, m0
+// GFX11: encoding: [0x05,0x00,0x53,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_mad_i16 v5, exec_lo, -1, vcc_hi
+// GFX11: encoding: [0x05,0x00,0x53,0xd6,0x7e,0x82,0xad,0x01]
+
+v_mad_i16 v5, exec_hi, null, vcc_lo op_sel:[1,1,1,1]
+// GFX11: encoding: [0x05,0x78,0x53,0xd6,0x7f,0xf8,0xa8,0x01]
+
+v_mad_i16 v5, null, exec_lo, 0xfe0b op_sel:[0,0,0,0]
+// GFX11: encoding: [0x05,0x00,0x53,0xd6,0x7c,0xfc,0xfc,0x03,0x0b,0xfe,0x00,0x00]
+
+v_mad_i16 v5, -1, exec_hi, src_scc op_sel:[1,0,0,0]
+// GFX11: encoding: [0x05,0x08,0x53,0xd6,0xc1,0xfe,0xf4,0x03]
+
+v_mad_i16 v5, 0.5, m0, 0.5 op_sel:[0,1,0,0]
+// GFX11: encoding: [0x05,0x10,0x53,0xd6,0xf0,0xfa,0xc0,0x03]
+
+v_mad_i16 v5, src_scc, vcc_lo, -1 op_sel:[0,0,1,0]
+// GFX11: encoding: [0x05,0x20,0x53,0xd6,0xfd,0xd4,0x04,0x03]
+
+v_mad_i16 v255, 0xfe0b, vcc_hi, null op_sel:[0,0,0,1] clamp
+// GFX11: encoding: [0xff,0xc0,0x53,0xd6,0xff,0xd6,0xf0,0x01,0x0b,0xfe,0x00,0x00]
+
+v_mad_i32_i16 v5, v1, v2, v3
+// GFX11: encoding: [0x05,0x00,0x5a,0xd6,0x01,0x05,0x0e,0x04]
+
+v_mad_i32_i16 v5, v255, v255, s3
+// GFX11: encoding: [0x05,0x00,0x5a,0xd6,0xff,0xff,0x0f,0x00]
+
+v_mad_i32_i16 v5, s1, s2, v255
+// GFX11: encoding: [0x05,0x00,0x5a,0xd6,0x01,0x04,0xfc,0x07]
+
+v_mad_i32_i16 v5, s105, s105, s105
+// GFX11: encoding: [0x05,0x00,0x5a,0xd6,0x69,0xd2,0xa4,0x01]
+
+v_mad_i32_i16 v5, vcc_lo, ttmp15, vcc_lo
+// GFX11: encoding: [0x05,0x00,0x5a,0xd6,0x6a,0xf6,0xa8,0x01]
+
+v_mad_i32_i16 v5, vcc_hi, 0xfe0b, vcc_hi
+// GFX11: encoding: [0x05,0x00,0x5a,0xd6,0x6b,0xfe,0xad,0x01,0x0b,0xfe,0x00,0x00]
+
+v_mad_i32_i16 v5, ttmp15, src_scc, ttmp15
+// GFX11: encoding: [0x05,0x00,0x5a,0xd6,0x7b,0xfa,0xed,0x01]
+
+v_mad_i32_i16 v5, m0, 0.5, m0
+// GFX11: encoding: [0x05,0x00,0x5a,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_mad_i32_i16 v5, exec_lo, -1, exec_hi
+// GFX11: encoding: [0x05,0x00,0x5a,0xd6,0x7e,0x82,0xfd,0x01]
+
+v_mad_i32_i16 v5, exec_hi, null, exec_lo
+// GFX11: encoding: [0x05,0x00,0x5a,0xd6,0x7f,0xf8,0xf8,0x01]
+
+v_mad_i32_i16 v5, null, exec_lo, null
+// GFX11: encoding: [0x05,0x00,0x5a,0xd6,0x7c,0xfc,0xf0,0x01]
+
+v_mad_i32_i16 v5, -1, exec_hi, 0xaf123456
+// GFX11: encoding: [0x05,0x00,0x5a,0xd6,0xc1,0xfe,0xfc,0x03,0x56,0x34,0x12,0xaf]
+
+v_mad_i32_i16 v5, 0.5, m0, -1 op_sel:[0,0,0,0]
+// GFX11: encoding: [0x05,0x00,0x5a,0xd6,0xf0,0xfa,0x04,0x03]
+
+v_mad_i32_i16 v5, src_scc, vcc_lo, src_scc op_sel:[1,0,0,0]
+// GFX11: encoding: [0x05,0x08,0x5a,0xd6,0xfd,0xd4,0xf4,0x03]
+
+v_mad_i32_i16 v255, 0xfe0b, vcc_hi, 0.5 op_sel:[0,1,0,0] clamp
+// GFX11: encoding: [0xff,0x90,0x5a,0xd6,0xff,0xd6,0xc0,0x03,0x0b,0xfe,0x00,0x00]
+
+v_mad_i32_i24 v5, v1, v2, s3
+// GFX11: encoding: [0x05,0x00,0x0a,0xd6,0x01,0x05,0x0e,0x00]
+
+v_mad_i32_i24 v5, v255, s2, s105
+// GFX11: encoding: [0x05,0x00,0x0a,0xd6,0xff,0x05,0xa4,0x01]
+
+v_mad_i32_i24 v5, s1, v255, exec_hi
+// GFX11: encoding: [0x05,0x00,0x0a,0xd6,0x01,0xfe,0xff,0x01]
+
+v_mad_i32_i24 v5, s105, s105, exec_lo
+// GFX11: encoding: [0x05,0x00,0x0a,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_mad_i32_i24 v5, vcc_lo, ttmp15, v3
+// GFX11: encoding: [0x05,0x00,0x0a,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_mad_i32_i24 v5, vcc_hi, 0xaf123456, v255
+// GFX11: encoding: [0x05,0x00,0x0a,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+v_mad_i32_i24 v5, ttmp15, src_scc, ttmp15
+// GFX11: encoding: [0x05,0x00,0x0a,0xd6,0x7b,0xfa,0xed,0x01]
+
+v_mad_i32_i24 v5, m0, 0.5, m0
+// GFX11: encoding: [0x05,0x00,0x0a,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_mad_i32_i24 v5, exec_lo, -1, vcc_hi
+// GFX11: encoding: [0x05,0x00,0x0a,0xd6,0x7e,0x82,0xad,0x01]
+
+v_mad_i32_i24 v5, exec_hi, null, vcc_lo
+// GFX11: encoding: [0x05,0x00,0x0a,0xd6,0x7f,0xf8,0xa8,0x01]
+
+v_mad_i32_i24 v5, null, exec_lo, 0xaf123456
+// GFX11: encoding: [0x05,0x00,0x0a,0xd6,0x7c,0xfc,0xfc,0x03,0x56,0x34,0x12,0xaf]
+
+v_mad_i32_i24 v5, -1, exec_hi, src_scc
+// GFX11: encoding: [0x05,0x00,0x0a,0xd6,0xc1,0xfe,0xf4,0x03]
+
+v_mad_i32_i24 v5, 0.5, m0, 0.5
+// GFX11: encoding: [0x05,0x00,0x0a,0xd6,0xf0,0xfa,0xc0,0x03]
+
+v_mad_i32_i24 v5, src_scc, vcc_lo, -1
+// GFX11: encoding: [0x05,0x00,0x0a,0xd6,0xfd,0xd4,0x04,0x03]
+
+v_mad_i32_i24 v255, 0xaf123456, vcc_hi, null clamp
+// GFX11: encoding: [0xff,0x80,0x0a,0xd6,0xff,0xd6,0xf0,0x01,0x56,0x34,0x12,0xaf]
+
+v_mad_i64_i32 v[5:6], s6, s105, s105, s[6:7]
+// W32: encoding: [0x05,0x06,0xff,0xd6,0x69,0xd2,0x18,0x00]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_mad_i64_i32 v[5:6], s6, ttmp15, ttmp15, s[104:105]
+// W32: encoding: [0x05,0x06,0xff,0xd6,0x7b,0xf6,0xa0,0x01]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_mad_i64_i32 v[5:6], s6, m0, 0.5, ttmp[14:15]
+// W32: encoding: [0x05,0x06,0xff,0xd6,0x7d,0xe0,0xe9,0x01]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_mad_i64_i32 v[5:6], s6, exec_lo, -1, exec
+// W32: encoding: [0x05,0x06,0xff,0xd6,0x7e,0x82,0xf9,0x01]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_mad_i64_i32 v[5:6], s6, exec_hi, null, vcc
+// W32: encoding: [0x05,0x06,0xff,0xd6,0x7f,0xf8,0xa8,0x01]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_mad_i64_i32 v[5:6], s105, null, exec_lo, null
+// W32: encoding: [0x05,0x69,0xff,0xd6,0x7c,0xfc,0xf0,0x01]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_mad_i64_i32 v[5:6], vcc_lo, -1, exec_hi, -1
+// W32: encoding: [0x05,0x6a,0xff,0xd6,0xc1,0xfe,0x04,0x03]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_mad_i64_i32 v[5:6], vcc_hi, 0.5, m0, 0xaf123456
+// W32: encoding: [0x05,0x6b,0xff,0xd6,0xf0,0xfa,0xfc,0x03,0x56,0x34,0x12,0xaf]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_mad_i64_i32 v[5:6], ttmp15, src_scc, vcc_lo, src_scc
+// W32: encoding: [0x05,0x7b,0xff,0xd6,0xfd,0xd4,0xf4,0x03]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_mad_i64_i32 v[5:6], s[12:13], s105, s105, s[6:7]
+// W64: encoding: [0x05,0x0c,0xff,0xd6,0x69,0xd2,0x18,0x00]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_mad_i64_i32 v[5:6], s[12:13], ttmp15, ttmp15, s[104:105]
+// W64: encoding: [0x05,0x0c,0xff,0xd6,0x7b,0xf6,0xa0,0x01]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_mad_i64_i32 v[5:6], s[12:13], m0, 0.5, ttmp[14:15]
+// W64: encoding: [0x05,0x0c,0xff,0xd6,0x7d,0xe0,0xe9,0x01]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_mad_i64_i32 v[5:6], s[12:13], exec_lo, -1, exec
+// W64: encoding: [0x05,0x0c,0xff,0xd6,0x7e,0x82,0xf9,0x01]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_mad_i64_i32 v[5:6], s[12:13], exec_hi, null, vcc
+// W64: encoding: [0x05,0x0c,0xff,0xd6,0x7f,0xf8,0xa8,0x01]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_mad_i64_i32 v[5:6], s[12:13], null, exec_lo, null
+// W64: encoding: [0x05,0x0c,0xff,0xd6,0x7c,0xfc,0xf0,0x01]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_mad_i64_i32 v[5:6], s[104:105], -1, exec_hi, -1
+// W64: encoding: [0x05,0x68,0xff,0xd6,0xc1,0xfe,0x04,0x03]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_mad_i64_i32 v[5:6], vcc, 0.5, m0, 0xaf123456
+// W64: encoding: [0x05,0x6a,0xff,0xd6,0xf0,0xfa,0xfc,0x03,0x56,0x34,0x12,0xaf]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_mad_i64_i32 v[5:6], ttmp[14:15], src_scc, vcc_lo, src_scc
+// W64: encoding: [0x05,0x7a,0xff,0xd6,0xfd,0xd4,0xf4,0x03]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_mad_i64_i32 v[254:255], null, 0xaf123456, vcc_hi, 0.5 clamp
+// GFX11: encoding: [0xfe,0xfc,0xff,0xd6,0xff,0xd6,0xc0,0x03,0x56,0x34,0x12,0xaf]
+
+v_mad_u16 v5, v1, v2, s3
+// GFX11: encoding: [0x05,0x00,0x41,0xd6,0x01,0x05,0x0e,0x00]
+
+v_mad_u16 v5, v255, s2, s105
+// GFX11: encoding: [0x05,0x00,0x41,0xd6,0xff,0x05,0xa4,0x01]
+
+v_mad_u16 v5, s1, v255, exec_hi
+// GFX11: encoding: [0x05,0x00,0x41,0xd6,0x01,0xfe,0xff,0x01]
+
+v_mad_u16 v5, s105, s105, exec_lo
+// GFX11: encoding: [0x05,0x00,0x41,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_mad_u16 v5, vcc_lo, ttmp15, v3
+// GFX11: encoding: [0x05,0x00,0x41,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_mad_u16 v5, vcc_hi, 0xfe0b, v255
+// GFX11: encoding: [0x05,0x00,0x41,0xd6,0x6b,0xfe,0xfd,0x07,0x0b,0xfe,0x00,0x00]
+
+v_mad_u16 v5, ttmp15, src_scc, ttmp15
+// GFX11: encoding: [0x05,0x00,0x41,0xd6,0x7b,0xfa,0xed,0x01]
+
+v_mad_u16 v5, m0, 0.5, m0
+// GFX11: encoding: [0x05,0x00,0x41,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_mad_u16 v5, exec_lo, -1, vcc_hi
+// GFX11: encoding: [0x05,0x00,0x41,0xd6,0x7e,0x82,0xad,0x01]
+
+v_mad_u16 v5, exec_hi, null, vcc_lo op_sel:[1,1,1,1]
+// GFX11: encoding: [0x05,0x78,0x41,0xd6,0x7f,0xf8,0xa8,0x01]
+
+v_mad_u16 v5, null, exec_lo, 0xfe0b op_sel:[0,0,0,0]
+// GFX11: encoding: [0x05,0x00,0x41,0xd6,0x7c,0xfc,0xfc,0x03,0x0b,0xfe,0x00,0x00]
+
+v_mad_u16 v5, -1, exec_hi, src_scc op_sel:[1,0,0,0]
+// GFX11: encoding: [0x05,0x08,0x41,0xd6,0xc1,0xfe,0xf4,0x03]
+
+v_mad_u16 v5, 0.5, m0, 0.5 op_sel:[0,1,0,0]
+// GFX11: encoding: [0x05,0x10,0x41,0xd6,0xf0,0xfa,0xc0,0x03]
+
+v_mad_u16 v5, src_scc, vcc_lo, -1 op_sel:[0,0,1,0]
+// GFX11: encoding: [0x05,0x20,0x41,0xd6,0xfd,0xd4,0x04,0x03]
+
+v_mad_u16 v255, 0xfe0b, vcc_hi, null op_sel:[0,0,0,1] clamp
+// GFX11: encoding: [0xff,0xc0,0x41,0xd6,0xff,0xd6,0xf0,0x01,0x0b,0xfe,0x00,0x00]
+
+v_mad_u32_u16 v5, v1, v2, v3
+// GFX11: encoding: [0x05,0x00,0x59,0xd6,0x01,0x05,0x0e,0x04]
+
+v_mad_u32_u16 v5, v255, v255, s3
+// GFX11: encoding: [0x05,0x00,0x59,0xd6,0xff,0xff,0x0f,0x00]
+
+v_mad_u32_u16 v5, s1, s2, v255
+// GFX11: encoding: [0x05,0x00,0x59,0xd6,0x01,0x04,0xfc,0x07]
+
+v_mad_u32_u16 v5, s105, s105, s105
+// GFX11: encoding: [0x05,0x00,0x59,0xd6,0x69,0xd2,0xa4,0x01]
+
+v_mad_u32_u16 v5, vcc_lo, ttmp15, vcc_lo
+// GFX11: encoding: [0x05,0x00,0x59,0xd6,0x6a,0xf6,0xa8,0x01]
+
+v_mad_u32_u16 v5, vcc_hi, 0xfe0b, vcc_hi
+// GFX11: encoding: [0x05,0x00,0x59,0xd6,0x6b,0xfe,0xad,0x01,0x0b,0xfe,0x00,0x00]
+
+v_mad_u32_u16 v5, ttmp15, src_scc, ttmp15
+// GFX11: encoding: [0x05,0x00,0x59,0xd6,0x7b,0xfa,0xed,0x01]
+
+v_mad_u32_u16 v5, m0, 0.5, m0
+// GFX11: encoding: [0x05,0x00,0x59,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_mad_u32_u16 v5, exec_lo, -1, exec_hi
+// GFX11: encoding: [0x05,0x00,0x59,0xd6,0x7e,0x82,0xfd,0x01]
+
+v_mad_u32_u16 v5, exec_hi, null, exec_lo
+// GFX11: encoding: [0x05,0x00,0x59,0xd6,0x7f,0xf8,0xf8,0x01]
+
+v_mad_u32_u16 v5, null, exec_lo, null
+// GFX11: encoding: [0x05,0x00,0x59,0xd6,0x7c,0xfc,0xf0,0x01]
+
+v_mad_u32_u16 v5, -1, exec_hi, 0xaf123456
+// GFX11: encoding: [0x05,0x00,0x59,0xd6,0xc1,0xfe,0xfc,0x03,0x56,0x34,0x12,0xaf]
+
+v_mad_u32_u16 v5, 0.5, m0, -1 op_sel:[0,0,0,0]
+// GFX11: encoding: [0x05,0x00,0x59,0xd6,0xf0,0xfa,0x04,0x03]
+
+v_mad_u32_u16 v5, src_scc, vcc_lo, src_scc op_sel:[1,0,0,0]
+// GFX11: encoding: [0x05,0x08,0x59,0xd6,0xfd,0xd4,0xf4,0x03]
+
+v_mad_u32_u16 v255, 0xfe0b, vcc_hi, 0.5 op_sel:[0,1,0,0] clamp
+// GFX11: encoding: [0xff,0x90,0x59,0xd6,0xff,0xd6,0xc0,0x03,0x0b,0xfe,0x00,0x00]
+
+v_mad_u32_u24 v5, v1, v2, s3
+// GFX11: encoding: [0x05,0x00,0x0b,0xd6,0x01,0x05,0x0e,0x00]
+
+v_mad_u32_u24 v5, v255, s2, s105
+// GFX11: encoding: [0x05,0x00,0x0b,0xd6,0xff,0x05,0xa4,0x01]
+
+v_mad_u32_u24 v5, s1, v255, exec_hi
+// GFX11: encoding: [0x05,0x00,0x0b,0xd6,0x01,0xfe,0xff,0x01]
+
+v_mad_u32_u24 v5, s105, s105, exec_lo
+// GFX11: encoding: [0x05,0x00,0x0b,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_mad_u32_u24 v5, vcc_lo, ttmp15, v3
+// GFX11: encoding: [0x05,0x00,0x0b,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_mad_u32_u24 v5, vcc_hi, 0xaf123456, v255
+// GFX11: encoding: [0x05,0x00,0x0b,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+v_mad_u32_u24 v5, ttmp15, src_scc, ttmp15
+// GFX11: encoding: [0x05,0x00,0x0b,0xd6,0x7b,0xfa,0xed,0x01]
+
+v_mad_u32_u24 v5, m0, 0.5, m0
+// GFX11: encoding: [0x05,0x00,0x0b,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_mad_u32_u24 v5, exec_lo, -1, vcc_hi
+// GFX11: encoding: [0x05,0x00,0x0b,0xd6,0x7e,0x82,0xad,0x01]
+
+v_mad_u32_u24 v5, exec_hi, null, vcc_lo
+// GFX11: encoding: [0x05,0x00,0x0b,0xd6,0x7f,0xf8,0xa8,0x01]
+
+v_mad_u32_u24 v5, null, exec_lo, 0xaf123456
+// GFX11: encoding: [0x05,0x00,0x0b,0xd6,0x7c,0xfc,0xfc,0x03,0x56,0x34,0x12,0xaf]
+
+v_mad_u32_u24 v5, -1, exec_hi, src_scc
+// GFX11: encoding: [0x05,0x00,0x0b,0xd6,0xc1,0xfe,0xf4,0x03]
+
+v_mad_u32_u24 v5, 0.5, m0, 0.5
+// GFX11: encoding: [0x05,0x00,0x0b,0xd6,0xf0,0xfa,0xc0,0x03]
+
+v_mad_u32_u24 v5, src_scc, vcc_lo, -1
+// GFX11: encoding: [0x05,0x00,0x0b,0xd6,0xfd,0xd4,0x04,0x03]
+
+v_mad_u32_u24 v255, 0xaf123456, vcc_hi, null clamp
+// GFX11: encoding: [0xff,0x80,0x0b,0xd6,0xff,0xd6,0xf0,0x01,0x56,0x34,0x12,0xaf]
+
+v_mad_u64_u32 v[5:6], s6, s105, s105, s[6:7]
+// W32: encoding: [0x05,0x06,0xfe,0xd6,0x69,0xd2,0x18,0x00]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_mad_u64_u32 v[5:6], s6, ttmp15, ttmp15, s[104:105]
+// W32: encoding: [0x05,0x06,0xfe,0xd6,0x7b,0xf6,0xa0,0x01]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_mad_u64_u32 v[5:6], s6, m0, 0.5, ttmp[14:15]
+// W32: encoding: [0x05,0x06,0xfe,0xd6,0x7d,0xe0,0xe9,0x01]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_mad_u64_u32 v[5:6], s6, exec_lo, -1, exec
+// W32: encoding: [0x05,0x06,0xfe,0xd6,0x7e,0x82,0xf9,0x01]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_mad_u64_u32 v[5:6], s6, exec_hi, null, vcc
+// W32: encoding: [0x05,0x06,0xfe,0xd6,0x7f,0xf8,0xa8,0x01]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_mad_u64_u32 v[5:6], s105, null, exec_lo, null
+// W32: encoding: [0x05,0x69,0xfe,0xd6,0x7c,0xfc,0xf0,0x01]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_mad_u64_u32 v[5:6], vcc_lo, -1, exec_hi, -1
+// W32: encoding: [0x05,0x6a,0xfe,0xd6,0xc1,0xfe,0x04,0x03]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_mad_u64_u32 v[5:6], vcc_hi, 0.5, m0, 0xaf123456
+// W32: encoding: [0x05,0x6b,0xfe,0xd6,0xf0,0xfa,0xfc,0x03,0x56,0x34,0x12,0xaf]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_mad_u64_u32 v[5:6], ttmp15, src_scc, vcc_lo, src_scc
+// W32: encoding: [0x05,0x7b,0xfe,0xd6,0xfd,0xd4,0xf4,0x03]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_mad_u64_u32 v[5:6], s[12:13], s105, s105, s[6:7]
+// W64: encoding: [0x05,0x0c,0xfe,0xd6,0x69,0xd2,0x18,0x00]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_mad_u64_u32 v[5:6], s[12:13], ttmp15, ttmp15, s[104:105]
+// W64: encoding: [0x05,0x0c,0xfe,0xd6,0x7b,0xf6,0xa0,0x01]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_mad_u64_u32 v[5:6], s[12:13], m0, 0.5, ttmp[14:15]
+// W64: encoding: [0x05,0x0c,0xfe,0xd6,0x7d,0xe0,0xe9,0x01]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_mad_u64_u32 v[5:6], s[12:13], exec_lo, -1, exec
+// W64: encoding: [0x05,0x0c,0xfe,0xd6,0x7e,0x82,0xf9,0x01]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_mad_u64_u32 v[5:6], s[12:13], exec_hi, null, vcc
+// W64: encoding: [0x05,0x0c,0xfe,0xd6,0x7f,0xf8,0xa8,0x01]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_mad_u64_u32 v[5:6], s[12:13], null, exec_lo, null
+// W64: encoding: [0x05,0x0c,0xfe,0xd6,0x7c,0xfc,0xf0,0x01]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_mad_u64_u32 v[5:6], s[104:105], -1, exec_hi, -1
+// W64: encoding: [0x05,0x68,0xfe,0xd6,0xc1,0xfe,0x04,0x03]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_mad_u64_u32 v[5:6], vcc, 0.5, m0, 0xaf123456
+// W64: encoding: [0x05,0x6a,0xfe,0xd6,0xf0,0xfa,0xfc,0x03,0x56,0x34,0x12,0xaf]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_mad_u64_u32 v[5:6], ttmp[14:15], src_scc, vcc_lo, src_scc
+// W64: encoding: [0x05,0x7a,0xfe,0xd6,0xfd,0xd4,0xf4,0x03]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_mad_u64_u32 v[254:255], null, 0xaf123456, vcc_hi, 0.5 clamp
+// GFX11: encoding: [0xfe,0xfc,0xfe,0xd6,0xff,0xd6,0xc0,0x03,0x56,0x34,0x12,0xaf]
+
+v_max3_f16 v5, v1, v2, s3
+// GFX11: encoding: [0x05,0x00,0x4c,0xd6,0x01,0x05,0x0e,0x00]
+
+v_max3_f16 v5, v255, s2, s105
+// GFX11: encoding: [0x05,0x00,0x4c,0xd6,0xff,0x05,0xa4,0x01]
+
+v_max3_f16 v5, s1, v255, exec_hi
+// GFX11: encoding: [0x05,0x00,0x4c,0xd6,0x01,0xfe,0xff,0x01]
+
+v_max3_f16 v5, s105, s105, exec_lo
+// GFX11: encoding: [0x05,0x00,0x4c,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_max3_f16 v5, vcc_lo, ttmp15, v3
+// GFX11: encoding: [0x05,0x00,0x4c,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_max3_f16 v5, vcc_hi, 0xfe0b, v255
+// GFX11: encoding: [0x05,0x00,0x4c,0xd6,0x6b,0xfe,0xfd,0x07,0x0b,0xfe,0x00,0x00]
+
+v_max3_f16 v5, -|ttmp15|, -|src_scc|, -|ttmp15|
+// GFX11: encoding: [0x05,0x07,0x4c,0xd6,0x7b,0xfa,0xed,0xe1]
+
+v_max3_f16 v5, m0, 0.5, m0
+// GFX11: encoding: [0x05,0x00,0x4c,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_max3_f16 v5, |exec_lo|, -1, vcc_hi
+// GFX11: encoding: [0x05,0x01,0x4c,0xd6,0x7e,0x82,0xad,0x01]
+
+v_max3_f16 v5, -|exec_hi|, null, -|vcc_lo| op_sel:[1,1,1,1]
+// GFX11: encoding: [0x05,0x7d,0x4c,0xd6,0x7f,0xf8,0xa8,0xa1]
+
+v_max3_f16 v5, null, exec_lo, -|0xfe0b| op_sel:[0,0,0,0]
+// GFX11: encoding: [0x05,0x04,0x4c,0xd6,0x7c,0xfc,0xfc,0x83,0x0b,0xfe,0x00,0x00]
+
+v_max3_f16 v5, -1, -|exec_hi|, -|src_scc| op_sel:[1,0,0,0]
+// GFX11: encoding: [0x05,0x0e,0x4c,0xd6,0xc1,0xfe,0xf4,0xc3]
+
+v_max3_f16 v5, 0.5, -m0, 0.5 op_sel:[0,1,0,0]
+// GFX11: encoding: [0x05,0x10,0x4c,0xd6,0xf0,0xfa,0xc0,0x43]
+
+v_max3_f16 v5, -src_scc, |vcc_lo|, -1 op_sel:[0,0,1,0]
+// GFX11: encoding: [0x05,0x22,0x4c,0xd6,0xfd,0xd4,0x04,0x23]
+
+v_max3_f16 v255, -|0xfe0b|, -|vcc_hi|, null op_sel:[0,0,0,1] clamp
+// GFX11: encoding: [0xff,0xc3,0x4c,0xd6,0xff,0xd6,0xf0,0x61,0x0b,0xfe,0x00,0x00]
+
+v_max3_f16 v5, v255, s2, s105 mul:2
+// GFX11: encoding: [0x05,0x00,0x4c,0xd6,0xff,0x05,0xa4,0x09]
+
+v_max3_f32 v5, v1, v2, s3
+// GFX11: encoding: [0x05,0x00,0x1c,0xd6,0x01,0x05,0x0e,0x00]
+
+v_max3_f32 v5, v255, s2, s105
+// GFX11: encoding: [0x05,0x00,0x1c,0xd6,0xff,0x05,0xa4,0x01]
+
+v_max3_f32 v5, s1, v255, exec_hi
+// GFX11: encoding: [0x05,0x00,0x1c,0xd6,0x01,0xfe,0xff,0x01]
+
+v_max3_f32 v5, s105, s105, exec_lo
+// GFX11: encoding: [0x05,0x00,0x1c,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_max3_f32 v5, vcc_lo, ttmp15, v3
+// GFX11: encoding: [0x05,0x00,0x1c,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_max3_f32 v5, vcc_hi, 0xaf123456, v255
+// GFX11: encoding: [0x05,0x00,0x1c,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+v_max3_f32 v5, -|ttmp15|, -|src_scc|, -|ttmp15|
+// GFX11: encoding: [0x05,0x07,0x1c,0xd6,0x7b,0xfa,0xed,0xe1]
+
+v_max3_f32 v5, m0, 0.5, m0
+// GFX11: encoding: [0x05,0x00,0x1c,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_max3_f32 v5, |exec_lo|, -1, vcc_hi
+// GFX11: encoding: [0x05,0x01,0x1c,0xd6,0x7e,0x82,0xad,0x01]
+
+v_max3_f32 v5, -|exec_hi|, null, -|vcc_lo|
+// GFX11: encoding: [0x05,0x05,0x1c,0xd6,0x7f,0xf8,0xa8,0xa1]
+
+v_max3_f32 v5, null, exec_lo, -|0xaf123456|
+// GFX11: encoding: [0x05,0x04,0x1c,0xd6,0x7c,0xfc,0xfc,0x83,0x56,0x34,0x12,0xaf]
+
+v_max3_f32 v5, -1, -|exec_hi|, -|src_scc|
+// GFX11: encoding: [0x05,0x06,0x1c,0xd6,0xc1,0xfe,0xf4,0xc3]
+
+v_max3_f32 v5, 0.5, -m0, 0.5 mul:2
+// GFX11: encoding: [0x05,0x00,0x1c,0xd6,0xf0,0xfa,0xc0,0x4b]
+
+v_max3_f32 v5, -src_scc, |vcc_lo|, -1 mul:4
+// GFX11: encoding: [0x05,0x02,0x1c,0xd6,0xfd,0xd4,0x04,0x33]
+
+v_max3_f32 v255, -|0xaf123456|, -|vcc_hi|, null clamp div:2
+// GFX11: encoding: [0xff,0x83,0x1c,0xd6,0xff,0xd6,0xf0,0x79,0x56,0x34,0x12,0xaf]
+
+v_max3_i16 v5, v1, v2, s3
+// GFX11: encoding: [0x05,0x00,0x4d,0xd6,0x01,0x05,0x0e,0x00]
+
+v_max3_i16 v5, v255, s2, s105
+// GFX11: encoding: [0x05,0x00,0x4d,0xd6,0xff,0x05,0xa4,0x01]
+
+v_max3_i16 v5, s1, v255, exec_hi
+// GFX11: encoding: [0x05,0x00,0x4d,0xd6,0x01,0xfe,0xff,0x01]
+
+v_max3_i16 v5, s105, s105, exec_lo
+// GFX11: encoding: [0x05,0x00,0x4d,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_max3_i16 v5, vcc_lo, ttmp15, v3
+// GFX11: encoding: [0x05,0x00,0x4d,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_max3_i16 v5, vcc_hi, 0xfe0b, v255
+// GFX11: encoding: [0x05,0x00,0x4d,0xd6,0x6b,0xfe,0xfd,0x07,0x0b,0xfe,0x00,0x00]
+
+v_max3_i16 v5, ttmp15, src_scc, ttmp15
+// GFX11: encoding: [0x05,0x00,0x4d,0xd6,0x7b,0xfa,0xed,0x01]
+
+v_max3_i16 v5, m0, 0.5, m0
+// GFX11: encoding: [0x05,0x00,0x4d,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_max3_i16 v5, exec_lo, -1, vcc_hi
+// GFX11: encoding: [0x05,0x00,0x4d,0xd6,0x7e,0x82,0xad,0x01]
+
+v_max3_i16 v5, exec_hi, null, vcc_lo op_sel:[1,1,1,1]
+// GFX11: encoding: [0x05,0x78,0x4d,0xd6,0x7f,0xf8,0xa8,0x01]
+
+v_max3_i16 v5, null, exec_lo, 0xfe0b op_sel:[0,0,0,0]
+// GFX11: encoding: [0x05,0x00,0x4d,0xd6,0x7c,0xfc,0xfc,0x03,0x0b,0xfe,0x00,0x00]
+
+v_max3_i16 v5, -1, exec_hi, src_scc op_sel:[1,0,0,0]
+// GFX11: encoding: [0x05,0x08,0x4d,0xd6,0xc1,0xfe,0xf4,0x03]
+
+v_max3_i16 v5, 0.5, m0, 0.5 op_sel:[0,1,0,0]
+// GFX11: encoding: [0x05,0x10,0x4d,0xd6,0xf0,0xfa,0xc0,0x03]
+
+v_max3_i16 v5, src_scc, vcc_lo, -1 op_sel:[0,0,1,0]
+// GFX11: encoding: [0x05,0x20,0x4d,0xd6,0xfd,0xd4,0x04,0x03]
+
+v_max3_i16 v255, 0xfe0b, vcc_hi, null op_sel:[0,0,0,1]
+// GFX11: encoding: [0xff,0x40,0x4d,0xd6,0xff,0xd6,0xf0,0x01,0x0b,0xfe,0x00,0x00]
+
+v_max3_i32 v5, v1, v2, s3
+// GFX11: encoding: [0x05,0x00,0x1d,0xd6,0x01,0x05,0x0e,0x00]
+
+v_max3_i32 v5, v255, s2, s105
+// GFX11: encoding: [0x05,0x00,0x1d,0xd6,0xff,0x05,0xa4,0x01]
+
+v_max3_i32 v5, s1, v255, exec_hi
+// GFX11: encoding: [0x05,0x00,0x1d,0xd6,0x01,0xfe,0xff,0x01]
+
+v_max3_i32 v5, s105, s105, exec_lo
+// GFX11: encoding: [0x05,0x00,0x1d,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_max3_i32 v5, vcc_lo, ttmp15, v3
+// GFX11: encoding: [0x05,0x00,0x1d,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_max3_i32 v5, vcc_hi, 0xaf123456, v255
+// GFX11: encoding: [0x05,0x00,0x1d,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+v_max3_i32 v5, ttmp15, src_scc, ttmp15
+// GFX11: encoding: [0x05,0x00,0x1d,0xd6,0x7b,0xfa,0xed,0x01]
+
+v_max3_i32 v5, m0, 0.5, m0
+// GFX11: encoding: [0x05,0x00,0x1d,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_max3_i32 v5, exec_lo, -1, vcc_hi
+// GFX11: encoding: [0x05,0x00,0x1d,0xd6,0x7e,0x82,0xad,0x01]
+
+v_max3_i32 v5, exec_hi, null, vcc_lo
+// GFX11: encoding: [0x05,0x00,0x1d,0xd6,0x7f,0xf8,0xa8,0x01]
+
+v_max3_i32 v5, null, exec_lo, 0xaf123456
+// GFX11: encoding: [0x05,0x00,0x1d,0xd6,0x7c,0xfc,0xfc,0x03,0x56,0x34,0x12,0xaf]
+
+v_max3_i32 v5, -1, exec_hi, src_scc
+// GFX11: encoding: [0x05,0x00,0x1d,0xd6,0xc1,0xfe,0xf4,0x03]
+
+v_max3_i32 v5, 0.5, m0, 0.5
+// GFX11: encoding: [0x05,0x00,0x1d,0xd6,0xf0,0xfa,0xc0,0x03]
+
+v_max3_i32 v5, src_scc, vcc_lo, -1
+// GFX11: encoding: [0x05,0x00,0x1d,0xd6,0xfd,0xd4,0x04,0x03]
+
+v_max3_i32 v255, 0xaf123456, vcc_hi, null
+// GFX11: encoding: [0xff,0x00,0x1d,0xd6,0xff,0xd6,0xf0,0x01,0x56,0x34,0x12,0xaf]
+
+v_max3_u16 v5, v1, v2, s3
+// GFX11: encoding: [0x05,0x00,0x4e,0xd6,0x01,0x05,0x0e,0x00]
+
+v_max3_u16 v5, v255, s2, s105
+// GFX11: encoding: [0x05,0x00,0x4e,0xd6,0xff,0x05,0xa4,0x01]
+
+v_max3_u16 v5, s1, v255, exec_hi
+// GFX11: encoding: [0x05,0x00,0x4e,0xd6,0x01,0xfe,0xff,0x01]
+
+v_max3_u16 v5, s105, s105, exec_lo
+// GFX11: encoding: [0x05,0x00,0x4e,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_max3_u16 v5, vcc_lo, ttmp15, v3
+// GFX11: encoding: [0x05,0x00,0x4e,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_max3_u16 v5, vcc_hi, 0xfe0b, v255
+// GFX11: encoding: [0x05,0x00,0x4e,0xd6,0x6b,0xfe,0xfd,0x07,0x0b,0xfe,0x00,0x00]
+
+v_max3_u16 v5, ttmp15, src_scc, ttmp15
+// GFX11: encoding: [0x05,0x00,0x4e,0xd6,0x7b,0xfa,0xed,0x01]
+
+v_max3_u16 v5, m0, 0.5, m0
+// GFX11: encoding: [0x05,0x00,0x4e,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_max3_u16 v5, exec_lo, -1, vcc_hi
+// GFX11: encoding: [0x05,0x00,0x4e,0xd6,0x7e,0x82,0xad,0x01]
+
+v_max3_u16 v5, exec_hi, null, vcc_lo op_sel:[1,1,1,1]
+// GFX11: encoding: [0x05,0x78,0x4e,0xd6,0x7f,0xf8,0xa8,0x01]
+
+v_max3_u16 v5, null, exec_lo, 0xfe0b op_sel:[0,0,0,0]
+// GFX11: encoding: [0x05,0x00,0x4e,0xd6,0x7c,0xfc,0xfc,0x03,0x0b,0xfe,0x00,0x00]
+
+v_max3_u16 v5, -1, exec_hi, src_scc op_sel:[1,0,0,0]
+// GFX11: encoding: [0x05,0x08,0x4e,0xd6,0xc1,0xfe,0xf4,0x03]
+
+v_max3_u16 v5, 0.5, m0, 0.5 op_sel:[0,1,0,0]
+// GFX11: encoding: [0x05,0x10,0x4e,0xd6,0xf0,0xfa,0xc0,0x03]
+
+v_max3_u16 v5, src_scc, vcc_lo, -1 op_sel:[0,0,1,0]
+// GFX11: encoding: [0x05,0x20,0x4e,0xd6,0xfd,0xd4,0x04,0x03]
+
+v_max3_u16 v255, 0xfe0b, vcc_hi, null op_sel:[0,0,0,1]
+// GFX11: encoding: [0xff,0x40,0x4e,0xd6,0xff,0xd6,0xf0,0x01,0x0b,0xfe,0x00,0x00]
+
+v_max3_u32 v5, v1, v2, s3
+// GFX11: encoding: [0x05,0x00,0x1e,0xd6,0x01,0x05,0x0e,0x00]
+
+v_max3_u32 v5, v255, s2, s105
+// GFX11: encoding: [0x05,0x00,0x1e,0xd6,0xff,0x05,0xa4,0x01]
+
+v_max3_u32 v5, s1, v255, exec_hi
+// GFX11: encoding: [0x05,0x00,0x1e,0xd6,0x01,0xfe,0xff,0x01]
+
+v_max3_u32 v5, s105, s105, exec_lo
+// GFX11: encoding: [0x05,0x00,0x1e,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_max3_u32 v5, vcc_lo, ttmp15, v3
+// GFX11: encoding: [0x05,0x00,0x1e,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_max3_u32 v5, vcc_hi, 0xaf123456, v255
+// GFX11: encoding: [0x05,0x00,0x1e,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+v_max3_u32 v5, ttmp15, src_scc, ttmp15
+// GFX11: encoding: [0x05,0x00,0x1e,0xd6,0x7b,0xfa,0xed,0x01]
+
+v_max3_u32 v5, m0, 0.5, m0
+// GFX11: encoding: [0x05,0x00,0x1e,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_max3_u32 v5, exec_lo, -1, vcc_hi
+// GFX11: encoding: [0x05,0x00,0x1e,0xd6,0x7e,0x82,0xad,0x01]
+
+v_max3_u32 v5, exec_hi, null, vcc_lo
+// GFX11: encoding: [0x05,0x00,0x1e,0xd6,0x7f,0xf8,0xa8,0x01]
+
+v_max3_u32 v5, null, exec_lo, 0xaf123456
+// GFX11: encoding: [0x05,0x00,0x1e,0xd6,0x7c,0xfc,0xfc,0x03,0x56,0x34,0x12,0xaf]
+
+v_max3_u32 v5, -1, exec_hi, src_scc
+// GFX11: encoding: [0x05,0x00,0x1e,0xd6,0xc1,0xfe,0xf4,0x03]
+
+v_max3_u32 v5, 0.5, m0, 0.5
+// GFX11: encoding: [0x05,0x00,0x1e,0xd6,0xf0,0xfa,0xc0,0x03]
+
+v_max3_u32 v5, src_scc, vcc_lo, -1
+// GFX11: encoding: [0x05,0x00,0x1e,0xd6,0xfd,0xd4,0x04,0x03]
+
+v_max3_u32 v255, 0xaf123456, vcc_hi, null
+// GFX11: encoding: [0xff,0x00,0x1e,0xd6,0xff,0xd6,0xf0,0x01,0x56,0x34,0x12,0xaf]
+
+v_max_f64 v[5:6], v[1:2], v[2:3]
+// GFX11: encoding: [0x05,0x00,0x2a,0xd7,0x01,0x05,0x02,0x00]
+
+v_max_f64 v[5:6], v[254:255], v[254:255]
+// GFX11: encoding: [0x05,0x00,0x2a,0xd7,0xfe,0xfd,0x03,0x00]
+
+v_max_f64 v[5:6], s[2:3], s[4:5]
+// GFX11: encoding: [0x05,0x00,0x2a,0xd7,0x02,0x08,0x00,0x00]
+
+v_max_f64 v[5:6], s[104:105], s[104:105]
+// GFX11: encoding: [0x05,0x00,0x2a,0xd7,0x68,0xd0,0x00,0x00]
+
+v_max_f64 v[5:6], vcc, ttmp[14:15]
+// GFX11: encoding: [0x05,0x00,0x2a,0xd7,0x6a,0xf4,0x00,0x00]
+
+v_max_f64 v[5:6], ttmp[14:15], 0xaf123456
+// GFX11: encoding: [0x05,0x00,0x2a,0xd7,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_max_f64 v[5:6], -|exec|, src_scc
+// GFX11: encoding: [0x05,0x01,0x2a,0xd7,0x7e,0xfa,0x01,0x20]
+
+v_max_f64 v[5:6], null, 0.5
+// GFX11: encoding: [0x05,0x00,0x2a,0xd7,0x7c,0xe0,0x01,0x00]
+
+v_max_f64 v[5:6], -1, -1
+// GFX11: encoding: [0x05,0x00,0x2a,0xd7,0xc1,0x82,0x01,0x00]
+
+v_max_f64 v[5:6], 0.5, null mul:2
+// GFX11: encoding: [0x05,0x00,0x2a,0xd7,0xf0,0xf8,0x00,0x08]
+
+v_max_f64 v[5:6], -|src_scc|, -|exec| mul:4
+// GFX11: encoding: [0x05,0x03,0x2a,0xd7,0xfd,0xfc,0x00,0x70]
+
+v_max_f64 v[254:255], 0xaf123456, -|vcc| clamp div:2
+// GFX11: encoding: [0xfe,0x82,0x2a,0xd7,0xff,0xd4,0x00,0x58,0x56,0x34,0x12,0xaf]
+
+v_max_i16 v5, v1, v2
+// GFX11: encoding: [0x05,0x00,0x0a,0xd7,0x01,0x05,0x02,0x00]
+
+v_max_i16 v5, v255, v255
+// GFX11: encoding: [0x05,0x00,0x0a,0xd7,0xff,0xff,0x03,0x00]
+
+v_max_i16 v5, s1, s2
+// GFX11: encoding: [0x05,0x00,0x0a,0xd7,0x01,0x04,0x00,0x00]
+
+v_max_i16 v5, s105, s105
+// GFX11: encoding: [0x05,0x00,0x0a,0xd7,0x69,0xd2,0x00,0x00]
+
+v_max_i16 v5, vcc_lo, ttmp15
+// GFX11: encoding: [0x05,0x00,0x0a,0xd7,0x6a,0xf6,0x00,0x00]
+
+v_max_i16 v5, vcc_hi, 0xfe0b
+// GFX11: encoding: [0x05,0x00,0x0a,0xd7,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+v_max_i16 v5, ttmp15, src_scc
+// GFX11: encoding: [0x05,0x00,0x0a,0xd7,0x7b,0xfa,0x01,0x00]
+
+v_max_i16 v5, m0, 0.5
+// GFX11: encoding: [0x05,0x00,0x0a,0xd7,0x7d,0xe0,0x01,0x00]
+
+v_max_i16 v5, exec_lo, -1
+// GFX11: encoding: [0x05,0x00,0x0a,0xd7,0x7e,0x82,0x01,0x00]
+
+v_max_i16 v5, exec_hi, null
+// GFX11: encoding: [0x05,0x00,0x0a,0xd7,0x7f,0xf8,0x00,0x00]
+
+v_max_i16 v5, null, exec_lo
+// GFX11: encoding: [0x05,0x00,0x0a,0xd7,0x7c,0xfc,0x00,0x00]
+
+v_max_i16 v5, -1, exec_hi
+// GFX11: encoding: [0x05,0x00,0x0a,0xd7,0xc1,0xfe,0x00,0x00]
+
+v_max_i16 v5, 0.5, m0
+// GFX11: encoding: [0x05,0x00,0x0a,0xd7,0xf0,0xfa,0x00,0x00]
+
+v_max_i16 v5, src_scc, vcc_lo
+// GFX11: encoding: [0x05,0x00,0x0a,0xd7,0xfd,0xd4,0x00,0x00]
+
+v_max_i16 v255, 0xfe0b, vcc_hi
+// GFX11: encoding: [0xff,0x00,0x0a,0xd7,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
+
+v_max_u16 v5, v1, v2
+// GFX11: encoding: [0x05,0x00,0x09,0xd7,0x01,0x05,0x02,0x00]
+
+v_max_u16 v5, v255, v255
+// GFX11: encoding: [0x05,0x00,0x09,0xd7,0xff,0xff,0x03,0x00]
+
+v_max_u16 v5, s1, s2
+// GFX11: encoding: [0x05,0x00,0x09,0xd7,0x01,0x04,0x00,0x00]
+
+v_max_u16 v5, s105, s105
+// GFX11: encoding: [0x05,0x00,0x09,0xd7,0x69,0xd2,0x00,0x00]
+
+v_max_u16 v5, vcc_lo, ttmp15
+// GFX11: encoding: [0x05,0x00,0x09,0xd7,0x6a,0xf6,0x00,0x00]
+
+v_max_u16 v5, vcc_hi, 0xfe0b
+// GFX11: encoding: [0x05,0x00,0x09,0xd7,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+v_max_u16 v5, ttmp15, src_scc
+// GFX11: encoding: [0x05,0x00,0x09,0xd7,0x7b,0xfa,0x01,0x00]
+
+v_max_u16 v5, m0, 0.5
+// GFX11: encoding: [0x05,0x00,0x09,0xd7,0x7d,0xe0,0x01,0x00]
+
+v_max_u16 v5, exec_lo, -1
+// GFX11: encoding: [0x05,0x00,0x09,0xd7,0x7e,0x82,0x01,0x00]
+
+v_max_u16 v5, exec_hi, null
+// GFX11: encoding: [0x05,0x00,0x09,0xd7,0x7f,0xf8,0x00,0x00]
+
+v_max_u16 v5, null, exec_lo
+// GFX11: encoding: [0x05,0x00,0x09,0xd7,0x7c,0xfc,0x00,0x00]
+
+v_max_u16 v5, -1, exec_hi
+// GFX11: encoding: [0x05,0x00,0x09,0xd7,0xc1,0xfe,0x00,0x00]
+
+v_max_u16 v5, 0.5, m0
+// GFX11: encoding: [0x05,0x00,0x09,0xd7,0xf0,0xfa,0x00,0x00]
+
+v_max_u16 v5, src_scc, vcc_lo
+// GFX11: encoding: [0x05,0x00,0x09,0xd7,0xfd,0xd4,0x00,0x00]
+
+v_max_u16 v255, 0xfe0b, vcc_hi
+// GFX11: encoding: [0xff,0x00,0x09,0xd7,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
+
+v_maxmin_f16 v5, v1, v2, s3
+// GFX11: encoding: [0x05,0x00,0x60,0xd6,0x01,0x05,0x0e,0x00]
+
+v_maxmin_f16 v5, v255, s2, s105
+// GFX11: encoding: [0x05,0x00,0x60,0xd6,0xff,0x05,0xa4,0x01]
+
+v_maxmin_f16 v5, s1, v255, exec_hi
+// GFX11: encoding: [0x05,0x00,0x60,0xd6,0x01,0xfe,0xff,0x01]
+
+v_maxmin_f16 v5, s105, s105, exec_lo
+// GFX11: encoding: [0x05,0x00,0x60,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_maxmin_f16 v5, vcc_lo, ttmp15, v3
+// GFX11: encoding: [0x05,0x00,0x60,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_maxmin_f16 v5, vcc_hi, 0xfe0b, v255
+// GFX11: encoding: [0x05,0x00,0x60,0xd6,0x6b,0xfe,0xfd,0x07,0x0b,0xfe,0x00,0x00]
+
+v_maxmin_f16 v5, -|ttmp15|, -|src_scc|, -|ttmp15|
+// GFX11: encoding: [0x05,0x07,0x60,0xd6,0x7b,0xfa,0xed,0xe1]
+
+v_maxmin_f16 v5, m0, 0.5, m0
+// GFX11: encoding: [0x05,0x00,0x60,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_maxmin_f16 v5, |exec_lo|, -1, vcc_hi
+// GFX11: encoding: [0x05,0x01,0x60,0xd6,0x7e,0x82,0xad,0x01]
+
+v_maxmin_f16 v5, -|exec_hi|, null, -|vcc_lo|
+// GFX11: encoding: [0x05,0x05,0x60,0xd6,0x7f,0xf8,0xa8,0xa1]
+
+v_maxmin_f16 v5, null, exec_lo, -|0xfe0b|
+// GFX11: encoding: [0x05,0x04,0x60,0xd6,0x7c,0xfc,0xfc,0x83,0x0b,0xfe,0x00,0x00]
+
+v_maxmin_f16 v5, -1, -|exec_hi|, -|src_scc|
+// GFX11: encoding: [0x05,0x06,0x60,0xd6,0xc1,0xfe,0xf4,0xc3]
+
+v_maxmin_f16 v5, 0.5, -m0, 0.5 mul:2
+// GFX11: encoding: [0x05,0x00,0x60,0xd6,0xf0,0xfa,0xc0,0x4b]
+
+v_maxmin_f16 v5, -src_scc, |vcc_lo|, -1 mul:4
+// GFX11: encoding: [0x05,0x02,0x60,0xd6,0xfd,0xd4,0x04,0x33]
+
+v_maxmin_f16 v255, -|0xfe0b|, -|vcc_hi|, null clamp div:2
+// GFX11: encoding: [0xff,0x83,0x60,0xd6,0xff,0xd6,0xf0,0x79,0x0b,0xfe,0x00,0x00]
+
+v_maxmin_f32 v5, v1, v2, s3
+// GFX11: encoding: [0x05,0x00,0x5e,0xd6,0x01,0x05,0x0e,0x00]
+
+v_maxmin_f32 v5, v255, s2, s105
+// GFX11: encoding: [0x05,0x00,0x5e,0xd6,0xff,0x05,0xa4,0x01]
+
+v_maxmin_f32 v5, s1, v255, exec_hi
+// GFX11: encoding: [0x05,0x00,0x5e,0xd6,0x01,0xfe,0xff,0x01]
+
+v_maxmin_f32 v5, s105, s105, exec_lo
+// GFX11: encoding: [0x05,0x00,0x5e,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_maxmin_f32 v5, vcc_lo, ttmp15, v3
+// GFX11: encoding: [0x05,0x00,0x5e,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_maxmin_f32 v5, vcc_hi, 0xaf123456, v255
+// GFX11: encoding: [0x05,0x00,0x5e,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+v_maxmin_f32 v5, -|ttmp15|, -|src_scc|, -|ttmp15|
+// GFX11: encoding: [0x05,0x07,0x5e,0xd6,0x7b,0xfa,0xed,0xe1]
+
+v_maxmin_f32 v5, m0, 0.5, m0
+// GFX11: encoding: [0x05,0x00,0x5e,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_maxmin_f32 v5, |exec_lo|, -1, vcc_hi
+// GFX11: encoding: [0x05,0x01,0x5e,0xd6,0x7e,0x82,0xad,0x01]
+
+v_maxmin_f32 v5, -|exec_hi|, null, -|vcc_lo|
+// GFX11: encoding: [0x05,0x05,0x5e,0xd6,0x7f,0xf8,0xa8,0xa1]
+
+v_maxmin_f32 v5, null, exec_lo, -|0xaf123456|
+// GFX11: encoding: [0x05,0x04,0x5e,0xd6,0x7c,0xfc,0xfc,0x83,0x56,0x34,0x12,0xaf]
+
+v_maxmin_f32 v5, -1, -|exec_hi|, -|src_scc|
+// GFX11: encoding: [0x05,0x06,0x5e,0xd6,0xc1,0xfe,0xf4,0xc3]
+
+v_maxmin_f32 v5, 0.5, -m0, 0.5 mul:2
+// GFX11: encoding: [0x05,0x00,0x5e,0xd6,0xf0,0xfa,0xc0,0x4b]
+
+v_maxmin_f32 v5, -src_scc, |vcc_lo|, -1 mul:4
+// GFX11: encoding: [0x05,0x02,0x5e,0xd6,0xfd,0xd4,0x04,0x33]
+
+v_maxmin_f32 v255, -|0xaf123456|, -|vcc_hi|, null clamp div:2
+// GFX11: encoding: [0xff,0x83,0x5e,0xd6,0xff,0xd6,0xf0,0x79,0x56,0x34,0x12,0xaf]
+
+v_maxmin_i32 v5, v1, v2, s3
+// GFX11: encoding: [0x05,0x00,0x64,0xd6,0x01,0x05,0x0e,0x00]
+
+v_maxmin_i32 v5, v255, s2, s105
+// GFX11: encoding: [0x05,0x00,0x64,0xd6,0xff,0x05,0xa4,0x01]
+
+v_maxmin_i32 v5, s1, v255, exec_hi
+// GFX11: encoding: [0x05,0x00,0x64,0xd6,0x01,0xfe,0xff,0x01]
+
+v_maxmin_i32 v5, s105, s105, exec_lo
+// GFX11: encoding: [0x05,0x00,0x64,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_maxmin_i32 v5, vcc_lo, ttmp15, v3
+// GFX11: encoding: [0x05,0x00,0x64,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_maxmin_i32 v5, vcc_hi, 0xaf123456, v255
+// GFX11: encoding: [0x05,0x00,0x64,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+v_maxmin_i32 v5, ttmp15, src_scc, ttmp15
+// GFX11: encoding: [0x05,0x00,0x64,0xd6,0x7b,0xfa,0xed,0x01]
+
+v_maxmin_i32 v5, m0, 0.5, m0
+// GFX11: encoding: [0x05,0x00,0x64,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_maxmin_i32 v5, exec_lo, -1, vcc_hi
+// GFX11: encoding: [0x05,0x00,0x64,0xd6,0x7e,0x82,0xad,0x01]
+
+v_maxmin_i32 v5, exec_hi, null, vcc_lo
+// GFX11: encoding: [0x05,0x00,0x64,0xd6,0x7f,0xf8,0xa8,0x01]
+
+v_maxmin_i32 v5, null, exec_lo, 0xaf123456
+// GFX11: encoding: [0x05,0x00,0x64,0xd6,0x7c,0xfc,0xfc,0x03,0x56,0x34,0x12,0xaf]
+
+v_maxmin_i32 v5, -1, exec_hi, src_scc
+// GFX11: encoding: [0x05,0x00,0x64,0xd6,0xc1,0xfe,0xf4,0x03]
+
+v_maxmin_i32 v5, 0.5, m0, 0.5
+// GFX11: encoding: [0x05,0x00,0x64,0xd6,0xf0,0xfa,0xc0,0x03]
+
+v_maxmin_i32 v5, src_scc, vcc_lo, -1
+// GFX11: encoding: [0x05,0x00,0x64,0xd6,0xfd,0xd4,0x04,0x03]
+
+v_maxmin_i32 v255, 0xaf123456, vcc_hi, null
+// GFX11: encoding: [0xff,0x00,0x64,0xd6,0xff,0xd6,0xf0,0x01,0x56,0x34,0x12,0xaf]
+
+v_maxmin_u32 v5, v1, v2, s3
+// GFX11: encoding: [0x05,0x00,0x62,0xd6,0x01,0x05,0x0e,0x00]
+
+v_maxmin_u32 v5, v255, s2, s105
+// GFX11: encoding: [0x05,0x00,0x62,0xd6,0xff,0x05,0xa4,0x01]
+
+v_maxmin_u32 v5, s1, v255, exec_hi
+// GFX11: encoding: [0x05,0x00,0x62,0xd6,0x01,0xfe,0xff,0x01]
+
+v_maxmin_u32 v5, s105, s105, exec_lo
+// GFX11: encoding: [0x05,0x00,0x62,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_maxmin_u32 v5, vcc_lo, ttmp15, v3
+// GFX11: encoding: [0x05,0x00,0x62,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_maxmin_u32 v5, vcc_hi, 0xaf123456, v255
+// GFX11: encoding: [0x05,0x00,0x62,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+v_maxmin_u32 v5, ttmp15, src_scc, ttmp15
+// GFX11: encoding: [0x05,0x00,0x62,0xd6,0x7b,0xfa,0xed,0x01]
+
+v_maxmin_u32 v5, m0, 0.5, m0
+// GFX11: encoding: [0x05,0x00,0x62,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_maxmin_u32 v5, exec_lo, -1, vcc_hi
+// GFX11: encoding: [0x05,0x00,0x62,0xd6,0x7e,0x82,0xad,0x01]
+
+v_maxmin_u32 v5, exec_hi, null, vcc_lo
+// GFX11: encoding: [0x05,0x00,0x62,0xd6,0x7f,0xf8,0xa8,0x01]
+
+v_maxmin_u32 v5, null, exec_lo, 0xaf123456
+// GFX11: encoding: [0x05,0x00,0x62,0xd6,0x7c,0xfc,0xfc,0x03,0x56,0x34,0x12,0xaf]
+
+v_maxmin_u32 v5, -1, exec_hi, src_scc
+// GFX11: encoding: [0x05,0x00,0x62,0xd6,0xc1,0xfe,0xf4,0x03]
+
+v_maxmin_u32 v5, 0.5, m0, 0.5
+// GFX11: encoding: [0x05,0x00,0x62,0xd6,0xf0,0xfa,0xc0,0x03]
+
+v_maxmin_u32 v5, src_scc, vcc_lo, -1
+// GFX11: encoding: [0x05,0x00,0x62,0xd6,0xfd,0xd4,0x04,0x03]
+
+v_maxmin_u32 v255, 0xaf123456, vcc_hi, null
+// GFX11: encoding: [0xff,0x00,0x62,0xd6,0xff,0xd6,0xf0,0x01,0x56,0x34,0x12,0xaf]
+
+v_mbcnt_hi_u32_b32 v5, v1, v2
+// GFX11: encoding: [0x05,0x00,0x20,0xd7,0x01,0x05,0x02,0x00]
+
+v_mbcnt_hi_u32_b32 v5, v255, v255
+// GFX11: encoding: [0x05,0x00,0x20,0xd7,0xff,0xff,0x03,0x00]
+
+v_mbcnt_hi_u32_b32 v5, s1, s2
+// GFX11: encoding: [0x05,0x00,0x20,0xd7,0x01,0x04,0x00,0x00]
+
+v_mbcnt_hi_u32_b32 v5, s105, s105
+// GFX11: encoding: [0x05,0x00,0x20,0xd7,0x69,0xd2,0x00,0x00]
+
+v_mbcnt_hi_u32_b32 v5, vcc_lo, ttmp15
+// GFX11: encoding: [0x05,0x00,0x20,0xd7,0x6a,0xf6,0x00,0x00]
+
+v_mbcnt_hi_u32_b32 v5, vcc_hi, 0xaf123456
+// GFX11: encoding: [0x05,0x00,0x20,0xd7,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_mbcnt_hi_u32_b32 v5, ttmp15, src_scc
+// GFX11: encoding: [0x05,0x00,0x20,0xd7,0x7b,0xfa,0x01,0x00]
+
+v_mbcnt_hi_u32_b32 v5, m0, 0.5
+// GFX11: encoding: [0x05,0x00,0x20,0xd7,0x7d,0xe0,0x01,0x00]
+
+v_mbcnt_hi_u32_b32 v5, exec_lo, -1
+// GFX11: encoding: [0x05,0x00,0x20,0xd7,0x7e,0x82,0x01,0x00]
+
+v_mbcnt_hi_u32_b32 v5, exec_hi, null
+// GFX11: encoding: [0x05,0x00,0x20,0xd7,0x7f,0xf8,0x00,0x00]
+
+v_mbcnt_hi_u32_b32 v5, null, exec_lo
+// GFX11: encoding: [0x05,0x00,0x20,0xd7,0x7c,0xfc,0x00,0x00]
+
+v_mbcnt_hi_u32_b32 v5, -1, exec_hi
+// GFX11: encoding: [0x05,0x00,0x20,0xd7,0xc1,0xfe,0x00,0x00]
+
+v_mbcnt_hi_u32_b32 v5, 0.5, m0
+// GFX11: encoding: [0x05,0x00,0x20,0xd7,0xf0,0xfa,0x00,0x00]
+
+v_mbcnt_hi_u32_b32 v5, src_scc, vcc_lo
+// GFX11: encoding: [0x05,0x00,0x20,0xd7,0xfd,0xd4,0x00,0x00]
+
+v_mbcnt_hi_u32_b32 v255, 0xaf123456, vcc_hi
+// GFX11: encoding: [0xff,0x00,0x20,0xd7,0xff,0xd6,0x00,0x00,0x56,0x34,0x12,0xaf]
+
+v_mbcnt_lo_u32_b32 v5, v1, v2
+// GFX11: encoding: [0x05,0x00,0x1f,0xd7,0x01,0x05,0x02,0x00]
+
+v_mbcnt_lo_u32_b32 v5, v255, v255
+// GFX11: encoding: [0x05,0x00,0x1f,0xd7,0xff,0xff,0x03,0x00]
+
+v_mbcnt_lo_u32_b32 v5, s1, s2
+// GFX11: encoding: [0x05,0x00,0x1f,0xd7,0x01,0x04,0x00,0x00]
+
+v_mbcnt_lo_u32_b32 v5, s105, s105
+// GFX11: encoding: [0x05,0x00,0x1f,0xd7,0x69,0xd2,0x00,0x00]
+
+v_mbcnt_lo_u32_b32 v5, vcc_lo, ttmp15
+// GFX11: encoding: [0x05,0x00,0x1f,0xd7,0x6a,0xf6,0x00,0x00]
+
+v_mbcnt_lo_u32_b32 v5, vcc_hi, 0xaf123456
+// GFX11: encoding: [0x05,0x00,0x1f,0xd7,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_mbcnt_lo_u32_b32 v5, ttmp15, src_scc
+// GFX11: encoding: [0x05,0x00,0x1f,0xd7,0x7b,0xfa,0x01,0x00]
+
+v_mbcnt_lo_u32_b32 v5, m0, 0.5
+// GFX11: encoding: [0x05,0x00,0x1f,0xd7,0x7d,0xe0,0x01,0x00]
+
+v_mbcnt_lo_u32_b32 v5, exec_lo, -1
+// GFX11: encoding: [0x05,0x00,0x1f,0xd7,0x7e,0x82,0x01,0x00]
+
+v_mbcnt_lo_u32_b32 v5, exec_hi, null
+// GFX11: encoding: [0x05,0x00,0x1f,0xd7,0x7f,0xf8,0x00,0x00]
+
+v_mbcnt_lo_u32_b32 v5, null, exec_lo
+// GFX11: encoding: [0x05,0x00,0x1f,0xd7,0x7c,0xfc,0x00,0x00]
+
+v_mbcnt_lo_u32_b32 v5, -1, exec_hi
+// GFX11: encoding: [0x05,0x00,0x1f,0xd7,0xc1,0xfe,0x00,0x00]
+
+v_mbcnt_lo_u32_b32 v5, 0.5, m0
+// GFX11: encoding: [0x05,0x00,0x1f,0xd7,0xf0,0xfa,0x00,0x00]
+
+v_mbcnt_lo_u32_b32 v5, src_scc, vcc_lo
+// GFX11: encoding: [0x05,0x00,0x1f,0xd7,0xfd,0xd4,0x00,0x00]
+
+v_mbcnt_lo_u32_b32 v255, 0xaf123456, vcc_hi
+// GFX11: encoding: [0xff,0x00,0x1f,0xd7,0xff,0xd6,0x00,0x00,0x56,0x34,0x12,0xaf]
+
+v_med3_f16 v5, v1, v2, s3
+// GFX11: encoding: [0x05,0x00,0x4f,0xd6,0x01,0x05,0x0e,0x00]
+
+v_med3_f16 v5, v255, s2, s105
+// GFX11: encoding: [0x05,0x00,0x4f,0xd6,0xff,0x05,0xa4,0x01]
+
+v_med3_f16 v5, s1, v255, exec_hi
+// GFX11: encoding: [0x05,0x00,0x4f,0xd6,0x01,0xfe,0xff,0x01]
+
+v_med3_f16 v5, s105, s105, exec_lo
+// GFX11: encoding: [0x05,0x00,0x4f,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_med3_f16 v5, vcc_lo, ttmp15, v3
+// GFX11: encoding: [0x05,0x00,0x4f,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_med3_f16 v5, vcc_hi, 0xfe0b, v255
+// GFX11: encoding: [0x05,0x00,0x4f,0xd6,0x6b,0xfe,0xfd,0x07,0x0b,0xfe,0x00,0x00]
+
+v_med3_f16 v5, -|ttmp15|, -|src_scc|, -|ttmp15|
+// GFX11: encoding: [0x05,0x07,0x4f,0xd6,0x7b,0xfa,0xed,0xe1]
+
+v_med3_f16 v5, m0, 0.5, m0
+// GFX11: encoding: [0x05,0x00,0x4f,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_med3_f16 v5, |exec_lo|, -1, vcc_hi
+// GFX11: encoding: [0x05,0x01,0x4f,0xd6,0x7e,0x82,0xad,0x01]
+
+v_med3_f16 v5, -|exec_hi|, null, -|vcc_lo| op_sel:[1,1,1,1]
+// GFX11: encoding: [0x05,0x7d,0x4f,0xd6,0x7f,0xf8,0xa8,0xa1]
+
+v_med3_f16 v5, null, exec_lo, -|0xfe0b| op_sel:[0,0,0,0]
+// GFX11: encoding: [0x05,0x04,0x4f,0xd6,0x7c,0xfc,0xfc,0x83,0x0b,0xfe,0x00,0x00]
+
+v_med3_f16 v5, -1, -|exec_hi|, -|src_scc| op_sel:[1,0,0,0]
+// GFX11: encoding: [0x05,0x0e,0x4f,0xd6,0xc1,0xfe,0xf4,0xc3]
+
+v_med3_f16 v5, 0.5, -m0, 0.5 op_sel:[0,1,0,0]
+// GFX11: encoding: [0x05,0x10,0x4f,0xd6,0xf0,0xfa,0xc0,0x43]
+
+v_med3_f16 v5, -src_scc, |vcc_lo|, -1 op_sel:[0,0,1,0]
+// GFX11: encoding: [0x05,0x22,0x4f,0xd6,0xfd,0xd4,0x04,0x23]
+
+v_med3_f16 v255, -|0xfe0b|, -|vcc_hi|, null op_sel:[0,0,0,1] clamp
+// GFX11: encoding: [0xff,0xc3,0x4f,0xd6,0xff,0xd6,0xf0,0x61,0x0b,0xfe,0x00,0x00]
+
+v_med3_f16 v5, 0.5, -m0, 0.5 op_sel:[0,1,0,0] div:2
+// GFX11: encoding: [0x05,0x10,0x4f,0xd6,0xf0,0xfa,0xc0,0x5b]
+
+v_med3_f32 v5, v1, v2, s3
+// GFX11: encoding: [0x05,0x00,0x1f,0xd6,0x01,0x05,0x0e,0x00]
+
+v_med3_f32 v5, v255, s2, s105
+// GFX11: encoding: [0x05,0x00,0x1f,0xd6,0xff,0x05,0xa4,0x01]
+
+v_med3_f32 v5, s1, v255, exec_hi
+// GFX11: encoding: [0x05,0x00,0x1f,0xd6,0x01,0xfe,0xff,0x01]
+
+v_med3_f32 v5, s105, s105, exec_lo
+// GFX11: encoding: [0x05,0x00,0x1f,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_med3_f32 v5, vcc_lo, ttmp15, v3
+// GFX11: encoding: [0x05,0x00,0x1f,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_med3_f32 v5, vcc_hi, 0xaf123456, v255
+// GFX11: encoding: [0x05,0x00,0x1f,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+v_med3_f32 v5, -|ttmp15|, -|src_scc|, -|ttmp15|
+// GFX11: encoding: [0x05,0x07,0x1f,0xd6,0x7b,0xfa,0xed,0xe1]
+
+v_med3_f32 v5, m0, 0.5, m0
+// GFX11: encoding: [0x05,0x00,0x1f,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_med3_f32 v5, |exec_lo|, -1, vcc_hi
+// GFX11: encoding: [0x05,0x01,0x1f,0xd6,0x7e,0x82,0xad,0x01]
+
+v_med3_f32 v5, -|exec_hi|, null, -|vcc_lo|
+// GFX11: encoding: [0x05,0x05,0x1f,0xd6,0x7f,0xf8,0xa8,0xa1]
+
+v_med3_f32 v5, null, exec_lo, -|0xaf123456|
+// GFX11: encoding: [0x05,0x04,0x1f,0xd6,0x7c,0xfc,0xfc,0x83,0x56,0x34,0x12,0xaf]
+
+v_med3_f32 v5, -1, -|exec_hi|, -|src_scc|
+// GFX11: encoding: [0x05,0x06,0x1f,0xd6,0xc1,0xfe,0xf4,0xc3]
+
+v_med3_f32 v5, 0.5, -m0, 0.5 mul:2
+// GFX11: encoding: [0x05,0x00,0x1f,0xd6,0xf0,0xfa,0xc0,0x4b]
+
+v_med3_f32 v5, -src_scc, |vcc_lo|, -1 mul:4
+// GFX11: encoding: [0x05,0x02,0x1f,0xd6,0xfd,0xd4,0x04,0x33]
+
+v_med3_f32 v255, -|0xaf123456|, -|vcc_hi|, null clamp div:2
+// GFX11: encoding: [0xff,0x83,0x1f,0xd6,0xff,0xd6,0xf0,0x79,0x56,0x34,0x12,0xaf]
+
+v_med3_i16 v5, v1, v2, s3
+// GFX11: encoding: [0x05,0x00,0x50,0xd6,0x01,0x05,0x0e,0x00]
+
+v_med3_i16 v5, v255, s2, s105
+// GFX11: encoding: [0x05,0x00,0x50,0xd6,0xff,0x05,0xa4,0x01]
+
+v_med3_i16 v5, s1, v255, exec_hi
+// GFX11: encoding: [0x05,0x00,0x50,0xd6,0x01,0xfe,0xff,0x01]
+
+v_med3_i16 v5, s105, s105, exec_lo
+// GFX11: encoding: [0x05,0x00,0x50,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_med3_i16 v5, vcc_lo, ttmp15, v3
+// GFX11: encoding: [0x05,0x00,0x50,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_med3_i16 v5, vcc_hi, 0xfe0b, v255
+// GFX11: encoding: [0x05,0x00,0x50,0xd6,0x6b,0xfe,0xfd,0x07,0x0b,0xfe,0x00,0x00]
+
+v_med3_i16 v5, ttmp15, src_scc, ttmp15
+// GFX11: encoding: [0x05,0x00,0x50,0xd6,0x7b,0xfa,0xed,0x01]
+
+v_med3_i16 v5, m0, 0.5, m0
+// GFX11: encoding: [0x05,0x00,0x50,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_med3_i16 v5, exec_lo, -1, vcc_hi
+// GFX11: encoding: [0x05,0x00,0x50,0xd6,0x7e,0x82,0xad,0x01]
+
+v_med3_i16 v5, exec_hi, null, vcc_lo op_sel:[1,1,1,1]
+// GFX11: encoding: [0x05,0x78,0x50,0xd6,0x7f,0xf8,0xa8,0x01]
+
+v_med3_i16 v5, null, exec_lo, 0xfe0b op_sel:[0,0,0,0]
+// GFX11: encoding: [0x05,0x00,0x50,0xd6,0x7c,0xfc,0xfc,0x03,0x0b,0xfe,0x00,0x00]
+
+v_med3_i16 v5, -1, exec_hi, src_scc op_sel:[1,0,0,0]
+// GFX11: encoding: [0x05,0x08,0x50,0xd6,0xc1,0xfe,0xf4,0x03]
+
+v_med3_i16 v5, 0.5, m0, 0.5 op_sel:[0,1,0,0]
+// GFX11: encoding: [0x05,0x10,0x50,0xd6,0xf0,0xfa,0xc0,0x03]
+
+v_med3_i16 v5, src_scc, vcc_lo, -1 op_sel:[0,0,1,0]
+// GFX11: encoding: [0x05,0x20,0x50,0xd6,0xfd,0xd4,0x04,0x03]
+
+v_med3_i16 v255, 0xfe0b, vcc_hi, null op_sel:[0,0,0,1]
+// GFX11: encoding: [0xff,0x40,0x50,0xd6,0xff,0xd6,0xf0,0x01,0x0b,0xfe,0x00,0x00]
+
+v_med3_i32 v5, v1, v2, s3
+// GFX11: encoding: [0x05,0x00,0x20,0xd6,0x01,0x05,0x0e,0x00]
+
+v_med3_i32 v5, v255, s2, s105
+// GFX11: encoding: [0x05,0x00,0x20,0xd6,0xff,0x05,0xa4,0x01]
+
+v_med3_i32 v5, s1, v255, exec_hi
+// GFX11: encoding: [0x05,0x00,0x20,0xd6,0x01,0xfe,0xff,0x01]
+
+v_med3_i32 v5, s105, s105, exec_lo
+// GFX11: encoding: [0x05,0x00,0x20,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_med3_i32 v5, vcc_lo, ttmp15, v3
+// GFX11: encoding: [0x05,0x00,0x20,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_med3_i32 v5, vcc_hi, 0xaf123456, v255
+// GFX11: encoding: [0x05,0x00,0x20,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+v_med3_i32 v5, ttmp15, src_scc, ttmp15
+// GFX11: encoding: [0x05,0x00,0x20,0xd6,0x7b,0xfa,0xed,0x01]
+
+v_med3_i32 v5, m0, 0.5, m0
+// GFX11: encoding: [0x05,0x00,0x20,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_med3_i32 v5, exec_lo, -1, vcc_hi
+// GFX11: encoding: [0x05,0x00,0x20,0xd6,0x7e,0x82,0xad,0x01]
+
+v_med3_i32 v5, exec_hi, null, vcc_lo
+// GFX11: encoding: [0x05,0x00,0x20,0xd6,0x7f,0xf8,0xa8,0x01]
+
+v_med3_i32 v5, null, exec_lo, 0xaf123456
+// GFX11: encoding: [0x05,0x00,0x20,0xd6,0x7c,0xfc,0xfc,0x03,0x56,0x34,0x12,0xaf]
+
+v_med3_i32 v5, -1, exec_hi, src_scc
+// GFX11: encoding: [0x05,0x00,0x20,0xd6,0xc1,0xfe,0xf4,0x03]
+
+v_med3_i32 v5, 0.5, m0, 0.5
+// GFX11: encoding: [0x05,0x00,0x20,0xd6,0xf0,0xfa,0xc0,0x03]
+
+v_med3_i32 v5, src_scc, vcc_lo, -1
+// GFX11: encoding: [0x05,0x00,0x20,0xd6,0xfd,0xd4,0x04,0x03]
+
+v_med3_i32 v255, 0xaf123456, vcc_hi, null
+// GFX11: encoding: [0xff,0x00,0x20,0xd6,0xff,0xd6,0xf0,0x01,0x56,0x34,0x12,0xaf]
+
+v_med3_u16 v5, v1, v2, s3
+// GFX11: encoding: [0x05,0x00,0x51,0xd6,0x01,0x05,0x0e,0x00]
+
+v_med3_u16 v5, v255, s2, s105
+// GFX11: encoding: [0x05,0x00,0x51,0xd6,0xff,0x05,0xa4,0x01]
+
+v_med3_u16 v5, s1, v255, exec_hi
+// GFX11: encoding: [0x05,0x00,0x51,0xd6,0x01,0xfe,0xff,0x01]
+
+v_med3_u16 v5, s105, s105, exec_lo
+// GFX11: encoding: [0x05,0x00,0x51,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_med3_u16 v5, vcc_lo, ttmp15, v3
+// GFX11: encoding: [0x05,0x00,0x51,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_med3_u16 v5, vcc_hi, 0xfe0b, v255
+// GFX11: encoding: [0x05,0x00,0x51,0xd6,0x6b,0xfe,0xfd,0x07,0x0b,0xfe,0x00,0x00]
+
+v_med3_u16 v5, ttmp15, src_scc, ttmp15
+// GFX11: encoding: [0x05,0x00,0x51,0xd6,0x7b,0xfa,0xed,0x01]
+
+v_med3_u16 v5, m0, 0.5, m0
+// GFX11: encoding: [0x05,0x00,0x51,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_med3_u16 v5, exec_lo, -1, vcc_hi
+// GFX11: encoding: [0x05,0x00,0x51,0xd6,0x7e,0x82,0xad,0x01]
+
+v_med3_u16 v5, exec_hi, null, vcc_lo op_sel:[1,1,1,1]
+// GFX11: encoding: [0x05,0x78,0x51,0xd6,0x7f,0xf8,0xa8,0x01]
+
+v_med3_u16 v5, null, exec_lo, 0xfe0b op_sel:[0,0,0,0]
+// GFX11: encoding: [0x05,0x00,0x51,0xd6,0x7c,0xfc,0xfc,0x03,0x0b,0xfe,0x00,0x00]
+
+v_med3_u16 v5, -1, exec_hi, src_scc op_sel:[1,0,0,0]
+// GFX11: encoding: [0x05,0x08,0x51,0xd6,0xc1,0xfe,0xf4,0x03]
+
+v_med3_u16 v5, 0.5, m0, 0.5 op_sel:[0,1,0,0]
+// GFX11: encoding: [0x05,0x10,0x51,0xd6,0xf0,0xfa,0xc0,0x03]
+
+v_med3_u16 v5, src_scc, vcc_lo, -1 op_sel:[0,0,1,0]
+// GFX11: encoding: [0x05,0x20,0x51,0xd6,0xfd,0xd4,0x04,0x03]
+
+v_med3_u16 v255, 0xfe0b, vcc_hi, null op_sel:[0,0,0,1]
+// GFX11: encoding: [0xff,0x40,0x51,0xd6,0xff,0xd6,0xf0,0x01,0x0b,0xfe,0x00,0x00]
+
+v_med3_u32 v5, v1, v2, s3
+// GFX11: encoding: [0x05,0x00,0x21,0xd6,0x01,0x05,0x0e,0x00]
+
+v_med3_u32 v5, v255, s2, s105
+// GFX11: encoding: [0x05,0x00,0x21,0xd6,0xff,0x05,0xa4,0x01]
+
+v_med3_u32 v5, s1, v255, exec_hi
+// GFX11: encoding: [0x05,0x00,0x21,0xd6,0x01,0xfe,0xff,0x01]
+
+v_med3_u32 v5, s105, s105, exec_lo
+// GFX11: encoding: [0x05,0x00,0x21,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_med3_u32 v5, vcc_lo, ttmp15, v3
+// GFX11: encoding: [0x05,0x00,0x21,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_med3_u32 v5, vcc_hi, 0xaf123456, v255
+// GFX11: encoding: [0x05,0x00,0x21,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+v_med3_u32 v5, ttmp15, src_scc, ttmp15
+// GFX11: encoding: [0x05,0x00,0x21,0xd6,0x7b,0xfa,0xed,0x01]
+
+v_med3_u32 v5, m0, 0.5, m0
+// GFX11: encoding: [0x05,0x00,0x21,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_med3_u32 v5, exec_lo, -1, vcc_hi
+// GFX11: encoding: [0x05,0x00,0x21,0xd6,0x7e,0x82,0xad,0x01]
+
+v_med3_u32 v5, exec_hi, null, vcc_lo
+// GFX11: encoding: [0x05,0x00,0x21,0xd6,0x7f,0xf8,0xa8,0x01]
+
+v_med3_u32 v5, null, exec_lo, 0xaf123456
+// GFX11: encoding: [0x05,0x00,0x21,0xd6,0x7c,0xfc,0xfc,0x03,0x56,0x34,0x12,0xaf]
+
+v_med3_u32 v5, -1, exec_hi, src_scc
+// GFX11: encoding: [0x05,0x00,0x21,0xd6,0xc1,0xfe,0xf4,0x03]
+
+v_med3_u32 v5, 0.5, m0, 0.5
+// GFX11: encoding: [0x05,0x00,0x21,0xd6,0xf0,0xfa,0xc0,0x03]
+
+v_med3_u32 v5, src_scc, vcc_lo, -1
+// GFX11: encoding: [0x05,0x00,0x21,0xd6,0xfd,0xd4,0x04,0x03]
+
+v_med3_u32 v255, 0xaf123456, vcc_hi, null
+// GFX11: encoding: [0xff,0x00,0x21,0xd6,0xff,0xd6,0xf0,0x01,0x56,0x34,0x12,0xaf]
+
+v_min3_f16 v5, v1, v2, s3
+// GFX11: encoding: [0x05,0x00,0x49,0xd6,0x01,0x05,0x0e,0x00]
+
+v_min3_f16 v5, v255, s2, s105
+// GFX11: encoding: [0x05,0x00,0x49,0xd6,0xff,0x05,0xa4,0x01]
+
+v_min3_f16 v5, s1, v255, exec_hi
+// GFX11: encoding: [0x05,0x00,0x49,0xd6,0x01,0xfe,0xff,0x01]
+
+v_min3_f16 v5, s105, s105, exec_lo
+// GFX11: encoding: [0x05,0x00,0x49,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_min3_f16 v5, vcc_lo, ttmp15, v3
+// GFX11: encoding: [0x05,0x00,0x49,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_min3_f16 v5, vcc_hi, 0xfe0b, v255
+// GFX11: encoding: [0x05,0x00,0x49,0xd6,0x6b,0xfe,0xfd,0x07,0x0b,0xfe,0x00,0x00]
+
+v_min3_f16 v5, -|ttmp15|, -|src_scc|, -|ttmp15|
+// GFX11: encoding: [0x05,0x07,0x49,0xd6,0x7b,0xfa,0xed,0xe1]
+
+v_min3_f16 v5, m0, 0.5, m0
+// GFX11: encoding: [0x05,0x00,0x49,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_min3_f16 v5, |exec_lo|, -1, vcc_hi
+// GFX11: encoding: [0x05,0x01,0x49,0xd6,0x7e,0x82,0xad,0x01]
+
+v_min3_f16 v5, -|exec_hi|, null, -|vcc_lo| op_sel:[1,1,1,1]
+// GFX11: encoding: [0x05,0x7d,0x49,0xd6,0x7f,0xf8,0xa8,0xa1]
+
+v_min3_f16 v5, null, exec_lo, -|0xfe0b| op_sel:[0,0,0,0]
+// GFX11: encoding: [0x05,0x04,0x49,0xd6,0x7c,0xfc,0xfc,0x83,0x0b,0xfe,0x00,0x00]
+
+v_min3_f16 v5, -1, -|exec_hi|, -|src_scc| op_sel:[1,0,0,0]
+// GFX11: encoding: [0x05,0x0e,0x49,0xd6,0xc1,0xfe,0xf4,0xc3]
+
+v_min3_f16 v5, 0.5, -m0, 0.5 op_sel:[0,1,0,0]
+// GFX11: encoding: [0x05,0x10,0x49,0xd6,0xf0,0xfa,0xc0,0x43]
+
+v_min3_f16 v5, -src_scc, |vcc_lo|, -1 op_sel:[0,0,1,0]
+// GFX11: encoding: [0x05,0x22,0x49,0xd6,0xfd,0xd4,0x04,0x23]
+
+v_min3_f16 v255, -|0xfe0b|, -|vcc_hi|, null op_sel:[0,0,0,1] clamp
+// GFX11: encoding: [0xff,0xc3,0x49,0xd6,0xff,0xd6,0xf0,0x61,0x0b,0xfe,0x00,0x00]
+
+v_min3_f16 v5, m0, 0.5, m0 clamp mul:4
+// GFX11: encoding: [0x05,0x80,0x49,0xd6,0x7d,0xe0,0xf5,0x11]
+
+v_min3_f32 v5, v1, v2, s3
+// GFX11: encoding: [0x05,0x00,0x19,0xd6,0x01,0x05,0x0e,0x00]
+
+v_min3_f32 v5, v255, s2, s105
+// GFX11: encoding: [0x05,0x00,0x19,0xd6,0xff,0x05,0xa4,0x01]
+
+v_min3_f32 v5, s1, v255, exec_hi
+// GFX11: encoding: [0x05,0x00,0x19,0xd6,0x01,0xfe,0xff,0x01]
+
+v_min3_f32 v5, s105, s105, exec_lo
+// GFX11: encoding: [0x05,0x00,0x19,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_min3_f32 v5, vcc_lo, ttmp15, v3
+// GFX11: encoding: [0x05,0x00,0x19,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_min3_f32 v5, vcc_hi, 0xaf123456, v255
+// GFX11: encoding: [0x05,0x00,0x19,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+v_min3_f32 v5, -|ttmp15|, -|src_scc|, -|ttmp15|
+// GFX11: encoding: [0x05,0x07,0x19,0xd6,0x7b,0xfa,0xed,0xe1]
+
+v_min3_f32 v5, m0, 0.5, m0
+// GFX11: encoding: [0x05,0x00,0x19,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_min3_f32 v5, |exec_lo|, -1, vcc_hi
+// GFX11: encoding: [0x05,0x01,0x19,0xd6,0x7e,0x82,0xad,0x01]
+
+v_min3_f32 v5, -|exec_hi|, null, -|vcc_lo|
+// GFX11: encoding: [0x05,0x05,0x19,0xd6,0x7f,0xf8,0xa8,0xa1]
+
+v_min3_f32 v5, null, exec_lo, -|0xaf123456|
+// GFX11: encoding: [0x05,0x04,0x19,0xd6,0x7c,0xfc,0xfc,0x83,0x56,0x34,0x12,0xaf]
+
+v_min3_f32 v5, -1, -|exec_hi|, -|src_scc|
+// GFX11: encoding: [0x05,0x06,0x19,0xd6,0xc1,0xfe,0xf4,0xc3]
+
+v_min3_f32 v5, 0.5, -m0, 0.5 mul:2
+// GFX11: encoding: [0x05,0x00,0x19,0xd6,0xf0,0xfa,0xc0,0x4b]
+
+v_min3_f32 v5, -src_scc, |vcc_lo|, -1 mul:4
+// GFX11: encoding: [0x05,0x02,0x19,0xd6,0xfd,0xd4,0x04,0x33]
+
+v_min3_f32 v255, -|0xaf123456|, -|vcc_hi|, null clamp div:2
+// GFX11: encoding: [0xff,0x83,0x19,0xd6,0xff,0xd6,0xf0,0x79,0x56,0x34,0x12,0xaf]
+
+v_min3_i16 v5, v1, v2, s3
+// GFX11: encoding: [0x05,0x00,0x4a,0xd6,0x01,0x05,0x0e,0x00]
+
+v_min3_i16 v5, v255, s2, s105
+// GFX11: encoding: [0x05,0x00,0x4a,0xd6,0xff,0x05,0xa4,0x01]
+
+v_min3_i16 v5, s1, v255, exec_hi
+// GFX11: encoding: [0x05,0x00,0x4a,0xd6,0x01,0xfe,0xff,0x01]
+
+v_min3_i16 v5, s105, s105, exec_lo
+// GFX11: encoding: [0x05,0x00,0x4a,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_min3_i16 v5, vcc_lo, ttmp15, v3
+// GFX11: encoding: [0x05,0x00,0x4a,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_min3_i16 v5, vcc_hi, 0xfe0b, v255
+// GFX11: encoding: [0x05,0x00,0x4a,0xd6,0x6b,0xfe,0xfd,0x07,0x0b,0xfe,0x00,0x00]
+
+v_min3_i16 v5, ttmp15, src_scc, ttmp15
+// GFX11: encoding: [0x05,0x00,0x4a,0xd6,0x7b,0xfa,0xed,0x01]
+
+v_min3_i16 v5, m0, 0.5, m0
+// GFX11: encoding: [0x05,0x00,0x4a,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_min3_i16 v5, exec_lo, -1, vcc_hi
+// GFX11: encoding: [0x05,0x00,0x4a,0xd6,0x7e,0x82,0xad,0x01]
+
+v_min3_i16 v5, exec_hi, null, vcc_lo op_sel:[1,1,1,1]
+// GFX11: encoding: [0x05,0x78,0x4a,0xd6,0x7f,0xf8,0xa8,0x01]
+
+v_min3_i16 v5, null, exec_lo, 0xfe0b op_sel:[0,0,0,0]
+// GFX11: encoding: [0x05,0x00,0x4a,0xd6,0x7c,0xfc,0xfc,0x03,0x0b,0xfe,0x00,0x00]
+
+v_min3_i16 v5, -1, exec_hi, src_scc op_sel:[1,0,0,0]
+// GFX11: encoding: [0x05,0x08,0x4a,0xd6,0xc1,0xfe,0xf4,0x03]
+
+v_min3_i16 v5, 0.5, m0, 0.5 op_sel:[0,1,0,0]
+// GFX11: encoding: [0x05,0x10,0x4a,0xd6,0xf0,0xfa,0xc0,0x03]
+
+v_min3_i16 v5, src_scc, vcc_lo, -1 op_sel:[0,0,1,0]
+// GFX11: encoding: [0x05,0x20,0x4a,0xd6,0xfd,0xd4,0x04,0x03]
+
+v_min3_i16 v255, 0xfe0b, vcc_hi, null op_sel:[0,0,0,1]
+// GFX11: encoding: [0xff,0x40,0x4a,0xd6,0xff,0xd6,0xf0,0x01,0x0b,0xfe,0x00,0x00]
+
+v_min3_i32 v5, v1, v2, s3
+// GFX11: encoding: [0x05,0x00,0x1a,0xd6,0x01,0x05,0x0e,0x00]
+
+v_min3_i32 v5, v255, s2, s105
+// GFX11: encoding: [0x05,0x00,0x1a,0xd6,0xff,0x05,0xa4,0x01]
+
+v_min3_i32 v5, s1, v255, exec_hi
+// GFX11: encoding: [0x05,0x00,0x1a,0xd6,0x01,0xfe,0xff,0x01]
+
+v_min3_i32 v5, s105, s105, exec_lo
+// GFX11: encoding: [0x05,0x00,0x1a,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_min3_i32 v5, vcc_lo, ttmp15, v3
+// GFX11: encoding: [0x05,0x00,0x1a,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_min3_i32 v5, vcc_hi, 0xaf123456, v255
+// GFX11: encoding: [0x05,0x00,0x1a,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+v_min3_i32 v5, ttmp15, src_scc, ttmp15
+// GFX11: encoding: [0x05,0x00,0x1a,0xd6,0x7b,0xfa,0xed,0x01]
+
+v_min3_i32 v5, m0, 0.5, m0
+// GFX11: encoding: [0x05,0x00,0x1a,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_min3_i32 v5, exec_lo, -1, vcc_hi
+// GFX11: encoding: [0x05,0x00,0x1a,0xd6,0x7e,0x82,0xad,0x01]
+
+v_min3_i32 v5, exec_hi, null, vcc_lo
+// GFX11: encoding: [0x05,0x00,0x1a,0xd6,0x7f,0xf8,0xa8,0x01]
+
+v_min3_i32 v5, null, exec_lo, 0xaf123456
+// GFX11: encoding: [0x05,0x00,0x1a,0xd6,0x7c,0xfc,0xfc,0x03,0x56,0x34,0x12,0xaf]
+
+v_min3_i32 v5, -1, exec_hi, src_scc
+// GFX11: encoding: [0x05,0x00,0x1a,0xd6,0xc1,0xfe,0xf4,0x03]
+
+v_min3_i32 v5, 0.5, m0, 0.5
+// GFX11: encoding: [0x05,0x00,0x1a,0xd6,0xf0,0xfa,0xc0,0x03]
+
+v_min3_i32 v5, src_scc, vcc_lo, -1
+// GFX11: encoding: [0x05,0x00,0x1a,0xd6,0xfd,0xd4,0x04,0x03]
+
+v_min3_i32 v255, 0xaf123456, vcc_hi, null
+// GFX11: encoding: [0xff,0x00,0x1a,0xd6,0xff,0xd6,0xf0,0x01,0x56,0x34,0x12,0xaf]
+
+v_min3_u16 v5, v1, v2, s3
+// GFX11: encoding: [0x05,0x00,0x4b,0xd6,0x01,0x05,0x0e,0x00]
+
+v_min3_u16 v5, v255, s2, s105
+// GFX11: encoding: [0x05,0x00,0x4b,0xd6,0xff,0x05,0xa4,0x01]
+
+v_min3_u16 v5, s1, v255, exec_hi
+// GFX11: encoding: [0x05,0x00,0x4b,0xd6,0x01,0xfe,0xff,0x01]
+
+v_min3_u16 v5, s105, s105, exec_lo
+// GFX11: encoding: [0x05,0x00,0x4b,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_min3_u16 v5, vcc_lo, ttmp15, v3
+// GFX11: encoding: [0x05,0x00,0x4b,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_min3_u16 v5, vcc_hi, 0xfe0b, v255
+// GFX11: encoding: [0x05,0x00,0x4b,0xd6,0x6b,0xfe,0xfd,0x07,0x0b,0xfe,0x00,0x00]
+
+v_min3_u16 v5, ttmp15, src_scc, ttmp15
+// GFX11: encoding: [0x05,0x00,0x4b,0xd6,0x7b,0xfa,0xed,0x01]
+
+v_min3_u16 v5, m0, 0.5, m0
+// GFX11: encoding: [0x05,0x00,0x4b,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_min3_u16 v5, exec_lo, -1, vcc_hi
+// GFX11: encoding: [0x05,0x00,0x4b,0xd6,0x7e,0x82,0xad,0x01]
+
+v_min3_u16 v5, exec_hi, null, vcc_lo op_sel:[1,1,1,1]
+// GFX11: encoding: [0x05,0x78,0x4b,0xd6,0x7f,0xf8,0xa8,0x01]
+
+v_min3_u16 v5, null, exec_lo, 0xfe0b op_sel:[0,0,0,0]
+// GFX11: encoding: [0x05,0x00,0x4b,0xd6,0x7c,0xfc,0xfc,0x03,0x0b,0xfe,0x00,0x00]
+
+v_min3_u16 v5, -1, exec_hi, src_scc op_sel:[1,0,0,0]
+// GFX11: encoding: [0x05,0x08,0x4b,0xd6,0xc1,0xfe,0xf4,0x03]
+
+v_min3_u16 v5, 0.5, m0, 0.5 op_sel:[0,1,0,0]
+// GFX11: encoding: [0x05,0x10,0x4b,0xd6,0xf0,0xfa,0xc0,0x03]
+
+v_min3_u16 v5, src_scc, vcc_lo, -1 op_sel:[0,0,1,0]
+// GFX11: encoding: [0x05,0x20,0x4b,0xd6,0xfd,0xd4,0x04,0x03]
+
+v_min3_u16 v255, 0xfe0b, vcc_hi, null op_sel:[0,0,0,1]
+// GFX11: encoding: [0xff,0x40,0x4b,0xd6,0xff,0xd6,0xf0,0x01,0x0b,0xfe,0x00,0x00]
+
+v_min3_u32 v5, v1, v2, s3
+// GFX11: encoding: [0x05,0x00,0x1b,0xd6,0x01,0x05,0x0e,0x00]
+
+v_min3_u32 v5, v255, s2, s105
+// GFX11: encoding: [0x05,0x00,0x1b,0xd6,0xff,0x05,0xa4,0x01]
+
+v_min3_u32 v5, s1, v255, exec_hi
+// GFX11: encoding: [0x05,0x00,0x1b,0xd6,0x01,0xfe,0xff,0x01]
+
+v_min3_u32 v5, s105, s105, exec_lo
+// GFX11: encoding: [0x05,0x00,0x1b,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_min3_u32 v5, vcc_lo, ttmp15, v3
+// GFX11: encoding: [0x05,0x00,0x1b,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_min3_u32 v5, vcc_hi, 0xaf123456, v255
+// GFX11: encoding: [0x05,0x00,0x1b,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+v_min3_u32 v5, ttmp15, src_scc, ttmp15
+// GFX11: encoding: [0x05,0x00,0x1b,0xd6,0x7b,0xfa,0xed,0x01]
+
+v_min3_u32 v5, m0, 0.5, m0
+// GFX11: encoding: [0x05,0x00,0x1b,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_min3_u32 v5, exec_lo, -1, vcc_hi
+// GFX11: encoding: [0x05,0x00,0x1b,0xd6,0x7e,0x82,0xad,0x01]
+
+v_min3_u32 v5, exec_hi, null, vcc_lo
+// GFX11: encoding: [0x05,0x00,0x1b,0xd6,0x7f,0xf8,0xa8,0x01]
+
+v_min3_u32 v5, null, exec_lo, 0xaf123456
+// GFX11: encoding: [0x05,0x00,0x1b,0xd6,0x7c,0xfc,0xfc,0x03,0x56,0x34,0x12,0xaf]
+
+v_min3_u32 v5, -1, exec_hi, src_scc
+// GFX11: encoding: [0x05,0x00,0x1b,0xd6,0xc1,0xfe,0xf4,0x03]
+
+v_min3_u32 v5, 0.5, m0, 0.5
+// GFX11: encoding: [0x05,0x00,0x1b,0xd6,0xf0,0xfa,0xc0,0x03]
+
+v_min3_u32 v5, src_scc, vcc_lo, -1
+// GFX11: encoding: [0x05,0x00,0x1b,0xd6,0xfd,0xd4,0x04,0x03]
+
+v_min3_u32 v255, 0xaf123456, vcc_hi, null
+// GFX11: encoding: [0xff,0x00,0x1b,0xd6,0xff,0xd6,0xf0,0x01,0x56,0x34,0x12,0xaf]
+
+v_min_f64 v[5:6], v[1:2], v[2:3]
+// GFX11: encoding: [0x05,0x00,0x29,0xd7,0x01,0x05,0x02,0x00]
+
+v_min_f64 v[5:6], v[254:255], v[254:255]
+// GFX11: encoding: [0x05,0x00,0x29,0xd7,0xfe,0xfd,0x03,0x00]
+
+v_min_f64 v[5:6], s[2:3], s[4:5]
+// GFX11: encoding: [0x05,0x00,0x29,0xd7,0x02,0x08,0x00,0x00]
+
+v_min_f64 v[5:6], s[104:105], s[104:105]
+// GFX11: encoding: [0x05,0x00,0x29,0xd7,0x68,0xd0,0x00,0x00]
+
+v_min_f64 v[5:6], vcc, ttmp[14:15]
+// GFX11: encoding: [0x05,0x00,0x29,0xd7,0x6a,0xf4,0x00,0x00]
+
+v_min_f64 v[5:6], ttmp[14:15], 0xaf123456
+// GFX11: encoding: [0x05,0x00,0x29,0xd7,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_min_f64 v[5:6], -|exec|, src_scc
+// GFX11: encoding: [0x05,0x01,0x29,0xd7,0x7e,0xfa,0x01,0x20]
+
+v_min_f64 v[5:6], null, 0.5
+// GFX11: encoding: [0x05,0x00,0x29,0xd7,0x7c,0xe0,0x01,0x00]
+
+v_min_f64 v[5:6], -1, -1
+// GFX11: encoding: [0x05,0x00,0x29,0xd7,0xc1,0x82,0x01,0x00]
+
+v_min_f64 v[5:6], 0.5, null mul:2
+// GFX11: encoding: [0x05,0x00,0x29,0xd7,0xf0,0xf8,0x00,0x08]
+
+v_min_f64 v[5:6], -|src_scc|, -|exec| mul:4
+// GFX11: encoding: [0x05,0x03,0x29,0xd7,0xfd,0xfc,0x00,0x70]
+
+v_min_f64 v[254:255], 0xaf123456, -|vcc| clamp div:2
+// GFX11: encoding: [0xfe,0x82,0x29,0xd7,0xff,0xd4,0x00,0x58,0x56,0x34,0x12,0xaf]
+
+v_min_i16 v5, v1, v2
+// GFX11: encoding: [0x05,0x00,0x0c,0xd7,0x01,0x05,0x02,0x00]
+
+v_min_i16 v5, v255, v255
+// GFX11: encoding: [0x05,0x00,0x0c,0xd7,0xff,0xff,0x03,0x00]
+
+v_min_i16 v5, s1, s2
+// GFX11: encoding: [0x05,0x00,0x0c,0xd7,0x01,0x04,0x00,0x00]
+
+v_min_i16 v5, s105, s105
+// GFX11: encoding: [0x05,0x00,0x0c,0xd7,0x69,0xd2,0x00,0x00]
+
+v_min_i16 v5, vcc_lo, ttmp15
+// GFX11: encoding: [0x05,0x00,0x0c,0xd7,0x6a,0xf6,0x00,0x00]
+
+v_min_i16 v5, vcc_hi, 0xfe0b
+// GFX11: encoding: [0x05,0x00,0x0c,0xd7,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+v_min_i16 v5, ttmp15, src_scc
+// GFX11: encoding: [0x05,0x00,0x0c,0xd7,0x7b,0xfa,0x01,0x00]
+
+v_min_i16 v5, m0, 0.5
+// GFX11: encoding: [0x05,0x00,0x0c,0xd7,0x7d,0xe0,0x01,0x00]
+
+v_min_i16 v5, exec_lo, -1
+// GFX11: encoding: [0x05,0x00,0x0c,0xd7,0x7e,0x82,0x01,0x00]
+
+v_min_i16 v5, exec_hi, null
+// GFX11: encoding: [0x05,0x00,0x0c,0xd7,0x7f,0xf8,0x00,0x00]
+
+v_min_i16 v5, null, exec_lo
+// GFX11: encoding: [0x05,0x00,0x0c,0xd7,0x7c,0xfc,0x00,0x00]
+
+v_min_i16 v5, -1, exec_hi
+// GFX11: encoding: [0x05,0x00,0x0c,0xd7,0xc1,0xfe,0x00,0x00]
+
+v_min_i16 v5, 0.5, m0
+// GFX11: encoding: [0x05,0x00,0x0c,0xd7,0xf0,0xfa,0x00,0x00]
+
+v_min_i16 v5, src_scc, vcc_lo
+// GFX11: encoding: [0x05,0x00,0x0c,0xd7,0xfd,0xd4,0x00,0x00]
+
+v_min_i16 v255, 0xfe0b, vcc_hi
+// GFX11: encoding: [0xff,0x00,0x0c,0xd7,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
+
+v_min_u16 v5, v1, v2
+// GFX11: encoding: [0x05,0x00,0x0b,0xd7,0x01,0x05,0x02,0x00]
+
+v_min_u16 v5, v255, v255
+// GFX11: encoding: [0x05,0x00,0x0b,0xd7,0xff,0xff,0x03,0x00]
+
+v_min_u16 v5, s1, s2
+// GFX11: encoding: [0x05,0x00,0x0b,0xd7,0x01,0x04,0x00,0x00]
+
+v_min_u16 v5, s105, s105
+// GFX11: encoding: [0x05,0x00,0x0b,0xd7,0x69,0xd2,0x00,0x00]
+
+v_min_u16 v5, vcc_lo, ttmp15
+// GFX11: encoding: [0x05,0x00,0x0b,0xd7,0x6a,0xf6,0x00,0x00]
+
+v_min_u16 v5, vcc_hi, 0xfe0b
+// GFX11: encoding: [0x05,0x00,0x0b,0xd7,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+v_min_u16 v5, ttmp15, src_scc
+// GFX11: encoding: [0x05,0x00,0x0b,0xd7,0x7b,0xfa,0x01,0x00]
+
+v_min_u16 v5, m0, 0.5
+// GFX11: encoding: [0x05,0x00,0x0b,0xd7,0x7d,0xe0,0x01,0x00]
+
+v_min_u16 v5, exec_lo, -1
+// GFX11: encoding: [0x05,0x00,0x0b,0xd7,0x7e,0x82,0x01,0x00]
+
+v_min_u16 v5, exec_hi, null
+// GFX11: encoding: [0x05,0x00,0x0b,0xd7,0x7f,0xf8,0x00,0x00]
+
+v_min_u16 v5, null, exec_lo
+// GFX11: encoding: [0x05,0x00,0x0b,0xd7,0x7c,0xfc,0x00,0x00]
+
+v_min_u16 v5, -1, exec_hi
+// GFX11: encoding: [0x05,0x00,0x0b,0xd7,0xc1,0xfe,0x00,0x00]
+
+v_min_u16 v5, 0.5, m0
+// GFX11: encoding: [0x05,0x00,0x0b,0xd7,0xf0,0xfa,0x00,0x00]
+
+v_min_u16 v5, src_scc, vcc_lo
+// GFX11: encoding: [0x05,0x00,0x0b,0xd7,0xfd,0xd4,0x00,0x00]
+
+v_min_u16 v255, 0xfe0b, vcc_hi
+// GFX11: encoding: [0xff,0x00,0x0b,0xd7,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
+
+v_minmax_f16 v5, v1, v2, s3
+// GFX11: encoding: [0x05,0x00,0x61,0xd6,0x01,0x05,0x0e,0x00]
+
+v_minmax_f16 v5, v255, s2, s105
+// GFX11: encoding: [0x05,0x00,0x61,0xd6,0xff,0x05,0xa4,0x01]
+
+v_minmax_f16 v5, s1, v255, exec_hi
+// GFX11: encoding: [0x05,0x00,0x61,0xd6,0x01,0xfe,0xff,0x01]
+
+v_minmax_f16 v5, s105, s105, exec_lo
+// GFX11: encoding: [0x05,0x00,0x61,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_minmax_f16 v5, vcc_lo, ttmp15, v3
+// GFX11: encoding: [0x05,0x00,0x61,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_minmax_f16 v5, vcc_hi, 0xfe0b, v255
+// GFX11: encoding: [0x05,0x00,0x61,0xd6,0x6b,0xfe,0xfd,0x07,0x0b,0xfe,0x00,0x00]
+
+v_minmax_f16 v5, -|ttmp15|, -|src_scc|, -|ttmp15|
+// GFX11: encoding: [0x05,0x07,0x61,0xd6,0x7b,0xfa,0xed,0xe1]
+
+v_minmax_f16 v5, m0, 0.5, m0
+// GFX11: encoding: [0x05,0x00,0x61,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_minmax_f16 v5, |exec_lo|, -1, vcc_hi
+// GFX11: encoding: [0x05,0x01,0x61,0xd6,0x7e,0x82,0xad,0x01]
+
+v_minmax_f16 v5, -|exec_hi|, null, -|vcc_lo|
+// GFX11: encoding: [0x05,0x05,0x61,0xd6,0x7f,0xf8,0xa8,0xa1]
+
+v_minmax_f16 v5, null, exec_lo, -|0xfe0b|
+// GFX11: encoding: [0x05,0x04,0x61,0xd6,0x7c,0xfc,0xfc,0x83,0x0b,0xfe,0x00,0x00]
+
+v_minmax_f16 v5, -1, -|exec_hi|, -|src_scc|
+// GFX11: encoding: [0x05,0x06,0x61,0xd6,0xc1,0xfe,0xf4,0xc3]
+
+v_minmax_f16 v5, 0.5, -m0, 0.5 mul:2
+// GFX11: encoding: [0x05,0x00,0x61,0xd6,0xf0,0xfa,0xc0,0x4b]
+
+v_minmax_f16 v5, -src_scc, |vcc_lo|, -1 mul:4
+// GFX11: encoding: [0x05,0x02,0x61,0xd6,0xfd,0xd4,0x04,0x33]
+
+v_minmax_f16 v255, -|0xfe0b|, -|vcc_hi|, null clamp div:2
+// GFX11: encoding: [0xff,0x83,0x61,0xd6,0xff,0xd6,0xf0,0x79,0x0b,0xfe,0x00,0x00]
+
+v_minmax_f32 v5, v1, v2, s3
+// GFX11: encoding: [0x05,0x00,0x5f,0xd6,0x01,0x05,0x0e,0x00]
+
+v_minmax_f32 v5, v255, s2, s105
+// GFX11: encoding: [0x05,0x00,0x5f,0xd6,0xff,0x05,0xa4,0x01]
+
+v_minmax_f32 v5, s1, v255, exec_hi
+// GFX11: encoding: [0x05,0x00,0x5f,0xd6,0x01,0xfe,0xff,0x01]
+
+v_minmax_f32 v5, s105, s105, exec_lo
+// GFX11: encoding: [0x05,0x00,0x5f,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_minmax_f32 v5, vcc_lo, ttmp15, v3
+// GFX11: encoding: [0x05,0x00,0x5f,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_minmax_f32 v5, vcc_hi, 0xaf123456, v255
+// GFX11: encoding: [0x05,0x00,0x5f,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+v_minmax_f32 v5, -|ttmp15|, -|src_scc|, -|ttmp15|
+// GFX11: encoding: [0x05,0x07,0x5f,0xd6,0x7b,0xfa,0xed,0xe1]
+
+v_minmax_f32 v5, m0, 0.5, m0
+// GFX11: encoding: [0x05,0x00,0x5f,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_minmax_f32 v5, |exec_lo|, -1, vcc_hi
+// GFX11: encoding: [0x05,0x01,0x5f,0xd6,0x7e,0x82,0xad,0x01]
+
+v_minmax_f32 v5, -|exec_hi|, null, -|vcc_lo|
+// GFX11: encoding: [0x05,0x05,0x5f,0xd6,0x7f,0xf8,0xa8,0xa1]
+
+v_minmax_f32 v5, null, exec_lo, -|0xaf123456|
+// GFX11: encoding: [0x05,0x04,0x5f,0xd6,0x7c,0xfc,0xfc,0x83,0x56,0x34,0x12,0xaf]
+
+v_minmax_f32 v5, -1, -|exec_hi|, -|src_scc|
+// GFX11: encoding: [0x05,0x06,0x5f,0xd6,0xc1,0xfe,0xf4,0xc3]
+
+v_minmax_f32 v5, 0.5, -m0, 0.5 mul:2
+// GFX11: encoding: [0x05,0x00,0x5f,0xd6,0xf0,0xfa,0xc0,0x4b]
+
+v_minmax_f32 v5, -src_scc, |vcc_lo|, -1 mul:4
+// GFX11: encoding: [0x05,0x02,0x5f,0xd6,0xfd,0xd4,0x04,0x33]
+
+v_minmax_f32 v255, -|0xaf123456|, -|vcc_hi|, null clamp div:2
+// GFX11: encoding: [0xff,0x83,0x5f,0xd6,0xff,0xd6,0xf0,0x79,0x56,0x34,0x12,0xaf]
+
+v_minmax_i32 v5, v1, v2, s3
+// GFX11: encoding: [0x05,0x00,0x65,0xd6,0x01,0x05,0x0e,0x00]
+
+v_minmax_i32 v5, v255, s2, s105
+// GFX11: encoding: [0x05,0x00,0x65,0xd6,0xff,0x05,0xa4,0x01]
+
+v_minmax_i32 v5, s1, v255, exec_hi
+// GFX11: encoding: [0x05,0x00,0x65,0xd6,0x01,0xfe,0xff,0x01]
+
+v_minmax_i32 v5, s105, s105, exec_lo
+// GFX11: encoding: [0x05,0x00,0x65,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_minmax_i32 v5, vcc_lo, ttmp15, v3
+// GFX11: encoding: [0x05,0x00,0x65,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_minmax_i32 v5, vcc_hi, 0xaf123456, v255
+// GFX11: encoding: [0x05,0x00,0x65,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+v_minmax_i32 v5, ttmp15, src_scc, ttmp15
+// GFX11: encoding: [0x05,0x00,0x65,0xd6,0x7b,0xfa,0xed,0x01]
+
+v_minmax_i32 v5, m0, 0.5, m0
+// GFX11: encoding: [0x05,0x00,0x65,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_minmax_i32 v5, exec_lo, -1, vcc_hi
+// GFX11: encoding: [0x05,0x00,0x65,0xd6,0x7e,0x82,0xad,0x01]
+
+v_minmax_i32 v5, exec_hi, null, vcc_lo
+// GFX11: encoding: [0x05,0x00,0x65,0xd6,0x7f,0xf8,0xa8,0x01]
+
+v_minmax_i32 v5, null, exec_lo, 0xaf123456
+// GFX11: encoding: [0x05,0x00,0x65,0xd6,0x7c,0xfc,0xfc,0x03,0x56,0x34,0x12,0xaf]
+
+v_minmax_i32 v5, -1, exec_hi, src_scc
+// GFX11: encoding: [0x05,0x00,0x65,0xd6,0xc1,0xfe,0xf4,0x03]
+
+v_minmax_i32 v5, 0.5, m0, 0.5
+// GFX11: encoding: [0x05,0x00,0x65,0xd6,0xf0,0xfa,0xc0,0x03]
+
+v_minmax_i32 v5, src_scc, vcc_lo, -1
+// GFX11: encoding: [0x05,0x00,0x65,0xd6,0xfd,0xd4,0x04,0x03]
+
+v_minmax_i32 v255, 0xaf123456, vcc_hi, null
+// GFX11: encoding: [0xff,0x00,0x65,0xd6,0xff,0xd6,0xf0,0x01,0x56,0x34,0x12,0xaf]
+
+v_minmax_u32 v5, v1, v2, s3
+// GFX11: encoding: [0x05,0x00,0x63,0xd6,0x01,0x05,0x0e,0x00]
+
+v_minmax_u32 v5, v255, s2, s105
+// GFX11: encoding: [0x05,0x00,0x63,0xd6,0xff,0x05,0xa4,0x01]
+
+v_minmax_u32 v5, s1, v255, exec_hi
+// GFX11: encoding: [0x05,0x00,0x63,0xd6,0x01,0xfe,0xff,0x01]
+
+v_minmax_u32 v5, s105, s105, exec_lo
+// GFX11: encoding: [0x05,0x00,0x63,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_minmax_u32 v5, vcc_lo, ttmp15, v3
+// GFX11: encoding: [0x05,0x00,0x63,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_minmax_u32 v5, vcc_hi, 0xaf123456, v255
+// GFX11: encoding: [0x05,0x00,0x63,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+v_minmax_u32 v5, ttmp15, src_scc, ttmp15
+// GFX11: encoding: [0x05,0x00,0x63,0xd6,0x7b,0xfa,0xed,0x01]
+
+v_minmax_u32 v5, m0, 0.5, m0
+// GFX11: encoding: [0x05,0x00,0x63,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_minmax_u32 v5, exec_lo, -1, vcc_hi
+// GFX11: encoding: [0x05,0x00,0x63,0xd6,0x7e,0x82,0xad,0x01]
+
+v_minmax_u32 v5, exec_hi, null, vcc_lo
+// GFX11: encoding: [0x05,0x00,0x63,0xd6,0x7f,0xf8,0xa8,0x01]
+
+v_minmax_u32 v5, null, exec_lo, 0xaf123456
+// GFX11: encoding: [0x05,0x00,0x63,0xd6,0x7c,0xfc,0xfc,0x03,0x56,0x34,0x12,0xaf]
+
+v_minmax_u32 v5, -1, exec_hi, src_scc
+// GFX11: encoding: [0x05,0x00,0x63,0xd6,0xc1,0xfe,0xf4,0x03]
+
+v_minmax_u32 v5, 0.5, m0, 0.5
+// GFX11: encoding: [0x05,0x00,0x63,0xd6,0xf0,0xfa,0xc0,0x03]
+
+v_minmax_u32 v5, src_scc, vcc_lo, -1
+// GFX11: encoding: [0x05,0x00,0x63,0xd6,0xfd,0xd4,0x04,0x03]
+
+v_minmax_u32 v255, 0xaf123456, vcc_hi, null
+// GFX11: encoding: [0xff,0x00,0x63,0xd6,0xff,0xd6,0xf0,0x01,0x56,0x34,0x12,0xaf]
+
+v_mqsad_pk_u16_u8 v[5:6], v[1:2], v2, ttmp[14:15]
+// GFX11: encoding: [0x05,0x00,0x3b,0xd6,0x01,0x05,0xea,0x01]
+
+v_mqsad_pk_u16_u8 v[5:6], v[1:2], v255, ttmp[14:15]
+// GFX11: encoding: [0x05,0x00,0x3b,0xd6,0x01,0xff,0xeb,0x01]
+
+v_mqsad_pk_u16_u8 v[5:6], v[1:2], s2, ttmp[14:15]
+// GFX11: encoding: [0x05,0x00,0x3b,0xd6,0x01,0x05,0xe8,0x01]
+
+v_mqsad_pk_u16_u8 v[5:6], v[1:2], s105, ttmp[14:15]
+// GFX11: encoding: [0x05,0x00,0x3b,0xd6,0x01,0xd3,0xe8,0x01]
+
+v_mqsad_pk_u16_u8 v[5:6], v[254:255], ttmp15, s[6:7]
+// GFX11: encoding: [0x05,0x00,0x3b,0xd6,0xfe,0xf7,0x18,0x00]
+
+v_mqsad_pk_u16_u8 v[5:6], s[2:3], vcc_hi, v[3:4]
+// GFX11: encoding: [0x05,0x00,0x3b,0xd6,0x02,0xd6,0x0c,0x04]
+
+v_mqsad_pk_u16_u8 v[5:6], s[104:105], vcc_lo, s[104:105]
+// GFX11: encoding: [0x05,0x00,0x3b,0xd6,0x68,0xd4,0xa0,0x01]
+
+v_mqsad_pk_u16_u8 v[5:6], vcc, m0, v[254:255]
+// GFX11: encoding: [0x05,0x00,0x3b,0xd6,0x6a,0xfa,0xf8,0x07]
+
+v_mqsad_pk_u16_u8 v[5:6], ttmp[14:15], exec_hi, null
+// GFX11: encoding: [0x05,0x00,0x3b,0xd6,0x7a,0xfe,0xf0,0x01]
+
+v_mqsad_pk_u16_u8 v[5:6], exec, exec_lo, exec
+// GFX11: encoding: [0x05,0x00,0x3b,0xd6,0x7e,0xfc,0xf8,0x01]
+
+v_mqsad_pk_u16_u8 v[5:6], null, null, vcc
+// GFX11: encoding: [0x05,0x00,0x3b,0xd6,0x7c,0xf8,0xa8,0x01]
+
+v_mqsad_pk_u16_u8 v[5:6], -1, -1, 0xaf123456
+// GFX11: encoding: [0x05,0x00,0x3b,0xd6,0xc1,0x82,0xfd,0x03,0x56,0x34,0x12,0xaf]
+
+v_mqsad_pk_u16_u8 v[5:6], 0.5, 0.5, src_scc
+// GFX11: encoding: [0x05,0x00,0x3b,0xd6,0xf0,0xe0,0xf5,0x03]
+
+v_mqsad_pk_u16_u8 v[5:6], src_scc, src_scc, 0.5
+// GFX11: encoding: [0x05,0x00,0x3b,0xd6,0xfd,0xfa,0xc1,0x03]
+
+v_mqsad_pk_u16_u8 v[254:255], 0xaf123456, 0xaf123456, -1 clamp
+// GFX11: encoding: [0xfe,0x80,0x3b,0xd6,0xff,0xfe,0x05,0x03,0x56,0x34,0x12,0xaf]
+
+v_mqsad_u32_u8 v[5:8], v[1:2], v2, v[252:255]
+// GFX11: encoding: [0x05,0x00,0x3d,0xd6,0x01,0x05,0xf2,0x07]
+
+v_mqsad_u32_u8 v[5:8], v[1:2], v255, v[252:255]
+// GFX11: encoding: [0x05,0x00,0x3d,0xd6,0x01,0xff,0xf3,0x07]
+
+v_mqsad_u32_u8 v[5:8], v[1:2], s2, v[252:255]
+// GFX11: encoding: [0x05,0x00,0x3d,0xd6,0x01,0x05,0xf0,0x07]
+
+v_mqsad_u32_u8 v[5:8], v[1:2], s105, v[252:255]
+// GFX11: encoding: [0x05,0x00,0x3d,0xd6,0x01,0xd3,0xf0,0x07]
+
+v_mqsad_u32_u8 v[5:8], v[254:255], ttmp15, v[252:255]
+// GFX11: encoding: [0x05,0x00,0x3d,0xd6,0xfe,0xf7,0xf0,0x07]
+
+v_mqsad_u32_u8 v[5:8], s[2:3], vcc_hi, v[252:255]
+// GFX11: encoding: [0x05,0x00,0x3d,0xd6,0x02,0xd6,0xf0,0x07]
+
+v_mqsad_u32_u8 v[5:8], s[104:105], vcc_lo, v[252:255]
+// GFX11: encoding: [0x05,0x00,0x3d,0xd6,0x68,0xd4,0xf0,0x07]
+
+v_mqsad_u32_u8 v[5:8], vcc, m0, v[252:255]
+// GFX11: encoding: [0x05,0x00,0x3d,0xd6,0x6a,0xfa,0xf0,0x07]
+
+v_mqsad_u32_u8 v[5:8], ttmp[14:15], exec_hi, v[252:255]
+// GFX11: encoding: [0x05,0x00,0x3d,0xd6,0x7a,0xfe,0xf0,0x07]
+
+v_mqsad_u32_u8 v[5:8], exec, exec_lo, v[252:255]
+// GFX11: encoding: [0x05,0x00,0x3d,0xd6,0x7e,0xfc,0xf0,0x07]
+
+v_mqsad_u32_u8 v[5:8], null, null, v[252:255]
+// GFX11: encoding: [0x05,0x00,0x3d,0xd6,0x7c,0xf8,0xf0,0x07]
+
+v_mqsad_u32_u8 v[5:8], -1, -1, v[252:255]
+// GFX11: encoding: [0x05,0x00,0x3d,0xd6,0xc1,0x82,0xf1,0x07]
+
+v_mqsad_u32_u8 v[5:8], 0.5, 0.5, v[252:255]
+// GFX11: encoding: [0x05,0x00,0x3d,0xd6,0xf0,0xe0,0xf1,0x07]
+
+v_mqsad_u32_u8 v[5:8], src_scc, src_scc, v[252:255]
+// GFX11: encoding: [0x05,0x00,0x3d,0xd6,0xfd,0xfa,0xf1,0x07]
+
+v_mqsad_u32_u8 v[252:255], 0xaf123456, 0xaf123456, v[3:6] clamp
+// GFX11: encoding: [0xfc,0x80,0x3d,0xd6,0xff,0xfe,0x0d,0x04,0x56,0x34,0x12,0xaf]
+
+v_msad_u8 v5, v1, v2, s3
+// GFX11: encoding: [0x05,0x00,0x39,0xd6,0x01,0x05,0x0e,0x00]
+
+v_msad_u8 v5, v255, s2, s105
+// GFX11: encoding: [0x05,0x00,0x39,0xd6,0xff,0x05,0xa4,0x01]
+
+v_msad_u8 v5, s1, v255, exec_hi
+// GFX11: encoding: [0x05,0x00,0x39,0xd6,0x01,0xfe,0xff,0x01]
+
+v_msad_u8 v5, s105, s105, exec_lo
+// GFX11: encoding: [0x05,0x00,0x39,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_msad_u8 v5, vcc_lo, ttmp15, v3
+// GFX11: encoding: [0x05,0x00,0x39,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_msad_u8 v5, vcc_hi, 0xaf123456, v255
+// GFX11: encoding: [0x05,0x00,0x39,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+v_msad_u8 v5, ttmp15, src_scc, ttmp15
+// GFX11: encoding: [0x05,0x00,0x39,0xd6,0x7b,0xfa,0xed,0x01]
+
+v_msad_u8 v5, m0, 0.5, m0
+// GFX11: encoding: [0x05,0x00,0x39,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_msad_u8 v5, exec_lo, -1, vcc_hi
+// GFX11: encoding: [0x05,0x00,0x39,0xd6,0x7e,0x82,0xad,0x01]
+
+v_msad_u8 v5, exec_hi, null, vcc_lo
+// GFX11: encoding: [0x05,0x00,0x39,0xd6,0x7f,0xf8,0xa8,0x01]
+
+v_msad_u8 v5, null, exec_lo, 0xaf123456
+// GFX11: encoding: [0x05,0x00,0x39,0xd6,0x7c,0xfc,0xfc,0x03,0x56,0x34,0x12,0xaf]
+
+v_msad_u8 v5, -1, exec_hi, src_scc
+// GFX11: encoding: [0x05,0x00,0x39,0xd6,0xc1,0xfe,0xf4,0x03]
+
+v_msad_u8 v5, 0.5, m0, 0.5
+// GFX11: encoding: [0x05,0x00,0x39,0xd6,0xf0,0xfa,0xc0,0x03]
+
+v_msad_u8 v5, src_scc, vcc_lo, -1
+// GFX11: encoding: [0x05,0x00,0x39,0xd6,0xfd,0xd4,0x04,0x03]
+
+v_msad_u8 v255, 0xaf123456, vcc_hi, null clamp
+// GFX11: encoding: [0xff,0x80,0x39,0xd6,0xff,0xd6,0xf0,0x01,0x56,0x34,0x12,0xaf]
+
+v_mul_f64 v[5:6], v[1:2], v[2:3]
+// GFX11: encoding: [0x05,0x00,0x28,0xd7,0x01,0x05,0x02,0x00]
+
+v_mul_f64 v[5:6], v[254:255], v[254:255]
+// GFX11: encoding: [0x05,0x00,0x28,0xd7,0xfe,0xfd,0x03,0x00]
+
+v_mul_f64 v[5:6], s[2:3], s[4:5]
+// GFX11: encoding: [0x05,0x00,0x28,0xd7,0x02,0x08,0x00,0x00]
+
+v_mul_f64 v[5:6], s[104:105], s[104:105]
+// GFX11: encoding: [0x05,0x00,0x28,0xd7,0x68,0xd0,0x00,0x00]
+
+v_mul_f64 v[5:6], vcc, ttmp[14:15]
+// GFX11: encoding: [0x05,0x00,0x28,0xd7,0x6a,0xf4,0x00,0x00]
+
+v_mul_f64 v[5:6], ttmp[14:15], 0xaf123456
+// GFX11: encoding: [0x05,0x00,0x28,0xd7,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_mul_f64 v[5:6], -|exec|, src_scc
+// GFX11: encoding: [0x05,0x01,0x28,0xd7,0x7e,0xfa,0x01,0x20]
+
+v_mul_f64 v[5:6], null, 0.5
+// GFX11: encoding: [0x05,0x00,0x28,0xd7,0x7c,0xe0,0x01,0x00]
+
+v_mul_f64 v[5:6], -1, -1
+// GFX11: encoding: [0x05,0x00,0x28,0xd7,0xc1,0x82,0x01,0x00]
+
+v_mul_f64 v[5:6], 0.5, null mul:2
+// GFX11: encoding: [0x05,0x00,0x28,0xd7,0xf0,0xf8,0x00,0x08]
+
+v_mul_f64 v[5:6], -|src_scc|, -|exec| mul:4
+// GFX11: encoding: [0x05,0x03,0x28,0xd7,0xfd,0xfc,0x00,0x70]
+
+v_mul_f64 v[254:255], 0xaf123456, -|vcc| clamp div:2
+// GFX11: encoding: [0xfe,0x82,0x28,0xd7,0xff,0xd4,0x00,0x58,0x56,0x34,0x12,0xaf]
+
+v_mul_hi_i32 v5, v1, v2
+// GFX11: encoding: [0x05,0x00,0x2e,0xd7,0x01,0x05,0x02,0x00]
+
+v_mul_hi_i32 v5, v255, v255
+// GFX11: encoding: [0x05,0x00,0x2e,0xd7,0xff,0xff,0x03,0x00]
+
+v_mul_hi_i32 v5, s1, s2
+// GFX11: encoding: [0x05,0x00,0x2e,0xd7,0x01,0x04,0x00,0x00]
+
+v_mul_hi_i32 v5, s105, s105
+// GFX11: encoding: [0x05,0x00,0x2e,0xd7,0x69,0xd2,0x00,0x00]
+
+v_mul_hi_i32 v5, vcc_lo, ttmp15
+// GFX11: encoding: [0x05,0x00,0x2e,0xd7,0x6a,0xf6,0x00,0x00]
+
+v_mul_hi_i32 v5, vcc_hi, 0xaf123456
+// GFX11: encoding: [0x05,0x00,0x2e,0xd7,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_mul_hi_i32 v5, ttmp15, src_scc
+// GFX11: encoding: [0x05,0x00,0x2e,0xd7,0x7b,0xfa,0x01,0x00]
+
+v_mul_hi_i32 v5, m0, 0.5
+// GFX11: encoding: [0x05,0x00,0x2e,0xd7,0x7d,0xe0,0x01,0x00]
+
+v_mul_hi_i32 v5, exec_lo, -1
+// GFX11: encoding: [0x05,0x00,0x2e,0xd7,0x7e,0x82,0x01,0x00]
+
+v_mul_hi_i32 v5, exec_hi, null
+// GFX11: encoding: [0x05,0x00,0x2e,0xd7,0x7f,0xf8,0x00,0x00]
+
+v_mul_hi_i32 v5, null, exec_lo
+// GFX11: encoding: [0x05,0x00,0x2e,0xd7,0x7c,0xfc,0x00,0x00]
+
+v_mul_hi_i32 v5, -1, exec_hi
+// GFX11: encoding: [0x05,0x00,0x2e,0xd7,0xc1,0xfe,0x00,0x00]
+
+v_mul_hi_i32 v5, 0.5, m0
+// GFX11: encoding: [0x05,0x00,0x2e,0xd7,0xf0,0xfa,0x00,0x00]
+
+v_mul_hi_i32 v5, src_scc, vcc_lo
+// GFX11: encoding: [0x05,0x00,0x2e,0xd7,0xfd,0xd4,0x00,0x00]
+
+v_mul_hi_i32 v255, 0xaf123456, vcc_hi
+// GFX11: encoding: [0xff,0x00,0x2e,0xd7,0xff,0xd6,0x00,0x00,0x56,0x34,0x12,0xaf]
+
+v_mul_hi_u32 v5, v1, v2
+// GFX11: encoding: [0x05,0x00,0x2d,0xd7,0x01,0x05,0x02,0x00]
+
+v_mul_hi_u32 v5, v255, v255
+// GFX11: encoding: [0x05,0x00,0x2d,0xd7,0xff,0xff,0x03,0x00]
+
+v_mul_hi_u32 v5, s1, s2
+// GFX11: encoding: [0x05,0x00,0x2d,0xd7,0x01,0x04,0x00,0x00]
+
+v_mul_hi_u32 v5, s105, s105
+// GFX11: encoding: [0x05,0x00,0x2d,0xd7,0x69,0xd2,0x00,0x00]
+
+v_mul_hi_u32 v5, vcc_lo, ttmp15
+// GFX11: encoding: [0x05,0x00,0x2d,0xd7,0x6a,0xf6,0x00,0x00]
+
+v_mul_hi_u32 v5, vcc_hi, 0xaf123456
+// GFX11: encoding: [0x05,0x00,0x2d,0xd7,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_mul_hi_u32 v5, ttmp15, src_scc
+// GFX11: encoding: [0x05,0x00,0x2d,0xd7,0x7b,0xfa,0x01,0x00]
+
+v_mul_hi_u32 v5, m0, 0.5
+// GFX11: encoding: [0x05,0x00,0x2d,0xd7,0x7d,0xe0,0x01,0x00]
+
+v_mul_hi_u32 v5, exec_lo, -1
+// GFX11: encoding: [0x05,0x00,0x2d,0xd7,0x7e,0x82,0x01,0x00]
+
+v_mul_hi_u32 v5, exec_hi, null
+// GFX11: encoding: [0x05,0x00,0x2d,0xd7,0x7f,0xf8,0x00,0x00]
+
+v_mul_hi_u32 v5, null, exec_lo
+// GFX11: encoding: [0x05,0x00,0x2d,0xd7,0x7c,0xfc,0x00,0x00]
+
+v_mul_hi_u32 v5, -1, exec_hi
+// GFX11: encoding: [0x05,0x00,0x2d,0xd7,0xc1,0xfe,0x00,0x00]
+
+v_mul_hi_u32 v5, 0.5, m0
+// GFX11: encoding: [0x05,0x00,0x2d,0xd7,0xf0,0xfa,0x00,0x00]
+
+v_mul_hi_u32 v5, src_scc, vcc_lo
+// GFX11: encoding: [0x05,0x00,0x2d,0xd7,0xfd,0xd4,0x00,0x00]
+
+v_mul_hi_u32 v255, 0xaf123456, vcc_hi
+// GFX11: encoding: [0xff,0x00,0x2d,0xd7,0xff,0xd6,0x00,0x00,0x56,0x34,0x12,0xaf]
+
+v_mul_lo_u16 v5, v1, v2
+// GFX11: encoding: [0x05,0x00,0x05,0xd7,0x01,0x05,0x02,0x00]
+
+v_mul_lo_u16 v5, v255, v255
+// GFX11: encoding: [0x05,0x00,0x05,0xd7,0xff,0xff,0x03,0x00]
+
+v_mul_lo_u16 v5, s1, s2
+// GFX11: encoding: [0x05,0x00,0x05,0xd7,0x01,0x04,0x00,0x00]
+
+v_mul_lo_u16 v5, s105, s105
+// GFX11: encoding: [0x05,0x00,0x05,0xd7,0x69,0xd2,0x00,0x00]
+
+v_mul_lo_u16 v5, vcc_lo, ttmp15
+// GFX11: encoding: [0x05,0x00,0x05,0xd7,0x6a,0xf6,0x00,0x00]
+
+v_mul_lo_u16 v5, vcc_hi, 0xfe0b
+// GFX11: encoding: [0x05,0x00,0x05,0xd7,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+v_mul_lo_u16 v5, ttmp15, src_scc
+// GFX11: encoding: [0x05,0x00,0x05,0xd7,0x7b,0xfa,0x01,0x00]
+
+v_mul_lo_u16 v5, m0, 0.5
+// GFX11: encoding: [0x05,0x00,0x05,0xd7,0x7d,0xe0,0x01,0x00]
+
+v_mul_lo_u16 v5, exec_lo, -1
+// GFX11: encoding: [0x05,0x00,0x05,0xd7,0x7e,0x82,0x01,0x00]
+
+v_mul_lo_u16 v5, exec_hi, null
+// GFX11: encoding: [0x05,0x00,0x05,0xd7,0x7f,0xf8,0x00,0x00]
+
+v_mul_lo_u16 v5, null, exec_lo
+// GFX11: encoding: [0x05,0x00,0x05,0xd7,0x7c,0xfc,0x00,0x00]
+
+v_mul_lo_u16 v5, -1, exec_hi
+// GFX11: encoding: [0x05,0x00,0x05,0xd7,0xc1,0xfe,0x00,0x00]
+
+v_mul_lo_u16 v5, 0.5, m0
+// GFX11: encoding: [0x05,0x00,0x05,0xd7,0xf0,0xfa,0x00,0x00]
+
+v_mul_lo_u16 v5, src_scc, vcc_lo
+// GFX11: encoding: [0x05,0x00,0x05,0xd7,0xfd,0xd4,0x00,0x00]
+
+v_mul_lo_u16 v255, 0xfe0b, vcc_hi
+// GFX11: encoding: [0xff,0x00,0x05,0xd7,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
+
+v_mul_lo_u32 v5, v1, v2
+// GFX11: encoding: [0x05,0x00,0x2c,0xd7,0x01,0x05,0x02,0x00]
+
+v_mul_lo_u32 v5, v255, v255
+// GFX11: encoding: [0x05,0x00,0x2c,0xd7,0xff,0xff,0x03,0x00]
+
+v_mul_lo_u32 v5, s1, s2
+// GFX11: encoding: [0x05,0x00,0x2c,0xd7,0x01,0x04,0x00,0x00]
+
+v_mul_lo_u32 v5, s105, s105
+// GFX11: encoding: [0x05,0x00,0x2c,0xd7,0x69,0xd2,0x00,0x00]
+
+v_mul_lo_u32 v5, vcc_lo, ttmp15
+// GFX11: encoding: [0x05,0x00,0x2c,0xd7,0x6a,0xf6,0x00,0x00]
+
+v_mul_lo_u32 v5, vcc_hi, 0xaf123456
+// GFX11: encoding: [0x05,0x00,0x2c,0xd7,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_mul_lo_u32 v5, ttmp15, src_scc
+// GFX11: encoding: [0x05,0x00,0x2c,0xd7,0x7b,0xfa,0x01,0x00]
+
+v_mul_lo_u32 v5, m0, 0.5
+// GFX11: encoding: [0x05,0x00,0x2c,0xd7,0x7d,0xe0,0x01,0x00]
+
+v_mul_lo_u32 v5, exec_lo, -1
+// GFX11: encoding: [0x05,0x00,0x2c,0xd7,0x7e,0x82,0x01,0x00]
+
+v_mul_lo_u32 v5, exec_hi, null
+// GFX11: encoding: [0x05,0x00,0x2c,0xd7,0x7f,0xf8,0x00,0x00]
+
+v_mul_lo_u32 v5, null, exec_lo
+// GFX11: encoding: [0x05,0x00,0x2c,0xd7,0x7c,0xfc,0x00,0x00]
+
+v_mul_lo_u32 v5, -1, exec_hi
+// GFX11: encoding: [0x05,0x00,0x2c,0xd7,0xc1,0xfe,0x00,0x00]
+
+v_mul_lo_u32 v5, 0.5, m0
+// GFX11: encoding: [0x05,0x00,0x2c,0xd7,0xf0,0xfa,0x00,0x00]
+
+v_mul_lo_u32 v5, src_scc, vcc_lo
+// GFX11: encoding: [0x05,0x00,0x2c,0xd7,0xfd,0xd4,0x00,0x00]
+
+v_mul_lo_u32 v255, 0xaf123456, vcc_hi
+// GFX11: encoding: [0xff,0x00,0x2c,0xd7,0xff,0xd6,0x00,0x00,0x56,0x34,0x12,0xaf]
+
+v_mullit_f32 v5, v1, v2, s3
+// GFX11: encoding: [0x05,0x00,0x18,0xd6,0x01,0x05,0x0e,0x00]
+
+v_mullit_f32 v5, v255, s2, s105
+// GFX11: encoding: [0x05,0x00,0x18,0xd6,0xff,0x05,0xa4,0x01]
+
+v_mullit_f32 v5, s1, v255, exec_hi
+// GFX11: encoding: [0x05,0x00,0x18,0xd6,0x01,0xfe,0xff,0x01]
+
+v_mullit_f32 v5, s105, s105, exec_lo
+// GFX11: encoding: [0x05,0x00,0x18,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_mullit_f32 v5, vcc_lo, ttmp15, v3
+// GFX11: encoding: [0x05,0x00,0x18,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_mullit_f32 v5, vcc_hi, 0xaf123456, v255
+// GFX11: encoding: [0x05,0x00,0x18,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+v_mullit_f32 v5, -|ttmp15|, -|src_scc|, -|ttmp15|
+// GFX11: encoding: [0x05,0x07,0x18,0xd6,0x7b,0xfa,0xed,0xe1]
+
+v_mullit_f32 v5, m0, 0.5, m0
+// GFX11: encoding: [0x05,0x00,0x18,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_mullit_f32 v5, |exec_lo|, -1, vcc_hi
+// GFX11: encoding: [0x05,0x01,0x18,0xd6,0x7e,0x82,0xad,0x01]
+
+v_mullit_f32 v5, -|exec_hi|, null, -|vcc_lo|
+// GFX11: encoding: [0x05,0x05,0x18,0xd6,0x7f,0xf8,0xa8,0xa1]
+
+v_mullit_f32 v5, null, exec_lo, -|0xaf123456|
+// GFX11: encoding: [0x05,0x04,0x18,0xd6,0x7c,0xfc,0xfc,0x83,0x56,0x34,0x12,0xaf]
+
+v_mullit_f32 v5, -1, -|exec_hi|, -|src_scc|
+// GFX11: encoding: [0x05,0x06,0x18,0xd6,0xc1,0xfe,0xf4,0xc3]
+
+v_mullit_f32 v5, 0.5, -m0, 0.5 mul:2
+// GFX11: encoding: [0x05,0x00,0x18,0xd6,0xf0,0xfa,0xc0,0x4b]
+
+v_mullit_f32 v5, -src_scc, |vcc_lo|, -1 mul:4
+// GFX11: encoding: [0x05,0x02,0x18,0xd6,0xfd,0xd4,0x04,0x33]
+
+v_mullit_f32 v255, -|0xaf123456|, -|vcc_hi|, null clamp div:2
+// GFX11: encoding: [0xff,0x83,0x18,0xd6,0xff,0xd6,0xf0,0x79,0x56,0x34,0x12,0xaf]
+
+v_or3_b32 v5, v1, v2, s3
+// GFX11: encoding: [0x05,0x00,0x58,0xd6,0x01,0x05,0x0e,0x00]
+
+v_or3_b32 v5, v255, s2, s105
+// GFX11: encoding: [0x05,0x00,0x58,0xd6,0xff,0x05,0xa4,0x01]
+
+v_or3_b32 v5, s1, v255, exec_hi
+// GFX11: encoding: [0x05,0x00,0x58,0xd6,0x01,0xfe,0xff,0x01]
+
+v_or3_b32 v5, s105, s105, exec_lo
+// GFX11: encoding: [0x05,0x00,0x58,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_or3_b32 v5, vcc_lo, ttmp15, v3
+// GFX11: encoding: [0x05,0x00,0x58,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_or3_b32 v5, vcc_hi, 0xaf123456, v255
+// GFX11: encoding: [0x05,0x00,0x58,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+v_or3_b32 v5, ttmp15, src_scc, ttmp15
+// GFX11: encoding: [0x05,0x00,0x58,0xd6,0x7b,0xfa,0xed,0x01]
+
+v_or3_b32 v5, m0, 0.5, m0
+// GFX11: encoding: [0x05,0x00,0x58,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_or3_b32 v5, exec_lo, -1, vcc_hi
+// GFX11: encoding: [0x05,0x00,0x58,0xd6,0x7e,0x82,0xad,0x01]
+
+v_or3_b32 v5, exec_hi, null, vcc_lo
+// GFX11: encoding: [0x05,0x00,0x58,0xd6,0x7f,0xf8,0xa8,0x01]
+
+v_or3_b32 v5, null, exec_lo, 0xaf123456
+// GFX11: encoding: [0x05,0x00,0x58,0xd6,0x7c,0xfc,0xfc,0x03,0x56,0x34,0x12,0xaf]
+
+v_or3_b32 v5, -1, exec_hi, src_scc
+// GFX11: encoding: [0x05,0x00,0x58,0xd6,0xc1,0xfe,0xf4,0x03]
+
+v_or3_b32 v5, 0.5, m0, 0.5
+// GFX11: encoding: [0x05,0x00,0x58,0xd6,0xf0,0xfa,0xc0,0x03]
+
+v_or3_b32 v5, src_scc, vcc_lo, -1
+// GFX11: encoding: [0x05,0x00,0x58,0xd6,0xfd,0xd4,0x04,0x03]
+
+v_or3_b32 v255, 0xaf123456, vcc_hi, null
+// GFX11: encoding: [0xff,0x00,0x58,0xd6,0xff,0xd6,0xf0,0x01,0x56,0x34,0x12,0xaf]
+
+v_or_b16 v5, v1, v2
+// GFX11: encoding: [0x05,0x00,0x63,0xd7,0x01,0x05,0x02,0x00]
+
+v_or_b16 v5, v255, v255
+// GFX11: encoding: [0x05,0x00,0x63,0xd7,0xff,0xff,0x03,0x00]
+
+v_or_b16 v5, s1, s2
+// GFX11: encoding: [0x05,0x00,0x63,0xd7,0x01,0x04,0x00,0x00]
+
+v_or_b16 v5, s105, s105
+// GFX11: encoding: [0x05,0x00,0x63,0xd7,0x69,0xd2,0x00,0x00]
+
+v_or_b16 v5, vcc_lo, ttmp15
+// GFX11: encoding: [0x05,0x00,0x63,0xd7,0x6a,0xf6,0x00,0x00]
+
+v_or_b16 v5, vcc_hi, 0xfe0b
+// GFX11: encoding: [0x05,0x00,0x63,0xd7,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+v_or_b16 v5, ttmp15, src_scc
+// GFX11: encoding: [0x05,0x00,0x63,0xd7,0x7b,0xfa,0x01,0x00]
+
+v_or_b16 v5, m0, 0.5
+// GFX11: encoding: [0x05,0x00,0x63,0xd7,0x7d,0xe0,0x01,0x00]
+
+v_or_b16 v5, exec_lo, -1
+// GFX11: encoding: [0x05,0x00,0x63,0xd7,0x7e,0x82,0x01,0x00]
+
+v_or_b16 v5, exec_hi, null
+// GFX11: encoding: [0x05,0x00,0x63,0xd7,0x7f,0xf8,0x00,0x00]
+
+v_or_b16 v5, null, exec_lo
+// GFX11: encoding: [0x05,0x00,0x63,0xd7,0x7c,0xfc,0x00,0x00]
+
+v_or_b16 v5, -1, exec_hi
+// GFX11: encoding: [0x05,0x00,0x63,0xd7,0xc1,0xfe,0x00,0x00]
+
+v_or_b16 v5, 0.5, m0
+// GFX11: encoding: [0x05,0x00,0x63,0xd7,0xf0,0xfa,0x00,0x00]
+
+v_or_b16 v5, src_scc, vcc_lo
+// GFX11: encoding: [0x05,0x00,0x63,0xd7,0xfd,0xd4,0x00,0x00]
+
+v_or_b16 v255, 0xfe0b, vcc_hi
+// GFX11: encoding: [0xff,0x00,0x63,0xd7,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
+
+v_pack_b32_f16 v5, v1, v2
+// GFX11: encoding: [0x05,0x00,0x11,0xd7,0x01,0x05,0x02,0x00]
+
+v_pack_b32_f16 v5, v255, v255
+// GFX11: encoding: [0x05,0x00,0x11,0xd7,0xff,0xff,0x03,0x00]
+
+v_pack_b32_f16 v5, s1, s2
+// GFX11: encoding: [0x05,0x00,0x11,0xd7,0x01,0x04,0x00,0x00]
+
+v_pack_b32_f16 v5, s105, s105
+// GFX11: encoding: [0x05,0x00,0x11,0xd7,0x69,0xd2,0x00,0x00]
+
+v_pack_b32_f16 v5, vcc_lo, ttmp15
+// GFX11: encoding: [0x05,0x00,0x11,0xd7,0x6a,0xf6,0x00,0x00]
+
+v_pack_b32_f16 v5, vcc_hi, 0xfe0b
+// GFX11: encoding: [0x05,0x00,0x11,0xd7,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+v_pack_b32_f16 v5, ttmp15, src_scc
+// GFX11: encoding: [0x05,0x00,0x11,0xd7,0x7b,0xfa,0x01,0x00]
+
+v_pack_b32_f16 v5, m0, 0.5
+// GFX11: encoding: [0x05,0x00,0x11,0xd7,0x7d,0xe0,0x01,0x00]
+
+v_pack_b32_f16 v5, exec_lo, -1
+// GFX11: encoding: [0x05,0x00,0x11,0xd7,0x7e,0x82,0x01,0x00]
+
+v_pack_b32_f16 v5, |exec_hi|, null
+// GFX11: encoding: [0x05,0x01,0x11,0xd7,0x7f,0xf8,0x00,0x00]
+
+v_pack_b32_f16 v5, null, exec_lo
+// GFX11: encoding: [0x05,0x00,0x11,0xd7,0x7c,0xfc,0x00,0x00]
+
+v_pack_b32_f16 v5, -1, exec_hi
+// GFX11: encoding: [0x05,0x00,0x11,0xd7,0xc1,0xfe,0x00,0x00]
+
+v_pack_b32_f16 v5, 0.5, -m0 op_sel:[0,0,0]
+// GFX11: encoding: [0x05,0x00,0x11,0xd7,0xf0,0xfa,0x00,0x40]
+
+v_pack_b32_f16 v5, -src_scc, |vcc_lo| op_sel:[1,0,0]
+// GFX11: encoding: [0x05,0x0a,0x11,0xd7,0xfd,0xd4,0x00,0x20]
+
+v_pack_b32_f16 v255, -|0xfe0b|, -|vcc_hi| op_sel:[0,1,0]
+// GFX11: encoding: [0xff,0x13,0x11,0xd7,0xff,0xd6,0x00,0x60,0x0b,0xfe,0x00,0x00]
+
+v_perm_b32 v5, v1, v2, s3
+// GFX11: encoding: [0x05,0x00,0x44,0xd6,0x01,0x05,0x0e,0x00]
+
+v_perm_b32 v5, v255, s2, s105
+// GFX11: encoding: [0x05,0x00,0x44,0xd6,0xff,0x05,0xa4,0x01]
+
+v_perm_b32 v5, s1, v255, exec_hi
+// GFX11: encoding: [0x05,0x00,0x44,0xd6,0x01,0xfe,0xff,0x01]
+
+v_perm_b32 v5, s105, s105, exec_lo
+// GFX11: encoding: [0x05,0x00,0x44,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_perm_b32 v5, vcc_lo, ttmp15, v3
+// GFX11: encoding: [0x05,0x00,0x44,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_perm_b32 v5, vcc_hi, 0xaf123456, v255
+// GFX11: encoding: [0x05,0x00,0x44,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+v_perm_b32 v5, ttmp15, src_scc, ttmp15
+// GFX11: encoding: [0x05,0x00,0x44,0xd6,0x7b,0xfa,0xed,0x01]
+
+v_perm_b32 v5, m0, 0.5, m0
+// GFX11: encoding: [0x05,0x00,0x44,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_perm_b32 v5, exec_lo, -1, vcc_hi
+// GFX11: encoding: [0x05,0x00,0x44,0xd6,0x7e,0x82,0xad,0x01]
+
+v_perm_b32 v5, exec_hi, null, vcc_lo
+// GFX11: encoding: [0x05,0x00,0x44,0xd6,0x7f,0xf8,0xa8,0x01]
+
+v_perm_b32 v5, null, exec_lo, 0xaf123456
+// GFX11: encoding: [0x05,0x00,0x44,0xd6,0x7c,0xfc,0xfc,0x03,0x56,0x34,0x12,0xaf]
+
+v_perm_b32 v5, -1, exec_hi, src_scc
+// GFX11: encoding: [0x05,0x00,0x44,0xd6,0xc1,0xfe,0xf4,0x03]
+
+v_perm_b32 v5, 0.5, m0, 0.5
+// GFX11: encoding: [0x05,0x00,0x44,0xd6,0xf0,0xfa,0xc0,0x03]
+
+v_perm_b32 v5, src_scc, vcc_lo, -1
+// GFX11: encoding: [0x05,0x00,0x44,0xd6,0xfd,0xd4,0x04,0x03]
+
+v_perm_b32 v255, 0xaf123456, vcc_hi, null
+// GFX11: encoding: [0xff,0x00,0x44,0xd6,0xff,0xd6,0xf0,0x01,0x56,0x34,0x12,0xaf]
+
+v_permlane16_b32 v5, v1, s2, s3
+// GFX11: encoding: [0x05,0x00,0x5b,0xd6,0x01,0x05,0x0c,0x00]
+
+v_permlane16_b32 v5, v1, s105, s105
+// GFX11: encoding: [0x05,0x00,0x5b,0xd6,0x01,0xd3,0xa4,0x01]
+
+v_permlane16_b32 v5, v1, ttmp15, ttmp15
+// GFX11: encoding: [0x05,0x00,0x5b,0xd6,0x01,0xf7,0xec,0x01]
+
+v_permlane16_b32 v5, v1, vcc_hi, exec_lo
+// GFX11: encoding: [0x05,0x00,0x5b,0xd6,0x01,0xd7,0xf8,0x01]
+
+v_permlane16_b32 v5, v1, vcc_lo, m0
+// GFX11: encoding: [0x05,0x00,0x5b,0xd6,0x01,0xd5,0xf4,0x01]
+
+v_permlane16_b32 v5, v1, m0, vcc_hi
+// GFX11: encoding: [0x05,0x00,0x5b,0xd6,0x01,0xfb,0xac,0x01]
+
+v_permlane16_b32 v5, v1, exec_hi, vcc_lo
+// GFX11: encoding: [0x05,0x00,0x5b,0xd6,0x01,0xff,0xa8,0x01]
+
+v_permlane16_b32 v5, v1, exec_lo, src_scc
+// GFX11: encoding: [0x05,0x00,0x5b,0xd6,0x01,0xfd,0xf4,0x03]
+
+v_permlane16_b32 v5, v1, null, 0.5 op_sel:[1,1]
+// GFX11: encoding: [0x05,0x18,0x5b,0xd6,0x01,0xf9,0xc0,0x03]
+
+v_permlane16_b32 v5, v1, -1, -1 op_sel:[0,0]
+// GFX11: encoding: [0x05,0x00,0x5b,0xd6,0x01,0x83,0x05,0x03]
+
+v_permlane16_b32 v5, v1, 0.5, null op_sel:[1,0]
+// GFX11: encoding: [0x05,0x08,0x5b,0xd6,0x01,0xe1,0xf1,0x01]
+
+v_permlane16_b32 v255, v255, src_scc, exec_hi op_sel:[0,1]
+// GFX11: encoding: [0xff,0x10,0x5b,0xd6,0xff,0xfb,0xfd,0x01]
+
+v_permlane16_b32 v5, v1, 0xaf123456, s3
+// GFX11: encoding: [0x05,0x00,0x5b,0xd6,0x01,0xff,0x0d,0x00,0x56,0x34,0x12,0xaf]
+
+v_permlane16_b32 v5, v1, s2, 0xaf123456
+// GFX11: encoding: [0x05,0x00,0x5b,0xd6,0x01,0x05,0xfc,0x03,0x56,0x34,0x12,0xaf]
+
+v_permlane16_b32 v5, v1, 0x12345678, 0x12345678
+// GFX11: encoding: [0x05,0x00,0x5b,0xd6,0x01,0xff,0xfd,0x03,0x78,0x56,0x34,0x12]
+
+v_permlanex16_b32 v5, v1, s2, s3
+// GFX11: encoding: [0x05,0x00,0x5c,0xd6,0x01,0x05,0x0c,0x00]
+
+v_permlanex16_b32 v5, v1, s105, s105
+// GFX11: encoding: [0x05,0x00,0x5c,0xd6,0x01,0xd3,0xa4,0x01]
+
+v_permlanex16_b32 v5, v1, ttmp15, ttmp15
+// GFX11: encoding: [0x05,0x00,0x5c,0xd6,0x01,0xf7,0xec,0x01]
+
+v_permlanex16_b32 v5, v1, vcc_hi, exec_lo
+// GFX11: encoding: [0x05,0x00,0x5c,0xd6,0x01,0xd7,0xf8,0x01]
+
+v_permlanex16_b32 v5, v1, vcc_lo, m0
+// GFX11: encoding: [0x05,0x00,0x5c,0xd6,0x01,0xd5,0xf4,0x01]
+
+v_permlanex16_b32 v5, v1, m0, vcc_hi
+// GFX11: encoding: [0x05,0x00,0x5c,0xd6,0x01,0xfb,0xac,0x01]
+
+v_permlanex16_b32 v5, v1, exec_hi, vcc_lo
+// GFX11: encoding: [0x05,0x00,0x5c,0xd6,0x01,0xff,0xa8,0x01]
+
+v_permlanex16_b32 v5, v1, exec_lo, src_scc
+// GFX11: encoding: [0x05,0x00,0x5c,0xd6,0x01,0xfd,0xf4,0x03]
+
+v_permlanex16_b32 v5, v1, null, 0.5 op_sel:[1,1]
+// GFX11: encoding: [0x05,0x18,0x5c,0xd6,0x01,0xf9,0xc0,0x03]
+
+v_permlanex16_b32 v5, v1, -1, -1 op_sel:[0,0]
+// GFX11: encoding: [0x05,0x00,0x5c,0xd6,0x01,0x83,0x05,0x03]
+
+v_permlanex16_b32 v5, v1, 0.5, null op_sel:[1,0]
+// GFX11: encoding: [0x05,0x08,0x5c,0xd6,0x01,0xe1,0xf1,0x01]
+
+v_permlanex16_b32 v255, v255, src_scc, exec_hi op_sel:[0,1]
+// GFX11: encoding: [0xff,0x10,0x5c,0xd6,0xff,0xfb,0xfd,0x01]
+
+v_permlanex16_b32 v5, v1, 0xaf123456, s3
+// GFX11: encoding: [0x05,0x00,0x5c,0xd6,0x01,0xff,0x0d,0x00,0x56,0x34,0x12,0xaf]
+
+v_permlanex16_b32 v5, v1, s2, 0xaf123456
+// GFX11: encoding: [0x05,0x00,0x5c,0xd6,0x01,0x05,0xfc,0x03,0x56,0x34,0x12,0xaf]
+
+v_permlanex16_b32 v5, v1, 0x12345678, 0x12345678
+// GFX11: encoding: [0x05,0x00,0x5c,0xd6,0x01,0xff,0xfd,0x03,0x78,0x56,0x34,0x12]
+
+v_qsad_pk_u16_u8 v[5:6], v[1:2], v2, ttmp[14:15]
+// GFX11: encoding: [0x05,0x00,0x3a,0xd6,0x01,0x05,0xea,0x01]
+
+v_qsad_pk_u16_u8 v[5:6], v[1:2], v255, ttmp[14:15]
+// GFX11: encoding: [0x05,0x00,0x3a,0xd6,0x01,0xff,0xeb,0x01]
+
+v_qsad_pk_u16_u8 v[5:6], v[1:2], s2, ttmp[14:15]
+// GFX11: encoding: [0x05,0x00,0x3a,0xd6,0x01,0x05,0xe8,0x01]
+
+v_qsad_pk_u16_u8 v[5:6], v[1:2], s105, ttmp[14:15]
+// GFX11: encoding: [0x05,0x00,0x3a,0xd6,0x01,0xd3,0xe8,0x01]
+
+v_qsad_pk_u16_u8 v[5:6], v[254:255], ttmp15, s[6:7]
+// GFX11: encoding: [0x05,0x00,0x3a,0xd6,0xfe,0xf7,0x18,0x00]
+
+v_qsad_pk_u16_u8 v[5:6], s[2:3], vcc_hi, v[3:4]
+// GFX11: encoding: [0x05,0x00,0x3a,0xd6,0x02,0xd6,0x0c,0x04]
+
+v_qsad_pk_u16_u8 v[5:6], s[104:105], vcc_lo, s[104:105]
+// GFX11: encoding: [0x05,0x00,0x3a,0xd6,0x68,0xd4,0xa0,0x01]
+
+v_qsad_pk_u16_u8 v[5:6], vcc, m0, v[254:255]
+// GFX11: encoding: [0x05,0x00,0x3a,0xd6,0x6a,0xfa,0xf8,0x07]
+
+v_qsad_pk_u16_u8 v[5:6], ttmp[14:15], exec_hi, null
+// GFX11: encoding: [0x05,0x00,0x3a,0xd6,0x7a,0xfe,0xf0,0x01]
+
+v_qsad_pk_u16_u8 v[5:6], exec, exec_lo, exec
+// GFX11: encoding: [0x05,0x00,0x3a,0xd6,0x7e,0xfc,0xf8,0x01]
+
+v_qsad_pk_u16_u8 v[5:6], null, null, vcc
+// GFX11: encoding: [0x05,0x00,0x3a,0xd6,0x7c,0xf8,0xa8,0x01]
+
+v_qsad_pk_u16_u8 v[5:6], -1, -1, 0xaf123456
+// GFX11: encoding: [0x05,0x00,0x3a,0xd6,0xc1,0x82,0xfd,0x03,0x56,0x34,0x12,0xaf]
+
+v_qsad_pk_u16_u8 v[5:6], 0.5, 0.5, src_scc
+// GFX11: encoding: [0x05,0x00,0x3a,0xd6,0xf0,0xe0,0xf5,0x03]
+
+v_qsad_pk_u16_u8 v[5:6], src_scc, src_scc, 0.5
+// GFX11: encoding: [0x05,0x00,0x3a,0xd6,0xfd,0xfa,0xc1,0x03]
+
+v_qsad_pk_u16_u8 v[254:255], 0xaf123456, 0xaf123456, -1 clamp
+// GFX11: encoding: [0xfe,0x80,0x3a,0xd6,0xff,0xfe,0x05,0x03,0x56,0x34,0x12,0xaf]
+
+v_readlane_b32 s5, v1, s2
+// GFX11: encoding: [0x05,0x00,0x60,0xd7,0x01,0x05,0x00,0x00]
+
+v_readlane_b32 s5, v1, s105
+// GFX11: encoding: [0x05,0x00,0x60,0xd7,0x01,0xd3,0x00,0x00]
+
+v_readlane_b32 s105, v1, ttmp15
+// GFX11: encoding: [0x69,0x00,0x60,0xd7,0x01,0xf7,0x00,0x00]
+
+v_readlane_b32 vcc_lo, v1, vcc_hi
+// GFX11: encoding: [0x6a,0x00,0x60,0xd7,0x01,0xd7,0x00,0x00]
+
+v_readlane_b32 vcc_hi, v1, vcc_lo
+// GFX11: encoding: [0x6b,0x00,0x60,0xd7,0x01,0xd5,0x00,0x00]
+
+v_readlane_b32 ttmp15, v1, m0
+// GFX11: encoding: [0x7b,0x00,0x60,0xd7,0x01,0xfb,0x00,0x00]
+
+v_readlane_b32 null, v255, null
+// GFX11: encoding: [0x7c,0x00,0x60,0xd7,0xff,0xf9,0x00,0x00]
+
+v_sad_hi_u8 v5, v1, v2, s3
+// GFX11: encoding: [0x05,0x00,0x23,0xd6,0x01,0x05,0x0e,0x00]
+
+v_sad_hi_u8 v5, v255, s2, s105
+// GFX11: encoding: [0x05,0x00,0x23,0xd6,0xff,0x05,0xa4,0x01]
+
+v_sad_hi_u8 v5, s1, v255, exec_hi
+// GFX11: encoding: [0x05,0x00,0x23,0xd6,0x01,0xfe,0xff,0x01]
+
+v_sad_hi_u8 v5, s105, s105, exec_lo
+// GFX11: encoding: [0x05,0x00,0x23,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_sad_hi_u8 v5, vcc_lo, ttmp15, v3
+// GFX11: encoding: [0x05,0x00,0x23,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_sad_hi_u8 v5, vcc_hi, 0xaf123456, v255
+// GFX11: encoding: [0x05,0x00,0x23,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+v_sad_hi_u8 v5, ttmp15, src_scc, ttmp15
+// GFX11: encoding: [0x05,0x00,0x23,0xd6,0x7b,0xfa,0xed,0x01]
+
+v_sad_hi_u8 v5, m0, 0.5, m0
+// GFX11: encoding: [0x05,0x00,0x23,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_sad_hi_u8 v5, exec_lo, -1, vcc_hi
+// GFX11: encoding: [0x05,0x00,0x23,0xd6,0x7e,0x82,0xad,0x01]
+
+v_sad_hi_u8 v5, exec_hi, null, vcc_lo
+// GFX11: encoding: [0x05,0x00,0x23,0xd6,0x7f,0xf8,0xa8,0x01]
+
+v_sad_hi_u8 v5, null, exec_lo, 0xaf123456
+// GFX11: encoding: [0x05,0x00,0x23,0xd6,0x7c,0xfc,0xfc,0x03,0x56,0x34,0x12,0xaf]
+
+v_sad_hi_u8 v5, -1, exec_hi, src_scc
+// GFX11: encoding: [0x05,0x00,0x23,0xd6,0xc1,0xfe,0xf4,0x03]
+
+v_sad_hi_u8 v5, 0.5, m0, 0.5
+// GFX11: encoding: [0x05,0x00,0x23,0xd6,0xf0,0xfa,0xc0,0x03]
+
+v_sad_hi_u8 v5, src_scc, vcc_lo, -1
+// GFX11: encoding: [0x05,0x00,0x23,0xd6,0xfd,0xd4,0x04,0x03]
+
+v_sad_hi_u8 v255, 0xaf123456, vcc_hi, null clamp
+// GFX11: encoding: [0xff,0x80,0x23,0xd6,0xff,0xd6,0xf0,0x01,0x56,0x34,0x12,0xaf]
+
+v_sad_u16 v5, v1, v2, s3
+// GFX11: encoding: [0x05,0x00,0x24,0xd6,0x01,0x05,0x0e,0x00]
+
+v_sad_u16 v5, v255, s2, s105
+// GFX11: encoding: [0x05,0x00,0x24,0xd6,0xff,0x05,0xa4,0x01]
+
+v_sad_u16 v5, s1, v255, exec_hi
+// GFX11: encoding: [0x05,0x00,0x24,0xd6,0x01,0xfe,0xff,0x01]
+
+v_sad_u16 v5, s105, s105, exec_lo
+// GFX11: encoding: [0x05,0x00,0x24,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_sad_u16 v5, vcc_lo, ttmp15, v3
+// GFX11: encoding: [0x05,0x00,0x24,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_sad_u16 v5, vcc_hi, 0xfe0b, v255
+// GFX11: encoding: [0x05,0x00,0x24,0xd6,0x6b,0xfe,0xfd,0x07,0x0b,0xfe,0x00,0x00]
+
+v_sad_u16 v5, ttmp15, src_scc, ttmp15
+// GFX11: encoding: [0x05,0x00,0x24,0xd6,0x7b,0xfa,0xed,0x01]
+
+v_sad_u16 v5, m0, 0.5, m0
+// GFX11: encoding: [0x05,0x00,0x24,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_sad_u16 v5, exec_lo, -1, vcc_hi
+// GFX11: encoding: [0x05,0x00,0x24,0xd6,0x7e,0x82,0xad,0x01]
+
+v_sad_u16 v5, exec_hi, null, vcc_lo
+// GFX11: encoding: [0x05,0x00,0x24,0xd6,0x7f,0xf8,0xa8,0x01]
+
+v_sad_u16 v5, null, exec_lo, 0xaf123456
+// GFX11: encoding: [0x05,0x00,0x24,0xd6,0x7c,0xfc,0xfc,0x03,0x56,0x34,0x12,0xaf]
+
+v_sad_u16 v5, -1, exec_hi, src_scc
+// GFX11: encoding: [0x05,0x00,0x24,0xd6,0xc1,0xfe,0xf4,0x03]
+
+v_sad_u16 v5, 0.5, m0, 0.5
+// GFX11: encoding: [0x05,0x00,0x24,0xd6,0xf0,0xfa,0xc0,0x03]
+
+v_sad_u16 v5, src_scc, vcc_lo, -1
+// GFX11: encoding: [0x05,0x00,0x24,0xd6,0xfd,0xd4,0x04,0x03]
+
+v_sad_u16 v255, 0xfe0b, vcc_hi, null clamp
+// GFX11: encoding: [0xff,0x80,0x24,0xd6,0xff,0xd6,0xf0,0x01,0x0b,0xfe,0x00,0x00]
+
+v_sad_u32 v5, v1, v2, s3
+// GFX11: encoding: [0x05,0x00,0x25,0xd6,0x01,0x05,0x0e,0x00]
+
+v_sad_u32 v5, v255, s2, s105
+// GFX11: encoding: [0x05,0x00,0x25,0xd6,0xff,0x05,0xa4,0x01]
+
+v_sad_u32 v5, s1, v255, exec_hi
+// GFX11: encoding: [0x05,0x00,0x25,0xd6,0x01,0xfe,0xff,0x01]
+
+v_sad_u32 v5, s105, s105, exec_lo
+// GFX11: encoding: [0x05,0x00,0x25,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_sad_u32 v5, vcc_lo, ttmp15, v3
+// GFX11: encoding: [0x05,0x00,0x25,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_sad_u32 v5, vcc_hi, 0xaf123456, v255
+// GFX11: encoding: [0x05,0x00,0x25,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+v_sad_u32 v5, ttmp15, src_scc, ttmp15
+// GFX11: encoding: [0x05,0x00,0x25,0xd6,0x7b,0xfa,0xed,0x01]
+
+v_sad_u32 v5, m0, 0.5, m0
+// GFX11: encoding: [0x05,0x00,0x25,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_sad_u32 v5, exec_lo, -1, vcc_hi
+// GFX11: encoding: [0x05,0x00,0x25,0xd6,0x7e,0x82,0xad,0x01]
+
+v_sad_u32 v5, exec_hi, null, vcc_lo
+// GFX11: encoding: [0x05,0x00,0x25,0xd6,0x7f,0xf8,0xa8,0x01]
+
+v_sad_u32 v5, null, exec_lo, 0xaf123456
+// GFX11: encoding: [0x05,0x00,0x25,0xd6,0x7c,0xfc,0xfc,0x03,0x56,0x34,0x12,0xaf]
+
+v_sad_u32 v5, -1, exec_hi, src_scc
+// GFX11: encoding: [0x05,0x00,0x25,0xd6,0xc1,0xfe,0xf4,0x03]
+
+v_sad_u32 v5, 0.5, m0, 0.5
+// GFX11: encoding: [0x05,0x00,0x25,0xd6,0xf0,0xfa,0xc0,0x03]
+
+v_sad_u32 v5, src_scc, vcc_lo, -1
+// GFX11: encoding: [0x05,0x00,0x25,0xd6,0xfd,0xd4,0x04,0x03]
+
+v_sad_u32 v255, 0xaf123456, vcc_hi, null clamp
+// GFX11: encoding: [0xff,0x80,0x25,0xd6,0xff,0xd6,0xf0,0x01,0x56,0x34,0x12,0xaf]
+
+v_sad_u8 v5, v1, v2, s3
+// GFX11: encoding: [0x05,0x00,0x22,0xd6,0x01,0x05,0x0e,0x00]
+
+v_sad_u8 v5, v255, s2, s105
+// GFX11: encoding: [0x05,0x00,0x22,0xd6,0xff,0x05,0xa4,0x01]
+
+v_sad_u8 v5, s1, v255, exec_hi
+// GFX11: encoding: [0x05,0x00,0x22,0xd6,0x01,0xfe,0xff,0x01]
+
+v_sad_u8 v5, s105, s105, exec_lo
+// GFX11: encoding: [0x05,0x00,0x22,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_sad_u8 v5, vcc_lo, ttmp15, v3
+// GFX11: encoding: [0x05,0x00,0x22,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_sad_u8 v5, vcc_hi, 0xaf123456, v255
+// GFX11: encoding: [0x05,0x00,0x22,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+v_sad_u8 v5, ttmp15, src_scc, ttmp15
+// GFX11: encoding: [0x05,0x00,0x22,0xd6,0x7b,0xfa,0xed,0x01]
+
+v_sad_u8 v5, m0, 0.5, m0
+// GFX11: encoding: [0x05,0x00,0x22,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_sad_u8 v5, exec_lo, -1, vcc_hi
+// GFX11: encoding: [0x05,0x00,0x22,0xd6,0x7e,0x82,0xad,0x01]
+
+v_sad_u8 v5, exec_hi, null, vcc_lo
+// GFX11: encoding: [0x05,0x00,0x22,0xd6,0x7f,0xf8,0xa8,0x01]
+
+v_sad_u8 v5, null, exec_lo, 0xaf123456
+// GFX11: encoding: [0x05,0x00,0x22,0xd6,0x7c,0xfc,0xfc,0x03,0x56,0x34,0x12,0xaf]
+
+v_sad_u8 v5, -1, exec_hi, src_scc
+// GFX11: encoding: [0x05,0x00,0x22,0xd6,0xc1,0xfe,0xf4,0x03]
+
+v_sad_u8 v5, 0.5, m0, 0.5
+// GFX11: encoding: [0x05,0x00,0x22,0xd6,0xf0,0xfa,0xc0,0x03]
+
+v_sad_u8 v5, src_scc, vcc_lo, -1
+// GFX11: encoding: [0x05,0x00,0x22,0xd6,0xfd,0xd4,0x04,0x03]
+
+v_sad_u8 v255, 0xaf123456, vcc_hi, null clamp
+// GFX11: encoding: [0xff,0x80,0x22,0xd6,0xff,0xd6,0xf0,0x01,0x56,0x34,0x12,0xaf]
+
+v_sub_co_u32 v5, s6, v1, v2
+// W32: encoding: [0x05,0x06,0x01,0xd7,0x01,0x05,0x02,0x00]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32 v5, s6, v255, v255
+// W32: encoding: [0x05,0x06,0x01,0xd7,0xff,0xff,0x03,0x00]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32 v5, s6, s1, s2
+// W32: encoding: [0x05,0x06,0x01,0xd7,0x01,0x04,0x00,0x00]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32 v5, s6, s105, s105
+// W32: encoding: [0x05,0x06,0x01,0xd7,0x69,0xd2,0x00,0x00]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32 v5, s6, vcc_lo, ttmp15
+// W32: encoding: [0x05,0x06,0x01,0xd7,0x6a,0xf6,0x00,0x00]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32 v5, s6, vcc_hi, 0xaf123456
+// W32: encoding: [0x05,0x06,0x01,0xd7,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32 v5, s6, ttmp15, src_scc
+// W32: encoding: [0x05,0x06,0x01,0xd7,0x7b,0xfa,0x01,0x00]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32 v5, s6, m0, 0.5
+// W32: encoding: [0x05,0x06,0x01,0xd7,0x7d,0xe0,0x01,0x00]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32 v5, s6, exec_lo, -1
+// W32: encoding: [0x05,0x06,0x01,0xd7,0x7e,0x82,0x01,0x00]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32 v5, s6, exec_hi, null
+// W32: encoding: [0x05,0x06,0x01,0xd7,0x7f,0xf8,0x00,0x00]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32 v5, s105, null, exec_lo
+// W32: encoding: [0x05,0x69,0x01,0xd7,0x7c,0xfc,0x00,0x00]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32 v5, vcc_lo, -1, exec_hi
+// W32: encoding: [0x05,0x6a,0x01,0xd7,0xc1,0xfe,0x00,0x00]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32 v5, vcc_hi, 0.5, m0
+// W32: encoding: [0x05,0x6b,0x01,0xd7,0xf0,0xfa,0x00,0x00]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32 v5, ttmp15, src_scc, vcc_lo
+// W32: encoding: [0x05,0x7b,0x01,0xd7,0xfd,0xd4,0x00,0x00]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32 v5, s[12:13], v1, v2
+// W64: encoding: [0x05,0x0c,0x01,0xd7,0x01,0x05,0x02,0x00]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32 v5, s[12:13], v255, v255
+// W64: encoding: [0x05,0x0c,0x01,0xd7,0xff,0xff,0x03,0x00]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32 v5, s[12:13], s1, s2
+// W64: encoding: [0x05,0x0c,0x01,0xd7,0x01,0x04,0x00,0x00]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32 v5, s[12:13], s105, s105
+// W64: encoding: [0x05,0x0c,0x01,0xd7,0x69,0xd2,0x00,0x00]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32 v5, s[12:13], vcc_lo, ttmp15
+// W64: encoding: [0x05,0x0c,0x01,0xd7,0x6a,0xf6,0x00,0x00]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32 v5, s[12:13], vcc_hi, 0xaf123456
+// W64: encoding: [0x05,0x0c,0x01,0xd7,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32 v5, s[12:13], ttmp15, src_scc
+// W64: encoding: [0x05,0x0c,0x01,0xd7,0x7b,0xfa,0x01,0x00]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32 v5, s[12:13], m0, 0.5
+// W64: encoding: [0x05,0x0c,0x01,0xd7,0x7d,0xe0,0x01,0x00]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32 v5, s[12:13], exec_lo, -1
+// W64: encoding: [0x05,0x0c,0x01,0xd7,0x7e,0x82,0x01,0x00]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32 v5, s[12:13], exec_hi, null
+// W64: encoding: [0x05,0x0c,0x01,0xd7,0x7f,0xf8,0x00,0x00]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32 v5, s[12:13], null, exec_lo
+// W64: encoding: [0x05,0x0c,0x01,0xd7,0x7c,0xfc,0x00,0x00]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32 v5, s[104:105], -1, exec_hi
+// W64: encoding: [0x05,0x68,0x01,0xd7,0xc1,0xfe,0x00,0x00]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32 v5, vcc, 0.5, m0
+// W64: encoding: [0x05,0x6a,0x01,0xd7,0xf0,0xfa,0x00,0x00]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: operands are not valid for this GPU or mode
+
+v_sub_co_u32 v5, ttmp[14:15], src_scc, vcc_lo
+// W64: encoding: [0x05,0x7a,0x01,0xd7,0xfd,0xd4,0x00,0x00]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32 v255, null, 0xaf123456, vcc_hi clamp
+// GFX11: encoding: [0xff,0xfc,0x01,0xd7,0xff,0xd6,0x00,0x00,0x56,0x34,0x12,0xaf]
+
+v_sub_nc_i16 v5, v1, v2
+// GFX11: encoding: [0x05,0x00,0x0e,0xd7,0x01,0x05,0x02,0x00]
+
+v_sub_nc_i16 v5, v255, v255
+// GFX11: encoding: [0x05,0x00,0x0e,0xd7,0xff,0xff,0x03,0x00]
+
+v_sub_nc_i16 v5, s1, s2
+// GFX11: encoding: [0x05,0x00,0x0e,0xd7,0x01,0x04,0x00,0x00]
+
+v_sub_nc_i16 v5, s105, s105
+// GFX11: encoding: [0x05,0x00,0x0e,0xd7,0x69,0xd2,0x00,0x00]
+
+v_sub_nc_i16 v5, vcc_lo, ttmp15
+// GFX11: encoding: [0x05,0x00,0x0e,0xd7,0x6a,0xf6,0x00,0x00]
+
+v_sub_nc_i16 v5, vcc_hi, 0xfe0b
+// GFX11: encoding: [0x05,0x00,0x0e,0xd7,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+v_sub_nc_i16 v5, ttmp15, src_scc
+// GFX11: encoding: [0x05,0x00,0x0e,0xd7,0x7b,0xfa,0x01,0x00]
+
+v_sub_nc_i16 v5, m0, 0.5
+// GFX11: encoding: [0x05,0x00,0x0e,0xd7,0x7d,0xe0,0x01,0x00]
+
+v_sub_nc_i16 v5, exec_lo, -1
+// GFX11: encoding: [0x05,0x00,0x0e,0xd7,0x7e,0x82,0x01,0x00]
+
+v_sub_nc_i16 v5, exec_hi, null
+// GFX11: encoding: [0x05,0x00,0x0e,0xd7,0x7f,0xf8,0x00,0x00]
+
+v_sub_nc_i16 v5, null, exec_lo op_sel:[1,1,1]
+// GFX11: encoding: [0x05,0x58,0x0e,0xd7,0x7c,0xfc,0x00,0x00]
+
+v_sub_nc_i16 v5, -1, exec_hi op_sel:[0,0,0]
+// GFX11: encoding: [0x05,0x00,0x0e,0xd7,0xc1,0xfe,0x00,0x00]
+
+v_sub_nc_i16 v5, 0.5, m0 op_sel:[1,0,0]
+// GFX11: encoding: [0x05,0x08,0x0e,0xd7,0xf0,0xfa,0x00,0x00]
+
+v_sub_nc_i16 v5, src_scc, vcc_lo op_sel:[0,1,0]
+// GFX11: encoding: [0x05,0x10,0x0e,0xd7,0xfd,0xd4,0x00,0x00]
+
+v_sub_nc_i16 v255, 0xfe0b, vcc_hi op_sel:[0,0,1] clamp
+// GFX11: encoding: [0xff,0xc0,0x0e,0xd7,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
+
+v_sub_nc_i32 v5, v1, v2
+// GFX11: encoding: [0x05,0x00,0x25,0xd7,0x01,0x05,0x02,0x00]
+
+v_sub_nc_i32 v5, v255, v255
+// GFX11: encoding: [0x05,0x00,0x25,0xd7,0xff,0xff,0x03,0x00]
+
+v_sub_nc_i32 v5, s1, s2
+// GFX11: encoding: [0x05,0x00,0x25,0xd7,0x01,0x04,0x00,0x00]
+
+v_sub_nc_i32 v5, s105, s105
+// GFX11: encoding: [0x05,0x00,0x25,0xd7,0x69,0xd2,0x00,0x00]
+
+v_sub_nc_i32 v5, vcc_lo, ttmp15
+// GFX11: encoding: [0x05,0x00,0x25,0xd7,0x6a,0xf6,0x00,0x00]
+
+v_sub_nc_i32 v5, vcc_hi, 0xaf123456
+// GFX11: encoding: [0x05,0x00,0x25,0xd7,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_sub_nc_i32 v5, ttmp15, src_scc
+// GFX11: encoding: [0x05,0x00,0x25,0xd7,0x7b,0xfa,0x01,0x00]
+
+v_sub_nc_i32 v5, m0, 0.5
+// GFX11: encoding: [0x05,0x00,0x25,0xd7,0x7d,0xe0,0x01,0x00]
+
+v_sub_nc_i32 v5, exec_lo, -1
+// GFX11: encoding: [0x05,0x00,0x25,0xd7,0x7e,0x82,0x01,0x00]
+
+v_sub_nc_i32 v5, exec_hi, null
+// GFX11: encoding: [0x05,0x00,0x25,0xd7,0x7f,0xf8,0x00,0x00]
+
+v_sub_nc_i32 v5, null, exec_lo
+// GFX11: encoding: [0x05,0x00,0x25,0xd7,0x7c,0xfc,0x00,0x00]
+
+v_sub_nc_i32 v5, -1, exec_hi
+// GFX11: encoding: [0x05,0x00,0x25,0xd7,0xc1,0xfe,0x00,0x00]
+
+v_sub_nc_i32 v5, 0.5, m0
+// GFX11: encoding: [0x05,0x00,0x25,0xd7,0xf0,0xfa,0x00,0x00]
+
+v_sub_nc_i32 v5, src_scc, vcc_lo
+// GFX11: encoding: [0x05,0x00,0x25,0xd7,0xfd,0xd4,0x00,0x00]
+
+v_sub_nc_i32 v255, 0xaf123456, vcc_hi clamp
+// GFX11: encoding: [0xff,0x80,0x25,0xd7,0xff,0xd6,0x00,0x00,0x56,0x34,0x12,0xaf]
+
+v_sub_nc_u16 v5, v1, v2
+// GFX11: encoding: [0x05,0x00,0x04,0xd7,0x01,0x05,0x02,0x00]
+
+v_sub_nc_u16 v5, v255, v255
+// GFX11: encoding: [0x05,0x00,0x04,0xd7,0xff,0xff,0x03,0x00]
+
+v_sub_nc_u16 v5, s1, s2
+// GFX11: encoding: [0x05,0x00,0x04,0xd7,0x01,0x04,0x00,0x00]
+
+v_sub_nc_u16 v5, s105, s105
+// GFX11: encoding: [0x05,0x00,0x04,0xd7,0x69,0xd2,0x00,0x00]
+
+v_sub_nc_u16 v5, vcc_lo, ttmp15
+// GFX11: encoding: [0x05,0x00,0x04,0xd7,0x6a,0xf6,0x00,0x00]
+
+v_sub_nc_u16 v5, vcc_hi, 0xfe0b
+// GFX11: encoding: [0x05,0x00,0x04,0xd7,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+v_sub_nc_u16 v5, ttmp15, src_scc
+// GFX11: encoding: [0x05,0x00,0x04,0xd7,0x7b,0xfa,0x01,0x00]
+
+v_sub_nc_u16 v5, m0, 0.5
+// GFX11: encoding: [0x05,0x00,0x04,0xd7,0x7d,0xe0,0x01,0x00]
+
+v_sub_nc_u16 v5, exec_lo, -1
+// GFX11: encoding: [0x05,0x00,0x04,0xd7,0x7e,0x82,0x01,0x00]
+
+v_sub_nc_u16 v5, exec_hi, null
+// GFX11: encoding: [0x05,0x00,0x04,0xd7,0x7f,0xf8,0x00,0x00]
+
+v_sub_nc_u16 v5, null, exec_lo op_sel:[1,1,1]
+// GFX11: encoding: [0x05,0x58,0x04,0xd7,0x7c,0xfc,0x00,0x00]
+
+v_sub_nc_u16 v5, -1, exec_hi op_sel:[0,0,0]
+// GFX11: encoding: [0x05,0x00,0x04,0xd7,0xc1,0xfe,0x00,0x00]
+
+v_sub_nc_u16 v5, 0.5, m0 op_sel:[1,0,0]
+// GFX11: encoding: [0x05,0x08,0x04,0xd7,0xf0,0xfa,0x00,0x00]
+
+v_sub_nc_u16 v5, src_scc, vcc_lo op_sel:[0,1,0]
+// GFX11: encoding: [0x05,0x10,0x04,0xd7,0xfd,0xd4,0x00,0x00]
+
+v_sub_nc_u16 v255, 0xfe0b, vcc_hi op_sel:[0,0,1] clamp
+// GFX11: encoding: [0xff,0xc0,0x04,0xd7,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
+
+v_subrev_co_u32 v5, s6, v1, v2
+// W32: encoding: [0x05,0x06,0x02,0xd7,0x01,0x05,0x02,0x00]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32 v5, s6, v255, v255
+// W32: encoding: [0x05,0x06,0x02,0xd7,0xff,0xff,0x03,0x00]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32 v5, s6, s1, s2
+// W32: encoding: [0x05,0x06,0x02,0xd7,0x01,0x04,0x00,0x00]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32 v5, s6, s105, s105
+// W32: encoding: [0x05,0x06,0x02,0xd7,0x69,0xd2,0x00,0x00]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32 v5, s6, vcc_lo, ttmp15
+// W32: encoding: [0x05,0x06,0x02,0xd7,0x6a,0xf6,0x00,0x00]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32 v5, s6, vcc_hi, 0xaf123456
+// W32: encoding: [0x05,0x06,0x02,0xd7,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32 v5, s6, ttmp15, src_scc
+// W32: encoding: [0x05,0x06,0x02,0xd7,0x7b,0xfa,0x01,0x00]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32 v5, s6, m0, 0.5
+// W32: encoding: [0x05,0x06,0x02,0xd7,0x7d,0xe0,0x01,0x00]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32 v5, s6, exec_lo, -1
+// W32: encoding: [0x05,0x06,0x02,0xd7,0x7e,0x82,0x01,0x00]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32 v5, s6, exec_hi, null
+// W32: encoding: [0x05,0x06,0x02,0xd7,0x7f,0xf8,0x00,0x00]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32 v5, s105, null, exec_lo
+// W32: encoding: [0x05,0x69,0x02,0xd7,0x7c,0xfc,0x00,0x00]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32 v5, vcc_lo, -1, exec_hi
+// W32: encoding: [0x05,0x6a,0x02,0xd7,0xc1,0xfe,0x00,0x00]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32 v5, vcc_hi, 0.5, m0
+// W32: encoding: [0x05,0x6b,0x02,0xd7,0xf0,0xfa,0x00,0x00]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32 v5, ttmp15, src_scc, vcc_lo
+// W32: encoding: [0x05,0x7b,0x02,0xd7,0xfd,0xd4,0x00,0x00]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32 v5, s[12:13], v1, v2
+// W64: encoding: [0x05,0x0c,0x02,0xd7,0x01,0x05,0x02,0x00]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32 v5, s[12:13], v255, v255
+// W64: encoding: [0x05,0x0c,0x02,0xd7,0xff,0xff,0x03,0x00]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32 v5, s[12:13], s1, s2
+// W64: encoding: [0x05,0x0c,0x02,0xd7,0x01,0x04,0x00,0x00]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32 v5, s[12:13], s105, s105
+// W64: encoding: [0x05,0x0c,0x02,0xd7,0x69,0xd2,0x00,0x00]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32 v5, s[12:13], vcc_lo, ttmp15
+// W64: encoding: [0x05,0x0c,0x02,0xd7,0x6a,0xf6,0x00,0x00]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32 v5, s[12:13], vcc_hi, 0xaf123456
+// W64: encoding: [0x05,0x0c,0x02,0xd7,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32 v5, s[12:13], ttmp15, src_scc
+// W64: encoding: [0x05,0x0c,0x02,0xd7,0x7b,0xfa,0x01,0x00]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32 v5, s[12:13], m0, 0.5
+// W64: encoding: [0x05,0x0c,0x02,0xd7,0x7d,0xe0,0x01,0x00]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32 v5, s[12:13], exec_lo, -1
+// W64: encoding: [0x05,0x0c,0x02,0xd7,0x7e,0x82,0x01,0x00]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32 v5, s[12:13], exec_hi, null
+// W64: encoding: [0x05,0x0c,0x02,0xd7,0x7f,0xf8,0x00,0x00]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32 v5, s[12:13], null, exec_lo
+// W64: encoding: [0x05,0x0c,0x02,0xd7,0x7c,0xfc,0x00,0x00]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32 v5, s[104:105], -1, exec_hi
+// W64: encoding: [0x05,0x68,0x02,0xd7,0xc1,0xfe,0x00,0x00]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32 v5, vcc, 0.5, m0
+// W64: encoding: [0x05,0x6a,0x02,0xd7,0xf0,0xfa,0x00,0x00]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: operands are not valid for this GPU or mode
+
+v_subrev_co_u32 v5, ttmp[14:15], src_scc, vcc_lo
+// W64: encoding: [0x05,0x7a,0x02,0xd7,0xfd,0xd4,0x00,0x00]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32 v255, null, 0xaf123456, vcc_hi clamp
+// GFX11: encoding: [0xff,0xfc,0x02,0xd7,0xff,0xd6,0x00,0x00,0x56,0x34,0x12,0xaf]
+
+v_trig_preop_f64 v[5:6], v[1:2], v2
+// GFX11: encoding: [0x05,0x00,0x2f,0xd7,0x01,0x05,0x02,0x00]
+
+v_trig_preop_f64 v[5:6], v[1:2], v255
+// GFX11: encoding: [0x05,0x00,0x2f,0xd7,0x01,0xff,0x03,0x00]
+
+v_trig_preop_f64 v[5:6], v[1:2], s2
+// GFX11: encoding: [0x05,0x00,0x2f,0xd7,0x01,0x05,0x00,0x00]
+
+v_trig_preop_f64 v[5:6], v[1:2], s105
+// GFX11: encoding: [0x05,0x00,0x2f,0xd7,0x01,0xd3,0x00,0x00]
+
+v_trig_preop_f64 v[5:6], v[254:255], ttmp15
+// GFX11: encoding: [0x05,0x00,0x2f,0xd7,0xfe,0xf7,0x00,0x00]
+
+v_trig_preop_f64 v[5:6], s[2:3], vcc_hi
+// GFX11: encoding: [0x05,0x00,0x2f,0xd7,0x02,0xd6,0x00,0x00]
+
+v_trig_preop_f64 v[5:6], s[104:105], vcc_lo
+// GFX11: encoding: [0x05,0x00,0x2f,0xd7,0x68,0xd4,0x00,0x00]
+
+v_trig_preop_f64 v[5:6], vcc, m0
+// GFX11: encoding: [0x05,0x00,0x2f,0xd7,0x6a,0xfa,0x00,0x00]
+
+v_trig_preop_f64 v[5:6], ttmp[14:15], exec_hi
+// GFX11: encoding: [0x05,0x00,0x2f,0xd7,0x7a,0xfe,0x00,0x00]
+
+v_trig_preop_f64 v[5:6], exec, exec_lo
+// GFX11: encoding: [0x05,0x00,0x2f,0xd7,0x7e,0xfc,0x00,0x00]
+
+v_trig_preop_f64 v[5:6], null, null
+// GFX11: encoding: [0x05,0x00,0x2f,0xd7,0x7c,0xf8,0x00,0x00]
+
+v_trig_preop_f64 v[5:6], -1, -1
+// GFX11: encoding: [0x05,0x00,0x2f,0xd7,0xc1,0x82,0x01,0x00]
+
+v_trig_preop_f64 v[5:6], 0.5, 0.5 mul:2
+// GFX11: encoding: [0x05,0x00,0x2f,0xd7,0xf0,0xe0,0x01,0x08]
+
+v_trig_preop_f64 v[5:6], -|src_scc|, src_scc mul:4
+// GFX11: encoding: [0x05,0x01,0x2f,0xd7,0xfd,0xfa,0x01,0x30]
+
+v_trig_preop_f64 v[254:255], 0xaf123456, 0xaf123456 clamp div:2
+// GFX11: encoding: [0xfe,0x80,0x2f,0xd7,0xff,0xfe,0x01,0x18,0x56,0x34,0x12,0xaf]
+
+v_writelane_b32 v5, s1, s2
+// GFX11: encoding: [0x05,0x00,0x61,0xd7,0x01,0x04,0x00,0x00]
+
+v_writelane_b32 v5, s105, s2
+// GFX11: encoding: [0x05,0x00,0x61,0xd7,0x69,0x04,0x00,0x00]
+
+v_writelane_b32 v5, vcc_lo, s2
+// GFX11: encoding: [0x05,0x00,0x61,0xd7,0x6a,0x04,0x00,0x00]
+
+v_writelane_b32 v5, vcc_hi, s2
+// GFX11: encoding: [0x05,0x00,0x61,0xd7,0x6b,0x04,0x00,0x00]
+
+v_writelane_b32 v5, ttmp15, s2
+// GFX11: encoding: [0x05,0x00,0x61,0xd7,0x7b,0x04,0x00,0x00]
+
+v_writelane_b32 v5, m0, s2
+// GFX11: encoding: [0x05,0x00,0x61,0xd7,0x7d,0x04,0x00,0x00]
+
+v_writelane_b32 v5, exec_lo, s2
+// GFX11: encoding: [0x05,0x00,0x61,0xd7,0x7e,0x04,0x00,0x00]
+
+v_writelane_b32 v5, exec_hi, s105
+// GFX11: encoding: [0x05,0x00,0x61,0xd7,0x7f,0xd2,0x00,0x00]
+
+v_writelane_b32 v5, null, ttmp15
+// GFX11: encoding: [0x05,0x00,0x61,0xd7,0x7c,0xf6,0x00,0x00]
+
+v_writelane_b32 v5, -1, null
+// GFX11: encoding: [0x05,0x00,0x61,0xd7,0xc1,0xf8,0x00,0x00]
+
+v_writelane_b32 v5, 0.5, m0
+// GFX11: encoding: [0x05,0x00,0x61,0xd7,0xf0,0xfa,0x00,0x00]
+
+v_writelane_b32 v5, src_scc, vcc_lo
+// GFX11: encoding: [0x05,0x00,0x61,0xd7,0xfd,0xd4,0x00,0x00]
+
+v_writelane_b32 v255, 0xaf123456, vcc_hi
+// GFX11: encoding: [0xff,0x00,0x61,0xd7,0xff,0xd6,0x00,0x00,0x56,0x34,0x12,0xaf]
+
+v_xad_u32 v5, v1, v2, s3
+// GFX11: encoding: [0x05,0x00,0x45,0xd6,0x01,0x05,0x0e,0x00]
+
+v_xad_u32 v5, v255, s2, s105
+// GFX11: encoding: [0x05,0x00,0x45,0xd6,0xff,0x05,0xa4,0x01]
+
+v_xad_u32 v5, s1, v255, exec_hi
+// GFX11: encoding: [0x05,0x00,0x45,0xd6,0x01,0xfe,0xff,0x01]
+
+v_xad_u32 v5, s105, s105, exec_lo
+// GFX11: encoding: [0x05,0x00,0x45,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_xad_u32 v5, vcc_lo, ttmp15, v3
+// GFX11: encoding: [0x05,0x00,0x45,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_xad_u32 v5, vcc_hi, 0xaf123456, v255
+// GFX11: encoding: [0x05,0x00,0x45,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+v_xad_u32 v5, ttmp15, src_scc, ttmp15
+// GFX11: encoding: [0x05,0x00,0x45,0xd6,0x7b,0xfa,0xed,0x01]
+
+v_xad_u32 v5, m0, 0.5, m0
+// GFX11: encoding: [0x05,0x00,0x45,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_xad_u32 v5, exec_lo, -1, vcc_hi
+// GFX11: encoding: [0x05,0x00,0x45,0xd6,0x7e,0x82,0xad,0x01]
+
+v_xad_u32 v5, exec_hi, null, vcc_lo
+// GFX11: encoding: [0x05,0x00,0x45,0xd6,0x7f,0xf8,0xa8,0x01]
+
+v_xad_u32 v5, null, exec_lo, 0xaf123456
+// GFX11: encoding: [0x05,0x00,0x45,0xd6,0x7c,0xfc,0xfc,0x03,0x56,0x34,0x12,0xaf]
+
+v_xad_u32 v5, -1, exec_hi, src_scc
+// GFX11: encoding: [0x05,0x00,0x45,0xd6,0xc1,0xfe,0xf4,0x03]
+
+v_xad_u32 v5, 0.5, m0, 0.5
+// GFX11: encoding: [0x05,0x00,0x45,0xd6,0xf0,0xfa,0xc0,0x03]
+
+v_xad_u32 v5, src_scc, vcc_lo, -1
+// GFX11: encoding: [0x05,0x00,0x45,0xd6,0xfd,0xd4,0x04,0x03]
+
+v_xad_u32 v255, 0xaf123456, vcc_hi, null
+// GFX11: encoding: [0xff,0x00,0x45,0xd6,0xff,0xd6,0xf0,0x01,0x56,0x34,0x12,0xaf]
+
+v_xor3_b32 v5, v1, v2, s3
+// GFX11: encoding: [0x05,0x00,0x40,0xd6,0x01,0x05,0x0e,0x00]
+
+v_xor3_b32 v5, v255, s2, s105
+// GFX11: encoding: [0x05,0x00,0x40,0xd6,0xff,0x05,0xa4,0x01]
+
+v_xor3_b32 v5, s1, v255, exec_hi
+// GFX11: encoding: [0x05,0x00,0x40,0xd6,0x01,0xfe,0xff,0x01]
+
+v_xor3_b32 v5, s105, s105, exec_lo
+// GFX11: encoding: [0x05,0x00,0x40,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_xor3_b32 v5, vcc_lo, ttmp15, v3
+// GFX11: encoding: [0x05,0x00,0x40,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_xor3_b32 v5, vcc_hi, 0xaf123456, v255
+// GFX11: encoding: [0x05,0x00,0x40,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+v_xor3_b32 v5, ttmp15, src_scc, ttmp15
+// GFX11: encoding: [0x05,0x00,0x40,0xd6,0x7b,0xfa,0xed,0x01]
+
+v_xor3_b32 v5, m0, 0.5, m0
+// GFX11: encoding: [0x05,0x00,0x40,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_xor3_b32 v5, exec_lo, -1, vcc_hi
+// GFX11: encoding: [0x05,0x00,0x40,0xd6,0x7e,0x82,0xad,0x01]
+
+v_xor3_b32 v5, exec_hi, null, vcc_lo
+// GFX11: encoding: [0x05,0x00,0x40,0xd6,0x7f,0xf8,0xa8,0x01]
+
+v_xor3_b32 v5, null, exec_lo, 0xaf123456
+// GFX11: encoding: [0x05,0x00,0x40,0xd6,0x7c,0xfc,0xfc,0x03,0x56,0x34,0x12,0xaf]
+
+v_xor3_b32 v5, -1, exec_hi, src_scc
+// GFX11: encoding: [0x05,0x00,0x40,0xd6,0xc1,0xfe,0xf4,0x03]
+
+v_xor3_b32 v5, 0.5, m0, 0.5
+// GFX11: encoding: [0x05,0x00,0x40,0xd6,0xf0,0xfa,0xc0,0x03]
+
+v_xor3_b32 v5, src_scc, vcc_lo, -1
+// GFX11: encoding: [0x05,0x00,0x40,0xd6,0xfd,0xd4,0x04,0x03]
+
+v_xor3_b32 v255, 0xaf123456, vcc_hi, null
+// GFX11: encoding: [0xff,0x00,0x40,0xd6,0xff,0xd6,0xf0,0x01,0x56,0x34,0x12,0xaf]
+
+v_xor_b16 v5, v1, v2
+// GFX11: encoding: [0x05,0x00,0x64,0xd7,0x01,0x05,0x02,0x00]
+
+v_xor_b16 v5, v255, v255
+// GFX11: encoding: [0x05,0x00,0x64,0xd7,0xff,0xff,0x03,0x00]
+
+v_xor_b16 v5, s1, s2
+// GFX11: encoding: [0x05,0x00,0x64,0xd7,0x01,0x04,0x00,0x00]
+
+v_xor_b16 v5, s105, s105
+// GFX11: encoding: [0x05,0x00,0x64,0xd7,0x69,0xd2,0x00,0x00]
+
+v_xor_b16 v5, vcc_lo, ttmp15
+// GFX11: encoding: [0x05,0x00,0x64,0xd7,0x6a,0xf6,0x00,0x00]
+
+v_xor_b16 v5, vcc_hi, 0xfe0b
+// GFX11: encoding: [0x05,0x00,0x64,0xd7,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+v_xor_b16 v5, ttmp15, src_scc
+// GFX11: encoding: [0x05,0x00,0x64,0xd7,0x7b,0xfa,0x01,0x00]
+
+v_xor_b16 v5, m0, 0.5
+// GFX11: encoding: [0x05,0x00,0x64,0xd7,0x7d,0xe0,0x01,0x00]
+
+v_xor_b16 v5, exec_lo, -1
+// GFX11: encoding: [0x05,0x00,0x64,0xd7,0x7e,0x82,0x01,0x00]
+
+v_xor_b16 v5, exec_hi, null
+// GFX11: encoding: [0x05,0x00,0x64,0xd7,0x7f,0xf8,0x00,0x00]
+
+v_xor_b16 v5, null, exec_lo
+// GFX11: encoding: [0x05,0x00,0x64,0xd7,0x7c,0xfc,0x00,0x00]
+
+v_xor_b16 v5, -1, exec_hi
+// GFX11: encoding: [0x05,0x00,0x64,0xd7,0xc1,0xfe,0x00,0x00]
+
+v_xor_b16 v5, 0.5, m0
+// GFX11: encoding: [0x05,0x00,0x64,0xd7,0xf0,0xfa,0x00,0x00]
+
+v_xor_b16 v5, src_scc, vcc_lo
+// GFX11: encoding: [0x05,0x00,0x64,0xd7,0xfd,0xd4,0x00,0x00]
+
+v_xor_b16 v255, 0xfe0b, vcc_hi
+// GFX11: encoding: [0xff,0x00,0x64,0xd7,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
diff --git a/llvm/test/MC/AMDGPU/gfx11_asm_vop3.s b/llvm/test/MC/AMDGPU/gfx11_asm_vop3.s
index e025ab7..dadb5156 100644
--- a/llvm/test/MC/AMDGPU/gfx11_asm_vop3.s
+++ b/llvm/test/MC/AMDGPU/gfx11_asm_vop3.s
@@ -1,7 +1,7 @@
-// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1100 -mattr=+wavefrontsize32 -show-encoding %s | FileCheck --check-prefixes=GFX11,W32 %s
-// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1100 -mattr=+wavefrontsize64 -show-encoding %s | FileCheck --check-prefixes=GFX11,W64 %s
-// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1100 -mattr=+wavefrontsize32 %s 2>&1 | FileCheck --check-prefix=W32-ERR --implicit-check-not=error: %s
-// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1100 -mattr=+wavefrontsize64 %s 2>&1 | FileCheck --check-prefix=W64-ERR --implicit-check-not=error: %s
+// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1100 -mattr=+wavefrontsize32,+real-true16 -show-encoding %s | FileCheck --check-prefixes=GFX11,W32 %s
+// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1100 -mattr=+wavefrontsize64,+real-true16 -show-encoding %s | FileCheck --check-prefixes=GFX11,W64 %s
+// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1100 -mattr=+wavefrontsize32,+real-true16 %s 2>&1 | FileCheck --check-prefix=W32-ERR --implicit-check-not=error: %s
+// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1100 -mattr=+wavefrontsize64,+real-true16 %s 2>&1 | FileCheck --check-prefix=W64-ERR --implicit-check-not=error: %s
v_add3_u32 v5, v1, v2, s3
// GFX11: encoding: [0x05,0x00,0x55,0xd6,0x01,0x05,0x0e,0x00]
diff --git a/llvm/test/MC/AMDGPU/gfx11_asm_vop3_dpp16-fake16.s b/llvm/test/MC/AMDGPU/gfx11_asm_vop3_dpp16-fake16.s
new file mode 100644
index 0000000..2371dbc
--- /dev/null
+++ b/llvm/test/MC/AMDGPU/gfx11_asm_vop3_dpp16-fake16.s
@@ -0,0 +1,4695 @@
+// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1100 -mattr=+wavefrontsize32,-real-true16 -show-encoding %s | FileCheck --check-prefixes=GFX11,W32 %s
+// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1100 -mattr=+wavefrontsize64,-real-true16 -show-encoding %s | FileCheck --check-prefixes=GFX11,W64 %s
+// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1100 -mattr=+wavefrontsize32,-real-true16 %s 2>&1 | FileCheck --check-prefixes=GFX11-ERR,W32-ERR --implicit-check-not=error: %s
+// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1100 -mattr=+wavefrontsize64,-real-true16 %s 2>&1 | FileCheck --check-prefixes=GFX11-ERR,W64-ERR --implicit-check-not=error: %s
+
+v_add3_u32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX11: [0x05,0x00,0x55,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_add3_u32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX11: [0x05,0x00,0x55,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_add3_u32_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX11: [0x05,0x00,0x55,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_add3_u32_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX11: [0x05,0x00,0x55,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_add3_u32_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX11: [0x05,0x00,0x55,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_add3_u32_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX11: [0x05,0x00,0x55,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_add3_u32_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX11: [0x05,0x00,0x55,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_add3_u32_e64_dpp v5, v1, v2, ttmp15 row_shr:15
+// GFX11: [0x05,0x00,0x55,0xd6,0xfa,0x04,0xee,0x01,0x01,0x1f,0x01,0xff]
+
+v_add3_u32_e64_dpp v5, v1, v2, exec_hi row_ror:1
+// GFX11: [0x05,0x00,0x55,0xd6,0xfa,0x04,0xfe,0x01,0x01,0x21,0x01,0xff]
+
+v_add3_u32_e64_dpp v5, v1, v2, exec_lo row_ror:15
+// GFX11: [0x05,0x00,0x55,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x2f,0x01,0xff]
+
+v_add3_u32_e64_dpp v5, v1, v2, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x00,0x55,0xd6,0xfa,0x04,0xf2,0x01,0x01,0x50,0x01,0xff]
+
+v_add3_u32_e64_dpp v5, v1, v2, -1 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x00,0x55,0xd6,0xfa,0x04,0x06,0x03,0x01,0x5f,0x01,0x01]
+
+v_add3_u32_e64_dpp v5, v1, v2, 0.5 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX11: [0x05,0x00,0x55,0xd6,0xfa,0x04,0xc2,0x03,0x01,0x60,0x09,0x13]
+
+v_add3_u32_e64_dpp v255, v255, v255, src_scc row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX11: [0xff,0x00,0x55,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x05,0x30]
+
+v_add_co_u32_e64_dpp v5, s6, v1, v2 quad_perm:[3,2,1,0]
+// W32: [0x05,0x06,0x00,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32_e64_dpp v5, s6, v1, v2 quad_perm:[0,1,2,3]
+// W32: [0x05,0x06,0x00,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32_e64_dpp v5, s6, v1, v2 row_mirror
+// W32: [0x05,0x06,0x00,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32_e64_dpp v5, s6, v1, v2 row_half_mirror
+// W32: [0x05,0x06,0x00,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32_e64_dpp v5, s6, v1, v2 row_shl:1
+// W32: [0x05,0x06,0x00,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32_e64_dpp v5, s6, v1, v2 row_shl:15
+// W32: [0x05,0x06,0x00,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32_e64_dpp v5, s6, v1, v2 row_shr:1
+// W32: [0x05,0x06,0x00,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32_e64_dpp v5, s6, v1, v2 row_shr:15
+// W32: [0x05,0x06,0x00,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32_e64_dpp v5, s6, v1, v2 row_ror:1
+// W32: [0x05,0x06,0x00,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32_e64_dpp v5, s105, v1, v2 row_ror:15
+// W32: [0x05,0x69,0x00,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32_e64_dpp v5, vcc_lo, v1, v2 row_share:0 row_mask:0xf bank_mask:0xf
+// W32: [0x05,0x6a,0x00,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32_e64_dpp v5, vcc_hi, v1, v2 row_share:15 row_mask:0x0 bank_mask:0x1
+// W32: [0x05,0x6b,0x00,0xd7,0xfa,0x04,0x02,0x00,0x01,0x5f,0x01,0x01]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32_e64_dpp v5, ttmp15, v1, v2 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// W32: [0x05,0x7b,0x00,0xd7,0xfa,0x04,0x02,0x00,0x01,0x60,0x09,0x13]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32_e64_dpp v5, s[12:13], v1, v2 quad_perm:[3,2,1,0]
+// W64: [0x05,0x0c,0x00,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32_e64_dpp v5, s[12:13], v1, v2 quad_perm:[0,1,2,3]
+// W64: [0x05,0x0c,0x00,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32_e64_dpp v5, s[12:13], v1, v2 row_mirror
+// W64: [0x05,0x0c,0x00,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32_e64_dpp v5, s[12:13], v1, v2 row_half_mirror
+// W64: [0x05,0x0c,0x00,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32_e64_dpp v5, s[12:13], v1, v2 row_shl:1
+// W64: [0x05,0x0c,0x00,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32_e64_dpp v5, s[12:13], v1, v2 row_shl:15
+// W64: [0x05,0x0c,0x00,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32_e64_dpp v5, s[12:13], v1, v2 row_shr:1
+// W64: [0x05,0x0c,0x00,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32_e64_dpp v5, s[12:13], v1, v2 row_shr:15
+// W64: [0x05,0x0c,0x00,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32_e64_dpp v5, s[12:13], v1, v2 row_ror:1
+// W64: [0x05,0x0c,0x00,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32_e64_dpp v5, s[12:13], v1, v2 row_ror:15
+// W64: [0x05,0x0c,0x00,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32_e64_dpp v5, s[104:105], v1, v2 row_share:0 row_mask:0xf bank_mask:0xf
+// W64: [0x05,0x68,0x00,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32_e64_dpp v5, vcc, v1, v2 row_share:15 row_mask:0x0 bank_mask:0x1
+// W64: [0x05,0x6a,0x00,0xd7,0xfa,0x04,0x02,0x00,0x01,0x5f,0x01,0x01]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32_e64_dpp v5, ttmp[14:15], v1, v2 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// W64: [0x05,0x7a,0x00,0xd7,0xfa,0x04,0x02,0x00,0x01,0x60,0x09,0x13]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32_e64_dpp v255, null, v255, v255 clamp row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX11: [0xff,0xfc,0x00,0xd7,0xfa,0xfe,0x03,0x00,0xff,0x6f,0x05,0x30]
+
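+// Reading the check bytes: in each 12-byte encoding, byte 8 is the DPP
+// source VGPR, byte 9 holds dpp_ctrl[7:0], byte 10 holds dpp_ctrl[8]
+// (bit 0) plus fi (bit 2) and bound_ctrl (bit 3), and byte 11 packs
+// row_mask in the high nibble and bank_mask in the low nibble (default
+// 0xff). Worked from the tests: quad_perm:[3,2,1,0] -> dpp_ctrl 0x1b (two
+// bits per lane select, lane 0 lowest: 0b00_01_10_11); row_shl:1 -> 0x101,
+// row_shr:1 -> 0x111, row_ror:1 -> 0x121, row_mirror -> 0x140,
+// row_share:0 -> 0x150, row_xmask:0 -> 0x160, which is why byte 10 carries
+// a constant 0x01 for the row_* forms. bound_ctrl:1 fi:0 makes byte 10
+// 0x09; bound_ctrl:0 fi:1 makes it 0x05.
+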
+v_add_lshl_u32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX11: [0x05,0x00,0x47,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_add_lshl_u32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX11: [0x05,0x00,0x47,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_add_lshl_u32_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX11: [0x05,0x00,0x47,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_add_lshl_u32_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX11: [0x05,0x00,0x47,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_add_lshl_u32_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX11: [0x05,0x00,0x47,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_add_lshl_u32_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX11: [0x05,0x00,0x47,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_add_lshl_u32_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX11: [0x05,0x00,0x47,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_add_lshl_u32_e64_dpp v5, v1, v2, ttmp15 row_shr:15
+// GFX11: [0x05,0x00,0x47,0xd6,0xfa,0x04,0xee,0x01,0x01,0x1f,0x01,0xff]
+
+v_add_lshl_u32_e64_dpp v5, v1, v2, exec_hi row_ror:1
+// GFX11: [0x05,0x00,0x47,0xd6,0xfa,0x04,0xfe,0x01,0x01,0x21,0x01,0xff]
+
+v_add_lshl_u32_e64_dpp v5, v1, v2, exec_lo row_ror:15
+// GFX11: [0x05,0x00,0x47,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x2f,0x01,0xff]
+
+v_add_lshl_u32_e64_dpp v5, v1, v2, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x00,0x47,0xd6,0xfa,0x04,0xf2,0x01,0x01,0x50,0x01,0xff]
+
+v_add_lshl_u32_e64_dpp v5, v1, v2, -1 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x00,0x47,0xd6,0xfa,0x04,0x06,0x03,0x01,0x5f,0x01,0x01]
+
+v_add_lshl_u32_e64_dpp v5, v1, v2, 0.5 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX11: [0x05,0x00,0x47,0xd6,0xfa,0x04,0xc2,0x03,0x01,0x60,0x09,0x13]
+
+v_add_lshl_u32_e64_dpp v255, v255, v255, src_scc row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX11: [0xff,0x00,0x47,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x05,0x30]
+
+v_add_nc_i16_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0]
+// GFX11: [0x05,0x00,0x0d,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+
+v_add_nc_i16_e64_dpp v5, v1, v2 quad_perm:[0,1,2,3]
+// GFX11: [0x05,0x00,0x0d,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+
+v_add_nc_i16_e64_dpp v5, v1, v2 row_mirror
+// GFX11: [0x05,0x00,0x0d,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+
+v_add_nc_i16_e64_dpp v5, v1, v2 row_half_mirror
+// GFX11: [0x05,0x00,0x0d,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+
+v_add_nc_i16_e64_dpp v5, v1, v2 row_shl:1
+// GFX11: [0x05,0x00,0x0d,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+
+v_add_nc_i16_e64_dpp v5, v1, v2 row_shl:15
+// GFX11: [0x05,0x00,0x0d,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+
+v_add_nc_i16_e64_dpp v5, v1, v2 row_shr:1
+// GFX11: [0x05,0x00,0x0d,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+
+v_add_nc_i16_e64_dpp v5, v1, v2 row_shr:15
+// GFX11: [0x05,0x00,0x0d,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+
+v_add_nc_i16_e64_dpp v5, v1, v2 row_ror:1
+// GFX11: [0x05,0x00,0x0d,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+
+v_add_nc_i16_e64_dpp v5, v1, v2 row_ror:15
+// GFX11: [0x05,0x00,0x0d,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+
+v_add_nc_i16_e64_dpp v5, v1, v2 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x00,0x0d,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+
+v_add_nc_i16_e64_dpp v5, v1, v2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x00,0x0d,0xd7,0xfa,0x04,0x02,0x00,0x01,0x5f,0x01,0x01]
+
+v_add_nc_i16_e64_dpp v5, v1, v2 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX11: [0x05,0x00,0x0d,0xd7,0xfa,0x04,0x02,0x00,0x01,0x60,0x09,0x13]
+
+v_add_nc_i16_e64_dpp v255, v255, v255 clamp row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX11: [0xff,0x80,0x0d,0xd7,0xfa,0xfe,0x03,0x00,0xff,0x6f,0x05,0x30]
+
+v_add_nc_i32_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0]
+// GFX11: [0x05,0x00,0x26,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+
+v_add_nc_i32_e64_dpp v5, v1, v2 quad_perm:[0,1,2,3]
+// GFX11: [0x05,0x00,0x26,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+
+v_add_nc_i32_e64_dpp v5, v1, v2 row_mirror
+// GFX11: [0x05,0x00,0x26,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+
+v_add_nc_i32_e64_dpp v5, v1, v2 row_half_mirror
+// GFX11: [0x05,0x00,0x26,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+
+v_add_nc_i32_e64_dpp v5, v1, v2 row_shl:1
+// GFX11: [0x05,0x00,0x26,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+
+v_add_nc_i32_e64_dpp v5, v1, v2 row_shl:15
+// GFX11: [0x05,0x00,0x26,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+
+v_add_nc_i32_e64_dpp v5, v1, v2 row_shr:1
+// GFX11: [0x05,0x00,0x26,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+
+v_add_nc_i32_e64_dpp v5, v1, v2 row_shr:15
+// GFX11: [0x05,0x00,0x26,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+
+v_add_nc_i32_e64_dpp v5, v1, v2 row_ror:1
+// GFX11: [0x05,0x00,0x26,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+
+v_add_nc_i32_e64_dpp v5, v1, v2 row_ror:15
+// GFX11: [0x05,0x00,0x26,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+
+v_add_nc_i32_e64_dpp v5, v1, v2 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x00,0x26,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+
+v_add_nc_i32_e64_dpp v5, v1, v2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x00,0x26,0xd7,0xfa,0x04,0x02,0x00,0x01,0x5f,0x01,0x01]
+
+v_add_nc_i32_e64_dpp v5, v1, v2 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX11: [0x05,0x00,0x26,0xd7,0xfa,0x04,0x02,0x00,0x01,0x60,0x09,0x13]
+
+v_add_nc_i32_e64_dpp v255, v255, v255 clamp row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX11: [0xff,0x80,0x26,0xd7,0xfa,0xfe,0x03,0x00,0xff,0x6f,0x05,0x30]
+
+v_add_nc_u16_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0]
+// GFX11: [0x05,0x00,0x03,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+
+v_add_nc_u16_e64_dpp v5, v1, v2 quad_perm:[0,1,2,3]
+// GFX11: [0x05,0x00,0x03,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+
+v_add_nc_u16_e64_dpp v5, v1, v2 row_mirror
+// GFX11: [0x05,0x00,0x03,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+
+v_add_nc_u16_e64_dpp v5, v1, v2 row_half_mirror
+// GFX11: [0x05,0x00,0x03,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+
+v_add_nc_u16_e64_dpp v5, v1, v2 row_shl:1
+// GFX11: [0x05,0x00,0x03,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+
+v_add_nc_u16_e64_dpp v5, v1, v2 row_shl:15
+// GFX11: [0x05,0x00,0x03,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+
+v_add_nc_u16_e64_dpp v5, v1, v2 row_shr:1
+// GFX11: [0x05,0x00,0x03,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+
+v_add_nc_u16_e64_dpp v5, v1, v2 row_shr:15
+// GFX11: [0x05,0x00,0x03,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+
+v_add_nc_u16_e64_dpp v5, v1, v2 row_ror:1
+// GFX11: [0x05,0x00,0x03,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+
+v_add_nc_u16_e64_dpp v5, v1, v2 row_ror:15
+// GFX11: [0x05,0x00,0x03,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+
+v_add_nc_u16_e64_dpp v5, v1, v2 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x00,0x03,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+
+v_add_nc_u16_e64_dpp v5, v1, v2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x00,0x03,0xd7,0xfa,0x04,0x02,0x00,0x01,0x5f,0x01,0x01]
+
+v_add_nc_u16_e64_dpp v5, v1, v2 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX11: [0x05,0x00,0x03,0xd7,0xfa,0x04,0x02,0x00,0x01,0x60,0x09,0x13]
+
+v_add_nc_u16_e64_dpp v255, v255, v255 clamp row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX11: [0xff,0x80,0x03,0xd7,0xfa,0xfe,0x03,0x00,0xff,0x6f,0x05,0x30]
+
+v_alignbit_b32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX11: [0x05,0x00,0x16,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_alignbit_b32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX11: [0x05,0x00,0x16,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_alignbit_b32_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX11: [0x05,0x00,0x16,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_alignbit_b32_e64_dpp v5, v1, v2, v3 row_half_mirror
+// GFX11: [0x05,0x00,0x16,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x41,0x01,0xff]
+
+v_alignbit_b32_e64_dpp v5, v1, v2, v255 row_shl:1
+// GFX11: [0x05,0x00,0x16,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x01,0x01,0xff]
+
+v_alignbit_b32_e64_dpp v5, v1, v2, s105 row_shl:15
+// GFX11: [0x05,0x00,0x16,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x0f,0x01,0xff]
+
+v_alignbit_b32_e64_dpp v5, v1, v2, vcc_hi row_shr:1
+// GFX11: [0x05,0x00,0x16,0xd6,0xfa,0x04,0xae,0x01,0x01,0x11,0x01,0xff]
+
+v_alignbit_b32_e64_dpp v5, v1, v2, vcc_lo row_shr:15
+// GFX11: [0x05,0x00,0x16,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x1f,0x01,0xff]
+
+v_alignbit_b32_e64_dpp v5, v1, v2, ttmp15 row_ror:1
+// GFX11: [0x05,0x00,0x16,0xd6,0xfa,0x04,0xee,0x01,0x01,0x21,0x01,0xff]
+
+v_alignbit_b32_e64_dpp v5, v1, v2, exec_hi row_ror:15
+// GFX11: [0x05,0x00,0x16,0xd6,0xfa,0x04,0xfe,0x01,0x01,0x2f,0x01,0xff]
+
+v_alignbit_b32_e64_dpp v5, v1, v2, exec_lo row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x00,0x16,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x50,0x01,0xff]
+
+v_alignbit_b32_e64_dpp v5, v1, v2, null row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x00,0x16,0xd6,0xfa,0x04,0xf2,0x01,0x01,0x5f,0x01,0x01]
+
+v_alignbit_b32_e64_dpp v5, v1, v2, -1 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX11: [0x05,0x00,0x16,0xd6,0xfa,0x04,0x06,0x03,0x01,0x60,0x09,0x13]
+
+v_alignbit_b32_e64_dpp v255, v255, v255, src_scc row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX11: [0xff,0x00,0x16,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x05,0x30]
+
+v_alignbyte_b32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX11: [0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_alignbyte_b32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX11: [0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_alignbyte_b32_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX11: [0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_alignbyte_b32_e64_dpp v5, v1, v2, v3 row_half_mirror
+// GFX11: [0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x41,0x01,0xff]
+
+v_alignbyte_b32_e64_dpp v5, v1, v2, v255 row_shl:1
+// GFX11: [0x05,0x00,0x17,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x01,0x01,0xff]
+
+v_alignbyte_b32_e64_dpp v5, v1, v2, s105 row_shl:15
+// GFX11: [0x05,0x00,0x17,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x0f,0x01,0xff]
+
+v_alignbyte_b32_e64_dpp v5, v1, v2, vcc_hi row_shr:1
+// GFX11: [0x05,0x00,0x17,0xd6,0xfa,0x04,0xae,0x01,0x01,0x11,0x01,0xff]
+
+v_alignbyte_b32_e64_dpp v5, v1, v2, vcc_lo row_shr:15
+// GFX11: [0x05,0x00,0x17,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x1f,0x01,0xff]
+
+v_alignbyte_b32_e64_dpp v5, v1, v2, ttmp15 row_ror:1
+// GFX11: [0x05,0x00,0x17,0xd6,0xfa,0x04,0xee,0x01,0x01,0x21,0x01,0xff]
+
+v_alignbyte_b32_e64_dpp v5, v1, v2, exec_hi row_ror:15
+// GFX11: [0x05,0x00,0x17,0xd6,0xfa,0x04,0xfe,0x01,0x01,0x2f,0x01,0xff]
+
+v_alignbyte_b32_e64_dpp v5, v1, v2, exec_lo row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x00,0x17,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x50,0x01,0xff]
+
+v_alignbyte_b32_e64_dpp v5, v1, v2, null row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x00,0x17,0xd6,0xfa,0x04,0xf2,0x01,0x01,0x5f,0x01,0x01]
+
+v_alignbyte_b32_e64_dpp v5, v1, v2, -1 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX11: [0x05,0x00,0x17,0xd6,0xfa,0x04,0x06,0x03,0x01,0x60,0x09,0x13]
+
+v_alignbyte_b32_e64_dpp v255, v255, v255, src_scc row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX11: [0xff,0x00,0x17,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x05,0x30]
+
+v_and_b16_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0]
+// GFX11: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+
+v_and_b16_e64_dpp v5, v1, v2 quad_perm:[0,1,2,3]
+// GFX11: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+
+v_and_b16_e64_dpp v5, v1, v2 row_mirror
+// GFX11: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+
+v_and_b16_e64_dpp v5, v1, v2 row_half_mirror
+// GFX11: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+
+v_and_b16_e64_dpp v5, v1, v2 row_shl:1
+// GFX11: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+
+v_and_b16_e64_dpp v5, v1, v2 row_shl:15
+// GFX11: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+
+v_and_b16_e64_dpp v5, v1, v2 row_shr:1
+// GFX11: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+
+v_and_b16_e64_dpp v5, v1, v2 row_shr:15
+// GFX11: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+
+v_and_b16_e64_dpp v5, v1, v2 row_ror:1
+// GFX11: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+
+v_and_b16_e64_dpp v5, v1, v2 row_ror:15
+// GFX11: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+
+v_and_b16_e64_dpp v5, v1, v2 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+
+v_and_b16_e64_dpp v5, v1, v2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x5f,0x01,0x01]
+
+v_and_b16_e64_dpp v5, v1, v2 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX11: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x60,0x09,0x13]
+
+v_and_b16_e64_dpp v255, v255, v255 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX11: [0xff,0x00,0x62,0xd7,0xfa,0xfe,0x03,0x00,0xff,0x6f,0x05,0x30]
+
+v_and_or_b32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX11: [0x05,0x00,0x57,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_and_or_b32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX11: [0x05,0x00,0x57,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_and_or_b32_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX11: [0x05,0x00,0x57,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_and_or_b32_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX11: [0x05,0x00,0x57,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_and_or_b32_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX11: [0x05,0x00,0x57,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_and_or_b32_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX11: [0x05,0x00,0x57,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_and_or_b32_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX11: [0x05,0x00,0x57,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_and_or_b32_e64_dpp v5, v1, v2, ttmp15 row_shr:15
+// GFX11: [0x05,0x00,0x57,0xd6,0xfa,0x04,0xee,0x01,0x01,0x1f,0x01,0xff]
+
+v_and_or_b32_e64_dpp v5, v1, v2, exec_hi row_ror:1
+// GFX11: [0x05,0x00,0x57,0xd6,0xfa,0x04,0xfe,0x01,0x01,0x21,0x01,0xff]
+
+v_and_or_b32_e64_dpp v5, v1, v2, exec_lo row_ror:15
+// GFX11: [0x05,0x00,0x57,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x2f,0x01,0xff]
+
+v_and_or_b32_e64_dpp v5, v1, v2, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x00,0x57,0xd6,0xfa,0x04,0xf2,0x01,0x01,0x50,0x01,0xff]
+
+v_and_or_b32_e64_dpp v5, v1, v2, -1 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x00,0x57,0xd6,0xfa,0x04,0x06,0x03,0x01,0x5f,0x01,0x01]
+
+v_and_or_b32_e64_dpp v5, v1, v2, 0.5 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX11: [0x05,0x00,0x57,0xd6,0xfa,0x04,0xc2,0x03,0x01,0x60,0x09,0x13]
+
+v_and_or_b32_e64_dpp v255, v255, v255, src_scc row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX11: [0xff,0x00,0x57,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x05,0x30]
+
+v_ashrrev_i16_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0]
+// GFX11: [0x05,0x00,0x3a,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+
+v_ashrrev_i16_e64_dpp v5, v1, v2 quad_perm:[0,1,2,3]
+// GFX11: [0x05,0x00,0x3a,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+
+v_ashrrev_i16_e64_dpp v5, v1, v2 row_mirror
+// GFX11: [0x05,0x00,0x3a,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+
+v_ashrrev_i16_e64_dpp v5, v1, v2 row_half_mirror
+// GFX11: [0x05,0x00,0x3a,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+
+v_ashrrev_i16_e64_dpp v5, v1, v2 row_shl:1
+// GFX11: [0x05,0x00,0x3a,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+
+v_ashrrev_i16_e64_dpp v5, v1, v2 row_shl:15
+// GFX11: [0x05,0x00,0x3a,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+
+v_ashrrev_i16_e64_dpp v5, v1, v2 row_shr:1
+// GFX11: [0x05,0x00,0x3a,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+
+v_ashrrev_i16_e64_dpp v5, v1, v2 row_shr:15
+// GFX11: [0x05,0x00,0x3a,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+
+v_ashrrev_i16_e64_dpp v5, v1, v2 row_ror:1
+// GFX11: [0x05,0x00,0x3a,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+
+v_ashrrev_i16_e64_dpp v5, v1, v2 row_ror:15
+// GFX11: [0x05,0x00,0x3a,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+
+v_ashrrev_i16_e64_dpp v5, v1, v2 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x00,0x3a,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+
+v_ashrrev_i16_e64_dpp v5, v1, v2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x00,0x3a,0xd7,0xfa,0x04,0x02,0x00,0x01,0x5f,0x01,0x01]
+
+v_ashrrev_i16_e64_dpp v5, v1, v2 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX11: [0x05,0x00,0x3a,0xd7,0xfa,0x04,0x02,0x00,0x01,0x60,0x09,0x13]
+
+v_ashrrev_i16_e64_dpp v255, v255, v255 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX11: [0xff,0x00,0x3a,0xd7,0xfa,0xfe,0x03,0x00,0xff,0x6f,0x05,0x30]
+
+v_bcnt_u32_b32_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0]
+// GFX11: [0x05,0x00,0x1e,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+
+v_bcnt_u32_b32_e64_dpp v5, v1, v2 quad_perm:[0,1,2,3]
+// GFX11: [0x05,0x00,0x1e,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+
+v_bcnt_u32_b32_e64_dpp v5, v1, v2 row_mirror
+// GFX11: [0x05,0x00,0x1e,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+
+v_bcnt_u32_b32_e64_dpp v5, v1, v2 row_half_mirror
+// GFX11: [0x05,0x00,0x1e,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+
+v_bcnt_u32_b32_e64_dpp v5, v1, v2 row_shl:1
+// GFX11: [0x05,0x00,0x1e,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+
+v_bcnt_u32_b32_e64_dpp v5, v1, v2 row_shl:15
+// GFX11: [0x05,0x00,0x1e,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+
+v_bcnt_u32_b32_e64_dpp v5, v1, v2 row_shr:1
+// GFX11: [0x05,0x00,0x1e,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+
+v_bcnt_u32_b32_e64_dpp v5, v1, v2 row_shr:15
+// GFX11: [0x05,0x00,0x1e,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+
+v_bcnt_u32_b32_e64_dpp v5, v1, v2 row_ror:1
+// GFX11: [0x05,0x00,0x1e,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+
+v_bcnt_u32_b32_e64_dpp v5, v1, v2 row_ror:15
+// GFX11: [0x05,0x00,0x1e,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+
+v_bcnt_u32_b32_e64_dpp v5, v1, v2 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x00,0x1e,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+
+v_bcnt_u32_b32_e64_dpp v5, v1, v2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x00,0x1e,0xd7,0xfa,0x04,0x02,0x00,0x01,0x5f,0x01,0x01]
+
+v_bcnt_u32_b32_e64_dpp v5, v1, v2 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX11: [0x05,0x00,0x1e,0xd7,0xfa,0x04,0x02,0x00,0x01,0x60,0x09,0x13]
+
+v_bcnt_u32_b32_e64_dpp v255, v255, v255 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX11: [0xff,0x00,0x1e,0xd7,0xfa,0xfe,0x03,0x00,0xff,0x6f,0x05,0x30]
+
+v_bfe_i32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX11: [0x05,0x00,0x11,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_bfe_i32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX11: [0x05,0x00,0x11,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_bfe_i32_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX11: [0x05,0x00,0x11,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_bfe_i32_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX11: [0x05,0x00,0x11,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_bfe_i32_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX11: [0x05,0x00,0x11,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_bfe_i32_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX11: [0x05,0x00,0x11,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_bfe_i32_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX11: [0x05,0x00,0x11,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_bfe_i32_e64_dpp v5, v1, v2, ttmp15 row_shr:15
+// GFX11: [0x05,0x00,0x11,0xd6,0xfa,0x04,0xee,0x01,0x01,0x1f,0x01,0xff]
+
+v_bfe_i32_e64_dpp v5, v1, v2, exec_hi row_ror:1
+// GFX11: [0x05,0x00,0x11,0xd6,0xfa,0x04,0xfe,0x01,0x01,0x21,0x01,0xff]
+
+v_bfe_i32_e64_dpp v5, v1, v2, exec_lo row_ror:15
+// GFX11: [0x05,0x00,0x11,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x2f,0x01,0xff]
+
+v_bfe_i32_e64_dpp v5, v1, v2, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x00,0x11,0xd6,0xfa,0x04,0xf2,0x01,0x01,0x50,0x01,0xff]
+
+v_bfe_i32_e64_dpp v5, v1, v2, -1 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x00,0x11,0xd6,0xfa,0x04,0x06,0x03,0x01,0x5f,0x01,0x01]
+
+v_bfe_i32_e64_dpp v5, v1, v2, 0.5 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX11: [0x05,0x00,0x11,0xd6,0xfa,0x04,0xc2,0x03,0x01,0x60,0x09,0x13]
+
+v_bfe_i32_e64_dpp v255, v255, v255, src_scc row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX11: [0xff,0x00,0x11,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x05,0x30]
+
+v_bfe_u32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX11: [0x05,0x00,0x10,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_bfe_u32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX11: [0x05,0x00,0x10,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_bfe_u32_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX11: [0x05,0x00,0x10,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_bfe_u32_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX11: [0x05,0x00,0x10,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_bfe_u32_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX11: [0x05,0x00,0x10,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_bfe_u32_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX11: [0x05,0x00,0x10,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_bfe_u32_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX11: [0x05,0x00,0x10,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_bfe_u32_e64_dpp v5, v1, v2, ttmp15 row_shr:15
+// GFX11: [0x05,0x00,0x10,0xd6,0xfa,0x04,0xee,0x01,0x01,0x1f,0x01,0xff]
+
+v_bfe_u32_e64_dpp v5, v1, v2, exec_hi row_ror:1
+// GFX11: [0x05,0x00,0x10,0xd6,0xfa,0x04,0xfe,0x01,0x01,0x21,0x01,0xff]
+
+v_bfe_u32_e64_dpp v5, v1, v2, exec_lo row_ror:15
+// GFX11: [0x05,0x00,0x10,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x2f,0x01,0xff]
+
+v_bfe_u32_e64_dpp v5, v1, v2, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x00,0x10,0xd6,0xfa,0x04,0xf2,0x01,0x01,0x50,0x01,0xff]
+
+v_bfe_u32_e64_dpp v5, v1, v2, -1 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x00,0x10,0xd6,0xfa,0x04,0x06,0x03,0x01,0x5f,0x01,0x01]
+
+v_bfe_u32_e64_dpp v5, v1, v2, 0.5 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX11: [0x05,0x00,0x10,0xd6,0xfa,0x04,0xc2,0x03,0x01,0x60,0x09,0x13]
+
+v_bfe_u32_e64_dpp v255, v255, v255, src_scc row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX11: [0xff,0x00,0x10,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x05,0x30]
+
+v_bfi_b32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX11: [0x05,0x00,0x12,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_bfi_b32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX11: [0x05,0x00,0x12,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_bfi_b32_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX11: [0x05,0x00,0x12,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_bfi_b32_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX11: [0x05,0x00,0x12,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_bfi_b32_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX11: [0x05,0x00,0x12,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_bfi_b32_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX11: [0x05,0x00,0x12,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_bfi_b32_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX11: [0x05,0x00,0x12,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_bfi_b32_e64_dpp v5, v1, v2, ttmp15 row_shr:15
+// GFX11: [0x05,0x00,0x12,0xd6,0xfa,0x04,0xee,0x01,0x01,0x1f,0x01,0xff]
+
+v_bfi_b32_e64_dpp v5, v1, v2, exec_hi row_ror:1
+// GFX11: [0x05,0x00,0x12,0xd6,0xfa,0x04,0xfe,0x01,0x01,0x21,0x01,0xff]
+
+v_bfi_b32_e64_dpp v5, v1, v2, exec_lo row_ror:15
+// GFX11: [0x05,0x00,0x12,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x2f,0x01,0xff]
+
+v_bfi_b32_e64_dpp v5, v1, v2, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x00,0x12,0xd6,0xfa,0x04,0xf2,0x01,0x01,0x50,0x01,0xff]
+
+v_bfi_b32_e64_dpp v5, v1, v2, -1 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x00,0x12,0xd6,0xfa,0x04,0x06,0x03,0x01,0x5f,0x01,0x01]
+
+v_bfi_b32_e64_dpp v5, v1, v2, 0.5 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX11: [0x05,0x00,0x12,0xd6,0xfa,0x04,0xc2,0x03,0x01,0x60,0x09,0x13]
+
+v_bfi_b32_e64_dpp v255, v255, v255, src_scc row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX11: [0xff,0x00,0x12,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x05,0x30]
+
+v_bfm_b32_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0]
+// GFX11: [0x05,0x00,0x1d,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+
+v_bfm_b32_e64_dpp v5, v1, v2 quad_perm:[0,1,2,3]
+// GFX11: [0x05,0x00,0x1d,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+
+v_bfm_b32_e64_dpp v5, v1, v2 row_mirror
+// GFX11: [0x05,0x00,0x1d,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+
+v_bfm_b32_e64_dpp v5, v1, v2 row_half_mirror
+// GFX11: [0x05,0x00,0x1d,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+
+v_bfm_b32_e64_dpp v5, v1, v2 row_shl:1
+// GFX11: [0x05,0x00,0x1d,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+
+v_bfm_b32_e64_dpp v5, v1, v2 row_shl:15
+// GFX11: [0x05,0x00,0x1d,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+
+v_bfm_b32_e64_dpp v5, v1, v2 row_shr:1
+// GFX11: [0x05,0x00,0x1d,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+
+v_bfm_b32_e64_dpp v5, v1, v2 row_shr:15
+// GFX11: [0x05,0x00,0x1d,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+
+v_bfm_b32_e64_dpp v5, v1, v2 row_ror:1
+// GFX11: [0x05,0x00,0x1d,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+
+v_bfm_b32_e64_dpp v5, v1, v2 row_ror:15
+// GFX11: [0x05,0x00,0x1d,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+
+v_bfm_b32_e64_dpp v5, v1, v2 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x00,0x1d,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+
+v_bfm_b32_e64_dpp v5, v1, v2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x00,0x1d,0xd7,0xfa,0x04,0x02,0x00,0x01,0x5f,0x01,0x01]
+
+v_bfm_b32_e64_dpp v5, v1, v2 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX11: [0x05,0x00,0x1d,0xd7,0xfa,0x04,0x02,0x00,0x01,0x60,0x09,0x13]
+
+v_bfm_b32_e64_dpp v255, v255, v255 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX11: [0xff,0x00,0x1d,0xd7,0xfa,0xfe,0x03,0x00,0xff,0x6f,0x05,0x30]
+
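+// As with the carry-out above, v_cndmask_b16's selector operand tracks the
+// wave size: a single SGPR (s3, s105, vcc_lo, ...) under wave32, an SGPR
+// pair (s[6:7], s[104:105], vcc, ttmp[14:15]) under wave64, with the
+// opposite wave size rejecting the operand.
+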
+v_cndmask_b16_e64_dpp v5, v1, v2, s3 quad_perm:[3,2,1,0]
+// W32: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x0e,0x00,0x01,0x1b,0x00,0xff]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16_e64_dpp v5, v1, v2, s3 quad_perm:[0,1,2,3]
+// W32: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x0e,0x00,0x01,0xe4,0x00,0xff]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16_e64_dpp v5, v1, v2, s3 row_mirror
+// W32: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x0e,0x00,0x01,0x40,0x01,0xff]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16_e64_dpp v5, v1, v2, s3 row_half_mirror
+// W32: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x0e,0x00,0x01,0x41,0x01,0xff]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16_e64_dpp v5, v1, v2, s3 row_shl:1
+// W32: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x0e,0x00,0x01,0x01,0x01,0xff]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16_e64_dpp v5, v1, v2, s3 row_shl:15
+// W32: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x0e,0x00,0x01,0x0f,0x01,0xff]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16_e64_dpp v5, v1, v2, s3 row_shr:1
+// W32: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x0e,0x00,0x01,0x11,0x01,0xff]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16_e64_dpp v5, v1, v2, s3 row_shr:15
+// W32: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x0e,0x00,0x01,0x1f,0x01,0xff]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16_e64_dpp v5, v1, v2, s3 row_ror:1
+// W32: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x0e,0x00,0x01,0x21,0x01,0xff]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16_e64_dpp v5, v1, v2, s105 row_ror:15
+// W32: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x2f,0x01,0xff]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16_e64_dpp v5, v1, v2, vcc_hi row_share:0 row_mask:0xf bank_mask:0xf
+// W32: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0xae,0x01,0x01,0x50,0x01,0xff]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16_e64_dpp v5, |v1|, -v2, vcc_lo row_share:15 row_mask:0x0 bank_mask:0x1
+// W32: [0x05,0x01,0x5d,0xd6,0xfa,0x04,0xaa,0x41,0x01,0x5f,0x01,0x01]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16_e64_dpp v5, -v1, |v2|, ttmp15 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// W32: [0x05,0x02,0x5d,0xd6,0xfa,0x04,0xee,0x21,0x01,0x60,0x09,0x13]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] quad_perm:[3,2,1,0]
+// W64: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x1b,0x00,0xff]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] quad_perm:[0,1,2,3]
+// W64: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0xe4,0x00,0xff]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] row_mirror
+// W64: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x40,0x01,0xff]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] row_half_mirror
+// W64: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x41,0x01,0xff]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] row_shl:1
+// W64: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x01,0x01,0xff]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] row_shl:15
+// W64: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x0f,0x01,0xff]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] row_shr:1
+// W64: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x11,0x01,0xff]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] row_shr:15
+// W64: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x1f,0x01,0xff]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] row_ror:1
+// W64: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x21,0x01,0xff]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] row_ror:15
+// W64: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x2f,0x01,0xff]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16_e64_dpp v5, v1, v2, s[104:105] row_share:0 row_mask:0xf bank_mask:0xf
+// W64: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0xa2,0x01,0x01,0x50,0x01,0xff]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16_e64_dpp v5, |v1|, -v2, vcc row_share:15 row_mask:0x0 bank_mask:0x1
+// W64: [0x05,0x01,0x5d,0xd6,0xfa,0x04,0xaa,0x41,0x01,0x5f,0x01,0x01]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16_e64_dpp v5, -v1, |v2|, ttmp[14:15] row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// W64: [0x05,0x02,0x5d,0xd6,0xfa,0x04,0xea,0x21,0x01,0x60,0x09,0x13]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16_e64_dpp v255, -|v255|, -|v255|, null row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX11: [0xff,0x03,0x5d,0xd6,0xfa,0xfe,0xf3,0x61,0xff,0x6f,0x05,0x30]
+
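+// The cube* tests below also exercise the VOP3 source modifiers, worked
+// from the check bytes: |x| sets an abs bit in byte 1 (src0 0x01, src1
+// 0x02, src2 0x04; clamp is bit 7, 0x80, of the same byte), -x sets a neg
+// bit in the top three bits of byte 7 (src0 0x20, src1 0x40, src2 0x80),
+// and the output modifiers mul:2, mul:4, and div:2 encode as omod values
+// 1-3 in byte 7 bits 3-4.
+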
+v_cubeid_f32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX11: [0x05,0x00,0x0c,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_cubeid_f32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX11: [0x05,0x00,0x0c,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_cubeid_f32_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX11: [0x05,0x00,0x0c,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_cubeid_f32_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX11: [0x05,0x00,0x0c,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_cubeid_f32_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX11: [0x05,0x00,0x0c,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_cubeid_f32_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX11: [0x05,0x00,0x0c,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_cubeid_f32_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX11: [0x05,0x00,0x0c,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_cubeid_f32_e64_dpp v5, |v1|, v2, -ttmp15 row_shr:15
+// GFX11: [0x05,0x01,0x0c,0xd6,0xfa,0x04,0xee,0x81,0x01,0x1f,0x01,0xff]
+
+v_cubeid_f32_e64_dpp v5, v1, -|v2|, exec_hi row_ror:1
+// GFX11: [0x05,0x02,0x0c,0xd6,0xfa,0x04,0xfe,0x41,0x01,0x21,0x01,0xff]
+
+v_cubeid_f32_e64_dpp v5, -v1, v2, |exec_lo| row_ror:15
+// GFX11: [0x05,0x04,0x0c,0xd6,0xfa,0x04,0xfa,0x21,0x01,0x2f,0x01,0xff]
+
+v_cubeid_f32_e64_dpp v5, -|v1|, -|v2|, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x03,0x0c,0xd6,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff]
+
+v_cubeid_f32_e64_dpp v5, -|v1|, v2, -|-1| mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x05,0x0c,0xd6,0xfa,0x04,0x06,0xab,0x01,0x5f,0x01,0x01]
+
+v_cubeid_f32_e64_dpp v5, v1, -|v2|, -|0.5| mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX11: [0x05,0x06,0x0c,0xd6,0xfa,0x04,0xc2,0xd3,0x01,0x60,0x09,0x13]
+
+v_cubeid_f32_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX11: [0xff,0x87,0x0c,0xd6,0xfa,0xfe,0xf7,0xfb,0xff,0x6f,0x05,0x30]
+
+v_cubema_f32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX11: [0x05,0x00,0x0f,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_cubema_f32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX11: [0x05,0x00,0x0f,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_cubema_f32_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX11: [0x05,0x00,0x0f,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_cubema_f32_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX11: [0x05,0x00,0x0f,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_cubema_f32_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX11: [0x05,0x00,0x0f,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_cubema_f32_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX11: [0x05,0x00,0x0f,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_cubema_f32_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX11: [0x05,0x00,0x0f,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_cubema_f32_e64_dpp v5, |v1|, v2, -ttmp15 row_shr:15
+// GFX11: [0x05,0x01,0x0f,0xd6,0xfa,0x04,0xee,0x81,0x01,0x1f,0x01,0xff]
+
+v_cubema_f32_e64_dpp v5, v1, -|v2|, exec_hi row_ror:1
+// GFX11: [0x05,0x02,0x0f,0xd6,0xfa,0x04,0xfe,0x41,0x01,0x21,0x01,0xff]
+
+v_cubema_f32_e64_dpp v5, -v1, v2, |exec_lo| row_ror:15
+// GFX11: [0x05,0x04,0x0f,0xd6,0xfa,0x04,0xfa,0x21,0x01,0x2f,0x01,0xff]
+
+v_cubema_f32_e64_dpp v5, -|v1|, -|v2|, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x03,0x0f,0xd6,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff]
+
+v_cubema_f32_e64_dpp v5, -|v1|, v2, -|-1| mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x05,0x0f,0xd6,0xfa,0x04,0x06,0xab,0x01,0x5f,0x01,0x01]
+
+v_cubema_f32_e64_dpp v5, v1, -|v2|, -|0.5| mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX11: [0x05,0x06,0x0f,0xd6,0xfa,0x04,0xc2,0xd3,0x01,0x60,0x09,0x13]
+
+v_cubema_f32_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX11: [0xff,0x87,0x0f,0xd6,0xfa,0xfe,0xf7,0xfb,0xff,0x6f,0x05,0x30]
+
+v_cubesc_f32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX11: [0x05,0x00,0x0d,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_cubesc_f32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX11: [0x05,0x00,0x0d,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_cubesc_f32_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX11: [0x05,0x00,0x0d,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_cubesc_f32_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX11: [0x05,0x00,0x0d,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_cubesc_f32_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX11: [0x05,0x00,0x0d,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_cubesc_f32_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX11: [0x05,0x00,0x0d,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_cubesc_f32_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX11: [0x05,0x00,0x0d,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_cubesc_f32_e64_dpp v5, |v1|, v2, -ttmp15 row_shr:15
+// GFX11: [0x05,0x01,0x0d,0xd6,0xfa,0x04,0xee,0x81,0x01,0x1f,0x01,0xff]
+
+v_cubesc_f32_e64_dpp v5, v1, -|v2|, exec_hi row_ror:1
+// GFX11: [0x05,0x02,0x0d,0xd6,0xfa,0x04,0xfe,0x41,0x01,0x21,0x01,0xff]
+
+v_cubesc_f32_e64_dpp v5, -v1, v2, |exec_lo| row_ror:15
+// GFX11: [0x05,0x04,0x0d,0xd6,0xfa,0x04,0xfa,0x21,0x01,0x2f,0x01,0xff]
+
+v_cubesc_f32_e64_dpp v5, -|v1|, -|v2|, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x03,0x0d,0xd6,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff]
+
+v_cubesc_f32_e64_dpp v5, -|v1|, v2, -|-1| mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x05,0x0d,0xd6,0xfa,0x04,0x06,0xab,0x01,0x5f,0x01,0x01]
+
+v_cubesc_f32_e64_dpp v5, v1, -|v2|, -|0.5| mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX11: [0x05,0x06,0x0d,0xd6,0xfa,0x04,0xc2,0xd3,0x01,0x60,0x09,0x13]
+
+v_cubesc_f32_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX11: [0xff,0x87,0x0d,0xd6,0xfa,0xfe,0xf7,0xfb,0xff,0x6f,0x05,0x30]
+
+v_cubetc_f32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX11: [0x05,0x00,0x0e,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_cubetc_f32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX11: [0x05,0x00,0x0e,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_cubetc_f32_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX11: [0x05,0x00,0x0e,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_cubetc_f32_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX11: [0x05,0x00,0x0e,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_cubetc_f32_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX11: [0x05,0x00,0x0e,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_cubetc_f32_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX11: [0x05,0x00,0x0e,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_cubetc_f32_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX11: [0x05,0x00,0x0e,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_cubetc_f32_e64_dpp v5, |v1|, v2, -ttmp15 row_shr:15
+// GFX11: [0x05,0x01,0x0e,0xd6,0xfa,0x04,0xee,0x81,0x01,0x1f,0x01,0xff]
+
+v_cubetc_f32_e64_dpp v5, v1, -|v2|, exec_hi row_ror:1
+// GFX11: [0x05,0x02,0x0e,0xd6,0xfa,0x04,0xfe,0x41,0x01,0x21,0x01,0xff]
+
+v_cubetc_f32_e64_dpp v5, -v1, v2, |exec_lo| row_ror:15
+// GFX11: [0x05,0x04,0x0e,0xd6,0xfa,0x04,0xfa,0x21,0x01,0x2f,0x01,0xff]
+
+v_cubetc_f32_e64_dpp v5, -|v1|, -|v2|, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x03,0x0e,0xd6,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff]
+
+v_cubetc_f32_e64_dpp v5, -|v1|, v2, -|-1| mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x05,0x0e,0xd6,0xfa,0x04,0x06,0xab,0x01,0x5f,0x01,0x01]
+
+v_cubetc_f32_e64_dpp v5, v1, -|v2|, -|0.5| mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX11: [0x05,0x06,0x0e,0xd6,0xfa,0x04,0xc2,0xd3,0x01,0x60,0x09,0x13]
+
+v_cubetc_f32_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX11: [0xff,0x87,0x0e,0xd6,0xfa,0xfe,0xf7,0xfb,0xff,0x6f,0x05,0x30]
+
+v_cvt_pk_i16_f32_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0]
+// GFX11: [0x05,0x00,0x06,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+
+v_cvt_pk_i16_f32_e64_dpp v5, v1, v2 quad_perm:[0,1,2,3]
+// GFX11: [0x05,0x00,0x06,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+
+v_cvt_pk_i16_f32_e64_dpp v5, v1, v2 row_mirror
+// GFX11: [0x05,0x00,0x06,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+
+v_cvt_pk_i16_f32_e64_dpp v5, v1, v2 row_half_mirror
+// GFX11: [0x05,0x00,0x06,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+
+v_cvt_pk_i16_f32_e64_dpp v5, v1, v2 row_shl:1
+// GFX11: [0x05,0x00,0x06,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+
+v_cvt_pk_i16_f32_e64_dpp v5, v1, v2 row_shl:15
+// GFX11: [0x05,0x00,0x06,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+
+v_cvt_pk_i16_f32_e64_dpp v5, v1, v2 row_shr:1
+// GFX11: [0x05,0x00,0x06,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+
+v_cvt_pk_i16_f32_e64_dpp v5, v1, v2 row_shr:15
+// GFX11: [0x05,0x00,0x06,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+
+v_cvt_pk_i16_f32_e64_dpp v5, v1, v2 row_ror:1
+// GFX11: [0x05,0x00,0x06,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+
+v_cvt_pk_i16_f32_e64_dpp v5, v1, v2 row_ror:15
+// GFX11: [0x05,0x00,0x06,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+
+v_cvt_pk_i16_f32_e64_dpp v5, v1, v2 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x00,0x06,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+
+v_cvt_pk_i16_f32_e64_dpp v5, |v1|, -v2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x01,0x06,0xd7,0xfa,0x04,0x02,0x40,0x01,0x5f,0x01,0x01]
+
+v_cvt_pk_i16_f32_e64_dpp v5, -v1, |v2| row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX11: [0x05,0x02,0x06,0xd7,0xfa,0x04,0x02,0x20,0x01,0x60,0x09,0x13]
+
+v_cvt_pk_i16_f32_e64_dpp v255, -|v255|, -|v255| row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX11: [0xff,0x03,0x06,0xd7,0xfa,0xfe,0x03,0x60,0xff,0x6f,0x05,0x30]
+
+v_cvt_pk_i16_i32_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0]
+// GFX11: [0x05,0x00,0x24,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+
+v_cvt_pk_i16_i32_e64_dpp v5, v1, v2 quad_perm:[0,1,2,3]
+// GFX11: [0x05,0x00,0x24,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+
+v_cvt_pk_i16_i32_e64_dpp v5, v1, v2 row_mirror
+// GFX11: [0x05,0x00,0x24,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+
+v_cvt_pk_i16_i32_e64_dpp v5, v1, v2 row_half_mirror
+// GFX11: [0x05,0x00,0x24,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+
+v_cvt_pk_i16_i32_e64_dpp v5, v1, v2 row_shl:1
+// GFX11: [0x05,0x00,0x24,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+
+v_cvt_pk_i16_i32_e64_dpp v5, v1, v2 row_shl:15
+// GFX11: [0x05,0x00,0x24,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+
+v_cvt_pk_i16_i32_e64_dpp v5, v1, v2 row_shr:1
+// GFX11: [0x05,0x00,0x24,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+
+v_cvt_pk_i16_i32_e64_dpp v5, v1, v2 row_shr:15
+// GFX11: [0x05,0x00,0x24,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+
+v_cvt_pk_i16_i32_e64_dpp v5, v1, v2 row_ror:1
+// GFX11: [0x05,0x00,0x24,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+
+v_cvt_pk_i16_i32_e64_dpp v5, v1, v2 row_ror:15
+// GFX11: [0x05,0x00,0x24,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+
+v_cvt_pk_i16_i32_e64_dpp v5, v1, v2 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x00,0x24,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+
+v_cvt_pk_i16_i32_e64_dpp v5, v1, v2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x00,0x24,0xd7,0xfa,0x04,0x02,0x00,0x01,0x5f,0x01,0x01]
+
+v_cvt_pk_i16_i32_e64_dpp v5, v1, v2 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX11: [0x05,0x00,0x24,0xd7,0xfa,0x04,0x02,0x00,0x01,0x60,0x09,0x13]
+
+v_cvt_pk_i16_i32_e64_dpp v255, v255, v255 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX11: [0xff,0x00,0x24,0xd7,0xfa,0xfe,0x03,0x00,0xff,0x6f,0x05,0x30]
+
+v_cvt_pk_norm_i16_f16_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0]
+// GFX11: [0x05,0x00,0x12,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+
+v_cvt_pk_norm_i16_f16_e64_dpp v5, v1, v2 quad_perm:[0,1,2,3]
+// GFX11: [0x05,0x00,0x12,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+
+v_cvt_pk_norm_i16_f16_e64_dpp v5, v1, v2 row_mirror
+// GFX11: [0x05,0x00,0x12,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+
+v_cvt_pk_norm_i16_f16_e64_dpp v5, v1, v2 row_half_mirror
+// GFX11: [0x05,0x00,0x12,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+
+v_cvt_pk_norm_i16_f16_e64_dpp v5, v1, v2 row_shl:1
+// GFX11: [0x05,0x00,0x12,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+
+v_cvt_pk_norm_i16_f16_e64_dpp v5, v1, v2 row_shl:15
+// GFX11: [0x05,0x00,0x12,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+
+v_cvt_pk_norm_i16_f16_e64_dpp v5, v1, v2 row_shr:1
+// GFX11: [0x05,0x00,0x12,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+
+v_cvt_pk_norm_i16_f16_e64_dpp v5, v1, v2 row_shr:15
+// GFX11: [0x05,0x00,0x12,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+
+v_cvt_pk_norm_i16_f16_e64_dpp v5, v1, v2 row_ror:1
+// GFX11: [0x05,0x00,0x12,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+
+v_cvt_pk_norm_i16_f16_e64_dpp v5, v1, v2 row_ror:15
+// GFX11: [0x05,0x00,0x12,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+
+v_cvt_pk_norm_i16_f16_e64_dpp v5, v1, v2 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x00,0x12,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+
+v_cvt_pk_norm_i16_f16_e64_dpp v5, |v1|, -v2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x01,0x12,0xd7,0xfa,0x04,0x02,0x40,0x01,0x5f,0x01,0x01]
+
+v_cvt_pk_norm_i16_f16_e64_dpp v5, -v1, |v2| row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX11: [0x05,0x02,0x12,0xd7,0xfa,0x04,0x02,0x20,0x01,0x60,0x09,0x13]
+
+v_cvt_pk_norm_i16_f16_e64_dpp v255, -|v255|, -|v255| row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX11: [0xff,0x03,0x12,0xd7,0xfa,0xfe,0x03,0x60,0xff,0x6f,0x05,0x30]
+
+v_cvt_pk_norm_u16_f16_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0]
+// GFX11: [0x05,0x00,0x13,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+
+v_cvt_pk_norm_u16_f16_e64_dpp v5, v1, v2 quad_perm:[0,1,2,3]
+// GFX11: [0x05,0x00,0x13,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+
+v_cvt_pk_norm_u16_f16_e64_dpp v5, v1, v2 row_mirror
+// GFX11: [0x05,0x00,0x13,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+
+v_cvt_pk_norm_u16_f16_e64_dpp v5, v1, v2 row_half_mirror
+// GFX11: [0x05,0x00,0x13,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+
+v_cvt_pk_norm_u16_f16_e64_dpp v5, v1, v2 row_shl:1
+// GFX11: [0x05,0x00,0x13,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+
+v_cvt_pk_norm_u16_f16_e64_dpp v5, v1, v2 row_shl:15
+// GFX11: [0x05,0x00,0x13,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+
+v_cvt_pk_norm_u16_f16_e64_dpp v5, v1, v2 row_shr:1
+// GFX11: [0x05,0x00,0x13,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+
+v_cvt_pk_norm_u16_f16_e64_dpp v5, v1, v2 row_shr:15
+// GFX11: [0x05,0x00,0x13,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+
+v_cvt_pk_norm_u16_f16_e64_dpp v5, v1, v2 row_ror:1
+// GFX11: [0x05,0x00,0x13,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+
+v_cvt_pk_norm_u16_f16_e64_dpp v5, v1, v2 row_ror:15
+// GFX11: [0x05,0x00,0x13,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+
+v_cvt_pk_norm_u16_f16_e64_dpp v5, v1, v2 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x00,0x13,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+
+v_cvt_pk_norm_u16_f16_e64_dpp v5, |v1|, -v2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x01,0x13,0xd7,0xfa,0x04,0x02,0x40,0x01,0x5f,0x01,0x01]
+
+v_cvt_pk_norm_u16_f16_e64_dpp v5, -v1, |v2| row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX11: [0x05,0x02,0x13,0xd7,0xfa,0x04,0x02,0x20,0x01,0x60,0x09,0x13]
+
+v_cvt_pk_norm_u16_f16_e64_dpp v255, -|v255|, -|v255| row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX11: [0xff,0x03,0x13,0xd7,0xfa,0xfe,0x03,0x60,0xff,0x6f,0x05,0x30]
+
+v_cvt_pk_u16_f32_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0]
+// GFX11: [0x05,0x00,0x07,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+
+v_cvt_pk_u16_f32_e64_dpp v5, v1, v2 quad_perm:[0,1,2,3]
+// GFX11: [0x05,0x00,0x07,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+
+v_cvt_pk_u16_f32_e64_dpp v5, v1, v2 row_mirror
+// GFX11: [0x05,0x00,0x07,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+
+v_cvt_pk_u16_f32_e64_dpp v5, v1, v2 row_half_mirror
+// GFX11: [0x05,0x00,0x07,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+
+v_cvt_pk_u16_f32_e64_dpp v5, v1, v2 row_shl:1
+// GFX11: [0x05,0x00,0x07,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+
+v_cvt_pk_u16_f32_e64_dpp v5, v1, v2 row_shl:15
+// GFX11: [0x05,0x00,0x07,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+
+v_cvt_pk_u16_f32_e64_dpp v5, v1, v2 row_shr:1
+// GFX11: [0x05,0x00,0x07,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+
+v_cvt_pk_u16_f32_e64_dpp v5, v1, v2 row_shr:15
+// GFX11: [0x05,0x00,0x07,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+
+v_cvt_pk_u16_f32_e64_dpp v5, v1, v2 row_ror:1
+// GFX11: [0x05,0x00,0x07,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+
+v_cvt_pk_u16_f32_e64_dpp v5, v1, v2 row_ror:15
+// GFX11: [0x05,0x00,0x07,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+
+v_cvt_pk_u16_f32_e64_dpp v5, v1, v2 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x00,0x07,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+
+v_cvt_pk_u16_f32_e64_dpp v5, |v1|, -v2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x01,0x07,0xd7,0xfa,0x04,0x02,0x40,0x01,0x5f,0x01,0x01]
+
+v_cvt_pk_u16_f32_e64_dpp v5, -v1, |v2| row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX11: [0x05,0x02,0x07,0xd7,0xfa,0x04,0x02,0x20,0x01,0x60,0x09,0x13]
+
+v_cvt_pk_u16_f32_e64_dpp v255, -|v255|, -|v255| row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX11: [0xff,0x03,0x07,0xd7,0xfa,0xfe,0x03,0x60,0xff,0x6f,0x05,0x30]
+
+v_cvt_pk_u16_u32_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0]
+// GFX11: [0x05,0x00,0x23,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+
+v_cvt_pk_u16_u32_e64_dpp v5, v1, v2 quad_perm:[0,1,2,3]
+// GFX11: [0x05,0x00,0x23,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+
+v_cvt_pk_u16_u32_e64_dpp v5, v1, v2 row_mirror
+// GFX11: [0x05,0x00,0x23,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+
+v_cvt_pk_u16_u32_e64_dpp v5, v1, v2 row_half_mirror
+// GFX11: [0x05,0x00,0x23,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+
+v_cvt_pk_u16_u32_e64_dpp v5, v1, v2 row_shl:1
+// GFX11: [0x05,0x00,0x23,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+
+v_cvt_pk_u16_u32_e64_dpp v5, v1, v2 row_shl:15
+// GFX11: [0x05,0x00,0x23,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+
+v_cvt_pk_u16_u32_e64_dpp v5, v1, v2 row_shr:1
+// GFX11: [0x05,0x00,0x23,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+
+v_cvt_pk_u16_u32_e64_dpp v5, v1, v2 row_shr:15
+// GFX11: [0x05,0x00,0x23,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+
+v_cvt_pk_u16_u32_e64_dpp v5, v1, v2 row_ror:1
+// GFX11: [0x05,0x00,0x23,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+
+v_cvt_pk_u16_u32_e64_dpp v5, v1, v2 row_ror:15
+// GFX11: [0x05,0x00,0x23,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+
+v_cvt_pk_u16_u32_e64_dpp v5, v1, v2 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x00,0x23,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+
+v_cvt_pk_u16_u32_e64_dpp v5, v1, v2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x00,0x23,0xd7,0xfa,0x04,0x02,0x00,0x01,0x5f,0x01,0x01]
+
+v_cvt_pk_u16_u32_e64_dpp v5, v1, v2 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX11: [0x05,0x00,0x23,0xd7,0xfa,0x04,0x02,0x00,0x01,0x60,0x09,0x13]
+
+v_cvt_pk_u16_u32_e64_dpp v255, v255, v255 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX11: [0xff,0x00,0x23,0xd7,0xfa,0xfe,0x03,0x00,0xff,0x6f,0x05,0x30]
+
+v_cvt_pk_u8_f32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX11: [0x05,0x00,0x26,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_cvt_pk_u8_f32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX11: [0x05,0x00,0x26,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_cvt_pk_u8_f32_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX11: [0x05,0x00,0x26,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_cvt_pk_u8_f32_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX11: [0x05,0x00,0x26,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_cvt_pk_u8_f32_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX11: [0x05,0x00,0x26,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_cvt_pk_u8_f32_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX11: [0x05,0x00,0x26,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_cvt_pk_u8_f32_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX11: [0x05,0x00,0x26,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_cvt_pk_u8_f32_e64_dpp v5, v1, v2, ttmp15 row_shr:15
+// GFX11: [0x05,0x00,0x26,0xd6,0xfa,0x04,0xee,0x01,0x01,0x1f,0x01,0xff]
+
+v_cvt_pk_u8_f32_e64_dpp v5, v1, v2, exec_hi row_ror:1
+// GFX11: [0x05,0x00,0x26,0xd6,0xfa,0x04,0xfe,0x01,0x01,0x21,0x01,0xff]
+
+v_cvt_pk_u8_f32_e64_dpp v5, v1, v2, exec_lo row_ror:15
+// GFX11: [0x05,0x00,0x26,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x2f,0x01,0xff]
+
+v_cvt_pk_u8_f32_e64_dpp v5, v1, v2, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x00,0x26,0xd6,0xfa,0x04,0xf2,0x01,0x01,0x50,0x01,0xff]
+
+v_cvt_pk_u8_f32_e64_dpp v5, v1, v2, -1 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x00,0x26,0xd6,0xfa,0x04,0x06,0x03,0x01,0x5f,0x01,0x01]
+
+v_cvt_pk_u8_f32_e64_dpp v5, v1, v2, 0.5 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX11: [0x05,0x00,0x26,0xd6,0xfa,0x04,0xc2,0x03,0x01,0x60,0x09,0x13]
+
+v_cvt_pk_u8_f32_e64_dpp v255, -|v255|, v255, src_scc row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX11: [0xff,0x01,0x26,0xd6,0xfa,0xfe,0xf7,0x23,0xff,0x6f,0x05,0x30]
+
+v_cvt_pk_norm_i16_f32_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0]
+// GFX11: [0x05,0x00,0x21,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+
+v_cvt_pk_norm_i16_f32_e64_dpp v5, v1, v2 quad_perm:[0,1,2,3]
+// GFX11: [0x05,0x00,0x21,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+
+v_cvt_pk_norm_i16_f32_e64_dpp v5, v1, v2 row_mirror
+// GFX11: [0x05,0x00,0x21,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+
+v_cvt_pk_norm_i16_f32_e64_dpp v5, v1, v2 row_half_mirror
+// GFX11: [0x05,0x00,0x21,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+
+v_cvt_pk_norm_i16_f32_e64_dpp v5, v1, v2 row_shl:1
+// GFX11: [0x05,0x00,0x21,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+
+v_cvt_pk_norm_i16_f32_e64_dpp v5, v1, v2 row_shl:15
+// GFX11: [0x05,0x00,0x21,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+
+v_cvt_pk_norm_i16_f32_e64_dpp v5, v1, v2 row_shr:1
+// GFX11: [0x05,0x00,0x21,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+
+v_cvt_pk_norm_i16_f32_e64_dpp v5, v1, v2 row_shr:15
+// GFX11: [0x05,0x00,0x21,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+
+v_cvt_pk_norm_i16_f32_e64_dpp v5, v1, v2 row_ror:1
+// GFX11: [0x05,0x00,0x21,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+
+v_cvt_pk_norm_i16_f32_e64_dpp v5, v1, v2 row_ror:15
+// GFX11: [0x05,0x00,0x21,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+
+v_cvt_pk_norm_i16_f32_e64_dpp v5, v1, v2 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x00,0x21,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+
+v_cvt_pk_norm_i16_f32_e64_dpp v5, |v1|, -v2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x01,0x21,0xd7,0xfa,0x04,0x02,0x40,0x01,0x5f,0x01,0x01]
+
+v_cvt_pk_norm_i16_f32_e64_dpp v5, -v1, |v2| row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX11: [0x05,0x02,0x21,0xd7,0xfa,0x04,0x02,0x20,0x01,0x60,0x09,0x13]
+
+v_cvt_pk_norm_i16_f32_e64_dpp v255, -|v255|, -|v255| row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX11: [0xff,0x03,0x21,0xd7,0xfa,0xfe,0x03,0x60,0xff,0x6f,0x05,0x30]
+
+v_cvt_pk_norm_u16_f16_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0]
+// GFX11: [0x05,0x00,0x13,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+
+v_cvt_pk_norm_u16_f16_e64_dpp v5, v1, v2 quad_perm:[0,1,2,3]
+// GFX11: [0x05,0x00,0x13,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+
+v_cvt_pk_norm_u16_f16_e64_dpp v5, v1, v2 row_mirror
+// GFX11: [0x05,0x00,0x13,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+
+v_cvt_pk_norm_u16_f16_e64_dpp v5, v1, v2 row_half_mirror
+// GFX11: [0x05,0x00,0x13,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+
+v_cvt_pk_norm_u16_f16_e64_dpp v5, v1, v2 row_shl:1
+// GFX11: [0x05,0x00,0x13,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+
+v_cvt_pk_norm_u16_f16_e64_dpp v5, v1, v2 row_shl:15
+// GFX11: [0x05,0x00,0x13,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+
+v_cvt_pk_norm_u16_f16_e64_dpp v5, v1, v2 row_shr:1
+// GFX11: [0x05,0x00,0x13,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+
+v_cvt_pk_norm_u16_f16_e64_dpp v5, v1, v2 row_shr:15
+// GFX11: [0x05,0x00,0x13,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+
+v_cvt_pk_norm_u16_f16_e64_dpp v5, v1, v2 row_ror:1
+// GFX11: [0x05,0x00,0x13,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+
+v_cvt_pk_norm_u16_f16_e64_dpp v5, v1, v2 row_ror:15
+// GFX11: [0x05,0x00,0x13,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+
+v_cvt_pk_norm_u16_f16_e64_dpp v5, v1, v2 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x00,0x13,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+
+v_cvt_pk_norm_u16_f16_e64_dpp v5, |v1|, -v2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x01,0x13,0xd7,0xfa,0x04,0x02,0x40,0x01,0x5f,0x01,0x01]
+
+v_cvt_pk_norm_u16_f16_e64_dpp v5, -v1, |v2| row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX11: [0x05,0x02,0x13,0xd7,0xfa,0x04,0x02,0x20,0x01,0x60,0x09,0x13]
+
+v_cvt_pk_norm_u16_f16_e64_dpp v255, -|v255|, -|v255| row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX11: [0xff,0x03,0x13,0xd7,0xfa,0xfe,0x03,0x60,0xff,0x6f,0x05,0x30]
+
+v_cvt_pk_norm_u16_f32_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0]
+// GFX11: [0x05,0x00,0x22,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+
+v_cvt_pk_norm_u16_f32_e64_dpp v5, v1, v2 quad_perm:[0,1,2,3]
+// GFX11: [0x05,0x00,0x22,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+
+v_cvt_pk_norm_u16_f32_e64_dpp v5, v1, v2 row_mirror
+// GFX11: [0x05,0x00,0x22,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+
+v_cvt_pk_norm_u16_f32_e64_dpp v5, v1, v2 row_half_mirror
+// GFX11: [0x05,0x00,0x22,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+
+v_cvt_pk_norm_u16_f32_e64_dpp v5, v1, v2 row_shl:1
+// GFX11: [0x05,0x00,0x22,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+
+v_cvt_pk_norm_u16_f32_e64_dpp v5, v1, v2 row_shl:15
+// GFX11: [0x05,0x00,0x22,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+
+v_cvt_pk_norm_u16_f32_e64_dpp v5, v1, v2 row_shr:1
+// GFX11: [0x05,0x00,0x22,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+
+v_cvt_pk_norm_u16_f32_e64_dpp v5, v1, v2 row_shr:15
+// GFX11: [0x05,0x00,0x22,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+
+v_cvt_pk_norm_u16_f32_e64_dpp v5, v1, v2 row_ror:1
+// GFX11: [0x05,0x00,0x22,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+
+v_cvt_pk_norm_u16_f32_e64_dpp v5, v1, v2 row_ror:15
+// GFX11: [0x05,0x00,0x22,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+
+v_cvt_pk_norm_u16_f32_e64_dpp v5, v1, v2 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x00,0x22,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+
+v_cvt_pk_norm_u16_f32_e64_dpp v5, |v1|, -v2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x01,0x22,0xd7,0xfa,0x04,0x02,0x40,0x01,0x5f,0x01,0x01]
+
+v_cvt_pk_norm_u16_f32_e64_dpp v5, -v1, |v2| row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX11: [0x05,0x02,0x22,0xd7,0xfa,0x04,0x02,0x20,0x01,0x60,0x09,0x13]
+
+v_cvt_pk_norm_u16_f32_e64_dpp v255, -|v255|, -|v255| row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX11: [0xff,0x03,0x22,0xd7,0xfa,0xfe,0x03,0x60,0xff,0x6f,0x05,0x30]
+
+v_div_fixup_f16_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX11: [0x05,0x00,0x54,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_div_fixup_f16_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX11: [0x05,0x00,0x54,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_div_fixup_f16_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX11: [0x05,0x00,0x54,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_div_fixup_f16_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX11: [0x05,0x00,0x54,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_div_fixup_f16_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX11: [0x05,0x00,0x54,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_div_fixup_f16_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX11: [0x05,0x00,0x54,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_div_fixup_f16_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX11: [0x05,0x00,0x54,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_div_fixup_f16_e64_dpp v5, |v1|, v2, -ttmp15 row_shr:15
+// GFX11: [0x05,0x01,0x54,0xd6,0xfa,0x04,0xee,0x81,0x01,0x1f,0x01,0xff]
+
+v_div_fixup_f16_e64_dpp v5, v1, -|v2|, exec_hi row_ror:1
+// GFX11: [0x05,0x02,0x54,0xd6,0xfa,0x04,0xfe,0x41,0x01,0x21,0x01,0xff]
+
+v_div_fixup_f16_e64_dpp v5, -v1, v2, |exec_lo| row_ror:15
+// GFX11: [0x05,0x04,0x54,0xd6,0xfa,0x04,0xfa,0x21,0x01,0x2f,0x01,0xff]
+
+v_div_fixup_f16_e64_dpp v5, -|v1|, -|v2|, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x03,0x54,0xd6,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff]
+
+v_div_fixup_f16_e64_dpp v5, -|v1|, v2, -|-1| row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x05,0x54,0xd6,0xfa,0x04,0x06,0xa3,0x01,0x5f,0x01,0x01]
+
+v_div_fixup_f16_e64_dpp v5, v1, -|v2|, -|0.5| row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX11: [0x05,0x06,0x54,0xd6,0xfa,0x04,0xc2,0xc3,0x01,0x60,0x09,0x13]
+
+v_div_fixup_f16_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX11: [0xff,0x87,0x54,0xd6,0xfa,0xfe,0xf7,0xe3,0xff,0x6f,0x05,0x30]
+
+v_fma_f16_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX11: [0x05,0x00,0x48,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_fma_f16_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX11: [0x05,0x00,0x48,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_fma_f16_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX11: [0x05,0x00,0x48,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_fma_f16_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX11: [0x05,0x00,0x48,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_fma_f16_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX11: [0x05,0x00,0x48,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_fma_f16_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX11: [0x05,0x00,0x48,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_fma_f16_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX11: [0x05,0x00,0x48,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_fma_f16_e64_dpp v5, |v1|, v2, -ttmp15 row_shr:15
+// GFX11: [0x05,0x01,0x48,0xd6,0xfa,0x04,0xee,0x81,0x01,0x1f,0x01,0xff]
+
+v_fma_f16_e64_dpp v5, v1, -|v2|, exec_hi row_ror:1
+// GFX11: [0x05,0x02,0x48,0xd6,0xfa,0x04,0xfe,0x41,0x01,0x21,0x01,0xff]
+
+v_fma_f16_e64_dpp v5, -v1, v2, |exec_lo| row_ror:15
+// GFX11: [0x05,0x04,0x48,0xd6,0xfa,0x04,0xfa,0x21,0x01,0x2f,0x01,0xff]
+
+v_fma_f16_e64_dpp v5, -|v1|, -|v2|, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x03,0x48,0xd6,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff]
+
+v_fma_f16_e64_dpp v5, -|v1|, v2, -|-1| row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x05,0x48,0xd6,0xfa,0x04,0x06,0xa3,0x01,0x5f,0x01,0x01]
+
+v_fma_f16_e64_dpp v5, v1, -|v2|, -|0.5| row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX11: [0x05,0x06,0x48,0xd6,0xfa,0x04,0xc2,0xc3,0x01,0x60,0x09,0x13]
+
+v_fma_f16_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX11: [0xff,0x87,0x48,0xd6,0xfa,0xfe,0xf7,0xe3,0xff,0x6f,0x05,0x30]
+
+v_fma_f32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX11: [0x05,0x00,0x13,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_fma_f32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX11: [0x05,0x00,0x13,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_fma_f32_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX11: [0x05,0x00,0x13,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_fma_f32_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX11: [0x05,0x00,0x13,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_fma_f32_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX11: [0x05,0x00,0x13,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_fma_f32_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX11: [0x05,0x00,0x13,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_fma_f32_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX11: [0x05,0x00,0x13,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_fma_f32_e64_dpp v5, |v1|, v2, -ttmp15 row_shr:15
+// GFX11: [0x05,0x01,0x13,0xd6,0xfa,0x04,0xee,0x81,0x01,0x1f,0x01,0xff]
+
+v_fma_f32_e64_dpp v5, v1, -|v2|, exec_hi row_ror:1
+// GFX11: [0x05,0x02,0x13,0xd6,0xfa,0x04,0xfe,0x41,0x01,0x21,0x01,0xff]
+
+v_fma_f32_e64_dpp v5, -v1, v2, |exec_lo| row_ror:15
+// GFX11: [0x05,0x04,0x13,0xd6,0xfa,0x04,0xfa,0x21,0x01,0x2f,0x01,0xff]
+
+v_fma_f32_e64_dpp v5, -|v1|, -|v2|, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x03,0x13,0xd6,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff]
+
+v_fma_f32_e64_dpp v5, -|v1|, v2, -|-1| mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x05,0x13,0xd6,0xfa,0x04,0x06,0xab,0x01,0x5f,0x01,0x01]
+
+v_fma_f32_e64_dpp v5, v1, -|v2|, -|0.5| mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX11: [0x05,0x06,0x13,0xd6,0xfa,0x04,0xc2,0xd3,0x01,0x60,0x09,0x13]
+
+v_fma_f32_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX11: [0xff,0x87,0x13,0xd6,0xfa,0xfe,0xf7,0xfb,0xff,0x6f,0x05,0x30]
+
+v_ldexp_f32_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0]
+// GFX11: [0x05,0x00,0x1c,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+
+v_ldexp_f32_e64_dpp v5, v1, v2 quad_perm:[0,1,2,3]
+// GFX11: [0x05,0x00,0x1c,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+
+v_ldexp_f32_e64_dpp v5, v1, v2 row_mirror
+// GFX11: [0x05,0x00,0x1c,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+
+v_ldexp_f32_e64_dpp v5, v1, v2 row_half_mirror
+// GFX11: [0x05,0x00,0x1c,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+
+v_ldexp_f32_e64_dpp v5, v1, v2 row_shl:1
+// GFX11: [0x05,0x00,0x1c,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+
+v_ldexp_f32_e64_dpp v5, v1, v2 row_shl:15
+// GFX11: [0x05,0x00,0x1c,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+
+v_ldexp_f32_e64_dpp v5, v1, v2 row_shr:1
+// GFX11: [0x05,0x00,0x1c,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+
+v_ldexp_f32_e64_dpp v5, v1, v2 row_shr:15
+// GFX11: [0x05,0x00,0x1c,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+
+v_ldexp_f32_e64_dpp v5, v1, v2 row_ror:1
+// GFX11: [0x05,0x00,0x1c,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+
+v_ldexp_f32_e64_dpp v5, v1, v2 row_ror:15
+// GFX11: [0x05,0x00,0x1c,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+
+v_ldexp_f32_e64_dpp v5, v1, v2 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x00,0x1c,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+
+v_ldexp_f32_e64_dpp v5, v1, v2 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x00,0x1c,0xd7,0xfa,0x04,0x02,0x08,0x01,0x5f,0x01,0x01]
+
+v_ldexp_f32_e64_dpp v5, v1, v2 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX11: [0x05,0x00,0x1c,0xd7,0xfa,0x04,0x02,0x10,0x01,0x60,0x09,0x13]
+
+v_ldexp_f32_e64_dpp v255, -|v255|, v255 clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX11: [0xff,0x81,0x1c,0xd7,0xfa,0xfe,0x03,0x38,0xff,0x6f,0x05,0x30]
+
+v_lerp_u8_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX11: [0x05,0x00,0x15,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_lerp_u8_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX11: [0x05,0x00,0x15,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_lerp_u8_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX11: [0x05,0x00,0x15,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_lerp_u8_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX11: [0x05,0x00,0x15,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_lerp_u8_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX11: [0x05,0x00,0x15,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_lerp_u8_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX11: [0x05,0x00,0x15,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_lerp_u8_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX11: [0x05,0x00,0x15,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_lerp_u8_e64_dpp v5, v1, v2, ttmp15 row_shr:15
+// GFX11: [0x05,0x00,0x15,0xd6,0xfa,0x04,0xee,0x01,0x01,0x1f,0x01,0xff]
+
+v_lerp_u8_e64_dpp v5, v1, v2, exec_hi row_ror:1
+// GFX11: [0x05,0x00,0x15,0xd6,0xfa,0x04,0xfe,0x01,0x01,0x21,0x01,0xff]
+
+v_lerp_u8_e64_dpp v5, v1, v2, exec_lo row_ror:15
+// GFX11: [0x05,0x00,0x15,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x2f,0x01,0xff]
+
+v_lerp_u8_e64_dpp v5, v1, v2, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x00,0x15,0xd6,0xfa,0x04,0xf2,0x01,0x01,0x50,0x01,0xff]
+
+v_lerp_u8_e64_dpp v5, v1, v2, -1 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x00,0x15,0xd6,0xfa,0x04,0x06,0x03,0x01,0x5f,0x01,0x01]
+
+v_lerp_u8_e64_dpp v5, v1, v2, 0.5 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX11: [0x05,0x00,0x15,0xd6,0xfa,0x04,0xc2,0x03,0x01,0x60,0x09,0x13]
+
+v_lerp_u8_e64_dpp v255, v255, v255, src_scc row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX11: [0xff,0x00,0x15,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x05,0x30]
+
+v_lshl_add_u32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX11: [0x05,0x00,0x46,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_lshl_add_u32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX11: [0x05,0x00,0x46,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_lshl_add_u32_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX11: [0x05,0x00,0x46,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_lshl_add_u32_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX11: [0x05,0x00,0x46,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_lshl_add_u32_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX11: [0x05,0x00,0x46,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_lshl_add_u32_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX11: [0x05,0x00,0x46,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_lshl_add_u32_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX11: [0x05,0x00,0x46,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_lshl_add_u32_e64_dpp v5, v1, v2, ttmp15 row_shr:15
+// GFX11: [0x05,0x00,0x46,0xd6,0xfa,0x04,0xee,0x01,0x01,0x1f,0x01,0xff]
+
+v_lshl_add_u32_e64_dpp v5, v1, v2, exec_hi row_ror:1
+// GFX11: [0x05,0x00,0x46,0xd6,0xfa,0x04,0xfe,0x01,0x01,0x21,0x01,0xff]
+
+v_lshl_add_u32_e64_dpp v5, v1, v2, exec_lo row_ror:15
+// GFX11: [0x05,0x00,0x46,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x2f,0x01,0xff]
+
+v_lshl_add_u32_e64_dpp v5, v1, v2, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x00,0x46,0xd6,0xfa,0x04,0xf2,0x01,0x01,0x50,0x01,0xff]
+
+v_lshl_add_u32_e64_dpp v5, v1, v2, -1 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x00,0x46,0xd6,0xfa,0x04,0x06,0x03,0x01,0x5f,0x01,0x01]
+
+v_lshl_add_u32_e64_dpp v5, v1, v2, 0.5 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX11: [0x05,0x00,0x46,0xd6,0xfa,0x04,0xc2,0x03,0x01,0x60,0x09,0x13]
+
+v_lshl_add_u32_e64_dpp v255, v255, v255, src_scc row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX11: [0xff,0x00,0x46,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x05,0x30]
+
+v_lshl_or_b32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX11: [0x05,0x00,0x56,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_lshl_or_b32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX11: [0x05,0x00,0x56,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_lshl_or_b32_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX11: [0x05,0x00,0x56,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_lshl_or_b32_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX11: [0x05,0x00,0x56,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_lshl_or_b32_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX11: [0x05,0x00,0x56,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_lshl_or_b32_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX11: [0x05,0x00,0x56,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_lshl_or_b32_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX11: [0x05,0x00,0x56,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_lshl_or_b32_e64_dpp v5, v1, v2, ttmp15 row_shr:15
+// GFX11: [0x05,0x00,0x56,0xd6,0xfa,0x04,0xee,0x01,0x01,0x1f,0x01,0xff]
+
+v_lshl_or_b32_e64_dpp v5, v1, v2, exec_hi row_ror:1
+// GFX11: [0x05,0x00,0x56,0xd6,0xfa,0x04,0xfe,0x01,0x01,0x21,0x01,0xff]
+
+v_lshl_or_b32_e64_dpp v5, v1, v2, exec_lo row_ror:15
+// GFX11: [0x05,0x00,0x56,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x2f,0x01,0xff]
+
+v_lshl_or_b32_e64_dpp v5, v1, v2, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x00,0x56,0xd6,0xfa,0x04,0xf2,0x01,0x01,0x50,0x01,0xff]
+
+v_lshl_or_b32_e64_dpp v5, v1, v2, -1 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x00,0x56,0xd6,0xfa,0x04,0x06,0x03,0x01,0x5f,0x01,0x01]
+
+v_lshl_or_b32_e64_dpp v5, v1, v2, 0.5 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX11: [0x05,0x00,0x56,0xd6,0xfa,0x04,0xc2,0x03,0x01,0x60,0x09,0x13]
+
+v_lshl_or_b32_e64_dpp v255, v255, v255, src_scc row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX11: [0xff,0x00,0x56,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x05,0x30]
+
+v_lshlrev_b16_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0]
+// GFX11: [0x05,0x00,0x38,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+
+v_lshlrev_b16_e64_dpp v5, v1, v2 quad_perm:[0,1,2,3]
+// GFX11: [0x05,0x00,0x38,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+
+v_lshlrev_b16_e64_dpp v5, v1, v2 row_mirror
+// GFX11: [0x05,0x00,0x38,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+
+v_lshlrev_b16_e64_dpp v5, v1, v2 row_half_mirror
+// GFX11: [0x05,0x00,0x38,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+
+v_lshlrev_b16_e64_dpp v5, v1, v2 row_shl:1
+// GFX11: [0x05,0x00,0x38,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+
+v_lshlrev_b16_e64_dpp v5, v1, v2 row_shl:15
+// GFX11: [0x05,0x00,0x38,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+
+v_lshlrev_b16_e64_dpp v5, v1, v2 row_shr:1
+// GFX11: [0x05,0x00,0x38,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+
+v_lshlrev_b16_e64_dpp v5, v1, v2 row_shr:15
+// GFX11: [0x05,0x00,0x38,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+
+v_lshlrev_b16_e64_dpp v5, v1, v2 row_ror:1
+// GFX11: [0x05,0x00,0x38,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+
+v_lshlrev_b16_e64_dpp v5, v1, v2 row_ror:15
+// GFX11: [0x05,0x00,0x38,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+
+v_lshlrev_b16_e64_dpp v5, v1, v2 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x00,0x38,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+
+v_lshlrev_b16_e64_dpp v5, v1, v2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x00,0x38,0xd7,0xfa,0x04,0x02,0x00,0x01,0x5f,0x01,0x01]
+
+v_lshlrev_b16_e64_dpp v5, v1, v2 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX11: [0x05,0x00,0x38,0xd7,0xfa,0x04,0x02,0x00,0x01,0x60,0x09,0x13]
+
+v_lshlrev_b16_e64_dpp v255, v255, v255 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX11: [0xff,0x00,0x38,0xd7,0xfa,0xfe,0x03,0x00,0xff,0x6f,0x05,0x30]
+
+v_lshrrev_b16_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0]
+// GFX11: [0x05,0x00,0x39,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+
+v_lshrrev_b16_e64_dpp v5, v1, v2 quad_perm:[0,1,2,3]
+// GFX11: [0x05,0x00,0x39,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+
+v_lshrrev_b16_e64_dpp v5, v1, v2 row_mirror
+// GFX11: [0x05,0x00,0x39,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+
+v_lshrrev_b16_e64_dpp v5, v1, v2 row_half_mirror
+// GFX11: [0x05,0x00,0x39,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+
+v_lshrrev_b16_e64_dpp v5, v1, v2 row_shl:1
+// GFX11: [0x05,0x00,0x39,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+
+v_lshrrev_b16_e64_dpp v5, v1, v2 row_shl:15
+// GFX11: [0x05,0x00,0x39,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+
+v_lshrrev_b16_e64_dpp v5, v1, v2 row_shr:1
+// GFX11: [0x05,0x00,0x39,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+
+v_lshrrev_b16_e64_dpp v5, v1, v2 row_shr:15
+// GFX11: [0x05,0x00,0x39,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+
+v_lshrrev_b16_e64_dpp v5, v1, v2 row_ror:1
+// GFX11: [0x05,0x00,0x39,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+
+v_lshrrev_b16_e64_dpp v5, v1, v2 row_ror:15
+// GFX11: [0x05,0x00,0x39,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+
+v_lshrrev_b16_e64_dpp v5, v1, v2 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x00,0x39,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+
+v_lshrrev_b16_e64_dpp v5, v1, v2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x00,0x39,0xd7,0xfa,0x04,0x02,0x00,0x01,0x5f,0x01,0x01]
+
+v_lshrrev_b16_e64_dpp v5, v1, v2 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX11: [0x05,0x00,0x39,0xd7,0xfa,0x04,0x02,0x00,0x01,0x60,0x09,0x13]
+
+v_lshrrev_b16_e64_dpp v255, v255, v255 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX11: [0xff,0x00,0x39,0xd7,0xfa,0xfe,0x03,0x00,0xff,0x6f,0x05,0x30]
+
+v_mad_i16_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX11: [0x05,0x00,0x53,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_mad_i16_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX11: [0x05,0x00,0x53,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_mad_i16_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX11: [0x05,0x00,0x53,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_mad_i16_e64_dpp v5, v1, v2, v3 row_half_mirror
+// GFX11: [0x05,0x00,0x53,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x41,0x01,0xff]
+
+v_mad_i16_e64_dpp v5, v1, v2, v255 row_shl:1
+// GFX11: [0x05,0x00,0x53,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x01,0x01,0xff]
+
+v_mad_i16_e64_dpp v5, v1, v2, s105 row_shl:15
+// GFX11: [0x05,0x00,0x53,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x0f,0x01,0xff]
+
+v_mad_i16_e64_dpp v5, v1, v2, vcc_hi row_shr:1
+// GFX11: [0x05,0x00,0x53,0xd6,0xfa,0x04,0xae,0x01,0x01,0x11,0x01,0xff]
+
+v_mad_i16_e64_dpp v5, v1, v2, vcc_lo row_shr:15
+// GFX11: [0x05,0x00,0x53,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x1f,0x01,0xff]
+
+v_mad_i16_e64_dpp v5, v1, v2, ttmp15 row_ror:1
+// GFX11: [0x05,0x00,0x53,0xd6,0xfa,0x04,0xee,0x01,0x01,0x21,0x01,0xff]
+
+v_mad_i16_e64_dpp v5, v1, v2, exec_hi row_ror:15
+// GFX11: [0x05,0x00,0x53,0xd6,0xfa,0x04,0xfe,0x01,0x01,0x2f,0x01,0xff]
+
+v_mad_i16_e64_dpp v5, v1, v2, exec_lo row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x00,0x53,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x50,0x01,0xff]
+
+v_mad_i16_e64_dpp v5, v1, v2, null row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x00,0x53,0xd6,0xfa,0x04,0xf2,0x01,0x01,0x5f,0x01,0x01]
+
+v_mad_i16_e64_dpp v5, v1, v2, -1 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX11: [0x05,0x00,0x53,0xd6,0xfa,0x04,0x06,0x03,0x01,0x60,0x09,0x13]
+
+v_mad_i16_e64_dpp v255, v255, v255, src_scc clamp row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX11: [0xff,0x80,0x53,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x05,0x30]
+
+v_mad_i32_i16_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX11: [0x05,0x00,0x5a,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_mad_i32_i16_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX11: [0x05,0x00,0x5a,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_mad_i32_i16_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX11: [0x05,0x00,0x5a,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_mad_i32_i16_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX11: [0x05,0x00,0x5a,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_mad_i32_i16_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX11: [0x05,0x00,0x5a,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_mad_i32_i16_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX11: [0x05,0x00,0x5a,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_mad_i32_i16_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX11: [0x05,0x00,0x5a,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_mad_i32_i16_e64_dpp v5, v1, v2, ttmp15 row_shr:15
+// GFX11: [0x05,0x00,0x5a,0xd6,0xfa,0x04,0xee,0x01,0x01,0x1f,0x01,0xff]
+
+v_mad_i32_i16_e64_dpp v5, v1, v2, exec_hi row_ror:1
+// GFX11: [0x05,0x00,0x5a,0xd6,0xfa,0x04,0xfe,0x01,0x01,0x21,0x01,0xff]
+
+v_mad_i32_i16_e64_dpp v5, v1, v2, exec_lo row_ror:15
+// GFX11: [0x05,0x00,0x5a,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x2f,0x01,0xff]
+
+v_mad_i32_i16_e64_dpp v5, v1, v2, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x00,0x5a,0xd6,0xfa,0x04,0xf2,0x01,0x01,0x50,0x01,0xff]
+
+v_mad_i32_i16_e64_dpp v5, v1, v2, -1 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x00,0x5a,0xd6,0xfa,0x04,0x06,0x03,0x01,0x5f,0x01,0x01]
+
+v_mad_i32_i16_e64_dpp v5, v1, v2, 0.5 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX11: [0x05,0x00,0x5a,0xd6,0xfa,0x04,0xc2,0x03,0x01,0x60,0x09,0x13]
+
+v_mad_i32_i16_e64_dpp v255, v255, v255, src_scc clamp row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX11: [0xff,0x80,0x5a,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x05,0x30]
+
+v_mad_i32_i24_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX11: [0x05,0x00,0x0a,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_mad_i32_i24_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX11: [0x05,0x00,0x0a,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_mad_i32_i24_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX11: [0x05,0x00,0x0a,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_mad_i32_i24_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX11: [0x05,0x00,0x0a,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_mad_i32_i24_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX11: [0x05,0x00,0x0a,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_mad_i32_i24_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX11: [0x05,0x00,0x0a,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_mad_i32_i24_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX11: [0x05,0x00,0x0a,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_mad_i32_i24_e64_dpp v5, v1, v2, ttmp15 row_shr:15
+// GFX11: [0x05,0x00,0x0a,0xd6,0xfa,0x04,0xee,0x01,0x01,0x1f,0x01,0xff]
+
+v_mad_i32_i24_e64_dpp v5, v1, v2, exec_hi row_ror:1
+// GFX11: [0x05,0x00,0x0a,0xd6,0xfa,0x04,0xfe,0x01,0x01,0x21,0x01,0xff]
+
+v_mad_i32_i24_e64_dpp v5, v1, v2, exec_lo row_ror:15
+// GFX11: [0x05,0x00,0x0a,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x2f,0x01,0xff]
+
+v_mad_i32_i24_e64_dpp v5, v1, v2, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x00,0x0a,0xd6,0xfa,0x04,0xf2,0x01,0x01,0x50,0x01,0xff]
+
+v_mad_i32_i24_e64_dpp v5, v1, v2, -1 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x00,0x0a,0xd6,0xfa,0x04,0x06,0x03,0x01,0x5f,0x01,0x01]
+
+v_mad_i32_i24_e64_dpp v5, v1, v2, 0.5 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX11: [0x05,0x00,0x0a,0xd6,0xfa,0x04,0xc2,0x03,0x01,0x60,0x09,0x13]
+
+v_mad_i32_i24_e64_dpp v255, v255, v255, src_scc clamp row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX11: [0xff,0x80,0x0a,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x05,0x30]
+
+v_mad_u16_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX11: [0x05,0x00,0x41,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_mad_u16_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX11: [0x05,0x00,0x41,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_mad_u16_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX11: [0x05,0x00,0x41,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_mad_u16_e64_dpp v5, v1, v2, v3 row_half_mirror
+// GFX11: [0x05,0x00,0x41,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x41,0x01,0xff]
+
+v_mad_u16_e64_dpp v5, v1, v2, v255 row_shl:1
+// GFX11: [0x05,0x00,0x41,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x01,0x01,0xff]
+
+v_mad_u16_e64_dpp v5, v1, v2, s105 row_shl:15
+// GFX11: [0x05,0x00,0x41,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x0f,0x01,0xff]
+
+v_mad_u16_e64_dpp v5, v1, v2, vcc_hi row_shr:1
+// GFX11: [0x05,0x00,0x41,0xd6,0xfa,0x04,0xae,0x01,0x01,0x11,0x01,0xff]
+
+v_mad_u16_e64_dpp v5, v1, v2, vcc_lo row_shr:15
+// GFX11: [0x05,0x00,0x41,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x1f,0x01,0xff]
+
+v_mad_u16_e64_dpp v5, v1, v2, ttmp15 row_ror:1
+// GFX11: [0x05,0x00,0x41,0xd6,0xfa,0x04,0xee,0x01,0x01,0x21,0x01,0xff]
+
+v_mad_u16_e64_dpp v5, v1, v2, exec_hi row_ror:15
+// GFX11: [0x05,0x00,0x41,0xd6,0xfa,0x04,0xfe,0x01,0x01,0x2f,0x01,0xff]
+
+v_mad_u16_e64_dpp v5, v1, v2, exec_lo row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x00,0x41,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x50,0x01,0xff]
+
+v_mad_u16_e64_dpp v5, v1, v2, null row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x00,0x41,0xd6,0xfa,0x04,0xf2,0x01,0x01,0x5f,0x01,0x01]
+
+v_mad_u16_e64_dpp v5, v1, v2, -1 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX11: [0x05,0x00,0x41,0xd6,0xfa,0x04,0x06,0x03,0x01,0x60,0x09,0x13]
+
+v_mad_u16_e64_dpp v255, v255, v255, src_scc clamp row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX11: [0xff,0x80,0x41,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x05,0x30]
+
+v_mad_u32_u16_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX11: [0x05,0x00,0x59,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_mad_u32_u16_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX11: [0x05,0x00,0x59,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_mad_u32_u16_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX11: [0x05,0x00,0x59,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_mad_u32_u16_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX11: [0x05,0x00,0x59,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_mad_u32_u16_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX11: [0x05,0x00,0x59,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_mad_u32_u16_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX11: [0x05,0x00,0x59,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_mad_u32_u16_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX11: [0x05,0x00,0x59,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_mad_u32_u16_e64_dpp v5, v1, v2, ttmp15 row_shr:15
+// GFX11: [0x05,0x00,0x59,0xd6,0xfa,0x04,0xee,0x01,0x01,0x1f,0x01,0xff]
+
+v_mad_u32_u16_e64_dpp v5, v1, v2, exec_hi row_ror:1
+// GFX11: [0x05,0x00,0x59,0xd6,0xfa,0x04,0xfe,0x01,0x01,0x21,0x01,0xff]
+
+v_mad_u32_u16_e64_dpp v5, v1, v2, exec_lo row_ror:15
+// GFX11: [0x05,0x00,0x59,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x2f,0x01,0xff]
+
+v_mad_u32_u16_e64_dpp v5, v1, v2, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x00,0x59,0xd6,0xfa,0x04,0xf2,0x01,0x01,0x50,0x01,0xff]
+
+v_mad_u32_u16_e64_dpp v5, v1, v2, -1 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x00,0x59,0xd6,0xfa,0x04,0x06,0x03,0x01,0x5f,0x01,0x01]
+
+v_mad_u32_u16_e64_dpp v5, v1, v2, 0.5 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX11: [0x05,0x00,0x59,0xd6,0xfa,0x04,0xc2,0x03,0x01,0x60,0x09,0x13]
+
+v_mad_u32_u16_e64_dpp v255, v255, v255, src_scc clamp row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX11: [0xff,0x80,0x59,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x05,0x30]
+
+v_mad_u32_u24_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX11: [0x05,0x00,0x0b,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_mad_u32_u24_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX11: [0x05,0x00,0x0b,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_mad_u32_u24_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX11: [0x05,0x00,0x0b,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_mad_u32_u24_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX11: [0x05,0x00,0x0b,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_mad_u32_u24_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX11: [0x05,0x00,0x0b,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_mad_u32_u24_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX11: [0x05,0x00,0x0b,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_mad_u32_u24_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX11: [0x05,0x00,0x0b,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_mad_u32_u24_e64_dpp v5, v1, v2, ttmp15 row_shr:15
+// GFX11: [0x05,0x00,0x0b,0xd6,0xfa,0x04,0xee,0x01,0x01,0x1f,0x01,0xff]
+
+v_mad_u32_u24_e64_dpp v5, v1, v2, exec_hi row_ror:1
+// GFX11: [0x05,0x00,0x0b,0xd6,0xfa,0x04,0xfe,0x01,0x01,0x21,0x01,0xff]
+
+v_mad_u32_u24_e64_dpp v5, v1, v2, exec_lo row_ror:15
+// GFX11: [0x05,0x00,0x0b,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x2f,0x01,0xff]
+
+v_mad_u32_u24_e64_dpp v5, v1, v2, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x00,0x0b,0xd6,0xfa,0x04,0xf2,0x01,0x01,0x50,0x01,0xff]
+
+v_mad_u32_u24_e64_dpp v5, v1, v2, -1 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x00,0x0b,0xd6,0xfa,0x04,0x06,0x03,0x01,0x5f,0x01,0x01]
+
+v_mad_u32_u24_e64_dpp v5, v1, v2, 0.5 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX11: [0x05,0x00,0x0b,0xd6,0xfa,0x04,0xc2,0x03,0x01,0x60,0x09,0x13]
+
+v_mad_u32_u24_e64_dpp v255, v255, v255, src_scc clamp row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX11: [0xff,0x80,0x0b,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x05,0x30]
+
+v_max3_f16_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX11: [0x05,0x00,0x4c,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_max3_f16_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX11: [0x05,0x00,0x4c,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_max3_f16_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX11: [0x05,0x00,0x4c,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_max3_f16_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX11: [0x05,0x00,0x4c,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_max3_f16_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX11: [0x05,0x00,0x4c,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_max3_f16_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX11: [0x05,0x00,0x4c,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_max3_f16_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX11: [0x05,0x00,0x4c,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_max3_f16_e64_dpp v5, |v1|, v2, -ttmp15 row_shr:15
+// GFX11: [0x05,0x01,0x4c,0xd6,0xfa,0x04,0xee,0x81,0x01,0x1f,0x01,0xff]
+
+v_max3_f16_e64_dpp v5, v1, -|v2|, exec_hi row_ror:1
+// GFX11: [0x05,0x02,0x4c,0xd6,0xfa,0x04,0xfe,0x41,0x01,0x21,0x01,0xff]
+
+v_max3_f16_e64_dpp v5, -v1, v2, |exec_lo| row_ror:15
+// GFX11: [0x05,0x04,0x4c,0xd6,0xfa,0x04,0xfa,0x21,0x01,0x2f,0x01,0xff]
+
+v_max3_f16_e64_dpp v5, -|v1|, -|v2|, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x03,0x4c,0xd6,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff]
+
+v_max3_f16_e64_dpp v5, -|v1|, v2, -|-1| row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x05,0x4c,0xd6,0xfa,0x04,0x06,0xa3,0x01,0x5f,0x01,0x01]
+
+v_max3_f16_e64_dpp v5, v1, -|v2|, -|0.5| row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX11: [0x05,0x06,0x4c,0xd6,0xfa,0x04,0xc2,0xc3,0x01,0x60,0x09,0x13]
+
+v_max3_f16_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX11: [0xff,0x87,0x4c,0xd6,0xfa,0xfe,0xf7,0xe3,0xff,0x6f,0x05,0x30]
+
+v_max3_f32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX11: [0x05,0x00,0x1c,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_max3_f32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX11: [0x05,0x00,0x1c,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_max3_f32_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX11: [0x05,0x00,0x1c,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_max3_f32_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX11: [0x05,0x00,0x1c,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_max3_f32_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX11: [0x05,0x00,0x1c,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_max3_f32_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX11: [0x05,0x00,0x1c,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_max3_f32_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX11: [0x05,0x00,0x1c,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_max3_f32_e64_dpp v5, |v1|, v2, -ttmp15 row_shr:15
+// GFX11: [0x05,0x01,0x1c,0xd6,0xfa,0x04,0xee,0x81,0x01,0x1f,0x01,0xff]
+
+v_max3_f32_e64_dpp v5, v1, -|v2|, exec_hi row_ror:1
+// GFX11: [0x05,0x02,0x1c,0xd6,0xfa,0x04,0xfe,0x41,0x01,0x21,0x01,0xff]
+
+v_max3_f32_e64_dpp v5, -v1, v2, |exec_lo| row_ror:15
+// GFX11: [0x05,0x04,0x1c,0xd6,0xfa,0x04,0xfa,0x21,0x01,0x2f,0x01,0xff]
+
+v_max3_f32_e64_dpp v5, -|v1|, -|v2|, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x03,0x1c,0xd6,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff]
+
+v_max3_f32_e64_dpp v5, -|v1|, v2, -|-1| mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x05,0x1c,0xd6,0xfa,0x04,0x06,0xab,0x01,0x5f,0x01,0x01]
+
+v_max3_f32_e64_dpp v5, v1, -|v2|, -|0.5| mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX11: [0x05,0x06,0x1c,0xd6,0xfa,0x04,0xc2,0xd3,0x01,0x60,0x09,0x13]
+
+v_max3_f32_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX11: [0xff,0x87,0x1c,0xd6,0xfa,0xfe,0xf7,0xfb,0xff,0x6f,0x05,0x30]
+
+v_max3_i16_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX11: [0x05,0x00,0x4d,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_max3_i16_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX11: [0x05,0x00,0x4d,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_max3_i16_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX11: [0x05,0x00,0x4d,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_max3_i16_e64_dpp v5, v1, v2, v3 row_half_mirror
+// GFX11: [0x05,0x00,0x4d,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x41,0x01,0xff]
+
+v_max3_i16_e64_dpp v5, v1, v2, v255 row_shl:1
+// GFX11: [0x05,0x00,0x4d,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x01,0x01,0xff]
+
+v_max3_i16_e64_dpp v5, v1, v2, s105 row_shl:15
+// GFX11: [0x05,0x00,0x4d,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x0f,0x01,0xff]
+
+v_max3_i16_e64_dpp v5, v1, v2, vcc_hi row_shr:1
+// GFX11: [0x05,0x00,0x4d,0xd6,0xfa,0x04,0xae,0x01,0x01,0x11,0x01,0xff]
+
+v_max3_i16_e64_dpp v5, v1, v2, vcc_lo row_shr:15
+// GFX11: [0x05,0x00,0x4d,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x1f,0x01,0xff]
+
+v_max3_i16_e64_dpp v5, v1, v2, ttmp15 row_ror:1
+// GFX11: [0x05,0x00,0x4d,0xd6,0xfa,0x04,0xee,0x01,0x01,0x21,0x01,0xff]
+
+v_max3_i16_e64_dpp v5, v1, v2, exec_hi row_ror:15
+// GFX11: [0x05,0x00,0x4d,0xd6,0xfa,0x04,0xfe,0x01,0x01,0x2f,0x01,0xff]
+
+v_max3_i16_e64_dpp v5, v1, v2, exec_lo row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x00,0x4d,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x50,0x01,0xff]
+
+v_max3_i16_e64_dpp v5, v1, v2, null row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x00,0x4d,0xd6,0xfa,0x04,0xf2,0x01,0x01,0x5f,0x01,0x01]
+
+v_max3_i16_e64_dpp v5, v1, v2, -1 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX11: [0x05,0x00,0x4d,0xd6,0xfa,0x04,0x06,0x03,0x01,0x60,0x09,0x13]
+
+v_max3_i16_e64_dpp v255, v255, v255, src_scc row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX11: [0xff,0x00,0x4d,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x05,0x30]
+
+v_max3_i32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX11: [0x05,0x00,0x1d,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_max3_i32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX11: [0x05,0x00,0x1d,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_max3_i32_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX11: [0x05,0x00,0x1d,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_max3_i32_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX11: [0x05,0x00,0x1d,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_max3_i32_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX11: [0x05,0x00,0x1d,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_max3_i32_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX11: [0x05,0x00,0x1d,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_max3_i32_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX11: [0x05,0x00,0x1d,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_max3_i32_e64_dpp v5, v1, v2, ttmp15 row_shr:15
+// GFX11: [0x05,0x00,0x1d,0xd6,0xfa,0x04,0xee,0x01,0x01,0x1f,0x01,0xff]
+
+v_max3_i32_e64_dpp v5, v1, v2, exec_hi row_ror:1
+// GFX11: [0x05,0x00,0x1d,0xd6,0xfa,0x04,0xfe,0x01,0x01,0x21,0x01,0xff]
+
+v_max3_i32_e64_dpp v5, v1, v2, exec_lo row_ror:15
+// GFX11: [0x05,0x00,0x1d,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x2f,0x01,0xff]
+
+v_max3_i32_e64_dpp v5, v1, v2, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x00,0x1d,0xd6,0xfa,0x04,0xf2,0x01,0x01,0x50,0x01,0xff]
+
+v_max3_i32_e64_dpp v5, v1, v2, -1 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x00,0x1d,0xd6,0xfa,0x04,0x06,0x03,0x01,0x5f,0x01,0x01]
+
+v_max3_i32_e64_dpp v5, v1, v2, 0.5 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX11: [0x05,0x00,0x1d,0xd6,0xfa,0x04,0xc2,0x03,0x01,0x60,0x09,0x13]
+
+v_max3_i32_e64_dpp v255, v255, v255, src_scc row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX11: [0xff,0x00,0x1d,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x05,0x30]
+
+v_max3_u16_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX11: [0x05,0x00,0x4e,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_max3_u16_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX11: [0x05,0x00,0x4e,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_max3_u16_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX11: [0x05,0x00,0x4e,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_max3_u16_e64_dpp v5, v1, v2, v3 row_half_mirror
+// GFX11: [0x05,0x00,0x4e,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x41,0x01,0xff]
+
+v_max3_u16_e64_dpp v5, v1, v2, v255 row_shl:1
+// GFX11: [0x05,0x00,0x4e,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x01,0x01,0xff]
+
+v_max3_u16_e64_dpp v5, v1, v2, s105 row_shl:15
+// GFX11: [0x05,0x00,0x4e,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x0f,0x01,0xff]
+
+v_max3_u16_e64_dpp v5, v1, v2, vcc_hi row_shr:1
+// GFX11: [0x05,0x00,0x4e,0xd6,0xfa,0x04,0xae,0x01,0x01,0x11,0x01,0xff]
+
+v_max3_u16_e64_dpp v5, v1, v2, vcc_lo row_shr:15
+// GFX11: [0x05,0x00,0x4e,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x1f,0x01,0xff]
+
+v_max3_u16_e64_dpp v5, v1, v2, ttmp15 row_ror:1
+// GFX11: [0x05,0x00,0x4e,0xd6,0xfa,0x04,0xee,0x01,0x01,0x21,0x01,0xff]
+
+v_max3_u16_e64_dpp v5, v1, v2, exec_hi row_ror:15
+// GFX11: [0x05,0x00,0x4e,0xd6,0xfa,0x04,0xfe,0x01,0x01,0x2f,0x01,0xff]
+
+v_max3_u16_e64_dpp v5, v1, v2, exec_lo row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x00,0x4e,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x50,0x01,0xff]
+
+v_max3_u16_e64_dpp v5, v1, v2, null row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x00,0x4e,0xd6,0xfa,0x04,0xf2,0x01,0x01,0x5f,0x01,0x01]
+
+v_max3_u16_e64_dpp v5, v1, v2, -1 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX11: [0x05,0x00,0x4e,0xd6,0xfa,0x04,0x06,0x03,0x01,0x60,0x09,0x13]
+
+v_max3_u16_e64_dpp v255, v255, v255, src_scc row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX11: [0xff,0x00,0x4e,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x05,0x30]
+
+v_max3_u32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX11: [0x05,0x00,0x1e,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_max3_u32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX11: [0x05,0x00,0x1e,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_max3_u32_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX11: [0x05,0x00,0x1e,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_max3_u32_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX11: [0x05,0x00,0x1e,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_max3_u32_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX11: [0x05,0x00,0x1e,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_max3_u32_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX11: [0x05,0x00,0x1e,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_max3_u32_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX11: [0x05,0x00,0x1e,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_max3_u32_e64_dpp v5, v1, v2, ttmp15 row_shr:15
+// GFX11: [0x05,0x00,0x1e,0xd6,0xfa,0x04,0xee,0x01,0x01,0x1f,0x01,0xff]
+
+v_max3_u32_e64_dpp v5, v1, v2, exec_hi row_ror:1
+// GFX11: [0x05,0x00,0x1e,0xd6,0xfa,0x04,0xfe,0x01,0x01,0x21,0x01,0xff]
+
+v_max3_u32_e64_dpp v5, v1, v2, exec_lo row_ror:15
+// GFX11: [0x05,0x00,0x1e,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x2f,0x01,0xff]
+
+v_max3_u32_e64_dpp v5, v1, v2, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x00,0x1e,0xd6,0xfa,0x04,0xf2,0x01,0x01,0x50,0x01,0xff]
+
+v_max3_u32_e64_dpp v5, v1, v2, -1 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x00,0x1e,0xd6,0xfa,0x04,0x06,0x03,0x01,0x5f,0x01,0x01]
+
+v_max3_u32_e64_dpp v5, v1, v2, 0.5 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX11: [0x05,0x00,0x1e,0xd6,0xfa,0x04,0xc2,0x03,0x01,0x60,0x09,0x13]
+
+v_max3_u32_e64_dpp v255, v255, v255, src_scc row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX11: [0xff,0x00,0x1e,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x05,0x30]
+
+v_max_i16_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0]
+// GFX11: [0x05,0x00,0x0a,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+
+v_max_i16_e64_dpp v5, v1, v2 quad_perm:[0,1,2,3]
+// GFX11: [0x05,0x00,0x0a,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+
+v_max_i16_e64_dpp v5, v1, v2 row_mirror
+// GFX11: [0x05,0x00,0x0a,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+
+v_max_i16_e64_dpp v5, v1, v2 row_half_mirror
+// GFX11: [0x05,0x00,0x0a,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+
+v_max_i16_e64_dpp v5, v1, v2 row_shl:1
+// GFX11: [0x05,0x00,0x0a,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+
+v_max_i16_e64_dpp v5, v1, v2 row_shl:15
+// GFX11: [0x05,0x00,0x0a,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+
+v_max_i16_e64_dpp v5, v1, v2 row_shr:1
+// GFX11: [0x05,0x00,0x0a,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+
+v_max_i16_e64_dpp v5, v1, v2 row_shr:15
+// GFX11: [0x05,0x00,0x0a,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+
+v_max_i16_e64_dpp v5, v1, v2 row_ror:1
+// GFX11: [0x05,0x00,0x0a,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+
+v_max_i16_e64_dpp v5, v1, v2 row_ror:15
+// GFX11: [0x05,0x00,0x0a,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+
+v_max_i16_e64_dpp v5, v1, v2 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x00,0x0a,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+
+v_max_i16_e64_dpp v5, v1, v2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x00,0x0a,0xd7,0xfa,0x04,0x02,0x00,0x01,0x5f,0x01,0x01]
+
+v_max_i16_e64_dpp v5, v1, v2 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX11: [0x05,0x00,0x0a,0xd7,0xfa,0x04,0x02,0x00,0x01,0x60,0x09,0x13]
+
+v_max_i16_e64_dpp v255, v255, v255 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX11: [0xff,0x00,0x0a,0xd7,0xfa,0xfe,0x03,0x00,0xff,0x6f,0x05,0x30]
+
+v_max_u16_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0]
+// GFX11: [0x05,0x00,0x09,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+
+v_max_u16_e64_dpp v5, v1, v2 quad_perm:[0,1,2,3]
+// GFX11: [0x05,0x00,0x09,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+
+v_max_u16_e64_dpp v5, v1, v2 row_mirror
+// GFX11: [0x05,0x00,0x09,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+
+v_max_u16_e64_dpp v5, v1, v2 row_half_mirror
+// GFX11: [0x05,0x00,0x09,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+
+v_max_u16_e64_dpp v5, v1, v2 row_shl:1
+// GFX11: [0x05,0x00,0x09,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+
+v_max_u16_e64_dpp v5, v1, v2 row_shl:15
+// GFX11: [0x05,0x00,0x09,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+
+v_max_u16_e64_dpp v5, v1, v2 row_shr:1
+// GFX11: [0x05,0x00,0x09,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+
+v_max_u16_e64_dpp v5, v1, v2 row_shr:15
+// GFX11: [0x05,0x00,0x09,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+
+v_max_u16_e64_dpp v5, v1, v2 row_ror:1
+// GFX11: [0x05,0x00,0x09,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+
+v_max_u16_e64_dpp v5, v1, v2 row_ror:15
+// GFX11: [0x05,0x00,0x09,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+
+v_max_u16_e64_dpp v5, v1, v2 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x00,0x09,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+
+v_max_u16_e64_dpp v5, v1, v2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x00,0x09,0xd7,0xfa,0x04,0x02,0x00,0x01,0x5f,0x01,0x01]
+
+v_max_u16_e64_dpp v5, v1, v2 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX11: [0x05,0x00,0x09,0xd7,0xfa,0x04,0x02,0x00,0x01,0x60,0x09,0x13]
+
+v_max_u16_e64_dpp v255, v255, v255 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX11: [0xff,0x00,0x09,0xd7,0xfa,0xfe,0x03,0x00,0xff,0x6f,0x05,0x30]
+
+v_maxmin_f16_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX11: [0x05,0x00,0x60,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_maxmin_f16_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX11: [0x05,0x00,0x60,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_maxmin_f16_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX11: [0x05,0x00,0x60,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_maxmin_f16_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX11: [0x05,0x00,0x60,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_maxmin_f16_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX11: [0x05,0x00,0x60,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_maxmin_f16_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX11: [0x05,0x00,0x60,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_maxmin_f16_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX11: [0x05,0x00,0x60,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_maxmin_f16_e64_dpp v5, |v1|, v2, -ttmp15 row_shr:15
+// GFX11: [0x05,0x01,0x60,0xd6,0xfa,0x04,0xee,0x81,0x01,0x1f,0x01,0xff]
+
+v_maxmin_f16_e64_dpp v5, v1, -|v2|, exec_hi row_ror:1
+// GFX11: [0x05,0x02,0x60,0xd6,0xfa,0x04,0xfe,0x41,0x01,0x21,0x01,0xff]
+
+v_maxmin_f16_e64_dpp v5, -v1, v2, |exec_lo| row_ror:15
+// GFX11: [0x05,0x04,0x60,0xd6,0xfa,0x04,0xfa,0x21,0x01,0x2f,0x01,0xff]
+
+v_maxmin_f16_e64_dpp v5, -|v1|, -|v2|, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x03,0x60,0xd6,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff]
+
+v_maxmin_f16_e64_dpp v5, -|v1|, v2, -|-1| mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x05,0x60,0xd6,0xfa,0x04,0x06,0xab,0x01,0x5f,0x01,0x01]
+
+v_maxmin_f16_e64_dpp v5, v1, -|v2|, -|0.5| mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX11: [0x05,0x06,0x60,0xd6,0xfa,0x04,0xc2,0xd3,0x01,0x60,0x09,0x13]
+
+v_maxmin_f16_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX11: [0xff,0x87,0x60,0xd6,0xfa,0xfe,0xf7,0xfb,0xff,0x6f,0x05,0x30]
+
+v_maxmin_f32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX11: [0x05,0x00,0x5e,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_maxmin_f32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX11: [0x05,0x00,0x5e,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_maxmin_f32_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX11: [0x05,0x00,0x5e,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_maxmin_f32_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX11: [0x05,0x00,0x5e,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_maxmin_f32_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX11: [0x05,0x00,0x5e,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_maxmin_f32_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX11: [0x05,0x00,0x5e,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_maxmin_f32_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX11: [0x05,0x00,0x5e,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_maxmin_f32_e64_dpp v5, |v1|, v2, -ttmp15 row_shr:15
+// GFX11: [0x05,0x01,0x5e,0xd6,0xfa,0x04,0xee,0x81,0x01,0x1f,0x01,0xff]
+
+v_maxmin_f32_e64_dpp v5, v1, -|v2|, exec_hi row_ror:1
+// GFX11: [0x05,0x02,0x5e,0xd6,0xfa,0x04,0xfe,0x41,0x01,0x21,0x01,0xff]
+
+v_maxmin_f32_e64_dpp v5, -v1, v2, |exec_lo| row_ror:15
+// GFX11: [0x05,0x04,0x5e,0xd6,0xfa,0x04,0xfa,0x21,0x01,0x2f,0x01,0xff]
+
+v_maxmin_f32_e64_dpp v5, -|v1|, -|v2|, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x03,0x5e,0xd6,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff]
+
+v_maxmin_f32_e64_dpp v5, -|v1|, v2, -|-1| mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x05,0x5e,0xd6,0xfa,0x04,0x06,0xab,0x01,0x5f,0x01,0x01]
+
+v_maxmin_f32_e64_dpp v5, v1, -|v2|, -|0.5| mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX11: [0x05,0x06,0x5e,0xd6,0xfa,0x04,0xc2,0xd3,0x01,0x60,0x09,0x13]
+
+v_maxmin_f32_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX11: [0xff,0x87,0x5e,0xd6,0xfa,0xfe,0xf7,0xfb,0xff,0x6f,0x05,0x30]
+
+v_maxmin_i32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX11: [0x05,0x00,0x64,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_maxmin_i32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX11: [0x05,0x00,0x64,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_maxmin_i32_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX11: [0x05,0x00,0x64,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_maxmin_i32_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX11: [0x05,0x00,0x64,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_maxmin_i32_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX11: [0x05,0x00,0x64,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_maxmin_i32_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX11: [0x05,0x00,0x64,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_maxmin_i32_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX11: [0x05,0x00,0x64,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_maxmin_i32_e64_dpp v5, v1, v2, ttmp15 row_shr:15
+// GFX11: [0x05,0x00,0x64,0xd6,0xfa,0x04,0xee,0x01,0x01,0x1f,0x01,0xff]
+
+v_maxmin_i32_e64_dpp v5, v1, v2, exec_hi row_ror:1
+// GFX11: [0x05,0x00,0x64,0xd6,0xfa,0x04,0xfe,0x01,0x01,0x21,0x01,0xff]
+
+v_maxmin_i32_e64_dpp v5, v1, v2, exec_lo row_ror:15
+// GFX11: [0x05,0x00,0x64,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x2f,0x01,0xff]
+
+v_maxmin_i32_e64_dpp v5, v1, v2, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x00,0x64,0xd6,0xfa,0x04,0xf2,0x01,0x01,0x50,0x01,0xff]
+
+v_maxmin_i32_e64_dpp v5, v1, v2, -1 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x00,0x64,0xd6,0xfa,0x04,0x06,0x03,0x01,0x5f,0x01,0x01]
+
+v_maxmin_i32_e64_dpp v5, v1, v2, 0.5 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX11: [0x05,0x00,0x64,0xd6,0xfa,0x04,0xc2,0x03,0x01,0x60,0x09,0x13]
+
+v_maxmin_i32_e64_dpp v255, v255, v255, src_scc row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX11: [0xff,0x00,0x64,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x05,0x30]
+
+v_maxmin_u32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX11: [0x05,0x00,0x62,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_maxmin_u32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX11: [0x05,0x00,0x62,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_maxmin_u32_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX11: [0x05,0x00,0x62,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_maxmin_u32_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX11: [0x05,0x00,0x62,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_maxmin_u32_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX11: [0x05,0x00,0x62,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_maxmin_u32_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX11: [0x05,0x00,0x62,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_maxmin_u32_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX11: [0x05,0x00,0x62,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_maxmin_u32_e64_dpp v5, v1, v2, ttmp15 row_shr:15
+// GFX11: [0x05,0x00,0x62,0xd6,0xfa,0x04,0xee,0x01,0x01,0x1f,0x01,0xff]
+
+v_maxmin_u32_e64_dpp v5, v1, v2, exec_hi row_ror:1
+// GFX11: [0x05,0x00,0x62,0xd6,0xfa,0x04,0xfe,0x01,0x01,0x21,0x01,0xff]
+
+v_maxmin_u32_e64_dpp v5, v1, v2, exec_lo row_ror:15
+// GFX11: [0x05,0x00,0x62,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x2f,0x01,0xff]
+
+v_maxmin_u32_e64_dpp v5, v1, v2, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x00,0x62,0xd6,0xfa,0x04,0xf2,0x01,0x01,0x50,0x01,0xff]
+
+v_maxmin_u32_e64_dpp v5, v1, v2, -1 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x00,0x62,0xd6,0xfa,0x04,0x06,0x03,0x01,0x5f,0x01,0x01]
+
+v_maxmin_u32_e64_dpp v5, v1, v2, 0.5 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX11: [0x05,0x00,0x62,0xd6,0xfa,0x04,0xc2,0x03,0x01,0x60,0x09,0x13]
+
+v_maxmin_u32_e64_dpp v255, v255, v255, src_scc row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX11: [0xff,0x00,0x62,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x05,0x30]
+
+v_mbcnt_hi_u32_b32_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0]
+// GFX11: [0x05,0x00,0x20,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+
+v_mbcnt_hi_u32_b32_e64_dpp v5, v1, v2 quad_perm:[0,1,2,3]
+// GFX11: [0x05,0x00,0x20,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+
+v_mbcnt_hi_u32_b32_e64_dpp v5, v1, v2 row_mirror
+// GFX11: [0x05,0x00,0x20,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+
+v_mbcnt_hi_u32_b32_e64_dpp v5, v1, v2 row_half_mirror
+// GFX11: [0x05,0x00,0x20,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+
+v_mbcnt_hi_u32_b32_e64_dpp v5, v1, v2 row_shl:1
+// GFX11: [0x05,0x00,0x20,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+
+v_mbcnt_hi_u32_b32_e64_dpp v5, v1, v2 row_shl:15
+// GFX11: [0x05,0x00,0x20,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+
+v_mbcnt_hi_u32_b32_e64_dpp v5, v1, v2 row_shr:1
+// GFX11: [0x05,0x00,0x20,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+
+v_mbcnt_hi_u32_b32_e64_dpp v5, v1, v2 row_shr:15
+// GFX11: [0x05,0x00,0x20,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+
+v_mbcnt_hi_u32_b32_e64_dpp v5, v1, v2 row_ror:1
+// GFX11: [0x05,0x00,0x20,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+
+v_mbcnt_hi_u32_b32_e64_dpp v5, v1, v2 row_ror:15
+// GFX11: [0x05,0x00,0x20,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+
+v_mbcnt_hi_u32_b32_e64_dpp v5, v1, v2 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x00,0x20,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+
+v_mbcnt_hi_u32_b32_e64_dpp v5, v1, v2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x00,0x20,0xd7,0xfa,0x04,0x02,0x00,0x01,0x5f,0x01,0x01]
+
+v_mbcnt_hi_u32_b32_e64_dpp v5, v1, v2 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX11: [0x05,0x00,0x20,0xd7,0xfa,0x04,0x02,0x00,0x01,0x60,0x09,0x13]
+
+v_mbcnt_hi_u32_b32_e64_dpp v255, v255, v255 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX11: [0xff,0x00,0x20,0xd7,0xfa,0xfe,0x03,0x00,0xff,0x6f,0x05,0x30]
+
+v_mbcnt_lo_u32_b32_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0]
+// GFX11: [0x05,0x00,0x1f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+
+v_mbcnt_lo_u32_b32_e64_dpp v5, v1, v2 quad_perm:[0,1,2,3]
+// GFX11: [0x05,0x00,0x1f,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+
+v_mbcnt_lo_u32_b32_e64_dpp v5, v1, v2 row_mirror
+// GFX11: [0x05,0x00,0x1f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+
+v_mbcnt_lo_u32_b32_e64_dpp v5, v1, v2 row_half_mirror
+// GFX11: [0x05,0x00,0x1f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+
+v_mbcnt_lo_u32_b32_e64_dpp v5, v1, v2 row_shl:1
+// GFX11: [0x05,0x00,0x1f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+
+v_mbcnt_lo_u32_b32_e64_dpp v5, v1, v2 row_shl:15
+// GFX11: [0x05,0x00,0x1f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+
+v_mbcnt_lo_u32_b32_e64_dpp v5, v1, v2 row_shr:1
+// GFX11: [0x05,0x00,0x1f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+
+v_mbcnt_lo_u32_b32_e64_dpp v5, v1, v2 row_shr:15
+// GFX11: [0x05,0x00,0x1f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+
+v_mbcnt_lo_u32_b32_e64_dpp v5, v1, v2 row_ror:1
+// GFX11: [0x05,0x00,0x1f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+
+v_mbcnt_lo_u32_b32_e64_dpp v5, v1, v2 row_ror:15
+// GFX11: [0x05,0x00,0x1f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+
+v_mbcnt_lo_u32_b32_e64_dpp v5, v1, v2 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x00,0x1f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+
+v_mbcnt_lo_u32_b32_e64_dpp v5, v1, v2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x00,0x1f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x5f,0x01,0x01]
+
+v_mbcnt_lo_u32_b32_e64_dpp v5, v1, v2 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX11: [0x05,0x00,0x1f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x60,0x09,0x13]
+
+v_mbcnt_lo_u32_b32_e64_dpp v255, v255, v255 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX11: [0xff,0x00,0x1f,0xd7,0xfa,0xfe,0x03,0x00,0xff,0x6f,0x05,0x30]
+
+v_med3_f16_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX11: [0x05,0x00,0x4f,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_med3_f16_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX11: [0x05,0x00,0x4f,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_med3_f16_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX11: [0x05,0x00,0x4f,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_med3_f16_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX11: [0x05,0x00,0x4f,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_med3_f16_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX11: [0x05,0x00,0x4f,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_med3_f16_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX11: [0x05,0x00,0x4f,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_med3_f16_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX11: [0x05,0x00,0x4f,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_med3_f16_e64_dpp v5, |v1|, v2, -ttmp15 row_shr:15
+// GFX11: [0x05,0x01,0x4f,0xd6,0xfa,0x04,0xee,0x81,0x01,0x1f,0x01,0xff]
+
+v_med3_f16_e64_dpp v5, v1, -|v2|, exec_hi row_ror:1
+// GFX11: [0x05,0x02,0x4f,0xd6,0xfa,0x04,0xfe,0x41,0x01,0x21,0x01,0xff]
+
+v_med3_f16_e64_dpp v5, -v1, v2, |exec_lo| row_ror:15
+// GFX11: [0x05,0x04,0x4f,0xd6,0xfa,0x04,0xfa,0x21,0x01,0x2f,0x01,0xff]
+
+v_med3_f16_e64_dpp v5, -|v1|, -|v2|, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x03,0x4f,0xd6,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff]
+
+v_med3_f16_e64_dpp v5, -|v1|, v2, -|-1| row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x05,0x4f,0xd6,0xfa,0x04,0x06,0xa3,0x01,0x5f,0x01,0x01]
+
+v_med3_f16_e64_dpp v5, v1, -|v2|, -|0.5| row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX11: [0x05,0x06,0x4f,0xd6,0xfa,0x04,0xc2,0xc3,0x01,0x60,0x09,0x13]
+
+v_med3_f16_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX11: [0xff,0x87,0x4f,0xd6,0xfa,0xfe,0xf7,0xe3,0xff,0x6f,0x05,0x30]
+
+v_med3_f32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX11: [0x05,0x00,0x1f,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_med3_f32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX11: [0x05,0x00,0x1f,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_med3_f32_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX11: [0x05,0x00,0x1f,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_med3_f32_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX11: [0x05,0x00,0x1f,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_med3_f32_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX11: [0x05,0x00,0x1f,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_med3_f32_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX11: [0x05,0x00,0x1f,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_med3_f32_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX11: [0x05,0x00,0x1f,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_med3_f32_e64_dpp v5, |v1|, v2, -ttmp15 row_shr:15
+// GFX11: [0x05,0x01,0x1f,0xd6,0xfa,0x04,0xee,0x81,0x01,0x1f,0x01,0xff]
+
+v_med3_f32_e64_dpp v5, v1, -|v2|, exec_hi row_ror:1
+// GFX11: [0x05,0x02,0x1f,0xd6,0xfa,0x04,0xfe,0x41,0x01,0x21,0x01,0xff]
+
+v_med3_f32_e64_dpp v5, -v1, v2, |exec_lo| row_ror:15
+// GFX11: [0x05,0x04,0x1f,0xd6,0xfa,0x04,0xfa,0x21,0x01,0x2f,0x01,0xff]
+
+v_med3_f32_e64_dpp v5, -|v1|, -|v2|, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x03,0x1f,0xd6,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff]
+
+v_med3_f32_e64_dpp v5, -|v1|, v2, -|-1| mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x05,0x1f,0xd6,0xfa,0x04,0x06,0xab,0x01,0x5f,0x01,0x01]
+
+v_med3_f32_e64_dpp v5, v1, -|v2|, -|0.5| mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX11: [0x05,0x06,0x1f,0xd6,0xfa,0x04,0xc2,0xd3,0x01,0x60,0x09,0x13]
+
+v_med3_f32_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX11: [0xff,0x87,0x1f,0xd6,0xfa,0xfe,0xf7,0xfb,0xff,0x6f,0x05,0x30]
+
+v_med3_i16_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX11: [0x05,0x00,0x50,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_med3_i16_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX11: [0x05,0x00,0x50,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_med3_i16_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX11: [0x05,0x00,0x50,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_med3_i16_e64_dpp v5, v1, v2, v3 row_half_mirror
+// GFX11: [0x05,0x00,0x50,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x41,0x01,0xff]
+
+v_med3_i16_e64_dpp v5, v1, v2, v255 row_shl:1
+// GFX11: [0x05,0x00,0x50,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x01,0x01,0xff]
+
+v_med3_i16_e64_dpp v5, v1, v2, s105 row_shl:15
+// GFX11: [0x05,0x00,0x50,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x0f,0x01,0xff]
+
+v_med3_i16_e64_dpp v5, v1, v2, vcc_hi row_shr:1
+// GFX11: [0x05,0x00,0x50,0xd6,0xfa,0x04,0xae,0x01,0x01,0x11,0x01,0xff]
+
+v_med3_i16_e64_dpp v5, v1, v2, vcc_lo row_shr:15
+// GFX11: [0x05,0x00,0x50,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x1f,0x01,0xff]
+
+v_med3_i16_e64_dpp v5, v1, v2, ttmp15 row_ror:1
+// GFX11: [0x05,0x00,0x50,0xd6,0xfa,0x04,0xee,0x01,0x01,0x21,0x01,0xff]
+
+v_med3_i16_e64_dpp v5, v1, v2, exec_hi row_ror:15
+// GFX11: [0x05,0x00,0x50,0xd6,0xfa,0x04,0xfe,0x01,0x01,0x2f,0x01,0xff]
+
+v_med3_i16_e64_dpp v5, v1, v2, exec_lo row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x00,0x50,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x50,0x01,0xff]
+
+v_med3_i16_e64_dpp v5, v1, v2, null row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x00,0x50,0xd6,0xfa,0x04,0xf2,0x01,0x01,0x5f,0x01,0x01]
+
+v_med3_i16_e64_dpp v5, v1, v2, -1 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX11: [0x05,0x00,0x50,0xd6,0xfa,0x04,0x06,0x03,0x01,0x60,0x09,0x13]
+
+v_med3_i16_e64_dpp v255, v255, v255, src_scc row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX11: [0xff,0x00,0x50,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x05,0x30]
+
+v_med3_i32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX11: [0x05,0x00,0x20,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_med3_i32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX11: [0x05,0x00,0x20,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_med3_i32_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX11: [0x05,0x00,0x20,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_med3_i32_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX11: [0x05,0x00,0x20,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_med3_i32_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX11: [0x05,0x00,0x20,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_med3_i32_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX11: [0x05,0x00,0x20,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_med3_i32_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX11: [0x05,0x00,0x20,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_med3_i32_e64_dpp v5, v1, v2, ttmp15 row_shr:15
+// GFX11: [0x05,0x00,0x20,0xd6,0xfa,0x04,0xee,0x01,0x01,0x1f,0x01,0xff]
+
+v_med3_i32_e64_dpp v5, v1, v2, exec_hi row_ror:1
+// GFX11: [0x05,0x00,0x20,0xd6,0xfa,0x04,0xfe,0x01,0x01,0x21,0x01,0xff]
+
+v_med3_i32_e64_dpp v5, v1, v2, exec_lo row_ror:15
+// GFX11: [0x05,0x00,0x20,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x2f,0x01,0xff]
+
+v_med3_i32_e64_dpp v5, v1, v2, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x00,0x20,0xd6,0xfa,0x04,0xf2,0x01,0x01,0x50,0x01,0xff]
+
+v_med3_i32_e64_dpp v5, v1, v2, -1 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x00,0x20,0xd6,0xfa,0x04,0x06,0x03,0x01,0x5f,0x01,0x01]
+
+v_med3_i32_e64_dpp v5, v1, v2, 0.5 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX11: [0x05,0x00,0x20,0xd6,0xfa,0x04,0xc2,0x03,0x01,0x60,0x09,0x13]
+
+v_med3_i32_e64_dpp v255, v255, v255, src_scc row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX11: [0xff,0x00,0x20,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x05,0x30]
+
+v_med3_u16_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX11: [0x05,0x00,0x51,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_med3_u16_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX11: [0x05,0x00,0x51,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_med3_u16_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX11: [0x05,0x00,0x51,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_med3_u16_e64_dpp v5, v1, v2, v3 row_half_mirror
+// GFX11: [0x05,0x00,0x51,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x41,0x01,0xff]
+
+v_med3_u16_e64_dpp v5, v1, v2, v255 row_shl:1
+// GFX11: [0x05,0x00,0x51,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x01,0x01,0xff]
+
+v_med3_u16_e64_dpp v5, v1, v2, s105 row_shl:15
+// GFX11: [0x05,0x00,0x51,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x0f,0x01,0xff]
+
+v_med3_u16_e64_dpp v5, v1, v2, vcc_hi row_shr:1
+// GFX11: [0x05,0x00,0x51,0xd6,0xfa,0x04,0xae,0x01,0x01,0x11,0x01,0xff]
+
+v_med3_u16_e64_dpp v5, v1, v2, vcc_lo row_shr:15
+// GFX11: [0x05,0x00,0x51,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x1f,0x01,0xff]
+
+v_med3_u16_e64_dpp v5, v1, v2, ttmp15 row_ror:1
+// GFX11: [0x05,0x00,0x51,0xd6,0xfa,0x04,0xee,0x01,0x01,0x21,0x01,0xff]
+
+v_med3_u16_e64_dpp v5, v1, v2, exec_hi row_ror:15
+// GFX11: [0x05,0x00,0x51,0xd6,0xfa,0x04,0xfe,0x01,0x01,0x2f,0x01,0xff]
+
+v_med3_u16_e64_dpp v5, v1, v2, exec_lo row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x00,0x51,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x50,0x01,0xff]
+
+v_med3_u16_e64_dpp v5, v1, v2, null row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x00,0x51,0xd6,0xfa,0x04,0xf2,0x01,0x01,0x5f,0x01,0x01]
+
+v_med3_u16_e64_dpp v5, v1, v2, -1 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX11: [0x05,0x00,0x51,0xd6,0xfa,0x04,0x06,0x03,0x01,0x60,0x09,0x13]
+
+v_med3_u16_e64_dpp v255, v255, v255, src_scc row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX11: [0xff,0x00,0x51,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x05,0x30]
+
+v_med3_u32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX11: [0x05,0x00,0x21,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_med3_u32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX11: [0x05,0x00,0x21,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_med3_u32_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX11: [0x05,0x00,0x21,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_med3_u32_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX11: [0x05,0x00,0x21,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_med3_u32_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX11: [0x05,0x00,0x21,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_med3_u32_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX11: [0x05,0x00,0x21,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_med3_u32_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX11: [0x05,0x00,0x21,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_med3_u32_e64_dpp v5, v1, v2, ttmp15 row_shr:15
+// GFX11: [0x05,0x00,0x21,0xd6,0xfa,0x04,0xee,0x01,0x01,0x1f,0x01,0xff]
+
+v_med3_u32_e64_dpp v5, v1, v2, exec_hi row_ror:1
+// GFX11: [0x05,0x00,0x21,0xd6,0xfa,0x04,0xfe,0x01,0x01,0x21,0x01,0xff]
+
+v_med3_u32_e64_dpp v5, v1, v2, exec_lo row_ror:15
+// GFX11: [0x05,0x00,0x21,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x2f,0x01,0xff]
+
+v_med3_u32_e64_dpp v5, v1, v2, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x00,0x21,0xd6,0xfa,0x04,0xf2,0x01,0x01,0x50,0x01,0xff]
+
+v_med3_u32_e64_dpp v5, v1, v2, -1 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x00,0x21,0xd6,0xfa,0x04,0x06,0x03,0x01,0x5f,0x01,0x01]
+
+v_med3_u32_e64_dpp v5, v1, v2, 0.5 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX11: [0x05,0x00,0x21,0xd6,0xfa,0x04,0xc2,0x03,0x01,0x60,0x09,0x13]
+
+v_med3_u32_e64_dpp v255, v255, v255, src_scc row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX11: [0xff,0x00,0x21,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x05,0x30]
+
+v_min3_f16_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX11: [0x05,0x00,0x49,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_min3_f16_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX11: [0x05,0x00,0x49,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_min3_f16_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX11: [0x05,0x00,0x49,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_min3_f16_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX11: [0x05,0x00,0x49,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_min3_f16_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX11: [0x05,0x00,0x49,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_min3_f16_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX11: [0x05,0x00,0x49,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_min3_f16_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX11: [0x05,0x00,0x49,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_min3_f16_e64_dpp v5, |v1|, v2, -ttmp15 row_shr:15
+// GFX11: [0x05,0x01,0x49,0xd6,0xfa,0x04,0xee,0x81,0x01,0x1f,0x01,0xff]
+
+v_min3_f16_e64_dpp v5, v1, -|v2|, exec_hi row_ror:1
+// GFX11: [0x05,0x02,0x49,0xd6,0xfa,0x04,0xfe,0x41,0x01,0x21,0x01,0xff]
+
+v_min3_f16_e64_dpp v5, -v1, v2, |exec_lo| row_ror:15
+// GFX11: [0x05,0x04,0x49,0xd6,0xfa,0x04,0xfa,0x21,0x01,0x2f,0x01,0xff]
+
+v_min3_f16_e64_dpp v5, -|v1|, -|v2|, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x03,0x49,0xd6,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff]
+
+v_min3_f16_e64_dpp v5, -|v1|, v2, -|-1| row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x05,0x49,0xd6,0xfa,0x04,0x06,0xa3,0x01,0x5f,0x01,0x01]
+
+v_min3_f16_e64_dpp v5, v1, -|v2|, -|0.5| row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX11: [0x05,0x06,0x49,0xd6,0xfa,0x04,0xc2,0xc3,0x01,0x60,0x09,0x13]
+
+v_min3_f16_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX11: [0xff,0x87,0x49,0xd6,0xfa,0xfe,0xf7,0xe3,0xff,0x6f,0x05,0x30]
+
+v_min3_f32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX11: [0x05,0x00,0x19,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_min3_f32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX11: [0x05,0x00,0x19,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_min3_f32_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX11: [0x05,0x00,0x19,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_min3_f32_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX11: [0x05,0x00,0x19,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_min3_f32_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX11: [0x05,0x00,0x19,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_min3_f32_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX11: [0x05,0x00,0x19,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_min3_f32_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX11: [0x05,0x00,0x19,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_min3_f32_e64_dpp v5, |v1|, v2, -ttmp15 row_shr:15
+// GFX11: [0x05,0x01,0x19,0xd6,0xfa,0x04,0xee,0x81,0x01,0x1f,0x01,0xff]
+
+v_min3_f32_e64_dpp v5, v1, -|v2|, exec_hi row_ror:1
+// GFX11: [0x05,0x02,0x19,0xd6,0xfa,0x04,0xfe,0x41,0x01,0x21,0x01,0xff]
+
+v_min3_f32_e64_dpp v5, -v1, v2, |exec_lo| row_ror:15
+// GFX11: [0x05,0x04,0x19,0xd6,0xfa,0x04,0xfa,0x21,0x01,0x2f,0x01,0xff]
+
+v_min3_f32_e64_dpp v5, -|v1|, -|v2|, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x03,0x19,0xd6,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff]
+
+v_min3_f32_e64_dpp v5, -|v1|, v2, -|-1| mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x05,0x19,0xd6,0xfa,0x04,0x06,0xab,0x01,0x5f,0x01,0x01]
+
+v_min3_f32_e64_dpp v5, v1, -|v2|, -|0.5| mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX11: [0x05,0x06,0x19,0xd6,0xfa,0x04,0xc2,0xd3,0x01,0x60,0x09,0x13]
+
+v_min3_f32_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX11: [0xff,0x87,0x19,0xd6,0xfa,0xfe,0xf7,0xfb,0xff,0x6f,0x05,0x30]
+
+v_min3_i16_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX11: [0x05,0x00,0x4a,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_min3_i16_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX11: [0x05,0x00,0x4a,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_min3_i16_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX11: [0x05,0x00,0x4a,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_min3_i16_e64_dpp v5, v1, v2, v3 row_half_mirror
+// GFX11: [0x05,0x00,0x4a,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x41,0x01,0xff]
+
+v_min3_i16_e64_dpp v5, v1, v2, v255 row_shl:1
+// GFX11: [0x05,0x00,0x4a,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x01,0x01,0xff]
+
+v_min3_i16_e64_dpp v5, v1, v2, s105 row_shl:15
+// GFX11: [0x05,0x00,0x4a,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x0f,0x01,0xff]
+
+v_min3_i16_e64_dpp v5, v1, v2, vcc_hi row_shr:1
+// GFX11: [0x05,0x00,0x4a,0xd6,0xfa,0x04,0xae,0x01,0x01,0x11,0x01,0xff]
+
+v_min3_i16_e64_dpp v5, v1, v2, vcc_lo row_shr:15
+// GFX11: [0x05,0x00,0x4a,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x1f,0x01,0xff]
+
+v_min3_i16_e64_dpp v5, v1, v2, ttmp15 row_ror:1
+// GFX11: [0x05,0x00,0x4a,0xd6,0xfa,0x04,0xee,0x01,0x01,0x21,0x01,0xff]
+
+v_min3_i16_e64_dpp v5, v1, v2, exec_hi row_ror:15
+// GFX11: [0x05,0x00,0x4a,0xd6,0xfa,0x04,0xfe,0x01,0x01,0x2f,0x01,0xff]
+
+v_min3_i16_e64_dpp v5, v1, v2, exec_lo row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x00,0x4a,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x50,0x01,0xff]
+
+v_min3_i16_e64_dpp v5, v1, v2, null row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x00,0x4a,0xd6,0xfa,0x04,0xf2,0x01,0x01,0x5f,0x01,0x01]
+
+v_min3_i16_e64_dpp v5, v1, v2, -1 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX11: [0x05,0x00,0x4a,0xd6,0xfa,0x04,0x06,0x03,0x01,0x60,0x09,0x13]
+
+v_min3_i16_e64_dpp v255, v255, v255, src_scc row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX11: [0xff,0x00,0x4a,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x05,0x30]
+
+v_min3_i32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX11: [0x05,0x00,0x1a,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_min3_i32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX11: [0x05,0x00,0x1a,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_min3_i32_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX11: [0x05,0x00,0x1a,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_min3_i32_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX11: [0x05,0x00,0x1a,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_min3_i32_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX11: [0x05,0x00,0x1a,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_min3_i32_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX11: [0x05,0x00,0x1a,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_min3_i32_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX11: [0x05,0x00,0x1a,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_min3_i32_e64_dpp v5, v1, v2, ttmp15 row_shr:15
+// GFX11: [0x05,0x00,0x1a,0xd6,0xfa,0x04,0xee,0x01,0x01,0x1f,0x01,0xff]
+
+v_min3_i32_e64_dpp v5, v1, v2, exec_hi row_ror:1
+// GFX11: [0x05,0x00,0x1a,0xd6,0xfa,0x04,0xfe,0x01,0x01,0x21,0x01,0xff]
+
+v_min3_i32_e64_dpp v5, v1, v2, exec_lo row_ror:15
+// GFX11: [0x05,0x00,0x1a,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x2f,0x01,0xff]
+
+v_min3_i32_e64_dpp v5, v1, v2, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x00,0x1a,0xd6,0xfa,0x04,0xf2,0x01,0x01,0x50,0x01,0xff]
+
+v_min3_i32_e64_dpp v5, v1, v2, -1 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x00,0x1a,0xd6,0xfa,0x04,0x06,0x03,0x01,0x5f,0x01,0x01]
+
+v_min3_i32_e64_dpp v5, v1, v2, 0.5 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX11: [0x05,0x00,0x1a,0xd6,0xfa,0x04,0xc2,0x03,0x01,0x60,0x09,0x13]
+
+v_min3_i32_e64_dpp v255, v255, v255, src_scc row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX11: [0xff,0x00,0x1a,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x05,0x30]
+
+v_min3_u16_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX11: [0x05,0x00,0x4b,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_min3_u16_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX11: [0x05,0x00,0x4b,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_min3_u16_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX11: [0x05,0x00,0x4b,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_min3_u16_e64_dpp v5, v1, v2, v3 row_half_mirror
+// GFX11: [0x05,0x00,0x4b,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x41,0x01,0xff]
+
+v_min3_u16_e64_dpp v5, v1, v2, v255 row_shl:1
+// GFX11: [0x05,0x00,0x4b,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x01,0x01,0xff]
+
+v_min3_u16_e64_dpp v5, v1, v2, s105 row_shl:15
+// GFX11: [0x05,0x00,0x4b,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x0f,0x01,0xff]
+
+v_min3_u16_e64_dpp v5, v1, v2, vcc_hi row_shr:1
+// GFX11: [0x05,0x00,0x4b,0xd6,0xfa,0x04,0xae,0x01,0x01,0x11,0x01,0xff]
+
+v_min3_u16_e64_dpp v5, v1, v2, vcc_lo row_shr:15
+// GFX11: [0x05,0x00,0x4b,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x1f,0x01,0xff]
+
+v_min3_u16_e64_dpp v5, v1, v2, ttmp15 row_ror:1
+// GFX11: [0x05,0x00,0x4b,0xd6,0xfa,0x04,0xee,0x01,0x01,0x21,0x01,0xff]
+
+v_min3_u16_e64_dpp v5, v1, v2, exec_hi row_ror:15
+// GFX11: [0x05,0x00,0x4b,0xd6,0xfa,0x04,0xfe,0x01,0x01,0x2f,0x01,0xff]
+
+v_min3_u16_e64_dpp v5, v1, v2, exec_lo row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x00,0x4b,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x50,0x01,0xff]
+
+v_min3_u16_e64_dpp v5, v1, v2, null row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x00,0x4b,0xd6,0xfa,0x04,0xf2,0x01,0x01,0x5f,0x01,0x01]
+
+v_min3_u16_e64_dpp v5, v1, v2, -1 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX11: [0x05,0x00,0x4b,0xd6,0xfa,0x04,0x06,0x03,0x01,0x60,0x09,0x13]
+
+v_min3_u16_e64_dpp v255, v255, v255, src_scc row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX11: [0xff,0x00,0x4b,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x05,0x30]
+
+v_min3_u32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX11: [0x05,0x00,0x1b,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_min3_u32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX11: [0x05,0x00,0x1b,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_min3_u32_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX11: [0x05,0x00,0x1b,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_min3_u32_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX11: [0x05,0x00,0x1b,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_min3_u32_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX11: [0x05,0x00,0x1b,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_min3_u32_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX11: [0x05,0x00,0x1b,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_min3_u32_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX11: [0x05,0x00,0x1b,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_min3_u32_e64_dpp v5, v1, v2, ttmp15 row_shr:15
+// GFX11: [0x05,0x00,0x1b,0xd6,0xfa,0x04,0xee,0x01,0x01,0x1f,0x01,0xff]
+
+v_min3_u32_e64_dpp v5, v1, v2, exec_hi row_ror:1
+// GFX11: [0x05,0x00,0x1b,0xd6,0xfa,0x04,0xfe,0x01,0x01,0x21,0x01,0xff]
+
+v_min3_u32_e64_dpp v5, v1, v2, exec_lo row_ror:15
+// GFX11: [0x05,0x00,0x1b,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x2f,0x01,0xff]
+
+v_min3_u32_e64_dpp v5, v1, v2, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x00,0x1b,0xd6,0xfa,0x04,0xf2,0x01,0x01,0x50,0x01,0xff]
+
+v_min3_u32_e64_dpp v5, v1, v2, -1 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x00,0x1b,0xd6,0xfa,0x04,0x06,0x03,0x01,0x5f,0x01,0x01]
+
+v_min3_u32_e64_dpp v5, v1, v2, 0.5 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX11: [0x05,0x00,0x1b,0xd6,0xfa,0x04,0xc2,0x03,0x01,0x60,0x09,0x13]
+
+v_min3_u32_e64_dpp v255, v255, v255, src_scc row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX11: [0xff,0x00,0x1b,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x05,0x30]
+
+v_min_i16_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0]
+// GFX11: [0x05,0x00,0x0c,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+
+v_min_i16_e64_dpp v5, v1, v2 quad_perm:[0,1,2,3]
+// GFX11: [0x05,0x00,0x0c,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+
+v_min_i16_e64_dpp v5, v1, v2 row_mirror
+// GFX11: [0x05,0x00,0x0c,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+
+v_min_i16_e64_dpp v5, v1, v2 row_half_mirror
+// GFX11: [0x05,0x00,0x0c,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+
+v_min_i16_e64_dpp v5, v1, v2 row_shl:1
+// GFX11: [0x05,0x00,0x0c,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+
+v_min_i16_e64_dpp v5, v1, v2 row_shl:15
+// GFX11: [0x05,0x00,0x0c,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+
+v_min_i16_e64_dpp v5, v1, v2 row_shr:1
+// GFX11: [0x05,0x00,0x0c,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+
+v_min_i16_e64_dpp v5, v1, v2 row_shr:15
+// GFX11: [0x05,0x00,0x0c,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+
+v_min_i16_e64_dpp v5, v1, v2 row_ror:1
+// GFX11: [0x05,0x00,0x0c,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+
+v_min_i16_e64_dpp v5, v1, v2 row_ror:15
+// GFX11: [0x05,0x00,0x0c,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+
+v_min_i16_e64_dpp v5, v1, v2 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x00,0x0c,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+
+v_min_i16_e64_dpp v5, v1, v2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x00,0x0c,0xd7,0xfa,0x04,0x02,0x00,0x01,0x5f,0x01,0x01]
+
+v_min_i16_e64_dpp v5, v1, v2 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX11: [0x05,0x00,0x0c,0xd7,0xfa,0x04,0x02,0x00,0x01,0x60,0x09,0x13]
+
+v_min_i16_e64_dpp v255, v255, v255 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX11: [0xff,0x00,0x0c,0xd7,0xfa,0xfe,0x03,0x00,0xff,0x6f,0x05,0x30]
+
+v_min_u16_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0]
+// GFX11: [0x05,0x00,0x0b,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+
+v_min_u16_e64_dpp v5, v1, v2 quad_perm:[0,1,2,3]
+// GFX11: [0x05,0x00,0x0b,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+
+v_min_u16_e64_dpp v5, v1, v2 row_mirror
+// GFX11: [0x05,0x00,0x0b,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+
+v_min_u16_e64_dpp v5, v1, v2 row_half_mirror
+// GFX11: [0x05,0x00,0x0b,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+
+v_min_u16_e64_dpp v5, v1, v2 row_shl:1
+// GFX11: [0x05,0x00,0x0b,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+
+v_min_u16_e64_dpp v5, v1, v2 row_shl:15
+// GFX11: [0x05,0x00,0x0b,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+
+v_min_u16_e64_dpp v5, v1, v2 row_shr:1
+// GFX11: [0x05,0x00,0x0b,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+
+v_min_u16_e64_dpp v5, v1, v2 row_shr:15
+// GFX11: [0x05,0x00,0x0b,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+
+v_min_u16_e64_dpp v5, v1, v2 row_ror:1
+// GFX11: [0x05,0x00,0x0b,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+
+v_min_u16_e64_dpp v5, v1, v2 row_ror:15
+// GFX11: [0x05,0x00,0x0b,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+
+v_min_u16_e64_dpp v5, v1, v2 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x00,0x0b,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+
+v_min_u16_e64_dpp v5, v1, v2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x00,0x0b,0xd7,0xfa,0x04,0x02,0x00,0x01,0x5f,0x01,0x01]
+
+v_min_u16_e64_dpp v5, v1, v2 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX11: [0x05,0x00,0x0b,0xd7,0xfa,0x04,0x02,0x00,0x01,0x60,0x09,0x13]
+
+v_min_u16_e64_dpp v255, v255, v255 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX11: [0xff,0x00,0x0b,0xd7,0xfa,0xfe,0x03,0x00,0xff,0x6f,0x05,0x30]
+
+v_minmax_f16_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX11: [0x05,0x00,0x61,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_minmax_f16_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX11: [0x05,0x00,0x61,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_minmax_f16_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX11: [0x05,0x00,0x61,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_minmax_f16_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX11: [0x05,0x00,0x61,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_minmax_f16_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX11: [0x05,0x00,0x61,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_minmax_f16_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX11: [0x05,0x00,0x61,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_minmax_f16_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX11: [0x05,0x00,0x61,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_minmax_f16_e64_dpp v5, |v1|, v2, -ttmp15 row_shr:15
+// GFX11: [0x05,0x01,0x61,0xd6,0xfa,0x04,0xee,0x81,0x01,0x1f,0x01,0xff]
+
+v_minmax_f16_e64_dpp v5, v1, -|v2|, exec_hi row_ror:1
+// GFX11: [0x05,0x02,0x61,0xd6,0xfa,0x04,0xfe,0x41,0x01,0x21,0x01,0xff]
+
+v_minmax_f16_e64_dpp v5, -v1, v2, |exec_lo| row_ror:15
+// GFX11: [0x05,0x04,0x61,0xd6,0xfa,0x04,0xfa,0x21,0x01,0x2f,0x01,0xff]
+
+v_minmax_f16_e64_dpp v5, -|v1|, -|v2|, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x03,0x61,0xd6,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff]
+
+v_minmax_f16_e64_dpp v5, -|v1|, v2, -|-1| mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x05,0x61,0xd6,0xfa,0x04,0x06,0xab,0x01,0x5f,0x01,0x01]
+
+v_minmax_f16_e64_dpp v5, v1, -|v2|, -|0.5| mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX11: [0x05,0x06,0x61,0xd6,0xfa,0x04,0xc2,0xd3,0x01,0x60,0x09,0x13]
+
+v_minmax_f16_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX11: [0xff,0x87,0x61,0xd6,0xfa,0xfe,0xf7,0xfb,0xff,0x6f,0x05,0x30]
+
+v_minmax_f32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX11: [0x05,0x00,0x5f,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_minmax_f32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX11: [0x05,0x00,0x5f,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_minmax_f32_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX11: [0x05,0x00,0x5f,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_minmax_f32_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX11: [0x05,0x00,0x5f,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_minmax_f32_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX11: [0x05,0x00,0x5f,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_minmax_f32_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX11: [0x05,0x00,0x5f,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_minmax_f32_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX11: [0x05,0x00,0x5f,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_minmax_f32_e64_dpp v5, |v1|, v2, -ttmp15 row_shr:15
+// GFX11: [0x05,0x01,0x5f,0xd6,0xfa,0x04,0xee,0x81,0x01,0x1f,0x01,0xff]
+
+v_minmax_f32_e64_dpp v5, v1, -|v2|, exec_hi row_ror:1
+// GFX11: [0x05,0x02,0x5f,0xd6,0xfa,0x04,0xfe,0x41,0x01,0x21,0x01,0xff]
+
+v_minmax_f32_e64_dpp v5, -v1, v2, |exec_lo| row_ror:15
+// GFX11: [0x05,0x04,0x5f,0xd6,0xfa,0x04,0xfa,0x21,0x01,0x2f,0x01,0xff]
+
+v_minmax_f32_e64_dpp v5, -|v1|, -|v2|, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x03,0x5f,0xd6,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff]
+
+v_minmax_f32_e64_dpp v5, -|v1|, v2, -|-1| mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x05,0x5f,0xd6,0xfa,0x04,0x06,0xab,0x01,0x5f,0x01,0x01]
+
+v_minmax_f32_e64_dpp v5, v1, -|v2|, -|0.5| mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX11: [0x05,0x06,0x5f,0xd6,0xfa,0x04,0xc2,0xd3,0x01,0x60,0x09,0x13]
+
+v_minmax_f32_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX11: [0xff,0x87,0x5f,0xd6,0xfa,0xfe,0xf7,0xfb,0xff,0x6f,0x05,0x30]
+
+v_minmax_i32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX11: [0x05,0x00,0x65,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_minmax_i32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX11: [0x05,0x00,0x65,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_minmax_i32_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX11: [0x05,0x00,0x65,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_minmax_i32_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX11: [0x05,0x00,0x65,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_minmax_i32_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX11: [0x05,0x00,0x65,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_minmax_i32_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX11: [0x05,0x00,0x65,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_minmax_i32_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX11: [0x05,0x00,0x65,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_minmax_i32_e64_dpp v5, v1, v2, ttmp15 row_shr:15
+// GFX11: [0x05,0x00,0x65,0xd6,0xfa,0x04,0xee,0x01,0x01,0x1f,0x01,0xff]
+
+v_minmax_i32_e64_dpp v5, v1, v2, exec_hi row_ror:1
+// GFX11: [0x05,0x00,0x65,0xd6,0xfa,0x04,0xfe,0x01,0x01,0x21,0x01,0xff]
+
+v_minmax_i32_e64_dpp v5, v1, v2, exec_lo row_ror:15
+// GFX11: [0x05,0x00,0x65,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x2f,0x01,0xff]
+
+v_minmax_i32_e64_dpp v5, v1, v2, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x00,0x65,0xd6,0xfa,0x04,0xf2,0x01,0x01,0x50,0x01,0xff]
+
+v_minmax_i32_e64_dpp v5, v1, v2, -1 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x00,0x65,0xd6,0xfa,0x04,0x06,0x03,0x01,0x5f,0x01,0x01]
+
+v_minmax_i32_e64_dpp v5, v1, v2, 0.5 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX11: [0x05,0x00,0x65,0xd6,0xfa,0x04,0xc2,0x03,0x01,0x60,0x09,0x13]
+
+v_minmax_i32_e64_dpp v255, v255, v255, src_scc row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX11: [0xff,0x00,0x65,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x05,0x30]
+
+v_minmax_u32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX11: [0x05,0x00,0x63,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_minmax_u32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX11: [0x05,0x00,0x63,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_minmax_u32_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX11: [0x05,0x00,0x63,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_minmax_u32_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX11: [0x05,0x00,0x63,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_minmax_u32_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX11: [0x05,0x00,0x63,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_minmax_u32_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX11: [0x05,0x00,0x63,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_minmax_u32_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX11: [0x05,0x00,0x63,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_minmax_u32_e64_dpp v5, v1, v2, ttmp15 row_shr:15
+// GFX11: [0x05,0x00,0x63,0xd6,0xfa,0x04,0xee,0x01,0x01,0x1f,0x01,0xff]
+
+v_minmax_u32_e64_dpp v5, v1, v2, exec_hi row_ror:1
+// GFX11: [0x05,0x00,0x63,0xd6,0xfa,0x04,0xfe,0x01,0x01,0x21,0x01,0xff]
+
+v_minmax_u32_e64_dpp v5, v1, v2, exec_lo row_ror:15
+// GFX11: [0x05,0x00,0x63,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x2f,0x01,0xff]
+
+v_minmax_u32_e64_dpp v5, v1, v2, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x00,0x63,0xd6,0xfa,0x04,0xf2,0x01,0x01,0x50,0x01,0xff]
+
+v_minmax_u32_e64_dpp v5, v1, v2, -1 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x00,0x63,0xd6,0xfa,0x04,0x06,0x03,0x01,0x5f,0x01,0x01]
+
+v_minmax_u32_e64_dpp v5, v1, v2, 0.5 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX11: [0x05,0x00,0x63,0xd6,0xfa,0x04,0xc2,0x03,0x01,0x60,0x09,0x13]
+
+v_minmax_u32_e64_dpp v255, v255, v255, src_scc row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX11: [0xff,0x00,0x63,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x05,0x30]
+
+v_msad_u8_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX11: [0x05,0x00,0x39,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_msad_u8_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX11: [0x05,0x00,0x39,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_msad_u8_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX11: [0x05,0x00,0x39,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_msad_u8_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX11: [0x05,0x00,0x39,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_msad_u8_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX11: [0x05,0x00,0x39,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_msad_u8_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX11: [0x05,0x00,0x39,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_msad_u8_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX11: [0x05,0x00,0x39,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_msad_u8_e64_dpp v5, v1, v2, ttmp15 row_shr:15
+// GFX11: [0x05,0x00,0x39,0xd6,0xfa,0x04,0xee,0x01,0x01,0x1f,0x01,0xff]
+
+v_msad_u8_e64_dpp v5, v1, v2, exec_hi row_ror:1
+// GFX11: [0x05,0x00,0x39,0xd6,0xfa,0x04,0xfe,0x01,0x01,0x21,0x01,0xff]
+
+v_msad_u8_e64_dpp v5, v1, v2, exec_lo row_ror:15
+// GFX11: [0x05,0x00,0x39,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x2f,0x01,0xff]
+
+v_msad_u8_e64_dpp v5, v1, v2, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x00,0x39,0xd6,0xfa,0x04,0xf2,0x01,0x01,0x50,0x01,0xff]
+
+v_msad_u8_e64_dpp v5, v1, v2, -1 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x00,0x39,0xd6,0xfa,0x04,0x06,0x03,0x01,0x5f,0x01,0x01]
+
+v_msad_u8_e64_dpp v5, v1, v2, 0.5 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX11: [0x05,0x00,0x39,0xd6,0xfa,0x04,0xc2,0x03,0x01,0x60,0x09,0x13]
+
+v_msad_u8_e64_dpp v255, v255, v255, src_scc clamp row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX11: [0xff,0x80,0x39,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x05,0x30]
+
+v_mul_lo_u16_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0]
+// GFX11: [0x05,0x00,0x05,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+
+v_mul_lo_u16_e64_dpp v5, v1, v2 quad_perm:[0,1,2,3]
+// GFX11: [0x05,0x00,0x05,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+
+v_mul_lo_u16_e64_dpp v5, v1, v2 row_mirror
+// GFX11: [0x05,0x00,0x05,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+
+v_mul_lo_u16_e64_dpp v5, v1, v2 row_half_mirror
+// GFX11: [0x05,0x00,0x05,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+
+v_mul_lo_u16_e64_dpp v5, v1, v2 row_shl:1
+// GFX11: [0x05,0x00,0x05,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+
+v_mul_lo_u16_e64_dpp v5, v1, v2 row_shl:15
+// GFX11: [0x05,0x00,0x05,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+
+v_mul_lo_u16_e64_dpp v5, v1, v2 row_shr:1
+// GFX11: [0x05,0x00,0x05,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+
+v_mul_lo_u16_e64_dpp v5, v1, v2 row_shr:15
+// GFX11: [0x05,0x00,0x05,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+
+v_mul_lo_u16_e64_dpp v5, v1, v2 row_ror:1
+// GFX11: [0x05,0x00,0x05,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+
+v_mul_lo_u16_e64_dpp v5, v1, v2 row_ror:15
+// GFX11: [0x05,0x00,0x05,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+
+v_mul_lo_u16_e64_dpp v5, v1, v2 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x00,0x05,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+
+v_mul_lo_u16_e64_dpp v5, v1, v2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x00,0x05,0xd7,0xfa,0x04,0x02,0x00,0x01,0x5f,0x01,0x01]
+
+v_mul_lo_u16_e64_dpp v5, v1, v2 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX11: [0x05,0x00,0x05,0xd7,0xfa,0x04,0x02,0x00,0x01,0x60,0x09,0x13]
+
+v_mul_lo_u16_e64_dpp v255, v255, v255 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX11: [0xff,0x00,0x05,0xd7,0xfa,0xfe,0x03,0x00,0xff,0x6f,0x05,0x30]
+
+v_mullit_f32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX11: [0x05,0x00,0x18,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_mullit_f32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX11: [0x05,0x00,0x18,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_mullit_f32_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX11: [0x05,0x00,0x18,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_mullit_f32_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX11: [0x05,0x00,0x18,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_mullit_f32_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX11: [0x05,0x00,0x18,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_mullit_f32_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX11: [0x05,0x00,0x18,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_mullit_f32_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX11: [0x05,0x00,0x18,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_mullit_f32_e64_dpp v5, |v1|, v2, -ttmp15 row_shr:15
+// GFX11: [0x05,0x01,0x18,0xd6,0xfa,0x04,0xee,0x81,0x01,0x1f,0x01,0xff]
+
+v_mullit_f32_e64_dpp v5, v1, -|v2|, exec_hi row_ror:1
+// GFX11: [0x05,0x02,0x18,0xd6,0xfa,0x04,0xfe,0x41,0x01,0x21,0x01,0xff]
+
+v_mullit_f32_e64_dpp v5, -v1, v2, |exec_lo| row_ror:15
+// GFX11: [0x05,0x04,0x18,0xd6,0xfa,0x04,0xfa,0x21,0x01,0x2f,0x01,0xff]
+
+v_mullit_f32_e64_dpp v5, -|v1|, -|v2|, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x03,0x18,0xd6,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff]
+
+v_mullit_f32_e64_dpp v5, -|v1|, v2, -|-1| mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x05,0x18,0xd6,0xfa,0x04,0x06,0xab,0x01,0x5f,0x01,0x01]
+
+v_mullit_f32_e64_dpp v5, v1, -|v2|, -|0.5| mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX11: [0x05,0x06,0x18,0xd6,0xfa,0x04,0xc2,0xd3,0x01,0x60,0x09,0x13]
+
+v_mullit_f32_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX11: [0xff,0x87,0x18,0xd6,0xfa,0xfe,0xf7,0xfb,0xff,0x6f,0x05,0x30]
+
+v_or3_b32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX11: [0x05,0x00,0x58,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_or3_b32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX11: [0x05,0x00,0x58,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_or3_b32_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX11: [0x05,0x00,0x58,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_or3_b32_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX11: [0x05,0x00,0x58,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_or3_b32_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX11: [0x05,0x00,0x58,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_or3_b32_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX11: [0x05,0x00,0x58,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_or3_b32_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX11: [0x05,0x00,0x58,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_or3_b32_e64_dpp v5, v1, v2, ttmp15 row_shr:15
+// GFX11: [0x05,0x00,0x58,0xd6,0xfa,0x04,0xee,0x01,0x01,0x1f,0x01,0xff]
+
+v_or3_b32_e64_dpp v5, v1, v2, exec_hi row_ror:1
+// GFX11: [0x05,0x00,0x58,0xd6,0xfa,0x04,0xfe,0x01,0x01,0x21,0x01,0xff]
+
+v_or3_b32_e64_dpp v5, v1, v2, exec_lo row_ror:15
+// GFX11: [0x05,0x00,0x58,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x2f,0x01,0xff]
+
+v_or3_b32_e64_dpp v5, v1, v2, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x00,0x58,0xd6,0xfa,0x04,0xf2,0x01,0x01,0x50,0x01,0xff]
+
+v_or3_b32_e64_dpp v5, v1, v2, -1 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x00,0x58,0xd6,0xfa,0x04,0x06,0x03,0x01,0x5f,0x01,0x01]
+
+v_or3_b32_e64_dpp v5, v1, v2, 0.5 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX11: [0x05,0x00,0x58,0xd6,0xfa,0x04,0xc2,0x03,0x01,0x60,0x09,0x13]
+
+v_or3_b32_e64_dpp v255, v255, v255, src_scc row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX11: [0xff,0x00,0x58,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x05,0x30]
+
+v_or_b16_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0]
+// GFX11: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+
+v_or_b16_e64_dpp v5, v1, v2 quad_perm:[0,1,2,3]
+// GFX11: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+
+v_or_b16_e64_dpp v5, v1, v2 row_mirror
+// GFX11: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+
+v_or_b16_e64_dpp v5, v1, v2 row_half_mirror
+// GFX11: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+
+v_or_b16_e64_dpp v5, v1, v2 row_shl:1
+// GFX11: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+
+v_or_b16_e64_dpp v5, v1, v2 row_shl:15
+// GFX11: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+
+v_or_b16_e64_dpp v5, v1, v2 row_shr:1
+// GFX11: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+
+v_or_b16_e64_dpp v5, v1, v2 row_shr:15
+// GFX11: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+
+v_or_b16_e64_dpp v5, v1, v2 row_ror:1
+// GFX11: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+
+v_or_b16_e64_dpp v5, v1, v2 row_ror:15
+// GFX11: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+
+v_or_b16_e64_dpp v5, v1, v2 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+
+v_or_b16_e64_dpp v5, v1, v2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x5f,0x01,0x01]
+
+v_or_b16_e64_dpp v5, v1, v2 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX11: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x60,0x09,0x13]
+
+v_or_b16_e64_dpp v255, v255, v255 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX11: [0xff,0x00,0x63,0xd7,0xfa,0xfe,0x03,0x00,0xff,0x6f,0x05,0x30]
+
+v_pack_b32_f16_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0]
+// GFX11: [0x05,0x00,0x11,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+
+v_pack_b32_f16_e64_dpp v5, v1, v2 quad_perm:[0,1,2,3]
+// GFX11: [0x05,0x00,0x11,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+
+v_pack_b32_f16_e64_dpp v5, v1, v2 row_mirror
+// GFX11: [0x05,0x00,0x11,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+
+v_pack_b32_f16_e64_dpp v5, v1, v2 row_half_mirror
+// GFX11: [0x05,0x00,0x11,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+
+v_pack_b32_f16_e64_dpp v5, v1, v2 row_shl:1
+// GFX11: [0x05,0x00,0x11,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+
+v_pack_b32_f16_e64_dpp v5, v1, v2 row_shl:15
+// GFX11: [0x05,0x00,0x11,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+
+v_pack_b32_f16_e64_dpp v5, v1, v2 row_shr:1
+// GFX11: [0x05,0x00,0x11,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+
+v_pack_b32_f16_e64_dpp v5, v1, v2 row_shr:15
+// GFX11: [0x05,0x00,0x11,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+
+v_pack_b32_f16_e64_dpp v5, v1, v2 row_ror:1
+// GFX11: [0x05,0x00,0x11,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+
+v_pack_b32_f16_e64_dpp v5, v1, v2 row_ror:15
+// GFX11: [0x05,0x00,0x11,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+
+v_pack_b32_f16_e64_dpp v5, v1, v2 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x00,0x11,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+
+v_pack_b32_f16_e64_dpp v5, |v1|, -v2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x01,0x11,0xd7,0xfa,0x04,0x02,0x40,0x01,0x5f,0x01,0x01]
+
+v_pack_b32_f16_e64_dpp v5, -v1, |v2| row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX11: [0x05,0x02,0x11,0xd7,0xfa,0x04,0x02,0x20,0x01,0x60,0x09,0x13]
+
+v_pack_b32_f16_e64_dpp v255, -|v255|, -|v255| row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX11: [0xff,0x03,0x11,0xd7,0xfa,0xfe,0x03,0x60,0xff,0x6f,0x05,0x30]
+
+v_perm_b32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX11: [0x05,0x00,0x44,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_perm_b32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX11: [0x05,0x00,0x44,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_perm_b32_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX11: [0x05,0x00,0x44,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_perm_b32_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX11: [0x05,0x00,0x44,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_perm_b32_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX11: [0x05,0x00,0x44,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_perm_b32_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX11: [0x05,0x00,0x44,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_perm_b32_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX11: [0x05,0x00,0x44,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_perm_b32_e64_dpp v5, v1, v2, ttmp15 row_shr:15
+// GFX11: [0x05,0x00,0x44,0xd6,0xfa,0x04,0xee,0x01,0x01,0x1f,0x01,0xff]
+
+v_perm_b32_e64_dpp v5, v1, v2, exec_hi row_ror:1
+// GFX11: [0x05,0x00,0x44,0xd6,0xfa,0x04,0xfe,0x01,0x01,0x21,0x01,0xff]
+
+v_perm_b32_e64_dpp v5, v1, v2, exec_lo row_ror:15
+// GFX11: [0x05,0x00,0x44,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x2f,0x01,0xff]
+
+v_perm_b32_e64_dpp v5, v1, v2, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x00,0x44,0xd6,0xfa,0x04,0xf2,0x01,0x01,0x50,0x01,0xff]
+
+v_perm_b32_e64_dpp v5, v1, v2, -1 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x00,0x44,0xd6,0xfa,0x04,0x06,0x03,0x01,0x5f,0x01,0x01]
+
+v_perm_b32_e64_dpp v5, v1, v2, 0.5 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX11: [0x05,0x00,0x44,0xd6,0xfa,0x04,0xc2,0x03,0x01,0x60,0x09,0x13]
+
+v_perm_b32_e64_dpp v255, v255, v255, src_scc row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX11: [0xff,0x00,0x44,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x05,0x30]
+
+v_sad_hi_u8_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX11: [0x05,0x00,0x23,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_sad_hi_u8_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX11: [0x05,0x00,0x23,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_sad_hi_u8_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX11: [0x05,0x00,0x23,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_sad_hi_u8_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX11: [0x05,0x00,0x23,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_sad_hi_u8_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX11: [0x05,0x00,0x23,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_sad_hi_u8_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX11: [0x05,0x00,0x23,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_sad_hi_u8_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX11: [0x05,0x00,0x23,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_sad_hi_u8_e64_dpp v5, v1, v2, ttmp15 row_shr:15
+// GFX11: [0x05,0x00,0x23,0xd6,0xfa,0x04,0xee,0x01,0x01,0x1f,0x01,0xff]
+
+v_sad_hi_u8_e64_dpp v5, v1, v2, exec_hi row_ror:1
+// GFX11: [0x05,0x00,0x23,0xd6,0xfa,0x04,0xfe,0x01,0x01,0x21,0x01,0xff]
+
+v_sad_hi_u8_e64_dpp v5, v1, v2, exec_lo row_ror:15
+// GFX11: [0x05,0x00,0x23,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x2f,0x01,0xff]
+
+v_sad_hi_u8_e64_dpp v5, v1, v2, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x00,0x23,0xd6,0xfa,0x04,0xf2,0x01,0x01,0x50,0x01,0xff]
+
+v_sad_hi_u8_e64_dpp v5, v1, v2, -1 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x00,0x23,0xd6,0xfa,0x04,0x06,0x03,0x01,0x5f,0x01,0x01]
+
+v_sad_hi_u8_e64_dpp v5, v1, v2, 0.5 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX11: [0x05,0x00,0x23,0xd6,0xfa,0x04,0xc2,0x03,0x01,0x60,0x09,0x13]
+
+v_sad_hi_u8_e64_dpp v255, v255, v255, src_scc clamp row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX11: [0xff,0x80,0x23,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x05,0x30]
+
+v_sad_u16_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX11: [0x05,0x00,0x24,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_sad_u16_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX11: [0x05,0x00,0x24,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_sad_u16_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX11: [0x05,0x00,0x24,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_sad_u16_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX11: [0x05,0x00,0x24,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_sad_u16_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX11: [0x05,0x00,0x24,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_sad_u16_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX11: [0x05,0x00,0x24,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_sad_u16_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX11: [0x05,0x00,0x24,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_sad_u16_e64_dpp v5, v1, v2, ttmp15 row_shr:15
+// GFX11: [0x05,0x00,0x24,0xd6,0xfa,0x04,0xee,0x01,0x01,0x1f,0x01,0xff]
+
+v_sad_u16_e64_dpp v5, v1, v2, exec_hi row_ror:1
+// GFX11: [0x05,0x00,0x24,0xd6,0xfa,0x04,0xfe,0x01,0x01,0x21,0x01,0xff]
+
+v_sad_u16_e64_dpp v5, v1, v2, exec_lo row_ror:15
+// GFX11: [0x05,0x00,0x24,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x2f,0x01,0xff]
+
+v_sad_u16_e64_dpp v5, v1, v2, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x00,0x24,0xd6,0xfa,0x04,0xf2,0x01,0x01,0x50,0x01,0xff]
+
+v_sad_u16_e64_dpp v5, v1, v2, -1 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x00,0x24,0xd6,0xfa,0x04,0x06,0x03,0x01,0x5f,0x01,0x01]
+
+v_sad_u16_e64_dpp v5, v1, v2, 0.5 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX11: [0x05,0x00,0x24,0xd6,0xfa,0x04,0xc2,0x03,0x01,0x60,0x09,0x13]
+
+v_sad_u16_e64_dpp v255, v255, v255, src_scc clamp row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX11: [0xff,0x80,0x24,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x05,0x30]
+
+v_sad_u32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX11: [0x05,0x00,0x25,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_sad_u32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX11: [0x05,0x00,0x25,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_sad_u32_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX11: [0x05,0x00,0x25,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_sad_u32_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX11: [0x05,0x00,0x25,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_sad_u32_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX11: [0x05,0x00,0x25,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_sad_u32_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX11: [0x05,0x00,0x25,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_sad_u32_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX11: [0x05,0x00,0x25,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_sad_u32_e64_dpp v5, v1, v2, ttmp15 row_shr:15
+// GFX11: [0x05,0x00,0x25,0xd6,0xfa,0x04,0xee,0x01,0x01,0x1f,0x01,0xff]
+
+v_sad_u32_e64_dpp v5, v1, v2, exec_hi row_ror:1
+// GFX11: [0x05,0x00,0x25,0xd6,0xfa,0x04,0xfe,0x01,0x01,0x21,0x01,0xff]
+
+v_sad_u32_e64_dpp v5, v1, v2, exec_lo row_ror:15
+// GFX11: [0x05,0x00,0x25,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x2f,0x01,0xff]
+
+v_sad_u32_e64_dpp v5, v1, v2, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x00,0x25,0xd6,0xfa,0x04,0xf2,0x01,0x01,0x50,0x01,0xff]
+
+v_sad_u32_e64_dpp v5, v1, v2, -1 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x00,0x25,0xd6,0xfa,0x04,0x06,0x03,0x01,0x5f,0x01,0x01]
+
+v_sad_u32_e64_dpp v5, v1, v2, 0.5 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX11: [0x05,0x00,0x25,0xd6,0xfa,0x04,0xc2,0x03,0x01,0x60,0x09,0x13]
+
+v_sad_u32_e64_dpp v255, v255, v255, src_scc clamp row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX11: [0xff,0x80,0x25,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x05,0x30]
+
+v_sad_u8_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX11: [0x05,0x00,0x22,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_sad_u8_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX11: [0x05,0x00,0x22,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_sad_u8_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX11: [0x05,0x00,0x22,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_sad_u8_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX11: [0x05,0x00,0x22,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_sad_u8_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX11: [0x05,0x00,0x22,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_sad_u8_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX11: [0x05,0x00,0x22,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_sad_u8_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX11: [0x05,0x00,0x22,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_sad_u8_e64_dpp v5, v1, v2, ttmp15 row_shr:15
+// GFX11: [0x05,0x00,0x22,0xd6,0xfa,0x04,0xee,0x01,0x01,0x1f,0x01,0xff]
+
+v_sad_u8_e64_dpp v5, v1, v2, exec_hi row_ror:1
+// GFX11: [0x05,0x00,0x22,0xd6,0xfa,0x04,0xfe,0x01,0x01,0x21,0x01,0xff]
+
+v_sad_u8_e64_dpp v5, v1, v2, exec_lo row_ror:15
+// GFX11: [0x05,0x00,0x22,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x2f,0x01,0xff]
+
+v_sad_u8_e64_dpp v5, v1, v2, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x00,0x22,0xd6,0xfa,0x04,0xf2,0x01,0x01,0x50,0x01,0xff]
+
+v_sad_u8_e64_dpp v5, v1, v2, -1 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x00,0x22,0xd6,0xfa,0x04,0x06,0x03,0x01,0x5f,0x01,0x01]
+
+v_sad_u8_e64_dpp v5, v1, v2, 0.5 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX11: [0x05,0x00,0x22,0xd6,0xfa,0x04,0xc2,0x03,0x01,0x60,0x09,0x13]
+
+v_sad_u8_e64_dpp v255, v255, v255, src_scc clamp row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX11: [0xff,0x80,0x22,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x05,0x30]
+
+v_sub_co_u32_e64_dpp v5, s6, v1, v2 quad_perm:[3,2,1,0]
+// W32: [0x05,0x06,0x01,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32_e64_dpp v5, s6, v1, v2 quad_perm:[0,1,2,3]
+// W32: [0x05,0x06,0x01,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32_e64_dpp v5, s6, v1, v2 row_mirror
+// W32: [0x05,0x06,0x01,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32_e64_dpp v5, s6, v1, v2 row_half_mirror
+// W32: [0x05,0x06,0x01,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32_e64_dpp v5, s6, v1, v2 row_shl:1
+// W32: [0x05,0x06,0x01,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32_e64_dpp v5, s6, v1, v2 row_shl:15
+// W32: [0x05,0x06,0x01,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32_e64_dpp v5, s6, v1, v2 row_shr:1
+// W32: [0x05,0x06,0x01,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32_e64_dpp v5, s6, v1, v2 row_shr:15
+// W32: [0x05,0x06,0x01,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32_e64_dpp v5, s6, v1, v2 row_ror:1
+// W32: [0x05,0x06,0x01,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32_e64_dpp v5, s105, v1, v2 row_ror:15
+// W32: [0x05,0x69,0x01,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32_e64_dpp v5, vcc_lo, v1, v2 row_share:0 row_mask:0xf bank_mask:0xf
+// W32: [0x05,0x6a,0x01,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32_e64_dpp v5, vcc_hi, v1, v2 row_share:15 row_mask:0x0 bank_mask:0x1
+// W32: [0x05,0x6b,0x01,0xd7,0xfa,0x04,0x02,0x00,0x01,0x5f,0x01,0x01]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32_e64_dpp v5, ttmp15, v1, v2 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// W32: [0x05,0x7b,0x01,0xd7,0xfa,0x04,0x02,0x00,0x01,0x60,0x09,0x13]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32_e64_dpp v5, s[12:13], v1, v2 quad_perm:[3,2,1,0]
+// W64: [0x05,0x0c,0x01,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32_e64_dpp v5, s[12:13], v1, v2 quad_perm:[0,1,2,3]
+// W64: [0x05,0x0c,0x01,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32_e64_dpp v5, s[12:13], v1, v2 row_mirror
+// W64: [0x05,0x0c,0x01,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32_e64_dpp v5, s[12:13], v1, v2 row_half_mirror
+// W64: [0x05,0x0c,0x01,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32_e64_dpp v5, s[12:13], v1, v2 row_shl:1
+// W64: [0x05,0x0c,0x01,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32_e64_dpp v5, s[12:13], v1, v2 row_shl:15
+// W64: [0x05,0x0c,0x01,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32_e64_dpp v5, s[12:13], v1, v2 row_shr:1
+// W64: [0x05,0x0c,0x01,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32_e64_dpp v5, s[12:13], v1, v2 row_shr:15
+// W64: [0x05,0x0c,0x01,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32_e64_dpp v5, s[12:13], v1, v2 row_ror:1
+// W64: [0x05,0x0c,0x01,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32_e64_dpp v5, s[12:13], v1, v2 row_ror:15
+// W64: [0x05,0x0c,0x01,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32_e64_dpp v5, s[104:105], v1, v2 row_share:0 row_mask:0xf bank_mask:0xf
+// W64: [0x05,0x68,0x01,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32_e64_dpp v5, vcc, v1, v2 row_share:15 row_mask:0x0 bank_mask:0x1
+// W64: [0x05,0x6a,0x01,0xd7,0xfa,0x04,0x02,0x00,0x01,0x5f,0x01,0x01]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32_e64_dpp v5, ttmp[14:15], v1, v2 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// W64: [0x05,0x7a,0x01,0xd7,0xfa,0x04,0x02,0x00,0x01,0x60,0x09,0x13]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32_e64_dpp v255, null, v255, v255 clamp row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX11: [0xff,0xfc,0x01,0xd7,0xfa,0xfe,0x03,0x00,0xff,0x6f,0x05,0x30]
+
+v_sub_nc_i16_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0]
+// GFX11: [0x05,0x00,0x0e,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+
+v_sub_nc_i16_e64_dpp v5, v1, v2 quad_perm:[0,1,2,3]
+// GFX11: [0x05,0x00,0x0e,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+
+v_sub_nc_i16_e64_dpp v5, v1, v2 row_mirror
+// GFX11: [0x05,0x00,0x0e,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+
+v_sub_nc_i16_e64_dpp v5, v1, v2 row_half_mirror
+// GFX11: [0x05,0x00,0x0e,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+
+v_sub_nc_i16_e64_dpp v5, v1, v2 row_shl:1
+// GFX11: [0x05,0x00,0x0e,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+
+v_sub_nc_i16_e64_dpp v5, v1, v2 row_shl:15
+// GFX11: [0x05,0x00,0x0e,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+
+v_sub_nc_i16_e64_dpp v5, v1, v2 row_shr:1
+// GFX11: [0x05,0x00,0x0e,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+
+v_sub_nc_i16_e64_dpp v5, v1, v2 row_shr:15
+// GFX11: [0x05,0x00,0x0e,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+
+v_sub_nc_i16_e64_dpp v5, v1, v2 row_ror:1
+// GFX11: [0x05,0x00,0x0e,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+
+v_sub_nc_i16_e64_dpp v5, v1, v2 row_ror:15
+// GFX11: [0x05,0x00,0x0e,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+
+v_sub_nc_i16_e64_dpp v5, v1, v2 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x00,0x0e,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+
+v_sub_nc_i16_e64_dpp v5, v1, v2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x00,0x0e,0xd7,0xfa,0x04,0x02,0x00,0x01,0x5f,0x01,0x01]
+
+v_sub_nc_i16_e64_dpp v5, v1, v2 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX11: [0x05,0x00,0x0e,0xd7,0xfa,0x04,0x02,0x00,0x01,0x60,0x09,0x13]
+
+v_sub_nc_i16_e64_dpp v255, v255, v255 clamp row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX11: [0xff,0x80,0x0e,0xd7,0xfa,0xfe,0x03,0x00,0xff,0x6f,0x05,0x30]
+
+v_sub_nc_i32_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0]
+// GFX11: [0x05,0x00,0x25,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+
+v_sub_nc_i32_e64_dpp v5, v1, v2 quad_perm:[0,1,2,3]
+// GFX11: [0x05,0x00,0x25,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+
+v_sub_nc_i32_e64_dpp v5, v1, v2 row_mirror
+// GFX11: [0x05,0x00,0x25,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+
+v_sub_nc_i32_e64_dpp v5, v1, v2 row_half_mirror
+// GFX11: [0x05,0x00,0x25,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+
+v_sub_nc_i32_e64_dpp v5, v1, v2 row_shl:1
+// GFX11: [0x05,0x00,0x25,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+
+v_sub_nc_i32_e64_dpp v5, v1, v2 row_shl:15
+// GFX11: [0x05,0x00,0x25,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+
+v_sub_nc_i32_e64_dpp v5, v1, v2 row_shr:1
+// GFX11: [0x05,0x00,0x25,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+
+v_sub_nc_i32_e64_dpp v5, v1, v2 row_shr:15
+// GFX11: [0x05,0x00,0x25,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+
+v_sub_nc_i32_e64_dpp v5, v1, v2 row_ror:1
+// GFX11: [0x05,0x00,0x25,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+
+v_sub_nc_i32_e64_dpp v5, v1, v2 row_ror:15
+// GFX11: [0x05,0x00,0x25,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+
+v_sub_nc_i32_e64_dpp v5, v1, v2 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x00,0x25,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+
+v_sub_nc_i32_e64_dpp v5, v1, v2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x00,0x25,0xd7,0xfa,0x04,0x02,0x00,0x01,0x5f,0x01,0x01]
+
+v_sub_nc_i32_e64_dpp v5, v1, v2 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX11: [0x05,0x00,0x25,0xd7,0xfa,0x04,0x02,0x00,0x01,0x60,0x09,0x13]
+
+v_sub_nc_i32_e64_dpp v255, v255, v255 clamp row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX11: [0xff,0x80,0x25,0xd7,0xfa,0xfe,0x03,0x00,0xff,0x6f,0x05,0x30]
+
+v_sub_nc_u16_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0]
+// GFX11: [0x05,0x00,0x04,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+
+v_sub_nc_u16_e64_dpp v5, v1, v2 quad_perm:[0,1,2,3]
+// GFX11: [0x05,0x00,0x04,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+
+v_sub_nc_u16_e64_dpp v5, v1, v2 row_mirror
+// GFX11: [0x05,0x00,0x04,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+
+v_sub_nc_u16_e64_dpp v5, v1, v2 row_half_mirror
+// GFX11: [0x05,0x00,0x04,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+
+v_sub_nc_u16_e64_dpp v5, v1, v2 row_shl:1
+// GFX11: [0x05,0x00,0x04,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+
+v_sub_nc_u16_e64_dpp v5, v1, v2 row_shl:15
+// GFX11: [0x05,0x00,0x04,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+
+v_sub_nc_u16_e64_dpp v5, v1, v2 row_shr:1
+// GFX11: [0x05,0x00,0x04,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+
+v_sub_nc_u16_e64_dpp v5, v1, v2 row_shr:15
+// GFX11: [0x05,0x00,0x04,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+
+v_sub_nc_u16_e64_dpp v5, v1, v2 row_ror:1
+// GFX11: [0x05,0x00,0x04,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+
+v_sub_nc_u16_e64_dpp v5, v1, v2 row_ror:15
+// GFX11: [0x05,0x00,0x04,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+
+v_sub_nc_u16_e64_dpp v5, v1, v2 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x00,0x04,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+
+v_sub_nc_u16_e64_dpp v5, v1, v2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x00,0x04,0xd7,0xfa,0x04,0x02,0x00,0x01,0x5f,0x01,0x01]
+
+v_sub_nc_u16_e64_dpp v5, v1, v2 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX11: [0x05,0x00,0x04,0xd7,0xfa,0x04,0x02,0x00,0x01,0x60,0x09,0x13]
+
+v_sub_nc_u16_e64_dpp v255, v255, v255 clamp row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX11: [0xff,0x80,0x04,0xd7,0xfa,0xfe,0x03,0x00,0xff,0x6f,0x05,0x30]
+
+v_subrev_co_u32_e64_dpp v5, s6, v1, v2 quad_perm:[3,2,1,0]
+// W32: [0x05,0x06,0x02,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32_e64_dpp v5, s6, v1, v2 quad_perm:[0,1,2,3]
+// W32: [0x05,0x06,0x02,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32_e64_dpp v5, s6, v1, v2 row_mirror
+// W32: [0x05,0x06,0x02,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32_e64_dpp v5, s6, v1, v2 row_half_mirror
+// W32: [0x05,0x06,0x02,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32_e64_dpp v5, s6, v1, v2 row_shl:1
+// W32: [0x05,0x06,0x02,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32_e64_dpp v5, s6, v1, v2 row_shl:15
+// W32: [0x05,0x06,0x02,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32_e64_dpp v5, s6, v1, v2 row_shr:1
+// W32: [0x05,0x06,0x02,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32_e64_dpp v5, s6, v1, v2 row_shr:15
+// W32: [0x05,0x06,0x02,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32_e64_dpp v5, s6, v1, v2 row_ror:1
+// W32: [0x05,0x06,0x02,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32_e64_dpp v5, s105, v1, v2 row_ror:15
+// W32: [0x05,0x69,0x02,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32_e64_dpp v5, vcc_lo, v1, v2 row_share:0 row_mask:0xf bank_mask:0xf
+// W32: [0x05,0x6a,0x02,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32_e64_dpp v5, vcc_hi, v1, v2 row_share:15 row_mask:0x0 bank_mask:0x1
+// W32: [0x05,0x6b,0x02,0xd7,0xfa,0x04,0x02,0x00,0x01,0x5f,0x01,0x01]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32_e64_dpp v5, ttmp15, v1, v2 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// W32: [0x05,0x7b,0x02,0xd7,0xfa,0x04,0x02,0x00,0x01,0x60,0x09,0x13]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32_e64_dpp v5, s[12:13], v1, v2 quad_perm:[3,2,1,0]
+// W64: [0x05,0x0c,0x02,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32_e64_dpp v5, s[12:13], v1, v2 quad_perm:[0,1,2,3]
+// W64: [0x05,0x0c,0x02,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32_e64_dpp v5, s[12:13], v1, v2 row_mirror
+// W64: [0x05,0x0c,0x02,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32_e64_dpp v5, s[12:13], v1, v2 row_half_mirror
+// W64: [0x05,0x0c,0x02,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32_e64_dpp v5, s[12:13], v1, v2 row_shl:1
+// W64: [0x05,0x0c,0x02,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32_e64_dpp v5, s[12:13], v1, v2 row_shl:15
+// W64: [0x05,0x0c,0x02,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32_e64_dpp v5, s[12:13], v1, v2 row_shr:1
+// W64: [0x05,0x0c,0x02,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32_e64_dpp v5, s[12:13], v1, v2 row_shr:15
+// W64: [0x05,0x0c,0x02,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32_e64_dpp v5, s[12:13], v1, v2 row_ror:1
+// W64: [0x05,0x0c,0x02,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32_e64_dpp v5, s[12:13], v1, v2 row_ror:15
+// W64: [0x05,0x0c,0x02,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32_e64_dpp v5, s[104:105], v1, v2 row_share:0 row_mask:0xf bank_mask:0xf
+// W64: [0x05,0x68,0x02,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32_e64_dpp v5, vcc, v1, v2 row_share:15 row_mask:0x0 bank_mask:0x1
+// W64: [0x05,0x6a,0x02,0xd7,0xfa,0x04,0x02,0x00,0x01,0x5f,0x01,0x01]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32_e64_dpp v5, ttmp[14:15], v1, v2 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// W64: [0x05,0x7a,0x02,0xd7,0xfa,0x04,0x02,0x00,0x01,0x60,0x09,0x13]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32_e64_dpp v255, null, v255, v255 clamp row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX11: [0xff,0xfc,0x02,0xd7,0xfa,0xfe,0x03,0x00,0xff,0x6f,0x05,0x30]
+
+v_xad_u32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX11: [0x05,0x00,0x45,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_xad_u32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX11: [0x05,0x00,0x45,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_xad_u32_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX11: [0x05,0x00,0x45,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_xad_u32_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX11: [0x05,0x00,0x45,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_xad_u32_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX11: [0x05,0x00,0x45,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_xad_u32_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX11: [0x05,0x00,0x45,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_xad_u32_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX11: [0x05,0x00,0x45,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_xad_u32_e64_dpp v5, v1, v2, ttmp15 row_shr:15
+// GFX11: [0x05,0x00,0x45,0xd6,0xfa,0x04,0xee,0x01,0x01,0x1f,0x01,0xff]
+
+v_xad_u32_e64_dpp v5, v1, v2, exec_hi row_ror:1
+// GFX11: [0x05,0x00,0x45,0xd6,0xfa,0x04,0xfe,0x01,0x01,0x21,0x01,0xff]
+
+v_xad_u32_e64_dpp v5, v1, v2, exec_lo row_ror:15
+// GFX11: [0x05,0x00,0x45,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x2f,0x01,0xff]
+
+v_xad_u32_e64_dpp v5, v1, v2, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x00,0x45,0xd6,0xfa,0x04,0xf2,0x01,0x01,0x50,0x01,0xff]
+
+v_xad_u32_e64_dpp v5, v1, v2, -1 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x00,0x45,0xd6,0xfa,0x04,0x06,0x03,0x01,0x5f,0x01,0x01]
+
+v_xad_u32_e64_dpp v5, v1, v2, 0.5 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX11: [0x05,0x00,0x45,0xd6,0xfa,0x04,0xc2,0x03,0x01,0x60,0x09,0x13]
+
+v_xad_u32_e64_dpp v255, v255, v255, src_scc row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX11: [0xff,0x00,0x45,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x05,0x30]
+
+v_xor3_b32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX11: [0x05,0x00,0x40,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_xor3_b32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX11: [0x05,0x00,0x40,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_xor3_b32_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX11: [0x05,0x00,0x40,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_xor3_b32_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX11: [0x05,0x00,0x40,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_xor3_b32_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX11: [0x05,0x00,0x40,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_xor3_b32_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX11: [0x05,0x00,0x40,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_xor3_b32_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX11: [0x05,0x00,0x40,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_xor3_b32_e64_dpp v5, v1, v2, ttmp15 row_shr:15
+// GFX11: [0x05,0x00,0x40,0xd6,0xfa,0x04,0xee,0x01,0x01,0x1f,0x01,0xff]
+
+v_xor3_b32_e64_dpp v5, v1, v2, exec_hi row_ror:1
+// GFX11: [0x05,0x00,0x40,0xd6,0xfa,0x04,0xfe,0x01,0x01,0x21,0x01,0xff]
+
+v_xor3_b32_e64_dpp v5, v1, v2, exec_lo row_ror:15
+// GFX11: [0x05,0x00,0x40,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x2f,0x01,0xff]
+
+v_xor3_b32_e64_dpp v5, v1, v2, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x00,0x40,0xd6,0xfa,0x04,0xf2,0x01,0x01,0x50,0x01,0xff]
+
+v_xor3_b32_e64_dpp v5, v1, v2, -1 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x00,0x40,0xd6,0xfa,0x04,0x06,0x03,0x01,0x5f,0x01,0x01]
+
+v_xor3_b32_e64_dpp v5, v1, v2, 0.5 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX11: [0x05,0x00,0x40,0xd6,0xfa,0x04,0xc2,0x03,0x01,0x60,0x09,0x13]
+
+v_xor3_b32_e64_dpp v255, v255, v255, src_scc row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX11: [0xff,0x00,0x40,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x05,0x30]
+
+v_xor_b16_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0]
+// GFX11: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+
+v_xor_b16_e64_dpp v5, v1, v2 quad_perm:[0,1,2,3]
+// GFX11: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+
+v_xor_b16_e64_dpp v5, v1, v2 row_mirror
+// GFX11: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+
+v_xor_b16_e64_dpp v5, v1, v2 row_half_mirror
+// GFX11: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+
+v_xor_b16_e64_dpp v5, v1, v2 row_shl:1
+// GFX11: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+
+v_xor_b16_e64_dpp v5, v1, v2 row_shl:15
+// GFX11: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+
+v_xor_b16_e64_dpp v5, v1, v2 row_shr:1
+// GFX11: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+
+v_xor_b16_e64_dpp v5, v1, v2 row_shr:15
+// GFX11: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+
+v_xor_b16_e64_dpp v5, v1, v2 row_ror:1
+// GFX11: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+
+v_xor_b16_e64_dpp v5, v1, v2 row_ror:15
+// GFX11: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+
+v_xor_b16_e64_dpp v5, v1, v2 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+
+v_xor_b16_e64_dpp v5, v1, v2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x5f,0x01,0x01]
+
+v_xor_b16_e64_dpp v5, v1, v2 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX11: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x60,0x09,0x13]
+
+v_xor_b16_e64_dpp v255, v255, v255 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX11: [0xff,0x00,0x64,0xd7,0xfa,0xfe,0x03,0x00,0xff,0x6f,0x05,0x30]
+
+v_add_nc_i16_e64_dpp v5, v1, v2 op_sel:[1,1,1] row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x58,0x0d,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+
+v_add_nc_i16_e64_dpp v5, v1, v2 op_sel:[1,0,0] row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x08,0x0d,0xd7,0xfa,0x04,0x02,0x00,0x01,0x5f,0x01,0x01]
+
+v_add_nc_i16_e64_dpp v5, v1, v2 op_sel:[0,1,0] row_xmask:0 row_mask:0x1 bank_mask:0x3
+// GFX11: [0x05,0x10,0x0d,0xd7,0xfa,0x04,0x02,0x00,0x01,0x60,0x01,0x13]
+
+v_add_nc_i16_e64_dpp v255, v255, v255 op_sel:[0,0,1] clamp row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1
+// GFX11: [0xff,0xc0,0x0d,0xd7,0xfa,0xfe,0x03,0x00,0xff,0x6f,0x0d,0x30]
+
+v_add_nc_u16_e64_dpp v5, v1, v2 op_sel:[1,1,1] row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x58,0x03,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+
+v_add_nc_u16_e64_dpp v5, v1, v2 op_sel:[1,0,0] row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x08,0x03,0xd7,0xfa,0x04,0x02,0x00,0x01,0x5f,0x01,0x01]
+
+v_add_nc_u16_e64_dpp v5, v1, v2 op_sel:[0,1,0] row_xmask:0 row_mask:0x1 bank_mask:0x3
+// GFX11: [0x05,0x10,0x03,0xd7,0xfa,0x04,0x02,0x00,0x01,0x60,0x01,0x13]
+
+v_add_nc_u16_e64_dpp v255, v255, v255 op_sel:[0,0,1] clamp row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1
+// GFX11: [0xff,0xc0,0x03,0xd7,0xfa,0xfe,0x03,0x00,0xff,0x6f,0x0d,0x30]
+
+v_cvt_pk_norm_i16_f16_e64_dpp v5, -v1, |v2| op_sel:[1,0,0] row_xmask:0 row_mask:0x1 bank_mask:0x3
+// GFX11: [0x05,0x0a,0x12,0xd7,0xfa,0x04,0x02,0x20,0x01,0x60,0x01,0x13]
+
+v_cvt_pk_norm_i16_f16_e64_dpp v255, -|v255|, -|v255| op_sel:[0,1,0] row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1
+// GFX11: [0xff,0x13,0x12,0xd7,0xfa,0xfe,0x03,0x60,0xff,0x6f,0x0d,0x30]
+
+v_cvt_pk_norm_u16_f16_e64_dpp v5, -v1, |v2| op_sel:[1,0,0] row_xmask:0 row_mask:0x1 bank_mask:0x3
+// GFX11: [0x05,0x0a,0x13,0xd7,0xfa,0x04,0x02,0x20,0x01,0x60,0x01,0x13]
+
+v_cvt_pk_norm_u16_f16_e64_dpp v255, -|v255|, -|v255| op_sel:[0,1,0] row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1
+// GFX11: [0xff,0x13,0x13,0xd7,0xfa,0xfe,0x03,0x60,0xff,0x6f,0x0d,0x30]
+
+v_div_fixup_f16_e64_dpp v5, -v1, v2, |exec_lo| op_sel:[1,1,1,1] row_ror:15 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x7c,0x54,0xd6,0xfa,0x04,0xfa,0x21,0x01,0x2f,0x01,0xff]
+
+v_div_fixup_f16_e64_dpp v5, -|v1|, -|v2|, null op_sel:[1,0,0,0] row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x0b,0x54,0xd6,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff]
+
+v_div_fixup_f16_e64_dpp v5, -|v1|, v2, -|-1| op_sel:[0,1,0,0] row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x15,0x54,0xd6,0xfa,0x04,0x06,0xa3,0x01,0x5f,0x01,0x01]
+
+v_div_fixup_f16_e64_dpp v5, v1, -|v2|, -|0.5| op_sel:[0,0,1,0] row_xmask:0 row_mask:0x1 bank_mask:0x3
+// GFX11: [0x05,0x26,0x54,0xd6,0xfa,0x04,0xc2,0xc3,0x01,0x60,0x01,0x13]
+
+v_div_fixup_f16_e64_dpp v255, -|v255|, -|v255|, -|src_scc| op_sel:[0,0,0,1] clamp row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1
+// GFX11: [0xff,0xc7,0x54,0xd6,0xfa,0xfe,0xf7,0xe3,0xff,0x6f,0x0d,0x30]
+
+v_fma_f16_e64_dpp v5, -v1, v2, |exec_lo| op_sel:[1,1,1,1] row_ror:15 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x7c,0x48,0xd6,0xfa,0x04,0xfa,0x21,0x01,0x2f,0x01,0xff]
+
+v_fma_f16_e64_dpp v5, -|v1|, -|v2|, null op_sel:[1,0,0,0] row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x0b,0x48,0xd6,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff]
+
+v_fma_f16_e64_dpp v5, -|v1|, v2, -|-1| op_sel:[0,1,0,0] row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x15,0x48,0xd6,0xfa,0x04,0x06,0xa3,0x01,0x5f,0x01,0x01]
+
+v_fma_f16_e64_dpp v5, v1, -|v2|, -|0.5| op_sel:[0,0,1,0] row_xmask:0 row_mask:0x1 bank_mask:0x3
+// GFX11: [0x05,0x26,0x48,0xd6,0xfa,0x04,0xc2,0xc3,0x01,0x60,0x01,0x13]
+
+v_fma_f16_e64_dpp v255, -|v255|, -|v255|, -|src_scc| op_sel:[0,0,0,1] clamp row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1
+// GFX11: [0xff,0xc7,0x48,0xd6,0xfa,0xfe,0xf7,0xe3,0xff,0x6f,0x0d,0x30]
+
+v_mad_i16_e64_dpp v5, v1, v2, exec_hi op_sel:[1,1,1,1] row_ror:15 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x78,0x53,0xd6,0xfa,0x04,0xfe,0x01,0x01,0x2f,0x01,0xff]
+
+v_mad_i16_e64_dpp v5, v1, v2, exec_lo op_sel:[1,0,0,0] row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x08,0x53,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x50,0x01,0xff]
+
+v_mad_i16_e64_dpp v5, v1, v2, null op_sel:[0,1,0,0] row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x10,0x53,0xd6,0xfa,0x04,0xf2,0x01,0x01,0x5f,0x01,0x01]
+
+v_mad_i16_e64_dpp v5, v1, v2, -1 op_sel:[0,0,1,0] row_xmask:0 row_mask:0x1 bank_mask:0x3
+// GFX11: [0x05,0x20,0x53,0xd6,0xfa,0x04,0x06,0x03,0x01,0x60,0x01,0x13]
+
+v_mad_i16_e64_dpp v255, v255, v255, src_scc op_sel:[0,0,0,1] clamp row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1
+// GFX11: [0xff,0xc0,0x53,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x0d,0x30]
+
+v_mad_i32_i16_e64_dpp v5, v1, v2, 0.5 op_sel:[1,0,0,0] row_xmask:0 row_mask:0x1 bank_mask:0x3
+// GFX11: [0x05,0x08,0x5a,0xd6,0xfa,0x04,0xc2,0x03,0x01,0x60,0x01,0x13]
+
+v_mad_i32_i16_e64_dpp v255, v255, v255, src_scc op_sel:[0,1,0,0] clamp row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1
+// GFX11: [0xff,0x90,0x5a,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x0d,0x30]
+
+v_mad_u16_e64_dpp v5, v1, v2, exec_hi op_sel:[1,1,1,1] row_ror:15 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x78,0x41,0xd6,0xfa,0x04,0xfe,0x01,0x01,0x2f,0x01,0xff]
+
+v_mad_u16_e64_dpp v5, v1, v2, exec_lo op_sel:[1,0,0,0] row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x08,0x41,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x50,0x01,0xff]
+
+v_mad_u16_e64_dpp v5, v1, v2, null op_sel:[0,1,0,0] row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x10,0x41,0xd6,0xfa,0x04,0xf2,0x01,0x01,0x5f,0x01,0x01]
+
+v_mad_u16_e64_dpp v5, v1, v2, -1 op_sel:[0,0,1,0] row_xmask:0 row_mask:0x1 bank_mask:0x3
+// GFX11: [0x05,0x20,0x41,0xd6,0xfa,0x04,0x06,0x03,0x01,0x60,0x01,0x13]
+
+v_mad_u16_e64_dpp v255, v255, v255, src_scc op_sel:[0,0,0,1] clamp row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1
+// GFX11: [0xff,0xc0,0x41,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x0d,0x30]
+
+v_mad_u32_u16_e64_dpp v5, v1, v2, 0.5 op_sel:[1,0,0,0] row_xmask:0 row_mask:0x1 bank_mask:0x3
+// GFX11: [0x05,0x08,0x59,0xd6,0xfa,0x04,0xc2,0x03,0x01,0x60,0x01,0x13]
+
+v_mad_u32_u16_e64_dpp v255, v255, v255, src_scc op_sel:[0,1,0,0] clamp row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1
+// GFX11: [0xff,0x90,0x59,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x0d,0x30]
+
+v_max3_f16_e64_dpp v5, -v1, v2, |exec_lo| op_sel:[1,1,1,1] row_ror:15 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x7c,0x4c,0xd6,0xfa,0x04,0xfa,0x21,0x01,0x2f,0x01,0xff]
+
+v_max3_f16_e64_dpp v5, -|v1|, -|v2|, null op_sel:[1,0,0,0] row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x0b,0x4c,0xd6,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff]
+
+v_max3_f16_e64_dpp v5, -|v1|, v2, -|-1| op_sel:[0,1,0,0] row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x15,0x4c,0xd6,0xfa,0x04,0x06,0xa3,0x01,0x5f,0x01,0x01]
+
+v_max3_f16_e64_dpp v5, v1, -|v2|, -|0.5| op_sel:[0,0,1,0] row_xmask:0 row_mask:0x1 bank_mask:0x3
+// GFX11: [0x05,0x26,0x4c,0xd6,0xfa,0x04,0xc2,0xc3,0x01,0x60,0x01,0x13]
+
+v_max3_f16_e64_dpp v255, -|v255|, -|v255|, -|src_scc| op_sel:[0,0,0,1] clamp row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1
+// GFX11: [0xff,0xc7,0x4c,0xd6,0xfa,0xfe,0xf7,0xe3,0xff,0x6f,0x0d,0x30]
+
+v_max3_i16_e64_dpp v5, v1, v2, exec_hi op_sel:[1,1,1,1] row_ror:15 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x78,0x4d,0xd6,0xfa,0x04,0xfe,0x01,0x01,0x2f,0x01,0xff]
+
+v_max3_i16_e64_dpp v5, v1, v2, exec_lo op_sel:[1,0,0,0] row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x08,0x4d,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x50,0x01,0xff]
+
+v_max3_i16_e64_dpp v5, v1, v2, null op_sel:[0,1,0,0] row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x10,0x4d,0xd6,0xfa,0x04,0xf2,0x01,0x01,0x5f,0x01,0x01]
+
+v_max3_i16_e64_dpp v5, v1, v2, -1 op_sel:[0,0,1,0] row_xmask:0 row_mask:0x1 bank_mask:0x3
+// GFX11: [0x05,0x20,0x4d,0xd6,0xfa,0x04,0x06,0x03,0x01,0x60,0x01,0x13]
+
+v_max3_i16_e64_dpp v255, v255, v255, src_scc op_sel:[0,0,0,1] row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1
+// GFX11: [0xff,0x40,0x4d,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x0d,0x30]
+
+v_max3_u16_e64_dpp v5, v1, v2, exec_hi op_sel:[1,1,1,1] row_ror:15 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x78,0x4e,0xd6,0xfa,0x04,0xfe,0x01,0x01,0x2f,0x01,0xff]
+
+v_max3_u16_e64_dpp v5, v1, v2, exec_lo op_sel:[1,0,0,0] row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x08,0x4e,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x50,0x01,0xff]
+
+v_max3_u16_e64_dpp v5, v1, v2, null op_sel:[0,1,0,0] row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x10,0x4e,0xd6,0xfa,0x04,0xf2,0x01,0x01,0x5f,0x01,0x01]
+
+v_max3_u16_e64_dpp v5, v1, v2, -1 op_sel:[0,0,1,0] row_xmask:0 row_mask:0x1 bank_mask:0x3
+// GFX11: [0x05,0x20,0x4e,0xd6,0xfa,0x04,0x06,0x03,0x01,0x60,0x01,0x13]
+
+v_max3_u16_e64_dpp v255, v255, v255, src_scc op_sel:[0,0,0,1] row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1
+// GFX11: [0xff,0x40,0x4e,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x0d,0x30]
+
+v_med3_f16_e64_dpp v5, -v1, v2, |exec_lo| op_sel:[1,1,1,1] row_ror:15 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x7c,0x4f,0xd6,0xfa,0x04,0xfa,0x21,0x01,0x2f,0x01,0xff]
+
+v_med3_f16_e64_dpp v5, -|v1|, -|v2|, null op_sel:[1,0,0,0] row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x0b,0x4f,0xd6,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff]
+
+v_med3_f16_e64_dpp v5, -|v1|, v2, -|-1| op_sel:[0,1,0,0] row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x15,0x4f,0xd6,0xfa,0x04,0x06,0xa3,0x01,0x5f,0x01,0x01]
+
+v_med3_f16_e64_dpp v5, v1, -|v2|, -|0.5| op_sel:[0,0,1,0] row_xmask:0 row_mask:0x1 bank_mask:0x3
+// GFX11: [0x05,0x26,0x4f,0xd6,0xfa,0x04,0xc2,0xc3,0x01,0x60,0x01,0x13]
+
+v_med3_f16_e64_dpp v255, -|v255|, -|v255|, -|src_scc| op_sel:[0,0,0,1] clamp row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1
+// GFX11: [0xff,0xc7,0x4f,0xd6,0xfa,0xfe,0xf7,0xe3,0xff,0x6f,0x0d,0x30]
+
+v_med3_i16_e64_dpp v5, v1, v2, exec_hi op_sel:[1,1,1,1] row_ror:15 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x78,0x50,0xd6,0xfa,0x04,0xfe,0x01,0x01,0x2f,0x01,0xff]
+
+v_med3_i16_e64_dpp v5, v1, v2, exec_lo op_sel:[1,0,0,0] row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x08,0x50,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x50,0x01,0xff]
+
+v_med3_i16_e64_dpp v5, v1, v2, null op_sel:[0,1,0,0] row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x10,0x50,0xd6,0xfa,0x04,0xf2,0x01,0x01,0x5f,0x01,0x01]
+
+v_med3_i16_e64_dpp v5, v1, v2, -1 op_sel:[0,0,1,0] row_xmask:0 row_mask:0x1 bank_mask:0x3
+// GFX11: [0x05,0x20,0x50,0xd6,0xfa,0x04,0x06,0x03,0x01,0x60,0x01,0x13]
+
+v_med3_i16_e64_dpp v255, v255, v255, src_scc op_sel:[0,0,0,1] row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1
+// GFX11: [0xff,0x40,0x50,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x0d,0x30]
+
+v_med3_u16_e64_dpp v5, v1, v2, exec_hi op_sel:[1,1,1,1] row_ror:15 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x78,0x51,0xd6,0xfa,0x04,0xfe,0x01,0x01,0x2f,0x01,0xff]
+
+v_med3_u16_e64_dpp v5, v1, v2, exec_lo op_sel:[1,0,0,0] row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x08,0x51,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x50,0x01,0xff]
+
+v_med3_u16_e64_dpp v5, v1, v2, null op_sel:[0,1,0,0] row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x10,0x51,0xd6,0xfa,0x04,0xf2,0x01,0x01,0x5f,0x01,0x01]
+
+v_med3_u16_e64_dpp v5, v1, v2, -1 op_sel:[0,0,1,0] row_xmask:0 row_mask:0x1 bank_mask:0x3
+// GFX11: [0x05,0x20,0x51,0xd6,0xfa,0x04,0x06,0x03,0x01,0x60,0x01,0x13]
+
+v_med3_u16_e64_dpp v255, v255, v255, src_scc op_sel:[0,0,0,1] row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1
+// GFX11: [0xff,0x40,0x51,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x0d,0x30]
+
+v_min3_f16_e64_dpp v5, -v1, v2, |exec_lo| op_sel:[1,1,1,1] row_ror:15 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x7c,0x49,0xd6,0xfa,0x04,0xfa,0x21,0x01,0x2f,0x01,0xff]
+
+v_min3_f16_e64_dpp v5, -|v1|, -|v2|, null op_sel:[1,0,0,0] row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x0b,0x49,0xd6,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff]
+
+v_min3_f16_e64_dpp v5, -|v1|, v2, -|-1| op_sel:[0,1,0,0] row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x15,0x49,0xd6,0xfa,0x04,0x06,0xa3,0x01,0x5f,0x01,0x01]
+
+v_min3_f16_e64_dpp v5, v1, -|v2|, -|0.5| op_sel:[0,0,1,0] row_xmask:0 row_mask:0x1 bank_mask:0x3
+// GFX11: [0x05,0x26,0x49,0xd6,0xfa,0x04,0xc2,0xc3,0x01,0x60,0x01,0x13]
+
+v_min3_f16_e64_dpp v255, -|v255|, -|v255|, -|src_scc| op_sel:[0,0,0,1] clamp row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1
+// GFX11: [0xff,0xc7,0x49,0xd6,0xfa,0xfe,0xf7,0xe3,0xff,0x6f,0x0d,0x30]
+
+v_min3_i16_e64_dpp v5, v1, v2, exec_hi op_sel:[1,1,1,1] row_ror:15 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x78,0x4a,0xd6,0xfa,0x04,0xfe,0x01,0x01,0x2f,0x01,0xff]
+
+v_min3_i16_e64_dpp v5, v1, v2, exec_lo op_sel:[1,0,0,0] row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x08,0x4a,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x50,0x01,0xff]
+
+v_min3_i16_e64_dpp v5, v1, v2, null op_sel:[0,1,0,0] row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x10,0x4a,0xd6,0xfa,0x04,0xf2,0x01,0x01,0x5f,0x01,0x01]
+
+v_min3_i16_e64_dpp v5, v1, v2, -1 op_sel:[0,0,1,0] row_xmask:0 row_mask:0x1 bank_mask:0x3
+// GFX11: [0x05,0x20,0x4a,0xd6,0xfa,0x04,0x06,0x03,0x01,0x60,0x01,0x13]
+
+v_min3_i16_e64_dpp v255, v255, v255, src_scc op_sel:[0,0,0,1] row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1
+// GFX11: [0xff,0x40,0x4a,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x0d,0x30]
+
+v_min3_u16_e64_dpp v5, v1, v2, exec_hi op_sel:[1,1,1,1] row_ror:15 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x78,0x4b,0xd6,0xfa,0x04,0xfe,0x01,0x01,0x2f,0x01,0xff]
+
+v_min3_u16_e64_dpp v5, v1, v2, exec_lo op_sel:[1,0,0,0] row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x08,0x4b,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x50,0x01,0xff]
+
+v_min3_u16_e64_dpp v5, v1, v2, null op_sel:[0,1,0,0] row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x10,0x4b,0xd6,0xfa,0x04,0xf2,0x01,0x01,0x5f,0x01,0x01]
+
+v_min3_u16_e64_dpp v5, v1, v2, -1 op_sel:[0,0,1,0] row_xmask:0 row_mask:0x1 bank_mask:0x3
+// GFX11: [0x05,0x20,0x4b,0xd6,0xfa,0x04,0x06,0x03,0x01,0x60,0x01,0x13]
+
+v_min3_u16_e64_dpp v255, v255, v255, src_scc op_sel:[0,0,0,1] row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1
+// GFX11: [0xff,0x40,0x4b,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x0d,0x30]
+
+v_pack_b32_f16_e64_dpp v5, -v1, |v2| op_sel:[1,0,0] row_xmask:0 row_mask:0x1 bank_mask:0x3
+// GFX11: [0x05,0x0a,0x11,0xd7,0xfa,0x04,0x02,0x20,0x01,0x60,0x01,0x13]
+
+v_pack_b32_f16_e64_dpp v255, -|v255|, -|v255| op_sel:[0,1,0] row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1
+// GFX11: [0xff,0x13,0x11,0xd7,0xfa,0xfe,0x03,0x60,0xff,0x6f,0x0d,0x30]
+
+v_sub_nc_i16_e64_dpp v5, v1, v2 op_sel:[1,1,1] row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x58,0x0e,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+
+v_sub_nc_i16_e64_dpp v5, v1, v2 op_sel:[1,0,0] row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x08,0x0e,0xd7,0xfa,0x04,0x02,0x00,0x01,0x5f,0x01,0x01]
+
+v_sub_nc_i16_e64_dpp v5, v1, v2 op_sel:[0,1,0] row_xmask:0 row_mask:0x1 bank_mask:0x3
+// GFX11: [0x05,0x10,0x0e,0xd7,0xfa,0x04,0x02,0x00,0x01,0x60,0x01,0x13]
+
+v_sub_nc_i16_e64_dpp v255, v255, v255 op_sel:[0,0,1] clamp row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1
+// GFX11: [0xff,0xc0,0x0e,0xd7,0xfa,0xfe,0x03,0x00,0xff,0x6f,0x0d,0x30]
+
+v_sub_nc_u16_e64_dpp v5, v1, v2 op_sel:[1,1,1] row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: [0x05,0x58,0x04,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+
+v_sub_nc_u16_e64_dpp v5, v1, v2 op_sel:[1,0,0] row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: [0x05,0x08,0x04,0xd7,0xfa,0x04,0x02,0x00,0x01,0x5f,0x01,0x01]
+
+v_sub_nc_u16_e64_dpp v5, v1, v2 op_sel:[0,1,0] row_xmask:0 row_mask:0x1 bank_mask:0x3
+// GFX11: [0x05,0x10,0x04,0xd7,0xfa,0x04,0x02,0x00,0x01,0x60,0x01,0x13]
+
+v_sub_nc_u16_e64_dpp v255, v255, v255 op_sel:[0,0,1] clamp row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1
+// GFX11: [0xff,0xc0,0x04,0xd7,0xfa,0xfe,0x03,0x00,0xff,0x6f,0x0d,0x30]
+
+v_dot2_f16_f16_e64_dpp v0, v1, v2, v3 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 fi:1
+// GFX11: encoding: [0x00,0x00,0x66,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x04,0x00]
+
+v_dot2_f16_f16_e64_dpp v0, v1, v2, v3 op_sel:[1,1,0,0] quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 fi:1
+// GFX11-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid op_sel operand
+
+v_dot2_f16_f16_e64_dpp v0, s1, v2, v3 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 fi:1
+// GFX11-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_dot2_f16_f16_e64_dpp v0, v1, s2, v3 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 fi:1
+// GFX11-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_dot2_f16_f16_e64_dpp v0, v1, v2, v3 op_sel:[0,0,1,1] quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 fi:1
+// GFX11: encoding: [0x00,0x60,0x66,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x04,0x00]
+
+v_dot2_f16_f16_e64_dpp v0, |v1|, -v2, -|s3| op_sel:[0,0,1,1] quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 fi:1
+// GFX11: encoding: [0x00,0x65,0x66,0xd6,0xfa,0x04,0x0e,0xc0,0x01,0xe4,0x04,0x00]
+
+v_dot2_f16_f16_e64_dpp v5, v1, v2, 0.5 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf
+// GFX11: encoding: [0x05,0x00,0x66,0xd6,0xfa,0x04,0xc2,0x03,0x01,0x1b,0x00,0xff]
+
+v_dot2_bf16_bf16_e64_dpp v0, v1, v2, v3 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 fi:1
+// GFX11: encoding: [0x00,0x00,0x67,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x04,0x00]
+
+v_dot2_bf16_bf16_e64_dpp v0, v1, v2, v3 op_sel:[1,1,0,0] quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 fi:1
+// GFX11-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid op_sel operand
+
+v_dot2_bf16_bf16_e64_dpp v0, s1, v2, v3 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// GFX11-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_dot2_bf16_bf16_e64_dpp v0, v1, s2, v3 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// GFX11-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_dot2_bf16_bf16_e64_dpp v0, v1, v2, v3 op_sel:[0,0,1,1] quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 fi:1
+// GFX11: encoding: [0x00,0x60,0x67,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x04,0x00]
+
+v_dot2_bf16_bf16_e64_dpp v0, |v1|, -v2, -|s3| op_sel:[0,0,1,1] quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 fi:1
+// GFX11: encoding: [0x00,0x65,0x67,0xd6,0xfa,0x04,0x0e,0xc0,0x01,0xe4,0x04,0x00]
+
+v_dot2_bf16_bf16_e64_dpp v5, v1, v2, 0 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf
+// GFX11: encoding: [0x05,0x00,0x67,0xd6,0xfa,0x04,0x02,0x02,0x01,0x1b,0x00,0xff]
diff --git a/llvm/test/MC/AMDGPU/gfx11_asm_vop3_dpp16.s b/llvm/test/MC/AMDGPU/gfx11_asm_vop3_dpp16.s
index 58fec38..ceb8cac 100644
--- a/llvm/test/MC/AMDGPU/gfx11_asm_vop3_dpp16.s
+++ b/llvm/test/MC/AMDGPU/gfx11_asm_vop3_dpp16.s
@@ -1,7 +1,7 @@
-// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1100 -mattr=+wavefrontsize32 -show-encoding %s | FileCheck --check-prefixes=GFX11,W32 %s
-// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1100 -mattr=+wavefrontsize64 -show-encoding %s | FileCheck --check-prefixes=GFX11,W64 %s
-// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1100 -mattr=+wavefrontsize32 %s 2>&1 | FileCheck --check-prefixes=GFX11-ERR,W32-ERR --implicit-check-not=error: %s
-// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1100 -mattr=+wavefrontsize64 %s 2>&1 | FileCheck --check-prefixes=GFX11-ERR,W64-ERR --implicit-check-not=error: %s
+// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1100 -mattr=+wavefrontsize32,+real-true16 -show-encoding %s | FileCheck --check-prefixes=GFX11,W32 %s
+// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1100 -mattr=+wavefrontsize64,+real-true16 -show-encoding %s | FileCheck --check-prefixes=GFX11,W64 %s
+// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1100 -mattr=+wavefrontsize32,+real-true16 %s 2>&1 | FileCheck --check-prefixes=GFX11-ERR,W32-ERR --implicit-check-not=error: %s
+// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1100 -mattr=+wavefrontsize64,+real-true16 %s 2>&1 | FileCheck --check-prefixes=GFX11-ERR,W64-ERR --implicit-check-not=error: %s
v_add3_u32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
// GFX11: [0x05,0x00,0x55,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
diff --git a/llvm/test/MC/AMDGPU/gfx11_asm_vop3_dpp8-fake16.s b/llvm/test/MC/AMDGPU/gfx11_asm_vop3_dpp8-fake16.s
new file mode 100644
index 0000000..cf2a7ab
--- /dev/null
+++ b/llvm/test/MC/AMDGPU/gfx11_asm_vop3_dpp8-fake16.s
@@ -0,0 +1,2968 @@
+// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1100 -mattr=+wavefrontsize32,-real-true16 -show-encoding %s | FileCheck --check-prefixes=GFX11,W32 %s
+// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1100 -mattr=+wavefrontsize64,-real-true16 -show-encoding %s | FileCheck --check-prefixes=GFX11,W64 %s
+// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1100 -mattr=+wavefrontsize32,-real-true16 %s 2>&1 | FileCheck --check-prefixes=GFX11-ERR,W32-ERR --implicit-check-not=error: %s
+// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1100 -mattr=+wavefrontsize64,-real-true16 %s 2>&1 | FileCheck --check-prefixes=GFX11-ERR,W64-ERR --implicit-check-not=error: %s
+
+v_add3_u32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x55,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_add3_u32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x55,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_add3_u32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x55,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_add3_u32_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x55,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_add3_u32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x55,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_add3_u32_e64_dpp v5, v1, v2, ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x55,0xd6,0xe9,0x04,0xee,0x01,0x01,0x77,0x39,0x05]
+
+v_add3_u32_e64_dpp v5, v1, v2, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x55,0xd6,0xe9,0x04,0xfe,0x01,0x01,0x77,0x39,0x05]
+
+v_add3_u32_e64_dpp v5, v1, v2, exec_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x55,0xd6,0xe9,0x04,0xfa,0x01,0x01,0x77,0x39,0x05]
+
+v_add3_u32_e64_dpp v5, v1, v2, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x55,0xd6,0xe9,0x04,0xf2,0x01,0x01,0x77,0x39,0x05]
+
+v_add3_u32_e64_dpp v5, v1, v2, -1 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x55,0xd6,0xe9,0x04,0x06,0x03,0x01,0x77,0x39,0x05]
+
+v_add3_u32_e64_dpp v5, v1, v2, 0.5 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX11: [0x05,0x00,0x55,0xd6,0xea,0x04,0xc2,0x03,0x01,0x77,0x39,0x05]
+
+v_add3_u32_e64_dpp v255, v255, v255, src_scc dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX11: [0xff,0x00,0x55,0xd6,0xe9,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
+
+v_add_co_u32_e64_dpp v5, s6, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// W32: [0x05,0x06,0x00,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32_e64_dpp v5, s105, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// W32: [0x05,0x69,0x00,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32_e64_dpp v5, vcc_lo, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// W32: [0x05,0x6a,0x00,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32_e64_dpp v5, vcc_hi, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// W32: [0x05,0x6b,0x00,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32_e64_dpp v5, ttmp15, v1, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// W32: [0x05,0x7b,0x00,0xd7,0xea,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32_e64_dpp v5, s[12:13], v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// W64: [0x05,0x0c,0x00,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32_e64_dpp v5, s[104:105], v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// W64: [0x05,0x68,0x00,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32_e64_dpp v5, vcc, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// W64: [0x05,0x6a,0x00,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32_e64_dpp v5, ttmp[14:15], v1, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// W64: [0x05,0x7a,0x00,0xd7,0xea,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32_e64_dpp v255, null, v255, v255 clamp dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX11: [0xff,0xfc,0x00,0xd7,0xe9,0xfe,0x03,0x00,0xff,0x00,0x00,0x00]
+
+v_add_lshl_u32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x47,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_add_lshl_u32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x47,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_add_lshl_u32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x47,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_add_lshl_u32_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x47,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_add_lshl_u32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x47,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_add_lshl_u32_e64_dpp v5, v1, v2, ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x47,0xd6,0xe9,0x04,0xee,0x01,0x01,0x77,0x39,0x05]
+
+v_add_lshl_u32_e64_dpp v5, v1, v2, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x47,0xd6,0xe9,0x04,0xfe,0x01,0x01,0x77,0x39,0x05]
+
+v_add_lshl_u32_e64_dpp v5, v1, v2, exec_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x47,0xd6,0xe9,0x04,0xfa,0x01,0x01,0x77,0x39,0x05]
+
+v_add_lshl_u32_e64_dpp v5, v1, v2, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x47,0xd6,0xe9,0x04,0xf2,0x01,0x01,0x77,0x39,0x05]
+
+v_add_lshl_u32_e64_dpp v5, v1, v2, -1 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x47,0xd6,0xe9,0x04,0x06,0x03,0x01,0x77,0x39,0x05]
+
+v_add_lshl_u32_e64_dpp v5, v1, v2, 0.5 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX11: [0x05,0x00,0x47,0xd6,0xea,0x04,0xc2,0x03,0x01,0x77,0x39,0x05]
+
+v_add_lshl_u32_e64_dpp v255, v255, v255, src_scc dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX11: [0xff,0x00,0x47,0xd6,0xe9,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
+
+v_add_nc_i16_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x0d,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_add_nc_i16_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX11: [0x05,0x00,0x0d,0xd7,0xea,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_add_nc_i16_e64_dpp v255, v255, v255 clamp dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX11: [0xff,0x80,0x0d,0xd7,0xe9,0xfe,0x03,0x00,0xff,0x00,0x00,0x00]
+
+v_add_nc_i32_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x26,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_add_nc_i32_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX11: [0x05,0x00,0x26,0xd7,0xea,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_add_nc_i32_e64_dpp v255, v255, v255 clamp dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX11: [0xff,0x80,0x26,0xd7,0xe9,0xfe,0x03,0x00,0xff,0x00,0x00,0x00]
+
+v_add_nc_u16_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x03,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_add_nc_u16_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX11: [0x05,0x00,0x03,0xd7,0xea,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_add_nc_u16_e64_dpp v255, v255, v255 clamp dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX11: [0xff,0x80,0x03,0xd7,0xe9,0xfe,0x03,0x00,0xff,0x00,0x00,0x00]
+
+v_alignbit_b32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x16,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_alignbit_b32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x16,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_alignbit_b32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x16,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_alignbit_b32_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x16,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_alignbit_b32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x16,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_alignbit_b32_e64_dpp v5, v1, v2, ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x16,0xd6,0xe9,0x04,0xee,0x01,0x01,0x77,0x39,0x05]
+
+v_alignbit_b32_e64_dpp v5, v1, v2, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x16,0xd6,0xe9,0x04,0xfe,0x01,0x01,0x77,0x39,0x05]
+
+v_alignbit_b32_e64_dpp v5, v1, v2, exec_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x16,0xd6,0xe9,0x04,0xfa,0x01,0x01,0x77,0x39,0x05]
+
+v_alignbit_b32_e64_dpp v5, v1, v2, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x16,0xd6,0xe9,0x04,0xf2,0x01,0x01,0x77,0x39,0x05]
+
+v_alignbit_b32_e64_dpp v5, v1, v2, -1 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX11: [0x05,0x00,0x16,0xd6,0xea,0x04,0x06,0x03,0x01,0x77,0x39,0x05]
+
+v_alignbit_b32_e64_dpp v255, v255, v255, src_scc dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX11: [0xff,0x00,0x16,0xd6,0xe9,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
+
+v_alignbyte_b32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x17,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_alignbyte_b32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x17,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_alignbyte_b32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x17,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_alignbyte_b32_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x17,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_alignbyte_b32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x17,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_alignbyte_b32_e64_dpp v5, v1, v2, ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x17,0xd6,0xe9,0x04,0xee,0x01,0x01,0x77,0x39,0x05]
+
+v_alignbyte_b32_e64_dpp v5, v1, v2, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x17,0xd6,0xe9,0x04,0xfe,0x01,0x01,0x77,0x39,0x05]
+
+v_alignbyte_b32_e64_dpp v5, v1, v2, exec_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x17,0xd6,0xe9,0x04,0xfa,0x01,0x01,0x77,0x39,0x05]
+
+v_alignbyte_b32_e64_dpp v5, v1, v2, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x17,0xd6,0xe9,0x04,0xf2,0x01,0x01,0x77,0x39,0x05]
+
+v_alignbyte_b32_e64_dpp v5, v1, v2, -1 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX11: [0x05,0x00,0x17,0xd6,0xea,0x04,0x06,0x03,0x01,0x77,0x39,0x05]
+
+v_alignbyte_b32_e64_dpp v255, v255, v255, src_scc dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX11: [0xff,0x00,0x17,0xd6,0xe9,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
+
+v_and_b16_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x62,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_and_b16_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX11: [0x05,0x00,0x62,0xd7,0xea,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_and_b16_e64_dpp v255, v255, v255 dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX11: [0xff,0x00,0x62,0xd7,0xe9,0xfe,0x03,0x00,0xff,0x00,0x00,0x00]
+
+v_and_or_b32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x57,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_and_or_b32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x57,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_and_or_b32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x57,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_and_or_b32_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x57,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_and_or_b32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x57,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_and_or_b32_e64_dpp v5, v1, v2, ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x57,0xd6,0xe9,0x04,0xee,0x01,0x01,0x77,0x39,0x05]
+
+v_and_or_b32_e64_dpp v5, v1, v2, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x57,0xd6,0xe9,0x04,0xfe,0x01,0x01,0x77,0x39,0x05]
+
+v_and_or_b32_e64_dpp v5, v1, v2, exec_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x57,0xd6,0xe9,0x04,0xfa,0x01,0x01,0x77,0x39,0x05]
+
+v_and_or_b32_e64_dpp v5, v1, v2, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x57,0xd6,0xe9,0x04,0xf2,0x01,0x01,0x77,0x39,0x05]
+
+v_and_or_b32_e64_dpp v5, v1, v2, -1 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x57,0xd6,0xe9,0x04,0x06,0x03,0x01,0x77,0x39,0x05]
+
+v_and_or_b32_e64_dpp v5, v1, v2, 0.5 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX11: [0x05,0x00,0x57,0xd6,0xea,0x04,0xc2,0x03,0x01,0x77,0x39,0x05]
+
+v_and_or_b32_e64_dpp v255, v255, v255, src_scc dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX11: [0xff,0x00,0x57,0xd6,0xe9,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
+
+v_ashrrev_i16_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x3a,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_ashrrev_i16_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX11: [0x05,0x00,0x3a,0xd7,0xea,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_ashrrev_i16_e64_dpp v255, v255, v255 dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX11: [0xff,0x00,0x3a,0xd7,0xe9,0xfe,0x03,0x00,0xff,0x00,0x00,0x00]
+
+v_bcnt_u32_b32_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x1e,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_bcnt_u32_b32_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX11: [0x05,0x00,0x1e,0xd7,0xea,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_bcnt_u32_b32_e64_dpp v255, v255, v255 dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX11: [0xff,0x00,0x1e,0xd7,0xe9,0xfe,0x03,0x00,0xff,0x00,0x00,0x00]
+
+v_bfe_i32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x11,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_bfe_i32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x11,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_bfe_i32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x11,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_bfe_i32_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x11,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_bfe_i32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x11,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_bfe_i32_e64_dpp v5, v1, v2, ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x11,0xd6,0xe9,0x04,0xee,0x01,0x01,0x77,0x39,0x05]
+
+v_bfe_i32_e64_dpp v5, v1, v2, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x11,0xd6,0xe9,0x04,0xfe,0x01,0x01,0x77,0x39,0x05]
+
+v_bfe_i32_e64_dpp v5, v1, v2, exec_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x11,0xd6,0xe9,0x04,0xfa,0x01,0x01,0x77,0x39,0x05]
+
+v_bfe_i32_e64_dpp v5, v1, v2, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x11,0xd6,0xe9,0x04,0xf2,0x01,0x01,0x77,0x39,0x05]
+
+v_bfe_i32_e64_dpp v5, v1, v2, -1 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x11,0xd6,0xe9,0x04,0x06,0x03,0x01,0x77,0x39,0x05]
+
+v_bfe_i32_e64_dpp v5, v1, v2, 0.5 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX11: [0x05,0x00,0x11,0xd6,0xea,0x04,0xc2,0x03,0x01,0x77,0x39,0x05]
+
+v_bfe_i32_e64_dpp v255, v255, v255, src_scc dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX11: [0xff,0x00,0x11,0xd6,0xe9,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
+
+v_bfe_u32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x10,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_bfe_u32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x10,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_bfe_u32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x10,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_bfe_u32_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x10,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_bfe_u32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x10,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_bfe_u32_e64_dpp v5, v1, v2, ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x10,0xd6,0xe9,0x04,0xee,0x01,0x01,0x77,0x39,0x05]
+
+v_bfe_u32_e64_dpp v5, v1, v2, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x10,0xd6,0xe9,0x04,0xfe,0x01,0x01,0x77,0x39,0x05]
+
+v_bfe_u32_e64_dpp v5, v1, v2, exec_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x10,0xd6,0xe9,0x04,0xfa,0x01,0x01,0x77,0x39,0x05]
+
+v_bfe_u32_e64_dpp v5, v1, v2, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x10,0xd6,0xe9,0x04,0xf2,0x01,0x01,0x77,0x39,0x05]
+
+v_bfe_u32_e64_dpp v5, v1, v2, -1 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x10,0xd6,0xe9,0x04,0x06,0x03,0x01,0x77,0x39,0x05]
+
+v_bfe_u32_e64_dpp v5, v1, v2, 0.5 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX11: [0x05,0x00,0x10,0xd6,0xea,0x04,0xc2,0x03,0x01,0x77,0x39,0x05]
+
+v_bfe_u32_e64_dpp v255, v255, v255, src_scc dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX11: [0xff,0x00,0x10,0xd6,0xe9,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
+
+v_bfi_b32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x12,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_bfi_b32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x12,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_bfi_b32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x12,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_bfi_b32_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x12,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_bfi_b32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x12,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_bfi_b32_e64_dpp v5, v1, v2, ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x12,0xd6,0xe9,0x04,0xee,0x01,0x01,0x77,0x39,0x05]
+
+v_bfi_b32_e64_dpp v5, v1, v2, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x12,0xd6,0xe9,0x04,0xfe,0x01,0x01,0x77,0x39,0x05]
+
+v_bfi_b32_e64_dpp v5, v1, v2, exec_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x12,0xd6,0xe9,0x04,0xfa,0x01,0x01,0x77,0x39,0x05]
+
+v_bfi_b32_e64_dpp v5, v1, v2, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x12,0xd6,0xe9,0x04,0xf2,0x01,0x01,0x77,0x39,0x05]
+
+v_bfi_b32_e64_dpp v5, v1, v2, -1 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x12,0xd6,0xe9,0x04,0x06,0x03,0x01,0x77,0x39,0x05]
+
+v_bfi_b32_e64_dpp v5, v1, v2, 0.5 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX11: [0x05,0x00,0x12,0xd6,0xea,0x04,0xc2,0x03,0x01,0x77,0x39,0x05]
+
+v_bfi_b32_e64_dpp v255, v255, v255, src_scc dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX11: [0xff,0x00,0x12,0xd6,0xe9,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
+
+v_bfm_b32_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x1d,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_bfm_b32_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX11: [0x05,0x00,0x1d,0xd7,0xea,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_bfm_b32_e64_dpp v255, v255, v255 dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX11: [0xff,0x00,0x1d,0xd7,0xe9,0xfe,0x03,0x00,0xff,0x00,0x00,0x00]
+
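+// v_cndmask_b16 takes its per-lane select mask from scalar source 2, so the
+// operand width depends on wavefront size: the wave32 run (W32) accepts a
+// single SGPR such as s3 or vcc_lo, the wave64 run (W64) an aligned SGPR pair
+// such as s[6:7] or vcc, and each run rejects the other's form.
+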
+v_cndmask_b16_e64_dpp v5, v1, v2, s3 dpp8:[7,6,5,4,3,2,1,0]
+// W32: [0x05,0x00,0x5d,0xd6,0xe9,0x04,0x0e,0x00,0x01,0x77,0x39,0x05]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// W32: [0x05,0x00,0x5d,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// W32: [0x05,0x00,0x5d,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16_e64_dpp v5, |v1|, -v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// W32: [0x05,0x01,0x5d,0xd6,0xe9,0x04,0xaa,0x41,0x01,0x77,0x39,0x05]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16_e64_dpp v5, -v1, |v2|, ttmp15 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// W32: [0x05,0x02,0x5d,0xd6,0xea,0x04,0xee,0x21,0x01,0x77,0x39,0x05]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] dpp8:[7,6,5,4,3,2,1,0]
+// W64: [0x05,0x00,0x5d,0xd6,0xe9,0x04,0x1a,0x00,0x01,0x77,0x39,0x05]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16_e64_dpp v5, v1, v2, s[104:105] dpp8:[7,6,5,4,3,2,1,0]
+// W64: [0x05,0x00,0x5d,0xd6,0xe9,0x04,0xa2,0x01,0x01,0x77,0x39,0x05]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16_e64_dpp v5, |v1|, -v2, vcc dpp8:[7,6,5,4,3,2,1,0]
+// W64: [0x05,0x01,0x5d,0xd6,0xe9,0x04,0xaa,0x41,0x01,0x77,0x39,0x05]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16_e64_dpp v5, -v1, |v2|, ttmp[14:15] dpp8:[7,6,5,4,3,2,1,0] fi:1
+// W64: [0x05,0x02,0x5d,0xd6,0xea,0x04,0xea,0x21,0x01,0x77,0x39,0x05]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16_e64_dpp v255, -|v255|, -|v255|, null dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX11: [0xff,0x03,0x5d,0xd6,0xe9,0xfe,0xf3,0x61,0xff,0x00,0x00,0x00]
+
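+// The v_cube*_f32 tests below exercise the full VOP3 modifier surface: abs
+// (|x|) sets bits 0-2 of byte 1 and clamp sets bit 7 of that byte, while
+// byte 7 holds the omod field (mul:2 = 0x08, mul:4 = 0x10, div:2 = 0x18)
+// beneath the per-source neg bits (0x20, 0x40, 0x80).
+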
+v_cubeid_f32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x0c,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_cubeid_f32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x0c,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_cubeid_f32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x0c,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_cubeid_f32_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x0c,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_cubeid_f32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x0c,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_cubeid_f32_e64_dpp v5, |v1|, v2, -ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x01,0x0c,0xd6,0xe9,0x04,0xee,0x81,0x01,0x77,0x39,0x05]
+
+v_cubeid_f32_e64_dpp v5, v1, -|v2|, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x02,0x0c,0xd6,0xe9,0x04,0xfe,0x41,0x01,0x77,0x39,0x05]
+
+v_cubeid_f32_e64_dpp v5, -v1, v2, |exec_lo| dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x04,0x0c,0xd6,0xe9,0x04,0xfa,0x21,0x01,0x77,0x39,0x05]
+
+v_cubeid_f32_e64_dpp v5, -|v1|, -|v2|, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x03,0x0c,0xd6,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05]
+
+v_cubeid_f32_e64_dpp v5, -|v1|, v2, -|-1| mul:2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x05,0x0c,0xd6,0xe9,0x04,0x06,0xab,0x01,0x77,0x39,0x05]
+
+v_cubeid_f32_e64_dpp v5, v1, -|v2|, -|0.5| mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX11: [0x05,0x06,0x0c,0xd6,0xea,0x04,0xc2,0xd3,0x01,0x77,0x39,0x05]
+
+v_cubeid_f32_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX11: [0xff,0x87,0x0c,0xd6,0xe9,0xfe,0xf7,0xfb,0xff,0x00,0x00,0x00]
+
+v_cubema_f32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x0f,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_cubema_f32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x0f,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_cubema_f32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x0f,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_cubema_f32_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x0f,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_cubema_f32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x0f,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_cubema_f32_e64_dpp v5, |v1|, v2, -ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x01,0x0f,0xd6,0xe9,0x04,0xee,0x81,0x01,0x77,0x39,0x05]
+
+v_cubema_f32_e64_dpp v5, v1, -|v2|, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x02,0x0f,0xd6,0xe9,0x04,0xfe,0x41,0x01,0x77,0x39,0x05]
+
+v_cubema_f32_e64_dpp v5, -v1, v2, |exec_lo| dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x04,0x0f,0xd6,0xe9,0x04,0xfa,0x21,0x01,0x77,0x39,0x05]
+
+v_cubema_f32_e64_dpp v5, -|v1|, -|v2|, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x03,0x0f,0xd6,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05]
+
+v_cubema_f32_e64_dpp v5, -|v1|, v2, -|-1| mul:2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x05,0x0f,0xd6,0xe9,0x04,0x06,0xab,0x01,0x77,0x39,0x05]
+
+v_cubema_f32_e64_dpp v5, v1, -|v2|, -|0.5| mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX11: [0x05,0x06,0x0f,0xd6,0xea,0x04,0xc2,0xd3,0x01,0x77,0x39,0x05]
+
+v_cubema_f32_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX11: [0xff,0x87,0x0f,0xd6,0xe9,0xfe,0xf7,0xfb,0xff,0x00,0x00,0x00]
+
+v_cubesc_f32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x0d,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_cubesc_f32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x0d,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_cubesc_f32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x0d,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_cubesc_f32_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x0d,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_cubesc_f32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x0d,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_cubesc_f32_e64_dpp v5, |v1|, v2, -ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x01,0x0d,0xd6,0xe9,0x04,0xee,0x81,0x01,0x77,0x39,0x05]
+
+v_cubesc_f32_e64_dpp v5, v1, -|v2|, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x02,0x0d,0xd6,0xe9,0x04,0xfe,0x41,0x01,0x77,0x39,0x05]
+
+v_cubesc_f32_e64_dpp v5, -v1, v2, |exec_lo| dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x04,0x0d,0xd6,0xe9,0x04,0xfa,0x21,0x01,0x77,0x39,0x05]
+
+v_cubesc_f32_e64_dpp v5, -|v1|, -|v2|, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x03,0x0d,0xd6,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05]
+
+v_cubesc_f32_e64_dpp v5, -|v1|, v2, -|-1| mul:2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x05,0x0d,0xd6,0xe9,0x04,0x06,0xab,0x01,0x77,0x39,0x05]
+
+v_cubesc_f32_e64_dpp v5, v1, -|v2|, -|0.5| mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX11: [0x05,0x06,0x0d,0xd6,0xea,0x04,0xc2,0xd3,0x01,0x77,0x39,0x05]
+
+v_cubesc_f32_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX11: [0xff,0x87,0x0d,0xd6,0xe9,0xfe,0xf7,0xfb,0xff,0x00,0x00,0x00]
+
+v_cubetc_f32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x0e,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_cubetc_f32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x0e,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_cubetc_f32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x0e,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_cubetc_f32_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x0e,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_cubetc_f32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x0e,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_cubetc_f32_e64_dpp v5, |v1|, v2, -ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x01,0x0e,0xd6,0xe9,0x04,0xee,0x81,0x01,0x77,0x39,0x05]
+
+v_cubetc_f32_e64_dpp v5, v1, -|v2|, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x02,0x0e,0xd6,0xe9,0x04,0xfe,0x41,0x01,0x77,0x39,0x05]
+
+v_cubetc_f32_e64_dpp v5, -v1, v2, |exec_lo| dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x04,0x0e,0xd6,0xe9,0x04,0xfa,0x21,0x01,0x77,0x39,0x05]
+
+v_cubetc_f32_e64_dpp v5, -|v1|, -|v2|, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x03,0x0e,0xd6,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05]
+
+v_cubetc_f32_e64_dpp v5, -|v1|, v2, -|-1| mul:2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x05,0x0e,0xd6,0xe9,0x04,0x06,0xab,0x01,0x77,0x39,0x05]
+
+v_cubetc_f32_e64_dpp v5, v1, -|v2|, -|0.5| mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX11: [0x05,0x06,0x0e,0xd6,0xea,0x04,0xc2,0xd3,0x01,0x77,0x39,0x05]
+
+v_cubetc_f32_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX11: [0xff,0x87,0x0e,0xd6,0xe9,0xfe,0xf7,0xfb,0xff,0x00,0x00,0x00]
+
+v_cvt_pk_i16_f32_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x06,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_cvt_pk_i16_f32_e64_dpp v5, |v1|, -v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x01,0x06,0xd7,0xe9,0x04,0x02,0x40,0x01,0x77,0x39,0x05]
+
+v_cvt_pk_i16_f32_e64_dpp v5, -v1, |v2| dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX11: [0x05,0x02,0x06,0xd7,0xea,0x04,0x02,0x20,0x01,0x77,0x39,0x05]
+
+v_cvt_pk_i16_f32_e64_dpp v255, -|v255|, -|v255| dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX11: [0xff,0x03,0x06,0xd7,0xe9,0xfe,0x03,0x60,0xff,0x00,0x00,0x00]
+
+v_cvt_pk_i16_i32_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x24,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_cvt_pk_i16_i32_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX11: [0x05,0x00,0x24,0xd7,0xea,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_cvt_pk_i16_i32_e64_dpp v255, v255, v255 dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX11: [0xff,0x00,0x24,0xd7,0xe9,0xfe,0x03,0x00,0xff,0x00,0x00,0x00]
+
+v_cvt_pk_norm_i16_f16_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x12,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_cvt_pk_norm_i16_f16_e64_dpp v5, |v1|, -v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x01,0x12,0xd7,0xe9,0x04,0x02,0x40,0x01,0x77,0x39,0x05]
+
+v_cvt_pk_norm_i16_f16_e64_dpp v5, -v1, |v2| dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX11: [0x05,0x02,0x12,0xd7,0xea,0x04,0x02,0x20,0x01,0x77,0x39,0x05]
+
+v_cvt_pk_norm_i16_f16_e64_dpp v255, -|v255|, -|v255| dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX11: [0xff,0x03,0x12,0xd7,0xe9,0xfe,0x03,0x60,0xff,0x00,0x00,0x00]
+
+v_cvt_pk_norm_u16_f16_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x13,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_cvt_pk_norm_u16_f16_e64_dpp v5, |v1|, -v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x01,0x13,0xd7,0xe9,0x04,0x02,0x40,0x01,0x77,0x39,0x05]
+
+v_cvt_pk_norm_u16_f16_e64_dpp v5, -v1, |v2| dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX11: [0x05,0x02,0x13,0xd7,0xea,0x04,0x02,0x20,0x01,0x77,0x39,0x05]
+
+v_cvt_pk_norm_u16_f16_e64_dpp v255, -|v255|, -|v255| dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX11: [0xff,0x03,0x13,0xd7,0xe9,0xfe,0x03,0x60,0xff,0x00,0x00,0x00]
+
+v_cvt_pk_u16_f32_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x07,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_cvt_pk_u16_f32_e64_dpp v5, |v1|, -v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x01,0x07,0xd7,0xe9,0x04,0x02,0x40,0x01,0x77,0x39,0x05]
+
+v_cvt_pk_u16_f32_e64_dpp v5, -v1, |v2| dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX11: [0x05,0x02,0x07,0xd7,0xea,0x04,0x02,0x20,0x01,0x77,0x39,0x05]
+
+v_cvt_pk_u16_f32_e64_dpp v255, -|v255|, -|v255| dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX11: [0xff,0x03,0x07,0xd7,0xe9,0xfe,0x03,0x60,0xff,0x00,0x00,0x00]
+
+v_cvt_pk_u16_u32_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x23,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_cvt_pk_u16_u32_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX11: [0x05,0x00,0x23,0xd7,0xea,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_cvt_pk_u16_u32_e64_dpp v255, v255, v255 dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX11: [0xff,0x00,0x23,0xd7,0xe9,0xfe,0x03,0x00,0xff,0x00,0x00,0x00]
+
+v_cvt_pk_u8_f32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x26,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_cvt_pk_u8_f32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x26,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_cvt_pk_u8_f32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x26,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_cvt_pk_u8_f32_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x26,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_cvt_pk_u8_f32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x26,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_cvt_pk_u8_f32_e64_dpp v5, v1, v2, ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x26,0xd6,0xe9,0x04,0xee,0x01,0x01,0x77,0x39,0x05]
+
+v_cvt_pk_u8_f32_e64_dpp v5, v1, v2, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x26,0xd6,0xe9,0x04,0xfe,0x01,0x01,0x77,0x39,0x05]
+
+v_cvt_pk_u8_f32_e64_dpp v5, v1, v2, exec_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x26,0xd6,0xe9,0x04,0xfa,0x01,0x01,0x77,0x39,0x05]
+
+v_cvt_pk_u8_f32_e64_dpp v5, v1, v2, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x26,0xd6,0xe9,0x04,0xf2,0x01,0x01,0x77,0x39,0x05]
+
+v_cvt_pk_u8_f32_e64_dpp v5, v1, v2, -1 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x26,0xd6,0xe9,0x04,0x06,0x03,0x01,0x77,0x39,0x05]
+
+v_cvt_pk_u8_f32_e64_dpp v5, v1, v2, 0.5 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX11: [0x05,0x00,0x26,0xd6,0xea,0x04,0xc2,0x03,0x01,0x77,0x39,0x05]
+
+v_cvt_pk_u8_f32_e64_dpp v255, -|v255|, v255, src_scc dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX11: [0xff,0x01,0x26,0xd6,0xe9,0xfe,0xf7,0x23,0xff,0x00,0x00,0x00]
+
+v_cvt_pk_norm_i16_f32_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x21,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_cvt_pk_norm_i16_f32_e64_dpp v5, |v1|, -v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x01,0x21,0xd7,0xe9,0x04,0x02,0x40,0x01,0x77,0x39,0x05]
+
+v_cvt_pk_norm_i16_f32_e64_dpp v5, -v1, |v2| dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX11: [0x05,0x02,0x21,0xd7,0xea,0x04,0x02,0x20,0x01,0x77,0x39,0x05]
+
+v_cvt_pk_norm_i16_f32_e64_dpp v255, -|v255|, -|v255| dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX11: [0xff,0x03,0x21,0xd7,0xe9,0xfe,0x03,0x60,0xff,0x00,0x00,0x00]
+
+v_cvt_pk_norm_u16_f32_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x22,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_cvt_pk_norm_u16_f32_e64_dpp v5, |v1|, -v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x01,0x22,0xd7,0xe9,0x04,0x02,0x40,0x01,0x77,0x39,0x05]
+
+v_cvt_pk_norm_u16_f32_e64_dpp v5, -v1, |v2| dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX11: [0x05,0x02,0x22,0xd7,0xea,0x04,0x02,0x20,0x01,0x77,0x39,0x05]
+
+v_cvt_pk_norm_u16_f32_e64_dpp v255, -|v255|, -|v255| dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX11: [0xff,0x03,0x22,0xd7,0xe9,0xfe,0x03,0x60,0xff,0x00,0x00,0x00]
+
+v_div_fixup_f16_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x54,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_div_fixup_f16_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x54,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_div_fixup_f16_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x54,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_div_fixup_f16_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x54,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_div_fixup_f16_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x54,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_div_fixup_f16_e64_dpp v5, |v1|, v2, -ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x01,0x54,0xd6,0xe9,0x04,0xee,0x81,0x01,0x77,0x39,0x05]
+
+v_div_fixup_f16_e64_dpp v5, v1, -|v2|, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x02,0x54,0xd6,0xe9,0x04,0xfe,0x41,0x01,0x77,0x39,0x05]
+
+v_div_fixup_f16_e64_dpp v5, -v1, v2, |exec_lo| dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x04,0x54,0xd6,0xe9,0x04,0xfa,0x21,0x01,0x77,0x39,0x05]
+
+v_div_fixup_f16_e64_dpp v5, -|v1|, -|v2|, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x03,0x54,0xd6,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05]
+
+v_div_fixup_f16_e64_dpp v5, -|v1|, v2, -|-1| dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x05,0x54,0xd6,0xe9,0x04,0x06,0xa3,0x01,0x77,0x39,0x05]
+
+v_div_fixup_f16_e64_dpp v5, v1, -|v2|, -|0.5| dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX11: [0x05,0x06,0x54,0xd6,0xea,0x04,0xc2,0xc3,0x01,0x77,0x39,0x05]
+
+v_div_fixup_f16_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX11: [0xff,0x87,0x54,0xd6,0xe9,0xfe,0xf7,0xe3,0xff,0x00,0x00,0x00]
+
+v_fma_f16_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x48,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_fma_f16_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x48,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_fma_f16_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x48,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_fma_f16_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x48,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_fma_f16_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x48,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_fma_f16_e64_dpp v5, |v1|, v2, -ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x01,0x48,0xd6,0xe9,0x04,0xee,0x81,0x01,0x77,0x39,0x05]
+
+v_fma_f16_e64_dpp v5, v1, -|v2|, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x02,0x48,0xd6,0xe9,0x04,0xfe,0x41,0x01,0x77,0x39,0x05]
+
+v_fma_f16_e64_dpp v5, -v1, v2, |exec_lo| dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x04,0x48,0xd6,0xe9,0x04,0xfa,0x21,0x01,0x77,0x39,0x05]
+
+v_fma_f16_e64_dpp v5, -|v1|, -|v2|, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x03,0x48,0xd6,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05]
+
+v_fma_f16_e64_dpp v5, -|v1|, v2, -|-1| dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x05,0x48,0xd6,0xe9,0x04,0x06,0xa3,0x01,0x77,0x39,0x05]
+
+v_fma_f16_e64_dpp v5, v1, -|v2|, -|0.5| dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX11: [0x05,0x06,0x48,0xd6,0xea,0x04,0xc2,0xc3,0x01,0x77,0x39,0x05]
+
+v_fma_f16_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX11: [0xff,0x87,0x48,0xd6,0xe9,0xfe,0xf7,0xe3,0xff,0x00,0x00,0x00]
+
+v_fma_f32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x13,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_fma_f32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x13,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_fma_f32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x13,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_fma_f32_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x13,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_fma_f32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x13,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_fma_f32_e64_dpp v5, |v1|, v2, -ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x01,0x13,0xd6,0xe9,0x04,0xee,0x81,0x01,0x77,0x39,0x05]
+
+v_fma_f32_e64_dpp v5, v1, -|v2|, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x02,0x13,0xd6,0xe9,0x04,0xfe,0x41,0x01,0x77,0x39,0x05]
+
+v_fma_f32_e64_dpp v5, -v1, v2, |exec_lo| dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x04,0x13,0xd6,0xe9,0x04,0xfa,0x21,0x01,0x77,0x39,0x05]
+
+v_fma_f32_e64_dpp v5, -|v1|, -|v2|, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x03,0x13,0xd6,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05]
+
+v_fma_f32_e64_dpp v5, -|v1|, v2, -|-1| mul:2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x05,0x13,0xd6,0xe9,0x04,0x06,0xab,0x01,0x77,0x39,0x05]
+
+v_fma_f32_e64_dpp v5, v1, -|v2|, -|0.5| mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX11: [0x05,0x06,0x13,0xd6,0xea,0x04,0xc2,0xd3,0x01,0x77,0x39,0x05]
+
+v_fma_f32_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX11: [0xff,0x87,0x13,0xd6,0xe9,0xfe,0xf7,0xfb,0xff,0x00,0x00,0x00]
+
+v_ldexp_f32_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x1c,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_ldexp_f32_e64_dpp v5, v1, v2 mul:2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x1c,0xd7,0xe9,0x04,0x02,0x08,0x01,0x77,0x39,0x05]
+
+v_ldexp_f32_e64_dpp v5, v1, v2 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX11: [0x05,0x00,0x1c,0xd7,0xea,0x04,0x02,0x10,0x01,0x77,0x39,0x05]
+
+v_ldexp_f32_e64_dpp v255, -|v255|, v255 clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX11: [0xff,0x81,0x1c,0xd7,0xe9,0xfe,0x03,0x38,0xff,0x00,0x00,0x00]
+
+v_lerp_u8_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x15,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_lerp_u8_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x15,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_lerp_u8_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x15,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_lerp_u8_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x15,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_lerp_u8_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x15,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_lerp_u8_e64_dpp v5, v1, v2, ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x15,0xd6,0xe9,0x04,0xee,0x01,0x01,0x77,0x39,0x05]
+
+v_lerp_u8_e64_dpp v5, v1, v2, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x15,0xd6,0xe9,0x04,0xfe,0x01,0x01,0x77,0x39,0x05]
+
+v_lerp_u8_e64_dpp v5, v1, v2, exec_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x15,0xd6,0xe9,0x04,0xfa,0x01,0x01,0x77,0x39,0x05]
+
+v_lerp_u8_e64_dpp v5, v1, v2, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x15,0xd6,0xe9,0x04,0xf2,0x01,0x01,0x77,0x39,0x05]
+
+v_lerp_u8_e64_dpp v5, v1, v2, -1 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x15,0xd6,0xe9,0x04,0x06,0x03,0x01,0x77,0x39,0x05]
+
+v_lerp_u8_e64_dpp v5, v1, v2, 0.5 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX11: [0x05,0x00,0x15,0xd6,0xea,0x04,0xc2,0x03,0x01,0x77,0x39,0x05]
+
+v_lerp_u8_e64_dpp v255, v255, v255, src_scc dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX11: [0xff,0x00,0x15,0xd6,0xe9,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
+
+v_lshl_add_u32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x46,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_lshl_add_u32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x46,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_lshl_add_u32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x46,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_lshl_add_u32_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x46,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_lshl_add_u32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x46,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_lshl_add_u32_e64_dpp v5, v1, v2, ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x46,0xd6,0xe9,0x04,0xee,0x01,0x01,0x77,0x39,0x05]
+
+v_lshl_add_u32_e64_dpp v5, v1, v2, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x46,0xd6,0xe9,0x04,0xfe,0x01,0x01,0x77,0x39,0x05]
+
+v_lshl_add_u32_e64_dpp v5, v1, v2, exec_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x46,0xd6,0xe9,0x04,0xfa,0x01,0x01,0x77,0x39,0x05]
+
+v_lshl_add_u32_e64_dpp v5, v1, v2, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x46,0xd6,0xe9,0x04,0xf2,0x01,0x01,0x77,0x39,0x05]
+
+v_lshl_add_u32_e64_dpp v5, v1, v2, -1 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x46,0xd6,0xe9,0x04,0x06,0x03,0x01,0x77,0x39,0x05]
+
+v_lshl_add_u32_e64_dpp v5, v1, v2, 0.5 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX11: [0x05,0x00,0x46,0xd6,0xea,0x04,0xc2,0x03,0x01,0x77,0x39,0x05]
+
+v_lshl_add_u32_e64_dpp v255, v255, v255, src_scc dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX11: [0xff,0x00,0x46,0xd6,0xe9,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
+
+v_lshl_or_b32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x56,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_lshl_or_b32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x56,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_lshl_or_b32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x56,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_lshl_or_b32_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x56,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_lshl_or_b32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x56,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_lshl_or_b32_e64_dpp v5, v1, v2, ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x56,0xd6,0xe9,0x04,0xee,0x01,0x01,0x77,0x39,0x05]
+
+v_lshl_or_b32_e64_dpp v5, v1, v2, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x56,0xd6,0xe9,0x04,0xfe,0x01,0x01,0x77,0x39,0x05]
+
+v_lshl_or_b32_e64_dpp v5, v1, v2, exec_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x56,0xd6,0xe9,0x04,0xfa,0x01,0x01,0x77,0x39,0x05]
+
+v_lshl_or_b32_e64_dpp v5, v1, v2, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x56,0xd6,0xe9,0x04,0xf2,0x01,0x01,0x77,0x39,0x05]
+
+v_lshl_or_b32_e64_dpp v5, v1, v2, -1 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x56,0xd6,0xe9,0x04,0x06,0x03,0x01,0x77,0x39,0x05]
+
+v_lshl_or_b32_e64_dpp v5, v1, v2, 0.5 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX11: [0x05,0x00,0x56,0xd6,0xea,0x04,0xc2,0x03,0x01,0x77,0x39,0x05]
+
+v_lshl_or_b32_e64_dpp v255, v255, v255, src_scc dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX11: [0xff,0x00,0x56,0xd6,0xe9,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
+
+v_lshlrev_b16_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x38,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_lshlrev_b16_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX11: [0x05,0x00,0x38,0xd7,0xea,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_lshlrev_b16_e64_dpp v255, v255, v255 dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX11: [0xff,0x00,0x38,0xd7,0xe9,0xfe,0x03,0x00,0xff,0x00,0x00,0x00]
+
+v_lshrrev_b16_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x39,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_lshrrev_b16_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX11: [0x05,0x00,0x39,0xd7,0xea,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_lshrrev_b16_e64_dpp v255, v255, v255 dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX11: [0xff,0x00,0x39,0xd7,0xe9,0xfe,0x03,0x00,0xff,0x00,0x00,0x00]
+
+v_mad_i16_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x53,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_mad_i16_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x53,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_mad_i16_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x53,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_mad_i16_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x53,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_mad_i16_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x53,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_mad_i16_e64_dpp v5, v1, v2, ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x53,0xd6,0xe9,0x04,0xee,0x01,0x01,0x77,0x39,0x05]
+
+v_mad_i16_e64_dpp v5, v1, v2, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x53,0xd6,0xe9,0x04,0xfe,0x01,0x01,0x77,0x39,0x05]
+
+v_mad_i16_e64_dpp v5, v1, v2, exec_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x53,0xd6,0xe9,0x04,0xfa,0x01,0x01,0x77,0x39,0x05]
+
+v_mad_i16_e64_dpp v5, v1, v2, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x53,0xd6,0xe9,0x04,0xf2,0x01,0x01,0x77,0x39,0x05]
+
+v_mad_i16_e64_dpp v5, v1, v2, -1 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX11: [0x05,0x00,0x53,0xd6,0xea,0x04,0x06,0x03,0x01,0x77,0x39,0x05]
+
+v_mad_i16_e64_dpp v255, v255, v255, src_scc clamp dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX11: [0xff,0x80,0x53,0xd6,0xe9,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
+
+v_mad_i32_i16_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x5a,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_mad_i32_i16_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x5a,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_mad_i32_i16_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x5a,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_mad_i32_i16_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x5a,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_mad_i32_i16_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x5a,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_mad_i32_i16_e64_dpp v5, v1, v2, ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x5a,0xd6,0xe9,0x04,0xee,0x01,0x01,0x77,0x39,0x05]
+
+v_mad_i32_i16_e64_dpp v5, v1, v2, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x5a,0xd6,0xe9,0x04,0xfe,0x01,0x01,0x77,0x39,0x05]
+
+v_mad_i32_i16_e64_dpp v5, v1, v2, exec_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x5a,0xd6,0xe9,0x04,0xfa,0x01,0x01,0x77,0x39,0x05]
+
+v_mad_i32_i16_e64_dpp v5, v1, v2, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x5a,0xd6,0xe9,0x04,0xf2,0x01,0x01,0x77,0x39,0x05]
+
+v_mad_i32_i16_e64_dpp v5, v1, v2, -1 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x5a,0xd6,0xe9,0x04,0x06,0x03,0x01,0x77,0x39,0x05]
+
+v_mad_i32_i16_e64_dpp v5, v1, v2, 0.5 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX11: [0x05,0x00,0x5a,0xd6,0xea,0x04,0xc2,0x03,0x01,0x77,0x39,0x05]
+
+v_mad_i32_i16_e64_dpp v255, v255, v255, src_scc clamp dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX11: [0xff,0x80,0x5a,0xd6,0xe9,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
+
+v_mad_i32_i24_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x0a,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_mad_i32_i24_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x0a,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_mad_i32_i24_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x0a,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_mad_i32_i24_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x0a,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_mad_i32_i24_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x0a,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_mad_i32_i24_e64_dpp v5, v1, v2, ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x0a,0xd6,0xe9,0x04,0xee,0x01,0x01,0x77,0x39,0x05]
+
+v_mad_i32_i24_e64_dpp v5, v1, v2, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x0a,0xd6,0xe9,0x04,0xfe,0x01,0x01,0x77,0x39,0x05]
+
+v_mad_i32_i24_e64_dpp v5, v1, v2, exec_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x0a,0xd6,0xe9,0x04,0xfa,0x01,0x01,0x77,0x39,0x05]
+
+v_mad_i32_i24_e64_dpp v5, v1, v2, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x0a,0xd6,0xe9,0x04,0xf2,0x01,0x01,0x77,0x39,0x05]
+
+v_mad_i32_i24_e64_dpp v5, v1, v2, -1 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x0a,0xd6,0xe9,0x04,0x06,0x03,0x01,0x77,0x39,0x05]
+
+v_mad_i32_i24_e64_dpp v5, v1, v2, 0.5 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX11: [0x05,0x00,0x0a,0xd6,0xea,0x04,0xc2,0x03,0x01,0x77,0x39,0x05]
+
+v_mad_i32_i24_e64_dpp v255, v255, v255, src_scc clamp dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX11: [0xff,0x80,0x0a,0xd6,0xe9,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
+
+v_mad_u16_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x41,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_mad_u16_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x41,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_mad_u16_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x41,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_mad_u16_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x41,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_mad_u16_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x41,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_mad_u16_e64_dpp v5, v1, v2, ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x41,0xd6,0xe9,0x04,0xee,0x01,0x01,0x77,0x39,0x05]
+
+v_mad_u16_e64_dpp v5, v1, v2, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x41,0xd6,0xe9,0x04,0xfe,0x01,0x01,0x77,0x39,0x05]
+
+v_mad_u16_e64_dpp v5, v1, v2, exec_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x41,0xd6,0xe9,0x04,0xfa,0x01,0x01,0x77,0x39,0x05]
+
+v_mad_u16_e64_dpp v5, v1, v2, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x41,0xd6,0xe9,0x04,0xf2,0x01,0x01,0x77,0x39,0x05]
+
+v_mad_u16_e64_dpp v5, v1, v2, -1 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX11: [0x05,0x00,0x41,0xd6,0xea,0x04,0x06,0x03,0x01,0x77,0x39,0x05]
+
+v_mad_u16_e64_dpp v255, v255, v255, src_scc clamp dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX11: [0xff,0x80,0x41,0xd6,0xe9,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
+
+v_mad_u32_u16_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x59,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_mad_u32_u16_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x59,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_mad_u32_u16_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x59,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_mad_u32_u16_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x59,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_mad_u32_u16_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x59,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_mad_u32_u16_e64_dpp v5, v1, v2, ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x59,0xd6,0xe9,0x04,0xee,0x01,0x01,0x77,0x39,0x05]
+
+v_mad_u32_u16_e64_dpp v5, v1, v2, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x59,0xd6,0xe9,0x04,0xfe,0x01,0x01,0x77,0x39,0x05]
+
+v_mad_u32_u16_e64_dpp v5, v1, v2, exec_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x59,0xd6,0xe9,0x04,0xfa,0x01,0x01,0x77,0x39,0x05]
+
+v_mad_u32_u16_e64_dpp v5, v1, v2, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x59,0xd6,0xe9,0x04,0xf2,0x01,0x01,0x77,0x39,0x05]
+
+v_mad_u32_u16_e64_dpp v5, v1, v2, -1 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x59,0xd6,0xe9,0x04,0x06,0x03,0x01,0x77,0x39,0x05]
+
+v_mad_u32_u16_e64_dpp v5, v1, v2, 0.5 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX11: [0x05,0x00,0x59,0xd6,0xea,0x04,0xc2,0x03,0x01,0x77,0x39,0x05]
+
+v_mad_u32_u16_e64_dpp v255, v255, v255, src_scc clamp dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX11: [0xff,0x80,0x59,0xd6,0xe9,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
+
+v_mad_u32_u24_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x0b,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_mad_u32_u24_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x0b,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_mad_u32_u24_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x0b,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_mad_u32_u24_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x0b,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_mad_u32_u24_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x0b,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_mad_u32_u24_e64_dpp v5, v1, v2, ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x0b,0xd6,0xe9,0x04,0xee,0x01,0x01,0x77,0x39,0x05]
+
+v_mad_u32_u24_e64_dpp v5, v1, v2, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x0b,0xd6,0xe9,0x04,0xfe,0x01,0x01,0x77,0x39,0x05]
+
+v_mad_u32_u24_e64_dpp v5, v1, v2, exec_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x0b,0xd6,0xe9,0x04,0xfa,0x01,0x01,0x77,0x39,0x05]
+
+v_mad_u32_u24_e64_dpp v5, v1, v2, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x0b,0xd6,0xe9,0x04,0xf2,0x01,0x01,0x77,0x39,0x05]
+
+v_mad_u32_u24_e64_dpp v5, v1, v2, -1 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x0b,0xd6,0xe9,0x04,0x06,0x03,0x01,0x77,0x39,0x05]
+
+v_mad_u32_u24_e64_dpp v5, v1, v2, 0.5 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX11: [0x05,0x00,0x0b,0xd6,0xea,0x04,0xc2,0x03,0x01,0x77,0x39,0x05]
+
+v_mad_u32_u24_e64_dpp v255, v255, v255, src_scc clamp dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX11: [0xff,0x80,0x0b,0xd6,0xe9,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
+
+v_max3_f16_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x4c,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_max3_f16_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x4c,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_max3_f16_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x4c,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_max3_f16_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x4c,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_max3_f16_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x4c,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_max3_f16_e64_dpp v5, |v1|, v2, -ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x01,0x4c,0xd6,0xe9,0x04,0xee,0x81,0x01,0x77,0x39,0x05]
+
+v_max3_f16_e64_dpp v5, v1, -|v2|, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x02,0x4c,0xd6,0xe9,0x04,0xfe,0x41,0x01,0x77,0x39,0x05]
+
+v_max3_f16_e64_dpp v5, -v1, v2, |exec_lo| dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x04,0x4c,0xd6,0xe9,0x04,0xfa,0x21,0x01,0x77,0x39,0x05]
+
+v_max3_f16_e64_dpp v5, -|v1|, -|v2|, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x03,0x4c,0xd6,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05]
+
+v_max3_f16_e64_dpp v5, -|v1|, v2, -|-1| dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x05,0x4c,0xd6,0xe9,0x04,0x06,0xa3,0x01,0x77,0x39,0x05]
+
+v_max3_f16_e64_dpp v5, v1, -|v2|, -|0.5| dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX11: [0x05,0x06,0x4c,0xd6,0xea,0x04,0xc2,0xc3,0x01,0x77,0x39,0x05]
+
+v_max3_f16_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX11: [0xff,0x87,0x4c,0xd6,0xe9,0xfe,0xf7,0xe3,0xff,0x00,0x00,0x00]
+
+v_max3_f32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x1c,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_max3_f32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x1c,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_max3_f32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x1c,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_max3_f32_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x1c,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_max3_f32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x1c,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_max3_f32_e64_dpp v5, |v1|, v2, -ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x01,0x1c,0xd6,0xe9,0x04,0xee,0x81,0x01,0x77,0x39,0x05]
+
+v_max3_f32_e64_dpp v5, v1, -|v2|, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x02,0x1c,0xd6,0xe9,0x04,0xfe,0x41,0x01,0x77,0x39,0x05]
+
+v_max3_f32_e64_dpp v5, -v1, v2, |exec_lo| dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x04,0x1c,0xd6,0xe9,0x04,0xfa,0x21,0x01,0x77,0x39,0x05]
+
+v_max3_f32_e64_dpp v5, -|v1|, -|v2|, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x03,0x1c,0xd6,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05]
+
+v_max3_f32_e64_dpp v5, -|v1|, v2, -|-1| mul:2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x05,0x1c,0xd6,0xe9,0x04,0x06,0xab,0x01,0x77,0x39,0x05]
+
+v_max3_f32_e64_dpp v5, v1, -|v2|, -|0.5| mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX11: [0x05,0x06,0x1c,0xd6,0xea,0x04,0xc2,0xd3,0x01,0x77,0x39,0x05]
+
+v_max3_f32_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX11: [0xff,0x87,0x1c,0xd6,0xe9,0xfe,0xf7,0xfb,0xff,0x00,0x00,0x00]
+
+v_max3_i16_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x4d,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_max3_i16_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x4d,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_max3_i16_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x4d,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_max3_i16_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x4d,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_max3_i16_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x4d,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_max3_i16_e64_dpp v5, v1, v2, ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x4d,0xd6,0xe9,0x04,0xee,0x01,0x01,0x77,0x39,0x05]
+
+v_max3_i16_e64_dpp v5, v1, v2, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x4d,0xd6,0xe9,0x04,0xfe,0x01,0x01,0x77,0x39,0x05]
+
+v_max3_i16_e64_dpp v5, v1, v2, exec_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x4d,0xd6,0xe9,0x04,0xfa,0x01,0x01,0x77,0x39,0x05]
+
+v_max3_i16_e64_dpp v5, v1, v2, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x4d,0xd6,0xe9,0x04,0xf2,0x01,0x01,0x77,0x39,0x05]
+
+v_max3_i16_e64_dpp v5, v1, v2, -1 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX11: [0x05,0x00,0x4d,0xd6,0xea,0x04,0x06,0x03,0x01,0x77,0x39,0x05]
+
+v_max3_i16_e64_dpp v255, v255, v255, src_scc dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX11: [0xff,0x00,0x4d,0xd6,0xe9,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
+
+v_max3_i32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x1d,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_max3_i32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x1d,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_max3_i32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x1d,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_max3_i32_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x1d,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_max3_i32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x1d,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_max3_i32_e64_dpp v5, v1, v2, ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x1d,0xd6,0xe9,0x04,0xee,0x01,0x01,0x77,0x39,0x05]
+
+v_max3_i32_e64_dpp v5, v1, v2, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x1d,0xd6,0xe9,0x04,0xfe,0x01,0x01,0x77,0x39,0x05]
+
+v_max3_i32_e64_dpp v5, v1, v2, exec_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x1d,0xd6,0xe9,0x04,0xfa,0x01,0x01,0x77,0x39,0x05]
+
+v_max3_i32_e64_dpp v5, v1, v2, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x1d,0xd6,0xe9,0x04,0xf2,0x01,0x01,0x77,0x39,0x05]
+
+v_max3_i32_e64_dpp v5, v1, v2, -1 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x1d,0xd6,0xe9,0x04,0x06,0x03,0x01,0x77,0x39,0x05]
+
+v_max3_i32_e64_dpp v5, v1, v2, 0.5 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX11: [0x05,0x00,0x1d,0xd6,0xea,0x04,0xc2,0x03,0x01,0x77,0x39,0x05]
+
+v_max3_i32_e64_dpp v255, v255, v255, src_scc dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX11: [0xff,0x00,0x1d,0xd6,0xe9,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
+
+v_max3_u16_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x4e,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_max3_u16_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x4e,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_max3_u16_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x4e,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_max3_u16_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x4e,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_max3_u16_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x4e,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_max3_u16_e64_dpp v5, v1, v2, ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x4e,0xd6,0xe9,0x04,0xee,0x01,0x01,0x77,0x39,0x05]
+
+v_max3_u16_e64_dpp v5, v1, v2, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x4e,0xd6,0xe9,0x04,0xfe,0x01,0x01,0x77,0x39,0x05]
+
+v_max3_u16_e64_dpp v5, v1, v2, exec_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x4e,0xd6,0xe9,0x04,0xfa,0x01,0x01,0x77,0x39,0x05]
+
+v_max3_u16_e64_dpp v5, v1, v2, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x4e,0xd6,0xe9,0x04,0xf2,0x01,0x01,0x77,0x39,0x05]
+
+v_max3_u16_e64_dpp v5, v1, v2, -1 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX11: [0x05,0x00,0x4e,0xd6,0xea,0x04,0x06,0x03,0x01,0x77,0x39,0x05]
+
+v_max3_u16_e64_dpp v255, v255, v255, src_scc dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX11: [0xff,0x00,0x4e,0xd6,0xe9,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
+
+v_max3_u32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x1e,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_max3_u32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x1e,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_max3_u32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x1e,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_max3_u32_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x1e,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_max3_u32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x1e,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_max3_u32_e64_dpp v5, v1, v2, ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x1e,0xd6,0xe9,0x04,0xee,0x01,0x01,0x77,0x39,0x05]
+
+v_max3_u32_e64_dpp v5, v1, v2, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x1e,0xd6,0xe9,0x04,0xfe,0x01,0x01,0x77,0x39,0x05]
+
+v_max3_u32_e64_dpp v5, v1, v2, exec_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x1e,0xd6,0xe9,0x04,0xfa,0x01,0x01,0x77,0x39,0x05]
+
+v_max3_u32_e64_dpp v5, v1, v2, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x1e,0xd6,0xe9,0x04,0xf2,0x01,0x01,0x77,0x39,0x05]
+
+v_max3_u32_e64_dpp v5, v1, v2, -1 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x1e,0xd6,0xe9,0x04,0x06,0x03,0x01,0x77,0x39,0x05]
+
+v_max3_u32_e64_dpp v5, v1, v2, 0.5 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX11: [0x05,0x00,0x1e,0xd6,0xea,0x04,0xc2,0x03,0x01,0x77,0x39,0x05]
+
+v_max3_u32_e64_dpp v255, v255, v255, src_scc dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX11: [0xff,0x00,0x1e,0xd6,0xe9,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
+
+v_max_i16_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x0a,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_max_i16_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX11: [0x05,0x00,0x0a,0xd7,0xea,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_max_i16_e64_dpp v255, v255, v255 dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX11: [0xff,0x00,0x0a,0xd7,0xe9,0xfe,0x03,0x00,0xff,0x00,0x00,0x00]
+
+v_max_u16_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x09,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_max_u16_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX11: [0x05,0x00,0x09,0xd7,0xea,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_max_u16_e64_dpp v255, v255, v255 dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX11: [0xff,0x00,0x09,0xd7,0xe9,0xfe,0x03,0x00,0xff,0x00,0x00,0x00]
+
+v_maxmin_f16_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x60,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_maxmin_f16_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x60,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_maxmin_f16_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x60,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_maxmin_f16_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x60,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_maxmin_f16_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x60,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_maxmin_f16_e64_dpp v5, |v1|, v2, -ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x01,0x60,0xd6,0xe9,0x04,0xee,0x81,0x01,0x77,0x39,0x05]
+
+v_maxmin_f16_e64_dpp v5, v1, -|v2|, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x02,0x60,0xd6,0xe9,0x04,0xfe,0x41,0x01,0x77,0x39,0x05]
+
+v_maxmin_f16_e64_dpp v5, -v1, v2, |exec_lo| dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x04,0x60,0xd6,0xe9,0x04,0xfa,0x21,0x01,0x77,0x39,0x05]
+
+v_maxmin_f16_e64_dpp v5, -|v1|, -|v2|, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x03,0x60,0xd6,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05]
+
+v_maxmin_f16_e64_dpp v5, -|v1|, v2, -|-1| mul:2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x05,0x60,0xd6,0xe9,0x04,0x06,0xab,0x01,0x77,0x39,0x05]
+
+v_maxmin_f16_e64_dpp v5, v1, -|v2|, -|0.5| mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX11: [0x05,0x06,0x60,0xd6,0xea,0x04,0xc2,0xd3,0x01,0x77,0x39,0x05]
+
+v_maxmin_f16_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX11: [0xff,0x87,0x60,0xd6,0xe9,0xfe,0xf7,0xfb,0xff,0x00,0x00,0x00]
+
+v_maxmin_f32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x5e,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_maxmin_f32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x5e,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_maxmin_f32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x5e,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_maxmin_f32_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x5e,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_maxmin_f32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x5e,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_maxmin_f32_e64_dpp v5, |v1|, v2, -ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x01,0x5e,0xd6,0xe9,0x04,0xee,0x81,0x01,0x77,0x39,0x05]
+
+v_maxmin_f32_e64_dpp v5, v1, -|v2|, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x02,0x5e,0xd6,0xe9,0x04,0xfe,0x41,0x01,0x77,0x39,0x05]
+
+v_maxmin_f32_e64_dpp v5, -v1, v2, |exec_lo| dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x04,0x5e,0xd6,0xe9,0x04,0xfa,0x21,0x01,0x77,0x39,0x05]
+
+v_maxmin_f32_e64_dpp v5, -|v1|, -|v2|, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x03,0x5e,0xd6,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05]
+
+v_maxmin_f32_e64_dpp v5, -|v1|, v2, -|-1| mul:2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x05,0x5e,0xd6,0xe9,0x04,0x06,0xab,0x01,0x77,0x39,0x05]
+
+v_maxmin_f32_e64_dpp v5, v1, -|v2|, -|0.5| mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX11: [0x05,0x06,0x5e,0xd6,0xea,0x04,0xc2,0xd3,0x01,0x77,0x39,0x05]
+
+v_maxmin_f32_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX11: [0xff,0x87,0x5e,0xd6,0xe9,0xfe,0xf7,0xfb,0xff,0x00,0x00,0x00]
+
+v_maxmin_i32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x64,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_maxmin_i32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x64,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_maxmin_i32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x64,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_maxmin_i32_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x64,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_maxmin_i32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x64,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_maxmin_i32_e64_dpp v5, v1, v2, ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x64,0xd6,0xe9,0x04,0xee,0x01,0x01,0x77,0x39,0x05]
+
+v_maxmin_i32_e64_dpp v5, v1, v2, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x64,0xd6,0xe9,0x04,0xfe,0x01,0x01,0x77,0x39,0x05]
+
+v_maxmin_i32_e64_dpp v5, v1, v2, exec_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x64,0xd6,0xe9,0x04,0xfa,0x01,0x01,0x77,0x39,0x05]
+
+v_maxmin_i32_e64_dpp v5, v1, v2, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x64,0xd6,0xe9,0x04,0xf2,0x01,0x01,0x77,0x39,0x05]
+
+v_maxmin_i32_e64_dpp v5, v1, v2, -1 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x64,0xd6,0xe9,0x04,0x06,0x03,0x01,0x77,0x39,0x05]
+
+v_maxmin_i32_e64_dpp v5, v1, v2, 0.5 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX11: [0x05,0x00,0x64,0xd6,0xea,0x04,0xc2,0x03,0x01,0x77,0x39,0x05]
+
+v_maxmin_i32_e64_dpp v255, v255, v255, src_scc dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX11: [0xff,0x00,0x64,0xd6,0xe9,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
+
+v_maxmin_u32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x62,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_maxmin_u32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x62,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_maxmin_u32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x62,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_maxmin_u32_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x62,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_maxmin_u32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x62,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_maxmin_u32_e64_dpp v5, v1, v2, ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x62,0xd6,0xe9,0x04,0xee,0x01,0x01,0x77,0x39,0x05]
+
+v_maxmin_u32_e64_dpp v5, v1, v2, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x62,0xd6,0xe9,0x04,0xfe,0x01,0x01,0x77,0x39,0x05]
+
+v_maxmin_u32_e64_dpp v5, v1, v2, exec_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x62,0xd6,0xe9,0x04,0xfa,0x01,0x01,0x77,0x39,0x05]
+
+v_maxmin_u32_e64_dpp v5, v1, v2, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x62,0xd6,0xe9,0x04,0xf2,0x01,0x01,0x77,0x39,0x05]
+
+v_maxmin_u32_e64_dpp v5, v1, v2, -1 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x62,0xd6,0xe9,0x04,0x06,0x03,0x01,0x77,0x39,0x05]
+
+v_maxmin_u32_e64_dpp v5, v1, v2, 0.5 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX11: [0x05,0x00,0x62,0xd6,0xea,0x04,0xc2,0x03,0x01,0x77,0x39,0x05]
+
+v_maxmin_u32_e64_dpp v255, v255, v255, src_scc dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX11: [0xff,0x00,0x62,0xd6,0xe9,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
+
+v_mbcnt_hi_u32_b32_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x20,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_mbcnt_hi_u32_b32_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX11: [0x05,0x00,0x20,0xd7,0xea,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_mbcnt_hi_u32_b32_e64_dpp v255, v255, v255 dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX11: [0xff,0x00,0x20,0xd7,0xe9,0xfe,0x03,0x00,0xff,0x00,0x00,0x00]
+
+v_mbcnt_lo_u32_b32_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x1f,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_mbcnt_lo_u32_b32_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX11: [0x05,0x00,0x1f,0xd7,0xea,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_mbcnt_lo_u32_b32_e64_dpp v255, v255, v255 dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX11: [0xff,0x00,0x1f,0xd7,0xe9,0xfe,0x03,0x00,0xff,0x00,0x00,0x00]
+
+v_med3_f16_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x4f,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_med3_f16_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x4f,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_med3_f16_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x4f,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_med3_f16_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x4f,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_med3_f16_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x4f,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_med3_f16_e64_dpp v5, |v1|, v2, -ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x01,0x4f,0xd6,0xe9,0x04,0xee,0x81,0x01,0x77,0x39,0x05]
+
+v_med3_f16_e64_dpp v5, v1, -|v2|, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x02,0x4f,0xd6,0xe9,0x04,0xfe,0x41,0x01,0x77,0x39,0x05]
+
+v_med3_f16_e64_dpp v5, -v1, v2, |exec_lo| dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x04,0x4f,0xd6,0xe9,0x04,0xfa,0x21,0x01,0x77,0x39,0x05]
+
+v_med3_f16_e64_dpp v5, -|v1|, -|v2|, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x03,0x4f,0xd6,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05]
+
+v_med3_f16_e64_dpp v5, -|v1|, v2, -|-1| dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x05,0x4f,0xd6,0xe9,0x04,0x06,0xa3,0x01,0x77,0x39,0x05]
+
+v_med3_f16_e64_dpp v5, v1, -|v2|, -|0.5| dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX11: [0x05,0x06,0x4f,0xd6,0xea,0x04,0xc2,0xc3,0x01,0x77,0x39,0x05]
+
+v_med3_f16_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX11: [0xff,0x87,0x4f,0xd6,0xe9,0xfe,0xf7,0xe3,0xff,0x00,0x00,0x00]
+
+v_med3_f32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x1f,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_med3_f32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x1f,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_med3_f32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x1f,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_med3_f32_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x1f,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_med3_f32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x1f,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_med3_f32_e64_dpp v5, |v1|, v2, -ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x01,0x1f,0xd6,0xe9,0x04,0xee,0x81,0x01,0x77,0x39,0x05]
+
+v_med3_f32_e64_dpp v5, v1, -|v2|, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x02,0x1f,0xd6,0xe9,0x04,0xfe,0x41,0x01,0x77,0x39,0x05]
+
+v_med3_f32_e64_dpp v5, -v1, v2, |exec_lo| dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x04,0x1f,0xd6,0xe9,0x04,0xfa,0x21,0x01,0x77,0x39,0x05]
+
+v_med3_f32_e64_dpp v5, -|v1|, -|v2|, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x03,0x1f,0xd6,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05]
+
+v_med3_f32_e64_dpp v5, -|v1|, v2, -|-1| mul:2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x05,0x1f,0xd6,0xe9,0x04,0x06,0xab,0x01,0x77,0x39,0x05]
+
+v_med3_f32_e64_dpp v5, v1, -|v2|, -|0.5| mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX11: [0x05,0x06,0x1f,0xd6,0xea,0x04,0xc2,0xd3,0x01,0x77,0x39,0x05]
+
+v_med3_f32_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX11: [0xff,0x87,0x1f,0xd6,0xe9,0xfe,0xf7,0xfb,0xff,0x00,0x00,0x00]
+
+v_med3_i16_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x50,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_med3_i16_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x50,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_med3_i16_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x50,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_med3_i16_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x50,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_med3_i16_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x50,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_med3_i16_e64_dpp v5, v1, v2, ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x50,0xd6,0xe9,0x04,0xee,0x01,0x01,0x77,0x39,0x05]
+
+v_med3_i16_e64_dpp v5, v1, v2, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x50,0xd6,0xe9,0x04,0xfe,0x01,0x01,0x77,0x39,0x05]
+
+v_med3_i16_e64_dpp v5, v1, v2, exec_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x50,0xd6,0xe9,0x04,0xfa,0x01,0x01,0x77,0x39,0x05]
+
+v_med3_i16_e64_dpp v5, v1, v2, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x50,0xd6,0xe9,0x04,0xf2,0x01,0x01,0x77,0x39,0x05]
+
+v_med3_i16_e64_dpp v5, v1, v2, -1 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX11: [0x05,0x00,0x50,0xd6,0xea,0x04,0x06,0x03,0x01,0x77,0x39,0x05]
+
+v_med3_i16_e64_dpp v255, v255, v255, src_scc dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX11: [0xff,0x00,0x50,0xd6,0xe9,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
+
+v_med3_i32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x20,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_med3_i32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x20,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_med3_i32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x20,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_med3_i32_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x20,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_med3_i32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x20,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_med3_i32_e64_dpp v5, v1, v2, ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x20,0xd6,0xe9,0x04,0xee,0x01,0x01,0x77,0x39,0x05]
+
+v_med3_i32_e64_dpp v5, v1, v2, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x20,0xd6,0xe9,0x04,0xfe,0x01,0x01,0x77,0x39,0x05]
+
+v_med3_i32_e64_dpp v5, v1, v2, exec_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x20,0xd6,0xe9,0x04,0xfa,0x01,0x01,0x77,0x39,0x05]
+
+v_med3_i32_e64_dpp v5, v1, v2, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x20,0xd6,0xe9,0x04,0xf2,0x01,0x01,0x77,0x39,0x05]
+
+v_med3_i32_e64_dpp v5, v1, v2, -1 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x20,0xd6,0xe9,0x04,0x06,0x03,0x01,0x77,0x39,0x05]
+
+v_med3_i32_e64_dpp v5, v1, v2, 0.5 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX11: [0x05,0x00,0x20,0xd6,0xea,0x04,0xc2,0x03,0x01,0x77,0x39,0x05]
+
+v_med3_i32_e64_dpp v255, v255, v255, src_scc dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX11: [0xff,0x00,0x20,0xd6,0xe9,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
+
+v_med3_u16_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x51,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_med3_u16_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x51,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_med3_u16_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x51,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_med3_u16_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x51,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_med3_u16_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x51,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_med3_u16_e64_dpp v5, v1, v2, ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x51,0xd6,0xe9,0x04,0xee,0x01,0x01,0x77,0x39,0x05]
+
+v_med3_u16_e64_dpp v5, v1, v2, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x51,0xd6,0xe9,0x04,0xfe,0x01,0x01,0x77,0x39,0x05]
+
+v_med3_u16_e64_dpp v5, v1, v2, exec_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x51,0xd6,0xe9,0x04,0xfa,0x01,0x01,0x77,0x39,0x05]
+
+v_med3_u16_e64_dpp v5, v1, v2, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x51,0xd6,0xe9,0x04,0xf2,0x01,0x01,0x77,0x39,0x05]
+
+v_med3_u16_e64_dpp v5, v1, v2, -1 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX11: [0x05,0x00,0x51,0xd6,0xea,0x04,0x06,0x03,0x01,0x77,0x39,0x05]
+
+v_med3_u16_e64_dpp v255, v255, v255, src_scc dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX11: [0xff,0x00,0x51,0xd6,0xe9,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
+
+v_med3_u32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x21,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_med3_u32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x21,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_med3_u32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x21,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_med3_u32_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x21,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_med3_u32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x21,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_med3_u32_e64_dpp v5, v1, v2, ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x21,0xd6,0xe9,0x04,0xee,0x01,0x01,0x77,0x39,0x05]
+
+v_med3_u32_e64_dpp v5, v1, v2, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x21,0xd6,0xe9,0x04,0xfe,0x01,0x01,0x77,0x39,0x05]
+
+v_med3_u32_e64_dpp v5, v1, v2, exec_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x21,0xd6,0xe9,0x04,0xfa,0x01,0x01,0x77,0x39,0x05]
+
+v_med3_u32_e64_dpp v5, v1, v2, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x21,0xd6,0xe9,0x04,0xf2,0x01,0x01,0x77,0x39,0x05]
+
+v_med3_u32_e64_dpp v5, v1, v2, -1 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x21,0xd6,0xe9,0x04,0x06,0x03,0x01,0x77,0x39,0x05]
+
+v_med3_u32_e64_dpp v5, v1, v2, 0.5 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX11: [0x05,0x00,0x21,0xd6,0xea,0x04,0xc2,0x03,0x01,0x77,0x39,0x05]
+
+v_med3_u32_e64_dpp v255, v255, v255, src_scc dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX11: [0xff,0x00,0x21,0xd6,0xe9,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
+
+v_min3_f16_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x49,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_min3_f16_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x49,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_min3_f16_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x49,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_min3_f16_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x49,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_min3_f16_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x49,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_min3_f16_e64_dpp v5, |v1|, v2, -ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x01,0x49,0xd6,0xe9,0x04,0xee,0x81,0x01,0x77,0x39,0x05]
+
+v_min3_f16_e64_dpp v5, v1, -|v2|, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x02,0x49,0xd6,0xe9,0x04,0xfe,0x41,0x01,0x77,0x39,0x05]
+
+v_min3_f16_e64_dpp v5, -v1, v2, |exec_lo| dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x04,0x49,0xd6,0xe9,0x04,0xfa,0x21,0x01,0x77,0x39,0x05]
+
+v_min3_f16_e64_dpp v5, -|v1|, -|v2|, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x03,0x49,0xd6,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05]
+
+v_min3_f16_e64_dpp v5, -|v1|, v2, -|-1| dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x05,0x49,0xd6,0xe9,0x04,0x06,0xa3,0x01,0x77,0x39,0x05]
+
+v_min3_f16_e64_dpp v5, v1, -|v2|, -|0.5| dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX11: [0x05,0x06,0x49,0xd6,0xea,0x04,0xc2,0xc3,0x01,0x77,0x39,0x05]
+
+v_min3_f16_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX11: [0xff,0x87,0x49,0xd6,0xe9,0xfe,0xf7,0xe3,0xff,0x00,0x00,0x00]
+
+v_min3_f32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x19,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_min3_f32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x19,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_min3_f32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x19,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_min3_f32_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x19,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_min3_f32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x19,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_min3_f32_e64_dpp v5, |v1|, v2, -ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x01,0x19,0xd6,0xe9,0x04,0xee,0x81,0x01,0x77,0x39,0x05]
+
+v_min3_f32_e64_dpp v5, v1, -|v2|, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x02,0x19,0xd6,0xe9,0x04,0xfe,0x41,0x01,0x77,0x39,0x05]
+
+v_min3_f32_e64_dpp v5, -v1, v2, |exec_lo| dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x04,0x19,0xd6,0xe9,0x04,0xfa,0x21,0x01,0x77,0x39,0x05]
+
+v_min3_f32_e64_dpp v5, -|v1|, -|v2|, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x03,0x19,0xd6,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05]
+
+v_min3_f32_e64_dpp v5, -|v1|, v2, -|-1| mul:2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x05,0x19,0xd6,0xe9,0x04,0x06,0xab,0x01,0x77,0x39,0x05]
+
+v_min3_f32_e64_dpp v5, v1, -|v2|, -|0.5| mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX11: [0x05,0x06,0x19,0xd6,0xea,0x04,0xc2,0xd3,0x01,0x77,0x39,0x05]
+
+v_min3_f32_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX11: [0xff,0x87,0x19,0xd6,0xe9,0xfe,0xf7,0xfb,0xff,0x00,0x00,0x00]
+
+v_min3_i16_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x4a,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_min3_i16_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x4a,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_min3_i16_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x4a,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_min3_i16_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x4a,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_min3_i16_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x4a,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_min3_i16_e64_dpp v5, v1, v2, ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x4a,0xd6,0xe9,0x04,0xee,0x01,0x01,0x77,0x39,0x05]
+
+v_min3_i16_e64_dpp v5, v1, v2, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x4a,0xd6,0xe9,0x04,0xfe,0x01,0x01,0x77,0x39,0x05]
+
+v_min3_i16_e64_dpp v5, v1, v2, exec_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x4a,0xd6,0xe9,0x04,0xfa,0x01,0x01,0x77,0x39,0x05]
+
+v_min3_i16_e64_dpp v5, v1, v2, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x4a,0xd6,0xe9,0x04,0xf2,0x01,0x01,0x77,0x39,0x05]
+
+v_min3_i16_e64_dpp v5, v1, v2, -1 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX11: [0x05,0x00,0x4a,0xd6,0xea,0x04,0x06,0x03,0x01,0x77,0x39,0x05]
+
+v_min3_i16_e64_dpp v255, v255, v255, src_scc dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX11: [0xff,0x00,0x4a,0xd6,0xe9,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
+
+v_min3_i32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x1a,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_min3_i32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x1a,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_min3_i32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x1a,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_min3_i32_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x1a,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_min3_i32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x1a,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_min3_i32_e64_dpp v5, v1, v2, ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x1a,0xd6,0xe9,0x04,0xee,0x01,0x01,0x77,0x39,0x05]
+
+v_min3_i32_e64_dpp v5, v1, v2, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x1a,0xd6,0xe9,0x04,0xfe,0x01,0x01,0x77,0x39,0x05]
+
+v_min3_i32_e64_dpp v5, v1, v2, exec_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x1a,0xd6,0xe9,0x04,0xfa,0x01,0x01,0x77,0x39,0x05]
+
+v_min3_i32_e64_dpp v5, v1, v2, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x1a,0xd6,0xe9,0x04,0xf2,0x01,0x01,0x77,0x39,0x05]
+
+v_min3_i32_e64_dpp v5, v1, v2, -1 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x1a,0xd6,0xe9,0x04,0x06,0x03,0x01,0x77,0x39,0x05]
+
+v_min3_i32_e64_dpp v5, v1, v2, 0.5 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX11: [0x05,0x00,0x1a,0xd6,0xea,0x04,0xc2,0x03,0x01,0x77,0x39,0x05]
+
+v_min3_i32_e64_dpp v255, v255, v255, src_scc dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX11: [0xff,0x00,0x1a,0xd6,0xe9,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
+
+v_min3_u16_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x4b,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_min3_u16_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x4b,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_min3_u16_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x4b,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_min3_u16_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x4b,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_min3_u16_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x4b,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_min3_u16_e64_dpp v5, v1, v2, ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x4b,0xd6,0xe9,0x04,0xee,0x01,0x01,0x77,0x39,0x05]
+
+v_min3_u16_e64_dpp v5, v1, v2, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x4b,0xd6,0xe9,0x04,0xfe,0x01,0x01,0x77,0x39,0x05]
+
+v_min3_u16_e64_dpp v5, v1, v2, exec_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x4b,0xd6,0xe9,0x04,0xfa,0x01,0x01,0x77,0x39,0x05]
+
+v_min3_u16_e64_dpp v5, v1, v2, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x4b,0xd6,0xe9,0x04,0xf2,0x01,0x01,0x77,0x39,0x05]
+
+v_min3_u16_e64_dpp v5, v1, v2, -1 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX11: [0x05,0x00,0x4b,0xd6,0xea,0x04,0x06,0x03,0x01,0x77,0x39,0x05]
+
+v_min3_u16_e64_dpp v255, v255, v255, src_scc dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX11: [0xff,0x00,0x4b,0xd6,0xe9,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
+
+v_min3_u32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x1b,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_min3_u32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x1b,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_min3_u32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x1b,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_min3_u32_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x1b,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_min3_u32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x1b,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_min3_u32_e64_dpp v5, v1, v2, ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x1b,0xd6,0xe9,0x04,0xee,0x01,0x01,0x77,0x39,0x05]
+
+v_min3_u32_e64_dpp v5, v1, v2, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x1b,0xd6,0xe9,0x04,0xfe,0x01,0x01,0x77,0x39,0x05]
+
+v_min3_u32_e64_dpp v5, v1, v2, exec_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x1b,0xd6,0xe9,0x04,0xfa,0x01,0x01,0x77,0x39,0x05]
+
+v_min3_u32_e64_dpp v5, v1, v2, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x1b,0xd6,0xe9,0x04,0xf2,0x01,0x01,0x77,0x39,0x05]
+
+v_min3_u32_e64_dpp v5, v1, v2, -1 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x1b,0xd6,0xe9,0x04,0x06,0x03,0x01,0x77,0x39,0x05]
+
+v_min3_u32_e64_dpp v5, v1, v2, 0.5 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX11: [0x05,0x00,0x1b,0xd6,0xea,0x04,0xc2,0x03,0x01,0x77,0x39,0x05]
+
+v_min3_u32_e64_dpp v255, v255, v255, src_scc dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX11: [0xff,0x00,0x1b,0xd6,0xe9,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
+
+v_min_i16_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x0c,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_min_i16_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX11: [0x05,0x00,0x0c,0xd7,0xea,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_min_i16_e64_dpp v255, v255, v255 dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX11: [0xff,0x00,0x0c,0xd7,0xe9,0xfe,0x03,0x00,0xff,0x00,0x00,0x00]
+
+v_min_u16_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x0b,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_min_u16_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX11: [0x05,0x00,0x0b,0xd7,0xea,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_min_u16_e64_dpp v255, v255, v255 dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX11: [0xff,0x00,0x0b,0xd7,0xe9,0xfe,0x03,0x00,0xff,0x00,0x00,0x00]
+
+v_minmax_f16_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x61,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_minmax_f16_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x61,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_minmax_f16_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x61,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_minmax_f16_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x61,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_minmax_f16_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x61,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_minmax_f16_e64_dpp v5, |v1|, v2, -ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x01,0x61,0xd6,0xe9,0x04,0xee,0x81,0x01,0x77,0x39,0x05]
+
+v_minmax_f16_e64_dpp v5, v1, -|v2|, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x02,0x61,0xd6,0xe9,0x04,0xfe,0x41,0x01,0x77,0x39,0x05]
+
+v_minmax_f16_e64_dpp v5, -v1, v2, |exec_lo| dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x04,0x61,0xd6,0xe9,0x04,0xfa,0x21,0x01,0x77,0x39,0x05]
+
+v_minmax_f16_e64_dpp v5, -|v1|, -|v2|, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x03,0x61,0xd6,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05]
+
+v_minmax_f16_e64_dpp v5, -|v1|, v2, -|-1| mul:2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x05,0x61,0xd6,0xe9,0x04,0x06,0xab,0x01,0x77,0x39,0x05]
+
+v_minmax_f16_e64_dpp v5, v1, -|v2|, -|0.5| mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX11: [0x05,0x06,0x61,0xd6,0xea,0x04,0xc2,0xd3,0x01,0x77,0x39,0x05]
+
+v_minmax_f16_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX11: [0xff,0x87,0x61,0xd6,0xe9,0xfe,0xf7,0xfb,0xff,0x00,0x00,0x00]
+
+v_minmax_f32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x5f,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_minmax_f32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x5f,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_minmax_f32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x5f,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_minmax_f32_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x5f,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_minmax_f32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x5f,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_minmax_f32_e64_dpp v5, |v1|, v2, -ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x01,0x5f,0xd6,0xe9,0x04,0xee,0x81,0x01,0x77,0x39,0x05]
+
+v_minmax_f32_e64_dpp v5, v1, -|v2|, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x02,0x5f,0xd6,0xe9,0x04,0xfe,0x41,0x01,0x77,0x39,0x05]
+
+v_minmax_f32_e64_dpp v5, -v1, v2, |exec_lo| dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x04,0x5f,0xd6,0xe9,0x04,0xfa,0x21,0x01,0x77,0x39,0x05]
+
+v_minmax_f32_e64_dpp v5, -|v1|, -|v2|, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x03,0x5f,0xd6,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05]
+
+v_minmax_f32_e64_dpp v5, -|v1|, v2, -|-1| mul:2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x05,0x5f,0xd6,0xe9,0x04,0x06,0xab,0x01,0x77,0x39,0x05]
+
+v_minmax_f32_e64_dpp v5, v1, -|v2|, -|0.5| mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX11: [0x05,0x06,0x5f,0xd6,0xea,0x04,0xc2,0xd3,0x01,0x77,0x39,0x05]
+
+v_minmax_f32_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX11: [0xff,0x87,0x5f,0xd6,0xe9,0xfe,0xf7,0xfb,0xff,0x00,0x00,0x00]
+
+v_minmax_i32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x65,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_minmax_i32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x65,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_minmax_i32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x65,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_minmax_i32_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x65,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_minmax_i32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x65,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_minmax_i32_e64_dpp v5, v1, v2, ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x65,0xd6,0xe9,0x04,0xee,0x01,0x01,0x77,0x39,0x05]
+
+v_minmax_i32_e64_dpp v5, v1, v2, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x65,0xd6,0xe9,0x04,0xfe,0x01,0x01,0x77,0x39,0x05]
+
+v_minmax_i32_e64_dpp v5, v1, v2, exec_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x65,0xd6,0xe9,0x04,0xfa,0x01,0x01,0x77,0x39,0x05]
+
+v_minmax_i32_e64_dpp v5, v1, v2, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x65,0xd6,0xe9,0x04,0xf2,0x01,0x01,0x77,0x39,0x05]
+
+v_minmax_i32_e64_dpp v5, v1, v2, -1 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x65,0xd6,0xe9,0x04,0x06,0x03,0x01,0x77,0x39,0x05]
+
+v_minmax_i32_e64_dpp v5, v1, v2, 0.5 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX11: [0x05,0x00,0x65,0xd6,0xea,0x04,0xc2,0x03,0x01,0x77,0x39,0x05]
+
+v_minmax_i32_e64_dpp v255, v255, v255, src_scc dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX11: [0xff,0x00,0x65,0xd6,0xe9,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
+
+v_minmax_u32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x63,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_minmax_u32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x63,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_minmax_u32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x63,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_minmax_u32_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x63,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_minmax_u32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x63,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_minmax_u32_e64_dpp v5, v1, v2, ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x63,0xd6,0xe9,0x04,0xee,0x01,0x01,0x77,0x39,0x05]
+
+v_minmax_u32_e64_dpp v5, v1, v2, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x63,0xd6,0xe9,0x04,0xfe,0x01,0x01,0x77,0x39,0x05]
+
+v_minmax_u32_e64_dpp v5, v1, v2, exec_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x63,0xd6,0xe9,0x04,0xfa,0x01,0x01,0x77,0x39,0x05]
+
+v_minmax_u32_e64_dpp v5, v1, v2, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x63,0xd6,0xe9,0x04,0xf2,0x01,0x01,0x77,0x39,0x05]
+
+v_minmax_u32_e64_dpp v5, v1, v2, -1 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x63,0xd6,0xe9,0x04,0x06,0x03,0x01,0x77,0x39,0x05]
+
+v_minmax_u32_e64_dpp v5, v1, v2, 0.5 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX11: [0x05,0x00,0x63,0xd6,0xea,0x04,0xc2,0x03,0x01,0x77,0x39,0x05]
+
+v_minmax_u32_e64_dpp v255, v255, v255, src_scc dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX11: [0xff,0x00,0x63,0xd6,0xe9,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
+
+v_msad_u8_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x39,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_msad_u8_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x39,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_msad_u8_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x39,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_msad_u8_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x39,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_msad_u8_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x39,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_msad_u8_e64_dpp v5, v1, v2, ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x39,0xd6,0xe9,0x04,0xee,0x01,0x01,0x77,0x39,0x05]
+
+v_msad_u8_e64_dpp v5, v1, v2, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x39,0xd6,0xe9,0x04,0xfe,0x01,0x01,0x77,0x39,0x05]
+
+v_msad_u8_e64_dpp v5, v1, v2, exec_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x39,0xd6,0xe9,0x04,0xfa,0x01,0x01,0x77,0x39,0x05]
+
+v_msad_u8_e64_dpp v5, v1, v2, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x39,0xd6,0xe9,0x04,0xf2,0x01,0x01,0x77,0x39,0x05]
+
+v_msad_u8_e64_dpp v5, v1, v2, -1 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x39,0xd6,0xe9,0x04,0x06,0x03,0x01,0x77,0x39,0x05]
+
+v_msad_u8_e64_dpp v5, v1, v2, 0.5 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX11: [0x05,0x00,0x39,0xd6,0xea,0x04,0xc2,0x03,0x01,0x77,0x39,0x05]
+
+v_msad_u8_e64_dpp v255, v255, v255, src_scc clamp dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX11: [0xff,0x80,0x39,0xd6,0xe9,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
+
+v_mul_lo_u16_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x05,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_mul_lo_u16_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX11: [0x05,0x00,0x05,0xd7,0xea,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_mul_lo_u16_e64_dpp v255, v255, v255 dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX11: [0xff,0x00,0x05,0xd7,0xe9,0xfe,0x03,0x00,0xff,0x00,0x00,0x00]
+
+v_mullit_f32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x18,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_mullit_f32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x18,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_mullit_f32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x18,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_mullit_f32_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x18,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_mullit_f32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x18,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_mullit_f32_e64_dpp v5, |v1|, v2, -ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x01,0x18,0xd6,0xe9,0x04,0xee,0x81,0x01,0x77,0x39,0x05]
+
+v_mullit_f32_e64_dpp v5, v1, -|v2|, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x02,0x18,0xd6,0xe9,0x04,0xfe,0x41,0x01,0x77,0x39,0x05]
+
+v_mullit_f32_e64_dpp v5, -v1, v2, |exec_lo| dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x04,0x18,0xd6,0xe9,0x04,0xfa,0x21,0x01,0x77,0x39,0x05]
+
+v_mullit_f32_e64_dpp v5, -|v1|, -|v2|, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x03,0x18,0xd6,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05]
+
+v_mullit_f32_e64_dpp v5, -|v1|, v2, -|-1| mul:2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x05,0x18,0xd6,0xe9,0x04,0x06,0xab,0x01,0x77,0x39,0x05]
+
+v_mullit_f32_e64_dpp v5, v1, -|v2|, -|0.5| mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX11: [0x05,0x06,0x18,0xd6,0xea,0x04,0xc2,0xd3,0x01,0x77,0x39,0x05]
+
+v_mullit_f32_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX11: [0xff,0x87,0x18,0xd6,0xe9,0xfe,0xf7,0xfb,0xff,0x00,0x00,0x00]
+
+v_or3_b32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x58,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_or3_b32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x58,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_or3_b32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x58,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_or3_b32_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x58,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_or3_b32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x58,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_or3_b32_e64_dpp v5, v1, v2, ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x58,0xd6,0xe9,0x04,0xee,0x01,0x01,0x77,0x39,0x05]
+
+v_or3_b32_e64_dpp v5, v1, v2, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x58,0xd6,0xe9,0x04,0xfe,0x01,0x01,0x77,0x39,0x05]
+
+v_or3_b32_e64_dpp v5, v1, v2, exec_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x58,0xd6,0xe9,0x04,0xfa,0x01,0x01,0x77,0x39,0x05]
+
+v_or3_b32_e64_dpp v5, v1, v2, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x58,0xd6,0xe9,0x04,0xf2,0x01,0x01,0x77,0x39,0x05]
+
+v_or3_b32_e64_dpp v5, v1, v2, -1 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x58,0xd6,0xe9,0x04,0x06,0x03,0x01,0x77,0x39,0x05]
+
+v_or3_b32_e64_dpp v5, v1, v2, 0.5 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX11: [0x05,0x00,0x58,0xd6,0xea,0x04,0xc2,0x03,0x01,0x77,0x39,0x05]
+
+v_or3_b32_e64_dpp v255, v255, v255, src_scc dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX11: [0xff,0x00,0x58,0xd6,0xe9,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
+
+v_or_b16_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x63,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_or_b16_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX11: [0x05,0x00,0x63,0xd7,0xea,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_or_b16_e64_dpp v255, v255, v255 dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX11: [0xff,0x00,0x63,0xd7,0xe9,0xfe,0x03,0x00,0xff,0x00,0x00,0x00]
+
+v_pack_b32_f16_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x11,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_pack_b32_f16_e64_dpp v5, |v1|, -v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x01,0x11,0xd7,0xe9,0x04,0x02,0x40,0x01,0x77,0x39,0x05]
+
+v_pack_b32_f16_e64_dpp v5, -v1, |v2| dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX11: [0x05,0x02,0x11,0xd7,0xea,0x04,0x02,0x20,0x01,0x77,0x39,0x05]
+
+v_pack_b32_f16_e64_dpp v255, -|v255|, -|v255| dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX11: [0xff,0x03,0x11,0xd7,0xe9,0xfe,0x03,0x60,0xff,0x00,0x00,0x00]
+
+v_perm_b32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x44,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_perm_b32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x44,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_perm_b32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x44,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_perm_b32_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x44,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_perm_b32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x44,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_perm_b32_e64_dpp v5, v1, v2, ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x44,0xd6,0xe9,0x04,0xee,0x01,0x01,0x77,0x39,0x05]
+
+v_perm_b32_e64_dpp v5, v1, v2, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x44,0xd6,0xe9,0x04,0xfe,0x01,0x01,0x77,0x39,0x05]
+
+v_perm_b32_e64_dpp v5, v1, v2, exec_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x44,0xd6,0xe9,0x04,0xfa,0x01,0x01,0x77,0x39,0x05]
+
+v_perm_b32_e64_dpp v5, v1, v2, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x44,0xd6,0xe9,0x04,0xf2,0x01,0x01,0x77,0x39,0x05]
+
+v_perm_b32_e64_dpp v5, v1, v2, -1 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x44,0xd6,0xe9,0x04,0x06,0x03,0x01,0x77,0x39,0x05]
+
+v_perm_b32_e64_dpp v5, v1, v2, 0.5 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX11: [0x05,0x00,0x44,0xd6,0xea,0x04,0xc2,0x03,0x01,0x77,0x39,0x05]
+
+v_perm_b32_e64_dpp v255, v255, v255, src_scc dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX11: [0xff,0x00,0x44,0xd6,0xe9,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
+
+v_sad_hi_u8_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x23,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_sad_hi_u8_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x23,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_sad_hi_u8_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x23,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_sad_hi_u8_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x23,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_sad_hi_u8_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x23,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_sad_hi_u8_e64_dpp v5, v1, v2, ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x23,0xd6,0xe9,0x04,0xee,0x01,0x01,0x77,0x39,0x05]
+
+v_sad_hi_u8_e64_dpp v5, v1, v2, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x23,0xd6,0xe9,0x04,0xfe,0x01,0x01,0x77,0x39,0x05]
+
+v_sad_hi_u8_e64_dpp v5, v1, v2, exec_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x23,0xd6,0xe9,0x04,0xfa,0x01,0x01,0x77,0x39,0x05]
+
+v_sad_hi_u8_e64_dpp v5, v1, v2, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x23,0xd6,0xe9,0x04,0xf2,0x01,0x01,0x77,0x39,0x05]
+
+v_sad_hi_u8_e64_dpp v5, v1, v2, -1 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x23,0xd6,0xe9,0x04,0x06,0x03,0x01,0x77,0x39,0x05]
+
+v_sad_hi_u8_e64_dpp v5, v1, v2, 0.5 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX11: [0x05,0x00,0x23,0xd6,0xea,0x04,0xc2,0x03,0x01,0x77,0x39,0x05]
+
+v_sad_hi_u8_e64_dpp v255, v255, v255, src_scc clamp dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX11: [0xff,0x80,0x23,0xd6,0xe9,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
+
+v_sad_u16_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x24,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_sad_u16_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x24,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_sad_u16_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x24,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_sad_u16_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x24,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_sad_u16_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x24,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_sad_u16_e64_dpp v5, v1, v2, ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x24,0xd6,0xe9,0x04,0xee,0x01,0x01,0x77,0x39,0x05]
+
+v_sad_u16_e64_dpp v5, v1, v2, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x24,0xd6,0xe9,0x04,0xfe,0x01,0x01,0x77,0x39,0x05]
+
+v_sad_u16_e64_dpp v5, v1, v2, exec_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x24,0xd6,0xe9,0x04,0xfa,0x01,0x01,0x77,0x39,0x05]
+
+v_sad_u16_e64_dpp v5, v1, v2, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x24,0xd6,0xe9,0x04,0xf2,0x01,0x01,0x77,0x39,0x05]
+
+v_sad_u16_e64_dpp v5, v1, v2, -1 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x24,0xd6,0xe9,0x04,0x06,0x03,0x01,0x77,0x39,0x05]
+
+v_sad_u16_e64_dpp v5, v1, v2, 0.5 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX11: [0x05,0x00,0x24,0xd6,0xea,0x04,0xc2,0x03,0x01,0x77,0x39,0x05]
+
+v_sad_u16_e64_dpp v255, v255, v255, src_scc clamp dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX11: [0xff,0x80,0x24,0xd6,0xe9,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
+
+v_sad_u32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x25,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_sad_u32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x25,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_sad_u32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x25,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_sad_u32_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x25,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_sad_u32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x25,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_sad_u32_e64_dpp v5, v1, v2, ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x25,0xd6,0xe9,0x04,0xee,0x01,0x01,0x77,0x39,0x05]
+
+v_sad_u32_e64_dpp v5, v1, v2, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x25,0xd6,0xe9,0x04,0xfe,0x01,0x01,0x77,0x39,0x05]
+
+v_sad_u32_e64_dpp v5, v1, v2, exec_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x25,0xd6,0xe9,0x04,0xfa,0x01,0x01,0x77,0x39,0x05]
+
+v_sad_u32_e64_dpp v5, v1, v2, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x25,0xd6,0xe9,0x04,0xf2,0x01,0x01,0x77,0x39,0x05]
+
+v_sad_u32_e64_dpp v5, v1, v2, -1 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x25,0xd6,0xe9,0x04,0x06,0x03,0x01,0x77,0x39,0x05]
+
+v_sad_u32_e64_dpp v5, v1, v2, 0.5 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX11: [0x05,0x00,0x25,0xd6,0xea,0x04,0xc2,0x03,0x01,0x77,0x39,0x05]
+
+v_sad_u32_e64_dpp v255, v255, v255, src_scc clamp dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX11: [0xff,0x80,0x25,0xd6,0xe9,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
+
+v_sad_u8_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x22,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_sad_u8_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x22,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_sad_u8_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x22,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_sad_u8_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x22,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_sad_u8_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x22,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_sad_u8_e64_dpp v5, v1, v2, ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x22,0xd6,0xe9,0x04,0xee,0x01,0x01,0x77,0x39,0x05]
+
+v_sad_u8_e64_dpp v5, v1, v2, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x22,0xd6,0xe9,0x04,0xfe,0x01,0x01,0x77,0x39,0x05]
+
+v_sad_u8_e64_dpp v5, v1, v2, exec_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x22,0xd6,0xe9,0x04,0xfa,0x01,0x01,0x77,0x39,0x05]
+
+v_sad_u8_e64_dpp v5, v1, v2, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x22,0xd6,0xe9,0x04,0xf2,0x01,0x01,0x77,0x39,0x05]
+
+v_sad_u8_e64_dpp v5, v1, v2, -1 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x22,0xd6,0xe9,0x04,0x06,0x03,0x01,0x77,0x39,0x05]
+
+v_sad_u8_e64_dpp v5, v1, v2, 0.5 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX11: [0x05,0x00,0x22,0xd6,0xea,0x04,0xc2,0x03,0x01,0x77,0x39,0x05]
+
+v_sad_u8_e64_dpp v255, v255, v255, src_scc clamp dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX11: [0xff,0x80,0x22,0xd6,0xe9,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
+
+v_sub_co_u32_e64_dpp v5, s6, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// W32: [0x05,0x06,0x01,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32_e64_dpp v5, s105, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// W32: [0x05,0x69,0x01,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32_e64_dpp v5, vcc_lo, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// W32: [0x05,0x6a,0x01,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32_e64_dpp v5, vcc_hi, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// W32: [0x05,0x6b,0x01,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32_e64_dpp v5, ttmp15, v1, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// W32: [0x05,0x7b,0x01,0xd7,0xea,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32_e64_dpp v5, s[12:13], v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// W64: [0x05,0x0c,0x01,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32_e64_dpp v5, s[104:105], v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// W64: [0x05,0x68,0x01,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32_e64_dpp v5, vcc, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// W64: [0x05,0x6a,0x01,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32_e64_dpp v5, ttmp[14:15], v1, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// W64: [0x05,0x7a,0x01,0xd7,0xea,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32_e64_dpp v255, null, v255, v255 clamp dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX11: [0xff,0xfc,0x01,0xd7,0xe9,0xfe,0x03,0x00,0xff,0x00,0x00,0x00]
+
+v_sub_nc_i16_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x0e,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_sub_nc_i16_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX11: [0x05,0x00,0x0e,0xd7,0xea,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_sub_nc_i16_e64_dpp v255, v255, v255 clamp dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX11: [0xff,0x80,0x0e,0xd7,0xe9,0xfe,0x03,0x00,0xff,0x00,0x00,0x00]
+
+v_sub_nc_i32_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x25,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_sub_nc_i32_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX11: [0x05,0x00,0x25,0xd7,0xea,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_sub_nc_i32_e64_dpp v255, v255, v255 clamp dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX11: [0xff,0x80,0x25,0xd7,0xe9,0xfe,0x03,0x00,0xff,0x00,0x00,0x00]
+
+v_sub_nc_u16_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x04,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_sub_nc_u16_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX11: [0x05,0x00,0x04,0xd7,0xea,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_sub_nc_u16_e64_dpp v255, v255, v255 clamp dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX11: [0xff,0x80,0x04,0xd7,0xe9,0xfe,0x03,0x00,0xff,0x00,0x00,0x00]
+
+v_subrev_co_u32_e64_dpp v5, s6, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// W32: [0x05,0x06,0x02,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32_e64_dpp v5, s105, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// W32: [0x05,0x69,0x02,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32_e64_dpp v5, vcc_lo, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// W32: [0x05,0x6a,0x02,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32_e64_dpp v5, vcc_hi, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// W32: [0x05,0x6b,0x02,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32_e64_dpp v5, ttmp15, v1, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// W32: [0x05,0x7b,0x02,0xd7,0xea,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32_e64_dpp v5, s[12:13], v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// W64: [0x05,0x0c,0x02,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32_e64_dpp v5, s[104:105], v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// W64: [0x05,0x68,0x02,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32_e64_dpp v5, vcc, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// W64: [0x05,0x6a,0x02,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32_e64_dpp v5, ttmp[14:15], v1, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// W64: [0x05,0x7a,0x02,0xd7,0xea,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32_e64_dpp v255, null, v255, v255 clamp dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX11: [0xff,0xfc,0x02,0xd7,0xe9,0xfe,0x03,0x00,0xff,0x00,0x00,0x00]
+
+v_xad_u32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x45,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_xad_u32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x45,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_xad_u32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x45,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_xad_u32_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x45,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_xad_u32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x45,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_xad_u32_e64_dpp v5, v1, v2, ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x45,0xd6,0xe9,0x04,0xee,0x01,0x01,0x77,0x39,0x05]
+
+v_xad_u32_e64_dpp v5, v1, v2, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x45,0xd6,0xe9,0x04,0xfe,0x01,0x01,0x77,0x39,0x05]
+
+v_xad_u32_e64_dpp v5, v1, v2, exec_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x45,0xd6,0xe9,0x04,0xfa,0x01,0x01,0x77,0x39,0x05]
+
+v_xad_u32_e64_dpp v5, v1, v2, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x45,0xd6,0xe9,0x04,0xf2,0x01,0x01,0x77,0x39,0x05]
+
+v_xad_u32_e64_dpp v5, v1, v2, -1 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x45,0xd6,0xe9,0x04,0x06,0x03,0x01,0x77,0x39,0x05]
+
+v_xad_u32_e64_dpp v5, v1, v2, 0.5 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX11: [0x05,0x00,0x45,0xd6,0xea,0x04,0xc2,0x03,0x01,0x77,0x39,0x05]
+
+v_xad_u32_e64_dpp v255, v255, v255, src_scc dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX11: [0xff,0x00,0x45,0xd6,0xe9,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
+
+v_xor3_b32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x40,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_xor3_b32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x40,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_xor3_b32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x40,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_xor3_b32_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x40,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_xor3_b32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x40,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_xor3_b32_e64_dpp v5, v1, v2, ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x40,0xd6,0xe9,0x04,0xee,0x01,0x01,0x77,0x39,0x05]
+
+v_xor3_b32_e64_dpp v5, v1, v2, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x40,0xd6,0xe9,0x04,0xfe,0x01,0x01,0x77,0x39,0x05]
+
+v_xor3_b32_e64_dpp v5, v1, v2, exec_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x40,0xd6,0xe9,0x04,0xfa,0x01,0x01,0x77,0x39,0x05]
+
+v_xor3_b32_e64_dpp v5, v1, v2, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x40,0xd6,0xe9,0x04,0xf2,0x01,0x01,0x77,0x39,0x05]
+
+v_xor3_b32_e64_dpp v5, v1, v2, -1 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x40,0xd6,0xe9,0x04,0x06,0x03,0x01,0x77,0x39,0x05]
+
+v_xor3_b32_e64_dpp v5, v1, v2, 0.5 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX11: [0x05,0x00,0x40,0xd6,0xea,0x04,0xc2,0x03,0x01,0x77,0x39,0x05]
+
+v_xor3_b32_e64_dpp v255, v255, v255, src_scc dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX11: [0xff,0x00,0x40,0xd6,0xe9,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
+
+v_xor_b16_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x00,0x64,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_xor_b16_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX11: [0x05,0x00,0x64,0xd7,0xea,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_xor_b16_e64_dpp v255, v255, v255 dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX11: [0xff,0x00,0x64,0xd7,0xe9,0xfe,0x03,0x00,0xff,0x00,0x00,0x00]
+
+v_add_nc_i16_e64_dpp v5, v1, v2 op_sel:[1,1,1] dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x58,0x0d,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_add_nc_i16_e64_dpp v5, v1, v2 op_sel:[1,0,0] dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x08,0x0d,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_add_nc_i16_e64_dpp v5, v1, v2 op_sel:[0,1,0] dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x10,0x0d,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_add_nc_i16_e64_dpp v255, v255, v255 op_sel:[0,0,1] clamp dpp8:[0,0,0,0,0,0,0,0] fi:1
+// GFX11: [0xff,0xc0,0x0d,0xd7,0xea,0xfe,0x03,0x00,0xff,0x00,0x00,0x00]
+
+v_add_nc_u16_e64_dpp v5, v1, v2 op_sel:[1,1,1] dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x58,0x03,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_add_nc_u16_e64_dpp v5, v1, v2 op_sel:[1,0,0] dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x08,0x03,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_add_nc_u16_e64_dpp v5, v1, v2 op_sel:[0,1,0] dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x10,0x03,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_add_nc_u16_e64_dpp v255, v255, v255 op_sel:[0,0,1] clamp dpp8:[0,0,0,0,0,0,0,0] fi:1
+// GFX11: [0xff,0xc0,0x03,0xd7,0xea,0xfe,0x03,0x00,0xff,0x00,0x00,0x00]
+
+v_cvt_pk_norm_i16_f16_e64_dpp v5, -v1, |v2| op_sel:[1,0,0] dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x0a,0x12,0xd7,0xe9,0x04,0x02,0x20,0x01,0x77,0x39,0x05]
+
+v_cvt_pk_norm_i16_f16_e64_dpp v255, -|v255|, -|v255| op_sel:[0,1,0] dpp8:[0,0,0,0,0,0,0,0] fi:1
+// GFX11: [0xff,0x13,0x12,0xd7,0xea,0xfe,0x03,0x60,0xff,0x00,0x00,0x00]
+
+v_cvt_pk_norm_u16_f16_e64_dpp v5, -v1, |v2| op_sel:[1,0,0] dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x0a,0x13,0xd7,0xe9,0x04,0x02,0x20,0x01,0x77,0x39,0x05]
+
+v_cvt_pk_norm_u16_f16_e64_dpp v255, -|v255|, -|v255| op_sel:[0,1,0] dpp8:[0,0,0,0,0,0,0,0] fi:1
+// GFX11: [0xff,0x13,0x13,0xd7,0xea,0xfe,0x03,0x60,0xff,0x00,0x00,0x00]
+
+v_div_fixup_f16_e64_dpp v5, -v1, v2, |exec_lo| op_sel:[1,1,1,1] dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x7c,0x54,0xd6,0xe9,0x04,0xfa,0x21,0x01,0x77,0x39,0x05]
+
+v_div_fixup_f16_e64_dpp v5, -|v1|, -|v2|, null op_sel:[1,0,0,0] dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x0b,0x54,0xd6,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05]
+
+v_div_fixup_f16_e64_dpp v5, -|v1|, v2, -|-1| op_sel:[0,1,0,0] dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x15,0x54,0xd6,0xe9,0x04,0x06,0xa3,0x01,0x77,0x39,0x05]
+
+v_div_fixup_f16_e64_dpp v5, v1, -|v2|, -|0.5| op_sel:[0,0,1,0] dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x26,0x54,0xd6,0xe9,0x04,0xc2,0xc3,0x01,0x77,0x39,0x05]
+
+v_div_fixup_f16_e64_dpp v255, -|v255|, -|v255|, -|src_scc| op_sel:[0,0,0,1] clamp dpp8:[0,0,0,0,0,0,0,0] fi:1
+// GFX11: [0xff,0xc7,0x54,0xd6,0xea,0xfe,0xf7,0xe3,0xff,0x00,0x00,0x00]
+
+v_fma_f16_e64_dpp v5, -v1, v2, |exec_lo| op_sel:[1,1,1,1] dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x7c,0x48,0xd6,0xe9,0x04,0xfa,0x21,0x01,0x77,0x39,0x05]
+
+v_fma_f16_e64_dpp v5, -|v1|, -|v2|, null op_sel:[1,0,0,0] dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x0b,0x48,0xd6,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05]
+
+v_fma_f16_e64_dpp v5, -|v1|, v2, -|-1| op_sel:[0,1,0,0] dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x15,0x48,0xd6,0xe9,0x04,0x06,0xa3,0x01,0x77,0x39,0x05]
+
+v_fma_f16_e64_dpp v5, v1, -|v2|, -|0.5| op_sel:[0,0,1,0] dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x26,0x48,0xd6,0xe9,0x04,0xc2,0xc3,0x01,0x77,0x39,0x05]
+
+v_fma_f16_e64_dpp v255, -|v255|, -|v255|, -|src_scc| op_sel:[0,0,0,1] clamp dpp8:[0,0,0,0,0,0,0,0] fi:1
+// GFX11: [0xff,0xc7,0x48,0xd6,0xea,0xfe,0xf7,0xe3,0xff,0x00,0x00,0x00]
+
+v_mad_i16_e64_dpp v5, v1, v2, exec_hi op_sel:[1,1,1,1] dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x78,0x53,0xd6,0xe9,0x04,0xfe,0x01,0x01,0x77,0x39,0x05]
+
+v_mad_i16_e64_dpp v5, v1, v2, exec_lo op_sel:[1,0,0,0] dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x08,0x53,0xd6,0xe9,0x04,0xfa,0x01,0x01,0x77,0x39,0x05]
+
+v_mad_i16_e64_dpp v5, v1, v2, null op_sel:[0,1,0,0] dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x10,0x53,0xd6,0xe9,0x04,0xf2,0x01,0x01,0x77,0x39,0x05]
+
+v_mad_i16_e64_dpp v5, v1, v2, -1 op_sel:[0,0,1,0] dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x20,0x53,0xd6,0xe9,0x04,0x06,0x03,0x01,0x77,0x39,0x05]
+
+v_mad_i16_e64_dpp v255, v255, v255, src_scc op_sel:[0,0,0,1] clamp dpp8:[0,0,0,0,0,0,0,0] fi:1
+// GFX11: [0xff,0xc0,0x53,0xd6,0xea,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
+
+v_mad_i32_i16_e64_dpp v5, v1, v2, 0.5 op_sel:[1,0,0,0] dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x08,0x5a,0xd6,0xe9,0x04,0xc2,0x03,0x01,0x77,0x39,0x05]
+
+v_mad_i32_i16_e64_dpp v255, v255, v255, src_scc op_sel:[0,1,0,0] clamp dpp8:[0,0,0,0,0,0,0,0] fi:1
+// GFX11: [0xff,0x90,0x5a,0xd6,0xea,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
+
+v_mad_u16_e64_dpp v5, v1, v2, exec_hi op_sel:[1,1,1,1] dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x78,0x41,0xd6,0xe9,0x04,0xfe,0x01,0x01,0x77,0x39,0x05]
+
+v_mad_u16_e64_dpp v5, v1, v2, exec_lo op_sel:[1,0,0,0] dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x08,0x41,0xd6,0xe9,0x04,0xfa,0x01,0x01,0x77,0x39,0x05]
+
+v_mad_u16_e64_dpp v5, v1, v2, null op_sel:[0,1,0,0] dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x10,0x41,0xd6,0xe9,0x04,0xf2,0x01,0x01,0x77,0x39,0x05]
+
+v_mad_u16_e64_dpp v5, v1, v2, -1 op_sel:[0,0,1,0] dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x20,0x41,0xd6,0xe9,0x04,0x06,0x03,0x01,0x77,0x39,0x05]
+
+v_mad_u16_e64_dpp v255, v255, v255, src_scc op_sel:[0,0,0,1] clamp dpp8:[0,0,0,0,0,0,0,0] fi:1
+// GFX11: [0xff,0xc0,0x41,0xd6,0xea,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
+
+v_mad_u32_u16_e64_dpp v5, v1, v2, 0.5 op_sel:[1,0,0,0] dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x08,0x59,0xd6,0xe9,0x04,0xc2,0x03,0x01,0x77,0x39,0x05]
+
+v_mad_u32_u16_e64_dpp v255, v255, v255, src_scc op_sel:[0,1,0,0] clamp dpp8:[0,0,0,0,0,0,0,0] fi:1
+// GFX11: [0xff,0x90,0x59,0xd6,0xea,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
+
+v_max3_f16_e64_dpp v5, -v1, v2, |exec_lo| op_sel:[1,1,1,1] dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x7c,0x4c,0xd6,0xe9,0x04,0xfa,0x21,0x01,0x77,0x39,0x05]
+
+v_max3_f16_e64_dpp v5, -|v1|, -|v2|, null op_sel:[1,0,0,0] dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x0b,0x4c,0xd6,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05]
+
+v_max3_f16_e64_dpp v5, -|v1|, v2, -|-1| op_sel:[0,1,0,0] dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x15,0x4c,0xd6,0xe9,0x04,0x06,0xa3,0x01,0x77,0x39,0x05]
+
+v_max3_f16_e64_dpp v5, v1, -|v2|, -|0.5| op_sel:[0,0,1,0] dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x26,0x4c,0xd6,0xe9,0x04,0xc2,0xc3,0x01,0x77,0x39,0x05]
+
+v_max3_f16_e64_dpp v255, -|v255|, -|v255|, -|src_scc| op_sel:[0,0,0,1] clamp dpp8:[0,0,0,0,0,0,0,0] fi:1
+// GFX11: [0xff,0xc7,0x4c,0xd6,0xea,0xfe,0xf7,0xe3,0xff,0x00,0x00,0x00]
+
+v_max3_i16_e64_dpp v5, v1, v2, exec_hi op_sel:[1,1,1,1] dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x78,0x4d,0xd6,0xe9,0x04,0xfe,0x01,0x01,0x77,0x39,0x05]
+
+v_max3_i16_e64_dpp v5, v1, v2, exec_lo op_sel:[1,0,0,0] dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x08,0x4d,0xd6,0xe9,0x04,0xfa,0x01,0x01,0x77,0x39,0x05]
+
+v_max3_i16_e64_dpp v5, v1, v2, null op_sel:[0,1,0,0] dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x10,0x4d,0xd6,0xe9,0x04,0xf2,0x01,0x01,0x77,0x39,0x05]
+
+v_max3_i16_e64_dpp v5, v1, v2, -1 op_sel:[0,0,1,0] dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x20,0x4d,0xd6,0xe9,0x04,0x06,0x03,0x01,0x77,0x39,0x05]
+
+v_max3_i16_e64_dpp v255, v255, v255, src_scc op_sel:[0,0,0,1] dpp8:[0,0,0,0,0,0,0,0] fi:1
+// GFX11: [0xff,0x40,0x4d,0xd6,0xea,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
+
+v_max3_u16_e64_dpp v5, v1, v2, exec_hi op_sel:[1,1,1,1] dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x78,0x4e,0xd6,0xe9,0x04,0xfe,0x01,0x01,0x77,0x39,0x05]
+
+v_max3_u16_e64_dpp v5, v1, v2, exec_lo op_sel:[1,0,0,0] dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x08,0x4e,0xd6,0xe9,0x04,0xfa,0x01,0x01,0x77,0x39,0x05]
+
+v_max3_u16_e64_dpp v5, v1, v2, null op_sel:[0,1,0,0] dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x10,0x4e,0xd6,0xe9,0x04,0xf2,0x01,0x01,0x77,0x39,0x05]
+
+v_max3_u16_e64_dpp v5, v1, v2, -1 op_sel:[0,0,1,0] dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x20,0x4e,0xd6,0xe9,0x04,0x06,0x03,0x01,0x77,0x39,0x05]
+
+v_max3_u16_e64_dpp v255, v255, v255, src_scc op_sel:[0,0,0,1] dpp8:[0,0,0,0,0,0,0,0] fi:1
+// GFX11: [0xff,0x40,0x4e,0xd6,0xea,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
+
+v_med3_f16_e64_dpp v5, -v1, v2, |exec_lo| op_sel:[1,1,1,1] dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x7c,0x4f,0xd6,0xe9,0x04,0xfa,0x21,0x01,0x77,0x39,0x05]
+
+v_med3_f16_e64_dpp v5, -|v1|, -|v2|, null op_sel:[1,0,0,0] dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x0b,0x4f,0xd6,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05]
+
+v_med3_f16_e64_dpp v5, -|v1|, v2, -|-1| op_sel:[0,1,0,0] dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x15,0x4f,0xd6,0xe9,0x04,0x06,0xa3,0x01,0x77,0x39,0x05]
+
+v_med3_f16_e64_dpp v5, v1, -|v2|, -|0.5| op_sel:[0,0,1,0] dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x26,0x4f,0xd6,0xe9,0x04,0xc2,0xc3,0x01,0x77,0x39,0x05]
+
+v_med3_f16_e64_dpp v255, -|v255|, -|v255|, -|src_scc| op_sel:[0,0,0,1] clamp dpp8:[0,0,0,0,0,0,0,0] fi:1
+// GFX11: [0xff,0xc7,0x4f,0xd6,0xea,0xfe,0xf7,0xe3,0xff,0x00,0x00,0x00]
+
+v_med3_i16_e64_dpp v5, v1, v2, exec_hi op_sel:[1,1,1,1] dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x78,0x50,0xd6,0xe9,0x04,0xfe,0x01,0x01,0x77,0x39,0x05]
+
+v_med3_i16_e64_dpp v5, v1, v2, exec_lo op_sel:[1,0,0,0] dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x08,0x50,0xd6,0xe9,0x04,0xfa,0x01,0x01,0x77,0x39,0x05]
+
+v_med3_i16_e64_dpp v5, v1, v2, null op_sel:[0,1,0,0] dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x10,0x50,0xd6,0xe9,0x04,0xf2,0x01,0x01,0x77,0x39,0x05]
+
+v_med3_i16_e64_dpp v5, v1, v2, -1 op_sel:[0,0,1,0] dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x20,0x50,0xd6,0xe9,0x04,0x06,0x03,0x01,0x77,0x39,0x05]
+
+v_med3_i16_e64_dpp v255, v255, v255, src_scc op_sel:[0,0,0,1] dpp8:[0,0,0,0,0,0,0,0] fi:1
+// GFX11: [0xff,0x40,0x50,0xd6,0xea,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
+
+v_med3_u16_e64_dpp v5, v1, v2, exec_hi op_sel:[1,1,1,1] dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x78,0x51,0xd6,0xe9,0x04,0xfe,0x01,0x01,0x77,0x39,0x05]
+
+v_med3_u16_e64_dpp v5, v1, v2, exec_lo op_sel:[1,0,0,0] dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x08,0x51,0xd6,0xe9,0x04,0xfa,0x01,0x01,0x77,0x39,0x05]
+
+v_med3_u16_e64_dpp v5, v1, v2, null op_sel:[0,1,0,0] dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x10,0x51,0xd6,0xe9,0x04,0xf2,0x01,0x01,0x77,0x39,0x05]
+
+v_med3_u16_e64_dpp v5, v1, v2, -1 op_sel:[0,0,1,0] dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x20,0x51,0xd6,0xe9,0x04,0x06,0x03,0x01,0x77,0x39,0x05]
+
+v_med3_u16_e64_dpp v255, v255, v255, src_scc op_sel:[0,0,0,1] dpp8:[0,0,0,0,0,0,0,0] fi:1
+// GFX11: [0xff,0x40,0x51,0xd6,0xea,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
+
+v_min3_f16_e64_dpp v5, -v1, v2, |exec_lo| op_sel:[1,1,1,1] dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x7c,0x49,0xd6,0xe9,0x04,0xfa,0x21,0x01,0x77,0x39,0x05]
+
+v_min3_f16_e64_dpp v5, -|v1|, -|v2|, null op_sel:[1,0,0,0] dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x0b,0x49,0xd6,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05]
+
+v_min3_f16_e64_dpp v5, -|v1|, v2, -|-1| op_sel:[0,1,0,0] dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x15,0x49,0xd6,0xe9,0x04,0x06,0xa3,0x01,0x77,0x39,0x05]
+
+v_min3_f16_e64_dpp v5, v1, -|v2|, -|0.5| op_sel:[0,0,1,0] dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x26,0x49,0xd6,0xe9,0x04,0xc2,0xc3,0x01,0x77,0x39,0x05]
+
+v_min3_f16_e64_dpp v255, -|v255|, -|v255|, -|src_scc| op_sel:[0,0,0,1] clamp dpp8:[0,0,0,0,0,0,0,0] fi:1
+// GFX11: [0xff,0xc7,0x49,0xd6,0xea,0xfe,0xf7,0xe3,0xff,0x00,0x00,0x00]
+
+v_min3_i16_e64_dpp v5, v1, v2, exec_hi op_sel:[1,1,1,1] dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x78,0x4a,0xd6,0xe9,0x04,0xfe,0x01,0x01,0x77,0x39,0x05]
+
+v_min3_i16_e64_dpp v5, v1, v2, exec_lo op_sel:[1,0,0,0] dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x08,0x4a,0xd6,0xe9,0x04,0xfa,0x01,0x01,0x77,0x39,0x05]
+
+v_min3_i16_e64_dpp v5, v1, v2, null op_sel:[0,1,0,0] dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x10,0x4a,0xd6,0xe9,0x04,0xf2,0x01,0x01,0x77,0x39,0x05]
+
+v_min3_i16_e64_dpp v5, v1, v2, -1 op_sel:[0,0,1,0] dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x20,0x4a,0xd6,0xe9,0x04,0x06,0x03,0x01,0x77,0x39,0x05]
+
+v_min3_i16_e64_dpp v255, v255, v255, src_scc op_sel:[0,0,0,1] dpp8:[0,0,0,0,0,0,0,0] fi:1
+// GFX11: [0xff,0x40,0x4a,0xd6,0xea,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
+
+v_min3_u16_e64_dpp v5, v1, v2, exec_hi op_sel:[1,1,1,1] dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x78,0x4b,0xd6,0xe9,0x04,0xfe,0x01,0x01,0x77,0x39,0x05]
+
+v_min3_u16_e64_dpp v5, v1, v2, exec_lo op_sel:[1,0,0,0] dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x08,0x4b,0xd6,0xe9,0x04,0xfa,0x01,0x01,0x77,0x39,0x05]
+
+v_min3_u16_e64_dpp v5, v1, v2, null op_sel:[0,1,0,0] dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x10,0x4b,0xd6,0xe9,0x04,0xf2,0x01,0x01,0x77,0x39,0x05]
+
+v_min3_u16_e64_dpp v5, v1, v2, -1 op_sel:[0,0,1,0] dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x20,0x4b,0xd6,0xe9,0x04,0x06,0x03,0x01,0x77,0x39,0x05]
+
+v_min3_u16_e64_dpp v255, v255, v255, src_scc op_sel:[0,0,0,1] dpp8:[0,0,0,0,0,0,0,0] fi:1
+// GFX11: [0xff,0x40,0x4b,0xd6,0xea,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
+
+v_pack_b32_f16_e64_dpp v5, -v1, |v2| op_sel:[1,0,0] dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x0a,0x11,0xd7,0xe9,0x04,0x02,0x20,0x01,0x77,0x39,0x05]
+
+v_pack_b32_f16_e64_dpp v255, -|v255|, -|v255| op_sel:[0,1,0] dpp8:[0,0,0,0,0,0,0,0] fi:1
+// GFX11: [0xff,0x13,0x11,0xd7,0xea,0xfe,0x03,0x60,0xff,0x00,0x00,0x00]
+
+v_sub_nc_i16_e64_dpp v5, v1, v2 op_sel:[1,1,1] dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x58,0x0e,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_sub_nc_i16_e64_dpp v5, v1, v2 op_sel:[1,0,0] dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x08,0x0e,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_sub_nc_i16_e64_dpp v5, v1, v2 op_sel:[0,1,0] dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x10,0x0e,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_sub_nc_i16_e64_dpp v255, v255, v255 op_sel:[0,0,1] clamp dpp8:[0,0,0,0,0,0,0,0] fi:1
+// GFX11: [0xff,0xc0,0x0e,0xd7,0xea,0xfe,0x03,0x00,0xff,0x00,0x00,0x00]
+
+v_sub_nc_u16_e64_dpp v5, v1, v2 op_sel:[1,1,1] dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x58,0x04,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_sub_nc_u16_e64_dpp v5, v1, v2 op_sel:[1,0,0] dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x08,0x04,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_sub_nc_u16_e64_dpp v5, v1, v2 op_sel:[0,1,0] dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: [0x05,0x10,0x04,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_sub_nc_u16_e64_dpp v255, v255, v255 op_sel:[0,0,1] clamp dpp8:[0,0,0,0,0,0,0,0] fi:1
+// GFX11: [0xff,0xc0,0x04,0xd7,0xea,0xfe,0x03,0x00,0xff,0x00,0x00,0x00]
+
+v_dot2_f16_f16_e64_dpp v0, v1, v2, v3 dpp8:[0,1,2,3,4,4,4,4]
+// GFX11: encoding: [0x00,0x00,0x66,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x88,0x46,0x92]
+
+v_dot2_f16_f16_e64_dpp v0, v1, v2, v3 op_sel:[1,1,0,0] dpp8:[0,1,2,3,4,4,4,4]
+// GFX11-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid op_sel operand
+
+v_dot2_f16_f16_e64_dpp v0, s1, v2, v3 dpp8:[0,1,2,3,4,4,4,4]
+// GFX11-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_dot2_f16_f16_e64_dpp v0, v1, s2, v3 dpp8:[0,1,2,3,4,4,4,4]
+// GFX11-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_dot2_f16_f16_e64_dpp v0, v1, v2, v3 op_sel:[0,0,1,1] dpp8:[0,1,2,3,4,4,4,4]
+// GFX11: encoding: [0x00,0x60,0x66,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x88,0x46,0x92]
+
+v_dot2_f16_f16_e64_dpp v0, |v1|, -v2, -|s3| op_sel:[0,0,1,1] dpp8:[0,1,2,3,4,4,4,4]
+// GFX11: encoding: [0x00,0x65,0x66,0xd6,0xe9,0x04,0x0e,0xc0,0x01,0x88,0x46,0x92]
+
+v_dot2_f16_f16_e64_dpp v5, v1, v2, 0.5 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: encoding: [0x05,0x00,0x66,0xd6,0xe9,0x04,0xc2,0x03,0x01,0x77,0x39,0x05]
+
+v_dot2_bf16_bf16_e64_dpp v0, v1, v2, v3 dpp8:[0,1,2,3,4,4,4,4]
+// GFX11: encoding: [0x00,0x00,0x67,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x88,0x46,0x92]
+
+v_dot2_bf16_bf16_e64_dpp v0, v1, v2, v3 op_sel:[1,1,0,0] dpp8:[0,1,2,3,4,4,4,4]
+// GFX11-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid op_sel operand
+
+v_dot2_bf16_bf16_e64_dpp v0, s1, v2, v3 dpp8:[0,1,2,3,4,4,4,4]
+// GFX11-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_dot2_bf16_bf16_e64_dpp v0, v1, s2, v3 dpp8:[0,1,2,3,4,4,4,4]
+// GFX11-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_dot2_bf16_bf16_e64_dpp v0, v1, v2, v3 op_sel:[0,0,1,1] dpp8:[0,1,2,3,4,4,4,4]
+// GFX11: encoding: [0x00,0x60,0x67,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x88,0x46,0x92]
+
+v_dot2_bf16_bf16_e64_dpp v0, |v1|, -v2, -|s3| op_sel:[0,0,1,1] dpp8:[0,1,2,3,4,4,4,4]
+// GFX11: encoding: [0x00,0x65,0x67,0xd6,0xe9,0x04,0x0e,0xc0,0x01,0x88,0x46,0x92]
+
+v_dot2_bf16_bf16_e64_dpp v5, v1, v2, 0 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: encoding: [0x05,0x00,0x67,0xd6,0xe9,0x04,0x02,0x02,0x01,0x77,0x39,0x05]
diff --git a/llvm/test/MC/AMDGPU/gfx11_asm_vop3_dpp8.s b/llvm/test/MC/AMDGPU/gfx11_asm_vop3_dpp8.s
index 2fb9566..446c083 100644
--- a/llvm/test/MC/AMDGPU/gfx11_asm_vop3_dpp8.s
+++ b/llvm/test/MC/AMDGPU/gfx11_asm_vop3_dpp8.s
@@ -1,7 +1,7 @@
-// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1100 -mattr=+wavefrontsize32 -show-encoding %s | FileCheck --check-prefixes=GFX11,W32 %s
-// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1100 -mattr=+wavefrontsize64 -show-encoding %s | FileCheck --check-prefixes=GFX11,W64 %s
-// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1100 -mattr=+wavefrontsize32 %s 2>&1 | FileCheck --check-prefixes=GFX11-ERR,W32-ERR --implicit-check-not=error: %s
-// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1100 -mattr=+wavefrontsize64 %s 2>&1 | FileCheck --check-prefixes=GFX11-ERR,W64-ERR --implicit-check-not=error: %s
+// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1100 -mattr=+wavefrontsize32,+real-true16 -show-encoding %s | FileCheck --check-prefixes=GFX11,W32 %s
+// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1100 -mattr=+wavefrontsize64,+real-true16 -show-encoding %s | FileCheck --check-prefixes=GFX11,W64 %s
+// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1100 -mattr=+wavefrontsize32,+real-true16 %s 2>&1 | FileCheck --check-prefixes=GFX11-ERR,W32-ERR --implicit-check-not=error: %s
+// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1100 -mattr=+wavefrontsize64,+real-true16 %s 2>&1 | FileCheck --check-prefixes=GFX11-ERR,W64-ERR --implicit-check-not=error: %s
v_add3_u32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
// GFX11: [0x05,0x00,0x55,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
diff --git a/llvm/test/MC/AMDGPU/gfx11_flat_instructions_err.s b/llvm/test/MC/AMDGPU/gfx11_flat_instructions_err.s
new file mode 100644
index 0000000..9e51b0b
--- /dev/null
+++ b/llvm/test/MC/AMDGPU/gfx11_flat_instructions_err.s
@@ -0,0 +1,253 @@
+// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1100 %s 2>&1 | FileCheck --check-prefixes=GFX11 --implicit-check-not=error: %s
+
+global_atomic_add_f32 v0, v2, null
+// GFX11: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
+
+global_atomic_add_f32 v0, v2, v4, null glc
+// GFX11: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
+
+global_atomic_add_u32 v0, v2, null
+// GFX11: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
+
+global_atomic_add_u32 v0, v2, v4, null glc
+// GFX11: :[[@LINE-1]]:35: error: invalid operand for instruction
+
+global_atomic_add_u64 v0, v[2:3], null
+// GFX11: :[[@LINE-1]]:35: error: invalid operand for instruction
+
+global_atomic_add_u64 v[0:1], v2, v[4:5], null
+// GFX11: :[[@LINE-1]]:43: error: invalid operand for instruction
+
+global_atomic_and_b32 v0, v2, null
+// GFX11: :[[@LINE-1]]:31: error: invalid operand for instruction
+
+global_atomic_and_b32 v0, v2, v4, null
+// GFX11: :[[@LINE-1]]:35: error: invalid operand for instruction
+
+global_atomic_and_b64 v0, v[2:3], null
+// GFX11: :[[@LINE-1]]:35: error: invalid operand for instruction
+
+global_atomic_and_b64 v[0:1], v2, v[4:5], null
+// GFX11: :[[@LINE-1]]:43: error: invalid operand for instruction
+
+global_atomic_cmpswap_b32 v0, v[2:3], null
+// GFX11: :[[@LINE-1]]:39: error: invalid operand for instruction
+
+global_atomic_cmpswap_b32 v0, v2, v[4:5], null
+// GFX11: :[[@LINE-1]]:43: error: invalid operand for instruction
+
+global_atomic_cmpswap_b64 v0, v[2:5], null
+// GFX11: :[[@LINE-1]]:39: error: invalid operand for instruction
+
+global_atomic_cmpswap_b64 v[0:1], v2, v[4:7], null
+// GFX11: :[[@LINE-1]]:47: error: invalid operand for instruction
+
+global_atomic_cmpswap_f32 v0, v[2:3], null
+// GFX11: :[[@LINE-1]]:39: error: invalid operand for instruction
+
+global_atomic_cmpswap_f32 v0, v2, v[4:5], null
+// GFX11: :[[@LINE-1]]:43: error: invalid operand for instruction
+
+global_atomic_csub_u32 v0, v2, null
+// GFX11: :[[@LINE-1]]:32: error: invalid operand for instruction
+
+global_atomic_csub_u32 v0, v2, v4, null
+// GFX11: :[[@LINE-1]]:36: error: invalid operand for instruction
+
+global_atomic_dec_u32 v0, v2, null
+// GFX11: :[[@LINE-1]]:31: error: invalid operand for instruction
+
+global_atomic_dec_u32 v0, v2, v4, null
+// GFX11: :[[@LINE-1]]:35: error: invalid operand for instruction
+
+global_atomic_dec_u64 v0, v[2:3], null
+// GFX11: :[[@LINE-1]]:35: error: invalid operand for instruction
+
+global_atomic_dec_u64 v[0:1], v2, v[4:5], null
+// GFX11: :[[@LINE-1]]:43: error: invalid operand for instruction
+
+global_atomic_inc_u32 v0, v2, null
+// GFX11: :[[@LINE-1]]:31: error: invalid operand for instruction
+
+global_atomic_inc_u32 v0, v2, v4, null
+// GFX11: :[[@LINE-1]]:35: error: invalid operand for instruction
+
+global_atomic_inc_u64 v0, v[2:3], null
+// GFX11: :[[@LINE-1]]:35: error: invalid operand for instruction
+
+global_atomic_inc_u64 v[0:1], v2, v[4:5], null
+// GFX11: :[[@LINE-1]]:43: error: invalid operand for instruction
+
+global_atomic_max_f32 v0, v2, null
+// GFX11: :[[@LINE-1]]:31: error: invalid operand for instruction
+
+global_atomic_max_f32 v0, v2, v4, null
+// GFX11: :[[@LINE-1]]:35: error: invalid operand for instruction
+
+global_atomic_max_i32 v0, v2, null
+// GFX11: :[[@LINE-1]]:31: error: invalid operand for instruction
+
+global_atomic_max_i32 v0, v2, v4, null
+// GFX11: :[[@LINE-1]]:35: error: invalid operand for instruction
+
+global_atomic_max_i64 v0, v[2:3], null
+// GFX11: :[[@LINE-1]]:35: error: invalid operand for instruction
+
+global_atomic_max_i64 v[0:1], v2, v[4:5], null
+// GFX11: :[[@LINE-1]]:43: error: invalid operand for instruction
+
+global_atomic_max_u32 v0, v2, null
+// GFX11: :[[@LINE-1]]:31: error: invalid operand for instruction
+
+global_atomic_max_u32 v0, v2, v4, null
+// GFX11: :[[@LINE-1]]:35: error: invalid operand for instruction
+
+global_atomic_max_u64 v0, v[2:3], null
+// GFX11: :[[@LINE-1]]:35: error: invalid operand for instruction
+
+global_atomic_max_u64 v[0:1], v2, v[4:5], null
+// GFX11: :[[@LINE-1]]:43: error: invalid operand for instruction
+
+global_atomic_min_f32 v0, v2, v4, null
+// GFX11: :[[@LINE-1]]:35: error: invalid operand for instruction
+
+global_atomic_min_f32 v0, v2, null
+// GFX11: :[[@LINE-1]]:31: error: invalid operand for instruction
+
+global_atomic_min_i32 v0, v2, v4, null
+// GFX11: :[[@LINE-1]]:35: error: invalid operand for instruction
+
+global_atomic_min_i32 v0, v2, null
+// GFX11: :[[@LINE-1]]:31: error: invalid operand for instruction
+
+global_atomic_min_i64 v0, v[2:3], null
+// GFX11: :[[@LINE-1]]:35: error: invalid operand for instruction
+
+global_atomic_min_i64 v[0:1], v2, v[4:5], null
+// GFX11: :[[@LINE-1]]:43: error: invalid operand for instruction
+
+global_atomic_min_u32 v0, v2, null
+// GFX11: :[[@LINE-1]]:31: error: invalid operand for instruction
+
+global_atomic_min_u32 v0, v2, v4, null
+// GFX11: :[[@LINE-1]]:35: error: invalid operand for instruction
+
+global_atomic_min_u64 v0, v[2:3], null
+// GFX11: :[[@LINE-1]]:35: error: invalid operand for instruction
+
+global_atomic_min_u64 v[0:1], v2, v[4:5], null
+// GFX11: :[[@LINE-1]]:43: error: invalid operand for instruction
+
+global_atomic_or_b32 v0, v2, null
+// GFX11: :[[@LINE-1]]:30: error: invalid operand for instruction
+
+global_atomic_or_b32 v0, v2, v4, null
+// GFX11: :[[@LINE-1]]:34: error: invalid operand for instruction
+
+global_atomic_or_b64 v0, v[2:3], null
+// GFX11: :[[@LINE-1]]:34: error: invalid operand for instruction
+
+global_atomic_or_b64 v[0:1], v2, v[4:5], null
+// GFX11: :[[@LINE-1]]:42: error: invalid operand for instruction
+
+global_atomic_sub_u32 v0, v2, null
+// GFX11: :[[@LINE-1]]:31: error: invalid operand for instruction
+
+global_atomic_sub_u32 v0, v2, v4, null
+// GFX11: :[[@LINE-1]]:35: error: invalid operand for instruction
+
+global_atomic_sub_u64 v0, v[2:3], null
+// GFX11: :[[@LINE-1]]:35: error: invalid operand for instruction
+
+global_atomic_sub_u64 v[0:1], v2, v[4:5], null
+// GFX11: :[[@LINE-1]]:43: error: invalid operand for instruction
+
+global_atomic_swap_b32 v0, v2, null
+// GFX11: :[[@LINE-1]]:32: error: invalid operand for instruction
+
+global_atomic_swap_b32 v0, v2, v4, null
+// GFX11: :[[@LINE-1]]:36: error: invalid operand for instruction
+
+global_atomic_swap_b64 v0, v[2:3], null
+// GFX11: :[[@LINE-1]]:36: error: invalid operand for instruction
+
+global_atomic_swap_b64 v[0:1], v2, v[4:5], null
+// GFX11: :[[@LINE-1]]:44: error: invalid operand for instruction
+
+global_atomic_xor_b32 v0, v2, null
+// GFX11: :[[@LINE-1]]:31: error: invalid operand for instruction
+
+global_atomic_xor_b32 v0, v2, v4, null
+// GFX11: :[[@LINE-1]]:35: error: invalid operand for instruction
+
+global_atomic_xor_b64 v0, v[2:3], null
+// GFX11: :[[@LINE-1]]:35: error: invalid operand for instruction
+
+global_atomic_xor_b64 v[0:1], v2, v[4:5], null
+// GFX11: :[[@LINE-1]]:43: error: invalid operand for instruction
+
+global_load_b128 v[0:3], v4, null
+// GFX11: :[[@LINE-1]]:30: error: invalid operand for instruction
+
+global_load_b32 v0, v4, null
+// GFX11: :[[@LINE-1]]:25: error: invalid operand for instruction
+
+global_load_b64 v[0:1], v4, null
+// GFX11: :[[@LINE-1]]:29: error: invalid operand for instruction
+
+global_load_b96 v[0:2], v4, null
+// GFX11: :[[@LINE-1]]:29: error: invalid operand for instruction
+
+global_load_d16_b16 v0, v2, null
+// GFX11: :[[@LINE-1]]:29: error: invalid operand for instruction
+
+global_load_d16_hi_b16 v0, v2, null
+// GFX11: :[[@LINE-1]]:32: error: invalid operand for instruction
+
+global_load_d16_hi_i8 v0, v2, null
+// GFX11: :[[@LINE-1]]:31: error: invalid operand for instruction
+
+global_load_d16_hi_u8 v0, v2, null
+// GFX11: :[[@LINE-1]]:31: error: invalid operand for instruction
+
+global_load_d16_i8 v0, v2, null
+// GFX11: :[[@LINE-1]]:28: error: invalid operand for instruction
+
+global_load_d16_u8 v0, v2, null
+// GFX11: :[[@LINE-1]]:28: error: invalid operand for instruction
+
+global_load_i16 v0, v2, null
+// GFX11: :[[@LINE-1]]:25: error: invalid operand for instruction
+
+global_load_i8 v0, v2, null
+// GFX11: :[[@LINE-1]]:24: error: invalid operand for instruction
+
+global_load_u16 v0, v2, null
+// GFX11: :[[@LINE-1]]:25: error: invalid operand for instruction
+
+global_load_u8 v0, v2, null
+// GFX11: :[[@LINE-1]]:24: error: invalid operand for instruction
+
+global_store_b128 v0, v[2:5], null
+// GFX11: :[[@LINE-1]]:31: error: invalid operand for instruction
+
+global_store_b16 v0, v2, null
+// GFX11: :[[@LINE-1]]:26: error: invalid operand for instruction
+
+global_store_b32 v0, v2, null
+// GFX11: :[[@LINE-1]]:26: error: invalid operand for instruction
+
+global_store_b64 v0, v[2:3], null
+// GFX11: :[[@LINE-1]]:30: error: invalid operand for instruction
+
+global_store_b8 v0, v2, null
+// GFX11: :[[@LINE-1]]:25: error: invalid operand for instruction
+
+global_store_b96 v0, v[2:4], null
+// GFX11: :[[@LINE-1]]:30: error: invalid operand for instruction
+
+global_store_d16_hi_b16 v0, v2, null
+// GFX11: :[[@LINE-1]]:33: error: invalid operand for instruction
+
+global_store_d16_hi_b8 v0, v2, null
+// GFX11: :[[@LINE-1]]:32: error: invalid operand for instruction
diff --git a/llvm/test/MC/AMDGPU/gfx11_unsupported.s b/llvm/test/MC/AMDGPU/gfx11_unsupported.s
index 1e8d768..c565801 100644
--- a/llvm/test/MC/AMDGPU/gfx11_unsupported.s
+++ b/llvm/test/MC/AMDGPU/gfx11_unsupported.s
@@ -34,6 +34,9 @@ buffer_invl2
buffer_store_lds_dword s[4:7], -1 offset:4095 lds
// CHECK: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+buffer_wbinvl1
+// CHECK: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
buffer_wbinvl1_vol
// CHECK: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
@@ -2011,9 +2014,6 @@ s_cmp_neq_f16 s1, s2
s_cmp_nlt_f16 s1, s2
// CHECK: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
-s_singleuse_vdst 0x1234
-// CHECK: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
-
buffer_atomic_sub_clamp_u32 v5, off, s[8:11], s3 offset:0 glc
// CHECK: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
diff --git a/llvm/test/MC/AMDGPU/gfx12_asm_sopp.s b/llvm/test/MC/AMDGPU/gfx12_asm_sopp.s
index e986592..fdcabc4 100644
--- a/llvm/test/MC/AMDGPU/gfx12_asm_sopp.s
+++ b/llvm/test/MC/AMDGPU/gfx12_asm_sopp.s
@@ -69,15 +69,6 @@ s_wait_alu depctr_va_sdst(3)
s_wait_alu depctr_va_vdst(14) depctr_va_sdst(6) depctr_vm_vsrc(6)
// GFX12: encoding: [0x9b,0xed,0x88,0xbf]
-s_singleuse_vdst 0x0000
-// GFX12: encoding: [0x00,0x00,0x93,0xbf]
-
-s_singleuse_vdst 0xffff
-// GFX12: encoding: [0xff,0xff,0x93,0xbf]
-
-s_singleuse_vdst 0x1234
-// GFX12: encoding: [0x34,0x12,0x93,0xbf]
-
s_barrier_wait 0xffff
// GFX12: encoding: [0xff,0xff,0x94,0xbf]
diff --git a/llvm/test/MC/AMDGPU/gfx12_asm_vop3-fake16.s b/llvm/test/MC/AMDGPU/gfx12_asm_vop3-fake16.s
new file mode 100644
index 0000000..5329849
--- /dev/null
+++ b/llvm/test/MC/AMDGPU/gfx12_asm_vop3-fake16.s
@@ -0,0 +1,7294 @@
+// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1200 -mattr=+wavefrontsize32,-real-true16 -show-encoding %s | FileCheck --check-prefixes=GFX12,W32 %s
+// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1200 -mattr=+wavefrontsize64,-real-true16 -show-encoding %s | FileCheck --check-prefixes=GFX12,W64 %s
+// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1200 -mattr=+wavefrontsize32,-real-true16 %s 2>&1 | FileCheck --check-prefix=W32-ERR --implicit-check-not=error: %s
+// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1200 -mattr=+wavefrontsize64,-real-true16 %s 2>&1 | FileCheck --check-prefix=W64-ERR --implicit-check-not=error: %s
+
+v_add3_u32 v5, v1, v2, s3
+// GFX12: encoding: [0x05,0x00,0x55,0xd6,0x01,0x05,0x0e,0x00]
+
+v_add3_u32 v5, v255, s2, s105
+// GFX12: encoding: [0x05,0x00,0x55,0xd6,0xff,0x05,0xa4,0x01]
+
+v_add3_u32 v5, s1, v255, exec_hi
+// GFX12: encoding: [0x05,0x00,0x55,0xd6,0x01,0xfe,0xff,0x01]
+
+v_add3_u32 v5, s105, s105, exec_lo
+// GFX12: encoding: [0x05,0x00,0x55,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_add3_u32 v5, vcc_lo, ttmp15, v3
+// GFX12: encoding: [0x05,0x00,0x55,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_add3_u32 v5, vcc_hi, 0xaf123456, v255
+// GFX12: encoding: [0x05,0x00,0x55,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+v_add3_u32 v5, ttmp15, src_scc, ttmp15
+// GFX12: encoding: [0x05,0x00,0x55,0xd6,0x7b,0xfa,0xed,0x01]
+
+v_add3_u32 v5, m0, 0.5, m0
+// GFX12: encoding: [0x05,0x00,0x55,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_add3_u32 v5, exec_lo, -1, vcc_hi
+// GFX12: encoding: [0x05,0x00,0x55,0xd6,0x7e,0x82,0xad,0x01]
+
+v_add3_u32 v5, exec_hi, null, vcc_lo
+// GFX12: encoding: [0x05,0x00,0x55,0xd6,0x7f,0xf8,0xa8,0x01]
+
+v_add3_u32 v5, null, exec_lo, 0xaf123456
+// GFX12: encoding: [0x05,0x00,0x55,0xd6,0x7c,0xfc,0xfc,0x03,0x56,0x34,0x12,0xaf]
+
+v_add3_u32 v5, -1, exec_hi, src_scc
+// GFX12: encoding: [0x05,0x00,0x55,0xd6,0xc1,0xfe,0xf4,0x03]
+
+v_add3_u32 v5, 0.5, m0, 0.5
+// GFX12: encoding: [0x05,0x00,0x55,0xd6,0xf0,0xfa,0xc0,0x03]
+
+v_add3_u32 v5, src_scc, vcc_lo, -1
+// GFX12: encoding: [0x05,0x00,0x55,0xd6,0xfd,0xd4,0x04,0x03]
+
+v_add3_u32 v255, 0xaf123456, vcc_hi, null
+// GFX12: encoding: [0xff,0x00,0x55,0xd6,0xff,0xd6,0xf0,0x01,0x56,0x34,0x12,0xaf]
+
+v_add_co_u32 v5, s6, v1, v2
+// W32: encoding: [0x05,0x06,0x00,0xd7,0x01,0x05,0x02,0x00]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32 v5, s6, v255, v255
+// W32: encoding: [0x05,0x06,0x00,0xd7,0xff,0xff,0x03,0x00]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32 v5, s6, s1, s2
+// W32: encoding: [0x05,0x06,0x00,0xd7,0x01,0x04,0x00,0x00]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32 v5, s6, s105, s105
+// W32: encoding: [0x05,0x06,0x00,0xd7,0x69,0xd2,0x00,0x00]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32 v5, s6, vcc_lo, ttmp15
+// W32: encoding: [0x05,0x06,0x00,0xd7,0x6a,0xf6,0x00,0x00]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32 v5, s6, vcc_hi, 0xaf123456
+// W32: encoding: [0x05,0x06,0x00,0xd7,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32 v5, s6, ttmp15, src_scc
+// W32: encoding: [0x05,0x06,0x00,0xd7,0x7b,0xfa,0x01,0x00]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32 v5, s6, m0, 0.5
+// W32: encoding: [0x05,0x06,0x00,0xd7,0x7d,0xe0,0x01,0x00]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32 v5, s6, exec_lo, -1
+// W32: encoding: [0x05,0x06,0x00,0xd7,0x7e,0x82,0x01,0x00]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32 v5, s6, exec_hi, null
+// W32: encoding: [0x05,0x06,0x00,0xd7,0x7f,0xf8,0x00,0x00]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32 v5, s105, null, exec_lo
+// W32: encoding: [0x05,0x69,0x00,0xd7,0x7c,0xfc,0x00,0x00]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32 v5, vcc_lo, -1, exec_hi
+// W32: encoding: [0x05,0x6a,0x00,0xd7,0xc1,0xfe,0x00,0x00]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32 v5, vcc_hi, 0.5, m0
+// W32: encoding: [0x05,0x6b,0x00,0xd7,0xf0,0xfa,0x00,0x00]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32 v5, ttmp15, src_scc, vcc_lo
+// W32: encoding: [0x05,0x7b,0x00,0xd7,0xfd,0xd4,0x00,0x00]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32 v5, s[12:13], v1, v2
+// W64: encoding: [0x05,0x0c,0x00,0xd7,0x01,0x05,0x02,0x00]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32 v5, s[12:13], v255, v255
+// W64: encoding: [0x05,0x0c,0x00,0xd7,0xff,0xff,0x03,0x00]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32 v5, s[12:13], s1, s2
+// W64: encoding: [0x05,0x0c,0x00,0xd7,0x01,0x04,0x00,0x00]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32 v5, s[12:13], s105, s105
+// W64: encoding: [0x05,0x0c,0x00,0xd7,0x69,0xd2,0x00,0x00]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32 v5, s[12:13], vcc_lo, ttmp15
+// W64: encoding: [0x05,0x0c,0x00,0xd7,0x6a,0xf6,0x00,0x00]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32 v5, s[12:13], vcc_hi, 0xaf123456
+// W64: encoding: [0x05,0x0c,0x00,0xd7,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32 v5, s[12:13], ttmp15, src_scc
+// W64: encoding: [0x05,0x0c,0x00,0xd7,0x7b,0xfa,0x01,0x00]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32 v5, s[12:13], m0, 0.5
+// W64: encoding: [0x05,0x0c,0x00,0xd7,0x7d,0xe0,0x01,0x00]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32 v5, s[12:13], exec_lo, -1
+// W64: encoding: [0x05,0x0c,0x00,0xd7,0x7e,0x82,0x01,0x00]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32 v5, s[12:13], exec_hi, null
+// W64: encoding: [0x05,0x0c,0x00,0xd7,0x7f,0xf8,0x00,0x00]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32 v5, s[12:13], null, exec_lo
+// W64: encoding: [0x05,0x0c,0x00,0xd7,0x7c,0xfc,0x00,0x00]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32 v5, s[104:105], -1, exec_hi
+// W64: encoding: [0x05,0x68,0x00,0xd7,0xc1,0xfe,0x00,0x00]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32 v5, vcc, 0.5, m0
+// W64: encoding: [0x05,0x6a,0x00,0xd7,0xf0,0xfa,0x00,0x00]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: operands are not valid for this GPU or mode
+
+v_add_co_u32 v5, ttmp[14:15], src_scc, vcc_lo
+// W64: encoding: [0x05,0x7a,0x00,0xd7,0xfd,0xd4,0x00,0x00]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32 v255, null, 0xaf123456, vcc_hi clamp
+// GFX12: encoding: [0xff,0xfc,0x00,0xd7,0xff,0xd6,0x00,0x00,0x56,0x34,0x12,0xaf]
+
+v_add_lshl_u32 v5, v1, v2, s3
+// GFX12: encoding: [0x05,0x00,0x47,0xd6,0x01,0x05,0x0e,0x00]
+
+v_add_lshl_u32 v5, v255, s2, s105
+// GFX12: encoding: [0x05,0x00,0x47,0xd6,0xff,0x05,0xa4,0x01]
+
+v_add_lshl_u32 v5, s1, v255, exec_hi
+// GFX12: encoding: [0x05,0x00,0x47,0xd6,0x01,0xfe,0xff,0x01]
+
+v_add_lshl_u32 v5, s105, s105, exec_lo
+// GFX12: encoding: [0x05,0x00,0x47,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_add_lshl_u32 v5, vcc_lo, ttmp15, v3
+// GFX12: encoding: [0x05,0x00,0x47,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_add_lshl_u32 v5, vcc_hi, 0xaf123456, v255
+// GFX12: encoding: [0x05,0x00,0x47,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+v_add_lshl_u32 v5, ttmp15, src_scc, ttmp15
+// GFX12: encoding: [0x05,0x00,0x47,0xd6,0x7b,0xfa,0xed,0x01]
+
+v_add_lshl_u32 v5, m0, 0.5, m0
+// GFX12: encoding: [0x05,0x00,0x47,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_add_lshl_u32 v5, exec_lo, -1, vcc_hi
+// GFX12: encoding: [0x05,0x00,0x47,0xd6,0x7e,0x82,0xad,0x01]
+
+v_add_lshl_u32 v5, exec_hi, null, vcc_lo
+// GFX12: encoding: [0x05,0x00,0x47,0xd6,0x7f,0xf8,0xa8,0x01]
+
+v_add_lshl_u32 v5, null, exec_lo, 0xaf123456
+// GFX12: encoding: [0x05,0x00,0x47,0xd6,0x7c,0xfc,0xfc,0x03,0x56,0x34,0x12,0xaf]
+
+v_add_lshl_u32 v5, -1, exec_hi, src_scc
+// GFX12: encoding: [0x05,0x00,0x47,0xd6,0xc1,0xfe,0xf4,0x03]
+
+v_add_lshl_u32 v5, 0.5, m0, 0.5
+// GFX12: encoding: [0x05,0x00,0x47,0xd6,0xf0,0xfa,0xc0,0x03]
+
+v_add_lshl_u32 v5, src_scc, vcc_lo, -1
+// GFX12: encoding: [0x05,0x00,0x47,0xd6,0xfd,0xd4,0x04,0x03]
+
+v_add_lshl_u32 v255, 0xaf123456, vcc_hi, null
+// GFX12: encoding: [0xff,0x00,0x47,0xd6,0xff,0xd6,0xf0,0x01,0x56,0x34,0x12,0xaf]
+
+v_add_nc_i16 v5, v1, v2
+// GFX12: encoding: [0x05,0x00,0x0d,0xd7,0x01,0x05,0x02,0x00]
+
+v_add_nc_i16 v5, v255, v255
+// GFX12: encoding: [0x05,0x00,0x0d,0xd7,0xff,0xff,0x03,0x00]
+
+v_add_nc_i16 v5, s1, s2
+// GFX12: encoding: [0x05,0x00,0x0d,0xd7,0x01,0x04,0x00,0x00]
+
+v_add_nc_i16 v5, s105, s105
+// GFX12: encoding: [0x05,0x00,0x0d,0xd7,0x69,0xd2,0x00,0x00]
+
+v_add_nc_i16 v5, vcc_lo, ttmp15
+// GFX12: encoding: [0x05,0x00,0x0d,0xd7,0x6a,0xf6,0x00,0x00]
+
+v_add_nc_i16 v5, vcc_hi, 0xfe0b
+// GFX12: encoding: [0x05,0x00,0x0d,0xd7,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+v_add_nc_i16 v5, ttmp15, src_scc
+// GFX12: encoding: [0x05,0x00,0x0d,0xd7,0x7b,0xfa,0x01,0x00]
+
+v_add_nc_i16 v5, m0, 0.5
+// GFX12: encoding: [0x05,0x00,0x0d,0xd7,0x7d,0xe0,0x01,0x00]
+
+v_add_nc_i16 v5, exec_lo, -1
+// GFX12: encoding: [0x05,0x00,0x0d,0xd7,0x7e,0x82,0x01,0x00]
+
+v_add_nc_i16 v5, exec_hi, null
+// GFX12: encoding: [0x05,0x00,0x0d,0xd7,0x7f,0xf8,0x00,0x00]
+
+v_add_nc_i16 v5, null, exec_lo op_sel:[1,1,1]
+// GFX12: encoding: [0x05,0x58,0x0d,0xd7,0x7c,0xfc,0x00,0x00]
+
+v_add_nc_i16 v5, -1, exec_hi op_sel:[0,0,0]
+// GFX12: encoding: [0x05,0x00,0x0d,0xd7,0xc1,0xfe,0x00,0x00]
+
+v_add_nc_i16 v5, 0.5, m0 op_sel:[1,0,0]
+// GFX12: encoding: [0x05,0x08,0x0d,0xd7,0xf0,0xfa,0x00,0x00]
+
+v_add_nc_i16 v5, src_scc, vcc_lo op_sel:[0,1,0]
+// GFX12: encoding: [0x05,0x10,0x0d,0xd7,0xfd,0xd4,0x00,0x00]
+
+v_add_nc_i16 v255, 0xfe0b, vcc_hi op_sel:[0,0,1] clamp
+// GFX12: encoding: [0xff,0xc0,0x0d,0xd7,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
+
+v_add_nc_i32 v5, v1, v2
+// GFX12: encoding: [0x05,0x00,0x26,0xd7,0x01,0x05,0x02,0x00]
+
+v_add_nc_i32 v5, v255, v255
+// GFX12: encoding: [0x05,0x00,0x26,0xd7,0xff,0xff,0x03,0x00]
+
+v_add_nc_i32 v5, s1, s2
+// GFX12: encoding: [0x05,0x00,0x26,0xd7,0x01,0x04,0x00,0x00]
+
+v_add_nc_i32 v5, s105, s105
+// GFX12: encoding: [0x05,0x00,0x26,0xd7,0x69,0xd2,0x00,0x00]
+
+v_add_nc_i32 v5, vcc_lo, ttmp15
+// GFX12: encoding: [0x05,0x00,0x26,0xd7,0x6a,0xf6,0x00,0x00]
+
+v_add_nc_i32 v5, vcc_hi, 0xaf123456
+// GFX12: encoding: [0x05,0x00,0x26,0xd7,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_add_nc_i32 v5, ttmp15, src_scc
+// GFX12: encoding: [0x05,0x00,0x26,0xd7,0x7b,0xfa,0x01,0x00]
+
+v_add_nc_i32 v5, m0, 0.5
+// GFX12: encoding: [0x05,0x00,0x26,0xd7,0x7d,0xe0,0x01,0x00]
+
+v_add_nc_i32 v5, exec_lo, -1
+// GFX12: encoding: [0x05,0x00,0x26,0xd7,0x7e,0x82,0x01,0x00]
+
+v_add_nc_i32 v5, exec_hi, null
+// GFX12: encoding: [0x05,0x00,0x26,0xd7,0x7f,0xf8,0x00,0x00]
+
+v_add_nc_i32 v5, null, exec_lo
+// GFX12: encoding: [0x05,0x00,0x26,0xd7,0x7c,0xfc,0x00,0x00]
+
+v_add_nc_i32 v5, -1, exec_hi
+// GFX12: encoding: [0x05,0x00,0x26,0xd7,0xc1,0xfe,0x00,0x00]
+
+v_add_nc_i32 v5, 0.5, m0
+// GFX12: encoding: [0x05,0x00,0x26,0xd7,0xf0,0xfa,0x00,0x00]
+
+v_add_nc_i32 v5, src_scc, vcc_lo
+// GFX12: encoding: [0x05,0x00,0x26,0xd7,0xfd,0xd4,0x00,0x00]
+
+v_add_nc_i32 v255, 0xaf123456, vcc_hi clamp
+// GFX12: encoding: [0xff,0x80,0x26,0xd7,0xff,0xd6,0x00,0x00,0x56,0x34,0x12,0xaf]
+
+v_add_nc_u16 v5, v1, v2
+// GFX12: encoding: [0x05,0x00,0x03,0xd7,0x01,0x05,0x02,0x00]
+
+v_add_nc_u16 v5, v255, v255
+// GFX12: encoding: [0x05,0x00,0x03,0xd7,0xff,0xff,0x03,0x00]
+
+v_add_nc_u16 v5, s1, s2
+// GFX12: encoding: [0x05,0x00,0x03,0xd7,0x01,0x04,0x00,0x00]
+
+v_add_nc_u16 v5, s105, s105
+// GFX12: encoding: [0x05,0x00,0x03,0xd7,0x69,0xd2,0x00,0x00]
+
+v_add_nc_u16 v5, vcc_lo, ttmp15
+// GFX12: encoding: [0x05,0x00,0x03,0xd7,0x6a,0xf6,0x00,0x00]
+
+v_add_nc_u16 v5, vcc_hi, 0xfe0b
+// GFX12: encoding: [0x05,0x00,0x03,0xd7,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+v_add_nc_u16 v5, ttmp15, src_scc
+// GFX12: encoding: [0x05,0x00,0x03,0xd7,0x7b,0xfa,0x01,0x00]
+
+v_add_nc_u16 v5, m0, 0.5
+// GFX12: encoding: [0x05,0x00,0x03,0xd7,0x7d,0xe0,0x01,0x00]
+
+v_add_nc_u16 v5, exec_lo, -1
+// GFX12: encoding: [0x05,0x00,0x03,0xd7,0x7e,0x82,0x01,0x00]
+
+v_add_nc_u16 v5, exec_hi, null
+// GFX12: encoding: [0x05,0x00,0x03,0xd7,0x7f,0xf8,0x00,0x00]
+
+v_add_nc_u16 v5, null, exec_lo op_sel:[1,1,1]
+// GFX12: encoding: [0x05,0x58,0x03,0xd7,0x7c,0xfc,0x00,0x00]
+
+v_add_nc_u16 v5, -1, exec_hi op_sel:[0,0,0]
+// GFX12: encoding: [0x05,0x00,0x03,0xd7,0xc1,0xfe,0x00,0x00]
+
+v_add_nc_u16 v5, 0.5, m0 op_sel:[1,0,0]
+// GFX12: encoding: [0x05,0x08,0x03,0xd7,0xf0,0xfa,0x00,0x00]
+
+v_add_nc_u16 v5, src_scc, vcc_lo op_sel:[0,1,0]
+// GFX12: encoding: [0x05,0x10,0x03,0xd7,0xfd,0xd4,0x00,0x00]
+
+v_add_nc_u16 v255, 0xfe0b, vcc_hi op_sel:[0,0,1] clamp
+// GFX12: encoding: [0xff,0xc0,0x03,0xd7,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
+
+v_alignbit_b32 v5, v1, v2, s3
+// GFX12: encoding: [0x05,0x00,0x16,0xd6,0x01,0x05,0x0e,0x00]
+
+v_alignbit_b32 v5, v255, s2, s3
+// GFX12: encoding: [0x05,0x00,0x16,0xd6,0xff,0x05,0x0c,0x00]
+
+v_alignbit_b32 v5, s1, v255, s3
+// GFX12: encoding: [0x05,0x00,0x16,0xd6,0x01,0xfe,0x0f,0x00]
+
+v_alignbit_b32 v5, s105, s105, s105
+// GFX12: encoding: [0x05,0x00,0x16,0xd6,0x69,0xd2,0xa4,0x01]
+
+v_alignbit_b32 v5, vcc_lo, ttmp15, v3
+// GFX12: encoding: [0x05,0x00,0x16,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_alignbit_b32 v5, vcc_hi, 0xaf123456, v255
+// GFX12: encoding: [0x05,0x00,0x16,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+v_alignbit_b32 v5, ttmp15, src_scc, ttmp15
+// GFX12: encoding: [0x05,0x00,0x16,0xd6,0x7b,0xfa,0xed,0x01]
+
+v_alignbit_b32 v5, m0, 0.5, exec_lo
+// GFX12: encoding: [0x05,0x00,0x16,0xd6,0x7d,0xe0,0xf9,0x01]
+
+v_alignbit_b32 v5, exec_lo, -1, m0
+// GFX12: encoding: [0x05,0x00,0x16,0xd6,0x7e,0x82,0xf5,0x01]
+
+v_alignbit_b32 v5, exec_hi, null, vcc_hi
+// GFX12: encoding: [0x05,0x00,0x16,0xd6,0x7f,0xf8,0xac,0x01]
+
+v_alignbit_b32 v5, null, exec_lo, vcc_lo
+// GFX12: encoding: [0x05,0x00,0x16,0xd6,0x7c,0xfc,0xa8,0x01]
+
+v_alignbit_b32 v5, -1, exec_hi, src_scc
+// GFX12: encoding: [0x05,0x00,0x16,0xd6,0xc1,0xfe,0xf4,0x03]
+
+v_alignbit_b32 v5, 0.5, m0, exec_hi
+// GFX12: encoding: [0x05,0x00,0x16,0xd6,0xf0,0xfa,0xfc,0x01]
+
+v_alignbit_b32 v5, src_scc, vcc_lo, -1
+// GFX12: encoding: [0x05,0x00,0x16,0xd6,0xfd,0xd4,0x04,0x03]
+
+v_alignbit_b32 v255, 0xaf123456, vcc_hi, null
+// GFX12: encoding: [0xff,0x00,0x16,0xd6,0xff,0xd6,0xf0,0x01,0x56,0x34,0x12,0xaf]
+
+v_alignbyte_b32 v5, v1, v2, s3
+// GFX12: encoding: [0x05,0x00,0x17,0xd6,0x01,0x05,0x0e,0x00]
+
+v_alignbyte_b32 v5, v255, s2, s3
+// GFX12: encoding: [0x05,0x00,0x17,0xd6,0xff,0x05,0x0c,0x00]
+
+v_alignbyte_b32 v5, s1, v255, s3
+// GFX12: encoding: [0x05,0x00,0x17,0xd6,0x01,0xfe,0x0f,0x00]
+
+v_alignbyte_b32 v5, s105, s105, s105
+// GFX12: encoding: [0x05,0x00,0x17,0xd6,0x69,0xd2,0xa4,0x01]
+
+v_alignbyte_b32 v5, vcc_lo, ttmp15, v3
+// GFX12: encoding: [0x05,0x00,0x17,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_alignbyte_b32 v5, vcc_hi, 0xaf123456, v255
+// GFX12: encoding: [0x05,0x00,0x17,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+v_alignbyte_b32 v5, ttmp15, src_scc, ttmp15
+// GFX12: encoding: [0x05,0x00,0x17,0xd6,0x7b,0xfa,0xed,0x01]
+
+v_alignbyte_b32 v5, m0, 0.5, exec_lo
+// GFX12: encoding: [0x05,0x00,0x17,0xd6,0x7d,0xe0,0xf9,0x01]
+
+v_alignbyte_b32 v5, exec_lo, -1, m0
+// GFX12: encoding: [0x05,0x00,0x17,0xd6,0x7e,0x82,0xf5,0x01]
+
+v_alignbyte_b32 v5, exec_hi, null, vcc_hi
+// GFX12: encoding: [0x05,0x00,0x17,0xd6,0x7f,0xf8,0xac,0x01]
+
+v_alignbyte_b32 v5, null, exec_lo, vcc_lo
+// GFX12: encoding: [0x05,0x00,0x17,0xd6,0x7c,0xfc,0xa8,0x01]
+
+v_alignbyte_b32 v5, -1, exec_hi, src_scc
+// GFX12: encoding: [0x05,0x00,0x17,0xd6,0xc1,0xfe,0xf4,0x03]
+
+v_alignbyte_b32 v5, 0.5, m0, exec_hi
+// GFX12: encoding: [0x05,0x00,0x17,0xd6,0xf0,0xfa,0xfc,0x01]
+
+v_alignbyte_b32 v5, src_scc, vcc_lo, -1
+// GFX12: encoding: [0x05,0x00,0x17,0xd6,0xfd,0xd4,0x04,0x03]
+
+v_alignbyte_b32 v255, 0xaf123456, vcc_hi, null
+// GFX12: encoding: [0xff,0x00,0x17,0xd6,0xff,0xd6,0xf0,0x01,0x56,0x34,0x12,0xaf]
+
+v_and_b16 v5, v1, v2
+// GFX12: encoding: [0x05,0x00,0x62,0xd7,0x01,0x05,0x02,0x00]
+
+v_and_b16 v5, v255, v255
+// GFX12: encoding: [0x05,0x00,0x62,0xd7,0xff,0xff,0x03,0x00]
+
+v_and_b16 v5, s1, s2
+// GFX12: encoding: [0x05,0x00,0x62,0xd7,0x01,0x04,0x00,0x00]
+
+v_and_b16 v5, s105, s105
+// GFX12: encoding: [0x05,0x00,0x62,0xd7,0x69,0xd2,0x00,0x00]
+
+v_and_b16 v5, vcc_lo, ttmp15
+// GFX12: encoding: [0x05,0x00,0x62,0xd7,0x6a,0xf6,0x00,0x00]
+
+v_and_b16 v5, vcc_hi, 0xfe0b
+// GFX12: encoding: [0x05,0x00,0x62,0xd7,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+v_and_b16 v5, ttmp15, src_scc
+// GFX12: encoding: [0x05,0x00,0x62,0xd7,0x7b,0xfa,0x01,0x00]
+
+v_and_b16 v5, m0, 0.5
+// GFX12: encoding: [0x05,0x00,0x62,0xd7,0x7d,0xe0,0x01,0x00]
+
+v_and_b16 v5, exec_lo, -1
+// GFX12: encoding: [0x05,0x00,0x62,0xd7,0x7e,0x82,0x01,0x00]
+
+v_and_b16 v5, exec_hi, null
+// GFX12: encoding: [0x05,0x00,0x62,0xd7,0x7f,0xf8,0x00,0x00]
+
+v_and_b16 v5, null, exec_lo
+// GFX12: encoding: [0x05,0x00,0x62,0xd7,0x7c,0xfc,0x00,0x00]
+
+v_and_b16 v5, -1, exec_hi
+// GFX12: encoding: [0x05,0x00,0x62,0xd7,0xc1,0xfe,0x00,0x00]
+
+v_and_b16 v5, 0.5, m0
+// GFX12: encoding: [0x05,0x00,0x62,0xd7,0xf0,0xfa,0x00,0x00]
+
+v_and_b16 v5, src_scc, vcc_lo
+// GFX12: encoding: [0x05,0x00,0x62,0xd7,0xfd,0xd4,0x00,0x00]
+
+v_and_b16 v255, 0xfe0b, vcc_hi
+// GFX12: encoding: [0xff,0x00,0x62,0xd7,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
+
+v_and_or_b32 v5, v1, v2, s3
+// GFX12: encoding: [0x05,0x00,0x57,0xd6,0x01,0x05,0x0e,0x00]
+
+v_and_or_b32 v5, v255, s2, s105
+// GFX12: encoding: [0x05,0x00,0x57,0xd6,0xff,0x05,0xa4,0x01]
+
+v_and_or_b32 v5, s1, v255, exec_hi
+// GFX12: encoding: [0x05,0x00,0x57,0xd6,0x01,0xfe,0xff,0x01]
+
+v_and_or_b32 v5, s105, s105, exec_lo
+// GFX12: encoding: [0x05,0x00,0x57,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_and_or_b32 v5, vcc_lo, ttmp15, v3
+// GFX12: encoding: [0x05,0x00,0x57,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_and_or_b32 v5, vcc_hi, 0xaf123456, v255
+// GFX12: encoding: [0x05,0x00,0x57,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+v_and_or_b32 v5, ttmp15, src_scc, ttmp15
+// GFX12: encoding: [0x05,0x00,0x57,0xd6,0x7b,0xfa,0xed,0x01]
+
+v_and_or_b32 v5, m0, 0.5, m0
+// GFX12: encoding: [0x05,0x00,0x57,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_and_or_b32 v5, exec_lo, -1, vcc_hi
+// GFX12: encoding: [0x05,0x00,0x57,0xd6,0x7e,0x82,0xad,0x01]
+
+v_and_or_b32 v5, exec_hi, null, vcc_lo
+// GFX12: encoding: [0x05,0x00,0x57,0xd6,0x7f,0xf8,0xa8,0x01]
+
+v_and_or_b32 v5, null, exec_lo, 0xaf123456
+// GFX12: encoding: [0x05,0x00,0x57,0xd6,0x7c,0xfc,0xfc,0x03,0x56,0x34,0x12,0xaf]
+
+v_and_or_b32 v5, -1, exec_hi, src_scc
+// GFX12: encoding: [0x05,0x00,0x57,0xd6,0xc1,0xfe,0xf4,0x03]
+
+v_and_or_b32 v5, 0.5, m0, 0.5
+// GFX12: encoding: [0x05,0x00,0x57,0xd6,0xf0,0xfa,0xc0,0x03]
+
+v_and_or_b32 v5, src_scc, vcc_lo, -1
+// GFX12: encoding: [0x05,0x00,0x57,0xd6,0xfd,0xd4,0x04,0x03]
+
+v_and_or_b32 v255, 0xaf123456, vcc_hi, null
+// GFX12: encoding: [0xff,0x00,0x57,0xd6,0xff,0xd6,0xf0,0x01,0x56,0x34,0x12,0xaf]
+
+v_ashrrev_i16 v5, v1, v2
+// GFX12: encoding: [0x05,0x00,0x3a,0xd7,0x01,0x05,0x02,0x00]
+
+v_ashrrev_i16 v5, v255, v255
+// GFX12: encoding: [0x05,0x00,0x3a,0xd7,0xff,0xff,0x03,0x00]
+
+v_ashrrev_i16 v5, s1, s2
+// GFX12: encoding: [0x05,0x00,0x3a,0xd7,0x01,0x04,0x00,0x00]
+
+v_ashrrev_i16 v5, s105, s105
+// GFX12: encoding: [0x05,0x00,0x3a,0xd7,0x69,0xd2,0x00,0x00]
+
+v_ashrrev_i16 v5, vcc_lo, ttmp15
+// GFX12: encoding: [0x05,0x00,0x3a,0xd7,0x6a,0xf6,0x00,0x00]
+
+v_ashrrev_i16 v5, vcc_hi, 0xfe0b
+// GFX12: encoding: [0x05,0x00,0x3a,0xd7,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+v_ashrrev_i16 v5, ttmp15, src_scc
+// GFX12: encoding: [0x05,0x00,0x3a,0xd7,0x7b,0xfa,0x01,0x00]
+
+v_ashrrev_i16 v5, m0, 0.5
+// GFX12: encoding: [0x05,0x00,0x3a,0xd7,0x7d,0xe0,0x01,0x00]
+
+v_ashrrev_i16 v5, exec_lo, -1
+// GFX12: encoding: [0x05,0x00,0x3a,0xd7,0x7e,0x82,0x01,0x00]
+
+v_ashrrev_i16 v5, exec_hi, null
+// GFX12: encoding: [0x05,0x00,0x3a,0xd7,0x7f,0xf8,0x00,0x00]
+
+v_ashrrev_i16 v5, null, exec_lo
+// GFX12: encoding: [0x05,0x00,0x3a,0xd7,0x7c,0xfc,0x00,0x00]
+
+v_ashrrev_i16 v5, -1, exec_hi
+// GFX12: encoding: [0x05,0x00,0x3a,0xd7,0xc1,0xfe,0x00,0x00]
+
+v_ashrrev_i16 v5, 0.5, m0
+// GFX12: encoding: [0x05,0x00,0x3a,0xd7,0xf0,0xfa,0x00,0x00]
+
+v_ashrrev_i16 v5, src_scc, vcc_lo
+// GFX12: encoding: [0x05,0x00,0x3a,0xd7,0xfd,0xd4,0x00,0x00]
+
+v_ashrrev_i16 v255, 0xfe0b, vcc_hi
+// GFX12: encoding: [0xff,0x00,0x3a,0xd7,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
+
+v_ashrrev_i64 v[5:6], v1, vcc
+// GFX12: encoding: [0x05,0x00,0x3e,0xd7,0x01,0xd5,0x00,0x00]
+
+v_ashrrev_i64 v[5:6], v255, exec
+// GFX12: encoding: [0x05,0x00,0x3e,0xd7,0xff,0xfd,0x00,0x00]
+
+v_ashrrev_i64 v[5:6], exec_lo, v[2:3]
+// GFX12: encoding: [0x05,0x00,0x3e,0xd7,0x7e,0x04,0x02,0x00]
+
+v_ashrrev_i64 v[5:6], exec_hi, v[254:255]
+// GFX12: encoding: [0x05,0x00,0x3e,0xd7,0x7f,0xfc,0x03,0x00]
+
+v_ashrrev_i64 v[5:6], null, null
+// GFX12: encoding: [0x05,0x00,0x3e,0xd7,0x7c,0xf8,0x00,0x00]
+
+v_ashrrev_i64 v[5:6], -1, -1
+// GFX12: encoding: [0x05,0x00,0x3e,0xd7,0xc1,0x82,0x01,0x00]
+
+v_ashrrev_i64 v[5:6], 0.5, 0xaf123456
+// GFX12: encoding: [0x05,0x00,0x3e,0xd7,0xf0,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_ashrrev_i64 v[5:6], src_scc, src_scc
+// GFX12: encoding: [0x05,0x00,0x3e,0xd7,0xfd,0xfa,0x01,0x00]
+
+v_ashrrev_i64 v[254:255], 0xaf123456, 0.5
+// GFX12: encoding: [0xfe,0x00,0x3e,0xd7,0xff,0xe0,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_bcnt_u32_b32 v5, v1, v2
+// GFX12: encoding: [0x05,0x00,0x1e,0xd7,0x01,0x05,0x02,0x00]
+
+v_bcnt_u32_b32 v5, v255, v255
+// GFX12: encoding: [0x05,0x00,0x1e,0xd7,0xff,0xff,0x03,0x00]
+
+v_bcnt_u32_b32 v5, s1, s2
+// GFX12: encoding: [0x05,0x00,0x1e,0xd7,0x01,0x04,0x00,0x00]
+
+v_bcnt_u32_b32 v5, s105, s105
+// GFX12: encoding: [0x05,0x00,0x1e,0xd7,0x69,0xd2,0x00,0x00]
+
+v_bcnt_u32_b32 v5, vcc_lo, ttmp15
+// GFX12: encoding: [0x05,0x00,0x1e,0xd7,0x6a,0xf6,0x00,0x00]
+
+v_bcnt_u32_b32 v5, vcc_hi, 0xaf123456
+// GFX12: encoding: [0x05,0x00,0x1e,0xd7,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_bcnt_u32_b32 v5, ttmp15, src_scc
+// GFX12: encoding: [0x05,0x00,0x1e,0xd7,0x7b,0xfa,0x01,0x00]
+
+v_bcnt_u32_b32 v5, m0, 0.5
+// GFX12: encoding: [0x05,0x00,0x1e,0xd7,0x7d,0xe0,0x01,0x00]
+
+v_bcnt_u32_b32 v5, exec_lo, -1
+// GFX12: encoding: [0x05,0x00,0x1e,0xd7,0x7e,0x82,0x01,0x00]
+
+v_bcnt_u32_b32 v5, exec_hi, null
+// GFX12: encoding: [0x05,0x00,0x1e,0xd7,0x7f,0xf8,0x00,0x00]
+
+v_bcnt_u32_b32 v5, null, exec_lo
+// GFX12: encoding: [0x05,0x00,0x1e,0xd7,0x7c,0xfc,0x00,0x00]
+
+v_bcnt_u32_b32 v5, -1, exec_hi
+// GFX12: encoding: [0x05,0x00,0x1e,0xd7,0xc1,0xfe,0x00,0x00]
+
+v_bcnt_u32_b32 v5, 0.5, m0
+// GFX12: encoding: [0x05,0x00,0x1e,0xd7,0xf0,0xfa,0x00,0x00]
+
+v_bcnt_u32_b32 v5, src_scc, vcc_lo
+// GFX12: encoding: [0x05,0x00,0x1e,0xd7,0xfd,0xd4,0x00,0x00]
+
+v_bcnt_u32_b32 v255, 0xaf123456, vcc_hi
+// GFX12: encoding: [0xff,0x00,0x1e,0xd7,0xff,0xd6,0x00,0x00,0x56,0x34,0x12,0xaf]
+
+v_bfe_i32 v5, v1, v2, s3
+// GFX12: encoding: [0x05,0x00,0x11,0xd6,0x01,0x05,0x0e,0x00]
+
+v_bfe_i32 v5, v255, s2, s105
+// GFX12: encoding: [0x05,0x00,0x11,0xd6,0xff,0x05,0xa4,0x01]
+
+v_bfe_i32 v5, s1, v255, exec_hi
+// GFX12: encoding: [0x05,0x00,0x11,0xd6,0x01,0xfe,0xff,0x01]
+
+v_bfe_i32 v5, s105, s105, exec_lo
+// GFX12: encoding: [0x05,0x00,0x11,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_bfe_i32 v5, vcc_lo, ttmp15, v3
+// GFX12: encoding: [0x05,0x00,0x11,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_bfe_i32 v5, vcc_hi, 0xaf123456, v255
+// GFX12: encoding: [0x05,0x00,0x11,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+v_bfe_i32 v5, ttmp15, src_scc, ttmp15
+// GFX12: encoding: [0x05,0x00,0x11,0xd6,0x7b,0xfa,0xed,0x01]
+
+v_bfe_i32 v5, m0, 0.5, m0
+// GFX12: encoding: [0x05,0x00,0x11,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_bfe_i32 v5, exec_lo, -1, vcc_hi
+// GFX12: encoding: [0x05,0x00,0x11,0xd6,0x7e,0x82,0xad,0x01]
+
+v_bfe_i32 v5, exec_hi, null, vcc_lo
+// GFX12: encoding: [0x05,0x00,0x11,0xd6,0x7f,0xf8,0xa8,0x01]
+
+v_bfe_i32 v5, null, exec_lo, 0xaf123456
+// GFX12: encoding: [0x05,0x00,0x11,0xd6,0x7c,0xfc,0xfc,0x03,0x56,0x34,0x12,0xaf]
+
+v_bfe_i32 v5, -1, exec_hi, src_scc
+// GFX12: encoding: [0x05,0x00,0x11,0xd6,0xc1,0xfe,0xf4,0x03]
+
+v_bfe_i32 v5, 0.5, m0, 0.5
+// GFX12: encoding: [0x05,0x00,0x11,0xd6,0xf0,0xfa,0xc0,0x03]
+
+v_bfe_i32 v5, src_scc, vcc_lo, -1
+// GFX12: encoding: [0x05,0x00,0x11,0xd6,0xfd,0xd4,0x04,0x03]
+
+v_bfe_i32 v255, 0xaf123456, vcc_hi, null
+// GFX12: encoding: [0xff,0x00,0x11,0xd6,0xff,0xd6,0xf0,0x01,0x56,0x34,0x12,0xaf]
+
+v_bfe_u32 v5, v1, v2, s3
+// GFX12: encoding: [0x05,0x00,0x10,0xd6,0x01,0x05,0x0e,0x00]
+
+v_bfe_u32 v5, v255, s2, s105
+// GFX12: encoding: [0x05,0x00,0x10,0xd6,0xff,0x05,0xa4,0x01]
+
+v_bfe_u32 v5, s1, v255, exec_hi
+// GFX12: encoding: [0x05,0x00,0x10,0xd6,0x01,0xfe,0xff,0x01]
+
+v_bfe_u32 v5, s105, s105, exec_lo
+// GFX12: encoding: [0x05,0x00,0x10,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_bfe_u32 v5, vcc_lo, ttmp15, v3
+// GFX12: encoding: [0x05,0x00,0x10,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_bfe_u32 v5, vcc_hi, 0xaf123456, v255
+// GFX12: encoding: [0x05,0x00,0x10,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+v_bfe_u32 v5, ttmp15, src_scc, ttmp15
+// GFX12: encoding: [0x05,0x00,0x10,0xd6,0x7b,0xfa,0xed,0x01]
+
+v_bfe_u32 v5, m0, 0.5, m0
+// GFX12: encoding: [0x05,0x00,0x10,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_bfe_u32 v5, exec_lo, -1, vcc_hi
+// GFX12: encoding: [0x05,0x00,0x10,0xd6,0x7e,0x82,0xad,0x01]
+
+v_bfe_u32 v5, exec_hi, null, vcc_lo
+// GFX12: encoding: [0x05,0x00,0x10,0xd6,0x7f,0xf8,0xa8,0x01]
+
+v_bfe_u32 v5, null, exec_lo, 0xaf123456
+// GFX12: encoding: [0x05,0x00,0x10,0xd6,0x7c,0xfc,0xfc,0x03,0x56,0x34,0x12,0xaf]
+
+v_bfe_u32 v5, -1, exec_hi, src_scc
+// GFX12: encoding: [0x05,0x00,0x10,0xd6,0xc1,0xfe,0xf4,0x03]
+
+v_bfe_u32 v5, 0.5, m0, 0.5
+// GFX12: encoding: [0x05,0x00,0x10,0xd6,0xf0,0xfa,0xc0,0x03]
+
+v_bfe_u32 v5, src_scc, vcc_lo, -1
+// GFX12: encoding: [0x05,0x00,0x10,0xd6,0xfd,0xd4,0x04,0x03]
+
+v_bfe_u32 v255, 0xaf123456, vcc_hi, null
+// GFX12: encoding: [0xff,0x00,0x10,0xd6,0xff,0xd6,0xf0,0x01,0x56,0x34,0x12,0xaf]
+
+v_bfi_b32 v5, v1, v2, s3
+// GFX12: encoding: [0x05,0x00,0x12,0xd6,0x01,0x05,0x0e,0x00]
+
+v_bfi_b32 v5, v255, s2, s105
+// GFX12: encoding: [0x05,0x00,0x12,0xd6,0xff,0x05,0xa4,0x01]
+
+v_bfi_b32 v5, s1, v255, exec_hi
+// GFX12: encoding: [0x05,0x00,0x12,0xd6,0x01,0xfe,0xff,0x01]
+
+v_bfi_b32 v5, s105, s105, exec_lo
+// GFX12: encoding: [0x05,0x00,0x12,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_bfi_b32 v5, vcc_lo, ttmp15, v3
+// GFX12: encoding: [0x05,0x00,0x12,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_bfi_b32 v5, vcc_hi, 0xaf123456, v255
+// GFX12: encoding: [0x05,0x00,0x12,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+v_bfi_b32 v5, ttmp15, src_scc, ttmp15
+// GFX12: encoding: [0x05,0x00,0x12,0xd6,0x7b,0xfa,0xed,0x01]
+
+v_bfi_b32 v5, m0, 0.5, m0
+// GFX12: encoding: [0x05,0x00,0x12,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_bfi_b32 v5, exec_lo, -1, vcc_hi
+// GFX12: encoding: [0x05,0x00,0x12,0xd6,0x7e,0x82,0xad,0x01]
+
+v_bfi_b32 v5, exec_hi, null, vcc_lo
+// GFX12: encoding: [0x05,0x00,0x12,0xd6,0x7f,0xf8,0xa8,0x01]
+
+v_bfi_b32 v5, null, exec_lo, 0xaf123456
+// GFX12: encoding: [0x05,0x00,0x12,0xd6,0x7c,0xfc,0xfc,0x03,0x56,0x34,0x12,0xaf]
+
+v_bfi_b32 v5, -1, exec_hi, src_scc
+// GFX12: encoding: [0x05,0x00,0x12,0xd6,0xc1,0xfe,0xf4,0x03]
+
+v_bfi_b32 v5, 0.5, m0, 0.5
+// GFX12: encoding: [0x05,0x00,0x12,0xd6,0xf0,0xfa,0xc0,0x03]
+
+v_bfi_b32 v5, src_scc, vcc_lo, -1
+// GFX12: encoding: [0x05,0x00,0x12,0xd6,0xfd,0xd4,0x04,0x03]
+
+v_bfi_b32 v255, 0xaf123456, vcc_hi, null
+// GFX12: encoding: [0xff,0x00,0x12,0xd6,0xff,0xd6,0xf0,0x01,0x56,0x34,0x12,0xaf]
+
+v_bfm_b32 v5, v1, v2
+// GFX12: encoding: [0x05,0x00,0x1d,0xd7,0x01,0x05,0x02,0x00]
+
+v_bfm_b32 v5, v255, v255
+// GFX12: encoding: [0x05,0x00,0x1d,0xd7,0xff,0xff,0x03,0x00]
+
+v_bfm_b32 v5, s1, s2
+// GFX12: encoding: [0x05,0x00,0x1d,0xd7,0x01,0x04,0x00,0x00]
+
+v_bfm_b32 v5, s105, s105
+// GFX12: encoding: [0x05,0x00,0x1d,0xd7,0x69,0xd2,0x00,0x00]
+
+v_bfm_b32 v5, vcc_lo, ttmp15
+// GFX12: encoding: [0x05,0x00,0x1d,0xd7,0x6a,0xf6,0x00,0x00]
+
+v_bfm_b32 v5, vcc_hi, 0xaf123456
+// GFX12: encoding: [0x05,0x00,0x1d,0xd7,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_bfm_b32 v5, ttmp15, src_scc
+// GFX12: encoding: [0x05,0x00,0x1d,0xd7,0x7b,0xfa,0x01,0x00]
+
+v_bfm_b32 v5, m0, 0.5
+// GFX12: encoding: [0x05,0x00,0x1d,0xd7,0x7d,0xe0,0x01,0x00]
+
+v_bfm_b32 v5, exec_lo, -1
+// GFX12: encoding: [0x05,0x00,0x1d,0xd7,0x7e,0x82,0x01,0x00]
+
+v_bfm_b32 v5, exec_hi, null
+// GFX12: encoding: [0x05,0x00,0x1d,0xd7,0x7f,0xf8,0x00,0x00]
+
+v_bfm_b32 v5, null, exec_lo
+// GFX12: encoding: [0x05,0x00,0x1d,0xd7,0x7c,0xfc,0x00,0x00]
+
+v_bfm_b32 v5, -1, exec_hi
+// GFX12: encoding: [0x05,0x00,0x1d,0xd7,0xc1,0xfe,0x00,0x00]
+
+v_bfm_b32 v5, 0.5, m0
+// GFX12: encoding: [0x05,0x00,0x1d,0xd7,0xf0,0xfa,0x00,0x00]
+
+v_bfm_b32 v5, src_scc, vcc_lo
+// GFX12: encoding: [0x05,0x00,0x1d,0xd7,0xfd,0xd4,0x00,0x00]
+
+v_bfm_b32 v255, 0xaf123456, vcc_hi
+// GFX12: encoding: [0xff,0x00,0x1d,0xd7,0xff,0xd6,0x00,0x00,0x56,0x34,0x12,0xaf]
+
+v_cndmask_b16 v5, v1, src_scc, s3
+// W32: encoding: [0x05,0x00,0x5d,0xd6,0x01,0xfb,0x0d,0x00]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16 v5, v255, 0.5, s3
+// W32: encoding: [0x05,0x00,0x5d,0xd6,0xff,0xe1,0x0d,0x00]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16 v5, s105, s105, s3
+// W32: encoding: [0x05,0x00,0x5d,0xd6,0x69,0xd2,0x0c,0x00]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16 v5, vcc_hi, v2, s3
+// W32: encoding: [0x05,0x00,0x5d,0xd6,0x6b,0x04,0x0e,0x00]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16 v5, ttmp15, ttmp15, s3
+// W32: encoding: [0x05,0x00,0x5d,0xd6,0x7b,0xf6,0x0c,0x00]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16 v5, m0, v255, s3
+// W32: encoding: [0x05,0x00,0x5d,0xd6,0x7d,0xfe,0x0f,0x00]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16 v5, exec_lo, exec_lo, s3
+// W32: encoding: [0x05,0x00,0x5d,0xd6,0x7e,0xfc,0x0c,0x00]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16 v5, exec_hi, exec_hi, s3
+// W32: encoding: [0x05,0x00,0x5d,0xd6,0x7f,0xfe,0x0c,0x00]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16 v5, null, m0, s105
+// W32: encoding: [0x05,0x00,0x5d,0xd6,0x7c,0xfa,0xa4,0x01]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16 v5, -1, -|vcc_lo|, vcc_lo
+// W32: encoding: [0x05,0x02,0x5d,0xd6,0xc1,0xd4,0xa8,0x41]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16 v5, 0.5, -1, vcc_hi
+// W32: encoding: [0x05,0x00,0x5d,0xd6,0xf0,0x82,0xad,0x01]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16 v5, -|src_scc|, null, ttmp15
+// W32: encoding: [0x05,0x01,0x5d,0xd6,0xfd,0xf8,0xec,0x21]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16 v5, v1, src_scc, s[6:7]
+// W64: encoding: [0x05,0x00,0x5d,0xd6,0x01,0xfb,0x19,0x00]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16 v5, v255, 0.5, s[6:7]
+// W64: encoding: [0x05,0x00,0x5d,0xd6,0xff,0xe1,0x19,0x00]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16 v5, s105, s105, s[6:7]
+// W64: encoding: [0x05,0x00,0x5d,0xd6,0x69,0xd2,0x18,0x00]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16 v5, vcc_hi, v2, s[6:7]
+// W64: encoding: [0x05,0x00,0x5d,0xd6,0x6b,0x04,0x1a,0x00]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16 v5, ttmp15, ttmp15, s[6:7]
+// W64: encoding: [0x05,0x00,0x5d,0xd6,0x7b,0xf6,0x18,0x00]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16 v5, m0, v255, s[6:7]
+// W64: encoding: [0x05,0x00,0x5d,0xd6,0x7d,0xfe,0x1b,0x00]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16 v5, exec_lo, exec_lo, s[6:7]
+// W64: encoding: [0x05,0x00,0x5d,0xd6,0x7e,0xfc,0x18,0x00]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16 v5, exec_hi, exec_hi, s[6:7]
+// W64: encoding: [0x05,0x00,0x5d,0xd6,0x7f,0xfe,0x18,0x00]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16 v5, null, m0, s[6:7]
+// W64: encoding: [0x05,0x00,0x5d,0xd6,0x7c,0xfa,0x18,0x00]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16 v5, -1, -|vcc_lo|, s[104:105]
+// W64: encoding: [0x05,0x02,0x5d,0xd6,0xc1,0xd4,0xa0,0x41]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16 v5, 0.5, -1, vcc
+// W64: encoding: [0x05,0x00,0x5d,0xd6,0xf0,0x82,0xa9,0x01]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16 v5, -|src_scc|, null, ttmp[14:15]
+// W64: encoding: [0x05,0x01,0x5d,0xd6,0xfd,0xf8,0xe8,0x21]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16 v255, -|0xfe0b|, -|vcc_hi|, null
+// GFX12: encoding: [0xff,0x03,0x5d,0xd6,0xff,0xd6,0xf0,0x61,0x0b,0xfe,0x00,0x00]
+
+v_cubeid_f32 v5, v1, v2, s3
+// GFX12: encoding: [0x05,0x00,0x0c,0xd6,0x01,0x05,0x0e,0x00]
+
+v_cubeid_f32 v5, v255, s2, s105
+// GFX12: encoding: [0x05,0x00,0x0c,0xd6,0xff,0x05,0xa4,0x01]
+
+v_cubeid_f32 v5, s1, v255, exec_hi
+// GFX12: encoding: [0x05,0x00,0x0c,0xd6,0x01,0xfe,0xff,0x01]
+
+v_cubeid_f32 v5, s105, s105, exec_lo
+// GFX12: encoding: [0x05,0x00,0x0c,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_cubeid_f32 v5, vcc_lo, ttmp15, v3
+// GFX12: encoding: [0x05,0x00,0x0c,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_cubeid_f32 v5, vcc_hi, 0xaf123456, v255
+// GFX12: encoding: [0x05,0x00,0x0c,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+v_cubeid_f32 v5, -|ttmp15|, -|src_scc|, -|ttmp15|
+// GFX12: encoding: [0x05,0x07,0x0c,0xd6,0x7b,0xfa,0xed,0xe1]
+
+v_cubeid_f32 v5, m0, 0.5, m0
+// GFX12: encoding: [0x05,0x00,0x0c,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_cubeid_f32 v5, |exec_lo|, -1, vcc_hi
+// GFX12: encoding: [0x05,0x01,0x0c,0xd6,0x7e,0x82,0xad,0x01]
+
+v_cubeid_f32 v5, -|exec_hi|, null, -|vcc_lo|
+// GFX12: encoding: [0x05,0x05,0x0c,0xd6,0x7f,0xf8,0xa8,0xa1]
+
+v_cubeid_f32 v5, null, exec_lo, -|0xaf123456|
+// GFX12: encoding: [0x05,0x04,0x0c,0xd6,0x7c,0xfc,0xfc,0x83,0x56,0x34,0x12,0xaf]
+
+v_cubeid_f32 v5, -1, -|exec_hi|, -|src_scc|
+// GFX12: encoding: [0x05,0x06,0x0c,0xd6,0xc1,0xfe,0xf4,0xc3]
+
+v_cubeid_f32 v5, 0.5, -m0, 0.5 mul:2
+// GFX12: encoding: [0x05,0x00,0x0c,0xd6,0xf0,0xfa,0xc0,0x4b]
+
+v_cubeid_f32 v5, -src_scc, |vcc_lo|, -1 mul:4
+// GFX12: encoding: [0x05,0x02,0x0c,0xd6,0xfd,0xd4,0x04,0x33]
+
+v_cubeid_f32 v255, -|0xaf123456|, -|vcc_hi|, null clamp div:2
+// GFX12: encoding: [0xff,0x83,0x0c,0xd6,0xff,0xd6,0xf0,0x79,0x56,0x34,0x12,0xaf]
+
+v_cubema_f32 v5, v1, v2, s3
+// GFX12: encoding: [0x05,0x00,0x0f,0xd6,0x01,0x05,0x0e,0x00]
+
+v_cubema_f32 v5, v255, s2, s105
+// GFX12: encoding: [0x05,0x00,0x0f,0xd6,0xff,0x05,0xa4,0x01]
+
+v_cubema_f32 v5, s1, v255, exec_hi
+// GFX12: encoding: [0x05,0x00,0x0f,0xd6,0x01,0xfe,0xff,0x01]
+
+v_cubema_f32 v5, s105, s105, exec_lo
+// GFX12: encoding: [0x05,0x00,0x0f,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_cubema_f32 v5, vcc_lo, ttmp15, v3
+// GFX12: encoding: [0x05,0x00,0x0f,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_cubema_f32 v5, vcc_hi, 0xaf123456, v255
+// GFX12: encoding: [0x05,0x00,0x0f,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+v_cubema_f32 v5, -|ttmp15|, -|src_scc|, -|ttmp15|
+// GFX12: encoding: [0x05,0x07,0x0f,0xd6,0x7b,0xfa,0xed,0xe1]
+
+v_cubema_f32 v5, m0, 0.5, m0
+// GFX12: encoding: [0x05,0x00,0x0f,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_cubema_f32 v5, |exec_lo|, -1, vcc_hi
+// GFX12: encoding: [0x05,0x01,0x0f,0xd6,0x7e,0x82,0xad,0x01]
+
+v_cubema_f32 v5, -|exec_hi|, null, -|vcc_lo|
+// GFX12: encoding: [0x05,0x05,0x0f,0xd6,0x7f,0xf8,0xa8,0xa1]
+
+v_cubema_f32 v5, null, exec_lo, -|0xaf123456|
+// GFX12: encoding: [0x05,0x04,0x0f,0xd6,0x7c,0xfc,0xfc,0x83,0x56,0x34,0x12,0xaf]
+
+v_cubema_f32 v5, -1, -|exec_hi|, -|src_scc|
+// GFX12: encoding: [0x05,0x06,0x0f,0xd6,0xc1,0xfe,0xf4,0xc3]
+
+v_cubema_f32 v5, 0.5, -m0, 0.5 mul:2
+// GFX12: encoding: [0x05,0x00,0x0f,0xd6,0xf0,0xfa,0xc0,0x4b]
+
+v_cubema_f32 v5, -src_scc, |vcc_lo|, -1 mul:4
+// GFX12: encoding: [0x05,0x02,0x0f,0xd6,0xfd,0xd4,0x04,0x33]
+
+v_cubema_f32 v255, -|0xaf123456|, -|vcc_hi|, null clamp div:2
+// GFX12: encoding: [0xff,0x83,0x0f,0xd6,0xff,0xd6,0xf0,0x79,0x56,0x34,0x12,0xaf]
+
+v_cubesc_f32 v5, v1, v2, s3
+// GFX12: encoding: [0x05,0x00,0x0d,0xd6,0x01,0x05,0x0e,0x00]
+
+v_cubesc_f32 v5, v255, s2, s105
+// GFX12: encoding: [0x05,0x00,0x0d,0xd6,0xff,0x05,0xa4,0x01]
+
+v_cubesc_f32 v5, s1, v255, exec_hi
+// GFX12: encoding: [0x05,0x00,0x0d,0xd6,0x01,0xfe,0xff,0x01]
+
+v_cubesc_f32 v5, s105, s105, exec_lo
+// GFX12: encoding: [0x05,0x00,0x0d,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_cubesc_f32 v5, vcc_lo, ttmp15, v3
+// GFX12: encoding: [0x05,0x00,0x0d,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_cubesc_f32 v5, vcc_hi, 0xaf123456, v255
+// GFX12: encoding: [0x05,0x00,0x0d,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+v_cubesc_f32 v5, -|ttmp15|, -|src_scc|, -|ttmp15|
+// GFX12: encoding: [0x05,0x07,0x0d,0xd6,0x7b,0xfa,0xed,0xe1]
+
+v_cubesc_f32 v5, m0, 0.5, m0
+// GFX12: encoding: [0x05,0x00,0x0d,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_cubesc_f32 v5, |exec_lo|, -1, vcc_hi
+// GFX12: encoding: [0x05,0x01,0x0d,0xd6,0x7e,0x82,0xad,0x01]
+
+v_cubesc_f32 v5, -|exec_hi|, null, -|vcc_lo|
+// GFX12: encoding: [0x05,0x05,0x0d,0xd6,0x7f,0xf8,0xa8,0xa1]
+
+v_cubesc_f32 v5, null, exec_lo, -|0xaf123456|
+// GFX12: encoding: [0x05,0x04,0x0d,0xd6,0x7c,0xfc,0xfc,0x83,0x56,0x34,0x12,0xaf]
+
+v_cubesc_f32 v5, -1, -|exec_hi|, -|src_scc|
+// GFX12: encoding: [0x05,0x06,0x0d,0xd6,0xc1,0xfe,0xf4,0xc3]
+
+v_cubesc_f32 v5, 0.5, -m0, 0.5 mul:2
+// GFX12: encoding: [0x05,0x00,0x0d,0xd6,0xf0,0xfa,0xc0,0x4b]
+
+v_cubesc_f32 v5, -src_scc, |vcc_lo|, -1 mul:4
+// GFX12: encoding: [0x05,0x02,0x0d,0xd6,0xfd,0xd4,0x04,0x33]
+
+v_cubesc_f32 v255, -|0xaf123456|, -|vcc_hi|, null clamp div:2
+// GFX12: encoding: [0xff,0x83,0x0d,0xd6,0xff,0xd6,0xf0,0x79,0x56,0x34,0x12,0xaf]
+
+v_cubetc_f32 v5, v1, v2, s3
+// GFX12: encoding: [0x05,0x00,0x0e,0xd6,0x01,0x05,0x0e,0x00]
+
+v_cubetc_f32 v5, v255, s2, s105
+// GFX12: encoding: [0x05,0x00,0x0e,0xd6,0xff,0x05,0xa4,0x01]
+
+v_cubetc_f32 v5, s1, v255, exec_hi
+// GFX12: encoding: [0x05,0x00,0x0e,0xd6,0x01,0xfe,0xff,0x01]
+
+v_cubetc_f32 v5, s105, s105, exec_lo
+// GFX12: encoding: [0x05,0x00,0x0e,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_cubetc_f32 v5, vcc_lo, ttmp15, v3
+// GFX12: encoding: [0x05,0x00,0x0e,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_cubetc_f32 v5, vcc_hi, 0xaf123456, v255
+// GFX12: encoding: [0x05,0x00,0x0e,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+v_cubetc_f32 v5, -|ttmp15|, -|src_scc|, -|ttmp15|
+// GFX12: encoding: [0x05,0x07,0x0e,0xd6,0x7b,0xfa,0xed,0xe1]
+
+v_cubetc_f32 v5, m0, 0.5, m0
+// GFX12: encoding: [0x05,0x00,0x0e,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_cubetc_f32 v5, |exec_lo|, -1, vcc_hi
+// GFX12: encoding: [0x05,0x01,0x0e,0xd6,0x7e,0x82,0xad,0x01]
+
+v_cubetc_f32 v5, -|exec_hi|, null, -|vcc_lo|
+// GFX12: encoding: [0x05,0x05,0x0e,0xd6,0x7f,0xf8,0xa8,0xa1]
+
+v_cubetc_f32 v5, null, exec_lo, -|0xaf123456|
+// GFX12: encoding: [0x05,0x04,0x0e,0xd6,0x7c,0xfc,0xfc,0x83,0x56,0x34,0x12,0xaf]
+
+v_cubetc_f32 v5, -1, -|exec_hi|, -|src_scc|
+// GFX12: encoding: [0x05,0x06,0x0e,0xd6,0xc1,0xfe,0xf4,0xc3]
+
+v_cubetc_f32 v5, 0.5, -m0, 0.5 mul:2
+// GFX12: encoding: [0x05,0x00,0x0e,0xd6,0xf0,0xfa,0xc0,0x4b]
+
+v_cubetc_f32 v5, -src_scc, |vcc_lo|, -1 mul:4
+// GFX12: encoding: [0x05,0x02,0x0e,0xd6,0xfd,0xd4,0x04,0x33]
+
+v_cubetc_f32 v255, -|0xaf123456|, -|vcc_hi|, null clamp div:2
+// GFX12: encoding: [0xff,0x83,0x0e,0xd6,0xff,0xd6,0xf0,0x79,0x56,0x34,0x12,0xaf]
+
+v_cvt_pk_fp8_f32 v1, v2, v3
+// GFX12: encoding: [0x01,0x00,0x69,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_pk_fp8_f32 v1, -v2, |v3|
+// GFX12: encoding: [0x01,0x02,0x69,0xd7,0x02,0x07,0x02,0x20]
+
+v_cvt_pk_fp8_f32 v1, s2, 3
+// GFX12: encoding: [0x01,0x00,0x69,0xd7,0x02,0x06,0x01,0x00]
+
+v_cvt_pk_bf8_f32 v1, v2, v3
+// GFX12: encoding: [0x01,0x00,0x6a,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_pk_bf8_f32 v1, -v2, |v3|
+// GFX12: encoding: [0x01,0x02,0x6a,0xd7,0x02,0x07,0x02,0x20]
+
+v_cvt_pk_bf8_f32 v1, s2, 3
+// GFX12: encoding: [0x01,0x00,0x6a,0xd7,0x02,0x06,0x01,0x00]
+
+v_cvt_sr_fp8_f32 v1, v2, v3
+// GFX12: encoding: [0x01,0x00,0x6b,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_fp8_f32 v10, s2, v5
+// GFX12: encoding: [0x0a,0x00,0x6b,0xd7,0x02,0x0a,0x02,0x00]
+
+v_cvt_sr_fp8_f32 v5, -|v255|, v4
+// GFX12: encoding: [0x05,0x01,0x6b,0xd7,0xff,0x09,0x02,0x20]
+
+v_cvt_sr_fp8_f32 v1, v2, v3 byte_sel:0
+// GFX12: v_cvt_sr_fp8_f32 v1, v2, v3 ; encoding: [0x01,0x00,0x6b,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_fp8_f32 v1, v2, v3 byte_sel:1
+// GFX12: v_cvt_sr_fp8_f32 v1, v2, v3 byte_sel:1 ; encoding: [0x01,0x20,0x6b,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_fp8_f32 v1, v2, v3 byte_sel:2
+// GFX12: v_cvt_sr_fp8_f32 v1, v2, v3 byte_sel:2 ; encoding: [0x01,0x40,0x6b,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_fp8_f32 v1, v2, v3 byte_sel:3
+// GFX12: v_cvt_sr_fp8_f32 v1, v2, v3 byte_sel:3 ; encoding: [0x01,0x60,0x6b,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_bf8_f32 v1, v2, v3
+// GFX12: encoding: [0x01,0x00,0x6c,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_bf8_f32 v10, s2, v5
+// GFX12: encoding: [0x0a,0x00,0x6c,0xd7,0x02,0x0a,0x02,0x00]
+
+v_cvt_sr_bf8_f32 v5, -|v255|, v4
+// GFX12: encoding: [0x05,0x01,0x6c,0xd7,0xff,0x09,0x02,0x20]
+
+v_cvt_sr_bf8_f32 v1, v2, v3 byte_sel:0
+// GFX12: v_cvt_sr_bf8_f32 v1, v2, v3 ; encoding: [0x01,0x00,0x6c,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_bf8_f32 v1, v2, v3 byte_sel:1
+// GFX12: v_cvt_sr_bf8_f32 v1, v2, v3 byte_sel:1 ; encoding: [0x01,0x20,0x6c,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_bf8_f32 v1, v2, v3 byte_sel:2
+// GFX12: v_cvt_sr_bf8_f32 v1, v2, v3 byte_sel:2 ; encoding: [0x01,0x40,0x6c,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_bf8_f32 v1, v2, v3 byte_sel:3
+// GFX12: v_cvt_sr_bf8_f32 v1, v2, v3 byte_sel:3 ; encoding: [0x01,0x60,0x6c,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_pk_i16_f32 v5, v1, v2
+// GFX12: encoding: [0x05,0x00,0x06,0xd7,0x01,0x05,0x02,0x00]
+
+v_cvt_pk_i16_f32 v5, v255, v255
+// GFX12: encoding: [0x05,0x00,0x06,0xd7,0xff,0xff,0x03,0x00]
+
+v_cvt_pk_i16_f32 v5, s1, s2
+// GFX12: encoding: [0x05,0x00,0x06,0xd7,0x01,0x04,0x00,0x00]
+
+v_cvt_pk_i16_f32 v5, s105, s105
+// GFX12: encoding: [0x05,0x00,0x06,0xd7,0x69,0xd2,0x00,0x00]
+
+v_cvt_pk_i16_f32 v5, vcc_lo, ttmp15
+// GFX12: encoding: [0x05,0x00,0x06,0xd7,0x6a,0xf6,0x00,0x00]
+
+v_cvt_pk_i16_f32 v5, vcc_hi, 0xaf123456
+// GFX12: encoding: [0x05,0x00,0x06,0xd7,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_cvt_pk_i16_f32 v5, ttmp15, src_scc
+// GFX12: encoding: [0x05,0x00,0x06,0xd7,0x7b,0xfa,0x01,0x00]
+
+v_cvt_pk_i16_f32 v5, m0, 0.5
+// GFX12: encoding: [0x05,0x00,0x06,0xd7,0x7d,0xe0,0x01,0x00]
+
+v_cvt_pk_i16_f32 v5, exec_lo, -1
+// GFX12: encoding: [0x05,0x00,0x06,0xd7,0x7e,0x82,0x01,0x00]
+
+v_cvt_pk_i16_f32 v5, |exec_hi|, null
+// GFX12: encoding: [0x05,0x01,0x06,0xd7,0x7f,0xf8,0x00,0x00]
+
+v_cvt_pk_i16_f32 v5, null, exec_lo
+// GFX12: encoding: [0x05,0x00,0x06,0xd7,0x7c,0xfc,0x00,0x00]
+
+v_cvt_pk_i16_f32 v5, -1, exec_hi
+// GFX12: encoding: [0x05,0x00,0x06,0xd7,0xc1,0xfe,0x00,0x00]
+
+v_cvt_pk_i16_f32 v5, 0.5, -m0
+// GFX12: encoding: [0x05,0x00,0x06,0xd7,0xf0,0xfa,0x00,0x40]
+
+v_cvt_pk_i16_f32 v5, -src_scc, |vcc_lo|
+// GFX12: encoding: [0x05,0x02,0x06,0xd7,0xfd,0xd4,0x00,0x20]
+
+v_cvt_pk_i16_f32 v255, -|0xaf123456|, -|vcc_hi|
+// GFX12: encoding: [0xff,0x03,0x06,0xd7,0xff,0xd6,0x00,0x60,0x56,0x34,0x12,0xaf]
+
+v_cvt_pk_i16_i32 v5, v1, v2
+// GFX12: encoding: [0x05,0x00,0x24,0xd7,0x01,0x05,0x02,0x00]
+
+v_cvt_pk_i16_i32 v5, v255, v255
+// GFX12: encoding: [0x05,0x00,0x24,0xd7,0xff,0xff,0x03,0x00]
+
+v_cvt_pk_i16_i32 v5, s1, s2
+// GFX12: encoding: [0x05,0x00,0x24,0xd7,0x01,0x04,0x00,0x00]
+
+v_cvt_pk_i16_i32 v5, s105, s105
+// GFX12: encoding: [0x05,0x00,0x24,0xd7,0x69,0xd2,0x00,0x00]
+
+v_cvt_pk_i16_i32 v5, vcc_lo, ttmp15
+// GFX12: encoding: [0x05,0x00,0x24,0xd7,0x6a,0xf6,0x00,0x00]
+
+v_cvt_pk_i16_i32 v5, vcc_hi, 0xaf123456
+// GFX12: encoding: [0x05,0x00,0x24,0xd7,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_cvt_pk_i16_i32 v5, ttmp15, src_scc
+// GFX12: encoding: [0x05,0x00,0x24,0xd7,0x7b,0xfa,0x01,0x00]
+
+v_cvt_pk_i16_i32 v5, m0, 0.5
+// GFX12: encoding: [0x05,0x00,0x24,0xd7,0x7d,0xe0,0x01,0x00]
+
+v_cvt_pk_i16_i32 v5, exec_lo, -1
+// GFX12: encoding: [0x05,0x00,0x24,0xd7,0x7e,0x82,0x01,0x00]
+
+v_cvt_pk_i16_i32 v5, exec_hi, null
+// GFX12: encoding: [0x05,0x00,0x24,0xd7,0x7f,0xf8,0x00,0x00]
+
+v_cvt_pk_i16_i32 v5, null, exec_lo
+// GFX12: encoding: [0x05,0x00,0x24,0xd7,0x7c,0xfc,0x00,0x00]
+
+v_cvt_pk_i16_i32 v5, -1, exec_hi
+// GFX12: encoding: [0x05,0x00,0x24,0xd7,0xc1,0xfe,0x00,0x00]
+
+v_cvt_pk_i16_i32 v5, 0.5, m0
+// GFX12: encoding: [0x05,0x00,0x24,0xd7,0xf0,0xfa,0x00,0x00]
+
+v_cvt_pk_i16_i32 v5, src_scc, vcc_lo
+// GFX12: encoding: [0x05,0x00,0x24,0xd7,0xfd,0xd4,0x00,0x00]
+
+v_cvt_pk_i16_i32 v255, 0xaf123456, vcc_hi
+// GFX12: encoding: [0xff,0x00,0x24,0xd7,0xff,0xd6,0x00,0x00,0x56,0x34,0x12,0xaf]
+
+v_cvt_pk_norm_i16_f16 v5, v1, v2
+// GFX12: encoding: [0x05,0x00,0x12,0xd7,0x01,0x05,0x02,0x00]
+
+v_cvt_pk_norm_i16_f16 v5, v255, v255
+// GFX12: encoding: [0x05,0x00,0x12,0xd7,0xff,0xff,0x03,0x00]
+
+v_cvt_pk_norm_i16_f16 v5, s1, s2
+// GFX12: encoding: [0x05,0x00,0x12,0xd7,0x01,0x04,0x00,0x00]
+
+v_cvt_pk_norm_i16_f16 v5, s105, s105
+// GFX12: encoding: [0x05,0x00,0x12,0xd7,0x69,0xd2,0x00,0x00]
+
+v_cvt_pk_norm_i16_f16 v5, vcc_lo, ttmp15
+// GFX12: encoding: [0x05,0x00,0x12,0xd7,0x6a,0xf6,0x00,0x00]
+
+v_cvt_pk_norm_i16_f16 v5, vcc_hi, 0xfe0b
+// GFX12: encoding: [0x05,0x00,0x12,0xd7,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+v_cvt_pk_norm_i16_f16 v5, ttmp15, src_scc
+// GFX12: encoding: [0x05,0x00,0x12,0xd7,0x7b,0xfa,0x01,0x00]
+
+v_cvt_pk_norm_i16_f16 v5, m0, 0.5
+// GFX12: encoding: [0x05,0x00,0x12,0xd7,0x7d,0xe0,0x01,0x00]
+
+v_cvt_pk_norm_i16_f16 v5, exec_lo, -1
+// GFX12: encoding: [0x05,0x00,0x12,0xd7,0x7e,0x82,0x01,0x00]
+
+v_cvt_pk_norm_i16_f16 v5, |exec_hi|, null
+// GFX12: encoding: [0x05,0x01,0x12,0xd7,0x7f,0xf8,0x00,0x00]
+
+v_cvt_pk_norm_i16_f16 v5, null, exec_lo
+// GFX12: encoding: [0x05,0x00,0x12,0xd7,0x7c,0xfc,0x00,0x00]
+
+v_cvt_pk_norm_i16_f16 v5, -1, exec_hi
+// GFX12: encoding: [0x05,0x00,0x12,0xd7,0xc1,0xfe,0x00,0x00]
+
+v_cvt_pk_norm_i16_f16 v5, 0.5, -m0 op_sel:[0,0,0]
+// GFX12: encoding: [0x05,0x00,0x12,0xd7,0xf0,0xfa,0x00,0x40]
+
+v_cvt_pk_norm_i16_f16 v5, -src_scc, |vcc_lo| op_sel:[1,0,0]
+// GFX12: encoding: [0x05,0x0a,0x12,0xd7,0xfd,0xd4,0x00,0x20]
+
+v_cvt_pk_norm_i16_f16 v255, -|0xfe0b|, -|vcc_hi| op_sel:[0,1,0]
+// GFX12: encoding: [0xff,0x13,0x12,0xd7,0xff,0xd6,0x00,0x60,0x0b,0xfe,0x00,0x00]
+
+v_cvt_pk_norm_u16_f16 v5, v1, v2
+// GFX12: encoding: [0x05,0x00,0x13,0xd7,0x01,0x05,0x02,0x00]
+
+v_cvt_pk_norm_u16_f16 v5, v255, v255
+// GFX12: encoding: [0x05,0x00,0x13,0xd7,0xff,0xff,0x03,0x00]
+
+v_cvt_pk_norm_u16_f16 v5, s1, s2
+// GFX12: encoding: [0x05,0x00,0x13,0xd7,0x01,0x04,0x00,0x00]
+
+v_cvt_pk_norm_u16_f16 v5, s105, s105
+// GFX12: encoding: [0x05,0x00,0x13,0xd7,0x69,0xd2,0x00,0x00]
+
+v_cvt_pk_norm_u16_f16 v5, vcc_lo, ttmp15
+// GFX12: encoding: [0x05,0x00,0x13,0xd7,0x6a,0xf6,0x00,0x00]
+
+v_cvt_pk_norm_u16_f16 v5, vcc_hi, 0xfe0b
+// GFX12: encoding: [0x05,0x00,0x13,0xd7,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+v_cvt_pk_norm_u16_f16 v5, ttmp15, src_scc
+// GFX12: encoding: [0x05,0x00,0x13,0xd7,0x7b,0xfa,0x01,0x00]
+
+v_cvt_pk_norm_u16_f16 v5, m0, 0.5
+// GFX12: encoding: [0x05,0x00,0x13,0xd7,0x7d,0xe0,0x01,0x00]
+
+v_cvt_pk_norm_u16_f16 v5, exec_lo, -1
+// GFX12: encoding: [0x05,0x00,0x13,0xd7,0x7e,0x82,0x01,0x00]
+
+v_cvt_pk_norm_u16_f16 v5, |exec_hi|, null
+// GFX12: encoding: [0x05,0x01,0x13,0xd7,0x7f,0xf8,0x00,0x00]
+
+v_cvt_pk_norm_u16_f16 v5, null, exec_lo
+// GFX12: encoding: [0x05,0x00,0x13,0xd7,0x7c,0xfc,0x00,0x00]
+
+v_cvt_pk_norm_u16_f16 v5, -1, exec_hi
+// GFX12: encoding: [0x05,0x00,0x13,0xd7,0xc1,0xfe,0x00,0x00]
+
+v_cvt_pk_norm_u16_f16 v5, 0.5, -m0 op_sel:[0,0,0]
+// GFX12: encoding: [0x05,0x00,0x13,0xd7,0xf0,0xfa,0x00,0x40]
+
+v_cvt_pk_norm_u16_f16 v5, -src_scc, |vcc_lo| op_sel:[1,0,0]
+// GFX12: encoding: [0x05,0x0a,0x13,0xd7,0xfd,0xd4,0x00,0x20]
+
+v_cvt_pk_norm_u16_f16 v255, -|0xfe0b|, -|vcc_hi| op_sel:[0,1,0]
+// GFX12: encoding: [0xff,0x13,0x13,0xd7,0xff,0xd6,0x00,0x60,0x0b,0xfe,0x00,0x00]
+
+v_cvt_pk_u16_f32 v5, v1, v2
+// GFX12: encoding: [0x05,0x00,0x07,0xd7,0x01,0x05,0x02,0x00]
+
+v_cvt_pk_u16_f32 v5, v255, v255
+// GFX12: encoding: [0x05,0x00,0x07,0xd7,0xff,0xff,0x03,0x00]
+
+v_cvt_pk_u16_f32 v5, s1, s2
+// GFX12: encoding: [0x05,0x00,0x07,0xd7,0x01,0x04,0x00,0x00]
+
+v_cvt_pk_u16_f32 v5, s105, s105
+// GFX12: encoding: [0x05,0x00,0x07,0xd7,0x69,0xd2,0x00,0x00]
+
+v_cvt_pk_u16_f32 v5, vcc_lo, ttmp15
+// GFX12: encoding: [0x05,0x00,0x07,0xd7,0x6a,0xf6,0x00,0x00]
+
+v_cvt_pk_u16_f32 v5, vcc_hi, 0xaf123456
+// GFX12: encoding: [0x05,0x00,0x07,0xd7,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_cvt_pk_u16_f32 v5, ttmp15, src_scc
+// GFX12: encoding: [0x05,0x00,0x07,0xd7,0x7b,0xfa,0x01,0x00]
+
+v_cvt_pk_u16_f32 v5, m0, 0.5
+// GFX12: encoding: [0x05,0x00,0x07,0xd7,0x7d,0xe0,0x01,0x00]
+
+v_cvt_pk_u16_f32 v5, exec_lo, -1
+// GFX12: encoding: [0x05,0x00,0x07,0xd7,0x7e,0x82,0x01,0x00]
+
+v_cvt_pk_u16_f32 v5, |exec_hi|, null
+// GFX12: encoding: [0x05,0x01,0x07,0xd7,0x7f,0xf8,0x00,0x00]
+
+v_cvt_pk_u16_f32 v5, null, exec_lo
+// GFX12: encoding: [0x05,0x00,0x07,0xd7,0x7c,0xfc,0x00,0x00]
+
+v_cvt_pk_u16_f32 v5, -1, exec_hi
+// GFX12: encoding: [0x05,0x00,0x07,0xd7,0xc1,0xfe,0x00,0x00]
+
+v_cvt_pk_u16_f32 v5, 0.5, -m0
+// GFX12: encoding: [0x05,0x00,0x07,0xd7,0xf0,0xfa,0x00,0x40]
+
+v_cvt_pk_u16_f32 v5, -src_scc, |vcc_lo|
+// GFX12: encoding: [0x05,0x02,0x07,0xd7,0xfd,0xd4,0x00,0x20]
+
+v_cvt_pk_u16_f32 v255, -|0xaf123456|, -|vcc_hi|
+// GFX12: encoding: [0xff,0x03,0x07,0xd7,0xff,0xd6,0x00,0x60,0x56,0x34,0x12,0xaf]
+
+v_cvt_pk_u16_u32 v5, v1, v2
+// GFX12: encoding: [0x05,0x00,0x23,0xd7,0x01,0x05,0x02,0x00]
+
+v_cvt_pk_u16_u32 v5, v255, v255
+// GFX12: encoding: [0x05,0x00,0x23,0xd7,0xff,0xff,0x03,0x00]
+
+v_cvt_pk_u16_u32 v5, s1, s2
+// GFX12: encoding: [0x05,0x00,0x23,0xd7,0x01,0x04,0x00,0x00]
+
+v_cvt_pk_u16_u32 v5, s105, s105
+// GFX12: encoding: [0x05,0x00,0x23,0xd7,0x69,0xd2,0x00,0x00]
+
+v_cvt_pk_u16_u32 v5, vcc_lo, ttmp15
+// GFX12: encoding: [0x05,0x00,0x23,0xd7,0x6a,0xf6,0x00,0x00]
+
+v_cvt_pk_u16_u32 v5, vcc_hi, 0xaf123456
+// GFX12: encoding: [0x05,0x00,0x23,0xd7,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_cvt_pk_u16_u32 v5, ttmp15, src_scc
+// GFX12: encoding: [0x05,0x00,0x23,0xd7,0x7b,0xfa,0x01,0x00]
+
+v_cvt_pk_u16_u32 v5, m0, 0.5
+// GFX12: encoding: [0x05,0x00,0x23,0xd7,0x7d,0xe0,0x01,0x00]
+
+v_cvt_pk_u16_u32 v5, exec_lo, -1
+// GFX12: encoding: [0x05,0x00,0x23,0xd7,0x7e,0x82,0x01,0x00]
+
+v_cvt_pk_u16_u32 v5, exec_hi, null
+// GFX12: encoding: [0x05,0x00,0x23,0xd7,0x7f,0xf8,0x00,0x00]
+
+v_cvt_pk_u16_u32 v5, null, exec_lo
+// GFX12: encoding: [0x05,0x00,0x23,0xd7,0x7c,0xfc,0x00,0x00]
+
+v_cvt_pk_u16_u32 v5, -1, exec_hi
+// GFX12: encoding: [0x05,0x00,0x23,0xd7,0xc1,0xfe,0x00,0x00]
+
+v_cvt_pk_u16_u32 v5, 0.5, m0
+// GFX12: encoding: [0x05,0x00,0x23,0xd7,0xf0,0xfa,0x00,0x00]
+
+v_cvt_pk_u16_u32 v5, src_scc, vcc_lo
+// GFX12: encoding: [0x05,0x00,0x23,0xd7,0xfd,0xd4,0x00,0x00]
+
+v_cvt_pk_u16_u32 v255, 0xaf123456, vcc_hi
+// GFX12: encoding: [0xff,0x00,0x23,0xd7,0xff,0xd6,0x00,0x00,0x56,0x34,0x12,0xaf]
+
+v_cvt_pk_u8_f32 v5, v1, v2, s3
+// GFX12: encoding: [0x05,0x00,0x26,0xd6,0x01,0x05,0x0e,0x00]
+
+v_cvt_pk_u8_f32 v5, v255, s2, s105
+// GFX12: encoding: [0x05,0x00,0x26,0xd6,0xff,0x05,0xa4,0x01]
+
+v_cvt_pk_u8_f32 v5, s1, v255, exec_hi
+// GFX12: encoding: [0x05,0x00,0x26,0xd6,0x01,0xfe,0xff,0x01]
+
+v_cvt_pk_u8_f32 v5, s105, s105, exec_lo
+// GFX12: encoding: [0x05,0x00,0x26,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_cvt_pk_u8_f32 v5, vcc_lo, ttmp15, v3
+// GFX12: encoding: [0x05,0x00,0x26,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_cvt_pk_u8_f32 v5, vcc_hi, 0xaf123456, v255
+// GFX12: encoding: [0x05,0x00,0x26,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+v_cvt_pk_u8_f32 v5, ttmp15, src_scc, ttmp15
+// GFX12: encoding: [0x05,0x00,0x26,0xd6,0x7b,0xfa,0xed,0x01]
+
+v_cvt_pk_u8_f32 v5, m0, 0.5, m0
+// GFX12: encoding: [0x05,0x00,0x26,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_cvt_pk_u8_f32 v5, exec_lo, -1, vcc_hi
+// GFX12: encoding: [0x05,0x00,0x26,0xd6,0x7e,0x82,0xad,0x01]
+
+v_cvt_pk_u8_f32 v5, exec_hi, null, vcc_lo
+// GFX12: encoding: [0x05,0x00,0x26,0xd6,0x7f,0xf8,0xa8,0x01]
+
+v_cvt_pk_u8_f32 v5, null, exec_lo, 0xaf123456
+// GFX12: encoding: [0x05,0x00,0x26,0xd6,0x7c,0xfc,0xfc,0x03,0x56,0x34,0x12,0xaf]
+
+v_cvt_pk_u8_f32 v5, -1, exec_hi, src_scc
+// GFX12: encoding: [0x05,0x00,0x26,0xd6,0xc1,0xfe,0xf4,0x03]
+
+v_cvt_pk_u8_f32 v5, 0.5, m0, 0.5
+// GFX12: encoding: [0x05,0x00,0x26,0xd6,0xf0,0xfa,0xc0,0x03]
+
+v_cvt_pk_u8_f32 v5, src_scc, vcc_lo, -1
+// GFX12: encoding: [0x05,0x00,0x26,0xd6,0xfd,0xd4,0x04,0x03]
+
+v_cvt_pk_u8_f32 v255, -|0xaf123456|, vcc_hi, null
+// GFX12: encoding: [0xff,0x01,0x26,0xd6,0xff,0xd6,0xf0,0x21,0x56,0x34,0x12,0xaf]
+
+v_cvt_pk_norm_i16_f32 v5, v1, v2
+// GFX12: encoding: [0x05,0x00,0x21,0xd7,0x01,0x05,0x02,0x00]
+
+v_cvt_pk_norm_i16_f32 v5, v255, v255
+// GFX12: encoding: [0x05,0x00,0x21,0xd7,0xff,0xff,0x03,0x00]
+
+v_cvt_pk_norm_i16_f32 v5, s1, s2
+// GFX12: encoding: [0x05,0x00,0x21,0xd7,0x01,0x04,0x00,0x00]
+
+v_cvt_pk_norm_i16_f32 v5, s105, s105
+// GFX12: encoding: [0x05,0x00,0x21,0xd7,0x69,0xd2,0x00,0x00]
+
+v_cvt_pk_norm_i16_f32 v5, vcc_lo, ttmp15
+// GFX12: encoding: [0x05,0x00,0x21,0xd7,0x6a,0xf6,0x00,0x00]
+
+v_cvt_pk_norm_i16_f32 v5, vcc_hi, 0xaf123456
+// GFX12: encoding: [0x05,0x00,0x21,0xd7,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_cvt_pk_norm_i16_f32 v5, ttmp15, src_scc
+// GFX12: encoding: [0x05,0x00,0x21,0xd7,0x7b,0xfa,0x01,0x00]
+
+v_cvt_pk_norm_i16_f32 v5, m0, 0.5
+// GFX12: encoding: [0x05,0x00,0x21,0xd7,0x7d,0xe0,0x01,0x00]
+
+v_cvt_pk_norm_i16_f32 v5, exec_lo, -1
+// GFX12: encoding: [0x05,0x00,0x21,0xd7,0x7e,0x82,0x01,0x00]
+
+v_cvt_pk_norm_i16_f32 v5, |exec_hi|, null
+// GFX12: encoding: [0x05,0x01,0x21,0xd7,0x7f,0xf8,0x00,0x00]
+
+v_cvt_pk_norm_i16_f32 v5, null, exec_lo
+// GFX12: encoding: [0x05,0x00,0x21,0xd7,0x7c,0xfc,0x00,0x00]
+
+v_cvt_pk_norm_i16_f32 v5, -1, exec_hi
+// GFX12: encoding: [0x05,0x00,0x21,0xd7,0xc1,0xfe,0x00,0x00]
+
+v_cvt_pk_norm_i16_f32 v5, 0.5, -m0
+// GFX12: encoding: [0x05,0x00,0x21,0xd7,0xf0,0xfa,0x00,0x40]
+
+v_cvt_pk_norm_i16_f32 v5, -src_scc, |vcc_lo|
+// GFX12: encoding: [0x05,0x02,0x21,0xd7,0xfd,0xd4,0x00,0x20]
+
+v_cvt_pk_norm_i16_f32 v255, -|0xaf123456|, -|vcc_hi|
+// GFX12: encoding: [0xff,0x03,0x21,0xd7,0xff,0xd6,0x00,0x60,0x56,0x34,0x12,0xaf]
+
+v_cvt_pk_norm_u16_f32 v5, v1, v2
+// GFX12: encoding: [0x05,0x00,0x22,0xd7,0x01,0x05,0x02,0x00]
+
+v_cvt_pk_norm_u16_f32 v5, v255, v255
+// GFX12: encoding: [0x05,0x00,0x22,0xd7,0xff,0xff,0x03,0x00]
+
+v_cvt_pk_norm_u16_f32 v5, s1, s2
+// GFX12: encoding: [0x05,0x00,0x22,0xd7,0x01,0x04,0x00,0x00]
+
+v_cvt_pk_norm_u16_f32 v5, s105, s105
+// GFX12: encoding: [0x05,0x00,0x22,0xd7,0x69,0xd2,0x00,0x00]
+
+v_cvt_pk_norm_u16_f32 v5, vcc_lo, ttmp15
+// GFX12: encoding: [0x05,0x00,0x22,0xd7,0x6a,0xf6,0x00,0x00]
+
+v_cvt_pk_norm_u16_f32 v5, vcc_hi, 0xaf123456
+// GFX12: encoding: [0x05,0x00,0x22,0xd7,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_cvt_pk_norm_u16_f32 v5, ttmp15, src_scc
+// GFX12: encoding: [0x05,0x00,0x22,0xd7,0x7b,0xfa,0x01,0x00]
+
+v_cvt_pk_norm_u16_f32 v5, m0, 0.5
+// GFX12: encoding: [0x05,0x00,0x22,0xd7,0x7d,0xe0,0x01,0x00]
+
+v_cvt_pk_norm_u16_f32 v5, exec_lo, -1
+// GFX12: encoding: [0x05,0x00,0x22,0xd7,0x7e,0x82,0x01,0x00]
+
+v_cvt_pk_norm_u16_f32 v5, |exec_hi|, null
+// GFX12: encoding: [0x05,0x01,0x22,0xd7,0x7f,0xf8,0x00,0x00]
+
+v_cvt_pk_norm_u16_f32 v5, null, exec_lo
+// GFX12: encoding: [0x05,0x00,0x22,0xd7,0x7c,0xfc,0x00,0x00]
+
+v_cvt_pk_norm_u16_f32 v5, -1, exec_hi
+// GFX12: encoding: [0x05,0x00,0x22,0xd7,0xc1,0xfe,0x00,0x00]
+
+v_cvt_pk_norm_u16_f32 v5, 0.5, -m0
+// GFX12: encoding: [0x05,0x00,0x22,0xd7,0xf0,0xfa,0x00,0x40]
+
+v_cvt_pk_norm_u16_f32 v5, -src_scc, |vcc_lo|
+// GFX12: encoding: [0x05,0x02,0x22,0xd7,0xfd,0xd4,0x00,0x20]
+
+v_cvt_pk_norm_u16_f32 v255, -|0xaf123456|, -|vcc_hi|
+// GFX12: encoding: [0xff,0x03,0x22,0xd7,0xff,0xd6,0x00,0x60,0x56,0x34,0x12,0xaf]
+
+v_div_fixup_f16 v5, v1, v2, s3
+// GFX12: encoding: [0x05,0x00,0x54,0xd6,0x01,0x05,0x0e,0x00]
+
+v_div_fixup_f16 v5, v255, s2, s105
+// GFX12: encoding: [0x05,0x00,0x54,0xd6,0xff,0x05,0xa4,0x01]
+
+v_div_fixup_f16 v5, s1, v255, exec_hi
+// GFX12: encoding: [0x05,0x00,0x54,0xd6,0x01,0xfe,0xff,0x01]
+
+v_div_fixup_f16 v5, s105, s105, exec_lo
+// GFX12: encoding: [0x05,0x00,0x54,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_div_fixup_f16 v5, vcc_lo, ttmp15, v3
+// GFX12: encoding: [0x05,0x00,0x54,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_div_fixup_f16 v5, vcc_hi, 0xfe0b, v255
+// GFX12: encoding: [0x05,0x00,0x54,0xd6,0x6b,0xfe,0xfd,0x07,0x0b,0xfe,0x00,0x00]
+
+v_div_fixup_f16 v5, -|ttmp15|, -|src_scc|, -|ttmp15|
+// GFX12: encoding: [0x05,0x07,0x54,0xd6,0x7b,0xfa,0xed,0xe1]
+
+v_div_fixup_f16 v5, m0, 0.5, m0
+// GFX12: encoding: [0x05,0x00,0x54,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_div_fixup_f16 v5, |exec_lo|, -1, vcc_hi
+// GFX12: encoding: [0x05,0x01,0x54,0xd6,0x7e,0x82,0xad,0x01]
+
+v_div_fixup_f16 v5, -|exec_hi|, null, -|vcc_lo| op_sel:[1,1,1,1]
+// GFX12: encoding: [0x05,0x7d,0x54,0xd6,0x7f,0xf8,0xa8,0xa1]
+
+v_div_fixup_f16 v5, null, exec_lo, -|0xfe0b| op_sel:[0,0,0,0]
+// GFX12: encoding: [0x05,0x04,0x54,0xd6,0x7c,0xfc,0xfc,0x83,0x0b,0xfe,0x00,0x00]
+
+v_div_fixup_f16 v5, -1, -|exec_hi|, -|src_scc| op_sel:[1,0,0,0]
+// GFX12: encoding: [0x05,0x0e,0x54,0xd6,0xc1,0xfe,0xf4,0xc3]
+
+v_div_fixup_f16 v5, 0.5, -m0, 0.5 op_sel:[0,1,0,0]
+// GFX12: encoding: [0x05,0x10,0x54,0xd6,0xf0,0xfa,0xc0,0x43]
+
+v_div_fixup_f16 v5, -src_scc, |vcc_lo|, -1 op_sel:[0,0,1,0]
+// GFX12: encoding: [0x05,0x22,0x54,0xd6,0xfd,0xd4,0x04,0x23]
+
+v_div_fixup_f16 v255, -|0xfe0b|, -|vcc_hi|, null op_sel:[0,0,0,1] clamp
+// GFX12: encoding: [0xff,0xc3,0x54,0xd6,0xff,0xd6,0xf0,0x61,0x0b,0xfe,0x00,0x00]
+
+v_div_fixup_f32 v5, v1, v2, s3
+// GFX12: encoding: [0x05,0x00,0x27,0xd6,0x01,0x05,0x0e,0x00]
+
+v_div_fixup_f32 v5, v255, s2, s105
+// GFX12: encoding: [0x05,0x00,0x27,0xd6,0xff,0x05,0xa4,0x01]
+
+v_div_fixup_f32 v5, s1, v255, exec_hi
+// GFX12: encoding: [0x05,0x00,0x27,0xd6,0x01,0xfe,0xff,0x01]
+
+v_div_fixup_f32 v5, s105, s105, exec_lo
+// GFX12: encoding: [0x05,0x00,0x27,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_div_fixup_f32 v5, vcc_lo, ttmp15, v3
+// GFX12: encoding: [0x05,0x00,0x27,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_div_fixup_f32 v5, vcc_hi, 0xaf123456, v255
+// GFX12: encoding: [0x05,0x00,0x27,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+v_div_fixup_f32 v5, -|ttmp15|, -|src_scc|, -|ttmp15|
+// GFX12: encoding: [0x05,0x07,0x27,0xd6,0x7b,0xfa,0xed,0xe1]
+
+v_div_fixup_f32 v5, m0, 0.5, m0
+// GFX12: encoding: [0x05,0x00,0x27,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_div_fixup_f32 v5, |exec_lo|, -1, vcc_hi
+// GFX12: encoding: [0x05,0x01,0x27,0xd6,0x7e,0x82,0xad,0x01]
+
+v_div_fixup_f32 v5, -|exec_hi|, null, -|vcc_lo|
+// GFX12: encoding: [0x05,0x05,0x27,0xd6,0x7f,0xf8,0xa8,0xa1]
+
+v_div_fixup_f32 v5, null, exec_lo, -|0xaf123456|
+// GFX12: encoding: [0x05,0x04,0x27,0xd6,0x7c,0xfc,0xfc,0x83,0x56,0x34,0x12,0xaf]
+
+v_div_fixup_f32 v5, -1, -|exec_hi|, -|src_scc|
+// GFX12: encoding: [0x05,0x06,0x27,0xd6,0xc1,0xfe,0xf4,0xc3]
+
+v_div_fixup_f32 v5, 0.5, -m0, 0.5 mul:2
+// GFX12: encoding: [0x05,0x00,0x27,0xd6,0xf0,0xfa,0xc0,0x4b]
+
+v_div_fixup_f32 v5, -src_scc, |vcc_lo|, -1 mul:4
+// GFX12: encoding: [0x05,0x02,0x27,0xd6,0xfd,0xd4,0x04,0x33]
+
+v_div_fixup_f32 v255, -|0xaf123456|, -|vcc_hi|, null clamp div:2
+// GFX12: encoding: [0xff,0x83,0x27,0xd6,0xff,0xd6,0xf0,0x79,0x56,0x34,0x12,0xaf]
+
+v_div_fixup_f64 v[5:6], v[1:2], v[2:3], v[3:4]
+// GFX12: encoding: [0x05,0x00,0x28,0xd6,0x01,0x05,0x0e,0x04]
+
+v_div_fixup_f64 v[5:6], v[254:255], v[254:255], s[6:7]
+// GFX12: encoding: [0x05,0x00,0x28,0xd6,0xfe,0xfd,0x1b,0x00]
+
+v_div_fixup_f64 v[5:6], s[2:3], s[4:5], v[254:255]
+// GFX12: encoding: [0x05,0x00,0x28,0xd6,0x02,0x08,0xf8,0x07]
+
+v_div_fixup_f64 v[5:6], -|s[104:105]|, s[104:105], -|s[104:105]|
+// GFX12: encoding: [0x05,0x05,0x28,0xd6,0x68,0xd0,0xa0,0xa1]
+
+v_div_fixup_f64 v[5:6], vcc, -|ttmp[14:15]|, -|ttmp[14:15]|
+// GFX12: encoding: [0x05,0x06,0x28,0xd6,0x6a,0xf4,0xe8,0xc1]
+
+v_div_fixup_f64 v[5:6], -|ttmp[14:15]|, 0xaf123456, null
+// GFX12: encoding: [0x05,0x01,0x28,0xd6,0x7a,0xfe,0xf1,0x21,0x56,0x34,0x12,0xaf]
+
+v_div_fixup_f64 v[5:6], -|exec|, -|src_scc|, -|exec|
+// GFX12: encoding: [0x05,0x07,0x28,0xd6,0x7e,0xfa,0xf9,0xe1]
+
+v_div_fixup_f64 v[5:6], null, 0.5, vcc
+// GFX12: encoding: [0x05,0x00,0x28,0xd6,0x7c,0xe0,0xa9,0x01]
+
+v_div_fixup_f64 v[5:6], -1, -1, 0xaf123456
+// GFX12: encoding: [0x05,0x00,0x28,0xd6,0xc1,0x82,0xfd,0x03,0x56,0x34,0x12,0xaf]
+
+v_div_fixup_f64 v[5:6], 0.5, null, -|src_scc| mul:2
+// GFX12: encoding: [0x05,0x04,0x28,0xd6,0xf0,0xf8,0xf4,0x8b]
+
+v_div_fixup_f64 v[5:6], -|src_scc|, -|exec|, 0.5 mul:4
+// GFX12: encoding: [0x05,0x03,0x28,0xd6,0xfd,0xfc,0xc0,0x73]
+
+v_div_fixup_f64 v[254:255], 0xaf123456, -|vcc|, -1 clamp div:2
+// GFX12: encoding: [0xfe,0x82,0x28,0xd6,0xff,0xd4,0x04,0x5b,0x56,0x34,0x12,0xaf]
+
+v_div_fmas_f32 v5, vcc_lo, v2, vcc_lo
+// W32: encoding: [0x05,0x00,0x37,0xd6,0x6a,0x04,0xaa,0x01]
+
+v_div_fmas_f32 v5, ttmp15, ttmp15, ttmp15
+// W32: encoding: [0x05,0x00,0x37,0xd6,0x7b,0xf6,0xec,0x01]
+
+v_div_fmas_f32 v5, -|m0|, -|v255|, v3
+// W32: encoding: [0x05,0x03,0x37,0xd6,0x7d,0xfe,0x0f,0x64]
+
+v_div_fmas_f32 v5, -|exec_lo|, -|exec_lo|, -|exec_lo|
+// W32: encoding: [0x05,0x07,0x37,0xd6,0x7e,0xfc,0xf8,0xe1]
+
+v_div_fmas_f32 v5, -|exec_hi|, 0.5, -|v255|
+// W32: encoding: [0x05,0x05,0x37,0xd6,0x7f,0xe0,0xfd,0xa7]
+
+v_div_fmas_f32 v5, null, exec_hi, -|exec_hi|
+// W32: encoding: [0x05,0x04,0x37,0xd6,0x7c,0xfe,0xfc,0x81]
+
+v_div_fmas_f32 v5, -1, -|m0|, -|m0|
+// W32: encoding: [0x05,0x06,0x37,0xd6,0xc1,0xfa,0xf4,0xc1]
+
+v_div_fmas_f32 v5, 0.5, -|vcc_lo|, 0.5 mul:2
+// W32: encoding: [0x05,0x02,0x37,0xd6,0xf0,0xd4,0xc0,0x4b]
+
+v_div_fmas_f32 v5, vcc_lo, v2, v3
+// W64: encoding: [0x05,0x00,0x37,0xd6,0x6a,0x04,0x0e,0x04]
+
+v_div_fmas_f32 v5, vcc_hi, v255, vcc_hi
+// W64: encoding: [0x05,0x00,0x37,0xd6,0x6b,0xfe,0xaf,0x01]
+
+v_div_fmas_f32 v5, -|ttmp15|, -|ttmp15|, ttmp15
+// W64: encoding: [0x05,0x03,0x37,0xd6,0x7b,0xf6,0xec,0x61]
+
+v_div_fmas_f32 v5, m0, 0.5, v255
+// W64: encoding: [0x05,0x00,0x37,0xd6,0x7d,0xe0,0xfd,0x07]
+
+v_div_fmas_f32 v5, -|exec_lo|, exec_lo, -|exec_lo|
+// W64: encoding: [0x05,0x05,0x37,0xd6,0x7e,0xfc,0xf8,0xa1]
+
+v_div_fmas_f32 v5, -|exec_hi|, -|exec_hi|, -|exec_hi|
+// W64: encoding: [0x05,0x07,0x37,0xd6,0x7f,0xfe,0xfc,0xe1]
+
+v_div_fmas_f32 v5, null, m0, -|m0|
+// W64: encoding: [0x05,0x04,0x37,0xd6,0x7c,0xfa,0xf4,0x81]
+
+v_div_fmas_f32 v5, -1, -|vcc_lo|, -|vcc_lo|
+// W64: encoding: [0x05,0x06,0x37,0xd6,0xc1,0xd4,0xa8,0xc1]
+
+v_div_fmas_f32 v5, 0.5, -|vcc_hi|, 0.5 mul:2
+// W64: encoding: [0x05,0x02,0x37,0xd6,0xf0,0xd6,0xc0,0x4b]
+
+v_div_fmas_f32 v5, v1, 0xaf123456, 0xaf123456
+// GFX12: encoding: [0x05,0x00,0x37,0xd6,0x01,0xff,0xfd,0x03,0x56,0x34,0x12,0xaf]
+
+v_div_fmas_f32 v5, v255, src_scc, src_scc
+// GFX12: encoding: [0x05,0x00,0x37,0xd6,0xff,0xfb,0xf5,0x03]
+
+v_div_fmas_f32 v5, s105, s105, s105
+// GFX12: encoding: [0x05,0x00,0x37,0xd6,0x69,0xd2,0xa4,0x01]
+
+v_div_fmas_f32 v5, src_scc, -1, -1 mul:4
+// GFX12: encoding: [0x05,0x00,0x37,0xd6,0xfd,0x82,0x05,0x13]
+
+v_div_fmas_f32 v255, -|0xaf123456|, null, null clamp div:2
+// GFX12: encoding: [0xff,0x81,0x37,0xd6,0xff,0xf8,0xf0,0x39,0x56,0x34,0x12,0xaf]
+
+v_div_fmas_f64 v[5:6], v[1:2], 0xaf123456, 0xaf123456
+// GFX12: encoding: [0x05,0x00,0x38,0xd6,0x01,0xff,0xfd,0x03,0x56,0x34,0x12,0xaf]
+
+v_div_fmas_f64 v[5:6], v[254:255], src_scc, v[3:4]
+// GFX12: encoding: [0x05,0x00,0x38,0xd6,0xfe,0xfb,0x0d,0x04]
+
+v_div_fmas_f64 v[5:6], s[104:105], |s[104:105]|, s[104:105]
+// GFX12: encoding: [0x05,0x02,0x38,0xd6,0x68,0xd0,0xa0,0x01]
+
+v_div_fmas_f64 v[5:6], -|vcc|, v[2:3], -|v[254:255]|
+// GFX12: encoding: [0x05,0x05,0x38,0xd6,0x6a,0x04,0xfa,0xa7]
+
+v_div_fmas_f64 v[5:6], -|ttmp[14:15]|, -|ttmp[14:15]|, -|ttmp[14:15]|
+// GFX12: encoding: [0x05,0x07,0x38,0xd6,0x7a,0xf4,0xe8,0xe1]
+
+v_div_fmas_f64 v[5:6], -|exec|, -|v[254:255]|, null
+// GFX12: encoding: [0x05,0x03,0x38,0xd6,0x7e,0xfc,0xf3,0x61]
+
+v_div_fmas_f64 v[5:6], null, 0.5, -src_scc
+// GFX12: encoding: [0x05,0x00,0x38,0xd6,0x7c,0xe0,0xf5,0x83]
+
+v_div_fmas_f64 v[5:6], -1, -exec, |exec|
+// GFX12: encoding: [0x05,0x04,0x38,0xd6,0xc1,0xfc,0xf8,0x41]
+
+v_div_fmas_f64 v[5:6], 0.5, -|vcc|, -|vcc| mul:2
+// GFX12: encoding: [0x05,0x06,0x38,0xd6,0xf0,0xd4,0xa8,0xc9]
+
+v_div_fmas_f64 v[5:6], -|src_scc|, -1, 0.5 mul:4
+// GFX12: encoding: [0x05,0x01,0x38,0xd6,0xfd,0x82,0xc1,0x33]
+
+v_div_fmas_f64 v[254:255], 0xaf123456, null, -1 clamp div:2
+// GFX12: encoding: [0xfe,0x80,0x38,0xd6,0xff,0xf8,0x04,0x1b,0x56,0x34,0x12,0xaf]
+
+v_div_scale_f32 v5, vcc_lo, v1, v2, s3
+// W32: encoding: [0x05,0x6a,0xfc,0xd6,0x01,0x05,0x0e,0x00]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_div_scale_f32 v5, vcc_lo, v255, s2, s105
+// W32: encoding: [0x05,0x6a,0xfc,0xd6,0xff,0x05,0xa4,0x01]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_div_scale_f32 v5, vcc_lo, s1, v255, exec_hi
+// W32: encoding: [0x05,0x6a,0xfc,0xd6,0x01,0xfe,0xff,0x01]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_div_scale_f32 v5, vcc_lo, s105, s105, exec_lo
+// W32: encoding: [0x05,0x6a,0xfc,0xd6,0x69,0xd2,0xf8,0x01]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_div_scale_f32 v5, vcc_lo, vcc_lo, ttmp15, v3
+// W32: encoding: [0x05,0x6a,0xfc,0xd6,0x6a,0xf6,0x0c,0x04]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_div_scale_f32 v5, vcc_lo, vcc_hi, 0xaf123456, v255
+// W32: encoding: [0x05,0x6a,0xfc,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_div_scale_f32 v5, vcc_lo, -ttmp15, -src_scc, -ttmp15
+// W32: encoding: [0x05,0x6a,0xfc,0xd6,0x7b,0xfa,0xed,0xe1]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_div_scale_f32 v5, vcc_lo, m0, 0.5, m0
+// W32: encoding: [0x05,0x6a,0xfc,0xd6,0x7d,0xe0,0xf5,0x01]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_div_scale_f32 v5, vcc_lo, exec_lo, -1, vcc_hi
+// W32: encoding: [0x05,0x6a,0xfc,0xd6,0x7e,0x82,0xad,0x01]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_div_scale_f32 v5, vcc_lo, -exec_hi, null, -vcc_lo
+// W32: encoding: [0x05,0x6a,0xfc,0xd6,0x7f,0xf8,0xa8,0xa1]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_div_scale_f32 v5, vcc_lo, null, exec_lo, neg(0xaf123456)
+// W32: encoding: [0x05,0x6a,0xfc,0xd6,0x7c,0xfc,0xfc,0x83,0x56,0x34,0x12,0xaf]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_div_scale_f32 v5, vcc_lo, -1, -exec_hi, -src_scc
+// W32: encoding: [0x05,0x6a,0xfc,0xd6,0xc1,0xfe,0xf4,0xc3]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_div_scale_f32 v5, vcc_lo, 0.5, -m0, 0.5 mul:2
+// W32: encoding: [0x05,0x6a,0xfc,0xd6,0xf0,0xfa,0xc0,0x4b]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_div_scale_f32 v5, vcc_lo, -src_scc, vcc_lo, -1 mul:4
+// W32: encoding: [0x05,0x6a,0xfc,0xd6,0xfd,0xd4,0x04,0x33]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_div_scale_f32 v255, vcc_lo, neg(0xaf123456), -vcc_hi, null clamp div:2
+// W32: encoding: [0xff,0xea,0xfc,0xd6,0xff,0xd6,0xf0,0x79,0x56,0x34,0x12,0xaf]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_div_scale_f32 v5, vcc, v1, v2, s3
+// W64: encoding: [0x05,0x6a,0xfc,0xd6,0x01,0x05,0x0e,0x00]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_div_scale_f32 v5, vcc, v255, s2, s105
+// W64: encoding: [0x05,0x6a,0xfc,0xd6,0xff,0x05,0xa4,0x01]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_div_scale_f32 v5, vcc, s1, v255, exec_hi
+// W64: encoding: [0x05,0x6a,0xfc,0xd6,0x01,0xfe,0xff,0x01]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_div_scale_f32 v5, vcc, s105, s105, exec_lo
+// W64: encoding: [0x05,0x6a,0xfc,0xd6,0x69,0xd2,0xf8,0x01]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_div_scale_f32 v5, vcc, vcc_lo, ttmp15, v3
+// W64: encoding: [0x05,0x6a,0xfc,0xd6,0x6a,0xf6,0x0c,0x04]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_div_scale_f32 v5, vcc, vcc_hi, 0xaf123456, v255
+// W64: encoding: [0x05,0x6a,0xfc,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_div_scale_f32 v5, vcc, -ttmp15, -src_scc, -ttmp15
+// W64: encoding: [0x05,0x6a,0xfc,0xd6,0x7b,0xfa,0xed,0xe1]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_div_scale_f32 v5, vcc, m0, 0.5, m0
+// W64: encoding: [0x05,0x6a,0xfc,0xd6,0x7d,0xe0,0xf5,0x01]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_div_scale_f32 v5, vcc, exec_lo, -1, vcc_hi
+// W64: encoding: [0x05,0x6a,0xfc,0xd6,0x7e,0x82,0xad,0x01]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_div_scale_f32 v5, vcc, -exec_hi, null, -vcc_lo
+// W64: encoding: [0x05,0x6a,0xfc,0xd6,0x7f,0xf8,0xa8,0xa1]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_div_scale_f32 v5, vcc, null, exec_lo, neg(0xaf123456)
+// W64: encoding: [0x05,0x6a,0xfc,0xd6,0x7c,0xfc,0xfc,0x83,0x56,0x34,0x12,0xaf]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_div_scale_f32 v5, vcc, -1, -exec_hi, -src_scc
+// W64: encoding: [0x05,0x6a,0xfc,0xd6,0xc1,0xfe,0xf4,0xc3]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_div_scale_f32 v5, vcc, 0.5, -m0, 0.5 mul:2
+// W64: encoding: [0x05,0x6a,0xfc,0xd6,0xf0,0xfa,0xc0,0x4b]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_div_scale_f32 v5, vcc, -src_scc, vcc_lo, -1 mul:4
+// W64: encoding: [0x05,0x6a,0xfc,0xd6,0xfd,0xd4,0x04,0x33]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_div_scale_f32 v255, vcc, neg(0xaf123456), -vcc_hi, null clamp div:2
+// W64: encoding: [0xff,0xea,0xfc,0xd6,0xff,0xd6,0xf0,0x79,0x56,0x34,0x12,0xaf]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_div_scale_f64 v[5:6], vcc_lo, v[1:2], v[2:3], v[3:4]
+// W32: encoding: [0x05,0x6a,0xfd,0xd6,0x01,0x05,0x0e,0x04]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_div_scale_f64 v[5:6], vcc_lo, v[254:255], v[254:255], s[6:7]
+// W32: encoding: [0x05,0x6a,0xfd,0xd6,0xfe,0xfd,0x1b,0x00]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_div_scale_f64 v[5:6], vcc_lo, s[2:3], s[4:5], v[254:255]
+// W32: encoding: [0x05,0x6a,0xfd,0xd6,0x02,0x08,0xf8,0x07]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_div_scale_f64 v[5:6], vcc_lo, -s[104:105], s[104:105], -s[104:105]
+// W32: encoding: [0x05,0x6a,0xfd,0xd6,0x68,0xd0,0xa0,0xa1]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_div_scale_f64 v[5:6], vcc_lo, vcc, -ttmp[14:15], -ttmp[14:15]
+// W32: encoding: [0x05,0x6a,0xfd,0xd6,0x6a,0xf4,0xe8,0xc1]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_div_scale_f64 v[5:6], vcc_lo, -ttmp[14:15], 0xaf123456, null
+// W32: encoding: [0x05,0x6a,0xfd,0xd6,0x7a,0xfe,0xf1,0x21,0x56,0x34,0x12,0xaf]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_div_scale_f64 v[5:6], vcc_lo, -exec, -src_scc, -exec
+// W32: encoding: [0x05,0x6a,0xfd,0xd6,0x7e,0xfa,0xf9,0xe1]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_div_scale_f64 v[5:6], vcc_lo, null, 0.5, vcc
+// W32: encoding: [0x05,0x6a,0xfd,0xd6,0x7c,0xe0,0xa9,0x01]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_div_scale_f64 v[5:6], vcc_lo, -1, -1, 0xaf123456
+// W32: encoding: [0x05,0x6a,0xfd,0xd6,0xc1,0x82,0xfd,0x03,0x56,0x34,0x12,0xaf]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_div_scale_f64 v[5:6], vcc_lo, 0.5, null, -src_scc mul:2
+// W32: encoding: [0x05,0x6a,0xfd,0xd6,0xf0,0xf8,0xf4,0x8b]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_div_scale_f64 v[5:6], vcc_lo, -src_scc, -exec, 0.5 mul:4
+// W32: encoding: [0x05,0x6a,0xfd,0xd6,0xfd,0xfc,0xc0,0x73]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_div_scale_f64 v[254:255], vcc_lo, 0xaf123456, -vcc, -1 clamp div:2
+// W32: encoding: [0xfe,0xea,0xfd,0xd6,0xff,0xd4,0x04,0x5b,0x56,0x34,0x12,0xaf]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_div_scale_f64 v[5:6], vcc, v[1:2], v[2:3], v[3:4]
+// W64: encoding: [0x05,0x6a,0xfd,0xd6,0x01,0x05,0x0e,0x04]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_div_scale_f64 v[5:6], vcc, v[254:255], v[254:255], s[6:7]
+// W64: encoding: [0x05,0x6a,0xfd,0xd6,0xfe,0xfd,0x1b,0x00]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_div_scale_f64 v[5:6], vcc, s[2:3], s[4:5], v[254:255]
+// W64: encoding: [0x05,0x6a,0xfd,0xd6,0x02,0x08,0xf8,0x07]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_div_scale_f64 v[5:6], vcc, -s[104:105], s[104:105], -s[104:105]
+// W64: encoding: [0x05,0x6a,0xfd,0xd6,0x68,0xd0,0xa0,0xa1]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_div_scale_f64 v[5:6], vcc, vcc, -ttmp[14:15], -ttmp[14:15]
+// W64: encoding: [0x05,0x6a,0xfd,0xd6,0x6a,0xf4,0xe8,0xc1]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_div_scale_f64 v[5:6], vcc, -ttmp[14:15], 0xaf123456, null
+// W64: encoding: [0x05,0x6a,0xfd,0xd6,0x7a,0xfe,0xf1,0x21,0x56,0x34,0x12,0xaf]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_div_scale_f64 v[5:6], vcc, -exec, -src_scc, -exec
+// W64: encoding: [0x05,0x6a,0xfd,0xd6,0x7e,0xfa,0xf9,0xe1]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_div_scale_f64 v[5:6], vcc, null, 0.5, vcc
+// W64: encoding: [0x05,0x6a,0xfd,0xd6,0x7c,0xe0,0xa9,0x01]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_div_scale_f64 v[5:6], vcc, -1, -1, 0xaf123456
+// W64: encoding: [0x05,0x6a,0xfd,0xd6,0xc1,0x82,0xfd,0x03,0x56,0x34,0x12,0xaf]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_div_scale_f64 v[5:6], vcc, 0.5, null, -src_scc mul:2
+// W64: encoding: [0x05,0x6a,0xfd,0xd6,0xf0,0xf8,0xf4,0x8b]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_div_scale_f64 v[5:6], vcc, -src_scc, -exec, 0.5 mul:4
+// W64: encoding: [0x05,0x6a,0xfd,0xd6,0xfd,0xfc,0xc0,0x73]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_div_scale_f64 v[254:255], vcc, 0xaf123456, -vcc, -1 clamp div:2
+// W64: encoding: [0xfe,0xea,0xfd,0xd6,0xff,0xd4,0x04,0x5b,0x56,0x34,0x12,0xaf]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_dot2_bf16_bf16 v5, v1, v2, s3
+// GFX12: encoding: [0x05,0x00,0x67,0xd6,0x01,0x05,0x0e,0x00]
+
+v_dot2_bf16_bf16 v5, v255, v255, s105
+// GFX12: encoding: [0x05,0x00,0x67,0xd6,0xff,0xff,0xa7,0x01]
+
+v_dot2_bf16_bf16 v5, s1, s2, v3
+// GFX12: encoding: [0x05,0x00,0x67,0xd6,0x01,0x04,0x0c,0x04]
+
+v_dot2_bf16_bf16 v5, s105, s105, m0
+// GFX12: encoding: [0x05,0x00,0x67,0xd6,0x69,0xd2,0xf4,0x01]
+
+v_dot2_bf16_bf16 v5, vcc_lo, ttmp15, v255
+// GFX12: encoding: [0x05,0x00,0x67,0xd6,0x6a,0xf6,0xfc,0x07]
+
+v_dot2_bf16_bf16 v5, vcc_hi, 0xfe0b, vcc_hi
+// GFX12: encoding: [0x05,0x00,0x67,0xd6,0x6b,0xfe,0xad,0x01,0x0b,0xfe,0x00,0x00]
+
+v_dot2_bf16_bf16 v5, ttmp15, src_scc, ttmp15
+// GFX12: encoding: [0x05,0x00,0x67,0xd6,0x7b,0xfa,0xed,0x01]
+
+v_dot2_bf16_bf16 v5, |m0|, -1, -vcc_lo
+// GFX12: encoding: [0x05,0x01,0x67,0xd6,0x7d,0x82,0xa9,0x81]
+
+v_dot2_bf16_bf16 v5, -|exec_lo|, null, -|0xfe0b|
+// GFX12: encoding: [0x05,0x05,0x67,0xd6,0x7e,0xf8,0xfc,0xa3,0x0b,0xfe,0x00,0x00]
+
+v_dot2_bf16_bf16 v5, -|exec_hi|, -|exec_lo|, -|exec_lo|
+// GFX12: encoding: [0x05,0x07,0x67,0xd6,0x7f,0xfc,0xf8,0xe1]
+
+v_dot2_bf16_bf16 v5, null, -exec_hi, |src_scc|
+// GFX12: encoding: [0x05,0x04,0x67,0xd6,0x7c,0xfe,0xf4,0x43]
+
+v_dot2_bf16_bf16 v5, -1, -|m0|, -|exec_hi| op_sel:[0,0,0,0]
+// GFX12: encoding: [0x05,0x06,0x67,0xd6,0xc1,0xfa,0xfc,0xc1]
+
+v_dot2_bf16_bf16 v5, -src_scc, |vcc_lo|, -1 op_sel:[0,0,1,0]
+// GFX12: encoding: [0x05,0x22,0x67,0xd6,0xfd,0xd4,0x04,0x23]
+
+v_dot2_bf16_bf16 v255, -|0xfe0b|, -|vcc_hi|, null op_sel:[0,0,0,1]
+// GFX12: encoding: [0xff,0x43,0x67,0xd6,0xff,0xd6,0xf0,0x61,0x0b,0xfe,0x00,0x00]
+
+v_dot2_f16_f16 v5, v1, v2, s3
+// GFX12: encoding: [0x05,0x00,0x66,0xd6,0x01,0x05,0x0e,0x00]
+
+v_dot2_f16_f16 v5, v255, s2, s105
+// GFX12: encoding: [0x05,0x00,0x66,0xd6,0xff,0x05,0xa4,0x01]
+
+v_dot2_f16_f16 v5, s1, v255, exec_hi
+// GFX12: encoding: [0x05,0x00,0x66,0xd6,0x01,0xfe,0xff,0x01]
+
+v_dot2_f16_f16 v5, s105, s105, exec_lo
+// GFX12: encoding: [0x05,0x00,0x66,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_dot2_f16_f16 v5, vcc_lo, ttmp15, v3
+// GFX12: encoding: [0x05,0x00,0x66,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_dot2_f16_f16 v5, vcc_hi, 0xfe0b, v255
+// GFX12: encoding: [0x05,0x00,0x66,0xd6,0x6b,0xfe,0xfd,0x07,0x0b,0xfe,0x00,0x00]
+
+v_dot2_f16_f16 v5, -|ttmp15|, -|src_scc|, -|ttmp15|
+// GFX12: encoding: [0x05,0x07,0x66,0xd6,0x7b,0xfa,0xed,0xe1]
+
+v_dot2_f16_f16 v5, m0, 0.5, m0
+// GFX12: encoding: [0x05,0x00,0x66,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_dot2_f16_f16 v5, |exec_lo|, -1, vcc_hi
+// GFX12: encoding: [0x05,0x01,0x66,0xd6,0x7e,0x82,0xad,0x01]
+
+v_dot2_f16_f16 v5, -|exec_hi|, null, -|vcc_lo|
+// GFX12: encoding: [0x05,0x05,0x66,0xd6,0x7f,0xf8,0xa8,0xa1]
+
+v_dot2_f16_f16 v5, null, exec_lo, -|0xfe0b|
+// GFX12: encoding: [0x05,0x04,0x66,0xd6,0x7c,0xfc,0xfc,0x83,0x0b,0xfe,0x00,0x00]
+
+v_dot2_f16_f16 v5, -1, -|exec_hi|, -|src_scc|
+// GFX12: encoding: [0x05,0x06,0x66,0xd6,0xc1,0xfe,0xf4,0xc3]
+
+v_dot2_f16_f16 v5, 0.5, -m0, 0.5 op_sel:[0,0,0,0]
+// GFX12: encoding: [0x05,0x00,0x66,0xd6,0xf0,0xfa,0xc0,0x43]
+
+v_dot2_f16_f16 v5, -src_scc, |vcc_lo|, -1 op_sel:[0,0,1,0]
+// GFX12: encoding: [0x05,0x22,0x66,0xd6,0xfd,0xd4,0x04,0x23]
+
+v_dot2_f16_f16 v255, -|0xfe0b|, -|vcc_hi|, null op_sel:[0,0,0,1]
+// GFX12: encoding: [0xff,0x43,0x66,0xd6,0xff,0xd6,0xf0,0x61,0x0b,0xfe,0x00,0x00]
+
+v_fma_dx9_zero_f32 v5, v1, v2, s3
+// GFX12: encoding: [0x05,0x00,0x09,0xd6,0x01,0x05,0x0e,0x00]
+
+v_fma_dx9_zero_f32 v5, v255, s2, s105
+// GFX12: encoding: [0x05,0x00,0x09,0xd6,0xff,0x05,0xa4,0x01]
+
+v_fma_dx9_zero_f32 v5, s1, v255, exec_hi
+// GFX12: encoding: [0x05,0x00,0x09,0xd6,0x01,0xfe,0xff,0x01]
+
+v_fma_dx9_zero_f32 v5, s105, s105, exec_lo
+// GFX12: encoding: [0x05,0x00,0x09,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_fma_dx9_zero_f32 v5, vcc_lo, ttmp15, v3
+// GFX12: encoding: [0x05,0x00,0x09,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_fma_dx9_zero_f32 v5, vcc_hi, 0xaf123456, v255
+// GFX12: encoding: [0x05,0x00,0x09,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+v_fma_dx9_zero_f32 v5, -|ttmp15|, -|src_scc|, -|ttmp15|
+// GFX12: encoding: [0x05,0x07,0x09,0xd6,0x7b,0xfa,0xed,0xe1]
+
+v_fma_dx9_zero_f32 v5, m0, 0.5, m0
+// GFX12: encoding: [0x05,0x00,0x09,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_fma_dx9_zero_f32 v5, |exec_lo|, -1, vcc_hi
+// GFX12: encoding: [0x05,0x01,0x09,0xd6,0x7e,0x82,0xad,0x01]
+
+v_fma_dx9_zero_f32 v5, -|exec_hi|, null, -|vcc_lo|
+// GFX12: encoding: [0x05,0x05,0x09,0xd6,0x7f,0xf8,0xa8,0xa1]
+
+v_fma_dx9_zero_f32 v5, null, exec_lo, -|0xaf123456|
+// GFX12: encoding: [0x05,0x04,0x09,0xd6,0x7c,0xfc,0xfc,0x83,0x56,0x34,0x12,0xaf]
+
+v_fma_dx9_zero_f32 v5, -1, -|exec_hi|, -|src_scc|
+// GFX12: encoding: [0x05,0x06,0x09,0xd6,0xc1,0xfe,0xf4,0xc3]
+
+v_fma_dx9_zero_f32 v5, 0.5, -m0, 0.5 mul:2
+// GFX12: encoding: [0x05,0x00,0x09,0xd6,0xf0,0xfa,0xc0,0x4b]
+
+v_fma_dx9_zero_f32 v5, -src_scc, |vcc_lo|, -1 mul:4
+// GFX12: encoding: [0x05,0x02,0x09,0xd6,0xfd,0xd4,0x04,0x33]
+
+v_fma_dx9_zero_f32 v255, -|0xaf123456|, -|vcc_hi|, null clamp div:2
+// GFX12: encoding: [0xff,0x83,0x09,0xd6,0xff,0xd6,0xf0,0x79,0x56,0x34,0x12,0xaf]
+
+v_fma_f16 v5, v1, v2, s3
+// GFX12: encoding: [0x05,0x00,0x48,0xd6,0x01,0x05,0x0e,0x00]
+
+v_fma_f16 v5, v255, s2, s105
+// GFX12: encoding: [0x05,0x00,0x48,0xd6,0xff,0x05,0xa4,0x01]
+
+v_fma_f16 v5, s1, v255, exec_hi
+// GFX12: encoding: [0x05,0x00,0x48,0xd6,0x01,0xfe,0xff,0x01]
+
+v_fma_f16 v5, s105, s105, exec_lo
+// GFX12: encoding: [0x05,0x00,0x48,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_fma_f16 v5, vcc_lo, ttmp15, v3
+// GFX12: encoding: [0x05,0x00,0x48,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_fma_f16 v5, vcc_hi, 0xfe0b, v255
+// GFX12: encoding: [0x05,0x00,0x48,0xd6,0x6b,0xfe,0xfd,0x07,0x0b,0xfe,0x00,0x00]
+
+v_fma_f16 v5, -|ttmp15|, -|src_scc|, -|ttmp15|
+// GFX12: encoding: [0x05,0x07,0x48,0xd6,0x7b,0xfa,0xed,0xe1]
+
+v_fma_f16 v5, m0, 0.5, m0
+// GFX12: encoding: [0x05,0x00,0x48,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_fma_f16 v5, |exec_lo|, -1, vcc_hi
+// GFX12: encoding: [0x05,0x01,0x48,0xd6,0x7e,0x82,0xad,0x01]
+
+v_fma_f16 v5, -|exec_hi|, null, -|vcc_lo| op_sel:[1,1,1,1]
+// GFX12: encoding: [0x05,0x7d,0x48,0xd6,0x7f,0xf8,0xa8,0xa1]
+
+v_fma_f16 v5, null, exec_lo, -|0xfe0b| op_sel:[0,0,0,0]
+// GFX12: encoding: [0x05,0x04,0x48,0xd6,0x7c,0xfc,0xfc,0x83,0x0b,0xfe,0x00,0x00]
+
+v_fma_f16 v5, -1, -|exec_hi|, -|src_scc| op_sel:[1,0,0,0]
+// GFX12: encoding: [0x05,0x0e,0x48,0xd6,0xc1,0xfe,0xf4,0xc3]
+
+v_fma_f16 v5, 0.5, -m0, 0.5 op_sel:[0,1,0,0]
+// GFX12: encoding: [0x05,0x10,0x48,0xd6,0xf0,0xfa,0xc0,0x43]
+
+v_fma_f16 v5, -src_scc, |vcc_lo|, -1 op_sel:[0,0,1,0]
+// GFX12: encoding: [0x05,0x22,0x48,0xd6,0xfd,0xd4,0x04,0x23]
+
+v_fma_f16 v255, -|0xfe0b|, -|vcc_hi|, null op_sel:[0,0,0,1] clamp
+// GFX12: encoding: [0xff,0xc3,0x48,0xd6,0xff,0xd6,0xf0,0x61,0x0b,0xfe,0x00,0x00]
+
+v_fma_f32 v5, v1, v2, s3
+// GFX12: encoding: [0x05,0x00,0x13,0xd6,0x01,0x05,0x0e,0x00]
+
+v_fma_f32 v5, v255, s2, s105
+// GFX12: encoding: [0x05,0x00,0x13,0xd6,0xff,0x05,0xa4,0x01]
+
+v_fma_f32 v5, s1, v255, exec_hi
+// GFX12: encoding: [0x05,0x00,0x13,0xd6,0x01,0xfe,0xff,0x01]
+
+v_fma_f32 v5, s105, s105, exec_lo
+// GFX12: encoding: [0x05,0x00,0x13,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_fma_f32 v5, vcc_lo, ttmp15, v3
+// GFX12: encoding: [0x05,0x00,0x13,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_fma_f32 v5, vcc_hi, 0xaf123456, v255
+// GFX12: encoding: [0x05,0x00,0x13,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+v_fma_f32 v5, -|ttmp15|, -|src_scc|, -|ttmp15|
+// GFX12: encoding: [0x05,0x07,0x13,0xd6,0x7b,0xfa,0xed,0xe1]
+
+v_fma_f32 v5, m0, 0.5, m0
+// GFX12: encoding: [0x05,0x00,0x13,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_fma_f32 v5, |exec_lo|, -1, vcc_hi
+// GFX12: encoding: [0x05,0x01,0x13,0xd6,0x7e,0x82,0xad,0x01]
+
+v_fma_f32 v5, -|exec_hi|, null, -|vcc_lo|
+// GFX12: encoding: [0x05,0x05,0x13,0xd6,0x7f,0xf8,0xa8,0xa1]
+
+v_fma_f32 v5, null, exec_lo, -|0xaf123456|
+// GFX12: encoding: [0x05,0x04,0x13,0xd6,0x7c,0xfc,0xfc,0x83,0x56,0x34,0x12,0xaf]
+
+v_fma_f32 v5, -1, -|exec_hi|, -|src_scc|
+// GFX12: encoding: [0x05,0x06,0x13,0xd6,0xc1,0xfe,0xf4,0xc3]
+
+v_fma_f32 v5, 0.5, -m0, 0.5 mul:2
+// GFX12: encoding: [0x05,0x00,0x13,0xd6,0xf0,0xfa,0xc0,0x4b]
+
+v_fma_f32 v5, -src_scc, |vcc_lo|, -1 mul:4
+// GFX12: encoding: [0x05,0x02,0x13,0xd6,0xfd,0xd4,0x04,0x33]
+
+v_fma_f32 v255, -|0xaf123456|, -|vcc_hi|, null clamp div:2
+// GFX12: encoding: [0xff,0x83,0x13,0xd6,0xff,0xd6,0xf0,0x79,0x56,0x34,0x12,0xaf]
+
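+// The f64 forms take 64-bit operands, so sources and destinations are
+// register pairs (v[5:6], s[104:105], ttmp[14:15]) or 64-bit built-ins such
+// as vcc and exec.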
+v_fma_f64 v[5:6], v[1:2], v[2:3], v[3:4]
+// GFX12: encoding: [0x05,0x00,0x14,0xd6,0x01,0x05,0x0e,0x04]
+
+v_fma_f64 v[5:6], v[254:255], v[254:255], s[6:7]
+// GFX12: encoding: [0x05,0x00,0x14,0xd6,0xfe,0xfd,0x1b,0x00]
+
+v_fma_f64 v[5:6], s[2:3], s[4:5], v[254:255]
+// GFX12: encoding: [0x05,0x00,0x14,0xd6,0x02,0x08,0xf8,0x07]
+
+v_fma_f64 v[5:6], -|s[104:105]|, s[104:105], -|s[104:105]|
+// GFX12: encoding: [0x05,0x05,0x14,0xd6,0x68,0xd0,0xa0,0xa1]
+
+v_fma_f64 v[5:6], vcc, -|ttmp[14:15]|, -|ttmp[14:15]|
+// GFX12: encoding: [0x05,0x06,0x14,0xd6,0x6a,0xf4,0xe8,0xc1]
+
+v_fma_f64 v[5:6], -|ttmp[14:15]|, 0xaf123456, null
+// GFX12: encoding: [0x05,0x01,0x14,0xd6,0x7a,0xfe,0xf1,0x21,0x56,0x34,0x12,0xaf]
+
+v_fma_f64 v[5:6], -|exec|, -|src_scc|, -|exec|
+// GFX12: encoding: [0x05,0x07,0x14,0xd6,0x7e,0xfa,0xf9,0xe1]
+
+v_fma_f64 v[5:6], null, 0.5, vcc
+// GFX12: encoding: [0x05,0x00,0x14,0xd6,0x7c,0xe0,0xa9,0x01]
+
+v_fma_f64 v[5:6], -1, -1, 0xaf123456
+// GFX12: encoding: [0x05,0x00,0x14,0xd6,0xc1,0x82,0xfd,0x03,0x56,0x34,0x12,0xaf]
+
+v_fma_f64 v[5:6], 0.5, null, -|src_scc| mul:2
+// GFX12: encoding: [0x05,0x04,0x14,0xd6,0xf0,0xf8,0xf4,0x8b]
+
+v_fma_f64 v[5:6], -|src_scc|, -|exec|, 0.5 mul:4
+// GFX12: encoding: [0x05,0x03,0x14,0xd6,0xfd,0xfc,0xc0,0x73]
+
+v_fma_f64 v[254:255], 0xaf123456, -|vcc|, -1 clamp div:2
+// GFX12: encoding: [0xfe,0x82,0x14,0xd6,0xff,0xd4,0x04,0x5b,0x56,0x34,0x12,0xaf]
+
+v_ldexp_f32 v5, v1, v2
+// GFX12: encoding: [0x05,0x00,0x1c,0xd7,0x01,0x05,0x02,0x00]
+
+v_ldexp_f32 v5, v255, v255
+// GFX12: encoding: [0x05,0x00,0x1c,0xd7,0xff,0xff,0x03,0x00]
+
+v_ldexp_f32 v5, s1, s2
+// GFX12: encoding: [0x05,0x00,0x1c,0xd7,0x01,0x04,0x00,0x00]
+
+v_ldexp_f32 v5, s105, s105
+// GFX12: encoding: [0x05,0x00,0x1c,0xd7,0x69,0xd2,0x00,0x00]
+
+v_ldexp_f32 v5, vcc_lo, ttmp15
+// GFX12: encoding: [0x05,0x00,0x1c,0xd7,0x6a,0xf6,0x00,0x00]
+
+v_ldexp_f32 v5, vcc_hi, 0xaf123456
+// GFX12: encoding: [0x05,0x00,0x1c,0xd7,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_ldexp_f32 v5, ttmp15, src_scc
+// GFX12: encoding: [0x05,0x00,0x1c,0xd7,0x7b,0xfa,0x01,0x00]
+
+v_ldexp_f32 v5, m0, 0.5
+// GFX12: encoding: [0x05,0x00,0x1c,0xd7,0x7d,0xe0,0x01,0x00]
+
+v_ldexp_f32 v5, exec_lo, -1
+// GFX12: encoding: [0x05,0x00,0x1c,0xd7,0x7e,0x82,0x01,0x00]
+
+v_ldexp_f32 v5, exec_hi, null
+// GFX12: encoding: [0x05,0x00,0x1c,0xd7,0x7f,0xf8,0x00,0x00]
+
+v_ldexp_f32 v5, null, exec_lo
+// GFX12: encoding: [0x05,0x00,0x1c,0xd7,0x7c,0xfc,0x00,0x00]
+
+v_ldexp_f32 v5, -1, exec_hi
+// GFX12: encoding: [0x05,0x00,0x1c,0xd7,0xc1,0xfe,0x00,0x00]
+
+v_ldexp_f32 v5, 0.5, m0 mul:2
+// GFX12: encoding: [0x05,0x00,0x1c,0xd7,0xf0,0xfa,0x00,0x08]
+
+v_ldexp_f32 v5, src_scc, vcc_lo mul:4
+// GFX12: encoding: [0x05,0x00,0x1c,0xd7,0xfd,0xd4,0x00,0x10]
+
+v_ldexp_f32 v255, -|0xaf123456|, vcc_hi clamp div:2
+// GFX12: encoding: [0xff,0x81,0x1c,0xd7,0xff,0xd6,0x00,0x38,0x56,0x34,0x12,0xaf]
+
+v_ldexp_f64 v[5:6], v[1:2], v2
+// GFX12: encoding: [0x05,0x00,0x2b,0xd7,0x01,0x05,0x02,0x00]
+
+v_ldexp_f64 v[5:6], v[1:2], v255
+// GFX12: encoding: [0x05,0x00,0x2b,0xd7,0x01,0xff,0x03,0x00]
+
+v_ldexp_f64 v[5:6], v[1:2], s2
+// GFX12: encoding: [0x05,0x00,0x2b,0xd7,0x01,0x05,0x00,0x00]
+
+v_ldexp_f64 v[5:6], v[1:2], s105
+// GFX12: encoding: [0x05,0x00,0x2b,0xd7,0x01,0xd3,0x00,0x00]
+
+v_ldexp_f64 v[5:6], v[254:255], ttmp15
+// GFX12: encoding: [0x05,0x00,0x2b,0xd7,0xfe,0xf7,0x00,0x00]
+
+v_ldexp_f64 v[5:6], s[2:3], vcc_hi
+// GFX12: encoding: [0x05,0x00,0x2b,0xd7,0x02,0xd6,0x00,0x00]
+
+v_ldexp_f64 v[5:6], s[104:105], vcc_lo
+// GFX12: encoding: [0x05,0x00,0x2b,0xd7,0x68,0xd4,0x00,0x00]
+
+v_ldexp_f64 v[5:6], vcc, m0
+// GFX12: encoding: [0x05,0x00,0x2b,0xd7,0x6a,0xfa,0x00,0x00]
+
+v_ldexp_f64 v[5:6], ttmp[14:15], exec_hi
+// GFX12: encoding: [0x05,0x00,0x2b,0xd7,0x7a,0xfe,0x00,0x00]
+
+v_ldexp_f64 v[5:6], exec, exec_lo
+// GFX12: encoding: [0x05,0x00,0x2b,0xd7,0x7e,0xfc,0x00,0x00]
+
+v_ldexp_f64 v[5:6], null, null
+// GFX12: encoding: [0x05,0x00,0x2b,0xd7,0x7c,0xf8,0x00,0x00]
+
+v_ldexp_f64 v[5:6], -1, -1
+// GFX12: encoding: [0x05,0x00,0x2b,0xd7,0xc1,0x82,0x01,0x00]
+
+v_ldexp_f64 v[5:6], 0.5, 0.5 mul:2
+// GFX12: encoding: [0x05,0x00,0x2b,0xd7,0xf0,0xe0,0x01,0x08]
+
+v_ldexp_f64 v[5:6], -|src_scc|, src_scc mul:4
+// GFX12: encoding: [0x05,0x01,0x2b,0xd7,0xfd,0xfa,0x01,0x30]
+
+v_ldexp_f64 v[254:255], 0xaf123456, 0xaf123456 clamp div:2
+// GFX12: encoding: [0xfe,0x80,0x2b,0xd7,0xff,0xfe,0x01,0x18,0x56,0x34,0x12,0xaf]
+
+v_lerp_u8 v5, v1, v2, s3
+// GFX12: encoding: [0x05,0x00,0x15,0xd6,0x01,0x05,0x0e,0x00]
+
+v_lerp_u8 v5, v255, s2, s105
+// GFX12: encoding: [0x05,0x00,0x15,0xd6,0xff,0x05,0xa4,0x01]
+
+v_lerp_u8 v5, s1, v255, exec_hi
+// GFX12: encoding: [0x05,0x00,0x15,0xd6,0x01,0xfe,0xff,0x01]
+
+v_lerp_u8 v5, s105, s105, exec_lo
+// GFX12: encoding: [0x05,0x00,0x15,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_lerp_u8 v5, vcc_lo, ttmp15, v3
+// GFX12: encoding: [0x05,0x00,0x15,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_lerp_u8 v5, vcc_hi, 0xaf123456, v255
+// GFX12: encoding: [0x05,0x00,0x15,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+v_lerp_u8 v5, ttmp15, src_scc, ttmp15
+// GFX12: encoding: [0x05,0x00,0x15,0xd6,0x7b,0xfa,0xed,0x01]
+
+v_lerp_u8 v5, m0, 0.5, m0
+// GFX12: encoding: [0x05,0x00,0x15,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_lerp_u8 v5, exec_lo, -1, vcc_hi
+// GFX12: encoding: [0x05,0x00,0x15,0xd6,0x7e,0x82,0xad,0x01]
+
+v_lerp_u8 v5, exec_hi, null, vcc_lo
+// GFX12: encoding: [0x05,0x00,0x15,0xd6,0x7f,0xf8,0xa8,0x01]
+
+v_lerp_u8 v5, null, exec_lo, 0xaf123456
+// GFX12: encoding: [0x05,0x00,0x15,0xd6,0x7c,0xfc,0xfc,0x03,0x56,0x34,0x12,0xaf]
+
+v_lerp_u8 v5, -1, exec_hi, src_scc
+// GFX12: encoding: [0x05,0x00,0x15,0xd6,0xc1,0xfe,0xf4,0x03]
+
+v_lerp_u8 v5, 0.5, m0, 0.5
+// GFX12: encoding: [0x05,0x00,0x15,0xd6,0xf0,0xfa,0xc0,0x03]
+
+v_lerp_u8 v5, src_scc, vcc_lo, -1
+// GFX12: encoding: [0x05,0x00,0x15,0xd6,0xfd,0xd4,0x04,0x03]
+
+v_lerp_u8 v255, 0xaf123456, vcc_hi, null
+// GFX12: encoding: [0xff,0x00,0x15,0xd6,0xff,0xd6,0xf0,0x01,0x56,0x34,0x12,0xaf]
+
+v_lshl_add_u32 v5, v1, v2, s3
+// GFX12: encoding: [0x05,0x00,0x46,0xd6,0x01,0x05,0x0e,0x00]
+
+v_lshl_add_u32 v5, v255, s2, s105
+// GFX12: encoding: [0x05,0x00,0x46,0xd6,0xff,0x05,0xa4,0x01]
+
+v_lshl_add_u32 v5, s1, v255, exec_hi
+// GFX12: encoding: [0x05,0x00,0x46,0xd6,0x01,0xfe,0xff,0x01]
+
+v_lshl_add_u32 v5, s105, s105, exec_lo
+// GFX12: encoding: [0x05,0x00,0x46,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_lshl_add_u32 v5, vcc_lo, ttmp15, v3
+// GFX12: encoding: [0x05,0x00,0x46,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_lshl_add_u32 v5, vcc_hi, 0xaf123456, v255
+// GFX12: encoding: [0x05,0x00,0x46,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+v_lshl_add_u32 v5, ttmp15, src_scc, ttmp15
+// GFX12: encoding: [0x05,0x00,0x46,0xd6,0x7b,0xfa,0xed,0x01]
+
+v_lshl_add_u32 v5, m0, 0.5, m0
+// GFX12: encoding: [0x05,0x00,0x46,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_lshl_add_u32 v5, exec_lo, -1, vcc_hi
+// GFX12: encoding: [0x05,0x00,0x46,0xd6,0x7e,0x82,0xad,0x01]
+
+v_lshl_add_u32 v5, exec_hi, null, vcc_lo
+// GFX12: encoding: [0x05,0x00,0x46,0xd6,0x7f,0xf8,0xa8,0x01]
+
+v_lshl_add_u32 v5, null, exec_lo, 0xaf123456
+// GFX12: encoding: [0x05,0x00,0x46,0xd6,0x7c,0xfc,0xfc,0x03,0x56,0x34,0x12,0xaf]
+
+v_lshl_add_u32 v5, -1, exec_hi, src_scc
+// GFX12: encoding: [0x05,0x00,0x46,0xd6,0xc1,0xfe,0xf4,0x03]
+
+v_lshl_add_u32 v5, 0.5, m0, 0.5
+// GFX12: encoding: [0x05,0x00,0x46,0xd6,0xf0,0xfa,0xc0,0x03]
+
+v_lshl_add_u32 v5, src_scc, vcc_lo, -1
+// GFX12: encoding: [0x05,0x00,0x46,0xd6,0xfd,0xd4,0x04,0x03]
+
+v_lshl_add_u32 v255, 0xaf123456, vcc_hi, null
+// GFX12: encoding: [0xff,0x00,0x46,0xd6,0xff,0xd6,0xf0,0x01,0x56,0x34,0x12,0xaf]
+
+v_lshl_or_b32 v5, v1, v2, s3
+// GFX12: encoding: [0x05,0x00,0x56,0xd6,0x01,0x05,0x0e,0x00]
+
+v_lshl_or_b32 v5, v255, s2, s105
+// GFX12: encoding: [0x05,0x00,0x56,0xd6,0xff,0x05,0xa4,0x01]
+
+v_lshl_or_b32 v5, s1, v255, exec_hi
+// GFX12: encoding: [0x05,0x00,0x56,0xd6,0x01,0xfe,0xff,0x01]
+
+v_lshl_or_b32 v5, s105, s105, exec_lo
+// GFX12: encoding: [0x05,0x00,0x56,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_lshl_or_b32 v5, vcc_lo, ttmp15, v3
+// GFX12: encoding: [0x05,0x00,0x56,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_lshl_or_b32 v5, vcc_hi, 0xaf123456, v255
+// GFX12: encoding: [0x05,0x00,0x56,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+v_lshl_or_b32 v5, ttmp15, src_scc, ttmp15
+// GFX12: encoding: [0x05,0x00,0x56,0xd6,0x7b,0xfa,0xed,0x01]
+
+v_lshl_or_b32 v5, m0, 0.5, m0
+// GFX12: encoding: [0x05,0x00,0x56,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_lshl_or_b32 v5, exec_lo, -1, vcc_hi
+// GFX12: encoding: [0x05,0x00,0x56,0xd6,0x7e,0x82,0xad,0x01]
+
+v_lshl_or_b32 v5, exec_hi, null, vcc_lo
+// GFX12: encoding: [0x05,0x00,0x56,0xd6,0x7f,0xf8,0xa8,0x01]
+
+v_lshl_or_b32 v5, null, exec_lo, 0xaf123456
+// GFX12: encoding: [0x05,0x00,0x56,0xd6,0x7c,0xfc,0xfc,0x03,0x56,0x34,0x12,0xaf]
+
+v_lshl_or_b32 v5, -1, exec_hi, src_scc
+// GFX12: encoding: [0x05,0x00,0x56,0xd6,0xc1,0xfe,0xf4,0x03]
+
+v_lshl_or_b32 v5, 0.5, m0, 0.5
+// GFX12: encoding: [0x05,0x00,0x56,0xd6,0xf0,0xfa,0xc0,0x03]
+
+v_lshl_or_b32 v5, src_scc, vcc_lo, -1
+// GFX12: encoding: [0x05,0x00,0x56,0xd6,0xfd,0xd4,0x04,0x03]
+
+v_lshl_or_b32 v255, 0xaf123456, vcc_hi, null
+// GFX12: encoding: [0xff,0x00,0x56,0xd6,0xff,0xd6,0xf0,0x01,0x56,0x34,0x12,0xaf]
+
+v_lshlrev_b16 v5, v1, v2
+// GFX12: encoding: [0x05,0x00,0x38,0xd7,0x01,0x05,0x02,0x00]
+
+v_lshlrev_b16 v5, v255, v255
+// GFX12: encoding: [0x05,0x00,0x38,0xd7,0xff,0xff,0x03,0x00]
+
+v_lshlrev_b16 v5, s1, s2
+// GFX12: encoding: [0x05,0x00,0x38,0xd7,0x01,0x04,0x00,0x00]
+
+v_lshlrev_b16 v5, s105, s105
+// GFX12: encoding: [0x05,0x00,0x38,0xd7,0x69,0xd2,0x00,0x00]
+
+v_lshlrev_b16 v5, vcc_lo, ttmp15
+// GFX12: encoding: [0x05,0x00,0x38,0xd7,0x6a,0xf6,0x00,0x00]
+
+v_lshlrev_b16 v5, vcc_hi, 0xfe0b
+// GFX12: encoding: [0x05,0x00,0x38,0xd7,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+v_lshlrev_b16 v5, ttmp15, src_scc
+// GFX12: encoding: [0x05,0x00,0x38,0xd7,0x7b,0xfa,0x01,0x00]
+
+v_lshlrev_b16 v5, m0, 0.5
+// GFX12: encoding: [0x05,0x00,0x38,0xd7,0x7d,0xe0,0x01,0x00]
+
+v_lshlrev_b16 v5, exec_lo, -1
+// GFX12: encoding: [0x05,0x00,0x38,0xd7,0x7e,0x82,0x01,0x00]
+
+v_lshlrev_b16 v5, exec_hi, null
+// GFX12: encoding: [0x05,0x00,0x38,0xd7,0x7f,0xf8,0x00,0x00]
+
+v_lshlrev_b16 v5, null, exec_lo
+// GFX12: encoding: [0x05,0x00,0x38,0xd7,0x7c,0xfc,0x00,0x00]
+
+v_lshlrev_b16 v5, -1, exec_hi
+// GFX12: encoding: [0x05,0x00,0x38,0xd7,0xc1,0xfe,0x00,0x00]
+
+v_lshlrev_b16 v5, 0.5, m0
+// GFX12: encoding: [0x05,0x00,0x38,0xd7,0xf0,0xfa,0x00,0x00]
+
+v_lshlrev_b16 v5, src_scc, vcc_lo
+// GFX12: encoding: [0x05,0x00,0x38,0xd7,0xfd,0xd4,0x00,0x00]
+
+v_lshlrev_b16 v255, 0xfe0b, vcc_hi
+// GFX12: encoding: [0xff,0x00,0x38,0xd7,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
+
+v_lshrrev_b16 v5, v1, v2
+// GFX12: encoding: [0x05,0x00,0x39,0xd7,0x01,0x05,0x02,0x00]
+
+v_lshrrev_b16 v5, v255, v255
+// GFX12: encoding: [0x05,0x00,0x39,0xd7,0xff,0xff,0x03,0x00]
+
+v_lshrrev_b16 v5, s1, s2
+// GFX12: encoding: [0x05,0x00,0x39,0xd7,0x01,0x04,0x00,0x00]
+
+v_lshrrev_b16 v5, s105, s105
+// GFX12: encoding: [0x05,0x00,0x39,0xd7,0x69,0xd2,0x00,0x00]
+
+v_lshrrev_b16 v5, vcc_lo, ttmp15
+// GFX12: encoding: [0x05,0x00,0x39,0xd7,0x6a,0xf6,0x00,0x00]
+
+v_lshrrev_b16 v5, vcc_hi, 0xfe0b
+// GFX12: encoding: [0x05,0x00,0x39,0xd7,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+v_lshrrev_b16 v5, ttmp15, src_scc
+// GFX12: encoding: [0x05,0x00,0x39,0xd7,0x7b,0xfa,0x01,0x00]
+
+v_lshrrev_b16 v5, m0, 0.5
+// GFX12: encoding: [0x05,0x00,0x39,0xd7,0x7d,0xe0,0x01,0x00]
+
+v_lshrrev_b16 v5, exec_lo, -1
+// GFX12: encoding: [0x05,0x00,0x39,0xd7,0x7e,0x82,0x01,0x00]
+
+v_lshrrev_b16 v5, exec_hi, null
+// GFX12: encoding: [0x05,0x00,0x39,0xd7,0x7f,0xf8,0x00,0x00]
+
+v_lshrrev_b16 v5, null, exec_lo
+// GFX12: encoding: [0x05,0x00,0x39,0xd7,0x7c,0xfc,0x00,0x00]
+
+v_lshrrev_b16 v5, -1, exec_hi
+// GFX12: encoding: [0x05,0x00,0x39,0xd7,0xc1,0xfe,0x00,0x00]
+
+v_lshrrev_b16 v5, 0.5, m0
+// GFX12: encoding: [0x05,0x00,0x39,0xd7,0xf0,0xfa,0x00,0x00]
+
+v_lshrrev_b16 v5, src_scc, vcc_lo
+// GFX12: encoding: [0x05,0x00,0x39,0xd7,0xfd,0xd4,0x00,0x00]
+
+v_lshrrev_b16 v255, 0xfe0b, vcc_hi
+// GFX12: encoding: [0xff,0x00,0x39,0xd7,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
+
+v_lshrrev_b64 v[5:6], v1, vcc
+// GFX12: encoding: [0x05,0x00,0x3d,0xd7,0x01,0xd5,0x00,0x00]
+
+v_lshrrev_b64 v[5:6], v255, exec
+// GFX12: encoding: [0x05,0x00,0x3d,0xd7,0xff,0xfd,0x00,0x00]
+
+v_lshrrev_b64 v[5:6], exec_lo, v[2:3]
+// GFX12: encoding: [0x05,0x00,0x3d,0xd7,0x7e,0x04,0x02,0x00]
+
+v_lshrrev_b64 v[5:6], exec_hi, v[254:255]
+// GFX12: encoding: [0x05,0x00,0x3d,0xd7,0x7f,0xfc,0x03,0x00]
+
+v_lshrrev_b64 v[5:6], null, null
+// GFX12: encoding: [0x05,0x00,0x3d,0xd7,0x7c,0xf8,0x00,0x00]
+
+v_lshrrev_b64 v[5:6], -1, -1
+// GFX12: encoding: [0x05,0x00,0x3d,0xd7,0xc1,0x82,0x01,0x00]
+
+v_lshrrev_b64 v[5:6], 0.5, 0xaf123456
+// GFX12: encoding: [0x05,0x00,0x3d,0xd7,0xf0,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_lshrrev_b64 v[5:6], src_scc, src_scc
+// GFX12: encoding: [0x05,0x00,0x3d,0xd7,0xfd,0xfa,0x01,0x00]
+
+v_lshrrev_b64 v[254:255], 0xaf123456, 0.5
+// GFX12: encoding: [0xfe,0x00,0x3d,0xd7,0xff,0xe0,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_mad_i16 v5, v1, v2, s3
+// GFX12: encoding: [0x05,0x00,0x53,0xd6,0x01,0x05,0x0e,0x00]
+
+v_mad_i16 v5, v255, s2, s105
+// GFX12: encoding: [0x05,0x00,0x53,0xd6,0xff,0x05,0xa4,0x01]
+
+v_mad_i16 v5, s1, v255, exec_hi
+// GFX12: encoding: [0x05,0x00,0x53,0xd6,0x01,0xfe,0xff,0x01]
+
+v_mad_i16 v5, s105, s105, exec_lo
+// GFX12: encoding: [0x05,0x00,0x53,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_mad_i16 v5, vcc_lo, ttmp15, v3
+// GFX12: encoding: [0x05,0x00,0x53,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_mad_i16 v5, vcc_hi, 0xfe0b, v255
+// GFX12: encoding: [0x05,0x00,0x53,0xd6,0x6b,0xfe,0xfd,0x07,0x0b,0xfe,0x00,0x00]
+
+v_mad_i16 v5, ttmp15, src_scc, ttmp15
+// GFX12: encoding: [0x05,0x00,0x53,0xd6,0x7b,0xfa,0xed,0x01]
+
+v_mad_i16 v5, m0, 0.5, m0
+// GFX12: encoding: [0x05,0x00,0x53,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_mad_i16 v5, exec_lo, -1, vcc_hi
+// GFX12: encoding: [0x05,0x00,0x53,0xd6,0x7e,0x82,0xad,0x01]
+
+v_mad_i16 v5, exec_hi, null, vcc_lo op_sel:[1,1,1,1]
+// GFX12: encoding: [0x05,0x78,0x53,0xd6,0x7f,0xf8,0xa8,0x01]
+
+v_mad_i16 v5, null, exec_lo, 0xfe0b op_sel:[0,0,0,0]
+// GFX12: encoding: [0x05,0x00,0x53,0xd6,0x7c,0xfc,0xfc,0x03,0x0b,0xfe,0x00,0x00]
+
+v_mad_i16 v5, -1, exec_hi, src_scc op_sel:[1,0,0,0]
+// GFX12: encoding: [0x05,0x08,0x53,0xd6,0xc1,0xfe,0xf4,0x03]
+
+v_mad_i16 v5, 0.5, m0, 0.5 op_sel:[0,1,0,0]
+// GFX12: encoding: [0x05,0x10,0x53,0xd6,0xf0,0xfa,0xc0,0x03]
+
+v_mad_i16 v5, src_scc, vcc_lo, -1 op_sel:[0,0,1,0]
+// GFX12: encoding: [0x05,0x20,0x53,0xd6,0xfd,0xd4,0x04,0x03]
+
+v_mad_i16 v255, 0xfe0b, vcc_hi, null op_sel:[0,0,0,1] clamp
+// GFX12: encoding: [0xff,0xc0,0x53,0xd6,0xff,0xd6,0xf0,0x01,0x0b,0xfe,0x00,0x00]
+
+v_mad_i32_i16 v5, v1, v2, v3
+// GFX12: encoding: [0x05,0x00,0x5a,0xd6,0x01,0x05,0x0e,0x04]
+
+v_mad_i32_i16 v5, v255, v255, s3
+// GFX12: encoding: [0x05,0x00,0x5a,0xd6,0xff,0xff,0x0f,0x00]
+
+v_mad_i32_i16 v5, s1, s2, v255
+// GFX12: encoding: [0x05,0x00,0x5a,0xd6,0x01,0x04,0xfc,0x07]
+
+v_mad_i32_i16 v5, s105, s105, s105
+// GFX12: encoding: [0x05,0x00,0x5a,0xd6,0x69,0xd2,0xa4,0x01]
+
+v_mad_i32_i16 v5, vcc_lo, ttmp15, vcc_lo
+// GFX12: encoding: [0x05,0x00,0x5a,0xd6,0x6a,0xf6,0xa8,0x01]
+
+v_mad_i32_i16 v5, vcc_hi, 0xfe0b, vcc_hi
+// GFX12: encoding: [0x05,0x00,0x5a,0xd6,0x6b,0xfe,0xad,0x01,0x0b,0xfe,0x00,0x00]
+
+v_mad_i32_i16 v5, ttmp15, src_scc, ttmp15
+// GFX12: encoding: [0x05,0x00,0x5a,0xd6,0x7b,0xfa,0xed,0x01]
+
+v_mad_i32_i16 v5, m0, 0.5, m0
+// GFX12: encoding: [0x05,0x00,0x5a,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_mad_i32_i16 v5, exec_lo, -1, exec_hi
+// GFX12: encoding: [0x05,0x00,0x5a,0xd6,0x7e,0x82,0xfd,0x01]
+
+v_mad_i32_i16 v5, exec_hi, null, exec_lo
+// GFX12: encoding: [0x05,0x00,0x5a,0xd6,0x7f,0xf8,0xf8,0x01]
+
+v_mad_i32_i16 v5, null, exec_lo, null
+// GFX12: encoding: [0x05,0x00,0x5a,0xd6,0x7c,0xfc,0xf0,0x01]
+
+v_mad_i32_i16 v5, -1, exec_hi, 0xaf123456
+// GFX12: encoding: [0x05,0x00,0x5a,0xd6,0xc1,0xfe,0xfc,0x03,0x56,0x34,0x12,0xaf]
+
+v_mad_i32_i16 v5, 0.5, m0, -1 op_sel:[0,0,0,0]
+// GFX12: encoding: [0x05,0x00,0x5a,0xd6,0xf0,0xfa,0x04,0x03]
+
+v_mad_i32_i16 v5, src_scc, vcc_lo, src_scc op_sel:[1,0,0,0]
+// GFX12: encoding: [0x05,0x08,0x5a,0xd6,0xfd,0xd4,0xf4,0x03]
+
+v_mad_i32_i16 v255, 0xfe0b, vcc_hi, 0.5 op_sel:[0,1,0,0] clamp
+// GFX12: encoding: [0xff,0x90,0x5a,0xd6,0xff,0xd6,0xc0,0x03,0x0b,0xfe,0x00,0x00]
+
+v_mad_i32_i24 v5, v1, v2, s3
+// GFX12: encoding: [0x05,0x00,0x0a,0xd6,0x01,0x05,0x0e,0x00]
+
+v_mad_i32_i24 v5, v255, s2, s105
+// GFX12: encoding: [0x05,0x00,0x0a,0xd6,0xff,0x05,0xa4,0x01]
+
+v_mad_i32_i24 v5, s1, v255, exec_hi
+// GFX12: encoding: [0x05,0x00,0x0a,0xd6,0x01,0xfe,0xff,0x01]
+
+v_mad_i32_i24 v5, s105, s105, exec_lo
+// GFX12: encoding: [0x05,0x00,0x0a,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_mad_i32_i24 v5, vcc_lo, ttmp15, v3
+// GFX12: encoding: [0x05,0x00,0x0a,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_mad_i32_i24 v5, vcc_hi, 0xaf123456, v255
+// GFX12: encoding: [0x05,0x00,0x0a,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+v_mad_i32_i24 v5, ttmp15, src_scc, ttmp15
+// GFX12: encoding: [0x05,0x00,0x0a,0xd6,0x7b,0xfa,0xed,0x01]
+
+v_mad_i32_i24 v5, m0, 0.5, m0
+// GFX12: encoding: [0x05,0x00,0x0a,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_mad_i32_i24 v5, exec_lo, -1, vcc_hi
+// GFX12: encoding: [0x05,0x00,0x0a,0xd6,0x7e,0x82,0xad,0x01]
+
+v_mad_i32_i24 v5, exec_hi, null, vcc_lo
+// GFX12: encoding: [0x05,0x00,0x0a,0xd6,0x7f,0xf8,0xa8,0x01]
+
+v_mad_i32_i24 v5, null, exec_lo, 0xaf123456
+// GFX12: encoding: [0x05,0x00,0x0a,0xd6,0x7c,0xfc,0xfc,0x03,0x56,0x34,0x12,0xaf]
+
+v_mad_i32_i24 v5, -1, exec_hi, src_scc
+// GFX12: encoding: [0x05,0x00,0x0a,0xd6,0xc1,0xfe,0xf4,0x03]
+
+v_mad_i32_i24 v5, 0.5, m0, 0.5
+// GFX12: encoding: [0x05,0x00,0x0a,0xd6,0xf0,0xfa,0xc0,0x03]
+
+v_mad_i32_i24 v5, src_scc, vcc_lo, -1
+// GFX12: encoding: [0x05,0x00,0x0a,0xd6,0xfd,0xd4,0x04,0x03]
+
+v_mad_i32_i24 v255, 0xaf123456, vcc_hi, null clamp
+// GFX12: encoding: [0xff,0x80,0x0a,0xd6,0xff,0xd6,0xf0,0x01,0x56,0x34,0x12,0xaf]
+
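+// The v_mad_co_* tests are split by wave size: with wavefront size 32 (W32)
+// the carry-out destination is a single SGPR (s6, s105, vcc_lo, vcc_hi,
+// ttmp15), while wavefront size 64 (W64) requires an SGPR pair (s[12:13],
+// s[104:105], vcc, ttmp[14:15]); assembling a form under the other wave
+// size is expected to fail with "invalid operand for instruction".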
+v_mad_co_i64_i32 v[5:6], s6, s105, s105, s[6:7]
+// W32: encoding: [0x05,0x06,0xff,0xd6,0x69,0xd2,0x18,0x00]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_mad_co_i64_i32 v[5:6], s6, ttmp15, ttmp15, s[104:105]
+// W32: encoding: [0x05,0x06,0xff,0xd6,0x7b,0xf6,0xa0,0x01]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_mad_co_i64_i32 v[5:6], s6, m0, 0.5, ttmp[14:15]
+// W32: encoding: [0x05,0x06,0xff,0xd6,0x7d,0xe0,0xe9,0x01]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_mad_co_i64_i32 v[5:6], s6, exec_lo, -1, exec
+// W32: encoding: [0x05,0x06,0xff,0xd6,0x7e,0x82,0xf9,0x01]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_mad_co_i64_i32 v[5:6], s6, exec_hi, null, vcc
+// W32: encoding: [0x05,0x06,0xff,0xd6,0x7f,0xf8,0xa8,0x01]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_mad_co_i64_i32 v[5:6], s105, null, exec_lo, null
+// W32: encoding: [0x05,0x69,0xff,0xd6,0x7c,0xfc,0xf0,0x01]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_mad_co_i64_i32 v[5:6], vcc_lo, -1, exec_hi, -1
+// W32: encoding: [0x05,0x6a,0xff,0xd6,0xc1,0xfe,0x04,0x03]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_mad_co_i64_i32 v[5:6], vcc_hi, 0.5, m0, 0xaf123456
+// W32: encoding: [0x05,0x6b,0xff,0xd6,0xf0,0xfa,0xfc,0x03,0x56,0x34,0x12,0xaf]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_mad_co_i64_i32 v[5:6], ttmp15, src_scc, vcc_lo, src_scc
+// W32: encoding: [0x05,0x7b,0xff,0xd6,0xfd,0xd4,0xf4,0x03]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_mad_co_i64_i32 v[5:6], s[12:13], s105, s105, s[6:7]
+// W64: encoding: [0x05,0x0c,0xff,0xd6,0x69,0xd2,0x18,0x00]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_mad_co_i64_i32 v[5:6], s[12:13], ttmp15, ttmp15, s[104:105]
+// W64: encoding: [0x05,0x0c,0xff,0xd6,0x7b,0xf6,0xa0,0x01]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_mad_co_i64_i32 v[5:6], s[12:13], m0, 0.5, ttmp[14:15]
+// W64: encoding: [0x05,0x0c,0xff,0xd6,0x7d,0xe0,0xe9,0x01]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_mad_co_i64_i32 v[5:6], s[12:13], exec_lo, -1, exec
+// W64: encoding: [0x05,0x0c,0xff,0xd6,0x7e,0x82,0xf9,0x01]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_mad_co_i64_i32 v[5:6], s[12:13], exec_hi, null, vcc
+// W64: encoding: [0x05,0x0c,0xff,0xd6,0x7f,0xf8,0xa8,0x01]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_mad_co_i64_i32 v[5:6], s[12:13], null, exec_lo, null
+// W64: encoding: [0x05,0x0c,0xff,0xd6,0x7c,0xfc,0xf0,0x01]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_mad_co_i64_i32 v[5:6], s[104:105], -1, exec_hi, -1
+// W64: encoding: [0x05,0x68,0xff,0xd6,0xc1,0xfe,0x04,0x03]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_mad_co_i64_i32 v[5:6], vcc, 0.5, m0, 0xaf123456
+// W64: encoding: [0x05,0x6a,0xff,0xd6,0xf0,0xfa,0xfc,0x03,0x56,0x34,0x12,0xaf]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_mad_co_i64_i32 v[5:6], ttmp[14:15], src_scc, vcc_lo, src_scc
+// W64: encoding: [0x05,0x7a,0xff,0xd6,0xfd,0xd4,0xf4,0x03]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_mad_co_i64_i32 v[254:255], null, 0xaf123456, vcc_hi, 0.5 clamp
+// GFX12: encoding: [0xfe,0xfc,0xff,0xd6,0xff,0xd6,0xc0,0x03,0x56,0x34,0x12,0xaf]
+
+v_mad_u16 v5, v1, v2, s3
+// GFX12: encoding: [0x05,0x00,0x41,0xd6,0x01,0x05,0x0e,0x00]
+
+v_mad_u16 v5, v255, s2, s105
+// GFX12: encoding: [0x05,0x00,0x41,0xd6,0xff,0x05,0xa4,0x01]
+
+v_mad_u16 v5, s1, v255, exec_hi
+// GFX12: encoding: [0x05,0x00,0x41,0xd6,0x01,0xfe,0xff,0x01]
+
+v_mad_u16 v5, s105, s105, exec_lo
+// GFX12: encoding: [0x05,0x00,0x41,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_mad_u16 v5, vcc_lo, ttmp15, v3
+// GFX12: encoding: [0x05,0x00,0x41,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_mad_u16 v5, vcc_hi, 0xfe0b, v255
+// GFX12: encoding: [0x05,0x00,0x41,0xd6,0x6b,0xfe,0xfd,0x07,0x0b,0xfe,0x00,0x00]
+
+v_mad_u16 v5, ttmp15, src_scc, ttmp15
+// GFX12: encoding: [0x05,0x00,0x41,0xd6,0x7b,0xfa,0xed,0x01]
+
+v_mad_u16 v5, m0, 0.5, m0
+// GFX12: encoding: [0x05,0x00,0x41,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_mad_u16 v5, exec_lo, -1, vcc_hi
+// GFX12: encoding: [0x05,0x00,0x41,0xd6,0x7e,0x82,0xad,0x01]
+
+v_mad_u16 v5, exec_hi, null, vcc_lo op_sel:[1,1,1,1]
+// GFX12: encoding: [0x05,0x78,0x41,0xd6,0x7f,0xf8,0xa8,0x01]
+
+v_mad_u16 v5, null, exec_lo, 0xfe0b op_sel:[0,0,0,0]
+// GFX12: encoding: [0x05,0x00,0x41,0xd6,0x7c,0xfc,0xfc,0x03,0x0b,0xfe,0x00,0x00]
+
+v_mad_u16 v5, -1, exec_hi, src_scc op_sel:[1,0,0,0]
+// GFX12: encoding: [0x05,0x08,0x41,0xd6,0xc1,0xfe,0xf4,0x03]
+
+v_mad_u16 v5, 0.5, m0, 0.5 op_sel:[0,1,0,0]
+// GFX12: encoding: [0x05,0x10,0x41,0xd6,0xf0,0xfa,0xc0,0x03]
+
+v_mad_u16 v5, src_scc, vcc_lo, -1 op_sel:[0,0,1,0]
+// GFX12: encoding: [0x05,0x20,0x41,0xd6,0xfd,0xd4,0x04,0x03]
+
+v_mad_u16 v255, 0xfe0b, vcc_hi, null op_sel:[0,0,0,1] clamp
+// GFX12: encoding: [0xff,0xc0,0x41,0xd6,0xff,0xd6,0xf0,0x01,0x0b,0xfe,0x00,0x00]
+
+v_mad_u32_u16 v5, v1, v2, v3
+// GFX12: encoding: [0x05,0x00,0x59,0xd6,0x01,0x05,0x0e,0x04]
+
+v_mad_u32_u16 v5, v255, v255, s3
+// GFX12: encoding: [0x05,0x00,0x59,0xd6,0xff,0xff,0x0f,0x00]
+
+v_mad_u32_u16 v5, s1, s2, v255
+// GFX12: encoding: [0x05,0x00,0x59,0xd6,0x01,0x04,0xfc,0x07]
+
+v_mad_u32_u16 v5, s105, s105, s105
+// GFX12: encoding: [0x05,0x00,0x59,0xd6,0x69,0xd2,0xa4,0x01]
+
+v_mad_u32_u16 v5, vcc_lo, ttmp15, vcc_lo
+// GFX12: encoding: [0x05,0x00,0x59,0xd6,0x6a,0xf6,0xa8,0x01]
+
+v_mad_u32_u16 v5, vcc_hi, 0xfe0b, vcc_hi
+// GFX12: encoding: [0x05,0x00,0x59,0xd6,0x6b,0xfe,0xad,0x01,0x0b,0xfe,0x00,0x00]
+
+v_mad_u32_u16 v5, ttmp15, src_scc, ttmp15
+// GFX12: encoding: [0x05,0x00,0x59,0xd6,0x7b,0xfa,0xed,0x01]
+
+v_mad_u32_u16 v5, m0, 0.5, m0
+// GFX12: encoding: [0x05,0x00,0x59,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_mad_u32_u16 v5, exec_lo, -1, exec_hi
+// GFX12: encoding: [0x05,0x00,0x59,0xd6,0x7e,0x82,0xfd,0x01]
+
+v_mad_u32_u16 v5, exec_hi, null, exec_lo
+// GFX12: encoding: [0x05,0x00,0x59,0xd6,0x7f,0xf8,0xf8,0x01]
+
+v_mad_u32_u16 v5, null, exec_lo, null
+// GFX12: encoding: [0x05,0x00,0x59,0xd6,0x7c,0xfc,0xf0,0x01]
+
+v_mad_u32_u16 v5, -1, exec_hi, 0xaf123456
+// GFX12: encoding: [0x05,0x00,0x59,0xd6,0xc1,0xfe,0xfc,0x03,0x56,0x34,0x12,0xaf]
+
+v_mad_u32_u16 v5, 0.5, m0, -1 op_sel:[0,0,0,0]
+// GFX12: encoding: [0x05,0x00,0x59,0xd6,0xf0,0xfa,0x04,0x03]
+
+v_mad_u32_u16 v5, src_scc, vcc_lo, src_scc op_sel:[1,0,0,0]
+// GFX12: encoding: [0x05,0x08,0x59,0xd6,0xfd,0xd4,0xf4,0x03]
+
+v_mad_u32_u16 v255, 0xfe0b, vcc_hi, 0.5 op_sel:[0,1,0,0] clamp
+// GFX12: encoding: [0xff,0x90,0x59,0xd6,0xff,0xd6,0xc0,0x03,0x0b,0xfe,0x00,0x00]
+
+v_mad_u32_u24 v5, v1, v2, s3
+// GFX12: encoding: [0x05,0x00,0x0b,0xd6,0x01,0x05,0x0e,0x00]
+
+v_mad_u32_u24 v5, v255, s2, s105
+// GFX12: encoding: [0x05,0x00,0x0b,0xd6,0xff,0x05,0xa4,0x01]
+
+v_mad_u32_u24 v5, s1, v255, exec_hi
+// GFX12: encoding: [0x05,0x00,0x0b,0xd6,0x01,0xfe,0xff,0x01]
+
+v_mad_u32_u24 v5, s105, s105, exec_lo
+// GFX12: encoding: [0x05,0x00,0x0b,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_mad_u32_u24 v5, vcc_lo, ttmp15, v3
+// GFX12: encoding: [0x05,0x00,0x0b,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_mad_u32_u24 v5, vcc_hi, 0xaf123456, v255
+// GFX12: encoding: [0x05,0x00,0x0b,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+v_mad_u32_u24 v5, ttmp15, src_scc, ttmp15
+// GFX12: encoding: [0x05,0x00,0x0b,0xd6,0x7b,0xfa,0xed,0x01]
+
+v_mad_u32_u24 v5, m0, 0.5, m0
+// GFX12: encoding: [0x05,0x00,0x0b,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_mad_u32_u24 v5, exec_lo, -1, vcc_hi
+// GFX12: encoding: [0x05,0x00,0x0b,0xd6,0x7e,0x82,0xad,0x01]
+
+v_mad_u32_u24 v5, exec_hi, null, vcc_lo
+// GFX12: encoding: [0x05,0x00,0x0b,0xd6,0x7f,0xf8,0xa8,0x01]
+
+v_mad_u32_u24 v5, null, exec_lo, 0xaf123456
+// GFX12: encoding: [0x05,0x00,0x0b,0xd6,0x7c,0xfc,0xfc,0x03,0x56,0x34,0x12,0xaf]
+
+v_mad_u32_u24 v5, -1, exec_hi, src_scc
+// GFX12: encoding: [0x05,0x00,0x0b,0xd6,0xc1,0xfe,0xf4,0x03]
+
+v_mad_u32_u24 v5, 0.5, m0, 0.5
+// GFX12: encoding: [0x05,0x00,0x0b,0xd6,0xf0,0xfa,0xc0,0x03]
+
+v_mad_u32_u24 v5, src_scc, vcc_lo, -1
+// GFX12: encoding: [0x05,0x00,0x0b,0xd6,0xfd,0xd4,0x04,0x03]
+
+v_mad_u32_u24 v255, 0xaf123456, vcc_hi, null clamp
+// GFX12: encoding: [0xff,0x80,0x0b,0xd6,0xff,0xd6,0xf0,0x01,0x56,0x34,0x12,0xaf]
+
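+// v_mad_co_u64_u32 repeats the same wave-size split for the unsigned
+// multiply-add.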
+v_mad_co_u64_u32 v[5:6], s6, s105, s105, s[6:7]
+// W32: encoding: [0x05,0x06,0xfe,0xd6,0x69,0xd2,0x18,0x00]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_mad_co_u64_u32 v[5:6], s6, ttmp15, ttmp15, s[104:105]
+// W32: encoding: [0x05,0x06,0xfe,0xd6,0x7b,0xf6,0xa0,0x01]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_mad_co_u64_u32 v[5:6], s6, m0, 0.5, ttmp[14:15]
+// W32: encoding: [0x05,0x06,0xfe,0xd6,0x7d,0xe0,0xe9,0x01]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_mad_co_u64_u32 v[5:6], s6, exec_lo, -1, exec
+// W32: encoding: [0x05,0x06,0xfe,0xd6,0x7e,0x82,0xf9,0x01]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_mad_co_u64_u32 v[5:6], s6, exec_hi, null, vcc
+// W32: encoding: [0x05,0x06,0xfe,0xd6,0x7f,0xf8,0xa8,0x01]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_mad_co_u64_u32 v[5:6], s105, null, exec_lo, null
+// W32: encoding: [0x05,0x69,0xfe,0xd6,0x7c,0xfc,0xf0,0x01]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_mad_co_u64_u32 v[5:6], vcc_lo, -1, exec_hi, -1
+// W32: encoding: [0x05,0x6a,0xfe,0xd6,0xc1,0xfe,0x04,0x03]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_mad_co_u64_u32 v[5:6], vcc_hi, 0.5, m0, 0xaf123456
+// W32: encoding: [0x05,0x6b,0xfe,0xd6,0xf0,0xfa,0xfc,0x03,0x56,0x34,0x12,0xaf]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_mad_co_u64_u32 v[5:6], ttmp15, src_scc, vcc_lo, src_scc
+// W32: encoding: [0x05,0x7b,0xfe,0xd6,0xfd,0xd4,0xf4,0x03]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_mad_co_u64_u32 v[5:6], s[12:13], s105, s105, s[6:7]
+// W64: encoding: [0x05,0x0c,0xfe,0xd6,0x69,0xd2,0x18,0x00]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_mad_co_u64_u32 v[5:6], s[12:13], ttmp15, ttmp15, s[104:105]
+// W64: encoding: [0x05,0x0c,0xfe,0xd6,0x7b,0xf6,0xa0,0x01]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_mad_co_u64_u32 v[5:6], s[12:13], m0, 0.5, ttmp[14:15]
+// W64: encoding: [0x05,0x0c,0xfe,0xd6,0x7d,0xe0,0xe9,0x01]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_mad_co_u64_u32 v[5:6], s[12:13], exec_lo, -1, exec
+// W64: encoding: [0x05,0x0c,0xfe,0xd6,0x7e,0x82,0xf9,0x01]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_mad_co_u64_u32 v[5:6], s[12:13], exec_hi, null, vcc
+// W64: encoding: [0x05,0x0c,0xfe,0xd6,0x7f,0xf8,0xa8,0x01]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_mad_co_u64_u32 v[5:6], s[12:13], null, exec_lo, null
+// W64: encoding: [0x05,0x0c,0xfe,0xd6,0x7c,0xfc,0xf0,0x01]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_mad_co_u64_u32 v[5:6], s[104:105], -1, exec_hi, -1
+// W64: encoding: [0x05,0x68,0xfe,0xd6,0xc1,0xfe,0x04,0x03]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_mad_co_u64_u32 v[5:6], vcc, 0.5, m0, 0xaf123456
+// W64: encoding: [0x05,0x6a,0xfe,0xd6,0xf0,0xfa,0xfc,0x03,0x56,0x34,0x12,0xaf]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_mad_co_u64_u32 v[5:6], ttmp[14:15], src_scc, vcc_lo, src_scc
+// W64: encoding: [0x05,0x7a,0xfe,0xd6,0xfd,0xd4,0xf4,0x03]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_mad_co_u64_u32 v[254:255], null, 0xaf123456, vcc_hi, 0.5 clamp
+// GFX12: encoding: [0xfe,0xfc,0xfe,0xd6,0xff,0xd6,0xc0,0x03,0x56,0x34,0x12,0xaf]
+
+v_max3_num_f16 v5, v1, v2, s3
+// GFX12: encoding: [0x05,0x00,0x2c,0xd6,0x01,0x05,0x0e,0x00]
+
+v_max3_num_f16 v5, v255, s2, s105
+// GFX12: encoding: [0x05,0x00,0x2c,0xd6,0xff,0x05,0xa4,0x01]
+
+v_max3_num_f16 v5, s1, v255, exec_hi
+// GFX12: encoding: [0x05,0x00,0x2c,0xd6,0x01,0xfe,0xff,0x01]
+
+v_max3_num_f16 v5, s105, s105, exec_lo
+// GFX12: encoding: [0x05,0x00,0x2c,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_max3_num_f16 v5, vcc_lo, ttmp15, v3
+// GFX12: encoding: [0x05,0x00,0x2c,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_max3_num_f16 v5, vcc_hi, 0xfe0b, v255
+// GFX12: encoding: [0x05,0x00,0x2c,0xd6,0x6b,0xfe,0xfd,0x07,0x0b,0xfe,0x00,0x00]
+
+v_max3_num_f16 v5, -|ttmp15|, -|src_scc|, -|ttmp15|
+// GFX12: encoding: [0x05,0x07,0x2c,0xd6,0x7b,0xfa,0xed,0xe1]
+
+v_max3_num_f16 v5, m0, 0.5, m0
+// GFX12: encoding: [0x05,0x00,0x2c,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_max3_num_f16 v5, |exec_lo|, -1, vcc_hi
+// GFX12: encoding: [0x05,0x01,0x2c,0xd6,0x7e,0x82,0xad,0x01]
+
+v_max3_num_f16 v5, -|exec_hi|, null, -|vcc_lo| op_sel:[1,1,1,1]
+// GFX12: encoding: [0x05,0x7d,0x2c,0xd6,0x7f,0xf8,0xa8,0xa1]
+
+v_max3_num_f16 v5, null, exec_lo, -|0xfe0b| op_sel:[0,0,0,0]
+// GFX12: encoding: [0x05,0x04,0x2c,0xd6,0x7c,0xfc,0xfc,0x83,0x0b,0xfe,0x00,0x00]
+
+v_max3_num_f16 v5, -1, -|exec_hi|, -|src_scc| op_sel:[1,0,0,0]
+// GFX12: encoding: [0x05,0x0e,0x2c,0xd6,0xc1,0xfe,0xf4,0xc3]
+
+v_max3_num_f16 v5, 0.5, -m0, 0.5 op_sel:[0,1,0,0]
+// GFX12: encoding: [0x05,0x10,0x2c,0xd6,0xf0,0xfa,0xc0,0x43]
+
+v_max3_num_f16 v5, -src_scc, |vcc_lo|, -1 op_sel:[0,0,1,0]
+// GFX12: encoding: [0x05,0x22,0x2c,0xd6,0xfd,0xd4,0x04,0x23]
+
+v_max3_num_f16 v255, -|0xfe0b|, -|vcc_hi|, null op_sel:[0,0,0,1] clamp
+// GFX12: encoding: [0xff,0xc3,0x2c,0xd6,0xff,0xd6,0xf0,0x61,0x0b,0xfe,0x00,0x00]
+
+v_max3_num_f32 v5, v1, v2, s3
+// GFX12: encoding: [0x05,0x00,0x2a,0xd6,0x01,0x05,0x0e,0x00]
+
+v_max3_num_f32 v5, v255, s2, s105
+// GFX12: encoding: [0x05,0x00,0x2a,0xd6,0xff,0x05,0xa4,0x01]
+
+v_max3_num_f32 v5, s1, v255, exec_hi
+// GFX12: encoding: [0x05,0x00,0x2a,0xd6,0x01,0xfe,0xff,0x01]
+
+v_max3_num_f32 v5, s105, s105, exec_lo
+// GFX12: encoding: [0x05,0x00,0x2a,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_max3_num_f32 v5, vcc_lo, ttmp15, v3
+// GFX12: encoding: [0x05,0x00,0x2a,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_max3_num_f32 v5, vcc_hi, 0xaf123456, v255
+// GFX12: encoding: [0x05,0x00,0x2a,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+v_max3_num_f32 v5, -|ttmp15|, -|src_scc|, -|ttmp15|
+// GFX12: encoding: [0x05,0x07,0x2a,0xd6,0x7b,0xfa,0xed,0xe1]
+
+v_max3_num_f32 v5, m0, 0.5, m0
+// GFX12: encoding: [0x05,0x00,0x2a,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_max3_num_f32 v5, |exec_lo|, -1, vcc_hi
+// GFX12: encoding: [0x05,0x01,0x2a,0xd6,0x7e,0x82,0xad,0x01]
+
+v_max3_num_f32 v5, -|exec_hi|, null, -|vcc_lo|
+// GFX12: encoding: [0x05,0x05,0x2a,0xd6,0x7f,0xf8,0xa8,0xa1]
+
+v_max3_num_f32 v5, null, exec_lo, -|0xaf123456|
+// GFX12: encoding: [0x05,0x04,0x2a,0xd6,0x7c,0xfc,0xfc,0x83,0x56,0x34,0x12,0xaf]
+
+v_max3_num_f32 v5, -1, -|exec_hi|, -|src_scc|
+// GFX12: encoding: [0x05,0x06,0x2a,0xd6,0xc1,0xfe,0xf4,0xc3]
+
+v_max3_num_f32 v5, 0.5, -m0, 0.5 mul:2
+// GFX12: encoding: [0x05,0x00,0x2a,0xd6,0xf0,0xfa,0xc0,0x4b]
+
+v_max3_num_f32 v5, -src_scc, |vcc_lo|, -1 mul:4
+// GFX12: encoding: [0x05,0x02,0x2a,0xd6,0xfd,0xd4,0x04,0x33]
+
+v_max3_num_f32 v255, -|0xaf123456|, -|vcc_hi|, null clamp div:2
+// GFX12: encoding: [0xff,0x83,0x2a,0xd6,0xff,0xd6,0xf0,0x79,0x56,0x34,0x12,0xaf]
+
+v_max3_i16 v5, v1, v2, s3
+// GFX12: encoding: [0x05,0x00,0x4d,0xd6,0x01,0x05,0x0e,0x00]
+
+v_max3_i16 v5, v255, s2, s105
+// GFX12: encoding: [0x05,0x00,0x4d,0xd6,0xff,0x05,0xa4,0x01]
+
+v_max3_i16 v5, s1, v255, exec_hi
+// GFX12: encoding: [0x05,0x00,0x4d,0xd6,0x01,0xfe,0xff,0x01]
+
+v_max3_i16 v5, s105, s105, exec_lo
+// GFX12: encoding: [0x05,0x00,0x4d,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_max3_i16 v5, vcc_lo, ttmp15, v3
+// GFX12: encoding: [0x05,0x00,0x4d,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_max3_i16 v5, vcc_hi, 0xfe0b, v255
+// GFX12: encoding: [0x05,0x00,0x4d,0xd6,0x6b,0xfe,0xfd,0x07,0x0b,0xfe,0x00,0x00]
+
+v_max3_i16 v5, ttmp15, src_scc, ttmp15
+// GFX12: encoding: [0x05,0x00,0x4d,0xd6,0x7b,0xfa,0xed,0x01]
+
+v_max3_i16 v5, m0, 0.5, m0
+// GFX12: encoding: [0x05,0x00,0x4d,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_max3_i16 v5, exec_lo, -1, vcc_hi
+// GFX12: encoding: [0x05,0x00,0x4d,0xd6,0x7e,0x82,0xad,0x01]
+
+v_max3_i16 v5, exec_hi, null, vcc_lo op_sel:[1,1,1,1]
+// GFX12: encoding: [0x05,0x78,0x4d,0xd6,0x7f,0xf8,0xa8,0x01]
+
+v_max3_i16 v5, null, exec_lo, 0xfe0b op_sel:[0,0,0,0]
+// GFX12: encoding: [0x05,0x00,0x4d,0xd6,0x7c,0xfc,0xfc,0x03,0x0b,0xfe,0x00,0x00]
+
+v_max3_i16 v5, -1, exec_hi, src_scc op_sel:[1,0,0,0]
+// GFX12: encoding: [0x05,0x08,0x4d,0xd6,0xc1,0xfe,0xf4,0x03]
+
+v_max3_i16 v5, 0.5, m0, 0.5 op_sel:[0,1,0,0]
+// GFX12: encoding: [0x05,0x10,0x4d,0xd6,0xf0,0xfa,0xc0,0x03]
+
+v_max3_i16 v5, src_scc, vcc_lo, -1 op_sel:[0,0,1,0]
+// GFX12: encoding: [0x05,0x20,0x4d,0xd6,0xfd,0xd4,0x04,0x03]
+
+v_max3_i16 v255, 0xfe0b, vcc_hi, null op_sel:[0,0,0,1]
+// GFX12: encoding: [0xff,0x40,0x4d,0xd6,0xff,0xd6,0xf0,0x01,0x0b,0xfe,0x00,0x00]
+
+v_max3_i32 v5, v1, v2, s3
+// GFX12: encoding: [0x05,0x00,0x1d,0xd6,0x01,0x05,0x0e,0x00]
+
+v_max3_i32 v5, v255, s2, s105
+// GFX12: encoding: [0x05,0x00,0x1d,0xd6,0xff,0x05,0xa4,0x01]
+
+v_max3_i32 v5, s1, v255, exec_hi
+// GFX12: encoding: [0x05,0x00,0x1d,0xd6,0x01,0xfe,0xff,0x01]
+
+v_max3_i32 v5, s105, s105, exec_lo
+// GFX12: encoding: [0x05,0x00,0x1d,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_max3_i32 v5, vcc_lo, ttmp15, v3
+// GFX12: encoding: [0x05,0x00,0x1d,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_max3_i32 v5, vcc_hi, 0xaf123456, v255
+// GFX12: encoding: [0x05,0x00,0x1d,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+v_max3_i32 v5, ttmp15, src_scc, ttmp15
+// GFX12: encoding: [0x05,0x00,0x1d,0xd6,0x7b,0xfa,0xed,0x01]
+
+v_max3_i32 v5, m0, 0.5, m0
+// GFX12: encoding: [0x05,0x00,0x1d,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_max3_i32 v5, exec_lo, -1, vcc_hi
+// GFX12: encoding: [0x05,0x00,0x1d,0xd6,0x7e,0x82,0xad,0x01]
+
+v_max3_i32 v5, exec_hi, null, vcc_lo
+// GFX12: encoding: [0x05,0x00,0x1d,0xd6,0x7f,0xf8,0xa8,0x01]
+
+v_max3_i32 v5, null, exec_lo, 0xaf123456
+// GFX12: encoding: [0x05,0x00,0x1d,0xd6,0x7c,0xfc,0xfc,0x03,0x56,0x34,0x12,0xaf]
+
+v_max3_i32 v5, -1, exec_hi, src_scc
+// GFX12: encoding: [0x05,0x00,0x1d,0xd6,0xc1,0xfe,0xf4,0x03]
+
+v_max3_i32 v5, 0.5, m0, 0.5
+// GFX12: encoding: [0x05,0x00,0x1d,0xd6,0xf0,0xfa,0xc0,0x03]
+
+v_max3_i32 v5, src_scc, vcc_lo, -1
+// GFX12: encoding: [0x05,0x00,0x1d,0xd6,0xfd,0xd4,0x04,0x03]
+
+v_max3_i32 v255, 0xaf123456, vcc_hi, null
+// GFX12: encoding: [0xff,0x00,0x1d,0xd6,0xff,0xd6,0xf0,0x01,0x56,0x34,0x12,0xaf]
+
+v_max3_u16 v5, v1, v2, s3
+// GFX12: encoding: [0x05,0x00,0x4e,0xd6,0x01,0x05,0x0e,0x00]
+
+v_max3_u16 v5, v255, s2, s105
+// GFX12: encoding: [0x05,0x00,0x4e,0xd6,0xff,0x05,0xa4,0x01]
+
+v_max3_u16 v5, s1, v255, exec_hi
+// GFX12: encoding: [0x05,0x00,0x4e,0xd6,0x01,0xfe,0xff,0x01]
+
+v_max3_u16 v5, s105, s105, exec_lo
+// GFX12: encoding: [0x05,0x00,0x4e,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_max3_u16 v5, vcc_lo, ttmp15, v3
+// GFX12: encoding: [0x05,0x00,0x4e,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_max3_u16 v5, vcc_hi, 0xfe0b, v255
+// GFX12: encoding: [0x05,0x00,0x4e,0xd6,0x6b,0xfe,0xfd,0x07,0x0b,0xfe,0x00,0x00]
+
+v_max3_u16 v5, ttmp15, src_scc, ttmp15
+// GFX12: encoding: [0x05,0x00,0x4e,0xd6,0x7b,0xfa,0xed,0x01]
+
+v_max3_u16 v5, m0, 0.5, m0
+// GFX12: encoding: [0x05,0x00,0x4e,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_max3_u16 v5, exec_lo, -1, vcc_hi
+// GFX12: encoding: [0x05,0x00,0x4e,0xd6,0x7e,0x82,0xad,0x01]
+
+v_max3_u16 v5, exec_hi, null, vcc_lo op_sel:[1,1,1,1]
+// GFX12: encoding: [0x05,0x78,0x4e,0xd6,0x7f,0xf8,0xa8,0x01]
+
+v_max3_u16 v5, null, exec_lo, 0xfe0b op_sel:[0,0,0,0]
+// GFX12: encoding: [0x05,0x00,0x4e,0xd6,0x7c,0xfc,0xfc,0x03,0x0b,0xfe,0x00,0x00]
+
+v_max3_u16 v5, -1, exec_hi, src_scc op_sel:[1,0,0,0]
+// GFX12: encoding: [0x05,0x08,0x4e,0xd6,0xc1,0xfe,0xf4,0x03]
+
+v_max3_u16 v5, 0.5, m0, 0.5 op_sel:[0,1,0,0]
+// GFX12: encoding: [0x05,0x10,0x4e,0xd6,0xf0,0xfa,0xc0,0x03]
+
+v_max3_u16 v5, src_scc, vcc_lo, -1 op_sel:[0,0,1,0]
+// GFX12: encoding: [0x05,0x20,0x4e,0xd6,0xfd,0xd4,0x04,0x03]
+
+v_max3_u16 v255, 0xfe0b, vcc_hi, null op_sel:[0,0,0,1]
+// GFX12: encoding: [0xff,0x40,0x4e,0xd6,0xff,0xd6,0xf0,0x01,0x0b,0xfe,0x00,0x00]
+
+v_max3_u32 v5, v1, v2, s3
+// GFX12: encoding: [0x05,0x00,0x1e,0xd6,0x01,0x05,0x0e,0x00]
+
+v_max3_u32 v5, v255, s2, s105
+// GFX12: encoding: [0x05,0x00,0x1e,0xd6,0xff,0x05,0xa4,0x01]
+
+v_max3_u32 v5, s1, v255, exec_hi
+// GFX12: encoding: [0x05,0x00,0x1e,0xd6,0x01,0xfe,0xff,0x01]
+
+v_max3_u32 v5, s105, s105, exec_lo
+// GFX12: encoding: [0x05,0x00,0x1e,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_max3_u32 v5, vcc_lo, ttmp15, v3
+// GFX12: encoding: [0x05,0x00,0x1e,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_max3_u32 v5, vcc_hi, 0xaf123456, v255
+// GFX12: encoding: [0x05,0x00,0x1e,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+v_max3_u32 v5, ttmp15, src_scc, ttmp15
+// GFX12: encoding: [0x05,0x00,0x1e,0xd6,0x7b,0xfa,0xed,0x01]
+
+v_max3_u32 v5, m0, 0.5, m0
+// GFX12: encoding: [0x05,0x00,0x1e,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_max3_u32 v5, exec_lo, -1, vcc_hi
+// GFX12: encoding: [0x05,0x00,0x1e,0xd6,0x7e,0x82,0xad,0x01]
+
+v_max3_u32 v5, exec_hi, null, vcc_lo
+// GFX12: encoding: [0x05,0x00,0x1e,0xd6,0x7f,0xf8,0xa8,0x01]
+
+v_max3_u32 v5, null, exec_lo, 0xaf123456
+// GFX12: encoding: [0x05,0x00,0x1e,0xd6,0x7c,0xfc,0xfc,0x03,0x56,0x34,0x12,0xaf]
+
+v_max3_u32 v5, -1, exec_hi, src_scc
+// GFX12: encoding: [0x05,0x00,0x1e,0xd6,0xc1,0xfe,0xf4,0x03]
+
+v_max3_u32 v5, 0.5, m0, 0.5
+// GFX12: encoding: [0x05,0x00,0x1e,0xd6,0xf0,0xfa,0xc0,0x03]
+
+v_max3_u32 v5, src_scc, vcc_lo, -1
+// GFX12: encoding: [0x05,0x00,0x1e,0xd6,0xfd,0xd4,0x04,0x03]
+
+v_max3_u32 v255, 0xaf123456, vcc_hi, null
+// GFX12: encoding: [0xff,0x00,0x1e,0xd6,0xff,0xd6,0xf0,0x01,0x56,0x34,0x12,0xaf]
+
+v_max_i16 v5, v1, v2
+// GFX12: encoding: [0x05,0x00,0x0a,0xd7,0x01,0x05,0x02,0x00]
+
+v_max_i16 v5, v255, v255
+// GFX12: encoding: [0x05,0x00,0x0a,0xd7,0xff,0xff,0x03,0x00]
+
+v_max_i16 v5, s1, s2
+// GFX12: encoding: [0x05,0x00,0x0a,0xd7,0x01,0x04,0x00,0x00]
+
+v_max_i16 v5, s105, s105
+// GFX12: encoding: [0x05,0x00,0x0a,0xd7,0x69,0xd2,0x00,0x00]
+
+v_max_i16 v5, vcc_lo, ttmp15
+// GFX12: encoding: [0x05,0x00,0x0a,0xd7,0x6a,0xf6,0x00,0x00]
+
+v_max_i16 v5, vcc_hi, 0xfe0b
+// GFX12: encoding: [0x05,0x00,0x0a,0xd7,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+v_max_i16 v5, ttmp15, src_scc
+// GFX12: encoding: [0x05,0x00,0x0a,0xd7,0x7b,0xfa,0x01,0x00]
+
+v_max_i16 v5, m0, 0.5
+// GFX12: encoding: [0x05,0x00,0x0a,0xd7,0x7d,0xe0,0x01,0x00]
+
+v_max_i16 v5, exec_lo, -1
+// GFX12: encoding: [0x05,0x00,0x0a,0xd7,0x7e,0x82,0x01,0x00]
+
+v_max_i16 v5, exec_hi, null
+// GFX12: encoding: [0x05,0x00,0x0a,0xd7,0x7f,0xf8,0x00,0x00]
+
+v_max_i16 v5, null, exec_lo
+// GFX12: encoding: [0x05,0x00,0x0a,0xd7,0x7c,0xfc,0x00,0x00]
+
+v_max_i16 v5, -1, exec_hi
+// GFX12: encoding: [0x05,0x00,0x0a,0xd7,0xc1,0xfe,0x00,0x00]
+
+v_max_i16 v5, 0.5, m0
+// GFX12: encoding: [0x05,0x00,0x0a,0xd7,0xf0,0xfa,0x00,0x00]
+
+v_max_i16 v5, src_scc, vcc_lo
+// GFX12: encoding: [0x05,0x00,0x0a,0xd7,0xfd,0xd4,0x00,0x00]
+
+v_max_i16 v255, 0xfe0b, vcc_hi
+// GFX12: encoding: [0xff,0x00,0x0a,0xd7,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
+
+v_max_u16 v5, v1, v2
+// GFX12: encoding: [0x05,0x00,0x09,0xd7,0x01,0x05,0x02,0x00]
+
+v_max_u16 v5, v255, v255
+// GFX12: encoding: [0x05,0x00,0x09,0xd7,0xff,0xff,0x03,0x00]
+
+v_max_u16 v5, s1, s2
+// GFX12: encoding: [0x05,0x00,0x09,0xd7,0x01,0x04,0x00,0x00]
+
+v_max_u16 v5, s105, s105
+// GFX12: encoding: [0x05,0x00,0x09,0xd7,0x69,0xd2,0x00,0x00]
+
+v_max_u16 v5, vcc_lo, ttmp15
+// GFX12: encoding: [0x05,0x00,0x09,0xd7,0x6a,0xf6,0x00,0x00]
+
+v_max_u16 v5, vcc_hi, 0xfe0b
+// GFX12: encoding: [0x05,0x00,0x09,0xd7,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+v_max_u16 v5, ttmp15, src_scc
+// GFX12: encoding: [0x05,0x00,0x09,0xd7,0x7b,0xfa,0x01,0x00]
+
+v_max_u16 v5, m0, 0.5
+// GFX12: encoding: [0x05,0x00,0x09,0xd7,0x7d,0xe0,0x01,0x00]
+
+v_max_u16 v5, exec_lo, -1
+// GFX12: encoding: [0x05,0x00,0x09,0xd7,0x7e,0x82,0x01,0x00]
+
+v_max_u16 v5, exec_hi, null
+// GFX12: encoding: [0x05,0x00,0x09,0xd7,0x7f,0xf8,0x00,0x00]
+
+v_max_u16 v5, null, exec_lo
+// GFX12: encoding: [0x05,0x00,0x09,0xd7,0x7c,0xfc,0x00,0x00]
+
+v_max_u16 v5, -1, exec_hi
+// GFX12: encoding: [0x05,0x00,0x09,0xd7,0xc1,0xfe,0x00,0x00]
+
+v_max_u16 v5, 0.5, m0
+// GFX12: encoding: [0x05,0x00,0x09,0xd7,0xf0,0xfa,0x00,0x00]
+
+v_max_u16 v5, src_scc, vcc_lo
+// GFX12: encoding: [0x05,0x00,0x09,0xd7,0xfd,0xd4,0x00,0x00]
+
+v_max_u16 v255, 0xfe0b, vcc_hi
+// GFX12: encoding: [0xff,0x00,0x09,0xd7,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
+
+v_maxmin_num_f16 v5, v1, v2, s3
+// GFX12: encoding: [0x05,0x00,0x6b,0xd6,0x01,0x05,0x0e,0x00]
+
+v_maxmin_num_f16 v5, v255, s2, s105
+// GFX12: encoding: [0x05,0x00,0x6b,0xd6,0xff,0x05,0xa4,0x01]
+
+v_maxmin_num_f16 v5, s1, v255, exec_hi
+// GFX12: encoding: [0x05,0x00,0x6b,0xd6,0x01,0xfe,0xff,0x01]
+
+v_maxmin_num_f16 v5, s105, s105, exec_lo
+// GFX12: encoding: [0x05,0x00,0x6b,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_maxmin_num_f16 v5, vcc_lo, ttmp15, v3
+// GFX12: encoding: [0x05,0x00,0x6b,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_maxmin_num_f16 v5, vcc_hi, 0xfe0b, v255
+// GFX12: encoding: [0x05,0x00,0x6b,0xd6,0x6b,0xfe,0xfd,0x07,0x0b,0xfe,0x00,0x00]
+
+v_maxmin_num_f16 v5, -|ttmp15|, -|src_scc|, -|ttmp15|
+// GFX12: encoding: [0x05,0x07,0x6b,0xd6,0x7b,0xfa,0xed,0xe1]
+
+v_maxmin_num_f16 v5, m0, 0.5, m0
+// GFX12: encoding: [0x05,0x00,0x6b,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_maxmin_num_f16 v5, |exec_lo|, -1, vcc_hi
+// GFX12: encoding: [0x05,0x01,0x6b,0xd6,0x7e,0x82,0xad,0x01]
+
+v_maxmin_num_f16 v5, -|exec_hi|, null, -|vcc_lo|
+// GFX12: encoding: [0x05,0x05,0x6b,0xd6,0x7f,0xf8,0xa8,0xa1]
+
+v_maxmin_num_f16 v5, null, exec_lo, -|0xfe0b|
+// GFX12: encoding: [0x05,0x04,0x6b,0xd6,0x7c,0xfc,0xfc,0x83,0x0b,0xfe,0x00,0x00]
+
+v_maxmin_num_f16 v5, -1, -|exec_hi|, -|src_scc|
+// GFX12: encoding: [0x05,0x06,0x6b,0xd6,0xc1,0xfe,0xf4,0xc3]
+
+v_maxmin_num_f16 v5, 0.5, -m0, 0.5 mul:2
+// GFX12: encoding: [0x05,0x00,0x6b,0xd6,0xf0,0xfa,0xc0,0x4b]
+
+v_maxmin_num_f16 v5, -src_scc, |vcc_lo|, -1 mul:4
+// GFX12: encoding: [0x05,0x02,0x6b,0xd6,0xfd,0xd4,0x04,0x33]
+
+v_maxmin_num_f16 v255, -|0xfe0b|, -|vcc_hi|, null clamp div:2
+// GFX12: encoding: [0xff,0x83,0x6b,0xd6,0xff,0xd6,0xf0,0x79,0x0b,0xfe,0x00,0x00]
+
+v_maxmin_num_f32 v5, v1, v2, s3
+// GFX12: encoding: [0x05,0x00,0x69,0xd6,0x01,0x05,0x0e,0x00]
+
+v_maxmin_num_f32 v5, v255, s2, s105
+// GFX12: encoding: [0x05,0x00,0x69,0xd6,0xff,0x05,0xa4,0x01]
+
+v_maxmin_num_f32 v5, s1, v255, exec_hi
+// GFX12: encoding: [0x05,0x00,0x69,0xd6,0x01,0xfe,0xff,0x01]
+
+v_maxmin_num_f32 v5, s105, s105, exec_lo
+// GFX12: encoding: [0x05,0x00,0x69,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_maxmin_num_f32 v5, vcc_lo, ttmp15, v3
+// GFX12: encoding: [0x05,0x00,0x69,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_maxmin_num_f32 v5, vcc_hi, 0xaf123456, v255
+// GFX12: encoding: [0x05,0x00,0x69,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+v_maxmin_num_f32 v5, -|ttmp15|, -|src_scc|, -|ttmp15|
+// GFX12: encoding: [0x05,0x07,0x69,0xd6,0x7b,0xfa,0xed,0xe1]
+
+v_maxmin_num_f32 v5, m0, 0.5, m0
+// GFX12: encoding: [0x05,0x00,0x69,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_maxmin_num_f32 v5, |exec_lo|, -1, vcc_hi
+// GFX12: encoding: [0x05,0x01,0x69,0xd6,0x7e,0x82,0xad,0x01]
+
+v_maxmin_num_f32 v5, -|exec_hi|, null, -|vcc_lo|
+// GFX12: encoding: [0x05,0x05,0x69,0xd6,0x7f,0xf8,0xa8,0xa1]
+
+v_maxmin_num_f32 v5, null, exec_lo, -|0xaf123456|
+// GFX12: encoding: [0x05,0x04,0x69,0xd6,0x7c,0xfc,0xfc,0x83,0x56,0x34,0x12,0xaf]
+
+v_maxmin_num_f32 v5, -1, -|exec_hi|, -|src_scc|
+// GFX12: encoding: [0x05,0x06,0x69,0xd6,0xc1,0xfe,0xf4,0xc3]
+
+v_maxmin_num_f32 v5, 0.5, -m0, 0.5 mul:2
+// GFX12: encoding: [0x05,0x00,0x69,0xd6,0xf0,0xfa,0xc0,0x4b]
+
+v_maxmin_num_f32 v5, -src_scc, |vcc_lo|, -1 mul:4
+// GFX12: encoding: [0x05,0x02,0x69,0xd6,0xfd,0xd4,0x04,0x33]
+
+v_maxmin_num_f32 v255, -|0xaf123456|, -|vcc_hi|, null clamp div:2
+// GFX12: encoding: [0xff,0x83,0x69,0xd6,0xff,0xd6,0xf0,0x79,0x56,0x34,0x12,0xaf]
+
+v_maxmin_i32 v5, v1, v2, s3
+// GFX12: encoding: [0x05,0x00,0x64,0xd6,0x01,0x05,0x0e,0x00]
+
+v_maxmin_i32 v5, v255, s2, s105
+// GFX12: encoding: [0x05,0x00,0x64,0xd6,0xff,0x05,0xa4,0x01]
+
+v_maxmin_i32 v5, s1, v255, exec_hi
+// GFX12: encoding: [0x05,0x00,0x64,0xd6,0x01,0xfe,0xff,0x01]
+
+v_maxmin_i32 v5, s105, s105, exec_lo
+// GFX12: encoding: [0x05,0x00,0x64,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_maxmin_i32 v5, vcc_lo, ttmp15, v3
+// GFX12: encoding: [0x05,0x00,0x64,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_maxmin_i32 v5, vcc_hi, 0xaf123456, v255
+// GFX12: encoding: [0x05,0x00,0x64,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+v_maxmin_i32 v5, ttmp15, src_scc, ttmp15
+// GFX12: encoding: [0x05,0x00,0x64,0xd6,0x7b,0xfa,0xed,0x01]
+
+v_maxmin_i32 v5, m0, 0.5, m0
+// GFX12: encoding: [0x05,0x00,0x64,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_maxmin_i32 v5, exec_lo, -1, vcc_hi
+// GFX12: encoding: [0x05,0x00,0x64,0xd6,0x7e,0x82,0xad,0x01]
+
+v_maxmin_i32 v5, exec_hi, null, vcc_lo
+// GFX12: encoding: [0x05,0x00,0x64,0xd6,0x7f,0xf8,0xa8,0x01]
+
+v_maxmin_i32 v5, null, exec_lo, 0xaf123456
+// GFX12: encoding: [0x05,0x00,0x64,0xd6,0x7c,0xfc,0xfc,0x03,0x56,0x34,0x12,0xaf]
+
+v_maxmin_i32 v5, -1, exec_hi, src_scc
+// GFX12: encoding: [0x05,0x00,0x64,0xd6,0xc1,0xfe,0xf4,0x03]
+
+v_maxmin_i32 v5, 0.5, m0, 0.5
+// GFX12: encoding: [0x05,0x00,0x64,0xd6,0xf0,0xfa,0xc0,0x03]
+
+v_maxmin_i32 v5, src_scc, vcc_lo, -1
+// GFX12: encoding: [0x05,0x00,0x64,0xd6,0xfd,0xd4,0x04,0x03]
+
+v_maxmin_i32 v255, 0xaf123456, vcc_hi, null
+// GFX12: encoding: [0xff,0x00,0x64,0xd6,0xff,0xd6,0xf0,0x01,0x56,0x34,0x12,0xaf]
+
+v_maxmin_u32 v5, v1, v2, s3
+// GFX12: encoding: [0x05,0x00,0x62,0xd6,0x01,0x05,0x0e,0x00]
+
+v_maxmin_u32 v5, v255, s2, s105
+// GFX12: encoding: [0x05,0x00,0x62,0xd6,0xff,0x05,0xa4,0x01]
+
+v_maxmin_u32 v5, s1, v255, exec_hi
+// GFX12: encoding: [0x05,0x00,0x62,0xd6,0x01,0xfe,0xff,0x01]
+
+v_maxmin_u32 v5, s105, s105, exec_lo
+// GFX12: encoding: [0x05,0x00,0x62,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_maxmin_u32 v5, vcc_lo, ttmp15, v3
+// GFX12: encoding: [0x05,0x00,0x62,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_maxmin_u32 v5, vcc_hi, 0xaf123456, v255
+// GFX12: encoding: [0x05,0x00,0x62,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+v_maxmin_u32 v5, ttmp15, src_scc, ttmp15
+// GFX12: encoding: [0x05,0x00,0x62,0xd6,0x7b,0xfa,0xed,0x01]
+
+v_maxmin_u32 v5, m0, 0.5, m0
+// GFX12: encoding: [0x05,0x00,0x62,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_maxmin_u32 v5, exec_lo, -1, vcc_hi
+// GFX12: encoding: [0x05,0x00,0x62,0xd6,0x7e,0x82,0xad,0x01]
+
+v_maxmin_u32 v5, exec_hi, null, vcc_lo
+// GFX12: encoding: [0x05,0x00,0x62,0xd6,0x7f,0xf8,0xa8,0x01]
+
+v_maxmin_u32 v5, null, exec_lo, 0xaf123456
+// GFX12: encoding: [0x05,0x00,0x62,0xd6,0x7c,0xfc,0xfc,0x03,0x56,0x34,0x12,0xaf]
+
+v_maxmin_u32 v5, -1, exec_hi, src_scc
+// GFX12: encoding: [0x05,0x00,0x62,0xd6,0xc1,0xfe,0xf4,0x03]
+
+v_maxmin_u32 v5, 0.5, m0, 0.5
+// GFX12: encoding: [0x05,0x00,0x62,0xd6,0xf0,0xfa,0xc0,0x03]
+
+v_maxmin_u32 v5, src_scc, vcc_lo, -1
+// GFX12: encoding: [0x05,0x00,0x62,0xd6,0xfd,0xd4,0x04,0x03]
+
+v_maxmin_u32 v255, 0xaf123456, vcc_hi, null
+// GFX12: encoding: [0xff,0x00,0x62,0xd6,0xff,0xd6,0xf0,0x01,0x56,0x34,0x12,0xaf]
+
+v_mbcnt_hi_u32_b32 v5, v1, v2
+// GFX12: encoding: [0x05,0x00,0x20,0xd7,0x01,0x05,0x02,0x00]
+
+v_mbcnt_hi_u32_b32 v5, v255, v255
+// GFX12: encoding: [0x05,0x00,0x20,0xd7,0xff,0xff,0x03,0x00]
+
+v_mbcnt_hi_u32_b32 v5, s1, s2
+// GFX12: encoding: [0x05,0x00,0x20,0xd7,0x01,0x04,0x00,0x00]
+
+v_mbcnt_hi_u32_b32 v5, s105, s105
+// GFX12: encoding: [0x05,0x00,0x20,0xd7,0x69,0xd2,0x00,0x00]
+
+v_mbcnt_hi_u32_b32 v5, vcc_lo, ttmp15
+// GFX12: encoding: [0x05,0x00,0x20,0xd7,0x6a,0xf6,0x00,0x00]
+
+v_mbcnt_hi_u32_b32 v5, vcc_hi, 0xaf123456
+// GFX12: encoding: [0x05,0x00,0x20,0xd7,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_mbcnt_hi_u32_b32 v5, ttmp15, src_scc
+// GFX12: encoding: [0x05,0x00,0x20,0xd7,0x7b,0xfa,0x01,0x00]
+
+v_mbcnt_hi_u32_b32 v5, m0, 0.5
+// GFX12: encoding: [0x05,0x00,0x20,0xd7,0x7d,0xe0,0x01,0x00]
+
+v_mbcnt_hi_u32_b32 v5, exec_lo, -1
+// GFX12: encoding: [0x05,0x00,0x20,0xd7,0x7e,0x82,0x01,0x00]
+
+v_mbcnt_hi_u32_b32 v5, exec_hi, null
+// GFX12: encoding: [0x05,0x00,0x20,0xd7,0x7f,0xf8,0x00,0x00]
+
+v_mbcnt_hi_u32_b32 v5, null, exec_lo
+// GFX12: encoding: [0x05,0x00,0x20,0xd7,0x7c,0xfc,0x00,0x00]
+
+v_mbcnt_hi_u32_b32 v5, -1, exec_hi
+// GFX12: encoding: [0x05,0x00,0x20,0xd7,0xc1,0xfe,0x00,0x00]
+
+v_mbcnt_hi_u32_b32 v5, 0.5, m0
+// GFX12: encoding: [0x05,0x00,0x20,0xd7,0xf0,0xfa,0x00,0x00]
+
+v_mbcnt_hi_u32_b32 v5, src_scc, vcc_lo
+// GFX12: encoding: [0x05,0x00,0x20,0xd7,0xfd,0xd4,0x00,0x00]
+
+v_mbcnt_hi_u32_b32 v255, 0xaf123456, vcc_hi
+// GFX12: encoding: [0xff,0x00,0x20,0xd7,0xff,0xd6,0x00,0x00,0x56,0x34,0x12,0xaf]
+
+v_mbcnt_lo_u32_b32 v5, v1, v2
+// GFX12: encoding: [0x05,0x00,0x1f,0xd7,0x01,0x05,0x02,0x00]
+
+v_mbcnt_lo_u32_b32 v5, v255, v255
+// GFX12: encoding: [0x05,0x00,0x1f,0xd7,0xff,0xff,0x03,0x00]
+
+v_mbcnt_lo_u32_b32 v5, s1, s2
+// GFX12: encoding: [0x05,0x00,0x1f,0xd7,0x01,0x04,0x00,0x00]
+
+v_mbcnt_lo_u32_b32 v5, s105, s105
+// GFX12: encoding: [0x05,0x00,0x1f,0xd7,0x69,0xd2,0x00,0x00]
+
+v_mbcnt_lo_u32_b32 v5, vcc_lo, ttmp15
+// GFX12: encoding: [0x05,0x00,0x1f,0xd7,0x6a,0xf6,0x00,0x00]
+
+v_mbcnt_lo_u32_b32 v5, vcc_hi, 0xaf123456
+// GFX12: encoding: [0x05,0x00,0x1f,0xd7,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_mbcnt_lo_u32_b32 v5, ttmp15, src_scc
+// GFX12: encoding: [0x05,0x00,0x1f,0xd7,0x7b,0xfa,0x01,0x00]
+
+v_mbcnt_lo_u32_b32 v5, m0, 0.5
+// GFX12: encoding: [0x05,0x00,0x1f,0xd7,0x7d,0xe0,0x01,0x00]
+
+v_mbcnt_lo_u32_b32 v5, exec_lo, -1
+// GFX12: encoding: [0x05,0x00,0x1f,0xd7,0x7e,0x82,0x01,0x00]
+
+v_mbcnt_lo_u32_b32 v5, exec_hi, null
+// GFX12: encoding: [0x05,0x00,0x1f,0xd7,0x7f,0xf8,0x00,0x00]
+
+v_mbcnt_lo_u32_b32 v5, null, exec_lo
+// GFX12: encoding: [0x05,0x00,0x1f,0xd7,0x7c,0xfc,0x00,0x00]
+
+v_mbcnt_lo_u32_b32 v5, -1, exec_hi
+// GFX12: encoding: [0x05,0x00,0x1f,0xd7,0xc1,0xfe,0x00,0x00]
+
+v_mbcnt_lo_u32_b32 v5, 0.5, m0
+// GFX12: encoding: [0x05,0x00,0x1f,0xd7,0xf0,0xfa,0x00,0x00]
+
+v_mbcnt_lo_u32_b32 v5, src_scc, vcc_lo
+// GFX12: encoding: [0x05,0x00,0x1f,0xd7,0xfd,0xd4,0x00,0x00]
+
+v_mbcnt_lo_u32_b32 v255, 0xaf123456, vcc_hi
+// GFX12: encoding: [0xff,0x00,0x1f,0xd7,0xff,0xd6,0x00,0x00,0x56,0x34,0x12,0xaf]
+
+v_med3_num_f16 v5, v1, v2, s3
+// GFX12: encoding: [0x05,0x00,0x32,0xd6,0x01,0x05,0x0e,0x00]
+
+v_med3_num_f16 v5, v255, s2, s105
+// GFX12: encoding: [0x05,0x00,0x32,0xd6,0xff,0x05,0xa4,0x01]
+
+v_med3_num_f16 v5, s1, v255, exec_hi
+// GFX12: encoding: [0x05,0x00,0x32,0xd6,0x01,0xfe,0xff,0x01]
+
+v_med3_num_f16 v5, s105, s105, exec_lo
+// GFX12: encoding: [0x05,0x00,0x32,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_med3_num_f16 v5, vcc_lo, ttmp15, v3
+// GFX12: encoding: [0x05,0x00,0x32,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_med3_num_f16 v5, vcc_hi, 0xfe0b, v255
+// GFX12: encoding: [0x05,0x00,0x32,0xd6,0x6b,0xfe,0xfd,0x07,0x0b,0xfe,0x00,0x00]
+
+v_med3_num_f16 v5, -|ttmp15|, -|src_scc|, -|ttmp15|
+// GFX12: encoding: [0x05,0x07,0x32,0xd6,0x7b,0xfa,0xed,0xe1]
+
+v_med3_num_f16 v5, m0, 0.5, m0
+// GFX12: encoding: [0x05,0x00,0x32,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_med3_num_f16 v5, |exec_lo|, -1, vcc_hi
+// GFX12: encoding: [0x05,0x01,0x32,0xd6,0x7e,0x82,0xad,0x01]
+
+v_med3_num_f16 v5, -|exec_hi|, null, -|vcc_lo| op_sel:[1,1,1,1]
+// GFX12: encoding: [0x05,0x7d,0x32,0xd6,0x7f,0xf8,0xa8,0xa1]
+
+v_med3_num_f16 v5, null, exec_lo, -|0xfe0b| op_sel:[0,0,0,0]
+// GFX12: encoding: [0x05,0x04,0x32,0xd6,0x7c,0xfc,0xfc,0x83,0x0b,0xfe,0x00,0x00]
+
+v_med3_num_f16 v5, -1, -|exec_hi|, -|src_scc| op_sel:[1,0,0,0]
+// GFX12: encoding: [0x05,0x0e,0x32,0xd6,0xc1,0xfe,0xf4,0xc3]
+
+v_med3_num_f16 v5, 0.5, -m0, 0.5 op_sel:[0,1,0,0]
+// GFX12: encoding: [0x05,0x10,0x32,0xd6,0xf0,0xfa,0xc0,0x43]
+
+v_med3_num_f16 v5, -src_scc, |vcc_lo|, -1 op_sel:[0,0,1,0]
+// GFX12: encoding: [0x05,0x22,0x32,0xd6,0xfd,0xd4,0x04,0x23]
+
+v_med3_num_f16 v255, -|0xfe0b|, -|vcc_hi|, null op_sel:[0,0,0,1] clamp
+// GFX12: encoding: [0xff,0xc3,0x32,0xd6,0xff,0xd6,0xf0,0x61,0x0b,0xfe,0x00,0x00]
+
+v_med3_num_f32 v5, v1, v2, s3
+// GFX12: encoding: [0x05,0x00,0x31,0xd6,0x01,0x05,0x0e,0x00]
+
+v_med3_num_f32 v5, v255, s2, s105
+// GFX12: encoding: [0x05,0x00,0x31,0xd6,0xff,0x05,0xa4,0x01]
+
+v_med3_num_f32 v5, s1, v255, exec_hi
+// GFX12: encoding: [0x05,0x00,0x31,0xd6,0x01,0xfe,0xff,0x01]
+
+v_med3_num_f32 v5, s105, s105, exec_lo
+// GFX12: encoding: [0x05,0x00,0x31,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_med3_num_f32 v5, vcc_lo, ttmp15, v3
+// GFX12: encoding: [0x05,0x00,0x31,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_med3_num_f32 v5, vcc_hi, 0xaf123456, v255
+// GFX12: encoding: [0x05,0x00,0x31,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+v_med3_num_f32 v5, -|ttmp15|, -|src_scc|, -|ttmp15|
+// GFX12: encoding: [0x05,0x07,0x31,0xd6,0x7b,0xfa,0xed,0xe1]
+
+v_med3_num_f32 v5, m0, 0.5, m0
+// GFX12: encoding: [0x05,0x00,0x31,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_med3_num_f32 v5, |exec_lo|, -1, vcc_hi
+// GFX12: encoding: [0x05,0x01,0x31,0xd6,0x7e,0x82,0xad,0x01]
+
+v_med3_num_f32 v5, -|exec_hi|, null, -|vcc_lo|
+// GFX12: encoding: [0x05,0x05,0x31,0xd6,0x7f,0xf8,0xa8,0xa1]
+
+v_med3_num_f32 v5, null, exec_lo, -|0xaf123456|
+// GFX12: encoding: [0x05,0x04,0x31,0xd6,0x7c,0xfc,0xfc,0x83,0x56,0x34,0x12,0xaf]
+
+v_med3_num_f32 v5, -1, -|exec_hi|, -|src_scc|
+// GFX12: encoding: [0x05,0x06,0x31,0xd6,0xc1,0xfe,0xf4,0xc3]
+
+v_med3_num_f32 v5, 0.5, -m0, 0.5 mul:2
+// GFX12: encoding: [0x05,0x00,0x31,0xd6,0xf0,0xfa,0xc0,0x4b]
+
+v_med3_num_f32 v5, -src_scc, |vcc_lo|, -1 mul:4
+// GFX12: encoding: [0x05,0x02,0x31,0xd6,0xfd,0xd4,0x04,0x33]
+
+v_med3_num_f32 v255, -|0xaf123456|, -|vcc_hi|, null clamp div:2
+// GFX12: encoding: [0xff,0x83,0x31,0xd6,0xff,0xd6,0xf0,0x79,0x56,0x34,0x12,0xaf]
+
+v_med3_i16 v5, v1, v2, s3
+// GFX12: encoding: [0x05,0x00,0x50,0xd6,0x01,0x05,0x0e,0x00]
+
+v_med3_i16 v5, v255, s2, s105
+// GFX12: encoding: [0x05,0x00,0x50,0xd6,0xff,0x05,0xa4,0x01]
+
+v_med3_i16 v5, s1, v255, exec_hi
+// GFX12: encoding: [0x05,0x00,0x50,0xd6,0x01,0xfe,0xff,0x01]
+
+v_med3_i16 v5, s105, s105, exec_lo
+// GFX12: encoding: [0x05,0x00,0x50,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_med3_i16 v5, vcc_lo, ttmp15, v3
+// GFX12: encoding: [0x05,0x00,0x50,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_med3_i16 v5, vcc_hi, 0xfe0b, v255
+// GFX12: encoding: [0x05,0x00,0x50,0xd6,0x6b,0xfe,0xfd,0x07,0x0b,0xfe,0x00,0x00]
+
+v_med3_i16 v5, ttmp15, src_scc, ttmp15
+// GFX12: encoding: [0x05,0x00,0x50,0xd6,0x7b,0xfa,0xed,0x01]
+
+v_med3_i16 v5, m0, 0.5, m0
+// GFX12: encoding: [0x05,0x00,0x50,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_med3_i16 v5, exec_lo, -1, vcc_hi
+// GFX12: encoding: [0x05,0x00,0x50,0xd6,0x7e,0x82,0xad,0x01]
+
+v_med3_i16 v5, exec_hi, null, vcc_lo op_sel:[1,1,1,1]
+// GFX12: encoding: [0x05,0x78,0x50,0xd6,0x7f,0xf8,0xa8,0x01]
+
+v_med3_i16 v5, null, exec_lo, 0xfe0b op_sel:[0,0,0,0]
+// GFX12: encoding: [0x05,0x00,0x50,0xd6,0x7c,0xfc,0xfc,0x03,0x0b,0xfe,0x00,0x00]
+
+v_med3_i16 v5, -1, exec_hi, src_scc op_sel:[1,0,0,0]
+// GFX12: encoding: [0x05,0x08,0x50,0xd6,0xc1,0xfe,0xf4,0x03]
+
+v_med3_i16 v5, 0.5, m0, 0.5 op_sel:[0,1,0,0]
+// GFX12: encoding: [0x05,0x10,0x50,0xd6,0xf0,0xfa,0xc0,0x03]
+
+v_med3_i16 v5, src_scc, vcc_lo, -1 op_sel:[0,0,1,0]
+// GFX12: encoding: [0x05,0x20,0x50,0xd6,0xfd,0xd4,0x04,0x03]
+
+v_med3_i16 v255, 0xfe0b, vcc_hi, null op_sel:[0,0,0,1]
+// GFX12: encoding: [0xff,0x40,0x50,0xd6,0xff,0xd6,0xf0,0x01,0x0b,0xfe,0x00,0x00]
+
+v_med3_i32 v5, v1, v2, s3
+// GFX12: encoding: [0x05,0x00,0x20,0xd6,0x01,0x05,0x0e,0x00]
+
+v_med3_i32 v5, v255, s2, s105
+// GFX12: encoding: [0x05,0x00,0x20,0xd6,0xff,0x05,0xa4,0x01]
+
+v_med3_i32 v5, s1, v255, exec_hi
+// GFX12: encoding: [0x05,0x00,0x20,0xd6,0x01,0xfe,0xff,0x01]
+
+v_med3_i32 v5, s105, s105, exec_lo
+// GFX12: encoding: [0x05,0x00,0x20,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_med3_i32 v5, vcc_lo, ttmp15, v3
+// GFX12: encoding: [0x05,0x00,0x20,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_med3_i32 v5, vcc_hi, 0xaf123456, v255
+// GFX12: encoding: [0x05,0x00,0x20,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+v_med3_i32 v5, ttmp15, src_scc, ttmp15
+// GFX12: encoding: [0x05,0x00,0x20,0xd6,0x7b,0xfa,0xed,0x01]
+
+v_med3_i32 v5, m0, 0.5, m0
+// GFX12: encoding: [0x05,0x00,0x20,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_med3_i32 v5, exec_lo, -1, vcc_hi
+// GFX12: encoding: [0x05,0x00,0x20,0xd6,0x7e,0x82,0xad,0x01]
+
+v_med3_i32 v5, exec_hi, null, vcc_lo
+// GFX12: encoding: [0x05,0x00,0x20,0xd6,0x7f,0xf8,0xa8,0x01]
+
+v_med3_i32 v5, null, exec_lo, 0xaf123456
+// GFX12: encoding: [0x05,0x00,0x20,0xd6,0x7c,0xfc,0xfc,0x03,0x56,0x34,0x12,0xaf]
+
+v_med3_i32 v5, -1, exec_hi, src_scc
+// GFX12: encoding: [0x05,0x00,0x20,0xd6,0xc1,0xfe,0xf4,0x03]
+
+v_med3_i32 v5, 0.5, m0, 0.5
+// GFX12: encoding: [0x05,0x00,0x20,0xd6,0xf0,0xfa,0xc0,0x03]
+
+v_med3_i32 v5, src_scc, vcc_lo, -1
+// GFX12: encoding: [0x05,0x00,0x20,0xd6,0xfd,0xd4,0x04,0x03]
+
+v_med3_i32 v255, 0xaf123456, vcc_hi, null
+// GFX12: encoding: [0xff,0x00,0x20,0xd6,0xff,0xd6,0xf0,0x01,0x56,0x34,0x12,0xaf]
+
+v_med3_u16 v5, v1, v2, s3
+// GFX12: encoding: [0x05,0x00,0x51,0xd6,0x01,0x05,0x0e,0x00]
+
+v_med3_u16 v5, v255, s2, s105
+// GFX12: encoding: [0x05,0x00,0x51,0xd6,0xff,0x05,0xa4,0x01]
+
+v_med3_u16 v5, s1, v255, exec_hi
+// GFX12: encoding: [0x05,0x00,0x51,0xd6,0x01,0xfe,0xff,0x01]
+
+v_med3_u16 v5, s105, s105, exec_lo
+// GFX12: encoding: [0x05,0x00,0x51,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_med3_u16 v5, vcc_lo, ttmp15, v3
+// GFX12: encoding: [0x05,0x00,0x51,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_med3_u16 v5, vcc_hi, 0xfe0b, v255
+// GFX12: encoding: [0x05,0x00,0x51,0xd6,0x6b,0xfe,0xfd,0x07,0x0b,0xfe,0x00,0x00]
+
+v_med3_u16 v5, ttmp15, src_scc, ttmp15
+// GFX12: encoding: [0x05,0x00,0x51,0xd6,0x7b,0xfa,0xed,0x01]
+
+v_med3_u16 v5, m0, 0.5, m0
+// GFX12: encoding: [0x05,0x00,0x51,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_med3_u16 v5, exec_lo, -1, vcc_hi
+// GFX12: encoding: [0x05,0x00,0x51,0xd6,0x7e,0x82,0xad,0x01]
+
+v_med3_u16 v5, exec_hi, null, vcc_lo op_sel:[1,1,1,1]
+// GFX12: encoding: [0x05,0x78,0x51,0xd6,0x7f,0xf8,0xa8,0x01]
+
+v_med3_u16 v5, null, exec_lo, 0xfe0b op_sel:[0,0,0,0]
+// GFX12: encoding: [0x05,0x00,0x51,0xd6,0x7c,0xfc,0xfc,0x03,0x0b,0xfe,0x00,0x00]
+
+v_med3_u16 v5, -1, exec_hi, src_scc op_sel:[1,0,0,0]
+// GFX12: encoding: [0x05,0x08,0x51,0xd6,0xc1,0xfe,0xf4,0x03]
+
+v_med3_u16 v5, 0.5, m0, 0.5 op_sel:[0,1,0,0]
+// GFX12: encoding: [0x05,0x10,0x51,0xd6,0xf0,0xfa,0xc0,0x03]
+
+v_med3_u16 v5, src_scc, vcc_lo, -1 op_sel:[0,0,1,0]
+// GFX12: encoding: [0x05,0x20,0x51,0xd6,0xfd,0xd4,0x04,0x03]
+
+v_med3_u16 v255, 0xfe0b, vcc_hi, null op_sel:[0,0,0,1]
+// GFX12: encoding: [0xff,0x40,0x51,0xd6,0xff,0xd6,0xf0,0x01,0x0b,0xfe,0x00,0x00]
+
+v_med3_u32 v5, v1, v2, s3
+// GFX12: encoding: [0x05,0x00,0x21,0xd6,0x01,0x05,0x0e,0x00]
+
+v_med3_u32 v5, v255, s2, s105
+// GFX12: encoding: [0x05,0x00,0x21,0xd6,0xff,0x05,0xa4,0x01]
+
+v_med3_u32 v5, s1, v255, exec_hi
+// GFX12: encoding: [0x05,0x00,0x21,0xd6,0x01,0xfe,0xff,0x01]
+
+v_med3_u32 v5, s105, s105, exec_lo
+// GFX12: encoding: [0x05,0x00,0x21,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_med3_u32 v5, vcc_lo, ttmp15, v3
+// GFX12: encoding: [0x05,0x00,0x21,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_med3_u32 v5, vcc_hi, 0xaf123456, v255
+// GFX12: encoding: [0x05,0x00,0x21,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+v_med3_u32 v5, ttmp15, src_scc, ttmp15
+// GFX12: encoding: [0x05,0x00,0x21,0xd6,0x7b,0xfa,0xed,0x01]
+
+v_med3_u32 v5, m0, 0.5, m0
+// GFX12: encoding: [0x05,0x00,0x21,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_med3_u32 v5, exec_lo, -1, vcc_hi
+// GFX12: encoding: [0x05,0x00,0x21,0xd6,0x7e,0x82,0xad,0x01]
+
+v_med3_u32 v5, exec_hi, null, vcc_lo
+// GFX12: encoding: [0x05,0x00,0x21,0xd6,0x7f,0xf8,0xa8,0x01]
+
+v_med3_u32 v5, null, exec_lo, 0xaf123456
+// GFX12: encoding: [0x05,0x00,0x21,0xd6,0x7c,0xfc,0xfc,0x03,0x56,0x34,0x12,0xaf]
+
+v_med3_u32 v5, -1, exec_hi, src_scc
+// GFX12: encoding: [0x05,0x00,0x21,0xd6,0xc1,0xfe,0xf4,0x03]
+
+v_med3_u32 v5, 0.5, m0, 0.5
+// GFX12: encoding: [0x05,0x00,0x21,0xd6,0xf0,0xfa,0xc0,0x03]
+
+v_med3_u32 v5, src_scc, vcc_lo, -1
+// GFX12: encoding: [0x05,0x00,0x21,0xd6,0xfd,0xd4,0x04,0x03]
+
+v_med3_u32 v255, 0xaf123456, vcc_hi, null
+// GFX12: encoding: [0xff,0x00,0x21,0xd6,0xff,0xd6,0xf0,0x01,0x56,0x34,0x12,0xaf]
+
+v_min3_num_f16 v5, v1, v2, s3
+// GFX12: encoding: [0x05,0x00,0x2b,0xd6,0x01,0x05,0x0e,0x00]
+
+v_min3_num_f16 v5, v255, s2, s105
+// GFX12: encoding: [0x05,0x00,0x2b,0xd6,0xff,0x05,0xa4,0x01]
+
+v_min3_num_f16 v5, s1, v255, exec_hi
+// GFX12: encoding: [0x05,0x00,0x2b,0xd6,0x01,0xfe,0xff,0x01]
+
+v_min3_num_f16 v5, s105, s105, exec_lo
+// GFX12: encoding: [0x05,0x00,0x2b,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_min3_num_f16 v5, vcc_lo, ttmp15, v3
+// GFX12: encoding: [0x05,0x00,0x2b,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_min3_num_f16 v5, vcc_hi, 0xfe0b, v255
+// GFX12: encoding: [0x05,0x00,0x2b,0xd6,0x6b,0xfe,0xfd,0x07,0x0b,0xfe,0x00,0x00]
+
+v_min3_num_f16 v5, -|ttmp15|, -|src_scc|, -|ttmp15|
+// GFX12: encoding: [0x05,0x07,0x2b,0xd6,0x7b,0xfa,0xed,0xe1]
+
+v_min3_num_f16 v5, m0, 0.5, m0
+// GFX12: encoding: [0x05,0x00,0x2b,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_min3_num_f16 v5, |exec_lo|, -1, vcc_hi
+// GFX12: encoding: [0x05,0x01,0x2b,0xd6,0x7e,0x82,0xad,0x01]
+
+v_min3_num_f16 v5, -|exec_hi|, null, -|vcc_lo| op_sel:[1,1,1,1]
+// GFX12: encoding: [0x05,0x7d,0x2b,0xd6,0x7f,0xf8,0xa8,0xa1]
+
+v_min3_num_f16 v5, null, exec_lo, -|0xfe0b| op_sel:[0,0,0,0]
+// GFX12: encoding: [0x05,0x04,0x2b,0xd6,0x7c,0xfc,0xfc,0x83,0x0b,0xfe,0x00,0x00]
+
+v_min3_num_f16 v5, -1, -|exec_hi|, -|src_scc| op_sel:[1,0,0,0]
+// GFX12: encoding: [0x05,0x0e,0x2b,0xd6,0xc1,0xfe,0xf4,0xc3]
+
+v_min3_num_f16 v5, 0.5, -m0, 0.5 op_sel:[0,1,0,0]
+// GFX12: encoding: [0x05,0x10,0x2b,0xd6,0xf0,0xfa,0xc0,0x43]
+
+v_min3_num_f16 v5, -src_scc, |vcc_lo|, -1 op_sel:[0,0,1,0]
+// GFX12: encoding: [0x05,0x22,0x2b,0xd6,0xfd,0xd4,0x04,0x23]
+
+v_min3_num_f16 v255, -|0xfe0b|, -|vcc_hi|, null op_sel:[0,0,0,1] clamp
+// GFX12: encoding: [0xff,0xc3,0x2b,0xd6,0xff,0xd6,0xf0,0x61,0x0b,0xfe,0x00,0x00]
+
+v_min3_num_f32 v5, v1, v2, s3
+// GFX12: encoding: [0x05,0x00,0x29,0xd6,0x01,0x05,0x0e,0x00]
+
+v_min3_num_f32 v5, v255, s2, s105
+// GFX12: encoding: [0x05,0x00,0x29,0xd6,0xff,0x05,0xa4,0x01]
+
+v_min3_num_f32 v5, s1, v255, exec_hi
+// GFX12: encoding: [0x05,0x00,0x29,0xd6,0x01,0xfe,0xff,0x01]
+
+v_min3_num_f32 v5, s105, s105, exec_lo
+// GFX12: encoding: [0x05,0x00,0x29,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_min3_num_f32 v5, vcc_lo, ttmp15, v3
+// GFX12: encoding: [0x05,0x00,0x29,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_min3_num_f32 v5, vcc_hi, 0xaf123456, v255
+// GFX12: encoding: [0x05,0x00,0x29,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+v_min3_num_f32 v5, -|ttmp15|, -|src_scc|, -|ttmp15|
+// GFX12: encoding: [0x05,0x07,0x29,0xd6,0x7b,0xfa,0xed,0xe1]
+
+v_min3_num_f32 v5, m0, 0.5, m0
+// GFX12: encoding: [0x05,0x00,0x29,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_min3_num_f32 v5, |exec_lo|, -1, vcc_hi
+// GFX12: encoding: [0x05,0x01,0x29,0xd6,0x7e,0x82,0xad,0x01]
+
+v_min3_num_f32 v5, -|exec_hi|, null, -|vcc_lo|
+// GFX12: encoding: [0x05,0x05,0x29,0xd6,0x7f,0xf8,0xa8,0xa1]
+
+v_min3_num_f32 v5, null, exec_lo, -|0xaf123456|
+// GFX12: encoding: [0x05,0x04,0x29,0xd6,0x7c,0xfc,0xfc,0x83,0x56,0x34,0x12,0xaf]
+
+v_min3_num_f32 v5, -1, -|exec_hi|, -|src_scc|
+// GFX12: encoding: [0x05,0x06,0x29,0xd6,0xc1,0xfe,0xf4,0xc3]
+
+v_min3_num_f32 v5, 0.5, -m0, 0.5 mul:2
+// GFX12: encoding: [0x05,0x00,0x29,0xd6,0xf0,0xfa,0xc0,0x4b]
+
+v_min3_num_f32 v5, -src_scc, |vcc_lo|, -1 mul:4
+// GFX12: encoding: [0x05,0x02,0x29,0xd6,0xfd,0xd4,0x04,0x33]
+
+v_min3_num_f32 v255, -|0xaf123456|, -|vcc_hi|, null clamp div:2
+// GFX12: encoding: [0xff,0x83,0x29,0xd6,0xff,0xd6,0xf0,0x79,0x56,0x34,0x12,0xaf]
+
+v_min3_i16 v5, v1, v2, s3
+// GFX12: encoding: [0x05,0x00,0x4a,0xd6,0x01,0x05,0x0e,0x00]
+
+v_min3_i16 v5, v255, s2, s105
+// GFX12: encoding: [0x05,0x00,0x4a,0xd6,0xff,0x05,0xa4,0x01]
+
+v_min3_i16 v5, s1, v255, exec_hi
+// GFX12: encoding: [0x05,0x00,0x4a,0xd6,0x01,0xfe,0xff,0x01]
+
+v_min3_i16 v5, s105, s105, exec_lo
+// GFX12: encoding: [0x05,0x00,0x4a,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_min3_i16 v5, vcc_lo, ttmp15, v3
+// GFX12: encoding: [0x05,0x00,0x4a,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_min3_i16 v5, vcc_hi, 0xfe0b, v255
+// GFX12: encoding: [0x05,0x00,0x4a,0xd6,0x6b,0xfe,0xfd,0x07,0x0b,0xfe,0x00,0x00]
+
+v_min3_i16 v5, ttmp15, src_scc, ttmp15
+// GFX12: encoding: [0x05,0x00,0x4a,0xd6,0x7b,0xfa,0xed,0x01]
+
+v_min3_i16 v5, m0, 0.5, m0
+// GFX12: encoding: [0x05,0x00,0x4a,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_min3_i16 v5, exec_lo, -1, vcc_hi
+// GFX12: encoding: [0x05,0x00,0x4a,0xd6,0x7e,0x82,0xad,0x01]
+
+v_min3_i16 v5, exec_hi, null, vcc_lo op_sel:[1,1,1,1]
+// GFX12: encoding: [0x05,0x78,0x4a,0xd6,0x7f,0xf8,0xa8,0x01]
+
+v_min3_i16 v5, null, exec_lo, 0xfe0b op_sel:[0,0,0,0]
+// GFX12: encoding: [0x05,0x00,0x4a,0xd6,0x7c,0xfc,0xfc,0x03,0x0b,0xfe,0x00,0x00]
+
+v_min3_i16 v5, -1, exec_hi, src_scc op_sel:[1,0,0,0]
+// GFX12: encoding: [0x05,0x08,0x4a,0xd6,0xc1,0xfe,0xf4,0x03]
+
+v_min3_i16 v5, 0.5, m0, 0.5 op_sel:[0,1,0,0]
+// GFX12: encoding: [0x05,0x10,0x4a,0xd6,0xf0,0xfa,0xc0,0x03]
+
+v_min3_i16 v5, src_scc, vcc_lo, -1 op_sel:[0,0,1,0]
+// GFX12: encoding: [0x05,0x20,0x4a,0xd6,0xfd,0xd4,0x04,0x03]
+
+v_min3_i16 v255, 0xfe0b, vcc_hi, null op_sel:[0,0,0,1]
+// GFX12: encoding: [0xff,0x40,0x4a,0xd6,0xff,0xd6,0xf0,0x01,0x0b,0xfe,0x00,0x00]
+
+v_min3_i32 v5, v1, v2, s3
+// GFX12: encoding: [0x05,0x00,0x1a,0xd6,0x01,0x05,0x0e,0x00]
+
+v_min3_i32 v5, v255, s2, s105
+// GFX12: encoding: [0x05,0x00,0x1a,0xd6,0xff,0x05,0xa4,0x01]
+
+v_min3_i32 v5, s1, v255, exec_hi
+// GFX12: encoding: [0x05,0x00,0x1a,0xd6,0x01,0xfe,0xff,0x01]
+
+v_min3_i32 v5, s105, s105, exec_lo
+// GFX12: encoding: [0x05,0x00,0x1a,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_min3_i32 v5, vcc_lo, ttmp15, v3
+// GFX12: encoding: [0x05,0x00,0x1a,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_min3_i32 v5, vcc_hi, 0xaf123456, v255
+// GFX12: encoding: [0x05,0x00,0x1a,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+v_min3_i32 v5, ttmp15, src_scc, ttmp15
+// GFX12: encoding: [0x05,0x00,0x1a,0xd6,0x7b,0xfa,0xed,0x01]
+
+v_min3_i32 v5, m0, 0.5, m0
+// GFX12: encoding: [0x05,0x00,0x1a,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_min3_i32 v5, exec_lo, -1, vcc_hi
+// GFX12: encoding: [0x05,0x00,0x1a,0xd6,0x7e,0x82,0xad,0x01]
+
+v_min3_i32 v5, exec_hi, null, vcc_lo
+// GFX12: encoding: [0x05,0x00,0x1a,0xd6,0x7f,0xf8,0xa8,0x01]
+
+v_min3_i32 v5, null, exec_lo, 0xaf123456
+// GFX12: encoding: [0x05,0x00,0x1a,0xd6,0x7c,0xfc,0xfc,0x03,0x56,0x34,0x12,0xaf]
+
+v_min3_i32 v5, -1, exec_hi, src_scc
+// GFX12: encoding: [0x05,0x00,0x1a,0xd6,0xc1,0xfe,0xf4,0x03]
+
+v_min3_i32 v5, 0.5, m0, 0.5
+// GFX12: encoding: [0x05,0x00,0x1a,0xd6,0xf0,0xfa,0xc0,0x03]
+
+v_min3_i32 v5, src_scc, vcc_lo, -1
+// GFX12: encoding: [0x05,0x00,0x1a,0xd6,0xfd,0xd4,0x04,0x03]
+
+v_min3_i32 v255, 0xaf123456, vcc_hi, null
+// GFX12: encoding: [0xff,0x00,0x1a,0xd6,0xff,0xd6,0xf0,0x01,0x56,0x34,0x12,0xaf]
+
+v_min3_u16 v5, v1, v2, s3
+// GFX12: encoding: [0x05,0x00,0x4b,0xd6,0x01,0x05,0x0e,0x00]
+
+v_min3_u16 v5, v255, s2, s105
+// GFX12: encoding: [0x05,0x00,0x4b,0xd6,0xff,0x05,0xa4,0x01]
+
+v_min3_u16 v5, s1, v255, exec_hi
+// GFX12: encoding: [0x05,0x00,0x4b,0xd6,0x01,0xfe,0xff,0x01]
+
+v_min3_u16 v5, s105, s105, exec_lo
+// GFX12: encoding: [0x05,0x00,0x4b,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_min3_u16 v5, vcc_lo, ttmp15, v3
+// GFX12: encoding: [0x05,0x00,0x4b,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_min3_u16 v5, vcc_hi, 0xfe0b, v255
+// GFX12: encoding: [0x05,0x00,0x4b,0xd6,0x6b,0xfe,0xfd,0x07,0x0b,0xfe,0x00,0x00]
+
+v_min3_u16 v5, ttmp15, src_scc, ttmp15
+// GFX12: encoding: [0x05,0x00,0x4b,0xd6,0x7b,0xfa,0xed,0x01]
+
+v_min3_u16 v5, m0, 0.5, m0
+// GFX12: encoding: [0x05,0x00,0x4b,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_min3_u16 v5, exec_lo, -1, vcc_hi
+// GFX12: encoding: [0x05,0x00,0x4b,0xd6,0x7e,0x82,0xad,0x01]
+
+v_min3_u16 v5, exec_hi, null, vcc_lo op_sel:[1,1,1,1]
+// GFX12: encoding: [0x05,0x78,0x4b,0xd6,0x7f,0xf8,0xa8,0x01]
+
+v_min3_u16 v5, null, exec_lo, 0xfe0b op_sel:[0,0,0,0]
+// GFX12: encoding: [0x05,0x00,0x4b,0xd6,0x7c,0xfc,0xfc,0x03,0x0b,0xfe,0x00,0x00]
+
+v_min3_u16 v5, -1, exec_hi, src_scc op_sel:[1,0,0,0]
+// GFX12: encoding: [0x05,0x08,0x4b,0xd6,0xc1,0xfe,0xf4,0x03]
+
+v_min3_u16 v5, 0.5, m0, 0.5 op_sel:[0,1,0,0]
+// GFX12: encoding: [0x05,0x10,0x4b,0xd6,0xf0,0xfa,0xc0,0x03]
+
+v_min3_u16 v5, src_scc, vcc_lo, -1 op_sel:[0,0,1,0]
+// GFX12: encoding: [0x05,0x20,0x4b,0xd6,0xfd,0xd4,0x04,0x03]
+
+v_min3_u16 v255, 0xfe0b, vcc_hi, null op_sel:[0,0,0,1]
+// GFX12: encoding: [0xff,0x40,0x4b,0xd6,0xff,0xd6,0xf0,0x01,0x0b,0xfe,0x00,0x00]
+
+v_min3_u32 v5, v1, v2, s3
+// GFX12: encoding: [0x05,0x00,0x1b,0xd6,0x01,0x05,0x0e,0x00]
+
+v_min3_u32 v5, v255, s2, s105
+// GFX12: encoding: [0x05,0x00,0x1b,0xd6,0xff,0x05,0xa4,0x01]
+
+v_min3_u32 v5, s1, v255, exec_hi
+// GFX12: encoding: [0x05,0x00,0x1b,0xd6,0x01,0xfe,0xff,0x01]
+
+v_min3_u32 v5, s105, s105, exec_lo
+// GFX12: encoding: [0x05,0x00,0x1b,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_min3_u32 v5, vcc_lo, ttmp15, v3
+// GFX12: encoding: [0x05,0x00,0x1b,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_min3_u32 v5, vcc_hi, 0xaf123456, v255
+// GFX12: encoding: [0x05,0x00,0x1b,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+v_min3_u32 v5, ttmp15, src_scc, ttmp15
+// GFX12: encoding: [0x05,0x00,0x1b,0xd6,0x7b,0xfa,0xed,0x01]
+
+v_min3_u32 v5, m0, 0.5, m0
+// GFX12: encoding: [0x05,0x00,0x1b,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_min3_u32 v5, exec_lo, -1, vcc_hi
+// GFX12: encoding: [0x05,0x00,0x1b,0xd6,0x7e,0x82,0xad,0x01]
+
+v_min3_u32 v5, exec_hi, null, vcc_lo
+// GFX12: encoding: [0x05,0x00,0x1b,0xd6,0x7f,0xf8,0xa8,0x01]
+
+v_min3_u32 v5, null, exec_lo, 0xaf123456
+// GFX12: encoding: [0x05,0x00,0x1b,0xd6,0x7c,0xfc,0xfc,0x03,0x56,0x34,0x12,0xaf]
+
+v_min3_u32 v5, -1, exec_hi, src_scc
+// GFX12: encoding: [0x05,0x00,0x1b,0xd6,0xc1,0xfe,0xf4,0x03]
+
+v_min3_u32 v5, 0.5, m0, 0.5
+// GFX12: encoding: [0x05,0x00,0x1b,0xd6,0xf0,0xfa,0xc0,0x03]
+
+v_min3_u32 v5, src_scc, vcc_lo, -1
+// GFX12: encoding: [0x05,0x00,0x1b,0xd6,0xfd,0xd4,0x04,0x03]
+
+v_min3_u32 v255, 0xaf123456, vcc_hi, null
+// GFX12: encoding: [0xff,0x00,0x1b,0xd6,0xff,0xd6,0xf0,0x01,0x56,0x34,0x12,0xaf]
+
+v_min_i16 v5, v1, v2
+// GFX12: encoding: [0x05,0x00,0x0c,0xd7,0x01,0x05,0x02,0x00]
+
+v_min_i16 v5, v255, v255
+// GFX12: encoding: [0x05,0x00,0x0c,0xd7,0xff,0xff,0x03,0x00]
+
+v_min_i16 v5, s1, s2
+// GFX12: encoding: [0x05,0x00,0x0c,0xd7,0x01,0x04,0x00,0x00]
+
+v_min_i16 v5, s105, s105
+// GFX12: encoding: [0x05,0x00,0x0c,0xd7,0x69,0xd2,0x00,0x00]
+
+v_min_i16 v5, vcc_lo, ttmp15
+// GFX12: encoding: [0x05,0x00,0x0c,0xd7,0x6a,0xf6,0x00,0x00]
+
+v_min_i16 v5, vcc_hi, 0xfe0b
+// GFX12: encoding: [0x05,0x00,0x0c,0xd7,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+v_min_i16 v5, ttmp15, src_scc
+// GFX12: encoding: [0x05,0x00,0x0c,0xd7,0x7b,0xfa,0x01,0x00]
+
+v_min_i16 v5, m0, 0.5
+// GFX12: encoding: [0x05,0x00,0x0c,0xd7,0x7d,0xe0,0x01,0x00]
+
+v_min_i16 v5, exec_lo, -1
+// GFX12: encoding: [0x05,0x00,0x0c,0xd7,0x7e,0x82,0x01,0x00]
+
+v_min_i16 v5, exec_hi, null
+// GFX12: encoding: [0x05,0x00,0x0c,0xd7,0x7f,0xf8,0x00,0x00]
+
+v_min_i16 v5, null, exec_lo
+// GFX12: encoding: [0x05,0x00,0x0c,0xd7,0x7c,0xfc,0x00,0x00]
+
+v_min_i16 v5, -1, exec_hi
+// GFX12: encoding: [0x05,0x00,0x0c,0xd7,0xc1,0xfe,0x00,0x00]
+
+v_min_i16 v5, 0.5, m0
+// GFX12: encoding: [0x05,0x00,0x0c,0xd7,0xf0,0xfa,0x00,0x00]
+
+v_min_i16 v5, src_scc, vcc_lo
+// GFX12: encoding: [0x05,0x00,0x0c,0xd7,0xfd,0xd4,0x00,0x00]
+
+v_min_i16 v255, 0xfe0b, vcc_hi
+// GFX12: encoding: [0xff,0x00,0x0c,0xd7,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
+
+v_min_u16 v5, v1, v2
+// GFX12: encoding: [0x05,0x00,0x0b,0xd7,0x01,0x05,0x02,0x00]
+
+v_min_u16 v5, v255, v255
+// GFX12: encoding: [0x05,0x00,0x0b,0xd7,0xff,0xff,0x03,0x00]
+
+v_min_u16 v5, s1, s2
+// GFX12: encoding: [0x05,0x00,0x0b,0xd7,0x01,0x04,0x00,0x00]
+
+v_min_u16 v5, s105, s105
+// GFX12: encoding: [0x05,0x00,0x0b,0xd7,0x69,0xd2,0x00,0x00]
+
+v_min_u16 v5, vcc_lo, ttmp15
+// GFX12: encoding: [0x05,0x00,0x0b,0xd7,0x6a,0xf6,0x00,0x00]
+
+v_min_u16 v5, vcc_hi, 0xfe0b
+// GFX12: encoding: [0x05,0x00,0x0b,0xd7,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+v_min_u16 v5, ttmp15, src_scc
+// GFX12: encoding: [0x05,0x00,0x0b,0xd7,0x7b,0xfa,0x01,0x00]
+
+v_min_u16 v5, m0, 0.5
+// GFX12: encoding: [0x05,0x00,0x0b,0xd7,0x7d,0xe0,0x01,0x00]
+
+v_min_u16 v5, exec_lo, -1
+// GFX12: encoding: [0x05,0x00,0x0b,0xd7,0x7e,0x82,0x01,0x00]
+
+v_min_u16 v5, exec_hi, null
+// GFX12: encoding: [0x05,0x00,0x0b,0xd7,0x7f,0xf8,0x00,0x00]
+
+v_min_u16 v5, null, exec_lo
+// GFX12: encoding: [0x05,0x00,0x0b,0xd7,0x7c,0xfc,0x00,0x00]
+
+v_min_u16 v5, -1, exec_hi
+// GFX12: encoding: [0x05,0x00,0x0b,0xd7,0xc1,0xfe,0x00,0x00]
+
+v_min_u16 v5, 0.5, m0
+// GFX12: encoding: [0x05,0x00,0x0b,0xd7,0xf0,0xfa,0x00,0x00]
+
+v_min_u16 v5, src_scc, vcc_lo
+// GFX12: encoding: [0x05,0x00,0x0b,0xd7,0xfd,0xd4,0x00,0x00]
+
+v_min_u16 v255, 0xfe0b, vcc_hi
+// GFX12: encoding: [0xff,0x00,0x0b,0xd7,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
+
+v_minmax_num_f16 v5, v1, v2, s3
+// GFX12: encoding: [0x05,0x00,0x6a,0xd6,0x01,0x05,0x0e,0x00]
+
+v_minmax_num_f16 v5, v255, s2, s105
+// GFX12: encoding: [0x05,0x00,0x6a,0xd6,0xff,0x05,0xa4,0x01]
+
+v_minmax_num_f16 v5, s1, v255, exec_hi
+// GFX12: encoding: [0x05,0x00,0x6a,0xd6,0x01,0xfe,0xff,0x01]
+
+v_minmax_num_f16 v5, s105, s105, exec_lo
+// GFX12: encoding: [0x05,0x00,0x6a,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_minmax_num_f16 v5, vcc_lo, ttmp15, v3
+// GFX12: encoding: [0x05,0x00,0x6a,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_minmax_num_f16 v5, vcc_hi, 0xfe0b, v255
+// GFX12: encoding: [0x05,0x00,0x6a,0xd6,0x6b,0xfe,0xfd,0x07,0x0b,0xfe,0x00,0x00]
+
+v_minmax_num_f16 v5, -|ttmp15|, -|src_scc|, -|ttmp15|
+// GFX12: encoding: [0x05,0x07,0x6a,0xd6,0x7b,0xfa,0xed,0xe1]
+
+v_minmax_num_f16 v5, m0, 0.5, m0
+// GFX12: encoding: [0x05,0x00,0x6a,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_minmax_num_f16 v5, |exec_lo|, -1, vcc_hi
+// GFX12: encoding: [0x05,0x01,0x6a,0xd6,0x7e,0x82,0xad,0x01]
+
+v_minmax_num_f16 v5, -|exec_hi|, null, -|vcc_lo|
+// GFX12: encoding: [0x05,0x05,0x6a,0xd6,0x7f,0xf8,0xa8,0xa1]
+
+v_minmax_num_f16 v5, null, exec_lo, -|0xfe0b|
+// GFX12: encoding: [0x05,0x04,0x6a,0xd6,0x7c,0xfc,0xfc,0x83,0x0b,0xfe,0x00,0x00]
+
+v_minmax_num_f16 v5, -1, -|exec_hi|, -|src_scc|
+// GFX12: encoding: [0x05,0x06,0x6a,0xd6,0xc1,0xfe,0xf4,0xc3]
+
+v_minmax_num_f16 v5, 0.5, -m0, 0.5 mul:2
+// GFX12: encoding: [0x05,0x00,0x6a,0xd6,0xf0,0xfa,0xc0,0x4b]
+
+v_minmax_num_f16 v5, -src_scc, |vcc_lo|, -1 mul:4
+// GFX12: encoding: [0x05,0x02,0x6a,0xd6,0xfd,0xd4,0x04,0x33]
+
+v_minmax_num_f16 v255, -|0xfe0b|, -|vcc_hi|, null clamp div:2
+// GFX12: encoding: [0xff,0x83,0x6a,0xd6,0xff,0xd6,0xf0,0x79,0x0b,0xfe,0x00,0x00]
+
+v_minmax_num_f32 v5, v1, v2, s3
+// GFX12: encoding: [0x05,0x00,0x68,0xd6,0x01,0x05,0x0e,0x00]
+
+v_minmax_num_f32 v5, v255, s2, s105
+// GFX12: encoding: [0x05,0x00,0x68,0xd6,0xff,0x05,0xa4,0x01]
+
+v_minmax_num_f32 v5, s1, v255, exec_hi
+// GFX12: encoding: [0x05,0x00,0x68,0xd6,0x01,0xfe,0xff,0x01]
+
+v_minmax_num_f32 v5, s105, s105, exec_lo
+// GFX12: encoding: [0x05,0x00,0x68,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_minmax_num_f32 v5, vcc_lo, ttmp15, v3
+// GFX12: encoding: [0x05,0x00,0x68,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_minmax_num_f32 v5, vcc_hi, 0xaf123456, v255
+// GFX12: encoding: [0x05,0x00,0x68,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+v_minmax_num_f32 v5, -|ttmp15|, -|src_scc|, -|ttmp15|
+// GFX12: encoding: [0x05,0x07,0x68,0xd6,0x7b,0xfa,0xed,0xe1]
+
+v_minmax_num_f32 v5, m0, 0.5, m0
+// GFX12: encoding: [0x05,0x00,0x68,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_minmax_num_f32 v5, |exec_lo|, -1, vcc_hi
+// GFX12: encoding: [0x05,0x01,0x68,0xd6,0x7e,0x82,0xad,0x01]
+
+v_minmax_num_f32 v5, -|exec_hi|, null, -|vcc_lo|
+// GFX12: encoding: [0x05,0x05,0x68,0xd6,0x7f,0xf8,0xa8,0xa1]
+
+v_minmax_num_f32 v5, null, exec_lo, -|0xaf123456|
+// GFX12: encoding: [0x05,0x04,0x68,0xd6,0x7c,0xfc,0xfc,0x83,0x56,0x34,0x12,0xaf]
+
+v_minmax_num_f32 v5, -1, -|exec_hi|, -|src_scc|
+// GFX12: encoding: [0x05,0x06,0x68,0xd6,0xc1,0xfe,0xf4,0xc3]
+
+v_minmax_num_f32 v5, 0.5, -m0, 0.5 mul:2
+// GFX12: encoding: [0x05,0x00,0x68,0xd6,0xf0,0xfa,0xc0,0x4b]
+
+v_minmax_num_f32 v5, -src_scc, |vcc_lo|, -1 mul:4
+// GFX12: encoding: [0x05,0x02,0x68,0xd6,0xfd,0xd4,0x04,0x33]
+
+v_minmax_num_f32 v255, -|0xaf123456|, -|vcc_hi|, null clamp div:2
+// GFX12: encoding: [0xff,0x83,0x68,0xd6,0xff,0xd6,0xf0,0x79,0x56,0x34,0x12,0xaf]
+
+v_minmax_i32 v5, v1, v2, s3
+// GFX12: encoding: [0x05,0x00,0x65,0xd6,0x01,0x05,0x0e,0x00]
+
+v_minmax_i32 v5, v255, s2, s105
+// GFX12: encoding: [0x05,0x00,0x65,0xd6,0xff,0x05,0xa4,0x01]
+
+v_minmax_i32 v5, s1, v255, exec_hi
+// GFX12: encoding: [0x05,0x00,0x65,0xd6,0x01,0xfe,0xff,0x01]
+
+v_minmax_i32 v5, s105, s105, exec_lo
+// GFX12: encoding: [0x05,0x00,0x65,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_minmax_i32 v5, vcc_lo, ttmp15, v3
+// GFX12: encoding: [0x05,0x00,0x65,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_minmax_i32 v5, vcc_hi, 0xaf123456, v255
+// GFX12: encoding: [0x05,0x00,0x65,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+v_minmax_i32 v5, ttmp15, src_scc, ttmp15
+// GFX12: encoding: [0x05,0x00,0x65,0xd6,0x7b,0xfa,0xed,0x01]
+
+v_minmax_i32 v5, m0, 0.5, m0
+// GFX12: encoding: [0x05,0x00,0x65,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_minmax_i32 v5, exec_lo, -1, vcc_hi
+// GFX12: encoding: [0x05,0x00,0x65,0xd6,0x7e,0x82,0xad,0x01]
+
+v_minmax_i32 v5, exec_hi, null, vcc_lo
+// GFX12: encoding: [0x05,0x00,0x65,0xd6,0x7f,0xf8,0xa8,0x01]
+
+v_minmax_i32 v5, null, exec_lo, 0xaf123456
+// GFX12: encoding: [0x05,0x00,0x65,0xd6,0x7c,0xfc,0xfc,0x03,0x56,0x34,0x12,0xaf]
+
+v_minmax_i32 v5, -1, exec_hi, src_scc
+// GFX12: encoding: [0x05,0x00,0x65,0xd6,0xc1,0xfe,0xf4,0x03]
+
+v_minmax_i32 v5, 0.5, m0, 0.5
+// GFX12: encoding: [0x05,0x00,0x65,0xd6,0xf0,0xfa,0xc0,0x03]
+
+v_minmax_i32 v5, src_scc, vcc_lo, -1
+// GFX12: encoding: [0x05,0x00,0x65,0xd6,0xfd,0xd4,0x04,0x03]
+
+v_minmax_i32 v255, 0xaf123456, vcc_hi, null
+// GFX12: encoding: [0xff,0x00,0x65,0xd6,0xff,0xd6,0xf0,0x01,0x56,0x34,0x12,0xaf]
+
+v_minmax_u32 v5, v1, v2, s3
+// GFX12: encoding: [0x05,0x00,0x63,0xd6,0x01,0x05,0x0e,0x00]
+
+v_minmax_u32 v5, v255, s2, s105
+// GFX12: encoding: [0x05,0x00,0x63,0xd6,0xff,0x05,0xa4,0x01]
+
+v_minmax_u32 v5, s1, v255, exec_hi
+// GFX12: encoding: [0x05,0x00,0x63,0xd6,0x01,0xfe,0xff,0x01]
+
+v_minmax_u32 v5, s105, s105, exec_lo
+// GFX12: encoding: [0x05,0x00,0x63,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_minmax_u32 v5, vcc_lo, ttmp15, v3
+// GFX12: encoding: [0x05,0x00,0x63,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_minmax_u32 v5, vcc_hi, 0xaf123456, v255
+// GFX12: encoding: [0x05,0x00,0x63,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+v_minmax_u32 v5, ttmp15, src_scc, ttmp15
+// GFX12: encoding: [0x05,0x00,0x63,0xd6,0x7b,0xfa,0xed,0x01]
+
+v_minmax_u32 v5, m0, 0.5, m0
+// GFX12: encoding: [0x05,0x00,0x63,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_minmax_u32 v5, exec_lo, -1, vcc_hi
+// GFX12: encoding: [0x05,0x00,0x63,0xd6,0x7e,0x82,0xad,0x01]
+
+v_minmax_u32 v5, exec_hi, null, vcc_lo
+// GFX12: encoding: [0x05,0x00,0x63,0xd6,0x7f,0xf8,0xa8,0x01]
+
+v_minmax_u32 v5, null, exec_lo, 0xaf123456
+// GFX12: encoding: [0x05,0x00,0x63,0xd6,0x7c,0xfc,0xfc,0x03,0x56,0x34,0x12,0xaf]
+
+v_minmax_u32 v5, -1, exec_hi, src_scc
+// GFX12: encoding: [0x05,0x00,0x63,0xd6,0xc1,0xfe,0xf4,0x03]
+
+v_minmax_u32 v5, 0.5, m0, 0.5
+// GFX12: encoding: [0x05,0x00,0x63,0xd6,0xf0,0xfa,0xc0,0x03]
+
+v_minmax_u32 v5, src_scc, vcc_lo, -1
+// GFX12: encoding: [0x05,0x00,0x63,0xd6,0xfd,0xd4,0x04,0x03]
+
+v_minmax_u32 v255, 0xaf123456, vcc_hi, null
+// GFX12: encoding: [0xff,0x00,0x63,0xd6,0xff,0xd6,0xf0,0x01,0x56,0x34,0x12,0xaf]
+
+v_mqsad_pk_u16_u8 v[5:6], v[1:2], v2, ttmp[14:15]
+// GFX12: encoding: [0x05,0x00,0x3b,0xd6,0x01,0x05,0xea,0x01]
+
+v_mqsad_pk_u16_u8 v[5:6], v[1:2], v255, ttmp[14:15]
+// GFX12: encoding: [0x05,0x00,0x3b,0xd6,0x01,0xff,0xeb,0x01]
+
+v_mqsad_pk_u16_u8 v[5:6], v[1:2], s2, ttmp[14:15]
+// GFX12: encoding: [0x05,0x00,0x3b,0xd6,0x01,0x05,0xe8,0x01]
+
+v_mqsad_pk_u16_u8 v[5:6], v[1:2], s105, ttmp[14:15]
+// GFX12: encoding: [0x05,0x00,0x3b,0xd6,0x01,0xd3,0xe8,0x01]
+
+v_mqsad_pk_u16_u8 v[5:6], v[254:255], ttmp15, s[6:7]
+// GFX12: encoding: [0x05,0x00,0x3b,0xd6,0xfe,0xf7,0x18,0x00]
+
+v_mqsad_pk_u16_u8 v[5:6], s[2:3], vcc_hi, v[3:4]
+// GFX12: encoding: [0x05,0x00,0x3b,0xd6,0x02,0xd6,0x0c,0x04]
+
+v_mqsad_pk_u16_u8 v[5:6], s[104:105], vcc_lo, s[104:105]
+// GFX12: encoding: [0x05,0x00,0x3b,0xd6,0x68,0xd4,0xa0,0x01]
+
+v_mqsad_pk_u16_u8 v[5:6], vcc, m0, v[254:255]
+// GFX12: encoding: [0x05,0x00,0x3b,0xd6,0x6a,0xfa,0xf8,0x07]
+
+v_mqsad_pk_u16_u8 v[5:6], ttmp[14:15], exec_hi, null
+// GFX12: encoding: [0x05,0x00,0x3b,0xd6,0x7a,0xfe,0xf0,0x01]
+
+v_mqsad_pk_u16_u8 v[5:6], exec, exec_lo, exec
+// GFX12: encoding: [0x05,0x00,0x3b,0xd6,0x7e,0xfc,0xf8,0x01]
+
+v_mqsad_pk_u16_u8 v[5:6], null, null, vcc
+// GFX12: encoding: [0x05,0x00,0x3b,0xd6,0x7c,0xf8,0xa8,0x01]
+
+v_mqsad_pk_u16_u8 v[5:6], -1, -1, 0xaf123456
+// GFX12: encoding: [0x05,0x00,0x3b,0xd6,0xc1,0x82,0xfd,0x03,0x56,0x34,0x12,0xaf]
+
+v_mqsad_pk_u16_u8 v[5:6], 0.5, 0.5, src_scc
+// GFX12: encoding: [0x05,0x00,0x3b,0xd6,0xf0,0xe0,0xf5,0x03]
+
+v_mqsad_pk_u16_u8 v[5:6], src_scc, src_scc, 0.5
+// GFX12: encoding: [0x05,0x00,0x3b,0xd6,0xfd,0xfa,0xc1,0x03]
+
+v_mqsad_pk_u16_u8 v[254:255], 0xaf123456, 0xaf123456, -1 clamp
+// GFX12: encoding: [0xfe,0x80,0x3b,0xd6,0xff,0xfe,0x05,0x03,0x56,0x34,0x12,0xaf]
+
+v_mqsad_u32_u8 v[5:8], v[1:2], v2, v[252:255]
+// GFX12: encoding: [0x05,0x00,0x3d,0xd6,0x01,0x05,0xf2,0x07]
+
+v_mqsad_u32_u8 v[5:8], v[1:2], v255, v[252:255]
+// GFX12: encoding: [0x05,0x00,0x3d,0xd6,0x01,0xff,0xf3,0x07]
+
+v_mqsad_u32_u8 v[5:8], v[1:2], s2, v[252:255]
+// GFX12: encoding: [0x05,0x00,0x3d,0xd6,0x01,0x05,0xf0,0x07]
+
+v_mqsad_u32_u8 v[5:8], v[1:2], s105, v[252:255]
+// GFX12: encoding: [0x05,0x00,0x3d,0xd6,0x01,0xd3,0xf0,0x07]
+
+v_mqsad_u32_u8 v[5:8], v[254:255], ttmp15, v[252:255]
+// GFX12: encoding: [0x05,0x00,0x3d,0xd6,0xfe,0xf7,0xf0,0x07]
+
+v_mqsad_u32_u8 v[5:8], s[2:3], vcc_hi, v[252:255]
+// GFX12: encoding: [0x05,0x00,0x3d,0xd6,0x02,0xd6,0xf0,0x07]
+
+v_mqsad_u32_u8 v[5:8], s[104:105], vcc_lo, v[252:255]
+// GFX12: encoding: [0x05,0x00,0x3d,0xd6,0x68,0xd4,0xf0,0x07]
+
+v_mqsad_u32_u8 v[5:8], vcc, m0, v[252:255]
+// GFX12: encoding: [0x05,0x00,0x3d,0xd6,0x6a,0xfa,0xf0,0x07]
+
+v_mqsad_u32_u8 v[5:8], ttmp[14:15], exec_hi, v[252:255]
+// GFX12: encoding: [0x05,0x00,0x3d,0xd6,0x7a,0xfe,0xf0,0x07]
+
+v_mqsad_u32_u8 v[5:8], exec, exec_lo, v[252:255]
+// GFX12: encoding: [0x05,0x00,0x3d,0xd6,0x7e,0xfc,0xf0,0x07]
+
+v_mqsad_u32_u8 v[5:8], null, null, v[252:255]
+// GFX12: encoding: [0x05,0x00,0x3d,0xd6,0x7c,0xf8,0xf0,0x07]
+
+v_mqsad_u32_u8 v[5:8], -1, -1, v[252:255]
+// GFX12: encoding: [0x05,0x00,0x3d,0xd6,0xc1,0x82,0xf1,0x07]
+
+v_mqsad_u32_u8 v[5:8], 0.5, 0.5, v[252:255]
+// GFX12: encoding: [0x05,0x00,0x3d,0xd6,0xf0,0xe0,0xf1,0x07]
+
+v_mqsad_u32_u8 v[5:8], src_scc, src_scc, v[252:255]
+// GFX12: encoding: [0x05,0x00,0x3d,0xd6,0xfd,0xfa,0xf1,0x07]
+
+v_mqsad_u32_u8 v[252:255], 0xaf123456, 0xaf123456, v[3:6] clamp
+// GFX12: encoding: [0xfc,0x80,0x3d,0xd6,0xff,0xfe,0x0d,0x04,0x56,0x34,0x12,0xaf]
+
+v_msad_u8 v5, v1, v2, s3
+// GFX12: encoding: [0x05,0x00,0x39,0xd6,0x01,0x05,0x0e,0x00]
+
+v_msad_u8 v5, v255, s2, s105
+// GFX12: encoding: [0x05,0x00,0x39,0xd6,0xff,0x05,0xa4,0x01]
+
+v_msad_u8 v5, s1, v255, exec_hi
+// GFX12: encoding: [0x05,0x00,0x39,0xd6,0x01,0xfe,0xff,0x01]
+
+v_msad_u8 v5, s105, s105, exec_lo
+// GFX12: encoding: [0x05,0x00,0x39,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_msad_u8 v5, vcc_lo, ttmp15, v3
+// GFX12: encoding: [0x05,0x00,0x39,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_msad_u8 v5, vcc_hi, 0xaf123456, v255
+// GFX12: encoding: [0x05,0x00,0x39,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+v_msad_u8 v5, ttmp15, src_scc, ttmp15
+// GFX12: encoding: [0x05,0x00,0x39,0xd6,0x7b,0xfa,0xed,0x01]
+
+v_msad_u8 v5, m0, 0.5, m0
+// GFX12: encoding: [0x05,0x00,0x39,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_msad_u8 v5, exec_lo, -1, vcc_hi
+// GFX12: encoding: [0x05,0x00,0x39,0xd6,0x7e,0x82,0xad,0x01]
+
+v_msad_u8 v5, exec_hi, null, vcc_lo
+// GFX12: encoding: [0x05,0x00,0x39,0xd6,0x7f,0xf8,0xa8,0x01]
+
+v_msad_u8 v5, null, exec_lo, 0xaf123456
+// GFX12: encoding: [0x05,0x00,0x39,0xd6,0x7c,0xfc,0xfc,0x03,0x56,0x34,0x12,0xaf]
+
+v_msad_u8 v5, -1, exec_hi, src_scc
+// GFX12: encoding: [0x05,0x00,0x39,0xd6,0xc1,0xfe,0xf4,0x03]
+
+v_msad_u8 v5, 0.5, m0, 0.5
+// GFX12: encoding: [0x05,0x00,0x39,0xd6,0xf0,0xfa,0xc0,0x03]
+
+v_msad_u8 v5, src_scc, vcc_lo, -1
+// GFX12: encoding: [0x05,0x00,0x39,0xd6,0xfd,0xd4,0x04,0x03]
+
+v_msad_u8 v255, 0xaf123456, vcc_hi, null clamp
+// GFX12: encoding: [0xff,0x80,0x39,0xd6,0xff,0xd6,0xf0,0x01,0x56,0x34,0x12,0xaf]
+
+v_mul_hi_i32 v5, v1, v2
+// GFX12: encoding: [0x05,0x00,0x2e,0xd7,0x01,0x05,0x02,0x00]
+
+v_mul_hi_i32 v5, v255, v255
+// GFX12: encoding: [0x05,0x00,0x2e,0xd7,0xff,0xff,0x03,0x00]
+
+v_mul_hi_i32 v5, s1, s2
+// GFX12: encoding: [0x05,0x00,0x2e,0xd7,0x01,0x04,0x00,0x00]
+
+v_mul_hi_i32 v5, s105, s105
+// GFX12: encoding: [0x05,0x00,0x2e,0xd7,0x69,0xd2,0x00,0x00]
+
+v_mul_hi_i32 v5, vcc_lo, ttmp15
+// GFX12: encoding: [0x05,0x00,0x2e,0xd7,0x6a,0xf6,0x00,0x00]
+
+v_mul_hi_i32 v5, vcc_hi, 0xaf123456
+// GFX12: encoding: [0x05,0x00,0x2e,0xd7,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_mul_hi_i32 v5, ttmp15, src_scc
+// GFX12: encoding: [0x05,0x00,0x2e,0xd7,0x7b,0xfa,0x01,0x00]
+
+v_mul_hi_i32 v5, m0, 0.5
+// GFX12: encoding: [0x05,0x00,0x2e,0xd7,0x7d,0xe0,0x01,0x00]
+
+v_mul_hi_i32 v5, exec_lo, -1
+// GFX12: encoding: [0x05,0x00,0x2e,0xd7,0x7e,0x82,0x01,0x00]
+
+v_mul_hi_i32 v5, exec_hi, null
+// GFX12: encoding: [0x05,0x00,0x2e,0xd7,0x7f,0xf8,0x00,0x00]
+
+v_mul_hi_i32 v5, null, exec_lo
+// GFX12: encoding: [0x05,0x00,0x2e,0xd7,0x7c,0xfc,0x00,0x00]
+
+v_mul_hi_i32 v5, -1, exec_hi
+// GFX12: encoding: [0x05,0x00,0x2e,0xd7,0xc1,0xfe,0x00,0x00]
+
+v_mul_hi_i32 v5, 0.5, m0
+// GFX12: encoding: [0x05,0x00,0x2e,0xd7,0xf0,0xfa,0x00,0x00]
+
+v_mul_hi_i32 v5, src_scc, vcc_lo
+// GFX12: encoding: [0x05,0x00,0x2e,0xd7,0xfd,0xd4,0x00,0x00]
+
+v_mul_hi_i32 v255, 0xaf123456, vcc_hi
+// GFX12: encoding: [0xff,0x00,0x2e,0xd7,0xff,0xd6,0x00,0x00,0x56,0x34,0x12,0xaf]
+
+v_mul_hi_u32 v5, v1, v2
+// GFX12: encoding: [0x05,0x00,0x2d,0xd7,0x01,0x05,0x02,0x00]
+
+v_mul_hi_u32 v5, v255, v255
+// GFX12: encoding: [0x05,0x00,0x2d,0xd7,0xff,0xff,0x03,0x00]
+
+v_mul_hi_u32 v5, s1, s2
+// GFX12: encoding: [0x05,0x00,0x2d,0xd7,0x01,0x04,0x00,0x00]
+
+v_mul_hi_u32 v5, s105, s105
+// GFX12: encoding: [0x05,0x00,0x2d,0xd7,0x69,0xd2,0x00,0x00]
+
+v_mul_hi_u32 v5, vcc_lo, ttmp15
+// GFX12: encoding: [0x05,0x00,0x2d,0xd7,0x6a,0xf6,0x00,0x00]
+
+v_mul_hi_u32 v5, vcc_hi, 0xaf123456
+// GFX12: encoding: [0x05,0x00,0x2d,0xd7,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_mul_hi_u32 v5, ttmp15, src_scc
+// GFX12: encoding: [0x05,0x00,0x2d,0xd7,0x7b,0xfa,0x01,0x00]
+
+v_mul_hi_u32 v5, m0, 0.5
+// GFX12: encoding: [0x05,0x00,0x2d,0xd7,0x7d,0xe0,0x01,0x00]
+
+v_mul_hi_u32 v5, exec_lo, -1
+// GFX12: encoding: [0x05,0x00,0x2d,0xd7,0x7e,0x82,0x01,0x00]
+
+v_mul_hi_u32 v5, exec_hi, null
+// GFX12: encoding: [0x05,0x00,0x2d,0xd7,0x7f,0xf8,0x00,0x00]
+
+v_mul_hi_u32 v5, null, exec_lo
+// GFX12: encoding: [0x05,0x00,0x2d,0xd7,0x7c,0xfc,0x00,0x00]
+
+v_mul_hi_u32 v5, -1, exec_hi
+// GFX12: encoding: [0x05,0x00,0x2d,0xd7,0xc1,0xfe,0x00,0x00]
+
+v_mul_hi_u32 v5, 0.5, m0
+// GFX12: encoding: [0x05,0x00,0x2d,0xd7,0xf0,0xfa,0x00,0x00]
+
+v_mul_hi_u32 v5, src_scc, vcc_lo
+// GFX12: encoding: [0x05,0x00,0x2d,0xd7,0xfd,0xd4,0x00,0x00]
+
+v_mul_hi_u32 v255, 0xaf123456, vcc_hi
+// GFX12: encoding: [0xff,0x00,0x2d,0xd7,0xff,0xd6,0x00,0x00,0x56,0x34,0x12,0xaf]
+
+v_mul_lo_u16 v5, v1, v2
+// GFX12: encoding: [0x05,0x00,0x05,0xd7,0x01,0x05,0x02,0x00]
+
+v_mul_lo_u16 v5, v255, v255
+// GFX12: encoding: [0x05,0x00,0x05,0xd7,0xff,0xff,0x03,0x00]
+
+v_mul_lo_u16 v5, s1, s2
+// GFX12: encoding: [0x05,0x00,0x05,0xd7,0x01,0x04,0x00,0x00]
+
+v_mul_lo_u16 v5, s105, s105
+// GFX12: encoding: [0x05,0x00,0x05,0xd7,0x69,0xd2,0x00,0x00]
+
+v_mul_lo_u16 v5, vcc_lo, ttmp15
+// GFX12: encoding: [0x05,0x00,0x05,0xd7,0x6a,0xf6,0x00,0x00]
+
+v_mul_lo_u16 v5, vcc_hi, 0xfe0b
+// GFX12: encoding: [0x05,0x00,0x05,0xd7,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+v_mul_lo_u16 v5, ttmp15, src_scc
+// GFX12: encoding: [0x05,0x00,0x05,0xd7,0x7b,0xfa,0x01,0x00]
+
+v_mul_lo_u16 v5, m0, 0.5
+// GFX12: encoding: [0x05,0x00,0x05,0xd7,0x7d,0xe0,0x01,0x00]
+
+v_mul_lo_u16 v5, exec_lo, -1
+// GFX12: encoding: [0x05,0x00,0x05,0xd7,0x7e,0x82,0x01,0x00]
+
+v_mul_lo_u16 v5, exec_hi, null
+// GFX12: encoding: [0x05,0x00,0x05,0xd7,0x7f,0xf8,0x00,0x00]
+
+v_mul_lo_u16 v5, null, exec_lo
+// GFX12: encoding: [0x05,0x00,0x05,0xd7,0x7c,0xfc,0x00,0x00]
+
+v_mul_lo_u16 v5, -1, exec_hi
+// GFX12: encoding: [0x05,0x00,0x05,0xd7,0xc1,0xfe,0x00,0x00]
+
+v_mul_lo_u16 v5, 0.5, m0
+// GFX12: encoding: [0x05,0x00,0x05,0xd7,0xf0,0xfa,0x00,0x00]
+
+v_mul_lo_u16 v5, src_scc, vcc_lo
+// GFX12: encoding: [0x05,0x00,0x05,0xd7,0xfd,0xd4,0x00,0x00]
+
+v_mul_lo_u16 v255, 0xfe0b, vcc_hi
+// GFX12: encoding: [0xff,0x00,0x05,0xd7,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
+
+v_mul_lo_u32 v5, v1, v2
+// GFX12: encoding: [0x05,0x00,0x2c,0xd7,0x01,0x05,0x02,0x00]
+
+v_mul_lo_u32 v5, v255, v255
+// GFX12: encoding: [0x05,0x00,0x2c,0xd7,0xff,0xff,0x03,0x00]
+
+v_mul_lo_u32 v5, s1, s2
+// GFX12: encoding: [0x05,0x00,0x2c,0xd7,0x01,0x04,0x00,0x00]
+
+v_mul_lo_u32 v5, s105, s105
+// GFX12: encoding: [0x05,0x00,0x2c,0xd7,0x69,0xd2,0x00,0x00]
+
+v_mul_lo_u32 v5, vcc_lo, ttmp15
+// GFX12: encoding: [0x05,0x00,0x2c,0xd7,0x6a,0xf6,0x00,0x00]
+
+v_mul_lo_u32 v5, vcc_hi, 0xaf123456
+// GFX12: encoding: [0x05,0x00,0x2c,0xd7,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_mul_lo_u32 v5, ttmp15, src_scc
+// GFX12: encoding: [0x05,0x00,0x2c,0xd7,0x7b,0xfa,0x01,0x00]
+
+v_mul_lo_u32 v5, m0, 0.5
+// GFX12: encoding: [0x05,0x00,0x2c,0xd7,0x7d,0xe0,0x01,0x00]
+
+v_mul_lo_u32 v5, exec_lo, -1
+// GFX12: encoding: [0x05,0x00,0x2c,0xd7,0x7e,0x82,0x01,0x00]
+
+v_mul_lo_u32 v5, exec_hi, null
+// GFX12: encoding: [0x05,0x00,0x2c,0xd7,0x7f,0xf8,0x00,0x00]
+
+v_mul_lo_u32 v5, null, exec_lo
+// GFX12: encoding: [0x05,0x00,0x2c,0xd7,0x7c,0xfc,0x00,0x00]
+
+v_mul_lo_u32 v5, -1, exec_hi
+// GFX12: encoding: [0x05,0x00,0x2c,0xd7,0xc1,0xfe,0x00,0x00]
+
+v_mul_lo_u32 v5, 0.5, m0
+// GFX12: encoding: [0x05,0x00,0x2c,0xd7,0xf0,0xfa,0x00,0x00]
+
+v_mul_lo_u32 v5, src_scc, vcc_lo
+// GFX12: encoding: [0x05,0x00,0x2c,0xd7,0xfd,0xd4,0x00,0x00]
+
+v_mul_lo_u32 v255, 0xaf123456, vcc_hi
+// GFX12: encoding: [0xff,0x00,0x2c,0xd7,0xff,0xd6,0x00,0x00,0x56,0x34,0x12,0xaf]
+
+v_mullit_f32 v5, v1, v2, s3
+// GFX12: encoding: [0x05,0x00,0x18,0xd6,0x01,0x05,0x0e,0x00]
+
+v_mullit_f32 v5, v255, s2, s105
+// GFX12: encoding: [0x05,0x00,0x18,0xd6,0xff,0x05,0xa4,0x01]
+
+v_mullit_f32 v5, s1, v255, exec_hi
+// GFX12: encoding: [0x05,0x00,0x18,0xd6,0x01,0xfe,0xff,0x01]
+
+v_mullit_f32 v5, s105, s105, exec_lo
+// GFX12: encoding: [0x05,0x00,0x18,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_mullit_f32 v5, vcc_lo, ttmp15, v3
+// GFX12: encoding: [0x05,0x00,0x18,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_mullit_f32 v5, vcc_hi, 0xaf123456, v255
+// GFX12: encoding: [0x05,0x00,0x18,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+v_mullit_f32 v5, -|ttmp15|, -|src_scc|, -|ttmp15|
+// GFX12: encoding: [0x05,0x07,0x18,0xd6,0x7b,0xfa,0xed,0xe1]
+
+v_mullit_f32 v5, m0, 0.5, m0
+// GFX12: encoding: [0x05,0x00,0x18,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_mullit_f32 v5, |exec_lo|, -1, vcc_hi
+// GFX12: encoding: [0x05,0x01,0x18,0xd6,0x7e,0x82,0xad,0x01]
+
+v_mullit_f32 v5, -|exec_hi|, null, -|vcc_lo|
+// GFX12: encoding: [0x05,0x05,0x18,0xd6,0x7f,0xf8,0xa8,0xa1]
+
+v_mullit_f32 v5, null, exec_lo, -|0xaf123456|
+// GFX12: encoding: [0x05,0x04,0x18,0xd6,0x7c,0xfc,0xfc,0x83,0x56,0x34,0x12,0xaf]
+
+v_mullit_f32 v5, -1, -|exec_hi|, -|src_scc|
+// GFX12: encoding: [0x05,0x06,0x18,0xd6,0xc1,0xfe,0xf4,0xc3]
+
+v_mullit_f32 v5, 0.5, -m0, 0.5 mul:2
+// GFX12: encoding: [0x05,0x00,0x18,0xd6,0xf0,0xfa,0xc0,0x4b]
+
+v_mullit_f32 v5, -src_scc, |vcc_lo|, -1 mul:4
+// GFX12: encoding: [0x05,0x02,0x18,0xd6,0xfd,0xd4,0x04,0x33]
+
+v_mullit_f32 v255, -|0xaf123456|, -|vcc_hi|, null clamp div:2
+// GFX12: encoding: [0xff,0x83,0x18,0xd6,0xff,0xd6,0xf0,0x79,0x56,0x34,0x12,0xaf]
+
+v_or3_b32 v5, v1, v2, s3
+// GFX12: encoding: [0x05,0x00,0x58,0xd6,0x01,0x05,0x0e,0x00]
+
+v_or3_b32 v5, v255, s2, s105
+// GFX12: encoding: [0x05,0x00,0x58,0xd6,0xff,0x05,0xa4,0x01]
+
+v_or3_b32 v5, s1, v255, exec_hi
+// GFX12: encoding: [0x05,0x00,0x58,0xd6,0x01,0xfe,0xff,0x01]
+
+v_or3_b32 v5, s105, s105, exec_lo
+// GFX12: encoding: [0x05,0x00,0x58,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_or3_b32 v5, vcc_lo, ttmp15, v3
+// GFX12: encoding: [0x05,0x00,0x58,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_or3_b32 v5, vcc_hi, 0xaf123456, v255
+// GFX12: encoding: [0x05,0x00,0x58,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+v_or3_b32 v5, ttmp15, src_scc, ttmp15
+// GFX12: encoding: [0x05,0x00,0x58,0xd6,0x7b,0xfa,0xed,0x01]
+
+v_or3_b32 v5, m0, 0.5, m0
+// GFX12: encoding: [0x05,0x00,0x58,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_or3_b32 v5, exec_lo, -1, vcc_hi
+// GFX12: encoding: [0x05,0x00,0x58,0xd6,0x7e,0x82,0xad,0x01]
+
+v_or3_b32 v5, exec_hi, null, vcc_lo
+// GFX12: encoding: [0x05,0x00,0x58,0xd6,0x7f,0xf8,0xa8,0x01]
+
+v_or3_b32 v5, null, exec_lo, 0xaf123456
+// GFX12: encoding: [0x05,0x00,0x58,0xd6,0x7c,0xfc,0xfc,0x03,0x56,0x34,0x12,0xaf]
+
+v_or3_b32 v5, -1, exec_hi, src_scc
+// GFX12: encoding: [0x05,0x00,0x58,0xd6,0xc1,0xfe,0xf4,0x03]
+
+v_or3_b32 v5, 0.5, m0, 0.5
+// GFX12: encoding: [0x05,0x00,0x58,0xd6,0xf0,0xfa,0xc0,0x03]
+
+v_or3_b32 v5, src_scc, vcc_lo, -1
+// GFX12: encoding: [0x05,0x00,0x58,0xd6,0xfd,0xd4,0x04,0x03]
+
+v_or3_b32 v255, 0xaf123456, vcc_hi, null
+// GFX12: encoding: [0xff,0x00,0x58,0xd6,0xff,0xd6,0xf0,0x01,0x56,0x34,0x12,0xaf]
+
+v_or_b16 v5, v1, v2
+// GFX12: encoding: [0x05,0x00,0x63,0xd7,0x01,0x05,0x02,0x00]
+
+v_or_b16 v5, v255, v255
+// GFX12: encoding: [0x05,0x00,0x63,0xd7,0xff,0xff,0x03,0x00]
+
+v_or_b16 v5, s1, s2
+// GFX12: encoding: [0x05,0x00,0x63,0xd7,0x01,0x04,0x00,0x00]
+
+v_or_b16 v5, s105, s105
+// GFX12: encoding: [0x05,0x00,0x63,0xd7,0x69,0xd2,0x00,0x00]
+
+v_or_b16 v5, vcc_lo, ttmp15
+// GFX12: encoding: [0x05,0x00,0x63,0xd7,0x6a,0xf6,0x00,0x00]
+
+v_or_b16 v5, vcc_hi, 0xfe0b
+// GFX12: encoding: [0x05,0x00,0x63,0xd7,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+v_or_b16 v5, ttmp15, src_scc
+// GFX12: encoding: [0x05,0x00,0x63,0xd7,0x7b,0xfa,0x01,0x00]
+
+v_or_b16 v5, m0, 0.5
+// GFX12: encoding: [0x05,0x00,0x63,0xd7,0x7d,0xe0,0x01,0x00]
+
+v_or_b16 v5, exec_lo, -1
+// GFX12: encoding: [0x05,0x00,0x63,0xd7,0x7e,0x82,0x01,0x00]
+
+v_or_b16 v5, exec_hi, null
+// GFX12: encoding: [0x05,0x00,0x63,0xd7,0x7f,0xf8,0x00,0x00]
+
+v_or_b16 v5, null, exec_lo
+// GFX12: encoding: [0x05,0x00,0x63,0xd7,0x7c,0xfc,0x00,0x00]
+
+v_or_b16 v5, -1, exec_hi
+// GFX12: encoding: [0x05,0x00,0x63,0xd7,0xc1,0xfe,0x00,0x00]
+
+v_or_b16 v5, 0.5, m0
+// GFX12: encoding: [0x05,0x00,0x63,0xd7,0xf0,0xfa,0x00,0x00]
+
+v_or_b16 v5, src_scc, vcc_lo
+// GFX12: encoding: [0x05,0x00,0x63,0xd7,0xfd,0xd4,0x00,0x00]
+
+v_or_b16 v255, 0xfe0b, vcc_hi
+// GFX12: encoding: [0xff,0x00,0x63,0xd7,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
+
+v_pack_b32_f16 v5, v1, v2
+// GFX12: encoding: [0x05,0x00,0x11,0xd7,0x01,0x05,0x02,0x00]
+
+v_pack_b32_f16 v5, v255, v255
+// GFX12: encoding: [0x05,0x00,0x11,0xd7,0xff,0xff,0x03,0x00]
+
+v_pack_b32_f16 v5, s1, s2
+// GFX12: encoding: [0x05,0x00,0x11,0xd7,0x01,0x04,0x00,0x00]
+
+v_pack_b32_f16 v5, s105, s105
+// GFX12: encoding: [0x05,0x00,0x11,0xd7,0x69,0xd2,0x00,0x00]
+
+v_pack_b32_f16 v5, vcc_lo, ttmp15
+// GFX12: encoding: [0x05,0x00,0x11,0xd7,0x6a,0xf6,0x00,0x00]
+
+v_pack_b32_f16 v5, vcc_hi, 0xfe0b
+// GFX12: encoding: [0x05,0x00,0x11,0xd7,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+v_pack_b32_f16 v5, ttmp15, src_scc
+// GFX12: encoding: [0x05,0x00,0x11,0xd7,0x7b,0xfa,0x01,0x00]
+
+v_pack_b32_f16 v5, m0, 0.5
+// GFX12: encoding: [0x05,0x00,0x11,0xd7,0x7d,0xe0,0x01,0x00]
+
+v_pack_b32_f16 v5, exec_lo, -1
+// GFX12: encoding: [0x05,0x00,0x11,0xd7,0x7e,0x82,0x01,0x00]
+
+v_pack_b32_f16 v5, |exec_hi|, null
+// GFX12: encoding: [0x05,0x01,0x11,0xd7,0x7f,0xf8,0x00,0x00]
+
+v_pack_b32_f16 v5, null, exec_lo
+// GFX12: encoding: [0x05,0x00,0x11,0xd7,0x7c,0xfc,0x00,0x00]
+
+v_pack_b32_f16 v5, -1, exec_hi
+// GFX12: encoding: [0x05,0x00,0x11,0xd7,0xc1,0xfe,0x00,0x00]
+
+v_pack_b32_f16 v5, 0.5, -m0 op_sel:[0,0,0]
+// GFX12: encoding: [0x05,0x00,0x11,0xd7,0xf0,0xfa,0x00,0x40]
+
+v_pack_b32_f16 v5, -src_scc, |vcc_lo| op_sel:[1,0,0]
+// GFX12: encoding: [0x05,0x0a,0x11,0xd7,0xfd,0xd4,0x00,0x20]
+
+v_pack_b32_f16 v255, -|0xfe0b|, -|vcc_hi| op_sel:[0,1,0]
+// GFX12: encoding: [0xff,0x13,0x11,0xd7,0xff,0xd6,0x00,0x60,0x0b,0xfe,0x00,0x00]
+
+v_perm_b32 v5, v1, v2, s3
+// GFX12: encoding: [0x05,0x00,0x44,0xd6,0x01,0x05,0x0e,0x00]
+
+v_perm_b32 v5, v255, s2, s105
+// GFX12: encoding: [0x05,0x00,0x44,0xd6,0xff,0x05,0xa4,0x01]
+
+v_perm_b32 v5, s1, v255, exec_hi
+// GFX12: encoding: [0x05,0x00,0x44,0xd6,0x01,0xfe,0xff,0x01]
+
+v_perm_b32 v5, s105, s105, exec_lo
+// GFX12: encoding: [0x05,0x00,0x44,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_perm_b32 v5, vcc_lo, ttmp15, v3
+// GFX12: encoding: [0x05,0x00,0x44,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_perm_b32 v5, vcc_hi, 0xaf123456, v255
+// GFX12: encoding: [0x05,0x00,0x44,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+v_perm_b32 v5, ttmp15, src_scc, ttmp15
+// GFX12: encoding: [0x05,0x00,0x44,0xd6,0x7b,0xfa,0xed,0x01]
+
+v_perm_b32 v5, m0, 0.5, m0
+// GFX12: encoding: [0x05,0x00,0x44,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_perm_b32 v5, exec_lo, -1, vcc_hi
+// GFX12: encoding: [0x05,0x00,0x44,0xd6,0x7e,0x82,0xad,0x01]
+
+v_perm_b32 v5, exec_hi, null, vcc_lo
+// GFX12: encoding: [0x05,0x00,0x44,0xd6,0x7f,0xf8,0xa8,0x01]
+
+v_perm_b32 v5, null, exec_lo, 0xaf123456
+// GFX12: encoding: [0x05,0x00,0x44,0xd6,0x7c,0xfc,0xfc,0x03,0x56,0x34,0x12,0xaf]
+
+v_perm_b32 v5, -1, exec_hi, src_scc
+// GFX12: encoding: [0x05,0x00,0x44,0xd6,0xc1,0xfe,0xf4,0x03]
+
+v_perm_b32 v5, 0.5, m0, 0.5
+// GFX12: encoding: [0x05,0x00,0x44,0xd6,0xf0,0xfa,0xc0,0x03]
+
+v_perm_b32 v5, src_scc, vcc_lo, -1
+// GFX12: encoding: [0x05,0x00,0x44,0xd6,0xfd,0xd4,0x04,0x03]
+
+v_perm_b32 v255, 0xaf123456, vcc_hi, null
+// GFX12: encoding: [0xff,0x00,0x44,0xd6,0xff,0xd6,0xf0,0x01,0x56,0x34,0x12,0xaf]
+
+v_permlane16_b32 v5, v1, s2, s3
+// GFX12: encoding: [0x05,0x00,0x5b,0xd6,0x01,0x05,0x0c,0x00]
+
+v_permlane16_b32 v5, v1, s105, s105
+// GFX12: encoding: [0x05,0x00,0x5b,0xd6,0x01,0xd3,0xa4,0x01]
+
+v_permlane16_b32 v5, v1, ttmp15, ttmp15
+// GFX12: encoding: [0x05,0x00,0x5b,0xd6,0x01,0xf7,0xec,0x01]
+
+v_permlane16_b32 v5, v1, vcc_hi, exec_lo
+// GFX12: encoding: [0x05,0x00,0x5b,0xd6,0x01,0xd7,0xf8,0x01]
+
+v_permlane16_b32 v5, v1, vcc_lo, m0
+// GFX12: encoding: [0x05,0x00,0x5b,0xd6,0x01,0xd5,0xf4,0x01]
+
+v_permlane16_b32 v5, v1, m0, vcc_hi
+// GFX12: encoding: [0x05,0x00,0x5b,0xd6,0x01,0xfb,0xac,0x01]
+
+v_permlane16_b32 v5, v1, exec_hi, vcc_lo
+// GFX12: encoding: [0x05,0x00,0x5b,0xd6,0x01,0xff,0xa8,0x01]
+
+v_permlane16_b32 v5, v1, exec_lo, src_scc
+// GFX12: encoding: [0x05,0x00,0x5b,0xd6,0x01,0xfd,0xf4,0x03]
+
+v_permlane16_b32 v5, v1, null, 0.5 op_sel:[1,1]
+// GFX12: encoding: [0x05,0x18,0x5b,0xd6,0x01,0xf9,0xc0,0x03]
+
+v_permlane16_b32 v5, v1, -1, -1 op_sel:[0,0]
+// GFX12: encoding: [0x05,0x00,0x5b,0xd6,0x01,0x83,0x05,0x03]
+
+v_permlane16_b32 v5, v1, 0.5, null op_sel:[1,0]
+// GFX12: encoding: [0x05,0x08,0x5b,0xd6,0x01,0xe1,0xf1,0x01]
+
+v_permlane16_b32 v255, v255, src_scc, exec_hi op_sel:[0,1]
+// GFX12: encoding: [0xff,0x10,0x5b,0xd6,0xff,0xfb,0xfd,0x01]
+
+v_permlanex16_b32 v5, v1, s2, s3
+// GFX12: encoding: [0x05,0x00,0x5c,0xd6,0x01,0x05,0x0c,0x00]
+
+v_permlanex16_b32 v5, v1, s105, s105
+// GFX12: encoding: [0x05,0x00,0x5c,0xd6,0x01,0xd3,0xa4,0x01]
+
+v_permlanex16_b32 v5, v1, ttmp15, ttmp15
+// GFX12: encoding: [0x05,0x00,0x5c,0xd6,0x01,0xf7,0xec,0x01]
+
+v_permlanex16_b32 v5, v1, vcc_hi, exec_lo
+// GFX12: encoding: [0x05,0x00,0x5c,0xd6,0x01,0xd7,0xf8,0x01]
+
+v_permlanex16_b32 v5, v1, vcc_lo, m0
+// GFX12: encoding: [0x05,0x00,0x5c,0xd6,0x01,0xd5,0xf4,0x01]
+
+v_permlanex16_b32 v5, v1, m0, vcc_hi
+// GFX12: encoding: [0x05,0x00,0x5c,0xd6,0x01,0xfb,0xac,0x01]
+
+v_permlanex16_b32 v5, v1, exec_hi, vcc_lo
+// GFX12: encoding: [0x05,0x00,0x5c,0xd6,0x01,0xff,0xa8,0x01]
+
+v_permlanex16_b32 v5, v1, exec_lo, src_scc
+// GFX12: encoding: [0x05,0x00,0x5c,0xd6,0x01,0xfd,0xf4,0x03]
+
+v_permlanex16_b32 v5, v1, null, 0.5 op_sel:[1,1]
+// GFX12: encoding: [0x05,0x18,0x5c,0xd6,0x01,0xf9,0xc0,0x03]
+
+v_permlanex16_b32 v5, v1, -1, -1 op_sel:[0,0]
+// GFX12: encoding: [0x05,0x00,0x5c,0xd6,0x01,0x83,0x05,0x03]
+
+v_permlanex16_b32 v5, v1, 0.5, null op_sel:[1,0]
+// GFX12: encoding: [0x05,0x08,0x5c,0xd6,0x01,0xe1,0xf1,0x01]
+
+v_permlanex16_b32 v255, v255, src_scc, exec_hi op_sel:[0,1]
+// GFX12: encoding: [0xff,0x10,0x5c,0xd6,0xff,0xfb,0xfd,0x01]
+
+v_permlane16_var_b32 v5, v1, v2
+// GFX12: encoding: [0x05,0x00,0x0f,0xd7,0x01,0x05,0x02,0x00]
+
+v_permlane16_var_b32 v5, v1, v255
+// GFX12: encoding: [0x05,0x00,0x0f,0xd7,0x01,0xff,0x03,0x00]
+
+v_permlane16_var_b32 v5, v255, v0
+// GFX12: encoding: [0x05,0x00,0x0f,0xd7,0xff,0x01,0x02,0x00]
+
+v_permlane16_var_b32 v255, v1, v2
+// GFX12: encoding: [0xff,0x00,0x0f,0xd7,0x01,0x05,0x02,0x00]
+
+v_permlane16_var_b32 v5, v1, v50, op_sel:[1,1]
+// GFX12: encoding: [0x05,0x18,0x0f,0xd7,0x01,0x65,0x02,0x00]
+
+v_permlane16_var_b32 v5, v1, v50, op_sel:[0,0]
+// GFX12: encoding: [0x05,0x00,0x0f,0xd7,0x01,0x65,0x02,0x00]
+
+v_permlane16_var_b32 v5, v1, v50, op_sel:[1,0]
+// GFX12: encoding: [0x05,0x08,0x0f,0xd7,0x01,0x65,0x02,0x00]
+
+v_permlane16_var_b32 v255, v255, v0, op_sel:[0,1]
+// GFX12: encoding: [0xff,0x10,0x0f,0xd7,0xff,0x01,0x02,0x00]
+
+v_permlanex16_var_b32 v5, v1, v2
+// GFX12: encoding: [0x05,0x00,0x10,0xd7,0x01,0x05,0x02,0x00]
+
+v_permlanex16_var_b32 v5, v1, v105
+// GFX12: encoding: [0x05,0x00,0x10,0xd7,0x01,0xd3,0x02,0x00]
+
+v_permlanex16_var_b32 v5, v1, v255
+// GFX12: encoding: [0x05,0x00,0x10,0xd7,0x01,0xff,0x03,0x00]
+
+v_permlanex16_var_b32 v255, v1, v2
+// GFX12: encoding: [0xff,0x00,0x10,0xd7,0x01,0x05,0x02,0x00]
+
+v_permlanex16_var_b32 v1, v255, v2
+// GFX12: encoding: [0x01,0x00,0x10,0xd7,0xff,0x05,0x02,0x00]
+
+v_permlanex16_var_b32 v5, v1, v100, op_sel:[1,1]
+// GFX12: encoding: [0x05,0x18,0x10,0xd7,0x01,0xc9,0x02,0x00]
+
+v_permlanex16_var_b32 v5, v1, v100, op_sel:[0,0]
+// GFX12: encoding: [0x05,0x00,0x10,0xd7,0x01,0xc9,0x02,0x00]
+
+v_permlanex16_var_b32 v5, v1, v100, op_sel:[1,0]
+// GFX12: encoding: [0x05,0x08,0x10,0xd7,0x01,0xc9,0x02,0x00]
+
+v_permlanex16_var_b32 v255, v255, v100, op_sel:[0,1]
+// GFX12: encoding: [0xff,0x10,0x10,0xd7,0xff,0xc9,0x02,0x00]
+
+v_qsad_pk_u16_u8 v[5:6], v[1:2], v2, ttmp[14:15]
+// GFX12: encoding: [0x05,0x00,0x3a,0xd6,0x01,0x05,0xea,0x01]
+
+v_qsad_pk_u16_u8 v[5:6], v[1:2], v255, ttmp[14:15]
+// GFX12: encoding: [0x05,0x00,0x3a,0xd6,0x01,0xff,0xeb,0x01]
+
+v_qsad_pk_u16_u8 v[5:6], v[1:2], s2, ttmp[14:15]
+// GFX12: encoding: [0x05,0x00,0x3a,0xd6,0x01,0x05,0xe8,0x01]
+
+v_qsad_pk_u16_u8 v[5:6], v[1:2], s105, ttmp[14:15]
+// GFX12: encoding: [0x05,0x00,0x3a,0xd6,0x01,0xd3,0xe8,0x01]
+
+v_qsad_pk_u16_u8 v[5:6], v[254:255], ttmp15, s[6:7]
+// GFX12: encoding: [0x05,0x00,0x3a,0xd6,0xfe,0xf7,0x18,0x00]
+
+v_qsad_pk_u16_u8 v[5:6], s[2:3], vcc_hi, v[3:4]
+// GFX12: encoding: [0x05,0x00,0x3a,0xd6,0x02,0xd6,0x0c,0x04]
+
+v_qsad_pk_u16_u8 v[5:6], s[104:105], vcc_lo, s[104:105]
+// GFX12: encoding: [0x05,0x00,0x3a,0xd6,0x68,0xd4,0xa0,0x01]
+
+v_qsad_pk_u16_u8 v[5:6], vcc, m0, v[254:255]
+// GFX12: encoding: [0x05,0x00,0x3a,0xd6,0x6a,0xfa,0xf8,0x07]
+
+v_qsad_pk_u16_u8 v[5:6], ttmp[14:15], exec_hi, null
+// GFX12: encoding: [0x05,0x00,0x3a,0xd6,0x7a,0xfe,0xf0,0x01]
+
+v_qsad_pk_u16_u8 v[5:6], exec, exec_lo, exec
+// GFX12: encoding: [0x05,0x00,0x3a,0xd6,0x7e,0xfc,0xf8,0x01]
+
+v_qsad_pk_u16_u8 v[5:6], null, null, vcc
+// GFX12: encoding: [0x05,0x00,0x3a,0xd6,0x7c,0xf8,0xa8,0x01]
+
+v_qsad_pk_u16_u8 v[5:6], -1, -1, 0xaf123456
+// GFX12: encoding: [0x05,0x00,0x3a,0xd6,0xc1,0x82,0xfd,0x03,0x56,0x34,0x12,0xaf]
+
+v_qsad_pk_u16_u8 v[5:6], 0.5, 0.5, src_scc
+// GFX12: encoding: [0x05,0x00,0x3a,0xd6,0xf0,0xe0,0xf5,0x03]
+
+v_qsad_pk_u16_u8 v[5:6], src_scc, src_scc, 0.5
+// GFX12: encoding: [0x05,0x00,0x3a,0xd6,0xfd,0xfa,0xc1,0x03]
+
+v_qsad_pk_u16_u8 v[254:255], 0xaf123456, 0xaf123456, -1 clamp
+// GFX12: encoding: [0xfe,0x80,0x3a,0xd6,0xff,0xfe,0x05,0x03,0x56,0x34,0x12,0xaf]
+
+v_readlane_b32 s5, v1, s2
+// GFX12: encoding: [0x05,0x00,0x60,0xd7,0x01,0x05,0x00,0x00]
+
+v_readlane_b32 s5, v1, s105
+// GFX12: encoding: [0x05,0x00,0x60,0xd7,0x01,0xd3,0x00,0x00]
+
+v_readlane_b32 s105, v1, ttmp15
+// GFX12: encoding: [0x69,0x00,0x60,0xd7,0x01,0xf7,0x00,0x00]
+
+v_readlane_b32 vcc_lo, v1, vcc_hi
+// GFX12: encoding: [0x6a,0x00,0x60,0xd7,0x01,0xd7,0x00,0x00]
+
+v_readlane_b32 vcc_hi, v1, vcc_lo
+// GFX12: encoding: [0x6b,0x00,0x60,0xd7,0x01,0xd5,0x00,0x00]
+
+v_readlane_b32 ttmp15, v1, m0
+// GFX12: encoding: [0x7b,0x00,0x60,0xd7,0x01,0xfb,0x00,0x00]
+
+v_readlane_b32 null, v255, null
+// GFX12: encoding: [0x7c,0x00,0x60,0xd7,0xff,0xf9,0x00,0x00]
+
+v_sad_hi_u8 v5, v1, v2, s3
+// GFX12: encoding: [0x05,0x00,0x23,0xd6,0x01,0x05,0x0e,0x00]
+
+v_sad_hi_u8 v5, v255, s2, s105
+// GFX12: encoding: [0x05,0x00,0x23,0xd6,0xff,0x05,0xa4,0x01]
+
+v_sad_hi_u8 v5, s1, v255, exec_hi
+// GFX12: encoding: [0x05,0x00,0x23,0xd6,0x01,0xfe,0xff,0x01]
+
+v_sad_hi_u8 v5, s105, s105, exec_lo
+// GFX12: encoding: [0x05,0x00,0x23,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_sad_hi_u8 v5, vcc_lo, ttmp15, v3
+// GFX12: encoding: [0x05,0x00,0x23,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_sad_hi_u8 v5, vcc_hi, 0xaf123456, v255
+// GFX12: encoding: [0x05,0x00,0x23,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+v_sad_hi_u8 v5, ttmp15, src_scc, ttmp15
+// GFX12: encoding: [0x05,0x00,0x23,0xd6,0x7b,0xfa,0xed,0x01]
+
+v_sad_hi_u8 v5, m0, 0.5, m0
+// GFX12: encoding: [0x05,0x00,0x23,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_sad_hi_u8 v5, exec_lo, -1, vcc_hi
+// GFX12: encoding: [0x05,0x00,0x23,0xd6,0x7e,0x82,0xad,0x01]
+
+v_sad_hi_u8 v5, exec_hi, null, vcc_lo
+// GFX12: encoding: [0x05,0x00,0x23,0xd6,0x7f,0xf8,0xa8,0x01]
+
+v_sad_hi_u8 v5, null, exec_lo, 0xaf123456
+// GFX12: encoding: [0x05,0x00,0x23,0xd6,0x7c,0xfc,0xfc,0x03,0x56,0x34,0x12,0xaf]
+
+v_sad_hi_u8 v5, -1, exec_hi, src_scc
+// GFX12: encoding: [0x05,0x00,0x23,0xd6,0xc1,0xfe,0xf4,0x03]
+
+v_sad_hi_u8 v5, 0.5, m0, 0.5
+// GFX12: encoding: [0x05,0x00,0x23,0xd6,0xf0,0xfa,0xc0,0x03]
+
+v_sad_hi_u8 v5, src_scc, vcc_lo, -1
+// GFX12: encoding: [0x05,0x00,0x23,0xd6,0xfd,0xd4,0x04,0x03]
+
+v_sad_hi_u8 v255, 0xaf123456, vcc_hi, null clamp
+// GFX12: encoding: [0xff,0x80,0x23,0xd6,0xff,0xd6,0xf0,0x01,0x56,0x34,0x12,0xaf]
+
+v_sad_u16 v5, v1, v2, s3
+// GFX12: encoding: [0x05,0x00,0x24,0xd6,0x01,0x05,0x0e,0x00]
+
+v_sad_u16 v5, v255, s2, s105
+// GFX12: encoding: [0x05,0x00,0x24,0xd6,0xff,0x05,0xa4,0x01]
+
+v_sad_u16 v5, s1, v255, exec_hi
+// GFX12: encoding: [0x05,0x00,0x24,0xd6,0x01,0xfe,0xff,0x01]
+
+v_sad_u16 v5, s105, s105, exec_lo
+// GFX12: encoding: [0x05,0x00,0x24,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_sad_u16 v5, vcc_lo, ttmp15, v3
+// GFX12: encoding: [0x05,0x00,0x24,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_sad_u16 v5, vcc_hi, 0xfe0b, v255
+// GFX12: encoding: [0x05,0x00,0x24,0xd6,0x6b,0xfe,0xfd,0x07,0x0b,0xfe,0x00,0x00]
+
+v_sad_u16 v5, ttmp15, src_scc, ttmp15
+// GFX12: encoding: [0x05,0x00,0x24,0xd6,0x7b,0xfa,0xed,0x01]
+
+v_sad_u16 v5, m0, 0.5, m0
+// GFX12: encoding: [0x05,0x00,0x24,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_sad_u16 v5, exec_lo, -1, vcc_hi
+// GFX12: encoding: [0x05,0x00,0x24,0xd6,0x7e,0x82,0xad,0x01]
+
+v_sad_u16 v5, exec_hi, null, vcc_lo
+// GFX12: encoding: [0x05,0x00,0x24,0xd6,0x7f,0xf8,0xa8,0x01]
+
+v_sad_u16 v5, null, exec_lo, 0xaf123456
+// GFX12: encoding: [0x05,0x00,0x24,0xd6,0x7c,0xfc,0xfc,0x03,0x56,0x34,0x12,0xaf]
+
+v_sad_u16 v5, -1, exec_hi, src_scc
+// GFX12: encoding: [0x05,0x00,0x24,0xd6,0xc1,0xfe,0xf4,0x03]
+
+v_sad_u16 v5, 0.5, m0, 0.5
+// GFX12: encoding: [0x05,0x00,0x24,0xd6,0xf0,0xfa,0xc0,0x03]
+
+v_sad_u16 v5, src_scc, vcc_lo, -1
+// GFX12: encoding: [0x05,0x00,0x24,0xd6,0xfd,0xd4,0x04,0x03]
+
+v_sad_u16 v255, 0xfe0b, vcc_hi, null clamp
+// GFX12: encoding: [0xff,0x80,0x24,0xd6,0xff,0xd6,0xf0,0x01,0x0b,0xfe,0x00,0x00]
+
+v_sad_u32 v5, v1, v2, s3
+// GFX12: encoding: [0x05,0x00,0x25,0xd6,0x01,0x05,0x0e,0x00]
+
+v_sad_u32 v5, v255, s2, s105
+// GFX12: encoding: [0x05,0x00,0x25,0xd6,0xff,0x05,0xa4,0x01]
+
+v_sad_u32 v5, s1, v255, exec_hi
+// GFX12: encoding: [0x05,0x00,0x25,0xd6,0x01,0xfe,0xff,0x01]
+
+v_sad_u32 v5, s105, s105, exec_lo
+// GFX12: encoding: [0x05,0x00,0x25,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_sad_u32 v5, vcc_lo, ttmp15, v3
+// GFX12: encoding: [0x05,0x00,0x25,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_sad_u32 v5, vcc_hi, 0xaf123456, v255
+// GFX12: encoding: [0x05,0x00,0x25,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+v_sad_u32 v5, ttmp15, src_scc, ttmp15
+// GFX12: encoding: [0x05,0x00,0x25,0xd6,0x7b,0xfa,0xed,0x01]
+
+v_sad_u32 v5, m0, 0.5, m0
+// GFX12: encoding: [0x05,0x00,0x25,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_sad_u32 v5, exec_lo, -1, vcc_hi
+// GFX12: encoding: [0x05,0x00,0x25,0xd6,0x7e,0x82,0xad,0x01]
+
+v_sad_u32 v5, exec_hi, null, vcc_lo
+// GFX12: encoding: [0x05,0x00,0x25,0xd6,0x7f,0xf8,0xa8,0x01]
+
+v_sad_u32 v5, null, exec_lo, 0xaf123456
+// GFX12: encoding: [0x05,0x00,0x25,0xd6,0x7c,0xfc,0xfc,0x03,0x56,0x34,0x12,0xaf]
+
+v_sad_u32 v5, -1, exec_hi, src_scc
+// GFX12: encoding: [0x05,0x00,0x25,0xd6,0xc1,0xfe,0xf4,0x03]
+
+v_sad_u32 v5, 0.5, m0, 0.5
+// GFX12: encoding: [0x05,0x00,0x25,0xd6,0xf0,0xfa,0xc0,0x03]
+
+v_sad_u32 v5, src_scc, vcc_lo, -1
+// GFX12: encoding: [0x05,0x00,0x25,0xd6,0xfd,0xd4,0x04,0x03]
+
+v_sad_u32 v255, 0xaf123456, vcc_hi, null clamp
+// GFX12: encoding: [0xff,0x80,0x25,0xd6,0xff,0xd6,0xf0,0x01,0x56,0x34,0x12,0xaf]
+
+v_sad_u8 v5, v1, v2, s3
+// GFX12: encoding: [0x05,0x00,0x22,0xd6,0x01,0x05,0x0e,0x00]
+
+v_sad_u8 v5, v255, s2, s105
+// GFX12: encoding: [0x05,0x00,0x22,0xd6,0xff,0x05,0xa4,0x01]
+
+v_sad_u8 v5, s1, v255, exec_hi
+// GFX12: encoding: [0x05,0x00,0x22,0xd6,0x01,0xfe,0xff,0x01]
+
+v_sad_u8 v5, s105, s105, exec_lo
+// GFX12: encoding: [0x05,0x00,0x22,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_sad_u8 v5, vcc_lo, ttmp15, v3
+// GFX12: encoding: [0x05,0x00,0x22,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_sad_u8 v5, vcc_hi, 0xaf123456, v255
+// GFX12: encoding: [0x05,0x00,0x22,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+v_sad_u8 v5, ttmp15, src_scc, ttmp15
+// GFX12: encoding: [0x05,0x00,0x22,0xd6,0x7b,0xfa,0xed,0x01]
+
+v_sad_u8 v5, m0, 0.5, m0
+// GFX12: encoding: [0x05,0x00,0x22,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_sad_u8 v5, exec_lo, -1, vcc_hi
+// GFX12: encoding: [0x05,0x00,0x22,0xd6,0x7e,0x82,0xad,0x01]
+
+v_sad_u8 v5, exec_hi, null, vcc_lo
+// GFX12: encoding: [0x05,0x00,0x22,0xd6,0x7f,0xf8,0xa8,0x01]
+
+v_sad_u8 v5, null, exec_lo, 0xaf123456
+// GFX12: encoding: [0x05,0x00,0x22,0xd6,0x7c,0xfc,0xfc,0x03,0x56,0x34,0x12,0xaf]
+
+v_sad_u8 v5, -1, exec_hi, src_scc
+// GFX12: encoding: [0x05,0x00,0x22,0xd6,0xc1,0xfe,0xf4,0x03]
+
+v_sad_u8 v5, 0.5, m0, 0.5
+// GFX12: encoding: [0x05,0x00,0x22,0xd6,0xf0,0xfa,0xc0,0x03]
+
+v_sad_u8 v5, src_scc, vcc_lo, -1
+// GFX12: encoding: [0x05,0x00,0x22,0xd6,0xfd,0xd4,0x04,0x03]
+
+v_sad_u8 v255, 0xaf123456, vcc_hi, null clamp
+// GFX12: encoding: [0xff,0x80,0x22,0xd6,0xff,0xd6,0xf0,0x01,0x56,0x34,0x12,0xaf]
+
+v_sub_co_u32 v5, s6, v1, v2
+// W32: encoding: [0x05,0x06,0x01,0xd7,0x01,0x05,0x02,0x00]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32 v5, s6, v255, v255
+// W32: encoding: [0x05,0x06,0x01,0xd7,0xff,0xff,0x03,0x00]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32 v5, s6, s1, s2
+// W32: encoding: [0x05,0x06,0x01,0xd7,0x01,0x04,0x00,0x00]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32 v5, s6, s105, s105
+// W32: encoding: [0x05,0x06,0x01,0xd7,0x69,0xd2,0x00,0x00]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32 v5, s6, vcc_lo, ttmp15
+// W32: encoding: [0x05,0x06,0x01,0xd7,0x6a,0xf6,0x00,0x00]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32 v5, s6, vcc_hi, 0xaf123456
+// W32: encoding: [0x05,0x06,0x01,0xd7,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32 v5, s6, ttmp15, src_scc
+// W32: encoding: [0x05,0x06,0x01,0xd7,0x7b,0xfa,0x01,0x00]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32 v5, s6, m0, 0.5
+// W32: encoding: [0x05,0x06,0x01,0xd7,0x7d,0xe0,0x01,0x00]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32 v5, s6, exec_lo, -1
+// W32: encoding: [0x05,0x06,0x01,0xd7,0x7e,0x82,0x01,0x00]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32 v5, s6, exec_hi, null
+// W32: encoding: [0x05,0x06,0x01,0xd7,0x7f,0xf8,0x00,0x00]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32 v5, s105, null, exec_lo
+// W32: encoding: [0x05,0x69,0x01,0xd7,0x7c,0xfc,0x00,0x00]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32 v5, vcc_lo, -1, exec_hi
+// W32: encoding: [0x05,0x6a,0x01,0xd7,0xc1,0xfe,0x00,0x00]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32 v5, vcc_hi, 0.5, m0
+// W32: encoding: [0x05,0x6b,0x01,0xd7,0xf0,0xfa,0x00,0x00]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32 v5, ttmp15, src_scc, vcc_lo
+// W32: encoding: [0x05,0x7b,0x01,0xd7,0xfd,0xd4,0x00,0x00]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32 v5, s[12:13], v1, v2
+// W64: encoding: [0x05,0x0c,0x01,0xd7,0x01,0x05,0x02,0x00]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32 v5, s[12:13], v255, v255
+// W64: encoding: [0x05,0x0c,0x01,0xd7,0xff,0xff,0x03,0x00]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32 v5, s[12:13], s1, s2
+// W64: encoding: [0x05,0x0c,0x01,0xd7,0x01,0x04,0x00,0x00]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32 v5, s[12:13], s105, s105
+// W64: encoding: [0x05,0x0c,0x01,0xd7,0x69,0xd2,0x00,0x00]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32 v5, s[12:13], vcc_lo, ttmp15
+// W64: encoding: [0x05,0x0c,0x01,0xd7,0x6a,0xf6,0x00,0x00]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32 v5, s[12:13], vcc_hi, 0xaf123456
+// W64: encoding: [0x05,0x0c,0x01,0xd7,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32 v5, s[12:13], ttmp15, src_scc
+// W64: encoding: [0x05,0x0c,0x01,0xd7,0x7b,0xfa,0x01,0x00]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32 v5, s[12:13], m0, 0.5
+// W64: encoding: [0x05,0x0c,0x01,0xd7,0x7d,0xe0,0x01,0x00]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32 v5, s[12:13], exec_lo, -1
+// W64: encoding: [0x05,0x0c,0x01,0xd7,0x7e,0x82,0x01,0x00]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32 v5, s[12:13], exec_hi, null
+// W64: encoding: [0x05,0x0c,0x01,0xd7,0x7f,0xf8,0x00,0x00]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32 v5, s[12:13], null, exec_lo
+// W64: encoding: [0x05,0x0c,0x01,0xd7,0x7c,0xfc,0x00,0x00]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32 v5, s[104:105], -1, exec_hi
+// W64: encoding: [0x05,0x68,0x01,0xd7,0xc1,0xfe,0x00,0x00]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32 v5, vcc, 0.5, m0
+// W64: encoding: [0x05,0x6a,0x01,0xd7,0xf0,0xfa,0x00,0x00]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: operands are not valid for this GPU or mode
+
+v_sub_co_u32 v5, ttmp[14:15], src_scc, vcc_lo
+// W64: encoding: [0x05,0x7a,0x01,0xd7,0xfd,0xd4,0x00,0x00]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32 v255, null, 0xaf123456, vcc_hi clamp
+// GFX12: encoding: [0xff,0xfc,0x01,0xd7,0xff,0xd6,0x00,0x00,0x56,0x34,0x12,0xaf]
+
+v_sub_nc_i16 v5, v1, v2
+// GFX12: encoding: [0x05,0x00,0x0e,0xd7,0x01,0x05,0x02,0x00]
+
+v_sub_nc_i16 v5, v255, v255
+// GFX12: encoding: [0x05,0x00,0x0e,0xd7,0xff,0xff,0x03,0x00]
+
+v_sub_nc_i16 v5, s1, s2
+// GFX12: encoding: [0x05,0x00,0x0e,0xd7,0x01,0x04,0x00,0x00]
+
+v_sub_nc_i16 v5, s105, s105
+// GFX12: encoding: [0x05,0x00,0x0e,0xd7,0x69,0xd2,0x00,0x00]
+
+v_sub_nc_i16 v5, vcc_lo, ttmp15
+// GFX12: encoding: [0x05,0x00,0x0e,0xd7,0x6a,0xf6,0x00,0x00]
+
+v_sub_nc_i16 v5, vcc_hi, 0xfe0b
+// GFX12: encoding: [0x05,0x00,0x0e,0xd7,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+v_sub_nc_i16 v5, ttmp15, src_scc
+// GFX12: encoding: [0x05,0x00,0x0e,0xd7,0x7b,0xfa,0x01,0x00]
+
+v_sub_nc_i16 v5, m0, 0.5
+// GFX12: encoding: [0x05,0x00,0x0e,0xd7,0x7d,0xe0,0x01,0x00]
+
+v_sub_nc_i16 v5, exec_lo, -1
+// GFX12: encoding: [0x05,0x00,0x0e,0xd7,0x7e,0x82,0x01,0x00]
+
+v_sub_nc_i16 v5, exec_hi, null
+// GFX12: encoding: [0x05,0x00,0x0e,0xd7,0x7f,0xf8,0x00,0x00]
+
+v_sub_nc_i16 v5, null, exec_lo op_sel:[1,1,1]
+// GFX12: encoding: [0x05,0x58,0x0e,0xd7,0x7c,0xfc,0x00,0x00]
+
+v_sub_nc_i16 v5, -1, exec_hi op_sel:[0,0,0]
+// GFX12: encoding: [0x05,0x00,0x0e,0xd7,0xc1,0xfe,0x00,0x00]
+
+v_sub_nc_i16 v5, 0.5, m0 op_sel:[1,0,0]
+// GFX12: encoding: [0x05,0x08,0x0e,0xd7,0xf0,0xfa,0x00,0x00]
+
+v_sub_nc_i16 v5, src_scc, vcc_lo op_sel:[0,1,0]
+// GFX12: encoding: [0x05,0x10,0x0e,0xd7,0xfd,0xd4,0x00,0x00]
+
+v_sub_nc_i16 v255, 0xfe0b, vcc_hi op_sel:[0,0,1] clamp
+// GFX12: encoding: [0xff,0xc0,0x0e,0xd7,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
+
+v_sub_nc_i32 v5, v1, v2
+// GFX12: encoding: [0x05,0x00,0x25,0xd7,0x01,0x05,0x02,0x00]
+
+v_sub_nc_i32 v5, v255, v255
+// GFX12: encoding: [0x05,0x00,0x25,0xd7,0xff,0xff,0x03,0x00]
+
+v_sub_nc_i32 v5, s1, s2
+// GFX12: encoding: [0x05,0x00,0x25,0xd7,0x01,0x04,0x00,0x00]
+
+v_sub_nc_i32 v5, s105, s105
+// GFX12: encoding: [0x05,0x00,0x25,0xd7,0x69,0xd2,0x00,0x00]
+
+v_sub_nc_i32 v5, vcc_lo, ttmp15
+// GFX12: encoding: [0x05,0x00,0x25,0xd7,0x6a,0xf6,0x00,0x00]
+
+v_sub_nc_i32 v5, vcc_hi, 0xaf123456
+// GFX12: encoding: [0x05,0x00,0x25,0xd7,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_sub_nc_i32 v5, ttmp15, src_scc
+// GFX12: encoding: [0x05,0x00,0x25,0xd7,0x7b,0xfa,0x01,0x00]
+
+v_sub_nc_i32 v5, m0, 0.5
+// GFX12: encoding: [0x05,0x00,0x25,0xd7,0x7d,0xe0,0x01,0x00]
+
+v_sub_nc_i32 v5, exec_lo, -1
+// GFX12: encoding: [0x05,0x00,0x25,0xd7,0x7e,0x82,0x01,0x00]
+
+v_sub_nc_i32 v5, exec_hi, null
+// GFX12: encoding: [0x05,0x00,0x25,0xd7,0x7f,0xf8,0x00,0x00]
+
+v_sub_nc_i32 v5, null, exec_lo
+// GFX12: encoding: [0x05,0x00,0x25,0xd7,0x7c,0xfc,0x00,0x00]
+
+v_sub_nc_i32 v5, -1, exec_hi
+// GFX12: encoding: [0x05,0x00,0x25,0xd7,0xc1,0xfe,0x00,0x00]
+
+v_sub_nc_i32 v5, 0.5, m0
+// GFX12: encoding: [0x05,0x00,0x25,0xd7,0xf0,0xfa,0x00,0x00]
+
+v_sub_nc_i32 v5, src_scc, vcc_lo
+// GFX12: encoding: [0x05,0x00,0x25,0xd7,0xfd,0xd4,0x00,0x00]
+
+v_sub_nc_i32 v255, 0xaf123456, vcc_hi clamp
+// GFX12: encoding: [0xff,0x80,0x25,0xd7,0xff,0xd6,0x00,0x00,0x56,0x34,0x12,0xaf]
+
+v_sub_nc_u16 v5, v1, v2
+// GFX12: encoding: [0x05,0x00,0x04,0xd7,0x01,0x05,0x02,0x00]
+
+v_sub_nc_u16 v5, v255, v255
+// GFX12: encoding: [0x05,0x00,0x04,0xd7,0xff,0xff,0x03,0x00]
+
+v_sub_nc_u16 v5, s1, s2
+// GFX12: encoding: [0x05,0x00,0x04,0xd7,0x01,0x04,0x00,0x00]
+
+v_sub_nc_u16 v5, s105, s105
+// GFX12: encoding: [0x05,0x00,0x04,0xd7,0x69,0xd2,0x00,0x00]
+
+v_sub_nc_u16 v5, vcc_lo, ttmp15
+// GFX12: encoding: [0x05,0x00,0x04,0xd7,0x6a,0xf6,0x00,0x00]
+
+v_sub_nc_u16 v5, vcc_hi, 0xfe0b
+// GFX12: encoding: [0x05,0x00,0x04,0xd7,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+v_sub_nc_u16 v5, ttmp15, src_scc
+// GFX12: encoding: [0x05,0x00,0x04,0xd7,0x7b,0xfa,0x01,0x00]
+
+v_sub_nc_u16 v5, m0, 0.5
+// GFX12: encoding: [0x05,0x00,0x04,0xd7,0x7d,0xe0,0x01,0x00]
+
+v_sub_nc_u16 v5, exec_lo, -1
+// GFX12: encoding: [0x05,0x00,0x04,0xd7,0x7e,0x82,0x01,0x00]
+
+v_sub_nc_u16 v5, exec_hi, null
+// GFX12: encoding: [0x05,0x00,0x04,0xd7,0x7f,0xf8,0x00,0x00]
+
+v_sub_nc_u16 v5, null, exec_lo op_sel:[1,1,1]
+// GFX12: encoding: [0x05,0x58,0x04,0xd7,0x7c,0xfc,0x00,0x00]
+
+v_sub_nc_u16 v5, -1, exec_hi op_sel:[0,0,0]
+// GFX12: encoding: [0x05,0x00,0x04,0xd7,0xc1,0xfe,0x00,0x00]
+
+v_sub_nc_u16 v5, 0.5, m0 op_sel:[1,0,0]
+// GFX12: encoding: [0x05,0x08,0x04,0xd7,0xf0,0xfa,0x00,0x00]
+
+v_sub_nc_u16 v5, src_scc, vcc_lo op_sel:[0,1,0]
+// GFX12: encoding: [0x05,0x10,0x04,0xd7,0xfd,0xd4,0x00,0x00]
+
+v_sub_nc_u16 v255, 0xfe0b, vcc_hi op_sel:[0,0,1] clamp
+// GFX12: encoding: [0xff,0xc0,0x04,0xd7,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
+
+v_subrev_co_u32 v5, s6, v1, v2
+// W32: encoding: [0x05,0x06,0x02,0xd7,0x01,0x05,0x02,0x00]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32 v5, s6, v255, v255
+// W32: encoding: [0x05,0x06,0x02,0xd7,0xff,0xff,0x03,0x00]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32 v5, s6, s1, s2
+// W32: encoding: [0x05,0x06,0x02,0xd7,0x01,0x04,0x00,0x00]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32 v5, s6, s105, s105
+// W32: encoding: [0x05,0x06,0x02,0xd7,0x69,0xd2,0x00,0x00]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32 v5, s6, vcc_lo, ttmp15
+// W32: encoding: [0x05,0x06,0x02,0xd7,0x6a,0xf6,0x00,0x00]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32 v5, s6, vcc_hi, 0xaf123456
+// W32: encoding: [0x05,0x06,0x02,0xd7,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32 v5, s6, ttmp15, src_scc
+// W32: encoding: [0x05,0x06,0x02,0xd7,0x7b,0xfa,0x01,0x00]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32 v5, s6, m0, 0.5
+// W32: encoding: [0x05,0x06,0x02,0xd7,0x7d,0xe0,0x01,0x00]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32 v5, s6, exec_lo, -1
+// W32: encoding: [0x05,0x06,0x02,0xd7,0x7e,0x82,0x01,0x00]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32 v5, s6, exec_hi, null
+// W32: encoding: [0x05,0x06,0x02,0xd7,0x7f,0xf8,0x00,0x00]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32 v5, s105, null, exec_lo
+// W32: encoding: [0x05,0x69,0x02,0xd7,0x7c,0xfc,0x00,0x00]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32 v5, vcc_lo, -1, exec_hi
+// W32: encoding: [0x05,0x6a,0x02,0xd7,0xc1,0xfe,0x00,0x00]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32 v5, vcc_hi, 0.5, m0
+// W32: encoding: [0x05,0x6b,0x02,0xd7,0xf0,0xfa,0x00,0x00]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32 v5, ttmp15, src_scc, vcc_lo
+// W32: encoding: [0x05,0x7b,0x02,0xd7,0xfd,0xd4,0x00,0x00]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32 v5, s[12:13], v1, v2
+// W64: encoding: [0x05,0x0c,0x02,0xd7,0x01,0x05,0x02,0x00]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32 v5, s[12:13], v255, v255
+// W64: encoding: [0x05,0x0c,0x02,0xd7,0xff,0xff,0x03,0x00]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32 v5, s[12:13], s1, s2
+// W64: encoding: [0x05,0x0c,0x02,0xd7,0x01,0x04,0x00,0x00]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32 v5, s[12:13], s105, s105
+// W64: encoding: [0x05,0x0c,0x02,0xd7,0x69,0xd2,0x00,0x00]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32 v5, s[12:13], vcc_lo, ttmp15
+// W64: encoding: [0x05,0x0c,0x02,0xd7,0x6a,0xf6,0x00,0x00]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32 v5, s[12:13], vcc_hi, 0xaf123456
+// W64: encoding: [0x05,0x0c,0x02,0xd7,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32 v5, s[12:13], ttmp15, src_scc
+// W64: encoding: [0x05,0x0c,0x02,0xd7,0x7b,0xfa,0x01,0x00]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32 v5, s[12:13], m0, 0.5
+// W64: encoding: [0x05,0x0c,0x02,0xd7,0x7d,0xe0,0x01,0x00]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32 v5, s[12:13], exec_lo, -1
+// W64: encoding: [0x05,0x0c,0x02,0xd7,0x7e,0x82,0x01,0x00]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32 v5, s[12:13], exec_hi, null
+// W64: encoding: [0x05,0x0c,0x02,0xd7,0x7f,0xf8,0x00,0x00]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32 v5, s[12:13], null, exec_lo
+// W64: encoding: [0x05,0x0c,0x02,0xd7,0x7c,0xfc,0x00,0x00]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32 v5, s[104:105], -1, exec_hi
+// W64: encoding: [0x05,0x68,0x02,0xd7,0xc1,0xfe,0x00,0x00]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32 v5, vcc, 0.5, m0
+// W64: encoding: [0x05,0x6a,0x02,0xd7,0xf0,0xfa,0x00,0x00]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: operands are not valid for this GPU or mode
+
+v_subrev_co_u32 v5, ttmp[14:15], src_scc, vcc_lo
+// W64: encoding: [0x05,0x7a,0x02,0xd7,0xfd,0xd4,0x00,0x00]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32 v255, null, 0xaf123456, vcc_hi clamp
+// GFX12: encoding: [0xff,0xfc,0x02,0xd7,0xff,0xd6,0x00,0x00,0x56,0x34,0x12,0xaf]
+
+v_trig_preop_f64 v[5:6], v[1:2], v2
+// GFX12: encoding: [0x05,0x00,0x2f,0xd7,0x01,0x05,0x02,0x00]
+
+v_trig_preop_f64 v[5:6], v[1:2], v255
+// GFX12: encoding: [0x05,0x00,0x2f,0xd7,0x01,0xff,0x03,0x00]
+
+v_trig_preop_f64 v[5:6], v[1:2], s2
+// GFX12: encoding: [0x05,0x00,0x2f,0xd7,0x01,0x05,0x00,0x00]
+
+v_trig_preop_f64 v[5:6], v[1:2], s105
+// GFX12: encoding: [0x05,0x00,0x2f,0xd7,0x01,0xd3,0x00,0x00]
+
+v_trig_preop_f64 v[5:6], v[254:255], ttmp15
+// GFX12: encoding: [0x05,0x00,0x2f,0xd7,0xfe,0xf7,0x00,0x00]
+
+v_trig_preop_f64 v[5:6], s[2:3], vcc_hi
+// GFX12: encoding: [0x05,0x00,0x2f,0xd7,0x02,0xd6,0x00,0x00]
+
+v_trig_preop_f64 v[5:6], s[104:105], vcc_lo
+// GFX12: encoding: [0x05,0x00,0x2f,0xd7,0x68,0xd4,0x00,0x00]
+
+v_trig_preop_f64 v[5:6], vcc, m0
+// GFX12: encoding: [0x05,0x00,0x2f,0xd7,0x6a,0xfa,0x00,0x00]
+
+v_trig_preop_f64 v[5:6], ttmp[14:15], exec_hi
+// GFX12: encoding: [0x05,0x00,0x2f,0xd7,0x7a,0xfe,0x00,0x00]
+
+v_trig_preop_f64 v[5:6], exec, exec_lo
+// GFX12: encoding: [0x05,0x00,0x2f,0xd7,0x7e,0xfc,0x00,0x00]
+
+v_trig_preop_f64 v[5:6], null, null
+// GFX12: encoding: [0x05,0x00,0x2f,0xd7,0x7c,0xf8,0x00,0x00]
+
+v_trig_preop_f64 v[5:6], -1, -1
+// GFX12: encoding: [0x05,0x00,0x2f,0xd7,0xc1,0x82,0x01,0x00]
+
+v_trig_preop_f64 v[5:6], 0.5, 0.5 mul:2
+// GFX12: encoding: [0x05,0x00,0x2f,0xd7,0xf0,0xe0,0x01,0x08]
+
+v_trig_preop_f64 v[5:6], -|src_scc|, src_scc mul:4
+// GFX12: encoding: [0x05,0x01,0x2f,0xd7,0xfd,0xfa,0x01,0x30]
+
+v_trig_preop_f64 v[254:255], 0xaf123456, 0xaf123456 clamp div:2
+// GFX12: encoding: [0xfe,0x80,0x2f,0xd7,0xff,0xfe,0x01,0x18,0x56,0x34,0x12,0xaf]
+
+v_writelane_b32 v5, s1, s2
+// GFX12: encoding: [0x05,0x00,0x61,0xd7,0x01,0x04,0x00,0x00]
+
+v_writelane_b32 v5, s105, s2
+// GFX12: encoding: [0x05,0x00,0x61,0xd7,0x69,0x04,0x00,0x00]
+
+v_writelane_b32 v5, vcc_lo, s2
+// GFX12: encoding: [0x05,0x00,0x61,0xd7,0x6a,0x04,0x00,0x00]
+
+v_writelane_b32 v5, vcc_hi, s2
+// GFX12: encoding: [0x05,0x00,0x61,0xd7,0x6b,0x04,0x00,0x00]
+
+v_writelane_b32 v5, ttmp15, s2
+// GFX12: encoding: [0x05,0x00,0x61,0xd7,0x7b,0x04,0x00,0x00]
+
+v_writelane_b32 v5, m0, s2
+// GFX12: encoding: [0x05,0x00,0x61,0xd7,0x7d,0x04,0x00,0x00]
+
+v_writelane_b32 v5, exec_lo, s2
+// GFX12: encoding: [0x05,0x00,0x61,0xd7,0x7e,0x04,0x00,0x00]
+
+v_writelane_b32 v5, exec_hi, s105
+// GFX12: encoding: [0x05,0x00,0x61,0xd7,0x7f,0xd2,0x00,0x00]
+
+v_writelane_b32 v5, null, ttmp15
+// GFX12: encoding: [0x05,0x00,0x61,0xd7,0x7c,0xf6,0x00,0x00]
+
+v_writelane_b32 v5, -1, null
+// GFX12: encoding: [0x05,0x00,0x61,0xd7,0xc1,0xf8,0x00,0x00]
+
+v_writelane_b32 v5, 0.5, m0
+// GFX12: encoding: [0x05,0x00,0x61,0xd7,0xf0,0xfa,0x00,0x00]
+
+v_writelane_b32 v5, src_scc, vcc_lo
+// GFX12: encoding: [0x05,0x00,0x61,0xd7,0xfd,0xd4,0x00,0x00]
+
+v_writelane_b32 v255, 0xaf123456, vcc_hi
+// GFX12: encoding: [0xff,0x00,0x61,0xd7,0xff,0xd6,0x00,0x00,0x56,0x34,0x12,0xaf]
+
+v_xad_u32 v5, v1, v2, s3
+// GFX12: encoding: [0x05,0x00,0x45,0xd6,0x01,0x05,0x0e,0x00]
+
+v_xad_u32 v5, v255, s2, s105
+// GFX12: encoding: [0x05,0x00,0x45,0xd6,0xff,0x05,0xa4,0x01]
+
+v_xad_u32 v5, s1, v255, exec_hi
+// GFX12: encoding: [0x05,0x00,0x45,0xd6,0x01,0xfe,0xff,0x01]
+
+v_xad_u32 v5, s105, s105, exec_lo
+// GFX12: encoding: [0x05,0x00,0x45,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_xad_u32 v5, vcc_lo, ttmp15, v3
+// GFX12: encoding: [0x05,0x00,0x45,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_xad_u32 v5, vcc_hi, 0xaf123456, v255
+// GFX12: encoding: [0x05,0x00,0x45,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+v_xad_u32 v5, ttmp15, src_scc, ttmp15
+// GFX12: encoding: [0x05,0x00,0x45,0xd6,0x7b,0xfa,0xed,0x01]
+
+v_xad_u32 v5, m0, 0.5, m0
+// GFX12: encoding: [0x05,0x00,0x45,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_xad_u32 v5, exec_lo, -1, vcc_hi
+// GFX12: encoding: [0x05,0x00,0x45,0xd6,0x7e,0x82,0xad,0x01]
+
+v_xad_u32 v5, exec_hi, null, vcc_lo
+// GFX12: encoding: [0x05,0x00,0x45,0xd6,0x7f,0xf8,0xa8,0x01]
+
+v_xad_u32 v5, null, exec_lo, 0xaf123456
+// GFX12: encoding: [0x05,0x00,0x45,0xd6,0x7c,0xfc,0xfc,0x03,0x56,0x34,0x12,0xaf]
+
+v_xad_u32 v5, -1, exec_hi, src_scc
+// GFX12: encoding: [0x05,0x00,0x45,0xd6,0xc1,0xfe,0xf4,0x03]
+
+v_xad_u32 v5, 0.5, m0, 0.5
+// GFX12: encoding: [0x05,0x00,0x45,0xd6,0xf0,0xfa,0xc0,0x03]
+
+v_xad_u32 v5, src_scc, vcc_lo, -1
+// GFX12: encoding: [0x05,0x00,0x45,0xd6,0xfd,0xd4,0x04,0x03]
+
+v_xad_u32 v255, 0xaf123456, vcc_hi, null
+// GFX12: encoding: [0xff,0x00,0x45,0xd6,0xff,0xd6,0xf0,0x01,0x56,0x34,0x12,0xaf]
+
+v_xor3_b32 v5, v1, v2, s3
+// GFX12: encoding: [0x05,0x00,0x40,0xd6,0x01,0x05,0x0e,0x00]
+
+v_xor3_b32 v5, v255, s2, s105
+// GFX12: encoding: [0x05,0x00,0x40,0xd6,0xff,0x05,0xa4,0x01]
+
+v_xor3_b32 v5, s1, v255, exec_hi
+// GFX12: encoding: [0x05,0x00,0x40,0xd6,0x01,0xfe,0xff,0x01]
+
+v_xor3_b32 v5, s105, s105, exec_lo
+// GFX12: encoding: [0x05,0x00,0x40,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_xor3_b32 v5, vcc_lo, ttmp15, v3
+// GFX12: encoding: [0x05,0x00,0x40,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_xor3_b32 v5, vcc_hi, 0xaf123456, v255
+// GFX12: encoding: [0x05,0x00,0x40,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+v_xor3_b32 v5, ttmp15, src_scc, ttmp15
+// GFX12: encoding: [0x05,0x00,0x40,0xd6,0x7b,0xfa,0xed,0x01]
+
+v_xor3_b32 v5, m0, 0.5, m0
+// GFX12: encoding: [0x05,0x00,0x40,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_xor3_b32 v5, exec_lo, -1, vcc_hi
+// GFX12: encoding: [0x05,0x00,0x40,0xd6,0x7e,0x82,0xad,0x01]
+
+v_xor3_b32 v5, exec_hi, null, vcc_lo
+// GFX12: encoding: [0x05,0x00,0x40,0xd6,0x7f,0xf8,0xa8,0x01]
+
+v_xor3_b32 v5, null, exec_lo, 0xaf123456
+// GFX12: encoding: [0x05,0x00,0x40,0xd6,0x7c,0xfc,0xfc,0x03,0x56,0x34,0x12,0xaf]
+
+v_xor3_b32 v5, -1, exec_hi, src_scc
+// GFX12: encoding: [0x05,0x00,0x40,0xd6,0xc1,0xfe,0xf4,0x03]
+
+v_xor3_b32 v5, 0.5, m0, 0.5
+// GFX12: encoding: [0x05,0x00,0x40,0xd6,0xf0,0xfa,0xc0,0x03]
+
+v_xor3_b32 v5, src_scc, vcc_lo, -1
+// GFX12: encoding: [0x05,0x00,0x40,0xd6,0xfd,0xd4,0x04,0x03]
+
+v_xor3_b32 v255, 0xaf123456, vcc_hi, null
+// GFX12: encoding: [0xff,0x00,0x40,0xd6,0xff,0xd6,0xf0,0x01,0x56,0x34,0x12,0xaf]
+
+v_xor_b16 v5, v1, v2
+// GFX12: encoding: [0x05,0x00,0x64,0xd7,0x01,0x05,0x02,0x00]
+
+v_xor_b16 v5, v255, v255
+// GFX12: encoding: [0x05,0x00,0x64,0xd7,0xff,0xff,0x03,0x00]
+
+v_xor_b16 v5, s1, s2
+// GFX12: encoding: [0x05,0x00,0x64,0xd7,0x01,0x04,0x00,0x00]
+
+v_xor_b16 v5, s105, s105
+// GFX12: encoding: [0x05,0x00,0x64,0xd7,0x69,0xd2,0x00,0x00]
+
+v_xor_b16 v5, vcc_lo, ttmp15
+// GFX12: encoding: [0x05,0x00,0x64,0xd7,0x6a,0xf6,0x00,0x00]
+
+v_xor_b16 v5, vcc_hi, 0xfe0b
+// GFX12: encoding: [0x05,0x00,0x64,0xd7,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+v_xor_b16 v5, ttmp15, src_scc
+// GFX12: encoding: [0x05,0x00,0x64,0xd7,0x7b,0xfa,0x01,0x00]
+
+v_xor_b16 v5, m0, 0.5
+// GFX12: encoding: [0x05,0x00,0x64,0xd7,0x7d,0xe0,0x01,0x00]
+
+v_xor_b16 v5, exec_lo, -1
+// GFX12: encoding: [0x05,0x00,0x64,0xd7,0x7e,0x82,0x01,0x00]
+
+v_xor_b16 v5, exec_hi, null
+// GFX12: encoding: [0x05,0x00,0x64,0xd7,0x7f,0xf8,0x00,0x00]
+
+v_xor_b16 v5, null, exec_lo
+// GFX12: encoding: [0x05,0x00,0x64,0xd7,0x7c,0xfc,0x00,0x00]
+
+v_xor_b16 v5, -1, exec_hi
+// GFX12: encoding: [0x05,0x00,0x64,0xd7,0xc1,0xfe,0x00,0x00]
+
+v_xor_b16 v5, 0.5, m0
+// GFX12: encoding: [0x05,0x00,0x64,0xd7,0xf0,0xfa,0x00,0x00]
+
+v_xor_b16 v5, src_scc, vcc_lo
+// GFX12: encoding: [0x05,0x00,0x64,0xd7,0xfd,0xd4,0x00,0x00]
+
+v_xor_b16 v255, 0xfe0b, vcc_hi
+// GFX12: encoding: [0xff,0x00,0x64,0xd7,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
+
+v_minimum_f32 v5, v1, v2
+// GFX12: encoding: [0x05,0x00,0x65,0xd7,0x01,0x05,0x02,0x00]
+
+v_minimum_f32 v5, v255, v255
+// GFX12: encoding: [0x05,0x00,0x65,0xd7,0xff,0xff,0x03,0x00]
+
+v_minimum_f32 v5, s1, s2
+// GFX12: encoding: [0x05,0x00,0x65,0xd7,0x01,0x04,0x00,0x00]
+
+v_minimum_f32 v5, s105, s105
+// GFX12: encoding: [0x05,0x00,0x65,0xd7,0x69,0xd2,0x00,0x00]
+
+v_minimum_f32 v5, vcc_lo, ttmp15
+// GFX12: encoding: [0x05,0x00,0x65,0xd7,0x6a,0xf6,0x00,0x00]
+
+v_minimum_f32 v5, vcc_hi, 0xaf123456
+// GFX12: encoding: [0x05,0x00,0x65,0xd7,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_minimum_f32 v5, ttmp15, src_scc
+// GFX12: encoding: [0x05,0x00,0x65,0xd7,0x7b,0xfa,0x01,0x00]
+
+v_minimum_f32 v5, m0, 0.5
+// GFX12: encoding: [0x05,0x00,0x65,0xd7,0x7d,0xe0,0x01,0x00]
+
+v_minimum_f32 v5, exec_lo, -1
+// GFX12: encoding: [0x05,0x00,0x65,0xd7,0x7e,0x82,0x01,0x00]
+
+v_minimum_f32 v5, |exec_hi|, null
+// GFX12: encoding: [0x05,0x01,0x65,0xd7,0x7f,0xf8,0x00,0x00]
+
+v_minimum_f32 v5, null, exec_lo
+// GFX12: encoding: [0x05,0x00,0x65,0xd7,0x7c,0xfc,0x00,0x00]
+
+v_minimum_f32 v5, -1, exec_hi
+// GFX12: encoding: [0x05,0x00,0x65,0xd7,0xc1,0xfe,0x00,0x00]
+
+v_minimum_f32 v5, 0.5, -m0
+// GFX12: encoding: [0x05,0x00,0x65,0xd7,0xf0,0xfa,0x00,0x40]
+
+v_minimum_f32 v5, -src_scc, |vcc_lo|
+// GFX12: encoding: [0x05,0x02,0x65,0xd7,0xfd,0xd4,0x00,0x20]
+
+v_minimum_f32 v255, -|0xaf123456|, -|vcc_hi|
+// GFX12: encoding: [0xff,0x03,0x65,0xd7,0xff,0xd6,0x00,0x60,0x56,0x34,0x12,0xaf]
+
+v_maximum_f32 v5, v1, v2
+// GFX12: encoding: [0x05,0x00,0x66,0xd7,0x01,0x05,0x02,0x00]
+
+v_maximum_f32 v5, v255, v255
+// GFX12: encoding: [0x05,0x00,0x66,0xd7,0xff,0xff,0x03,0x00]
+
+v_maximum_f32 v5, s1, s2
+// GFX12: encoding: [0x05,0x00,0x66,0xd7,0x01,0x04,0x00,0x00]
+
+v_maximum_f32 v5, s105, s105
+// GFX12: encoding: [0x05,0x00,0x66,0xd7,0x69,0xd2,0x00,0x00]
+
+v_maximum_f32 v5, vcc_lo, ttmp15
+// GFX12: encoding: [0x05,0x00,0x66,0xd7,0x6a,0xf6,0x00,0x00]
+
+v_maximum_f32 v5, vcc_hi, 0xaf123456
+// GFX12: encoding: [0x05,0x00,0x66,0xd7,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_maximum_f32 v5, ttmp15, src_scc
+// GFX12: encoding: [0x05,0x00,0x66,0xd7,0x7b,0xfa,0x01,0x00]
+
+v_maximum_f32 v5, m0, 0.5
+// GFX12: encoding: [0x05,0x00,0x66,0xd7,0x7d,0xe0,0x01,0x00]
+
+v_maximum_f32 v5, exec_lo, -1
+// GFX12: encoding: [0x05,0x00,0x66,0xd7,0x7e,0x82,0x01,0x00]
+
+v_maximum_f32 v5, |exec_hi|, null
+// GFX12: encoding: [0x05,0x01,0x66,0xd7,0x7f,0xf8,0x00,0x00]
+
+v_maximum_f32 v5, null, exec_lo
+// GFX12: encoding: [0x05,0x00,0x66,0xd7,0x7c,0xfc,0x00,0x00]
+
+v_maximum_f32 v5, -1, exec_hi
+// GFX12: encoding: [0x05,0x00,0x66,0xd7,0xc1,0xfe,0x00,0x00]
+
+v_maximum_f32 v5, 0.5, -m0
+// GFX12: encoding: [0x05,0x00,0x66,0xd7,0xf0,0xfa,0x00,0x40]
+
+v_maximum_f32 v5, -src_scc, |vcc_lo|
+// GFX12: encoding: [0x05,0x02,0x66,0xd7,0xfd,0xd4,0x00,0x20]
+
+v_maximum_f32 v255, -|0xaf123456|, -|vcc_hi|
+// GFX12: encoding: [0xff,0x03,0x66,0xd7,0xff,0xd6,0x00,0x60,0x56,0x34,0x12,0xaf]
+
+v_minimum_f16 v5, v1, v2
+// GFX12: encoding: [0x05,0x00,0x67,0xd7,0x01,0x05,0x02,0x00]
+
+v_minimum_f16 v5, v255, v255
+// GFX12: encoding: [0x05,0x00,0x67,0xd7,0xff,0xff,0x03,0x00]
+
+v_minimum_f16 v5, s1, s2
+// GFX12: encoding: [0x05,0x00,0x67,0xd7,0x01,0x04,0x00,0x00]
+
+v_minimum_f16 v5, s105, s105
+// GFX12: encoding: [0x05,0x00,0x67,0xd7,0x69,0xd2,0x00,0x00]
+
+v_minimum_f16 v5, vcc_lo, ttmp15
+// GFX12: encoding: [0x05,0x00,0x67,0xd7,0x6a,0xf6,0x00,0x00]
+
+v_minimum_f16 v5, vcc_hi, 0xaf12
+// GFX12: encoding: [0x05,0x00,0x67,0xd7,0x6b,0xfe,0x01,0x00,0x12,0xaf,0x00,0x00]
+
+v_minimum_f16 v5, ttmp15, src_scc
+// GFX12: encoding: [0x05,0x00,0x67,0xd7,0x7b,0xfa,0x01,0x00]
+
+v_minimum_f16 v5, m0, 0.5
+// GFX12: encoding: [0x05,0x00,0x67,0xd7,0x7d,0xe0,0x01,0x00]
+
+v_minimum_f16 v5, exec_lo, -1
+// GFX12: encoding: [0x05,0x00,0x67,0xd7,0x7e,0x82,0x01,0x00]
+
+v_minimum_f16 v5, |exec_hi|, null
+// GFX12: encoding: [0x05,0x01,0x67,0xd7,0x7f,0xf8,0x00,0x00]
+
+v_minimum_f16 v5, null, exec_lo
+// GFX12: encoding: [0x05,0x00,0x67,0xd7,0x7c,0xfc,0x00,0x00]
+
+v_minimum_f16 v5, -1, exec_hi
+// GFX12: encoding: [0x05,0x00,0x67,0xd7,0xc1,0xfe,0x00,0x00]
+
+v_minimum_f16 v5, 0.5, -m0
+// GFX12: encoding: [0x05,0x00,0x67,0xd7,0xf0,0xfa,0x00,0x40]
+
+v_minimum_f16 v5, -src_scc, |vcc_lo|
+// GFX12: encoding: [0x05,0x02,0x67,0xd7,0xfd,0xd4,0x00,0x20]
+
+v_minimum_f16 v255, -|0xaf12|, -|vcc_hi|
+// GFX12: encoding: [0xff,0x03,0x67,0xd7,0xff,0xd6,0x00,0x60,0x12,0xaf,0x00,0x00]
+
+v_minimum_f16 v205, v201, v200
+// GFX12: encoding: [0xcd,0x00,0x67,0xd7,0xc9,0x91,0x03,0x00]
+
+v_maximum_f16 v5, v1, v2
+// GFX12: encoding: [0x05,0x00,0x68,0xd7,0x01,0x05,0x02,0x00]
+
+v_maximum_f16 v5, v255, v255
+// GFX12: encoding: [0x05,0x00,0x68,0xd7,0xff,0xff,0x03,0x00]
+
+v_maximum_f16 v5, s1, s2
+// GFX12: encoding: [0x05,0x00,0x68,0xd7,0x01,0x04,0x00,0x00]
+
+v_maximum_f16 v5, s105, s105
+// GFX12: encoding: [0x05,0x00,0x68,0xd7,0x69,0xd2,0x00,0x00]
+
+v_maximum_f16 v5, vcc_lo, ttmp15
+// GFX12: encoding: [0x05,0x00,0x68,0xd7,0x6a,0xf6,0x00,0x00]
+
+v_maximum_f16 v5, vcc_hi, 0xaf12
+// GFX12: encoding: [0x05,0x00,0x68,0xd7,0x6b,0xfe,0x01,0x00,0x12,0xaf,0x00,0x00]
+
+v_maximum_f16 v5, ttmp15, src_scc
+// GFX12: encoding: [0x05,0x00,0x68,0xd7,0x7b,0xfa,0x01,0x00]
+
+v_maximum_f16 v5, m0, 0.5
+// GFX12: encoding: [0x05,0x00,0x68,0xd7,0x7d,0xe0,0x01,0x00]
+
+v_maximum_f16 v5, exec_lo, -1
+// GFX12: encoding: [0x05,0x00,0x68,0xd7,0x7e,0x82,0x01,0x00]
+
+v_maximum_f16 v5, |exec_hi|, null
+// GFX12: encoding: [0x05,0x01,0x68,0xd7,0x7f,0xf8,0x00,0x00]
+
+v_maximum_f16 v5, null, exec_lo
+// GFX12: encoding: [0x05,0x00,0x68,0xd7,0x7c,0xfc,0x00,0x00]
+
+v_maximum_f16 v5, -1, exec_hi
+// GFX12: encoding: [0x05,0x00,0x68,0xd7,0xc1,0xfe,0x00,0x00]
+
+v_maximum_f16 v5, 0.5, -m0
+// GFX12: encoding: [0x05,0x00,0x68,0xd7,0xf0,0xfa,0x00,0x40]
+
+v_maximum_f16 v5, -src_scc, |vcc_lo|
+// GFX12: encoding: [0x05,0x02,0x68,0xd7,0xfd,0xd4,0x00,0x20]
+
+v_maximum_f16 v255, -|0xaf12|, -|vcc_hi|
+// GFX12: encoding: [0xff,0x03,0x68,0xd7,0xff,0xd6,0x00,0x60,0x12,0xaf,0x00,0x00]
+
+v_maximum_f16 v205, v201, v200
+// GFX12: encoding: [0xcd,0x00,0x68,0xd7,0xc9,0x91,0x03,0x00]
+
+v_minimum_f64 v[5:6], v[1:2], v[3:4]
+// GFX12: encoding: [0x05,0x00,0x41,0xd7,0x01,0x07,0x02,0x00]
+
+v_minimum_f64 v[5:6], v[254:255], v[254:255]
+// GFX12: encoding: [0x05,0x00,0x41,0xd7,0xfe,0xfd,0x03,0x00]
+
+v_minimum_f64 v[5:6], s[6:7], s[4:5]
+// GFX12: encoding: [0x05,0x00,0x41,0xd7,0x06,0x08,0x00,0x00]
+
+v_minimum_f64 v[5:6], s[104:105], s[104:105]
+// GFX12: encoding: [0x05,0x00,0x41,0xd7,0x68,0xd0,0x00,0x00]
+
+v_minimum_f64 v[5:6], vcc, ttmp[14:15]
+// GFX12: encoding: [0x05,0x00,0x41,0xd7,0x6a,0xf4,0x00,0x00]
+
+v_minimum_f64 v[5:6], vcc, 0xaf121234
+// GFX12: encoding: [0x05,0x00,0x41,0xd7,0x6a,0xfe,0x01,0x00,0x34,0x12,0x12,0xaf]
+
+v_minimum_f64 v[5:6], ttmp[14:15], src_scc
+// GFX12: encoding: [0x05,0x00,0x41,0xd7,0x7a,0xfa,0x01,0x00]
+
+v_minimum_f64 v[5:6], vcc, 0.5
+// GFX12: encoding: [0x05,0x00,0x41,0xd7,0x6a,0xe0,0x01,0x00]
+
+v_minimum_f64 v[5:6], exec, -1
+// GFX12: encoding: [0x05,0x00,0x41,0xd7,0x7e,0x82,0x01,0x00]
+
+v_minimum_f64 v[5:6], |exec|, null
+// GFX12: encoding: [0x05,0x01,0x41,0xd7,0x7e,0xf8,0x00,0x00]
+
+v_minimum_f64 v[5:6], null, exec
+// GFX12: encoding: [0x05,0x00,0x41,0xd7,0x7c,0xfc,0x00,0x00]
+
+v_minimum_f64 v[5:6], -1, exec
+// GFX12: encoding: [0x05,0x00,0x41,0xd7,0xc1,0xfc,0x00,0x00]
+
+v_minimum_f64 v[5:6], 0.5, -vcc
+// GFX12: encoding: [0x05,0x00,0x41,0xd7,0xf0,0xd4,0x00,0x40]
+
+v_minimum_f64 v[5:6], -src_scc, |vcc|
+// GFX12: encoding: [0x05,0x02,0x41,0xd7,0xfd,0xd4,0x00,0x20]
+
+v_minimum_f64 v[254:255], -|2|, -|vcc|
+// GFX12: encoding: [0xfe,0x03,0x41,0xd7,0x82,0xd4,0x00,0x60]
+
+v_maximum_f64 v[5:6], v[1:2], v[3:4]
+// GFX12: encoding: [0x05,0x00,0x42,0xd7,0x01,0x07,0x02,0x00]
+
+v_maximum_f64 v[5:6], v[254:255], v[254:255]
+// GFX12: encoding: [0x05,0x00,0x42,0xd7,0xfe,0xfd,0x03,0x00]
+
+v_maximum_f64 v[5:6], s[6:7], s[4:5]
+// GFX12: encoding: [0x05,0x00,0x42,0xd7,0x06,0x08,0x00,0x00]
+
+v_maximum_f64 v[5:6], s[104:105], s[104:105]
+// GFX12: encoding: [0x05,0x00,0x42,0xd7,0x68,0xd0,0x00,0x00]
+
+v_maximum_f64 v[5:6], vcc, ttmp[14:15]
+// GFX12: encoding: [0x05,0x00,0x42,0xd7,0x6a,0xf4,0x00,0x00]
+
+v_maximum_f64 v[5:6], vcc, 0xaf121234
+// GFX12: encoding: [0x05,0x00,0x42,0xd7,0x6a,0xfe,0x01,0x00,0x34,0x12,0x12,0xaf]
+
+v_maximum_f64 v[5:6], ttmp[14:15], src_scc
+// GFX12: encoding: [0x05,0x00,0x42,0xd7,0x7a,0xfa,0x01,0x00]
+
+v_maximum_f64 v[5:6], vcc, 0.5
+// GFX12: encoding: [0x05,0x00,0x42,0xd7,0x6a,0xe0,0x01,0x00]
+
+v_maximum_f64 v[5:6], exec, -1
+// GFX12: encoding: [0x05,0x00,0x42,0xd7,0x7e,0x82,0x01,0x00]
+
+v_maximum_f64 v[5:6], |exec|, null
+// GFX12: encoding: [0x05,0x01,0x42,0xd7,0x7e,0xf8,0x00,0x00]
+
+v_maximum_f64 v[5:6], null, exec
+// GFX12: encoding: [0x05,0x00,0x42,0xd7,0x7c,0xfc,0x00,0x00]
+
+v_maximum_f64 v[5:6], -1, exec
+// GFX12: encoding: [0x05,0x00,0x42,0xd7,0xc1,0xfc,0x00,0x00]
+
+v_maximum_f64 v[5:6], 0.5, -vcc
+// GFX12: encoding: [0x05,0x00,0x42,0xd7,0xf0,0xd4,0x00,0x40]
+
+v_maximum_f64 v[5:6], -src_scc, |vcc|
+// GFX12: encoding: [0x05,0x02,0x42,0xd7,0xfd,0xd4,0x00,0x20]
+
+v_maximum_f64 v[254:255], -|2|, -|vcc|
+// GFX12: encoding: [0xfe,0x03,0x42,0xd7,0x82,0xd4,0x00,0x60]
+
+v_minimum3_f32 v5, v1, v2, s3
+// GFX12: encoding: [0x05,0x00,0x2d,0xd6,0x01,0x05,0x0e,0x00]
+
+v_minimum3_f32 v5, v255, s2, s105
+// GFX12: encoding: [0x05,0x00,0x2d,0xd6,0xff,0x05,0xa4,0x01]
+
+v_minimum3_f32 v5, s1, v255, exec_hi
+// GFX12: encoding: [0x05,0x00,0x2d,0xd6,0x01,0xfe,0xff,0x01]
+
+v_minimum3_f32 v5, s105, s105, exec_lo
+// GFX12: encoding: [0x05,0x00,0x2d,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_minimum3_f32 v5, vcc_lo, ttmp15, v3
+// GFX12: encoding: [0x05,0x00,0x2d,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_minimum3_f32 v5, vcc_hi, 0xaf123456, v255
+// GFX12: encoding: [0x05,0x00,0x2d,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+v_minimum3_f32 v5, -|ttmp15|, -|src_scc|, -|ttmp15|
+// GFX12: encoding: [0x05,0x07,0x2d,0xd6,0x7b,0xfa,0xed,0xe1]
+
+v_minimum3_f32 v5, m0, 0.5, m0
+// GFX12: encoding: [0x05,0x00,0x2d,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_minimum3_f32 v5, |exec_lo|, -1, vcc_hi
+// GFX12: encoding: [0x05,0x01,0x2d,0xd6,0x7e,0x82,0xad,0x01]
+
+v_minimum3_f32 v5, -|exec_hi|, null, -|vcc_lo|
+// GFX12: encoding: [0x05,0x05,0x2d,0xd6,0x7f,0xf8,0xa8,0xa1]
+
+v_minimum3_f32 v5, null, exec_lo, -|0xaf123456|
+// GFX12: encoding: [0x05,0x04,0x2d,0xd6,0x7c,0xfc,0xfc,0x83,0x56,0x34,0x12,0xaf]
+
+v_minimum3_f32 v5, -1, -|exec_hi|, -|src_scc|
+// GFX12: encoding: [0x05,0x06,0x2d,0xd6,0xc1,0xfe,0xf4,0xc3]
+
+v_minimum3_f32 v5, 0.5, -m0, 0.5 mul:2
+// GFX12: encoding: [0x05,0x00,0x2d,0xd6,0xf0,0xfa,0xc0,0x4b]
+
+v_minimum3_f32 v5, -src_scc, |vcc_lo|, -1 mul:4
+// GFX12: encoding: [0x05,0x02,0x2d,0xd6,0xfd,0xd4,0x04,0x33]
+
+v_minimum3_f32 v255, -|0xaf123456|, -|vcc_hi|, null clamp div:2
+// GFX12: encoding: [0xff,0x83,0x2d,0xd6,0xff,0xd6,0xf0,0x79,0x56,0x34,0x12,0xaf]
+
+v_maximum3_f32 v5, v1, v2, s3
+// GFX12: encoding: [0x05,0x00,0x2e,0xd6,0x01,0x05,0x0e,0x00]
+
+v_maximum3_f32 v5, v255, s2, s105
+// GFX12: encoding: [0x05,0x00,0x2e,0xd6,0xff,0x05,0xa4,0x01]
+
+v_maximum3_f32 v5, s1, v255, exec_hi
+// GFX12: encoding: [0x05,0x00,0x2e,0xd6,0x01,0xfe,0xff,0x01]
+
+v_maximum3_f32 v5, s105, s105, exec_lo
+// GFX12: encoding: [0x05,0x00,0x2e,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_maximum3_f32 v5, vcc_lo, ttmp15, v3
+// GFX12: encoding: [0x05,0x00,0x2e,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_maximum3_f32 v5, vcc_hi, 0xaf123456, v255
+// GFX12: encoding: [0x05,0x00,0x2e,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+v_maximum3_f32 v5, -|ttmp15|, -|src_scc|, -|ttmp15|
+// GFX12: encoding: [0x05,0x07,0x2e,0xd6,0x7b,0xfa,0xed,0xe1]
+
+v_maximum3_f32 v5, m0, 0.5, m0
+// GFX12: encoding: [0x05,0x00,0x2e,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_maximum3_f32 v5, |exec_lo|, -1, vcc_hi
+// GFX12: encoding: [0x05,0x01,0x2e,0xd6,0x7e,0x82,0xad,0x01]
+
+v_maximum3_f32 v5, -|exec_hi|, null, -|vcc_lo|
+// GFX12: encoding: [0x05,0x05,0x2e,0xd6,0x7f,0xf8,0xa8,0xa1]
+
+v_maximum3_f32 v5, null, exec_lo, -|0xaf123456|
+// GFX12: encoding: [0x05,0x04,0x2e,0xd6,0x7c,0xfc,0xfc,0x83,0x56,0x34,0x12,0xaf]
+
+v_maximum3_f32 v5, -1, -|exec_hi|, -|src_scc|
+// GFX12: encoding: [0x05,0x06,0x2e,0xd6,0xc1,0xfe,0xf4,0xc3]
+
+v_maximum3_f32 v5, 0.5, -m0, 0.5 mul:2
+// GFX12: encoding: [0x05,0x00,0x2e,0xd6,0xf0,0xfa,0xc0,0x4b]
+
+v_maximum3_f32 v5, -src_scc, |vcc_lo|, -1 mul:4
+// GFX12: encoding: [0x05,0x02,0x2e,0xd6,0xfd,0xd4,0x04,0x33]
+
+v_maximum3_f32 v255, -|0xaf123456|, -|vcc_hi|, null clamp div:2
+// GFX12: encoding: [0xff,0x83,0x2e,0xd6,0xff,0xd6,0xf0,0x79,0x56,0x34,0x12,0xaf]
+
+v_minimum3_f16 v5, v1, v2, s3
+// GFX12: encoding: [0x05,0x00,0x2f,0xd6,0x01,0x05,0x0e,0x00]
+
+v_minimum3_f16 v5, v255, s2, s105
+// GFX12: encoding: [0x05,0x00,0x2f,0xd6,0xff,0x05,0xa4,0x01]
+
+v_minimum3_f16 v5, s1, v255, exec_hi
+// GFX12: encoding: [0x05,0x00,0x2f,0xd6,0x01,0xfe,0xff,0x01]
+
+v_minimum3_f16 v5, s105, s105, exec_lo
+// GFX12: encoding: [0x05,0x00,0x2f,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_minimum3_f16 v5, vcc_lo, ttmp15, v3
+// GFX12: encoding: [0x05,0x00,0x2f,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_minimum3_f16 v5, vcc_hi, 0xaf12, v255
+// GFX12: encoding: [0x05,0x00,0x2f,0xd6,0x6b,0xfe,0xfd,0x07,0x12,0xaf,0x00,0x00]
+
+v_minimum3_f16 v5, -|ttmp15|, -|src_scc|, -|ttmp15|
+// GFX12: encoding: [0x05,0x07,0x2f,0xd6,0x7b,0xfa,0xed,0xe1]
+
+v_minimum3_f16 v5, m0, 0.5, m0
+// GFX12: encoding: [0x05,0x00,0x2f,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_minimum3_f16 v5, |exec_lo|, -1, vcc_hi
+// GFX12: encoding: [0x05,0x01,0x2f,0xd6,0x7e,0x82,0xad,0x01]
+
+v_minimum3_f16 v5, -|exec_hi|, null, -|vcc_lo|
+// GFX12: encoding: [0x05,0x05,0x2f,0xd6,0x7f,0xf8,0xa8,0xa1]
+
+v_minimum3_f16 v5, null, exec_lo, -|0xaf12|
+// GFX12: encoding: [0x05,0x04,0x2f,0xd6,0x7c,0xfc,0xfc,0x83,0x12,0xaf,0x00,0x00]
+
+v_minimum3_f16 v5, -1, -|exec_hi|, -|src_scc|
+// GFX12: encoding: [0x05,0x06,0x2f,0xd6,0xc1,0xfe,0xf4,0xc3]
+
+v_minimum3_f16 v5, 0.5, -m0, 0.5
+// GFX12: encoding: [0x05,0x00,0x2f,0xd6,0xf0,0xfa,0xc0,0x43]
+
+v_minimum3_f16 v5, -src_scc, |vcc_lo|, -1
+// GFX12: encoding: [0x05,0x02,0x2f,0xd6,0xfd,0xd4,0x04,0x23]
+
+v_minimum3_f16 v255, -|0xaf12|, -|vcc_hi|, null clamp
+// GFX12: encoding: [0xff,0x83,0x2f,0xd6,0xff,0xd6,0xf0,0x61,0x12,0xaf,0x00,0x00]
+
+v_maximum3_f16 v5, v1, v2, s3
+// GFX12: encoding: [0x05,0x00,0x30,0xd6,0x01,0x05,0x0e,0x00]
+
+v_maximum3_f16 v5, v255, s2, s105
+// GFX12: encoding: [0x05,0x00,0x30,0xd6,0xff,0x05,0xa4,0x01]
+
+v_maximum3_f16 v5, s1, v255, exec_hi
+// GFX12: encoding: [0x05,0x00,0x30,0xd6,0x01,0xfe,0xff,0x01]
+
+v_maximum3_f16 v5, s105, s105, exec_lo
+// GFX12: encoding: [0x05,0x00,0x30,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_maximum3_f16 v5, vcc_lo, ttmp15, v3
+// GFX12: encoding: [0x05,0x00,0x30,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_maximum3_f16 v5, vcc_hi, 0xaf12, v255
+// GFX12: encoding: [0x05,0x00,0x30,0xd6,0x6b,0xfe,0xfd,0x07,0x12,0xaf,0x00,0x00]
+
+v_maximum3_f16 v5, -|ttmp15|, -|src_scc|, -|ttmp15|
+// GFX12: encoding: [0x05,0x07,0x30,0xd6,0x7b,0xfa,0xed,0xe1]
+
+v_maximum3_f16 v5, m0, 0.5, m0
+// GFX12: encoding: [0x05,0x00,0x30,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_maximum3_f16 v5, |exec_lo|, -1, vcc_hi
+// GFX12: encoding: [0x05,0x01,0x30,0xd6,0x7e,0x82,0xad,0x01]
+
+v_maximum3_f16 v5, -|exec_hi|, null, -|vcc_lo|
+// GFX12: encoding: [0x05,0x05,0x30,0xd6,0x7f,0xf8,0xa8,0xa1]
+
+v_maximum3_f16 v5, null, exec_lo, -|0xaf12|
+// GFX12: encoding: [0x05,0x04,0x30,0xd6,0x7c,0xfc,0xfc,0x83,0x12,0xaf,0x00,0x00]
+
+v_maximum3_f16 v5, -1, -|exec_hi|, -|src_scc|
+// GFX12: encoding: [0x05,0x06,0x30,0xd6,0xc1,0xfe,0xf4,0xc3]
+
+v_maximum3_f16 v5, 0.5, -m0, 0.5
+// GFX12: encoding: [0x05,0x00,0x30,0xd6,0xf0,0xfa,0xc0,0x43]
+
+v_maximum3_f16 v5, -src_scc, |vcc_lo|, -1
+// GFX12: encoding: [0x05,0x02,0x30,0xd6,0xfd,0xd4,0x04,0x23]
+
+v_maximumminimum_f32 v5, v1, v2, s3
+// GFX12: encoding: [0x05,0x00,0x6d,0xd6,0x01,0x05,0x0e,0x00]
+
+v_maximumminimum_f32 v5, v255, s2, s105
+// GFX12: encoding: [0x05,0x00,0x6d,0xd6,0xff,0x05,0xa4,0x01]
+
+v_maximumminimum_f32 v5, s1, v255, exec_hi
+// GFX12: encoding: [0x05,0x00,0x6d,0xd6,0x01,0xfe,0xff,0x01]
+
+v_maximumminimum_f32 v5, s105, s105, exec_lo
+// GFX12: encoding: [0x05,0x00,0x6d,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_maximumminimum_f32 v5, vcc_lo, ttmp15, v3
+// GFX12: encoding: [0x05,0x00,0x6d,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_maximumminimum_f32 v5, vcc_hi, 0xaf123456, v255
+// GFX12: encoding: [0x05,0x00,0x6d,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+v_maximumminimum_f32 v5, -|ttmp15|, -|src_scc|, -|ttmp15|
+// GFX12: encoding: [0x05,0x07,0x6d,0xd6,0x7b,0xfa,0xed,0xe1]
+
+v_maximumminimum_f32 v5, m0, 0.5, m0
+// GFX12: encoding: [0x05,0x00,0x6d,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_maximumminimum_f32 v5, |exec_lo|, -1, vcc_hi
+// GFX12: encoding: [0x05,0x01,0x6d,0xd6,0x7e,0x82,0xad,0x01]
+
+v_maximumminimum_f32 v5, -|exec_hi|, null, -|vcc_lo|
+// GFX12: encoding: [0x05,0x05,0x6d,0xd6,0x7f,0xf8,0xa8,0xa1]
+
+v_maximumminimum_f32 v5, null, exec_lo, -|0xaf123456|
+// GFX12: encoding: [0x05,0x04,0x6d,0xd6,0x7c,0xfc,0xfc,0x83,0x56,0x34,0x12,0xaf]
+
+v_maximumminimum_f32 v5, -1, -|exec_hi|, -|src_scc|
+// GFX12: encoding: [0x05,0x06,0x6d,0xd6,0xc1,0xfe,0xf4,0xc3]
+
+v_maximumminimum_f32 v5, 0.5, -m0, 0.5 mul:2
+// GFX12: encoding: [0x05,0x00,0x6d,0xd6,0xf0,0xfa,0xc0,0x4b]
+
+v_maximumminimum_f32 v5, -src_scc, |vcc_lo|, -1 mul:4
+// GFX12: encoding: [0x05,0x02,0x6d,0xd6,0xfd,0xd4,0x04,0x33]
+
+v_maximumminimum_f32 v255, -|0xaf123456|, -|vcc_hi|, null clamp div:2
+// GFX12: encoding: [0xff,0x83,0x6d,0xd6,0xff,0xd6,0xf0,0x79,0x56,0x34,0x12,0xaf]
+
+v_minimummaximum_f32 v5, v1, v2, s3
+// GFX12: encoding: [0x05,0x00,0x6c,0xd6,0x01,0x05,0x0e,0x00]
+
+v_minimummaximum_f32 v5, v255, s2, s105
+// GFX12: encoding: [0x05,0x00,0x6c,0xd6,0xff,0x05,0xa4,0x01]
+
+v_minimummaximum_f32 v5, s1, v255, exec_hi
+// GFX12: encoding: [0x05,0x00,0x6c,0xd6,0x01,0xfe,0xff,0x01]
+
+v_minimummaximum_f32 v5, s105, s105, exec_lo
+// GFX12: encoding: [0x05,0x00,0x6c,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_minimummaximum_f32 v5, vcc_lo, ttmp15, v3
+// GFX12: encoding: [0x05,0x00,0x6c,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_minimummaximum_f32 v5, vcc_hi, 0xaf123456, v255
+// GFX12: encoding: [0x05,0x00,0x6c,0xd6,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+v_minimummaximum_f32 v5, -|ttmp15|, -|src_scc|, -|ttmp15|
+// GFX12: encoding: [0x05,0x07,0x6c,0xd6,0x7b,0xfa,0xed,0xe1]
+
+v_minimummaximum_f32 v5, m0, 0.5, m0
+// GFX12: encoding: [0x05,0x00,0x6c,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_minimummaximum_f32 v5, |exec_lo|, -1, vcc_hi
+// GFX12: encoding: [0x05,0x01,0x6c,0xd6,0x7e,0x82,0xad,0x01]
+
+v_minimummaximum_f32 v5, -|exec_hi|, null, -|vcc_lo|
+// GFX12: encoding: [0x05,0x05,0x6c,0xd6,0x7f,0xf8,0xa8,0xa1]
+
+v_minimummaximum_f32 v5, null, exec_lo, -|0xaf123456|
+// GFX12: encoding: [0x05,0x04,0x6c,0xd6,0x7c,0xfc,0xfc,0x83,0x56,0x34,0x12,0xaf]
+
+v_minimummaximum_f32 v5, -1, -|exec_hi|, -|src_scc|
+// GFX12: encoding: [0x05,0x06,0x6c,0xd6,0xc1,0xfe,0xf4,0xc3]
+
+v_minimummaximum_f32 v5, 0.5, -m0, 0.5 mul:2
+// GFX12: encoding: [0x05,0x00,0x6c,0xd6,0xf0,0xfa,0xc0,0x4b]
+
+v_minimummaximum_f32 v5, -src_scc, |vcc_lo|, -1 mul:4
+// GFX12: encoding: [0x05,0x02,0x6c,0xd6,0xfd,0xd4,0x04,0x33]
+
+v_minimummaximum_f32 v255, -|0xaf123456|, -|vcc_hi|, null clamp div:2
+// GFX12: encoding: [0xff,0x83,0x6c,0xd6,0xff,0xd6,0xf0,0x79,0x56,0x34,0x12,0xaf]
+
+v_maximumminimum_f16 v5, v1, v2, s3
+// GFX12: encoding: [0x05,0x00,0x6f,0xd6,0x01,0x05,0x0e,0x00]
+
+v_maximumminimum_f16 v5, v255, s2, s105
+// GFX12: encoding: [0x05,0x00,0x6f,0xd6,0xff,0x05,0xa4,0x01]
+
+v_maximumminimum_f16 v5, s1, v255, exec_hi
+// GFX12: encoding: [0x05,0x00,0x6f,0xd6,0x01,0xfe,0xff,0x01]
+
+v_maximumminimum_f16 v5, s105, s105, exec_lo
+// GFX12: encoding: [0x05,0x00,0x6f,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_maximumminimum_f16 v5, vcc_lo, ttmp15, v3
+// GFX12: encoding: [0x05,0x00,0x6f,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_maximumminimum_f16 v5, vcc_hi, 0xaf12, v255
+// GFX12: encoding: [0x05,0x00,0x6f,0xd6,0x6b,0xfe,0xfd,0x07,0x12,0xaf,0x00,0x00]
+
+v_maximumminimum_f16 v5, -|ttmp15|, -|src_scc|, -|ttmp15|
+// GFX12: encoding: [0x05,0x07,0x6f,0xd6,0x7b,0xfa,0xed,0xe1]
+
+v_maximumminimum_f16 v5, m0, 0.5, m0
+// GFX12: encoding: [0x05,0x00,0x6f,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_maximumminimum_f16 v5, |exec_lo|, -1, vcc_hi
+// GFX12: encoding: [0x05,0x01,0x6f,0xd6,0x7e,0x82,0xad,0x01]
+
+v_maximumminimum_f16 v5, -|exec_hi|, null, -|vcc_lo|
+// GFX12: encoding: [0x05,0x05,0x6f,0xd6,0x7f,0xf8,0xa8,0xa1]
+
+v_maximumminimum_f16 v5, null, exec_lo, -|0xaf12|
+// GFX12: encoding: [0x05,0x04,0x6f,0xd6,0x7c,0xfc,0xfc,0x83,0x12,0xaf,0x00,0x00]
+
+v_maximumminimum_f16 v5, -1, -|exec_hi|, -|src_scc|
+// GFX12: encoding: [0x05,0x06,0x6f,0xd6,0xc1,0xfe,0xf4,0xc3]
+
+v_maximumminimum_f16 v5, 0.5, -m0, 0.5
+// GFX12: encoding: [0x05,0x00,0x6f,0xd6,0xf0,0xfa,0xc0,0x43]
+
+v_maximumminimum_f16 v5, -src_scc, |vcc_lo|, -1
+// GFX12: encoding: [0x05,0x02,0x6f,0xd6,0xfd,0xd4,0x04,0x23]
+
+v_maximumminimum_f16 v255, -|0xaf12|, -|vcc_hi|, null clamp
+// GFX12: encoding: [0xff,0x83,0x6f,0xd6,0xff,0xd6,0xf0,0x61,0x12,0xaf,0x00,0x00]
+
+v_minimummaximum_f16 v5, v1, v2, s3
+// GFX12: encoding: [0x05,0x00,0x6e,0xd6,0x01,0x05,0x0e,0x00]
+
+v_minimummaximum_f16 v5, v255, s2, s105
+// GFX12: encoding: [0x05,0x00,0x6e,0xd6,0xff,0x05,0xa4,0x01]
+
+v_minimummaximum_f16 v5, s1, v255, exec_hi
+// GFX12: encoding: [0x05,0x00,0x6e,0xd6,0x01,0xfe,0xff,0x01]
+
+v_minimummaximum_f16 v5, s105, s105, exec_lo
+// GFX12: encoding: [0x05,0x00,0x6e,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_minimummaximum_f16 v5, vcc_lo, ttmp15, v3
+// GFX12: encoding: [0x05,0x00,0x6e,0xd6,0x6a,0xf6,0x0c,0x04]
+
+v_minimummaximum_f16 v5, vcc_hi, 0xaf12, v255
+// GFX12: encoding: [0x05,0x00,0x6e,0xd6,0x6b,0xfe,0xfd,0x07,0x12,0xaf,0x00,0x00]
+
+v_minimummaximum_f16 v5, -|ttmp15|, -|src_scc|, -|ttmp15|
+// GFX12: encoding: [0x05,0x07,0x6e,0xd6,0x7b,0xfa,0xed,0xe1]
+
+v_minimummaximum_f16 v5, m0, 0.5, m0
+// GFX12: encoding: [0x05,0x00,0x6e,0xd6,0x7d,0xe0,0xf5,0x01]
+
+v_minimummaximum_f16 v5, |exec_lo|, -1, vcc_hi
+// GFX12: encoding: [0x05,0x01,0x6e,0xd6,0x7e,0x82,0xad,0x01]
+
+v_minimummaximum_f16 v5, -|exec_hi|, null, -|vcc_lo|
+// GFX12: encoding: [0x05,0x05,0x6e,0xd6,0x7f,0xf8,0xa8,0xa1]
+
+v_minimummaximum_f16 v5, null, exec_lo, -|0xaf12|
+// GFX12: encoding: [0x05,0x04,0x6e,0xd6,0x7c,0xfc,0xfc,0x83,0x12,0xaf,0x00,0x00]
+
+v_minimummaximum_f16 v5, -1, -|exec_hi|, -|src_scc|
+// GFX12: encoding: [0x05,0x06,0x6e,0xd6,0xc1,0xfe,0xf4,0xc3]
+
+v_minimummaximum_f16 v5, 0.5, -m0, 0.5
+// GFX12: encoding: [0x05,0x00,0x6e,0xd6,0xf0,0xfa,0xc0,0x43]
+
+v_minimummaximum_f16 v5, -src_scc, |vcc_lo|, -1
+// GFX12: encoding: [0x05,0x02,0x6e,0xd6,0xfd,0xd4,0x04,0x23]
+
+v_s_exp_f32 s5, s1
+// GFX12: encoding: [0x05,0x00,0x80,0xd6,0x01,0x00,0x00,0x00]
+
+v_s_exp_f32 s5, s105
+// GFX12: encoding: [0x05,0x00,0x80,0xd6,0x69,0x00,0x00,0x00]
+
+v_s_exp_f32 s5, vcc_lo
+// GFX12: encoding: [0x05,0x00,0x80,0xd6,0x6a,0x00,0x00,0x00]
+
+v_s_exp_f32 s5, vcc_hi
+// GFX12: encoding: [0x05,0x00,0x80,0xd6,0x6b,0x00,0x00,0x00]
+
+v_s_exp_f32 s5, ttmp15
+// GFX12: encoding: [0x05,0x00,0x80,0xd6,0x7b,0x00,0x00,0x00]
+
+v_s_exp_f32 s5, m0
+// GFX12: encoding: [0x05,0x00,0x80,0xd6,0x7d,0x00,0x00,0x00]
+
+v_s_exp_f32 s5, exec_lo
+// GFX12: encoding: [0x05,0x00,0x80,0xd6,0x7e,0x00,0x00,0x00]
+
+v_s_exp_f32 s5, exec_hi
+// GFX12: encoding: [0x05,0x00,0x80,0xd6,0x7f,0x00,0x00,0x00]
+
+v_s_exp_f32 s5, null
+// GFX12: encoding: [0x05,0x00,0x80,0xd6,0x7c,0x00,0x00,0x00]
+
+v_s_exp_f32 s5, -1
+// GFX12: encoding: [0x05,0x00,0x80,0xd6,0xc1,0x00,0x00,0x00]
+
+v_s_exp_f32 s5, 0.5
+// GFX12: encoding: [0x05,0x00,0x80,0xd6,0xf0,0x00,0x00,0x00]
+
+v_s_exp_f32 s5, src_scc
+// GFX12: encoding: [0x05,0x00,0x80,0xd6,0xfd,0x00,0x00,0x00]
+
+v_s_exp_f32 s105, 0xaf123456
+// GFX12: encoding: [0x69,0x00,0x80,0xd6,0xff,0x00,0x00,0x00,0x56,0x34,0x12,0xaf]
+
+v_s_exp_f32 s5, -s1
+// GFX12: encoding: [0x05,0x00,0x80,0xd6,0x01,0x00,0x00,0x20]
+
+v_s_exp_f32 s5, |s1|
+// GFX12: encoding: [0x05,0x01,0x80,0xd6,0x01,0x00,0x00,0x00]
+
+v_s_exp_f32 s5, s1 clamp
+// GFX12: encoding: [0x05,0x80,0x80,0xd6,0x01,0x00,0x00,0x00]
+
+v_s_exp_f32 s5, s1 mul:2
+// GFX12: encoding: [0x05,0x00,0x80,0xd6,0x01,0x00,0x00,0x08]
+
+v_s_exp_f32 s5, s1 mul:4
+// GFX12: encoding: [0x05,0x00,0x80,0xd6,0x01,0x00,0x00,0x10]
+
+v_s_exp_f32 s5, s1 div:2
+// GFX12: encoding: [0x05,0x00,0x80,0xd6,0x01,0x00,0x00,0x18]
+
+v_s_exp_f16 s5, s1
+// GFX12: encoding: [0x05,0x00,0x81,0xd6,0x01,0x00,0x00,0x00]
+
+v_s_exp_f16 s5, s105
+// GFX12: encoding: [0x05,0x00,0x81,0xd6,0x69,0x00,0x00,0x00]
+
+v_s_exp_f16 s5, vcc_lo
+// GFX12: encoding: [0x05,0x00,0x81,0xd6,0x6a,0x00,0x00,0x00]
+
+v_s_exp_f16 s5, vcc_hi
+// GFX12: encoding: [0x05,0x00,0x81,0xd6,0x6b,0x00,0x00,0x00]
+
+v_s_exp_f16 s5, ttmp15
+// GFX12: encoding: [0x05,0x00,0x81,0xd6,0x7b,0x00,0x00,0x00]
+
+v_s_exp_f16 s5, m0
+// GFX12: encoding: [0x05,0x00,0x81,0xd6,0x7d,0x00,0x00,0x00]
+
+v_s_exp_f16 s5, exec_lo
+// GFX12: encoding: [0x05,0x00,0x81,0xd6,0x7e,0x00,0x00,0x00]
+
+v_s_exp_f16 s5, exec_hi
+// GFX12: encoding: [0x05,0x00,0x81,0xd6,0x7f,0x00,0x00,0x00]
+
+v_s_exp_f16 s5, null
+// GFX12: encoding: [0x05,0x00,0x81,0xd6,0x7c,0x00,0x00,0x00]
+
+v_s_exp_f16 s5, -1
+// GFX12: encoding: [0x05,0x00,0x81,0xd6,0xc1,0x00,0x00,0x00]
+
+v_s_exp_f16 s5, 0.5
+// GFX12: encoding: [0x05,0x00,0x81,0xd6,0xf0,0x00,0x00,0x00]
+
+v_s_exp_f16 s5, src_scc
+// GFX12: encoding: [0x05,0x00,0x81,0xd6,0xfd,0x00,0x00,0x00]
+
+v_s_exp_f16 s105, 0xaf12
+// GFX12: encoding: [0x69,0x00,0x81,0xd6,0xff,0x00,0x00,0x00,0x12,0xaf,0x00,0x00]
+
+v_s_exp_f16 s5, -s1
+// GFX12: encoding: [0x05,0x00,0x81,0xd6,0x01,0x00,0x00,0x20]
+
+v_s_exp_f16 s5, |s1|
+// GFX12: encoding: [0x05,0x01,0x81,0xd6,0x01,0x00,0x00,0x00]
+
+v_s_exp_f16 s5, s1 clamp
+// GFX12: encoding: [0x05,0x80,0x81,0xd6,0x01,0x00,0x00,0x00]
+
+v_s_exp_f16 s5, s1 mul:2
+// GFX12: encoding: [0x05,0x00,0x81,0xd6,0x01,0x00,0x00,0x08]
+
+v_s_exp_f16 s5, s1 mul:4
+// GFX12: encoding: [0x05,0x00,0x81,0xd6,0x01,0x00,0x00,0x10]
+
+v_s_exp_f16 s5, s1 div:2
+// GFX12: encoding: [0x05,0x00,0x81,0xd6,0x01,0x00,0x00,0x18]
+
+v_s_log_f32 s5, s1
+// GFX12: encoding: [0x05,0x00,0x82,0xd6,0x01,0x00,0x00,0x00]
+
+v_s_log_f32 s5, s105
+// GFX12: encoding: [0x05,0x00,0x82,0xd6,0x69,0x00,0x00,0x00]
+
+v_s_log_f32 s5, vcc_lo
+// GFX12: encoding: [0x05,0x00,0x82,0xd6,0x6a,0x00,0x00,0x00]
+
+v_s_log_f32 s5, vcc_hi
+// GFX12: encoding: [0x05,0x00,0x82,0xd6,0x6b,0x00,0x00,0x00]
+
+v_s_log_f32 s5, ttmp15
+// GFX12: encoding: [0x05,0x00,0x82,0xd6,0x7b,0x00,0x00,0x00]
+
+v_s_log_f32 s5, m0
+// GFX12: encoding: [0x05,0x00,0x82,0xd6,0x7d,0x00,0x00,0x00]
+
+v_s_log_f32 s5, exec_lo
+// GFX12: encoding: [0x05,0x00,0x82,0xd6,0x7e,0x00,0x00,0x00]
+
+v_s_log_f32 s5, exec_hi
+// GFX12: encoding: [0x05,0x00,0x82,0xd6,0x7f,0x00,0x00,0x00]
+
+v_s_log_f32 s5, null
+// GFX12: encoding: [0x05,0x00,0x82,0xd6,0x7c,0x00,0x00,0x00]
+
+v_s_log_f32 s5, -1
+// GFX12: encoding: [0x05,0x00,0x82,0xd6,0xc1,0x00,0x00,0x00]
+
+v_s_log_f32 s5, 0.5
+// GFX12: encoding: [0x05,0x00,0x82,0xd6,0xf0,0x00,0x00,0x00]
+
+v_s_log_f32 s5, src_scc
+// GFX12: encoding: [0x05,0x00,0x82,0xd6,0xfd,0x00,0x00,0x00]
+
+v_s_log_f32 s105, 0xaf123456
+// GFX12: encoding: [0x69,0x00,0x82,0xd6,0xff,0x00,0x00,0x00,0x56,0x34,0x12,0xaf]
+
+v_s_log_f32 s5, -s1
+// GFX12: encoding: [0x05,0x00,0x82,0xd6,0x01,0x00,0x00,0x20]
+
+v_s_log_f32 s5, |s1|
+// GFX12: encoding: [0x05,0x01,0x82,0xd6,0x01,0x00,0x00,0x00]
+
+v_s_log_f32 s5, s1 clamp
+// GFX12: encoding: [0x05,0x80,0x82,0xd6,0x01,0x00,0x00,0x00]
+
+v_s_log_f32 s5, s1 mul:2
+// GFX12: encoding: [0x05,0x00,0x82,0xd6,0x01,0x00,0x00,0x08]
+
+v_s_log_f32 s5, s1 mul:4
+// GFX12: encoding: [0x05,0x00,0x82,0xd6,0x01,0x00,0x00,0x10]
+
+v_s_log_f32 s5, s1 div:2
+// GFX12: encoding: [0x05,0x00,0x82,0xd6,0x01,0x00,0x00,0x18]
+
+v_s_log_f16 s5, s1
+// GFX12: encoding: [0x05,0x00,0x83,0xd6,0x01,0x00,0x00,0x00]
+
+v_s_log_f16 s5, s105
+// GFX12: encoding: [0x05,0x00,0x83,0xd6,0x69,0x00,0x00,0x00]
+
+v_s_log_f16 s5, vcc_lo
+// GFX12: encoding: [0x05,0x00,0x83,0xd6,0x6a,0x00,0x00,0x00]
+
+v_s_log_f16 s5, vcc_hi
+// GFX12: encoding: [0x05,0x00,0x83,0xd6,0x6b,0x00,0x00,0x00]
+
+v_s_log_f16 s5, ttmp15
+// GFX12: encoding: [0x05,0x00,0x83,0xd6,0x7b,0x00,0x00,0x00]
+
+v_s_log_f16 s5, m0
+// GFX12: encoding: [0x05,0x00,0x83,0xd6,0x7d,0x00,0x00,0x00]
+
+v_s_log_f16 s5, exec_lo
+// GFX12: encoding: [0x05,0x00,0x83,0xd6,0x7e,0x00,0x00,0x00]
+
+v_s_log_f16 s5, exec_hi
+// GFX12: encoding: [0x05,0x00,0x83,0xd6,0x7f,0x00,0x00,0x00]
+
+v_s_log_f16 s5, null
+// GFX12: encoding: [0x05,0x00,0x83,0xd6,0x7c,0x00,0x00,0x00]
+
+v_s_log_f16 s5, -1
+// GFX12: encoding: [0x05,0x00,0x83,0xd6,0xc1,0x00,0x00,0x00]
+
+v_s_log_f16 s5, 0.5
+// GFX12: encoding: [0x05,0x00,0x83,0xd6,0xf0,0x00,0x00,0x00]
+
+v_s_log_f16 s5, src_scc
+// GFX12: encoding: [0x05,0x00,0x83,0xd6,0xfd,0x00,0x00,0x00]
+
+v_s_log_f16 s105, 0xaf12
+// GFX12: encoding: [0x69,0x00,0x83,0xd6,0xff,0x00,0x00,0x00,0x12,0xaf,0x00,0x00]
+
+v_s_log_f16 s5, -s1
+// GFX12: encoding: [0x05,0x00,0x83,0xd6,0x01,0x00,0x00,0x20]
+
+v_s_log_f16 s5, |s1|
+// GFX12: encoding: [0x05,0x01,0x83,0xd6,0x01,0x00,0x00,0x00]
+
+v_s_log_f16 s5, s1 clamp
+// GFX12: encoding: [0x05,0x80,0x83,0xd6,0x01,0x00,0x00,0x00]
+
+v_s_log_f16 s5, s1 mul:2
+// GFX12: encoding: [0x05,0x00,0x83,0xd6,0x01,0x00,0x00,0x08]
+
+v_s_log_f16 s5, s1 mul:4
+// GFX12: encoding: [0x05,0x00,0x83,0xd6,0x01,0x00,0x00,0x10]
+
+v_s_log_f16 s5, s1 div:2
+// GFX12: encoding: [0x05,0x00,0x83,0xd6,0x01,0x00,0x00,0x18]
+
+v_s_rcp_f32 s5, s1
+// GFX12: encoding: [0x05,0x00,0x84,0xd6,0x01,0x00,0x00,0x00]
+
+v_s_rcp_f32 s5, s105
+// GFX12: encoding: [0x05,0x00,0x84,0xd6,0x69,0x00,0x00,0x00]
+
+v_s_rcp_f32 s5, vcc_lo
+// GFX12: encoding: [0x05,0x00,0x84,0xd6,0x6a,0x00,0x00,0x00]
+
+v_s_rcp_f32 s5, vcc_hi
+// GFX12: encoding: [0x05,0x00,0x84,0xd6,0x6b,0x00,0x00,0x00]
+
+v_s_rcp_f32 s5, ttmp15
+// GFX12: encoding: [0x05,0x00,0x84,0xd6,0x7b,0x00,0x00,0x00]
+
+v_s_rcp_f32 s5, m0
+// GFX12: encoding: [0x05,0x00,0x84,0xd6,0x7d,0x00,0x00,0x00]
+
+v_s_rcp_f32 s5, exec_lo
+// GFX12: encoding: [0x05,0x00,0x84,0xd6,0x7e,0x00,0x00,0x00]
+
+v_s_rcp_f32 s5, exec_hi
+// GFX12: encoding: [0x05,0x00,0x84,0xd6,0x7f,0x00,0x00,0x00]
+
+v_s_rcp_f32 s5, null
+// GFX12: encoding: [0x05,0x00,0x84,0xd6,0x7c,0x00,0x00,0x00]
+
+v_s_rcp_f32 s5, -1
+// GFX12: encoding: [0x05,0x00,0x84,0xd6,0xc1,0x00,0x00,0x00]
+
+v_s_rcp_f32 s5, 0.5
+// GFX12: encoding: [0x05,0x00,0x84,0xd6,0xf0,0x00,0x00,0x00]
+
+v_s_rcp_f32 s5, src_scc
+// GFX12: encoding: [0x05,0x00,0x84,0xd6,0xfd,0x00,0x00,0x00]
+
+v_s_rcp_f32 s105, 0xaf123456
+// GFX12: encoding: [0x69,0x00,0x84,0xd6,0xff,0x00,0x00,0x00,0x56,0x34,0x12,0xaf]
+
+v_s_rcp_f32 s5, -s1
+// GFX12: encoding: [0x05,0x00,0x84,0xd6,0x01,0x00,0x00,0x20]
+
+v_s_rcp_f32 s5, |s1|
+// GFX12: encoding: [0x05,0x01,0x84,0xd6,0x01,0x00,0x00,0x00]
+
+v_s_rcp_f32 s5, s1 clamp
+// GFX12: encoding: [0x05,0x80,0x84,0xd6,0x01,0x00,0x00,0x00]
+
+v_s_rcp_f32 s5, s1 mul:2
+// GFX12: encoding: [0x05,0x00,0x84,0xd6,0x01,0x00,0x00,0x08]
+
+v_s_rcp_f32 s5, s1 mul:4
+// GFX12: encoding: [0x05,0x00,0x84,0xd6,0x01,0x00,0x00,0x10]
+
+v_s_rcp_f32 s5, s1 div:2
+// GFX12: encoding: [0x05,0x00,0x84,0xd6,0x01,0x00,0x00,0x18]
+
+v_s_rcp_f16 s5, s1
+// GFX12: encoding: [0x05,0x00,0x85,0xd6,0x01,0x00,0x00,0x00]
+
+v_s_rcp_f16 s5, s105
+// GFX12: encoding: [0x05,0x00,0x85,0xd6,0x69,0x00,0x00,0x00]
+
+v_s_rcp_f16 s5, vcc_lo
+// GFX12: encoding: [0x05,0x00,0x85,0xd6,0x6a,0x00,0x00,0x00]
+
+v_s_rcp_f16 s5, vcc_hi
+// GFX12: encoding: [0x05,0x00,0x85,0xd6,0x6b,0x00,0x00,0x00]
+
+v_s_rcp_f16 s5, ttmp15
+// GFX12: encoding: [0x05,0x00,0x85,0xd6,0x7b,0x00,0x00,0x00]
+
+v_s_rcp_f16 s5, m0
+// GFX12: encoding: [0x05,0x00,0x85,0xd6,0x7d,0x00,0x00,0x00]
+
+v_s_rcp_f16 s5, exec_lo
+// GFX12: encoding: [0x05,0x00,0x85,0xd6,0x7e,0x00,0x00,0x00]
+
+v_s_rcp_f16 s5, exec_hi
+// GFX12: encoding: [0x05,0x00,0x85,0xd6,0x7f,0x00,0x00,0x00]
+
+v_s_rcp_f16 s5, null
+// GFX12: encoding: [0x05,0x00,0x85,0xd6,0x7c,0x00,0x00,0x00]
+
+v_s_rcp_f16 s5, -1
+// GFX12: encoding: [0x05,0x00,0x85,0xd6,0xc1,0x00,0x00,0x00]
+
+v_s_rcp_f16 s5, 0.5
+// GFX12: encoding: [0x05,0x00,0x85,0xd6,0xf0,0x00,0x00,0x00]
+
+v_s_rcp_f16 s5, src_scc
+// GFX12: encoding: [0x05,0x00,0x85,0xd6,0xfd,0x00,0x00,0x00]
+
+v_s_rcp_f16 s105, 0xaf12
+// GFX12: encoding: [0x69,0x00,0x85,0xd6,0xff,0x00,0x00,0x00,0x12,0xaf,0x00,0x00]
+
+v_s_rcp_f16 s5, -s1
+// GFX12: encoding: [0x05,0x00,0x85,0xd6,0x01,0x00,0x00,0x20]
+
+v_s_rcp_f16 s5, |s1|
+// GFX12: encoding: [0x05,0x01,0x85,0xd6,0x01,0x00,0x00,0x00]
+
+v_s_rcp_f16 s5, s1 clamp
+// GFX12: encoding: [0x05,0x80,0x85,0xd6,0x01,0x00,0x00,0x00]
+
+v_s_rcp_f16 s5, s1 mul:2
+// GFX12: encoding: [0x05,0x00,0x85,0xd6,0x01,0x00,0x00,0x08]
+
+v_s_rcp_f16 s5, s1 mul:4
+// GFX12: encoding: [0x05,0x00,0x85,0xd6,0x01,0x00,0x00,0x10]
+
+v_s_rcp_f16 s5, s1 div:2
+// GFX12: encoding: [0x05,0x00,0x85,0xd6,0x01,0x00,0x00,0x18]
+
+v_s_rsq_f32 s5, s1
+// GFX12: encoding: [0x05,0x00,0x86,0xd6,0x01,0x00,0x00,0x00]
+
+v_s_rsq_f32 s5, s105
+// GFX12: encoding: [0x05,0x00,0x86,0xd6,0x69,0x00,0x00,0x00]
+
+v_s_rsq_f32 s5, vcc_lo
+// GFX12: encoding: [0x05,0x00,0x86,0xd6,0x6a,0x00,0x00,0x00]
+
+v_s_rsq_f32 s5, vcc_hi
+// GFX12: encoding: [0x05,0x00,0x86,0xd6,0x6b,0x00,0x00,0x00]
+
+v_s_rsq_f32 s5, ttmp15
+// GFX12: encoding: [0x05,0x00,0x86,0xd6,0x7b,0x00,0x00,0x00]
+
+v_s_rsq_f32 s5, m0
+// GFX12: encoding: [0x05,0x00,0x86,0xd6,0x7d,0x00,0x00,0x00]
+
+v_s_rsq_f32 s5, exec_lo
+// GFX12: encoding: [0x05,0x00,0x86,0xd6,0x7e,0x00,0x00,0x00]
+
+v_s_rsq_f32 s5, exec_hi
+// GFX12: encoding: [0x05,0x00,0x86,0xd6,0x7f,0x00,0x00,0x00]
+
+v_s_rsq_f32 s5, null
+// GFX12: encoding: [0x05,0x00,0x86,0xd6,0x7c,0x00,0x00,0x00]
+
+v_s_rsq_f32 s5, -1
+// GFX12: encoding: [0x05,0x00,0x86,0xd6,0xc1,0x00,0x00,0x00]
+
+v_s_rsq_f32 s5, 0.5
+// GFX12: encoding: [0x05,0x00,0x86,0xd6,0xf0,0x00,0x00,0x00]
+
+v_s_rsq_f32 s5, src_scc
+// GFX12: encoding: [0x05,0x00,0x86,0xd6,0xfd,0x00,0x00,0x00]
+
+v_s_rsq_f32 s105, 0xaf123456
+// GFX12: encoding: [0x69,0x00,0x86,0xd6,0xff,0x00,0x00,0x00,0x56,0x34,0x12,0xaf]
+
+v_s_rsq_f32 s5, -s1
+// GFX12: encoding: [0x05,0x00,0x86,0xd6,0x01,0x00,0x00,0x20]
+
+v_s_rsq_f32 s5, |s1|
+// GFX12: encoding: [0x05,0x01,0x86,0xd6,0x01,0x00,0x00,0x00]
+
+v_s_rsq_f32 s5, s1 clamp
+// GFX12: encoding: [0x05,0x80,0x86,0xd6,0x01,0x00,0x00,0x00]
+
+v_s_rsq_f32 s5, s1 mul:2
+// GFX12: encoding: [0x05,0x00,0x86,0xd6,0x01,0x00,0x00,0x08]
+
+v_s_rsq_f32 s5, s1 mul:4
+// GFX12: encoding: [0x05,0x00,0x86,0xd6,0x01,0x00,0x00,0x10]
+
+v_s_rsq_f32 s5, s1 div:2
+// GFX12: encoding: [0x05,0x00,0x86,0xd6,0x01,0x00,0x00,0x18]
+
+v_s_rsq_f16 s5, s1
+// GFX12: encoding: [0x05,0x00,0x87,0xd6,0x01,0x00,0x00,0x00]
+
+v_s_rsq_f16 s5, s105
+// GFX12: encoding: [0x05,0x00,0x87,0xd6,0x69,0x00,0x00,0x00]
+
+v_s_rsq_f16 s5, vcc_lo
+// GFX12: encoding: [0x05,0x00,0x87,0xd6,0x6a,0x00,0x00,0x00]
+
+v_s_rsq_f16 s5, vcc_hi
+// GFX12: encoding: [0x05,0x00,0x87,0xd6,0x6b,0x00,0x00,0x00]
+
+v_s_rsq_f16 s5, ttmp15
+// GFX12: encoding: [0x05,0x00,0x87,0xd6,0x7b,0x00,0x00,0x00]
+
+v_s_rsq_f16 s5, m0
+// GFX12: encoding: [0x05,0x00,0x87,0xd6,0x7d,0x00,0x00,0x00]
+
+v_s_rsq_f16 s5, exec_lo
+// GFX12: encoding: [0x05,0x00,0x87,0xd6,0x7e,0x00,0x00,0x00]
+
+v_s_rsq_f16 s5, exec_hi
+// GFX12: encoding: [0x05,0x00,0x87,0xd6,0x7f,0x00,0x00,0x00]
+
+v_s_rsq_f16 s5, null
+// GFX12: encoding: [0x05,0x00,0x87,0xd6,0x7c,0x00,0x00,0x00]
+
+v_s_rsq_f16 s5, -1
+// GFX12: encoding: [0x05,0x00,0x87,0xd6,0xc1,0x00,0x00,0x00]
+
+v_s_rsq_f16 s5, 0.5
+// GFX12: encoding: [0x05,0x00,0x87,0xd6,0xf0,0x00,0x00,0x00]
+
+v_s_rsq_f16 s5, src_scc
+// GFX12: encoding: [0x05,0x00,0x87,0xd6,0xfd,0x00,0x00,0x00]
+
+v_s_rsq_f16 s105, 0xaf12
+// GFX12: encoding: [0x69,0x00,0x87,0xd6,0xff,0x00,0x00,0x00,0x12,0xaf,0x00,0x00]
+
+v_s_rsq_f16 s5, -s1
+// GFX12: encoding: [0x05,0x00,0x87,0xd6,0x01,0x00,0x00,0x20]
+
+v_s_rsq_f16 s5, |s1|
+// GFX12: encoding: [0x05,0x01,0x87,0xd6,0x01,0x00,0x00,0x00]
+
+v_s_rsq_f16 s5, s1 clamp
+// GFX12: encoding: [0x05,0x80,0x87,0xd6,0x01,0x00,0x00,0x00]
+
+v_s_rsq_f16 s5, s1 mul:2
+// GFX12: encoding: [0x05,0x00,0x87,0xd6,0x01,0x00,0x00,0x08]
+
+v_s_rsq_f16 s5, s1 mul:4
+// GFX12: encoding: [0x05,0x00,0x87,0xd6,0x01,0x00,0x00,0x10]
+
+v_s_rsq_f16 s5, s1 div:2
+// GFX12: encoding: [0x05,0x00,0x87,0xd6,0x01,0x00,0x00,0x18]
+
+v_s_sqrt_f32 s5, s1
+// GFX12: encoding: [0x05,0x00,0x88,0xd6,0x01,0x00,0x00,0x00]
+
+v_s_sqrt_f32 s5, s105
+// GFX12: encoding: [0x05,0x00,0x88,0xd6,0x69,0x00,0x00,0x00]
+
+v_s_sqrt_f32 s5, vcc_lo
+// GFX12: encoding: [0x05,0x00,0x88,0xd6,0x6a,0x00,0x00,0x00]
+
+v_s_sqrt_f32 s5, vcc_hi
+// GFX12: encoding: [0x05,0x00,0x88,0xd6,0x6b,0x00,0x00,0x00]
+
+v_s_sqrt_f32 s5, ttmp15
+// GFX12: encoding: [0x05,0x00,0x88,0xd6,0x7b,0x00,0x00,0x00]
+
+v_s_sqrt_f32 s5, m0
+// GFX12: encoding: [0x05,0x00,0x88,0xd6,0x7d,0x00,0x00,0x00]
+
+v_s_sqrt_f32 s5, exec_lo
+// GFX12: encoding: [0x05,0x00,0x88,0xd6,0x7e,0x00,0x00,0x00]
+
+v_s_sqrt_f32 s5, exec_hi
+// GFX12: encoding: [0x05,0x00,0x88,0xd6,0x7f,0x00,0x00,0x00]
+
+v_s_sqrt_f32 s5, null
+// GFX12: encoding: [0x05,0x00,0x88,0xd6,0x7c,0x00,0x00,0x00]
+
+v_s_sqrt_f32 s5, -1
+// GFX12: encoding: [0x05,0x00,0x88,0xd6,0xc1,0x00,0x00,0x00]
+
+v_s_sqrt_f32 s5, 0.5
+// GFX12: encoding: [0x05,0x00,0x88,0xd6,0xf0,0x00,0x00,0x00]
+
+v_s_sqrt_f32 s5, src_scc
+// GFX12: encoding: [0x05,0x00,0x88,0xd6,0xfd,0x00,0x00,0x00]
+
+v_s_sqrt_f32 s105, 0xaf123456
+// GFX12: encoding: [0x69,0x00,0x88,0xd6,0xff,0x00,0x00,0x00,0x56,0x34,0x12,0xaf]
+
+v_s_sqrt_f32 s5, -s1
+// GFX12: encoding: [0x05,0x00,0x88,0xd6,0x01,0x00,0x00,0x20]
+
+v_s_sqrt_f32 s5, |s1|
+// GFX12: encoding: [0x05,0x01,0x88,0xd6,0x01,0x00,0x00,0x00]
+
+v_s_sqrt_f32 s5, s1 clamp
+// GFX12: encoding: [0x05,0x80,0x88,0xd6,0x01,0x00,0x00,0x00]
+
+v_s_sqrt_f32 s5, s1 mul:2
+// GFX12: encoding: [0x05,0x00,0x88,0xd6,0x01,0x00,0x00,0x08]
+
+v_s_sqrt_f32 s5, s1 mul:4
+// GFX12: encoding: [0x05,0x00,0x88,0xd6,0x01,0x00,0x00,0x10]
+
+v_s_sqrt_f32 s5, s1 div:2
+// GFX12: encoding: [0x05,0x00,0x88,0xd6,0x01,0x00,0x00,0x18]
+
+v_s_sqrt_f16 s5, s1
+// GFX12: encoding: [0x05,0x00,0x89,0xd6,0x01,0x00,0x00,0x00]
+
+v_s_sqrt_f16 s5, s105
+// GFX12: encoding: [0x05,0x00,0x89,0xd6,0x69,0x00,0x00,0x00]
+
+v_s_sqrt_f16 s5, vcc_lo
+// GFX12: encoding: [0x05,0x00,0x89,0xd6,0x6a,0x00,0x00,0x00]
+
+v_s_sqrt_f16 s5, vcc_hi
+// GFX12: encoding: [0x05,0x00,0x89,0xd6,0x6b,0x00,0x00,0x00]
+
+v_s_sqrt_f16 s5, ttmp15
+// GFX12: encoding: [0x05,0x00,0x89,0xd6,0x7b,0x00,0x00,0x00]
+
+v_s_sqrt_f16 s5, m0
+// GFX12: encoding: [0x05,0x00,0x89,0xd6,0x7d,0x00,0x00,0x00]
+
+v_s_sqrt_f16 s5, exec_lo
+// GFX12: encoding: [0x05,0x00,0x89,0xd6,0x7e,0x00,0x00,0x00]
+
+v_s_sqrt_f16 s5, exec_hi
+// GFX12: encoding: [0x05,0x00,0x89,0xd6,0x7f,0x00,0x00,0x00]
+
+v_s_sqrt_f16 s5, null
+// GFX12: encoding: [0x05,0x00,0x89,0xd6,0x7c,0x00,0x00,0x00]
+
+v_s_sqrt_f16 s5, -1
+// GFX12: encoding: [0x05,0x00,0x89,0xd6,0xc1,0x00,0x00,0x00]
+
+v_s_sqrt_f16 s5, 0.5
+// GFX12: encoding: [0x05,0x00,0x89,0xd6,0xf0,0x00,0x00,0x00]
+
+v_s_sqrt_f16 s5, src_scc
+// GFX12: encoding: [0x05,0x00,0x89,0xd6,0xfd,0x00,0x00,0x00]
+
+v_s_sqrt_f16 s105, 0xaf12
+// GFX12: encoding: [0x69,0x00,0x89,0xd6,0xff,0x00,0x00,0x00,0x12,0xaf,0x00,0x00]
+
+v_s_sqrt_f16 s5, -s1
+// GFX12: encoding: [0x05,0x00,0x89,0xd6,0x01,0x00,0x00,0x20]
+
+v_s_sqrt_f16 s5, |s1|
+// GFX12: encoding: [0x05,0x01,0x89,0xd6,0x01,0x00,0x00,0x00]
+
+v_s_sqrt_f16 s5, s1 clamp
+// GFX12: encoding: [0x05,0x80,0x89,0xd6,0x01,0x00,0x00,0x00]
+
+v_s_sqrt_f16 s5, s1 mul:2
+// GFX12: encoding: [0x05,0x00,0x89,0xd6,0x01,0x00,0x00,0x08]
+
+v_s_sqrt_f16 s5, s1 mul:4
+// GFX12: encoding: [0x05,0x00,0x89,0xd6,0x01,0x00,0x00,0x10]
+
+v_s_sqrt_f16 s5, s1 div:2
+// GFX12: encoding: [0x05,0x00,0x89,0xd6,0x01,0x00,0x00,0x18]
diff --git a/llvm/test/MC/AMDGPU/gfx12_asm_vop3.s b/llvm/test/MC/AMDGPU/gfx12_asm_vop3.s
index 484e73d..3e99a61 100644
--- a/llvm/test/MC/AMDGPU/gfx12_asm_vop3.s
+++ b/llvm/test/MC/AMDGPU/gfx12_asm_vop3.s
@@ -1,7 +1,7 @@
-// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1200 -mattr=+wavefrontsize32 -show-encoding %s | FileCheck --check-prefixes=GFX12,W32 %s
-// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1200 -mattr=+wavefrontsize64 -show-encoding %s | FileCheck --check-prefixes=GFX12,W64 %s
-// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1200 -mattr=+wavefrontsize32 %s 2>&1 | FileCheck --check-prefix=W32-ERR --implicit-check-not=error: %s
-// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1200 -mattr=+wavefrontsize64 %s 2>&1 | FileCheck --check-prefix=W64-ERR --implicit-check-not=error: %s
+// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1200 -mattr=+wavefrontsize32,+real-true16 -show-encoding %s | FileCheck --check-prefixes=GFX12,W32 %s
+// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1200 -mattr=+wavefrontsize64,+real-true16 -show-encoding %s | FileCheck --check-prefixes=GFX12,W64 %s
+// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1200 -mattr=+wavefrontsize32,+real-true16 %s 2>&1 | FileCheck --check-prefix=W32-ERR --implicit-check-not=error: %s
+// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1200 -mattr=+wavefrontsize64,+real-true16 %s 2>&1 | FileCheck --check-prefix=W64-ERR --implicit-check-not=error: %s
v_add3_u32 v5, v1, v2, s3
// GFX12: encoding: [0x05,0x00,0x55,0xd6,0x01,0x05,0x0e,0x00]
diff --git a/llvm/test/MC/AMDGPU/gfx12_asm_vop3_dpp16-fake16.s b/llvm/test/MC/AMDGPU/gfx12_asm_vop3_dpp16-fake16.s
new file mode 100644
index 0000000..9fe555f
--- /dev/null
+++ b/llvm/test/MC/AMDGPU/gfx12_asm_vop3_dpp16-fake16.s
@@ -0,0 +1,5764 @@
+// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1200 -mattr=+wavefrontsize32,-real-true16 -show-encoding %s | FileCheck --check-prefixes=GFX12,W32 %s
+// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1200 -mattr=+wavefrontsize64,-real-true16 -show-encoding %s | FileCheck --check-prefixes=GFX12,W64 %s
+// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1200 -mattr=+wavefrontsize32,-real-true16 %s 2>&1 | FileCheck --check-prefixes=GFX12-ERR,W32-ERR --implicit-check-not=error: %s
+// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1200 -mattr=+wavefrontsize64,-real-true16 %s 2>&1 | FileCheck --check-prefixes=GFX12-ERR,W64-ERR --implicit-check-not=error: %s
+
+v_add3_u32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x55,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_add3_u32_e64_dpp v5, v1, s2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x55,0xd6,0xfa,0x04,0x0c,0x04,0x01,0x1b,0x00,0xff]
+
+v_add3_u32_e64_dpp v5, v1, 10, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x55,0xd6,0xfa,0x14,0x0d,0x04,0x01,0x1b,0x00,0xff]
+
+v_add3_u32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX12: [0x05,0x00,0x55,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_add3_u32_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX12: [0x05,0x00,0x55,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_add3_u32_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX12: [0x05,0x00,0x55,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_add3_u32_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX12: [0x05,0x00,0x55,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_add3_u32_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX12: [0x05,0x00,0x55,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_add3_u32_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX12: [0x05,0x00,0x55,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_add3_u32_e64_dpp v5, v1, v2, ttmp15 row_shr:15
+// GFX12: [0x05,0x00,0x55,0xd6,0xfa,0x04,0xee,0x01,0x01,0x1f,0x01,0xff]
+
+v_add3_u32_e64_dpp v5, v1, v2, exec_hi row_ror:1
+// GFX12: [0x05,0x00,0x55,0xd6,0xfa,0x04,0xfe,0x01,0x01,0x21,0x01,0xff]
+
+v_add3_u32_e64_dpp v5, v1, v2, exec_lo row_ror:15
+// GFX12: [0x05,0x00,0x55,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x2f,0x01,0xff]
+
+v_add3_u32_e64_dpp v5, v1, v2, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x00,0x55,0xd6,0xfa,0x04,0xf2,0x01,0x01,0x50,0x01,0xff]
+
+v_add3_u32_e64_dpp v5, v1, v2, -1 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x00,0x55,0xd6,0xfa,0x04,0x06,0x03,0x01,0x5f,0x01,0x01]
+
+v_add3_u32_e64_dpp v5, v1, v2, 0.5 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: [0x05,0x00,0x55,0xd6,0xfa,0x04,0xc2,0x03,0x01,0x60,0x09,0x13]
+
+v_add3_u32_e64_dpp v255, v255, v255, src_scc row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: [0xff,0x00,0x55,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x05,0x30]
+
+v_add_co_u32_e64_dpp v5, s6, v1, v2 quad_perm:[3,2,1,0]
+// W32: [0x05,0x06,0x00,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32_e64_dpp v5, s6, v1, v2 quad_perm:[0,1,2,3]
+// W32: [0x05,0x06,0x00,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32_e64_dpp v5, s6, v1, v2 row_mirror
+// W32: [0x05,0x06,0x00,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32_e64_dpp v5, s6, v1, s2 row_mirror
+// W32: [0x05,0x06,0x00,0xd7,0xfa,0x04,0x00,0x00,0x01,0x40,0x01,0xff]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32_e64_dpp v5, s6, v1, v2 row_half_mirror
+// W32: [0x05,0x06,0x00,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32_e64_dpp v5, s6, v1, v2 row_shl:1
+// W32: [0x05,0x06,0x00,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32_e64_dpp v5, s6, v1, v2 row_shl:15
+// W32: [0x05,0x06,0x00,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32_e64_dpp v5, s6, v1, v2 row_shr:1
+// W32: [0x05,0x06,0x00,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32_e64_dpp v5, s6, v1, v2 row_shr:15
+// W32: [0x05,0x06,0x00,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32_e64_dpp v5, s6, v1, v2 row_ror:1
+// W32: [0x05,0x06,0x00,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32_e64_dpp v5, s105, v1, v2 row_ror:15
+// W32: [0x05,0x69,0x00,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32_e64_dpp v5, vcc_lo, v1, v2 row_share:0 row_mask:0xf bank_mask:0xf
+// W32: [0x05,0x6a,0x00,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32_e64_dpp v5, vcc_hi, v1, v2 row_share:15 row_mask:0x0 bank_mask:0x1
+// W32: [0x05,0x6b,0x00,0xd7,0xfa,0x04,0x02,0x00,0x01,0x5f,0x01,0x01]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32_e64_dpp v5, ttmp15, v1, v2 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// W32: [0x05,0x7b,0x00,0xd7,0xfa,0x04,0x02,0x00,0x01,0x60,0x09,0x13]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32_e64_dpp v5, s[12:13], v1, v2 quad_perm:[3,2,1,0]
+// W64: [0x05,0x0c,0x00,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32_e64_dpp v5, s[12:13], v1, v2 quad_perm:[0,1,2,3]
+// W64: [0x05,0x0c,0x00,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32_e64_dpp v5, s[12:13], v1, v2 row_mirror
+// W64: [0x05,0x0c,0x00,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32_e64_dpp v5, s[12:13], v1, v2 row_half_mirror
+// W64: [0x05,0x0c,0x00,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32_e64_dpp v5, s[12:13], v1, s2 row_half_mirror
+// W64: [0x05,0x0c,0x00,0xd7,0xfa,0x04,0x00,0x00,0x01,0x41,0x01,0xff]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32_e64_dpp v5, s[12:13], v1, v2 row_shl:1
+// W64: [0x05,0x0c,0x00,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32_e64_dpp v5, s[12:13], v1, v2 row_shl:15
+// W64: [0x05,0x0c,0x00,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32_e64_dpp v5, s[12:13], v1, v2 row_shr:1
+// W64: [0x05,0x0c,0x00,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32_e64_dpp v5, s[12:13], v1, v2 row_shr:15
+// W64: [0x05,0x0c,0x00,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32_e64_dpp v5, s[12:13], v1, v2 row_ror:1
+// W64: [0x05,0x0c,0x00,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32_e64_dpp v5, s[12:13], v1, v2 row_ror:15
+// W64: [0x05,0x0c,0x00,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32_e64_dpp v5, s[104:105], v1, v2 row_share:0 row_mask:0xf bank_mask:0xf
+// W64: [0x05,0x68,0x00,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32_e64_dpp v5, vcc, v1, v2 row_share:15 row_mask:0x0 bank_mask:0x1
+// W64: [0x05,0x6a,0x00,0xd7,0xfa,0x04,0x02,0x00,0x01,0x5f,0x01,0x01]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32_e64_dpp v5, ttmp[14:15], v1, v2 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// W64: [0x05,0x7a,0x00,0xd7,0xfa,0x04,0x02,0x00,0x01,0x60,0x09,0x13]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32_e64_dpp v255, null, v255, v255 clamp row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: [0xff,0xfc,0x00,0xd7,0xfa,0xfe,0x03,0x00,0xff,0x6f,0x05,0x30]
+
+v_add_lshl_u32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x47,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_add_lshl_u32_e64_dpp v5, v1, s2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x47,0xd6,0xfa,0x04,0x0c,0x04,0x01,0x1b,0x00,0xff]
+
+v_add_lshl_u32_e64_dpp v5, v1, 10, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x47,0xd6,0xfa,0x14,0x0d,0x04,0x01,0x1b,0x00,0xff]
+
+v_add_lshl_u32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX12: [0x05,0x00,0x47,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_add_lshl_u32_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX12: [0x05,0x00,0x47,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_add_lshl_u32_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX12: [0x05,0x00,0x47,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_add_lshl_u32_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX12: [0x05,0x00,0x47,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_add_lshl_u32_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX12: [0x05,0x00,0x47,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_add_lshl_u32_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX12: [0x05,0x00,0x47,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_add_lshl_u32_e64_dpp v5, v1, v2, ttmp15 row_shr:15
+// GFX12: [0x05,0x00,0x47,0xd6,0xfa,0x04,0xee,0x01,0x01,0x1f,0x01,0xff]
+
+v_add_lshl_u32_e64_dpp v5, v1, v2, exec_hi row_ror:1
+// GFX12: [0x05,0x00,0x47,0xd6,0xfa,0x04,0xfe,0x01,0x01,0x21,0x01,0xff]
+
+v_add_lshl_u32_e64_dpp v5, v1, v2, exec_lo row_ror:15
+// GFX12: [0x05,0x00,0x47,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x2f,0x01,0xff]
+
+v_add_lshl_u32_e64_dpp v5, v1, v2, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x00,0x47,0xd6,0xfa,0x04,0xf2,0x01,0x01,0x50,0x01,0xff]
+
+v_add_lshl_u32_e64_dpp v5, v1, v2, -1 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x00,0x47,0xd6,0xfa,0x04,0x06,0x03,0x01,0x5f,0x01,0x01]
+
+v_add_lshl_u32_e64_dpp v5, v1, v2, 0.5 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: [0x05,0x00,0x47,0xd6,0xfa,0x04,0xc2,0x03,0x01,0x60,0x09,0x13]
+
+v_add_lshl_u32_e64_dpp v255, v255, v255, src_scc row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: [0xff,0x00,0x47,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x05,0x30]
+
+v_add_nc_i16_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x0d,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+
+v_add_nc_i16_e64_dpp v5, v1, v2 quad_perm:[0,1,2,3]
+// GFX12: [0x05,0x00,0x0d,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+
+v_add_nc_i16_e64_dpp v5, v1, v2 row_mirror
+// GFX12: [0x05,0x00,0x0d,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+
+v_add_nc_i16_e64_dpp v5, v1, v2 row_half_mirror
+// GFX12: [0x05,0x00,0x0d,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+
+v_add_nc_i16_e64_dpp v5, v1, v2 row_shl:1
+// GFX12: [0x05,0x00,0x0d,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+
+v_add_nc_i16_e64_dpp v5, v1, v2 row_shl:15
+// GFX12: [0x05,0x00,0x0d,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+
+v_add_nc_i16_e64_dpp v5, v1, v2 row_shr:1
+// GFX12: [0x05,0x00,0x0d,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+
+v_add_nc_i16_e64_dpp v5, v1, v2 row_shr:15
+// GFX12: [0x05,0x00,0x0d,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+
+v_add_nc_i16_e64_dpp v5, v1, v2 row_ror:1
+// GFX12: [0x05,0x00,0x0d,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+
+v_add_nc_i16_e64_dpp v5, v1, v2 row_ror:15
+// GFX12: [0x05,0x00,0x0d,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+
+v_add_nc_i16_e64_dpp v5, v1, v2 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x00,0x0d,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+
+v_add_nc_i16_e64_dpp v5, v1, v2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x00,0x0d,0xd7,0xfa,0x04,0x02,0x00,0x01,0x5f,0x01,0x01]
+
+v_add_nc_i16_e64_dpp v5, v1, v2 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: [0x05,0x00,0x0d,0xd7,0xfa,0x04,0x02,0x00,0x01,0x60,0x09,0x13]
+
+v_add_nc_i16_e64_dpp v255, v255, v255 clamp row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: [0xff,0x80,0x0d,0xd7,0xfa,0xfe,0x03,0x00,0xff,0x6f,0x05,0x30]
+
+v_add_nc_i32_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x26,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+
+v_add_nc_i32_e64_dpp v5, v1, v2 quad_perm:[0,1,2,3]
+// GFX12: [0x05,0x00,0x26,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+
+v_add_nc_i32_e64_dpp v5, v1, v2 row_mirror
+// GFX12: [0x05,0x00,0x26,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+
+v_add_nc_i32_e64_dpp v5, v1, v2 row_half_mirror
+// GFX12: [0x05,0x00,0x26,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+
+v_add_nc_i32_e64_dpp v5, v1, v2 row_shl:1
+// GFX12: [0x05,0x00,0x26,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+
+v_add_nc_i32_e64_dpp v5, v1, v2 row_shl:15
+// GFX12: [0x05,0x00,0x26,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+
+v_add_nc_i32_e64_dpp v5, v1, v2 row_shr:1
+// GFX12: [0x05,0x00,0x26,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+
+v_add_nc_i32_e64_dpp v5, v1, v2 row_shr:15
+// GFX12: [0x05,0x00,0x26,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+
+v_add_nc_i32_e64_dpp v5, v1, v2 row_ror:1
+// GFX12: [0x05,0x00,0x26,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+
+v_add_nc_i32_e64_dpp v5, v1, v2 row_ror:15
+// GFX12: [0x05,0x00,0x26,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+
+v_add_nc_i32_e64_dpp v5, v1, v2 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x00,0x26,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+
+v_add_nc_i32_e64_dpp v5, v1, v2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x00,0x26,0xd7,0xfa,0x04,0x02,0x00,0x01,0x5f,0x01,0x01]
+
+v_add_nc_i32_e64_dpp v5, v1, v2 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: [0x05,0x00,0x26,0xd7,0xfa,0x04,0x02,0x00,0x01,0x60,0x09,0x13]
+
+v_add_nc_i32_e64_dpp v255, v255, v255 clamp row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: [0xff,0x80,0x26,0xd7,0xfa,0xfe,0x03,0x00,0xff,0x6f,0x05,0x30]
+
+v_add_nc_u16_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x03,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+
+v_add_nc_u16_e64_dpp v5, v1, v2 quad_perm:[0,1,2,3]
+// GFX12: [0x05,0x00,0x03,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+
+v_add_nc_u16_e64_dpp v5, v1, v2 row_mirror
+// GFX12: [0x05,0x00,0x03,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+
+v_add_nc_u16_e64_dpp v5, v1, v2 row_half_mirror
+// GFX12: [0x05,0x00,0x03,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+
+v_add_nc_u16_e64_dpp v5, v1, v2 row_shl:1
+// GFX12: [0x05,0x00,0x03,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+
+v_add_nc_u16_e64_dpp v5, v1, v2 row_shl:15
+// GFX12: [0x05,0x00,0x03,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+
+v_add_nc_u16_e64_dpp v5, v1, v2 row_shr:1
+// GFX12: [0x05,0x00,0x03,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+
+v_add_nc_u16_e64_dpp v5, v1, v2 row_shr:15
+// GFX12: [0x05,0x00,0x03,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+
+v_add_nc_u16_e64_dpp v5, v1, v2 row_ror:1
+// GFX12: [0x05,0x00,0x03,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+
+v_add_nc_u16_e64_dpp v5, v1, v2 row_ror:15
+// GFX12: [0x05,0x00,0x03,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+
+v_add_nc_u16_e64_dpp v5, v1, v2 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x00,0x03,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+
+v_add_nc_u16_e64_dpp v5, v1, v2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x00,0x03,0xd7,0xfa,0x04,0x02,0x00,0x01,0x5f,0x01,0x01]
+
+v_add_nc_u16_e64_dpp v5, v1, v2 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: [0x05,0x00,0x03,0xd7,0xfa,0x04,0x02,0x00,0x01,0x60,0x09,0x13]
+
+v_add_nc_u16_e64_dpp v255, v255, v255 clamp row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: [0xff,0x80,0x03,0xd7,0xfa,0xfe,0x03,0x00,0xff,0x6f,0x05,0x30]
+
+v_alignbit_b32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x16,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_alignbit_b32_e64_dpp v5, v1, s2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x16,0xd6,0xfa,0x04,0x0c,0x04,0x01,0x1b,0x00,0xff]
+
+v_alignbit_b32_e64_dpp v5, v1, 10, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x16,0xd6,0xfa,0x14,0x0d,0x04,0x01,0x1b,0x00,0xff]
+
+v_alignbit_b32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX12: [0x05,0x00,0x16,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_alignbit_b32_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX12: [0x05,0x00,0x16,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_alignbit_b32_e64_dpp v5, v1, v2, v3 row_half_mirror
+// GFX12: [0x05,0x00,0x16,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x41,0x01,0xff]
+
+v_alignbit_b32_e64_dpp v5, v1, v2, v255 row_shl:1
+// GFX12: [0x05,0x00,0x16,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x01,0x01,0xff]
+
+v_alignbit_b32_e64_dpp v5, v1, v2, s105 row_shl:15
+// GFX12: [0x05,0x00,0x16,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x0f,0x01,0xff]
+
+v_alignbit_b32_e64_dpp v5, v1, v2, vcc_hi row_shr:1
+// GFX12: [0x05,0x00,0x16,0xd6,0xfa,0x04,0xae,0x01,0x01,0x11,0x01,0xff]
+
+v_alignbit_b32_e64_dpp v5, v1, v2, vcc_lo row_shr:15
+// GFX12: [0x05,0x00,0x16,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x1f,0x01,0xff]
+
+v_alignbit_b32_e64_dpp v5, v1, v2, ttmp15 row_ror:1
+// GFX12: [0x05,0x00,0x16,0xd6,0xfa,0x04,0xee,0x01,0x01,0x21,0x01,0xff]
+
+v_alignbit_b32_e64_dpp v5, v1, v2, exec_hi row_ror:15
+// GFX12: [0x05,0x00,0x16,0xd6,0xfa,0x04,0xfe,0x01,0x01,0x2f,0x01,0xff]
+
+v_alignbit_b32_e64_dpp v5, v1, v2, exec_lo row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x00,0x16,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x50,0x01,0xff]
+
+v_alignbit_b32_e64_dpp v5, v1, v2, null row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x00,0x16,0xd6,0xfa,0x04,0xf2,0x01,0x01,0x5f,0x01,0x01]
+
+v_alignbit_b32_e64_dpp v5, v1, v2, -1 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: [0x05,0x00,0x16,0xd6,0xfa,0x04,0x06,0x03,0x01,0x60,0x09,0x13]
+
+v_alignbit_b32_e64_dpp v255, v255, v255, src_scc row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: [0xff,0x00,0x16,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x05,0x30]
+
+v_alignbyte_b32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_alignbyte_b32_e64_dpp v5, v1, s2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x17,0xd6,0xfa,0x04,0x0c,0x04,0x01,0x1b,0x00,0xff]
+
+v_alignbyte_b32_e64_dpp v5, v1, 10, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x17,0xd6,0xfa,0x14,0x0d,0x04,0x01,0x1b,0x00,0xff]
+
+v_alignbyte_b32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX12: [0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_alignbyte_b32_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX12: [0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_alignbyte_b32_e64_dpp v5, v1, v2, v3 row_half_mirror
+// GFX12: [0x05,0x00,0x17,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x41,0x01,0xff]
+
+v_alignbyte_b32_e64_dpp v5, v1, v2, v255 row_shl:1
+// GFX12: [0x05,0x00,0x17,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x01,0x01,0xff]
+
+v_alignbyte_b32_e64_dpp v5, v1, v2, s105 row_shl:15
+// GFX12: [0x05,0x00,0x17,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x0f,0x01,0xff]
+
+v_alignbyte_b32_e64_dpp v5, v1, v2, vcc_hi row_shr:1
+// GFX12: [0x05,0x00,0x17,0xd6,0xfa,0x04,0xae,0x01,0x01,0x11,0x01,0xff]
+
+v_alignbyte_b32_e64_dpp v5, v1, v2, vcc_lo row_shr:15
+// GFX12: [0x05,0x00,0x17,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x1f,0x01,0xff]
+
+v_alignbyte_b32_e64_dpp v5, v1, v2, ttmp15 row_ror:1
+// GFX12: [0x05,0x00,0x17,0xd6,0xfa,0x04,0xee,0x01,0x01,0x21,0x01,0xff]
+
+v_alignbyte_b32_e64_dpp v5, v1, v2, exec_hi row_ror:15
+// GFX12: [0x05,0x00,0x17,0xd6,0xfa,0x04,0xfe,0x01,0x01,0x2f,0x01,0xff]
+
+v_alignbyte_b32_e64_dpp v5, v1, v2, exec_lo row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x00,0x17,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x50,0x01,0xff]
+
+v_alignbyte_b32_e64_dpp v5, v1, v2, null row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x00,0x17,0xd6,0xfa,0x04,0xf2,0x01,0x01,0x5f,0x01,0x01]
+
+v_alignbyte_b32_e64_dpp v5, v1, v2, -1 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: [0x05,0x00,0x17,0xd6,0xfa,0x04,0x06,0x03,0x01,0x60,0x09,0x13]
+
+v_alignbyte_b32_e64_dpp v255, v255, v255, src_scc row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: [0xff,0x00,0x17,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x05,0x30]
+
+v_and_b16_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+
+v_and_b16_e64_dpp v5, v1, v2 quad_perm:[0,1,2,3]
+// GFX12: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+
+v_and_b16_e64_dpp v5, v1, v2 row_mirror
+// GFX12: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+
+v_and_b16_e64_dpp v5, v1, v2 row_half_mirror
+// GFX12: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+
+v_and_b16_e64_dpp v5, v1, v2 row_shl:1
+// GFX12: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+
+v_and_b16_e64_dpp v5, v1, v2 row_shl:15
+// GFX12: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+
+v_and_b16_e64_dpp v5, v1, v2 row_shr:1
+// GFX12: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+
+v_and_b16_e64_dpp v5, v1, v2 row_shr:15
+// GFX12: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+
+v_and_b16_e64_dpp v5, v1, v2 row_ror:1
+// GFX12: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+
+v_and_b16_e64_dpp v5, v1, v2 row_ror:15
+// GFX12: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+
+v_and_b16_e64_dpp v5, v1, v2 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+
+v_and_b16_e64_dpp v5, v1, v2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x5f,0x01,0x01]
+
+v_and_b16_e64_dpp v5, v1, v2 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x60,0x09,0x13]
+
+v_and_b16_e64_dpp v255, v255, v255 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: [0xff,0x00,0x62,0xd7,0xfa,0xfe,0x03,0x00,0xff,0x6f,0x05,0x30]
+
+v_and_or_b32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x57,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_and_or_b32_e64_dpp v5, v1, s2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x57,0xd6,0xfa,0x04,0x0c,0x04,0x01,0x1b,0x00,0xff]
+
+v_and_or_b32_e64_dpp v5, v1, 10, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x57,0xd6,0xfa,0x14,0x0d,0x04,0x01,0x1b,0x00,0xff]
+
+v_and_or_b32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX12: [0x05,0x00,0x57,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_and_or_b32_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX12: [0x05,0x00,0x57,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_and_or_b32_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX12: [0x05,0x00,0x57,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_and_or_b32_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX12: [0x05,0x00,0x57,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_and_or_b32_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX12: [0x05,0x00,0x57,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_and_or_b32_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX12: [0x05,0x00,0x57,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_and_or_b32_e64_dpp v5, v1, v2, ttmp15 row_shr:15
+// GFX12: [0x05,0x00,0x57,0xd6,0xfa,0x04,0xee,0x01,0x01,0x1f,0x01,0xff]
+
+v_and_or_b32_e64_dpp v5, v1, v2, exec_hi row_ror:1
+// GFX12: [0x05,0x00,0x57,0xd6,0xfa,0x04,0xfe,0x01,0x01,0x21,0x01,0xff]
+
+v_and_or_b32_e64_dpp v5, v1, v2, exec_lo row_ror:15
+// GFX12: [0x05,0x00,0x57,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x2f,0x01,0xff]
+
+v_and_or_b32_e64_dpp v5, v1, v2, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x00,0x57,0xd6,0xfa,0x04,0xf2,0x01,0x01,0x50,0x01,0xff]
+
+v_and_or_b32_e64_dpp v5, v1, v2, -1 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x00,0x57,0xd6,0xfa,0x04,0x06,0x03,0x01,0x5f,0x01,0x01]
+
+v_and_or_b32_e64_dpp v5, v1, v2, 0.5 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: [0x05,0x00,0x57,0xd6,0xfa,0x04,0xc2,0x03,0x01,0x60,0x09,0x13]
+
+v_and_or_b32_e64_dpp v255, v255, v255, src_scc row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: [0xff,0x00,0x57,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x05,0x30]
+
+v_ashrrev_i16_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x3a,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+
+v_ashrrev_i16_e64_dpp v5, v1, v2 quad_perm:[0,1,2,3]
+// GFX12: [0x05,0x00,0x3a,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+
+v_ashrrev_i16_e64_dpp v5, v1, v2 row_mirror
+// GFX12: [0x05,0x00,0x3a,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+
+v_ashrrev_i16_e64_dpp v5, v1, v2 row_half_mirror
+// GFX12: [0x05,0x00,0x3a,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+
+v_ashrrev_i16_e64_dpp v5, v1, v2 row_shl:1
+// GFX12: [0x05,0x00,0x3a,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+
+v_ashrrev_i16_e64_dpp v5, v1, v2 row_shl:15
+// GFX12: [0x05,0x00,0x3a,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+
+v_ashrrev_i16_e64_dpp v5, v1, v2 row_shr:1
+// GFX12: [0x05,0x00,0x3a,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+
+v_ashrrev_i16_e64_dpp v5, v1, v2 row_shr:15
+// GFX12: [0x05,0x00,0x3a,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+
+v_ashrrev_i16_e64_dpp v5, v1, v2 row_ror:1
+// GFX12: [0x05,0x00,0x3a,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+
+v_ashrrev_i16_e64_dpp v5, v1, v2 row_ror:15
+// GFX12: [0x05,0x00,0x3a,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+
+v_ashrrev_i16_e64_dpp v5, v1, v2 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x00,0x3a,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+
+v_ashrrev_i16_e64_dpp v5, v1, v2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x00,0x3a,0xd7,0xfa,0x04,0x02,0x00,0x01,0x5f,0x01,0x01]
+
+v_ashrrev_i16_e64_dpp v5, v1, v2 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: [0x05,0x00,0x3a,0xd7,0xfa,0x04,0x02,0x00,0x01,0x60,0x09,0x13]
+
+v_ashrrev_i16_e64_dpp v255, v255, v255 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: [0xff,0x00,0x3a,0xd7,0xfa,0xfe,0x03,0x00,0xff,0x6f,0x05,0x30]
+
+v_bcnt_u32_b32_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x1e,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+
+v_bcnt_u32_b32_e64_dpp v5, v1, v2 quad_perm:[0,1,2,3]
+// GFX12: [0x05,0x00,0x1e,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+
+v_bcnt_u32_b32_e64_dpp v5, v1, v2 row_mirror
+// GFX12: [0x05,0x00,0x1e,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+
+v_bcnt_u32_b32_e64_dpp v5, v1, v2 row_half_mirror
+// GFX12: [0x05,0x00,0x1e,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+
+v_bcnt_u32_b32_e64_dpp v5, v1, v2 row_shl:1
+// GFX12: [0x05,0x00,0x1e,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+
+v_bcnt_u32_b32_e64_dpp v5, v1, v2 row_shl:15
+// GFX12: [0x05,0x00,0x1e,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+
+v_bcnt_u32_b32_e64_dpp v5, v1, v2 row_shr:1
+// GFX12: [0x05,0x00,0x1e,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+
+v_bcnt_u32_b32_e64_dpp v5, v1, v2 row_shr:15
+// GFX12: [0x05,0x00,0x1e,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+
+v_bcnt_u32_b32_e64_dpp v5, v1, v2 row_ror:1
+// GFX12: [0x05,0x00,0x1e,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+
+v_bcnt_u32_b32_e64_dpp v5, v1, v2 row_ror:15
+// GFX12: [0x05,0x00,0x1e,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+
+v_bcnt_u32_b32_e64_dpp v5, v1, v2 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x00,0x1e,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+
+v_bcnt_u32_b32_e64_dpp v5, v1, v2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x00,0x1e,0xd7,0xfa,0x04,0x02,0x00,0x01,0x5f,0x01,0x01]
+
+v_bcnt_u32_b32_e64_dpp v5, v1, v2 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: [0x05,0x00,0x1e,0xd7,0xfa,0x04,0x02,0x00,0x01,0x60,0x09,0x13]
+
+v_bcnt_u32_b32_e64_dpp v255, v255, v255 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: [0xff,0x00,0x1e,0xd7,0xfa,0xfe,0x03,0x00,0xff,0x6f,0x05,0x30]
+
+v_bfe_i32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x11,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_bfe_i32_e64_dpp v5, v1, s2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x11,0xd6,0xfa,0x04,0x0c,0x04,0x01,0x1b,0x00,0xff]
+
+v_bfe_i32_e64_dpp v5, v1, 10, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x11,0xd6,0xfa,0x14,0x0d,0x04,0x01,0x1b,0x00,0xff]
+
+v_bfe_i32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX12: [0x05,0x00,0x11,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_bfe_i32_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX12: [0x05,0x00,0x11,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_bfe_i32_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX12: [0x05,0x00,0x11,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_bfe_i32_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX12: [0x05,0x00,0x11,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_bfe_i32_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX12: [0x05,0x00,0x11,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_bfe_i32_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX12: [0x05,0x00,0x11,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_bfe_i32_e64_dpp v5, v1, v2, ttmp15 row_shr:15
+// GFX12: [0x05,0x00,0x11,0xd6,0xfa,0x04,0xee,0x01,0x01,0x1f,0x01,0xff]
+
+v_bfe_i32_e64_dpp v5, v1, v2, exec_hi row_ror:1
+// GFX12: [0x05,0x00,0x11,0xd6,0xfa,0x04,0xfe,0x01,0x01,0x21,0x01,0xff]
+
+v_bfe_i32_e64_dpp v5, v1, v2, exec_lo row_ror:15
+// GFX12: [0x05,0x00,0x11,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x2f,0x01,0xff]
+
+v_bfe_i32_e64_dpp v5, v1, v2, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x00,0x11,0xd6,0xfa,0x04,0xf2,0x01,0x01,0x50,0x01,0xff]
+
+v_bfe_i32_e64_dpp v5, v1, v2, -1 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x00,0x11,0xd6,0xfa,0x04,0x06,0x03,0x01,0x5f,0x01,0x01]
+
+v_bfe_i32_e64_dpp v5, v1, v2, 0.5 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: [0x05,0x00,0x11,0xd6,0xfa,0x04,0xc2,0x03,0x01,0x60,0x09,0x13]
+
+v_bfe_i32_e64_dpp v255, v255, v255, src_scc row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: [0xff,0x00,0x11,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x05,0x30]
+
+v_bfe_u32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x10,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_bfe_u32_e64_dpp v5, v1, s2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x10,0xd6,0xfa,0x04,0x0c,0x04,0x01,0x1b,0x00,0xff]
+
+v_bfe_u32_e64_dpp v5, v1, 10, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x10,0xd6,0xfa,0x14,0x0d,0x04,0x01,0x1b,0x00,0xff]
+
+v_bfe_u32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX12: [0x05,0x00,0x10,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_bfe_u32_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX12: [0x05,0x00,0x10,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_bfe_u32_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX12: [0x05,0x00,0x10,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_bfe_u32_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX12: [0x05,0x00,0x10,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_bfe_u32_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX12: [0x05,0x00,0x10,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_bfe_u32_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX12: [0x05,0x00,0x10,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_bfe_u32_e64_dpp v5, v1, v2, ttmp15 row_shr:15
+// GFX12: [0x05,0x00,0x10,0xd6,0xfa,0x04,0xee,0x01,0x01,0x1f,0x01,0xff]
+
+v_bfe_u32_e64_dpp v5, v1, v2, exec_hi row_ror:1
+// GFX12: [0x05,0x00,0x10,0xd6,0xfa,0x04,0xfe,0x01,0x01,0x21,0x01,0xff]
+
+v_bfe_u32_e64_dpp v5, v1, v2, exec_lo row_ror:15
+// GFX12: [0x05,0x00,0x10,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x2f,0x01,0xff]
+
+v_bfe_u32_e64_dpp v5, v1, v2, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x00,0x10,0xd6,0xfa,0x04,0xf2,0x01,0x01,0x50,0x01,0xff]
+
+v_bfe_u32_e64_dpp v5, v1, v2, -1 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x00,0x10,0xd6,0xfa,0x04,0x06,0x03,0x01,0x5f,0x01,0x01]
+
+v_bfe_u32_e64_dpp v5, v1, v2, 0.5 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: [0x05,0x00,0x10,0xd6,0xfa,0x04,0xc2,0x03,0x01,0x60,0x09,0x13]
+
+v_bfe_u32_e64_dpp v255, v255, v255, src_scc row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: [0xff,0x00,0x10,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x05,0x30]
+
+v_bfi_b32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x12,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_bfi_b32_e64_dpp v5, v1, s2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x12,0xd6,0xfa,0x04,0x0c,0x04,0x01,0x1b,0x00,0xff]
+
+v_bfi_b32_e64_dpp v5, v1, 10, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x12,0xd6,0xfa,0x14,0x0d,0x04,0x01,0x1b,0x00,0xff]
+
+v_bfi_b32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX12: [0x05,0x00,0x12,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_bfi_b32_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX12: [0x05,0x00,0x12,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_bfi_b32_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX12: [0x05,0x00,0x12,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_bfi_b32_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX12: [0x05,0x00,0x12,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_bfi_b32_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX12: [0x05,0x00,0x12,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_bfi_b32_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX12: [0x05,0x00,0x12,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_bfi_b32_e64_dpp v5, v1, v2, ttmp15 row_shr:15
+// GFX12: [0x05,0x00,0x12,0xd6,0xfa,0x04,0xee,0x01,0x01,0x1f,0x01,0xff]
+
+v_bfi_b32_e64_dpp v5, v1, v2, exec_hi row_ror:1
+// GFX12: [0x05,0x00,0x12,0xd6,0xfa,0x04,0xfe,0x01,0x01,0x21,0x01,0xff]
+
+v_bfi_b32_e64_dpp v5, v1, v2, exec_lo row_ror:15
+// GFX12: [0x05,0x00,0x12,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x2f,0x01,0xff]
+
+v_bfi_b32_e64_dpp v5, v1, v2, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x00,0x12,0xd6,0xfa,0x04,0xf2,0x01,0x01,0x50,0x01,0xff]
+
+v_bfi_b32_e64_dpp v5, v1, v2, -1 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x00,0x12,0xd6,0xfa,0x04,0x06,0x03,0x01,0x5f,0x01,0x01]
+
+v_bfi_b32_e64_dpp v5, v1, v2, 0.5 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: [0x05,0x00,0x12,0xd6,0xfa,0x04,0xc2,0x03,0x01,0x60,0x09,0x13]
+
+v_bfi_b32_e64_dpp v255, v255, v255, src_scc row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: [0xff,0x00,0x12,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x05,0x30]
+
+v_bfm_b32_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x1d,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+
+v_bfm_b32_e64_dpp v5, v1, v2 quad_perm:[0,1,2,3]
+// GFX12: [0x05,0x00,0x1d,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+
+v_bfm_b32_e64_dpp v5, v1, v2 row_mirror
+// GFX12: [0x05,0x00,0x1d,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+
+v_bfm_b32_e64_dpp v5, v1, v2 row_half_mirror
+// GFX12: [0x05,0x00,0x1d,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+
+v_bfm_b32_e64_dpp v5, v1, v2 row_shl:1
+// GFX12: [0x05,0x00,0x1d,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+
+v_bfm_b32_e64_dpp v5, v1, v2 row_shl:15
+// GFX12: [0x05,0x00,0x1d,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+
+v_bfm_b32_e64_dpp v5, v1, v2 row_shr:1
+// GFX12: [0x05,0x00,0x1d,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+
+v_bfm_b32_e64_dpp v5, v1, v2 row_shr:15
+// GFX12: [0x05,0x00,0x1d,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+
+v_bfm_b32_e64_dpp v5, v1, v2 row_ror:1
+// GFX12: [0x05,0x00,0x1d,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+
+v_bfm_b32_e64_dpp v5, v1, v2 row_ror:15
+// GFX12: [0x05,0x00,0x1d,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+
+v_bfm_b32_e64_dpp v5, v1, v2 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x00,0x1d,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+
+v_bfm_b32_e64_dpp v5, v1, v2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x00,0x1d,0xd7,0xfa,0x04,0x02,0x00,0x01,0x5f,0x01,0x01]
+
+v_bfm_b32_e64_dpp v5, v1, v2 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: [0x05,0x00,0x1d,0xd7,0xfa,0x04,0x02,0x00,0x01,0x60,0x09,0x13]
+
+v_bfm_b32_e64_dpp v255, v255, v255 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: [0xff,0x00,0x1d,0xd7,0xfa,0xfe,0x03,0x00,0xff,0x6f,0x05,0x30]
+
+v_cndmask_b16_e64_dpp v5, v1, v2, s3 quad_perm:[3,2,1,0]
+// W32: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x0e,0x00,0x01,0x1b,0x00,0xff]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16_e64_dpp v5, v1, v2, s3 quad_perm:[0,1,2,3]
+// W32: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x0e,0x00,0x01,0xe4,0x00,0xff]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16_e64_dpp v5, v1, v2, s3 row_mirror
+// W32: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x0e,0x00,0x01,0x40,0x01,0xff]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16_e64_dpp v5, v1, s2, s3 row_mirror
+// W32: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x0c,0x00,0x01,0x40,0x01,0xff]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16_e64_dpp v5, v1, 10, s3 row_mirror
+// W32: [0x05,0x00,0x5d,0xd6,0xfa,0x14,0x0d,0x00,0x01,0x40,0x01,0xff]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16_e64_dpp v5, v1, v2, s3 row_half_mirror
+// W32: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x0e,0x00,0x01,0x41,0x01,0xff]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16_e64_dpp v5, v1, v2, s3 row_shl:1
+// W32: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x0e,0x00,0x01,0x01,0x01,0xff]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16_e64_dpp v5, v1, v2, s3 row_shl:15
+// W32: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x0e,0x00,0x01,0x0f,0x01,0xff]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16_e64_dpp v5, v1, v2, s3 row_shr:1
+// W32: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x0e,0x00,0x01,0x11,0x01,0xff]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16_e64_dpp v5, v1, v2, s3 row_shr:15
+// W32: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x0e,0x00,0x01,0x1f,0x01,0xff]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16_e64_dpp v5, v1, v2, s3 row_ror:1
+// W32: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x0e,0x00,0x01,0x21,0x01,0xff]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16_e64_dpp v5, v1, v2, s105 row_ror:15
+// W32: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x2f,0x01,0xff]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16_e64_dpp v5, v1, v2, vcc_hi row_share:0 row_mask:0xf bank_mask:0xf
+// W32: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0xae,0x01,0x01,0x50,0x01,0xff]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16_e64_dpp v5, |v1|, -v2, vcc_lo row_share:15 row_mask:0x0 bank_mask:0x1
+// W32: [0x05,0x01,0x5d,0xd6,0xfa,0x04,0xaa,0x41,0x01,0x5f,0x01,0x01]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16_e64_dpp v5, -v1, |v2|, ttmp15 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// W32: [0x05,0x02,0x5d,0xd6,0xfa,0x04,0xee,0x21,0x01,0x60,0x09,0x13]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] quad_perm:[3,2,1,0]
+// W64: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x1b,0x00,0xff]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] quad_perm:[0,1,2,3]
+// W64: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0xe4,0x00,0xff]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] row_mirror
+// W64: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x40,0x01,0xff]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] row_half_mirror
+// W64: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x41,0x01,0xff]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16_e64_dpp v5, v1, s2, s[6:7] row_half_mirror
+// W64: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x18,0x00,0x01,0x41,0x01,0xff]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16_e64_dpp v5, v1, 10, s[6:7] row_half_mirror
+// W64: [0x05,0x00,0x5d,0xd6,0xfa,0x14,0x19,0x00,0x01,0x41,0x01,0xff]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] row_shl:1
+// W64: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x01,0x01,0xff]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] row_shl:15
+// W64: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x0f,0x01,0xff]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] row_shr:1
+// W64: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x11,0x01,0xff]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] row_shr:15
+// W64: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x1f,0x01,0xff]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] row_ror:1
+// W64: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x21,0x01,0xff]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] row_ror:15
+// W64: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0x1a,0x00,0x01,0x2f,0x01,0xff]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16_e64_dpp v5, v1, v2, s[104:105] row_share:0 row_mask:0xf bank_mask:0xf
+// W64: [0x05,0x00,0x5d,0xd6,0xfa,0x04,0xa2,0x01,0x01,0x50,0x01,0xff]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16_e64_dpp v5, |v1|, -v2, vcc row_share:15 row_mask:0x0 bank_mask:0x1
+// W64: [0x05,0x01,0x5d,0xd6,0xfa,0x04,0xaa,0x41,0x01,0x5f,0x01,0x01]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16_e64_dpp v5, -v1, |v2|, ttmp[14:15] row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// W64: [0x05,0x02,0x5d,0xd6,0xfa,0x04,0xea,0x21,0x01,0x60,0x09,0x13]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16_e64_dpp v255, -|v255|, -|v255|, null row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: [0xff,0x03,0x5d,0xd6,0xfa,0xfe,0xf3,0x61,0xff,0x6f,0x05,0x30]
+
+v_cubeid_f32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x0c,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_cubeid_f32_e64_dpp v5, v1, s2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x0c,0xd6,0xfa,0x04,0x0c,0x04,0x01,0x1b,0x00,0xff]
+
+v_cubeid_f32_e64_dpp v5, v1, 2.0, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x0c,0xd6,0xfa,0xe8,0x0d,0x04,0x01,0x1b,0x00,0xff]
+
+v_cubeid_f32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX12: [0x05,0x00,0x0c,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_cubeid_f32_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX12: [0x05,0x00,0x0c,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_cubeid_f32_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX12: [0x05,0x00,0x0c,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_cubeid_f32_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX12: [0x05,0x00,0x0c,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_cubeid_f32_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX12: [0x05,0x00,0x0c,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_cubeid_f32_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX12: [0x05,0x00,0x0c,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_cubeid_f32_e64_dpp v5, |v1|, v2, -ttmp15 row_shr:15
+// GFX12: [0x05,0x01,0x0c,0xd6,0xfa,0x04,0xee,0x81,0x01,0x1f,0x01,0xff]
+
+v_cubeid_f32_e64_dpp v5, v1, -|v2|, exec_hi row_ror:1
+// GFX12: [0x05,0x02,0x0c,0xd6,0xfa,0x04,0xfe,0x41,0x01,0x21,0x01,0xff]
+
+v_cubeid_f32_e64_dpp v5, -v1, v2, |exec_lo| row_ror:15
+// GFX12: [0x05,0x04,0x0c,0xd6,0xfa,0x04,0xfa,0x21,0x01,0x2f,0x01,0xff]
+
+v_cubeid_f32_e64_dpp v5, -|v1|, -|v2|, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x03,0x0c,0xd6,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff]
+
+v_cubeid_f32_e64_dpp v5, -|v1|, v2, -|-1| mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x05,0x0c,0xd6,0xfa,0x04,0x06,0xab,0x01,0x5f,0x01,0x01]
+
+v_cubeid_f32_e64_dpp v5, v1, -|v2|, -|0.5| mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: [0x05,0x06,0x0c,0xd6,0xfa,0x04,0xc2,0xd3,0x01,0x60,0x09,0x13]
+
+v_cubeid_f32_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: [0xff,0x87,0x0c,0xd6,0xfa,0xfe,0xf7,0xfb,0xff,0x6f,0x05,0x30]
+
+v_cubema_f32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x0f,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_cubema_f32_e64_dpp v5, v1, s2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x0f,0xd6,0xfa,0x04,0x0c,0x04,0x01,0x1b,0x00,0xff]
+
+v_cubema_f32_e64_dpp v5, v1, 2.0, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x0f,0xd6,0xfa,0xe8,0x0d,0x04,0x01,0x1b,0x00,0xff]
+
+v_cubema_f32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX12: [0x05,0x00,0x0f,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_cubema_f32_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX12: [0x05,0x00,0x0f,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_cubema_f32_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX12: [0x05,0x00,0x0f,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_cubema_f32_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX12: [0x05,0x00,0x0f,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_cubema_f32_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX12: [0x05,0x00,0x0f,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_cubema_f32_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX12: [0x05,0x00,0x0f,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_cubema_f32_e64_dpp v5, |v1|, v2, -ttmp15 row_shr:15
+// GFX12: [0x05,0x01,0x0f,0xd6,0xfa,0x04,0xee,0x81,0x01,0x1f,0x01,0xff]
+
+v_cubema_f32_e64_dpp v5, v1, -|v2|, exec_hi row_ror:1
+// GFX12: [0x05,0x02,0x0f,0xd6,0xfa,0x04,0xfe,0x41,0x01,0x21,0x01,0xff]
+
+v_cubema_f32_e64_dpp v5, -v1, v2, |exec_lo| row_ror:15
+// GFX12: [0x05,0x04,0x0f,0xd6,0xfa,0x04,0xfa,0x21,0x01,0x2f,0x01,0xff]
+
+v_cubema_f32_e64_dpp v5, -|v1|, -|v2|, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x03,0x0f,0xd6,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff]
+
+v_cubema_f32_e64_dpp v5, -|v1|, v2, -|-1| mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x05,0x0f,0xd6,0xfa,0x04,0x06,0xab,0x01,0x5f,0x01,0x01]
+
+v_cubema_f32_e64_dpp v5, v1, -|v2|, -|0.5| mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: [0x05,0x06,0x0f,0xd6,0xfa,0x04,0xc2,0xd3,0x01,0x60,0x09,0x13]
+
+v_cubema_f32_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: [0xff,0x87,0x0f,0xd6,0xfa,0xfe,0xf7,0xfb,0xff,0x6f,0x05,0x30]
+
+v_cubesc_f32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x0d,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_cubesc_f32_e64_dpp v5, v1, s2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x0d,0xd6,0xfa,0x04,0x0c,0x04,0x01,0x1b,0x00,0xff]
+
+v_cubesc_f32_e64_dpp v5, v1, 2.0, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x0d,0xd6,0xfa,0xe8,0x0d,0x04,0x01,0x1b,0x00,0xff]
+
+v_cubesc_f32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX12: [0x05,0x00,0x0d,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_cubesc_f32_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX12: [0x05,0x00,0x0d,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_cubesc_f32_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX12: [0x05,0x00,0x0d,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_cubesc_f32_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX12: [0x05,0x00,0x0d,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_cubesc_f32_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX12: [0x05,0x00,0x0d,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_cubesc_f32_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX12: [0x05,0x00,0x0d,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_cubesc_f32_e64_dpp v5, |v1|, v2, -ttmp15 row_shr:15
+// GFX12: [0x05,0x01,0x0d,0xd6,0xfa,0x04,0xee,0x81,0x01,0x1f,0x01,0xff]
+
+v_cubesc_f32_e64_dpp v5, v1, -|v2|, exec_hi row_ror:1
+// GFX12: [0x05,0x02,0x0d,0xd6,0xfa,0x04,0xfe,0x41,0x01,0x21,0x01,0xff]
+
+v_cubesc_f32_e64_dpp v5, -v1, v2, |exec_lo| row_ror:15
+// GFX12: [0x05,0x04,0x0d,0xd6,0xfa,0x04,0xfa,0x21,0x01,0x2f,0x01,0xff]
+
+v_cubesc_f32_e64_dpp v5, -|v1|, -|v2|, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x03,0x0d,0xd6,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff]
+
+v_cubesc_f32_e64_dpp v5, -|v1|, v2, -|-1| mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x05,0x0d,0xd6,0xfa,0x04,0x06,0xab,0x01,0x5f,0x01,0x01]
+
+v_cubesc_f32_e64_dpp v5, v1, -|v2|, -|0.5| mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: [0x05,0x06,0x0d,0xd6,0xfa,0x04,0xc2,0xd3,0x01,0x60,0x09,0x13]
+
+v_cubesc_f32_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: [0xff,0x87,0x0d,0xd6,0xfa,0xfe,0xf7,0xfb,0xff,0x6f,0x05,0x30]
+
+v_cubetc_f32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x0e,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_cubetc_f32_e64_dpp v5, v1, s2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x0e,0xd6,0xfa,0x04,0x0c,0x04,0x01,0x1b,0x00,0xff]
+
+v_cubetc_f32_e64_dpp v5, v1, 2.0, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x0e,0xd6,0xfa,0xe8,0x0d,0x04,0x01,0x1b,0x00,0xff]
+
+v_cubetc_f32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX12: [0x05,0x00,0x0e,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_cubetc_f32_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX12: [0x05,0x00,0x0e,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_cubetc_f32_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX12: [0x05,0x00,0x0e,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_cubetc_f32_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX12: [0x05,0x00,0x0e,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_cubetc_f32_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX12: [0x05,0x00,0x0e,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_cubetc_f32_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX12: [0x05,0x00,0x0e,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_cubetc_f32_e64_dpp v5, |v1|, v2, -ttmp15 row_shr:15
+// GFX12: [0x05,0x01,0x0e,0xd6,0xfa,0x04,0xee,0x81,0x01,0x1f,0x01,0xff]
+
+v_cubetc_f32_e64_dpp v5, v1, -|v2|, exec_hi row_ror:1
+// GFX12: [0x05,0x02,0x0e,0xd6,0xfa,0x04,0xfe,0x41,0x01,0x21,0x01,0xff]
+
+v_cubetc_f32_e64_dpp v5, -v1, v2, |exec_lo| row_ror:15
+// GFX12: [0x05,0x04,0x0e,0xd6,0xfa,0x04,0xfa,0x21,0x01,0x2f,0x01,0xff]
+
+v_cubetc_f32_e64_dpp v5, -|v1|, -|v2|, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x03,0x0e,0xd6,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff]
+
+v_cubetc_f32_e64_dpp v5, -|v1|, v2, -|-1| mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x05,0x0e,0xd6,0xfa,0x04,0x06,0xab,0x01,0x5f,0x01,0x01]
+
+v_cubetc_f32_e64_dpp v5, v1, -|v2|, -|0.5| mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: [0x05,0x06,0x0e,0xd6,0xfa,0x04,0xc2,0xd3,0x01,0x60,0x09,0x13]
+
+v_cubetc_f32_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: [0xff,0x87,0x0e,0xd6,0xfa,0xfe,0xf7,0xfb,0xff,0x6f,0x05,0x30]
+
+v_cvt_pk_bf8_f32_e64_dpp v1, -v2, |v3| quad_perm:[3,2,1,0] row_mask:0xe bank_mask:0xd
+// GFX12: encoding: [0x01,0x02,0x6a,0xd7,0xfa,0x06,0x02,0x20,0x02,0x1b,0x00,0xed]
+
+v_cvt_pk_bf8_f32_e64_dpp v1, -v2, |v3| quad_perm:[0,1,2,3]
+// GFX12: encoding: [0x01,0x02,0x6a,0xd7,0xfa,0x06,0x02,0x20,0x02,0xe4,0x00,0xff]
+
+v_cvt_pk_bf8_f32_e64_dpp v6, -v2, |v3| quad_perm:[3,2,1,0] row_mask:0xe bank_mask:0xd
+// GFX12: encoding: [0x06,0x02,0x6a,0xd7,0xfa,0x06,0x02,0x20,0x02,0x1b,0x00,0xed]
+
+v_cvt_pk_bf8_f32_e64_dpp v1, -v6, |v3| quad_perm:[3,2,1,0] row_mask:0xe bank_mask:0xd
+// GFX12: encoding: [0x01,0x02,0x6a,0xd7,0xfa,0x06,0x02,0x20,0x06,0x1b,0x00,0xed]
+
+v_cvt_pk_bf8_f32_e64_dpp v1, -v2, |v255| quad_perm:[3,2,1,0] row_mask:0xe bank_mask:0xd
+// GFX12: encoding: [0x01,0x02,0x6a,0xd7,0xfa,0xfe,0x03,0x20,0x02,0x1b,0x00,0xed]
+
+v_cvt_pk_bf8_f32_e64_dpp v1, -v2, |v3| quad_perm:[0,2,1,3] row_mask:0xe bank_mask:0xd
+// GFX12: encoding: [0x01,0x02,0x6a,0xd7,0xfa,0x06,0x02,0x20,0x02,0xd8,0x00,0xed]
+
+v_cvt_pk_bf8_f32_e64_dpp v1, -v2, |v3| quad_perm:[3,2,1,0] row_mask:0x2 bank_mask:0xd
+// GFX12: encoding: [0x01,0x02,0x6a,0xd7,0xfa,0x06,0x02,0x20,0x02,0x1b,0x00,0x2d]
+
+v_cvt_pk_bf8_f32_e64_dpp v1, -v2, |v3| quad_perm:[3,2,1,0] row_mask:0xe bank_mask:0x5
+// GFX12: encoding: [0x01,0x02,0x6a,0xd7,0xfa,0x06,0x02,0x20,0x02,0x1b,0x00,0xe5]
+
+v_cvt_pk_bf8_f32_e64_dpp v1, -v2, |v3| quad_perm:[3,2,1,0] row_mask:0xe bank_mask:0xd fi:1
+// GFX12: encoding: [0x01,0x02,0x6a,0xd7,0xfa,0x06,0x02,0x20,0x02,0x1b,0x04,0xed]
+
+v_cvt_pk_fp8_f32_e64_dpp v1, -v2, |v3| quad_perm:[3,2,1,0] row_mask:0xe bank_mask:0xd
+// GFX12: encoding: [0x01,0x02,0x69,0xd7,0xfa,0x06,0x02,0x20,0x02,0x1b,0x00,0xed]
+
+v_cvt_pk_fp8_f32_e64_dpp v1, -v2, |v3| quad_perm:[0,1,2,3]
+// GFX12: encoding: [0x01,0x02,0x69,0xd7,0xfa,0x06,0x02,0x20,0x02,0xe4,0x00,0xff]
+
+v_cvt_pk_fp8_f32_e64_dpp v6, -v2, |v3| quad_perm:[3,2,1,0] row_mask:0xe bank_mask:0xd
+// GFX12: encoding: [0x06,0x02,0x69,0xd7,0xfa,0x06,0x02,0x20,0x02,0x1b,0x00,0xed]
+
+v_cvt_pk_fp8_f32_e64_dpp v1, -v6, |v3| quad_perm:[3,2,1,0] row_mask:0xe bank_mask:0xd
+// GFX12: encoding: [0x01,0x02,0x69,0xd7,0xfa,0x06,0x02,0x20,0x06,0x1b,0x00,0xed]
+
+v_cvt_pk_fp8_f32_e64_dpp v1, -v2, |v255| quad_perm:[3,2,1,0] row_mask:0xe bank_mask:0xd
+// GFX12: encoding: [0x01,0x02,0x69,0xd7,0xfa,0xfe,0x03,0x20,0x02,0x1b,0x00,0xed]
+
+v_cvt_pk_fp8_f32_e64_dpp v1, -v2, |v3| quad_perm:[0,2,1,3] row_mask:0xe bank_mask:0xd
+// GFX12: encoding: [0x01,0x02,0x69,0xd7,0xfa,0x06,0x02,0x20,0x02,0xd8,0x00,0xed]
+
+v_cvt_pk_fp8_f32_e64_dpp v1, -v2, |v3| quad_perm:[3,2,1,0] row_mask:0x2 bank_mask:0xd
+// GFX12: encoding: [0x01,0x02,0x69,0xd7,0xfa,0x06,0x02,0x20,0x02,0x1b,0x00,0x2d]
+
+v_cvt_pk_fp8_f32_e64_dpp v1, -v2, |v3| quad_perm:[3,2,1,0] row_mask:0xe bank_mask:0x5
+// GFX12: encoding: [0x01,0x02,0x69,0xd7,0xfa,0x06,0x02,0x20,0x02,0x1b,0x00,0xe5]
+
+v_cvt_pk_fp8_f32_e64_dpp v1, -v2, |v3| quad_perm:[3,2,1,0] row_mask:0xe bank_mask:0xd fi:1
+// GFX12: encoding: [0x01,0x02,0x69,0xd7,0xfa,0x06,0x02,0x20,0x02,0x1b,0x04,0xed]
+
+v_cvt_sr_bf8_f32_e64_dpp v1, -v2, v3 quad_perm:[3,2,1,0] row_mask:0xe bank_mask:0xd
+// GFX12: encoding: [0x01,0x00,0x6c,0xd7,0xfa,0x06,0x02,0x20,0x02,0x1b,0x00,0xed]
+
+v_cvt_sr_bf8_f32_e64_dpp v1, -v2, v3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf
+// GFX12: encoding: [0x01,0x00,0x6c,0xd7,0xfa,0x06,0x02,0x20,0x02,0xe4,0x00,0xff]
+
+v_cvt_sr_bf8_f32_e64_dpp v6, -v2, v3 quad_perm:[3,2,1,0] row_mask:0xe bank_mask:0xd
+// GFX12: encoding: [0x06,0x00,0x6c,0xd7,0xfa,0x06,0x02,0x20,0x02,0x1b,0x00,0xed]
+
+v_cvt_sr_bf8_f32_e64_dpp v1, -v6, v3 quad_perm:[3,2,1,0] row_mask:0xe bank_mask:0xd
+// GFX12: encoding: [0x01,0x00,0x6c,0xd7,0xfa,0x06,0x02,0x20,0x06,0x1b,0x00,0xed]
+
+v_cvt_sr_bf8_f32_e64_dpp v1, -v2, v255 quad_perm:[3,2,1,0] row_mask:0xe bank_mask:0xd
+// GFX12: encoding: [0x01,0x00,0x6c,0xd7,0xfa,0xfe,0x03,0x20,0x02,0x1b,0x00,0xed]
+
+v_cvt_sr_bf8_f32_e64_dpp v1, -v2, v3 quad_perm:[0,2,1,3] row_mask:0xe bank_mask:0xd
+// GFX12: encoding: [0x01,0x00,0x6c,0xd7,0xfa,0x06,0x02,0x20,0x02,0xd8,0x00,0xed]
+
+v_cvt_sr_bf8_f32_e64_dpp v1, -v2, v3 quad_perm:[3,2,1,0] row_mask:0x2 bank_mask:0xd
+// GFX12: encoding: [0x01,0x00,0x6c,0xd7,0xfa,0x06,0x02,0x20,0x02,0x1b,0x00,0x2d]
+
+v_cvt_sr_bf8_f32_e64_dpp v1, -v2, v3 quad_perm:[3,2,1,0] row_mask:0xe bank_mask:0x5
+// GFX12: encoding: [0x01,0x00,0x6c,0xd7,0xfa,0x06,0x02,0x20,0x02,0x1b,0x00,0xe5]
+
+v_cvt_sr_bf8_f32_e64_dpp v1, -v2, v3 quad_perm:[3,2,1,0] row_mask:0xe bank_mask:0xd fi:1
+// GFX12: encoding: [0x01,0x00,0x6c,0xd7,0xfa,0x06,0x02,0x20,0x02,0x1b,0x04,0xed]
+
+v_cvt_sr_bf8_f32 v1, v2, v3 byte_sel:0 quad_perm:[3,2,1,0]
+// GFX12: v_cvt_sr_bf8_f32_e64_dpp v1, v2, v3 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x00,0x6c,0xd7,0xfa,0x06,0x02,0x00,0x02,0x1b,0x00,0xff]
+
+v_cvt_sr_bf8_f32 v1, v2, v3 byte_sel:1 quad_perm:[3,2,1,0]
+// GFX12: v_cvt_sr_bf8_f32_e64_dpp v1, v2, v3 byte_sel:1 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x20,0x6c,0xd7,0xfa,0x06,0x02,0x00,0x02,0x1b,0x00,0xff]
+
+v_cvt_sr_bf8_f32 v1, v2, v3 byte_sel:2 quad_perm:[3,2,1,0]
+// GFX12: v_cvt_sr_bf8_f32_e64_dpp v1, v2, v3 byte_sel:2 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x40,0x6c,0xd7,0xfa,0x06,0x02,0x00,0x02,0x1b,0x00,0xff]
+
+v_cvt_sr_bf8_f32 v1, v2, v3 byte_sel:3 quad_perm:[3,2,1,0]
+// GFX12: v_cvt_sr_bf8_f32_e64_dpp v1, v2, v3 byte_sel:3 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x60,0x6c,0xd7,0xfa,0x06,0x02,0x00,0x02,0x1b,0x00,0xff]
+
+v_cvt_sr_fp8_f32_e64_dpp v1, -v2, v3 quad_perm:[3,2,1,0] row_mask:0xe bank_mask:0xd
+// GFX12: encoding: [0x01,0x00,0x6b,0xd7,0xfa,0x06,0x02,0x20,0x02,0x1b,0x00,0xed]
+
+v_cvt_sr_fp8_f32_e64_dpp v1, -v2, v3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf
+// GFX12: encoding: [0x01,0x00,0x6b,0xd7,0xfa,0x06,0x02,0x20,0x02,0xe4,0x00,0xff]
+
+v_cvt_sr_fp8_f32_e64_dpp v6, -v2, v3 quad_perm:[3,2,1,0] row_mask:0xe bank_mask:0xd
+// GFX12: encoding: [0x06,0x00,0x6b,0xd7,0xfa,0x06,0x02,0x20,0x02,0x1b,0x00,0xed]
+
+v_cvt_sr_fp8_f32_e64_dpp v1, -v6, v3 quad_perm:[3,2,1,0] row_mask:0xe bank_mask:0xd
+// GFX12: encoding: [0x01,0x00,0x6b,0xd7,0xfa,0x06,0x02,0x20,0x06,0x1b,0x00,0xed]
+
+v_cvt_sr_fp8_f32_e64_dpp v1, -v2, v255 quad_perm:[3,2,1,0] row_mask:0xe bank_mask:0xd
+// GFX12: encoding: [0x01,0x00,0x6b,0xd7,0xfa,0xfe,0x03,0x20,0x02,0x1b,0x00,0xed]
+
+v_cvt_sr_fp8_f32_e64_dpp v1, -v2, v3 quad_perm:[0,2,1,3] row_mask:0xe bank_mask:0xd
+// GFX12: encoding: [0x01,0x00,0x6b,0xd7,0xfa,0x06,0x02,0x20,0x02,0xd8,0x00,0xed]
+
+v_cvt_sr_fp8_f32_e64_dpp v1, -v2, v3 quad_perm:[3,2,1,0] row_mask:0x2 bank_mask:0xd
+// GFX12: encoding: [0x01,0x00,0x6b,0xd7,0xfa,0x06,0x02,0x20,0x02,0x1b,0x00,0x2d]
+
+v_cvt_sr_fp8_f32_e64_dpp v1, -v2, v3 quad_perm:[3,2,1,0] row_mask:0xe bank_mask:0x5
+// GFX12: encoding: [0x01,0x00,0x6b,0xd7,0xfa,0x06,0x02,0x20,0x02,0x1b,0x00,0xe5]
+
+v_cvt_sr_fp8_f32_e64_dpp v1, -v2, v3 quad_perm:[3,2,1,0] row_mask:0xe bank_mask:0xd fi:1
+// GFX12: encoding: [0x01,0x00,0x6b,0xd7,0xfa,0x06,0x02,0x20,0x02,0x1b,0x04,0xed]
+
+v_cvt_sr_fp8_f32 v1, v2, v3 byte_sel:0 quad_perm:[3,2,1,0]
+// GFX12: v_cvt_sr_fp8_f32_e64_dpp v1, v2, v3 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x00,0x6b,0xd7,0xfa,0x06,0x02,0x00,0x02,0x1b,0x00,0xff]
+
+v_cvt_sr_fp8_f32 v1, v2, v3 byte_sel:1 quad_perm:[3,2,1,0]
+// GFX12: v_cvt_sr_fp8_f32_e64_dpp v1, v2, v3 byte_sel:1 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x20,0x6b,0xd7,0xfa,0x06,0x02,0x00,0x02,0x1b,0x00,0xff]
+
+v_cvt_sr_fp8_f32 v1, v2, v3 byte_sel:2 quad_perm:[3,2,1,0]
+// GFX12: v_cvt_sr_fp8_f32_e64_dpp v1, v2, v3 byte_sel:2 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x40,0x6b,0xd7,0xfa,0x06,0x02,0x00,0x02,0x1b,0x00,0xff]
+
+v_cvt_sr_fp8_f32 v1, v2, v3 byte_sel:3 quad_perm:[3,2,1,0]
+// GFX12: v_cvt_sr_fp8_f32_e64_dpp v1, v2, v3 byte_sel:3 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x60,0x6b,0xd7,0xfa,0x06,0x02,0x00,0x02,0x1b,0x00,0xff]
+
+v_cvt_pk_i16_f32_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x06,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+
+v_cvt_pk_i16_f32_e64_dpp v5, v1, v2 quad_perm:[0,1,2,3]
+// GFX12: [0x05,0x00,0x06,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+
+v_cvt_pk_i16_f32_e64_dpp v5, v1, v2 row_mirror
+// GFX12: [0x05,0x00,0x06,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+
+v_cvt_pk_i16_f32_e64_dpp v5, v1, v2 row_half_mirror
+// GFX12: [0x05,0x00,0x06,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+
+v_cvt_pk_i16_f32_e64_dpp v5, v1, v2 row_shl:1
+// GFX12: [0x05,0x00,0x06,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+
+v_cvt_pk_i16_f32_e64_dpp v5, v1, v2 row_shl:15
+// GFX12: [0x05,0x00,0x06,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+
+v_cvt_pk_i16_f32_e64_dpp v5, v1, v2 row_shr:1
+// GFX12: [0x05,0x00,0x06,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+
+v_cvt_pk_i16_f32_e64_dpp v5, v1, v2 row_shr:15
+// GFX12: [0x05,0x00,0x06,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+
+v_cvt_pk_i16_f32_e64_dpp v5, v1, v2 row_ror:1
+// GFX12: [0x05,0x00,0x06,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+
+v_cvt_pk_i16_f32_e64_dpp v5, v1, v2 row_ror:15
+// GFX12: [0x05,0x00,0x06,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+
+v_cvt_pk_i16_f32_e64_dpp v5, v1, v2 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x00,0x06,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+
+v_cvt_pk_i16_f32_e64_dpp v5, |v1|, -v2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x01,0x06,0xd7,0xfa,0x04,0x02,0x40,0x01,0x5f,0x01,0x01]
+
+v_cvt_pk_i16_f32_e64_dpp v5, -v1, |v2| row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: [0x05,0x02,0x06,0xd7,0xfa,0x04,0x02,0x20,0x01,0x60,0x09,0x13]
+
+v_cvt_pk_i16_f32_e64_dpp v255, -|v255|, -|v255| row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: [0xff,0x03,0x06,0xd7,0xfa,0xfe,0x03,0x60,0xff,0x6f,0x05,0x30]
+
+v_cvt_pk_i16_i32_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x24,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+
+v_cvt_pk_i16_i32_e64_dpp v5, v1, v2 quad_perm:[0,1,2,3]
+// GFX12: [0x05,0x00,0x24,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+
+v_cvt_pk_i16_i32_e64_dpp v5, v1, v2 row_mirror
+// GFX12: [0x05,0x00,0x24,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+
+v_cvt_pk_i16_i32_e64_dpp v5, v1, v2 row_half_mirror
+// GFX12: [0x05,0x00,0x24,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+
+v_cvt_pk_i16_i32_e64_dpp v5, v1, v2 row_shl:1
+// GFX12: [0x05,0x00,0x24,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+
+v_cvt_pk_i16_i32_e64_dpp v5, v1, v2 row_shl:15
+// GFX12: [0x05,0x00,0x24,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+
+v_cvt_pk_i16_i32_e64_dpp v5, v1, v2 row_shr:1
+// GFX12: [0x05,0x00,0x24,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+
+v_cvt_pk_i16_i32_e64_dpp v5, v1, v2 row_shr:15
+// GFX12: [0x05,0x00,0x24,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+
+v_cvt_pk_i16_i32_e64_dpp v5, v1, v2 row_ror:1
+// GFX12: [0x05,0x00,0x24,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+
+v_cvt_pk_i16_i32_e64_dpp v5, v1, v2 row_ror:15
+// GFX12: [0x05,0x00,0x24,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+
+v_cvt_pk_i16_i32_e64_dpp v5, v1, v2 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x00,0x24,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+
+v_cvt_pk_i16_i32_e64_dpp v5, v1, v2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x00,0x24,0xd7,0xfa,0x04,0x02,0x00,0x01,0x5f,0x01,0x01]
+
+v_cvt_pk_i16_i32_e64_dpp v5, v1, v2 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: [0x05,0x00,0x24,0xd7,0xfa,0x04,0x02,0x00,0x01,0x60,0x09,0x13]
+
+v_cvt_pk_i16_i32_e64_dpp v255, v255, v255 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: [0xff,0x00,0x24,0xd7,0xfa,0xfe,0x03,0x00,0xff,0x6f,0x05,0x30]
+
+v_cvt_pk_norm_i16_f16_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x12,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+
+v_cvt_pk_norm_i16_f16_e64_dpp v5, v1, v2 quad_perm:[0,1,2,3]
+// GFX12: [0x05,0x00,0x12,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+
+v_cvt_pk_norm_i16_f16_e64_dpp v5, v1, v2 row_mirror
+// GFX12: [0x05,0x00,0x12,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+
+v_cvt_pk_norm_i16_f16_e64_dpp v5, v1, v2 row_half_mirror
+// GFX12: [0x05,0x00,0x12,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+
+v_cvt_pk_norm_i16_f16_e64_dpp v5, v1, v2 row_shl:1
+// GFX12: [0x05,0x00,0x12,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+
+v_cvt_pk_norm_i16_f16_e64_dpp v5, v1, v2 row_shl:15
+// GFX12: [0x05,0x00,0x12,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+
+v_cvt_pk_norm_i16_f16_e64_dpp v5, v1, v2 row_shr:1
+// GFX12: [0x05,0x00,0x12,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+
+v_cvt_pk_norm_i16_f16_e64_dpp v5, v1, v2 row_shr:15
+// GFX12: [0x05,0x00,0x12,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+
+v_cvt_pk_norm_i16_f16_e64_dpp v5, v1, v2 row_ror:1
+// GFX12: [0x05,0x00,0x12,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+
+v_cvt_pk_norm_i16_f16_e64_dpp v5, v1, v2 row_ror:15
+// GFX12: [0x05,0x00,0x12,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+
+v_cvt_pk_norm_i16_f16_e64_dpp v5, v1, v2 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x00,0x12,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+
+v_cvt_pk_norm_i16_f16_e64_dpp v5, |v1|, -v2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x01,0x12,0xd7,0xfa,0x04,0x02,0x40,0x01,0x5f,0x01,0x01]
+
+v_cvt_pk_norm_i16_f16_e64_dpp v5, -v1, |v2| row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: [0x05,0x02,0x12,0xd7,0xfa,0x04,0x02,0x20,0x01,0x60,0x09,0x13]
+
+v_cvt_pk_norm_i16_f16_e64_dpp v255, -|v255|, -|v255| row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: [0xff,0x03,0x12,0xd7,0xfa,0xfe,0x03,0x60,0xff,0x6f,0x05,0x30]
+
+v_cvt_pk_norm_u16_f16_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x13,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+
+v_cvt_pk_norm_u16_f16_e64_dpp v5, v1, v2 quad_perm:[0,1,2,3]
+// GFX12: [0x05,0x00,0x13,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+
+v_cvt_pk_norm_u16_f16_e64_dpp v5, v1, v2 row_mirror
+// GFX12: [0x05,0x00,0x13,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+
+v_cvt_pk_norm_u16_f16_e64_dpp v5, v1, v2 row_half_mirror
+// GFX12: [0x05,0x00,0x13,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+
+v_cvt_pk_norm_u16_f16_e64_dpp v5, v1, v2 row_shl:1
+// GFX12: [0x05,0x00,0x13,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+
+v_cvt_pk_norm_u16_f16_e64_dpp v5, v1, v2 row_shl:15
+// GFX12: [0x05,0x00,0x13,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+
+v_cvt_pk_norm_u16_f16_e64_dpp v5, v1, v2 row_shr:1
+// GFX12: [0x05,0x00,0x13,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+
+v_cvt_pk_norm_u16_f16_e64_dpp v5, v1, v2 row_shr:15
+// GFX12: [0x05,0x00,0x13,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+
+v_cvt_pk_norm_u16_f16_e64_dpp v5, v1, v2 row_ror:1
+// GFX12: [0x05,0x00,0x13,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+
+v_cvt_pk_norm_u16_f16_e64_dpp v5, v1, v2 row_ror:15
+// GFX12: [0x05,0x00,0x13,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+
+v_cvt_pk_norm_u16_f16_e64_dpp v5, v1, v2 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x00,0x13,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+
+v_cvt_pk_norm_u16_f16_e64_dpp v5, |v1|, -v2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x01,0x13,0xd7,0xfa,0x04,0x02,0x40,0x01,0x5f,0x01,0x01]
+
+v_cvt_pk_norm_u16_f16_e64_dpp v5, -v1, |v2| row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: [0x05,0x02,0x13,0xd7,0xfa,0x04,0x02,0x20,0x01,0x60,0x09,0x13]
+
+v_cvt_pk_norm_u16_f16_e64_dpp v255, -|v255|, -|v255| row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: [0xff,0x03,0x13,0xd7,0xfa,0xfe,0x03,0x60,0xff,0x6f,0x05,0x30]
+
+v_cvt_pk_u16_f32_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x07,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+
+v_cvt_pk_u16_f32_e64_dpp v5, v1, v2 quad_perm:[0,1,2,3]
+// GFX12: [0x05,0x00,0x07,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+
+v_cvt_pk_u16_f32_e64_dpp v5, v1, v2 row_mirror
+// GFX12: [0x05,0x00,0x07,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+
+v_cvt_pk_u16_f32_e64_dpp v5, v1, v2 row_half_mirror
+// GFX12: [0x05,0x00,0x07,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+
+v_cvt_pk_u16_f32_e64_dpp v5, v1, v2 row_shl:1
+// GFX12: [0x05,0x00,0x07,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+
+v_cvt_pk_u16_f32_e64_dpp v5, v1, v2 row_shl:15
+// GFX12: [0x05,0x00,0x07,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+
+v_cvt_pk_u16_f32_e64_dpp v5, v1, v2 row_shr:1
+// GFX12: [0x05,0x00,0x07,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+
+v_cvt_pk_u16_f32_e64_dpp v5, v1, v2 row_shr:15
+// GFX12: [0x05,0x00,0x07,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+
+v_cvt_pk_u16_f32_e64_dpp v5, v1, v2 row_ror:1
+// GFX12: [0x05,0x00,0x07,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+
+v_cvt_pk_u16_f32_e64_dpp v5, v1, v2 row_ror:15
+// GFX12: [0x05,0x00,0x07,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+
+v_cvt_pk_u16_f32_e64_dpp v5, v1, v2 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x00,0x07,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+
+v_cvt_pk_u16_f32_e64_dpp v5, |v1|, -v2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x01,0x07,0xd7,0xfa,0x04,0x02,0x40,0x01,0x5f,0x01,0x01]
+
+v_cvt_pk_u16_f32_e64_dpp v5, -v1, |v2| row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: [0x05,0x02,0x07,0xd7,0xfa,0x04,0x02,0x20,0x01,0x60,0x09,0x13]
+
+v_cvt_pk_u16_f32_e64_dpp v255, -|v255|, -|v255| row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: [0xff,0x03,0x07,0xd7,0xfa,0xfe,0x03,0x60,0xff,0x6f,0x05,0x30]
+
+v_cvt_pk_u16_u32_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x23,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+
+v_cvt_pk_u16_u32_e64_dpp v5, v1, v2 quad_perm:[0,1,2,3]
+// GFX12: [0x05,0x00,0x23,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+
+v_cvt_pk_u16_u32_e64_dpp v5, v1, v2 row_mirror
+// GFX12: [0x05,0x00,0x23,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+
+v_cvt_pk_u16_u32_e64_dpp v5, v1, v2 row_half_mirror
+// GFX12: [0x05,0x00,0x23,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+
+v_cvt_pk_u16_u32_e64_dpp v5, v1, v2 row_shl:1
+// GFX12: [0x05,0x00,0x23,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+
+v_cvt_pk_u16_u32_e64_dpp v5, v1, v2 row_shl:15
+// GFX12: [0x05,0x00,0x23,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+
+v_cvt_pk_u16_u32_e64_dpp v5, v1, v2 row_shr:1
+// GFX12: [0x05,0x00,0x23,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+
+v_cvt_pk_u16_u32_e64_dpp v5, v1, v2 row_shr:15
+// GFX12: [0x05,0x00,0x23,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+
+v_cvt_pk_u16_u32_e64_dpp v5, v1, v2 row_ror:1
+// GFX12: [0x05,0x00,0x23,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+
+v_cvt_pk_u16_u32_e64_dpp v5, v1, v2 row_ror:15
+// GFX12: [0x05,0x00,0x23,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+
+v_cvt_pk_u16_u32_e64_dpp v5, v1, v2 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x00,0x23,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+
+v_cvt_pk_u16_u32_e64_dpp v5, v1, v2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x00,0x23,0xd7,0xfa,0x04,0x02,0x00,0x01,0x5f,0x01,0x01]
+
+v_cvt_pk_u16_u32_e64_dpp v5, v1, v2 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: [0x05,0x00,0x23,0xd7,0xfa,0x04,0x02,0x00,0x01,0x60,0x09,0x13]
+
+v_cvt_pk_u16_u32_e64_dpp v255, v255, v255 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: [0xff,0x00,0x23,0xd7,0xfa,0xfe,0x03,0x00,0xff,0x6f,0x05,0x30]
+
+v_cvt_pk_u8_f32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x26,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_cvt_pk_u8_f32_e64_dpp v5, v1, s2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x26,0xd6,0xfa,0x04,0x0c,0x04,0x01,0x1b,0x00,0xff]
+
+v_cvt_pk_u8_f32_e64_dpp v5, v1, 2.0, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x26,0xd6,0xfa,0xe8,0x0d,0x04,0x01,0x1b,0x00,0xff]
+
+v_cvt_pk_u8_f32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX12: [0x05,0x00,0x26,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_cvt_pk_u8_f32_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX12: [0x05,0x00,0x26,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_cvt_pk_u8_f32_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX12: [0x05,0x00,0x26,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_cvt_pk_u8_f32_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX12: [0x05,0x00,0x26,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_cvt_pk_u8_f32_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX12: [0x05,0x00,0x26,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_cvt_pk_u8_f32_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX12: [0x05,0x00,0x26,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_cvt_pk_u8_f32_e64_dpp v5, v1, v2, ttmp15 row_shr:15
+// GFX12: [0x05,0x00,0x26,0xd6,0xfa,0x04,0xee,0x01,0x01,0x1f,0x01,0xff]
+
+v_cvt_pk_u8_f32_e64_dpp v5, v1, v2, exec_hi row_ror:1
+// GFX12: [0x05,0x00,0x26,0xd6,0xfa,0x04,0xfe,0x01,0x01,0x21,0x01,0xff]
+
+v_cvt_pk_u8_f32_e64_dpp v5, v1, v2, exec_lo row_ror:15
+// GFX12: [0x05,0x00,0x26,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x2f,0x01,0xff]
+
+v_cvt_pk_u8_f32_e64_dpp v5, v1, v2, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x00,0x26,0xd6,0xfa,0x04,0xf2,0x01,0x01,0x50,0x01,0xff]
+
+v_cvt_pk_u8_f32_e64_dpp v5, v1, v2, -1 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x00,0x26,0xd6,0xfa,0x04,0x06,0x03,0x01,0x5f,0x01,0x01]
+
+v_cvt_pk_u8_f32_e64_dpp v5, v1, v2, 0.5 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: [0x05,0x00,0x26,0xd6,0xfa,0x04,0xc2,0x03,0x01,0x60,0x09,0x13]
+
+v_cvt_pk_u8_f32_e64_dpp v255, -|v255|, v255, src_scc row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: [0xff,0x01,0x26,0xd6,0xfa,0xfe,0xf7,0x23,0xff,0x6f,0x05,0x30]
+
+v_cvt_pk_norm_i16_f32_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x21,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+
+v_cvt_pk_norm_i16_f32_e64_dpp v5, v1, v2 quad_perm:[0,1,2,3]
+// GFX12: [0x05,0x00,0x21,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+
+v_cvt_pk_norm_i16_f32_e64_dpp v5, v1, v2 row_mirror
+// GFX12: [0x05,0x00,0x21,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+
+v_cvt_pk_norm_i16_f32_e64_dpp v5, v1, v2 row_half_mirror
+// GFX12: [0x05,0x00,0x21,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+
+v_cvt_pk_norm_i16_f32_e64_dpp v5, v1, v2 row_shl:1
+// GFX12: [0x05,0x00,0x21,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+
+v_cvt_pk_norm_i16_f32_e64_dpp v5, v1, v2 row_shl:15
+// GFX12: [0x05,0x00,0x21,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+
+v_cvt_pk_norm_i16_f32_e64_dpp v5, v1, v2 row_shr:1
+// GFX12: [0x05,0x00,0x21,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+
+v_cvt_pk_norm_i16_f32_e64_dpp v5, v1, v2 row_shr:15
+// GFX12: [0x05,0x00,0x21,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+
+v_cvt_pk_norm_i16_f32_e64_dpp v5, v1, v2 row_ror:1
+// GFX12: [0x05,0x00,0x21,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+
+v_cvt_pk_norm_i16_f32_e64_dpp v5, v1, v2 row_ror:15
+// GFX12: [0x05,0x00,0x21,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+
+v_cvt_pk_norm_i16_f32_e64_dpp v5, v1, v2 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x00,0x21,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+
+v_cvt_pk_norm_i16_f32_e64_dpp v5, |v1|, -v2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x01,0x21,0xd7,0xfa,0x04,0x02,0x40,0x01,0x5f,0x01,0x01]
+
+v_cvt_pk_norm_i16_f32_e64_dpp v5, -v1, |v2| row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: [0x05,0x02,0x21,0xd7,0xfa,0x04,0x02,0x20,0x01,0x60,0x09,0x13]
+
+v_cvt_pk_norm_i16_f32_e64_dpp v255, -|v255|, -|v255| row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: [0xff,0x03,0x21,0xd7,0xfa,0xfe,0x03,0x60,0xff,0x6f,0x05,0x30]
+
+v_cvt_pk_norm_u16_f32_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x22,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+
+v_cvt_pk_norm_u16_f32_e64_dpp v5, v1, v2 quad_perm:[0,1,2,3]
+// GFX12: [0x05,0x00,0x22,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+
+v_cvt_pk_norm_u16_f32_e64_dpp v5, v1, v2 row_mirror
+// GFX12: [0x05,0x00,0x22,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+
+v_cvt_pk_norm_u16_f32_e64_dpp v5, v1, v2 row_half_mirror
+// GFX12: [0x05,0x00,0x22,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+
+v_cvt_pk_norm_u16_f32_e64_dpp v5, v1, v2 row_shl:1
+// GFX12: [0x05,0x00,0x22,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+
+v_cvt_pk_norm_u16_f32_e64_dpp v5, v1, v2 row_shl:15
+// GFX12: [0x05,0x00,0x22,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+
+v_cvt_pk_norm_u16_f32_e64_dpp v5, v1, v2 row_shr:1
+// GFX12: [0x05,0x00,0x22,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+
+v_cvt_pk_norm_u16_f32_e64_dpp v5, v1, v2 row_shr:15
+// GFX12: [0x05,0x00,0x22,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+
+v_cvt_pk_norm_u16_f32_e64_dpp v5, v1, v2 row_ror:1
+// GFX12: [0x05,0x00,0x22,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+
+v_cvt_pk_norm_u16_f32_e64_dpp v5, v1, v2 row_ror:15
+// GFX12: [0x05,0x00,0x22,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+
+v_cvt_pk_norm_u16_f32_e64_dpp v5, v1, v2 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x00,0x22,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+
+v_cvt_pk_norm_u16_f32_e64_dpp v5, |v1|, -v2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x01,0x22,0xd7,0xfa,0x04,0x02,0x40,0x01,0x5f,0x01,0x01]
+
+v_cvt_pk_norm_u16_f32_e64_dpp v5, -v1, |v2| row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: [0x05,0x02,0x22,0xd7,0xfa,0x04,0x02,0x20,0x01,0x60,0x09,0x13]
+
+v_cvt_pk_norm_u16_f32_e64_dpp v255, -|v255|, -|v255| row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: [0xff,0x03,0x22,0xd7,0xfa,0xfe,0x03,0x60,0xff,0x6f,0x05,0x30]
+
+v_div_fixup_f16_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x54,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_div_fixup_f16_e64_dpp v5, v1, s2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x54,0xd6,0xfa,0x04,0x0c,0x04,0x01,0x1b,0x00,0xff]
+
+v_div_fixup_f16_e64_dpp v5, v1, 2.0, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x54,0xd6,0xfa,0xe8,0x0d,0x04,0x01,0x1b,0x00,0xff]
+
+v_div_fixup_f16_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX12: [0x05,0x00,0x54,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_div_fixup_f16_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX12: [0x05,0x00,0x54,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_div_fixup_f16_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX12: [0x05,0x00,0x54,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_div_fixup_f16_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX12: [0x05,0x00,0x54,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_div_fixup_f16_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX12: [0x05,0x00,0x54,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_div_fixup_f16_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX12: [0x05,0x00,0x54,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_div_fixup_f16_e64_dpp v5, |v1|, v2, -ttmp15 row_shr:15
+// GFX12: [0x05,0x01,0x54,0xd6,0xfa,0x04,0xee,0x81,0x01,0x1f,0x01,0xff]
+
+v_div_fixup_f16_e64_dpp v5, v1, -|v2|, exec_hi row_ror:1
+// GFX12: [0x05,0x02,0x54,0xd6,0xfa,0x04,0xfe,0x41,0x01,0x21,0x01,0xff]
+
+v_div_fixup_f16_e64_dpp v5, -v1, v2, |exec_lo| row_ror:15
+// GFX12: [0x05,0x04,0x54,0xd6,0xfa,0x04,0xfa,0x21,0x01,0x2f,0x01,0xff]
+
+v_div_fixup_f16_e64_dpp v5, -|v1|, -|v2|, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x03,0x54,0xd6,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff]
+
+v_div_fixup_f16_e64_dpp v5, -|v1|, v2, -|-1| row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x05,0x54,0xd6,0xfa,0x04,0x06,0xa3,0x01,0x5f,0x01,0x01]
+
+v_div_fixup_f16_e64_dpp v5, v1, -|v2|, -|0.5| row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: [0x05,0x06,0x54,0xd6,0xfa,0x04,0xc2,0xc3,0x01,0x60,0x09,0x13]
+
+v_div_fixup_f16_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: [0xff,0x87,0x54,0xd6,0xfa,0xfe,0xf7,0xe3,0xff,0x6f,0x05,0x30]
+
+v_fma_f16_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x48,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_fma_f16_e64_dpp v5, v1, s2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x48,0xd6,0xfa,0x04,0x0c,0x04,0x01,0x1b,0x00,0xff]
+
+v_fma_f16_e64_dpp v5, v1, 2.0, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x48,0xd6,0xfa,0xe8,0x0d,0x04,0x01,0x1b,0x00,0xff]
+
+v_fma_f16_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX12: [0x05,0x00,0x48,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_fma_f16_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX12: [0x05,0x00,0x48,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_fma_f16_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX12: [0x05,0x00,0x48,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_fma_f16_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX12: [0x05,0x00,0x48,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_fma_f16_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX12: [0x05,0x00,0x48,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_fma_f16_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX12: [0x05,0x00,0x48,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_fma_f16_e64_dpp v5, |v1|, v2, -ttmp15 row_shr:15
+// GFX12: [0x05,0x01,0x48,0xd6,0xfa,0x04,0xee,0x81,0x01,0x1f,0x01,0xff]
+
+v_fma_f16_e64_dpp v5, v1, -|v2|, exec_hi row_ror:1
+// GFX12: [0x05,0x02,0x48,0xd6,0xfa,0x04,0xfe,0x41,0x01,0x21,0x01,0xff]
+
+v_fma_f16_e64_dpp v5, -v1, v2, |exec_lo| row_ror:15
+// GFX12: [0x05,0x04,0x48,0xd6,0xfa,0x04,0xfa,0x21,0x01,0x2f,0x01,0xff]
+
+v_fma_f16_e64_dpp v5, -|v1|, -|v2|, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x03,0x48,0xd6,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff]
+
+v_fma_f16_e64_dpp v5, -|v1|, v2, -|-1| row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x05,0x48,0xd6,0xfa,0x04,0x06,0xa3,0x01,0x5f,0x01,0x01]
+
+v_fma_f16_e64_dpp v5, v1, -|v2|, -|0.5| row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: [0x05,0x06,0x48,0xd6,0xfa,0x04,0xc2,0xc3,0x01,0x60,0x09,0x13]
+
+v_fma_f16_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: [0xff,0x87,0x48,0xd6,0xfa,0xfe,0xf7,0xe3,0xff,0x6f,0x05,0x30]
+
+v_fma_f32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x13,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_fma_f32_e64_dpp v5, v1, s2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x13,0xd6,0xfa,0x04,0x0c,0x04,0x01,0x1b,0x00,0xff]
+
+v_fma_f32_e64_dpp v5, v1, 2.0, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x13,0xd6,0xfa,0xe8,0x0d,0x04,0x01,0x1b,0x00,0xff]
+
+v_fma_f32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX12: [0x05,0x00,0x13,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_fma_f32_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX12: [0x05,0x00,0x13,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_fma_f32_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX12: [0x05,0x00,0x13,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_fma_f32_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX12: [0x05,0x00,0x13,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_fma_f32_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX12: [0x05,0x00,0x13,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_fma_f32_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX12: [0x05,0x00,0x13,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_fma_f32_e64_dpp v5, |v1|, v2, -ttmp15 row_shr:15
+// GFX12: [0x05,0x01,0x13,0xd6,0xfa,0x04,0xee,0x81,0x01,0x1f,0x01,0xff]
+
+v_fma_f32_e64_dpp v5, v1, -|v2|, exec_hi row_ror:1
+// GFX12: [0x05,0x02,0x13,0xd6,0xfa,0x04,0xfe,0x41,0x01,0x21,0x01,0xff]
+
+v_fma_f32_e64_dpp v5, -v1, v2, |exec_lo| row_ror:15
+// GFX12: [0x05,0x04,0x13,0xd6,0xfa,0x04,0xfa,0x21,0x01,0x2f,0x01,0xff]
+
+v_fma_f32_e64_dpp v5, -|v1|, -|v2|, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x03,0x13,0xd6,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff]
+
+v_fma_f32_e64_dpp v5, -|v1|, v2, -|-1| mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x05,0x13,0xd6,0xfa,0x04,0x06,0xab,0x01,0x5f,0x01,0x01]
+
+v_fma_f32_e64_dpp v5, v1, -|v2|, -|0.5| mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: [0x05,0x06,0x13,0xd6,0xfa,0x04,0xc2,0xd3,0x01,0x60,0x09,0x13]
+
+v_fma_f32_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: [0xff,0x87,0x13,0xd6,0xfa,0xfe,0xf7,0xfb,0xff,0x6f,0x05,0x30]
+
+v_ldexp_f32_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x1c,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+
+v_ldexp_f32_e64_dpp v5, v1, v2 quad_perm:[0,1,2,3]
+// GFX12: [0x05,0x00,0x1c,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+
+v_ldexp_f32_e64_dpp v5, v1, v2 row_mirror
+// GFX12: [0x05,0x00,0x1c,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+
+v_ldexp_f32_e64_dpp v5, v1, v2 row_half_mirror
+// GFX12: [0x05,0x00,0x1c,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+
+v_ldexp_f32_e64_dpp v5, v1, v2 row_shl:1
+// GFX12: [0x05,0x00,0x1c,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+
+v_ldexp_f32_e64_dpp v5, v1, v2 row_shl:15
+// GFX12: [0x05,0x00,0x1c,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+
+v_ldexp_f32_e64_dpp v5, v1, v2 row_shr:1
+// GFX12: [0x05,0x00,0x1c,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+
+v_ldexp_f32_e64_dpp v5, v1, v2 row_shr:15
+// GFX12: [0x05,0x00,0x1c,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+
+v_ldexp_f32_e64_dpp v5, v1, v2 row_ror:1
+// GFX12: [0x05,0x00,0x1c,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+
+v_ldexp_f32_e64_dpp v5, v1, v2 row_ror:15
+// GFX12: [0x05,0x00,0x1c,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+
+v_ldexp_f32_e64_dpp v5, v1, v2 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x00,0x1c,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+
+v_ldexp_f32_e64_dpp v5, v1, v2 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x00,0x1c,0xd7,0xfa,0x04,0x02,0x08,0x01,0x5f,0x01,0x01]
+
+v_ldexp_f32_e64_dpp v5, v1, v2 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: [0x05,0x00,0x1c,0xd7,0xfa,0x04,0x02,0x10,0x01,0x60,0x09,0x13]
+
+v_ldexp_f32_e64_dpp v255, -|v255|, v255 clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: [0xff,0x81,0x1c,0xd7,0xfa,0xfe,0x03,0x38,0xff,0x6f,0x05,0x30]
+
+v_lerp_u8_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x15,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_lerp_u8_e64_dpp v5, v1, s2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x15,0xd6,0xfa,0x04,0x0c,0x04,0x01,0x1b,0x00,0xff]
+
+v_lerp_u8_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX12: [0x05,0x00,0x15,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_lerp_u8_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX12: [0x05,0x00,0x15,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_lerp_u8_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX12: [0x05,0x00,0x15,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_lerp_u8_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX12: [0x05,0x00,0x15,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_lerp_u8_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX12: [0x05,0x00,0x15,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_lerp_u8_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX12: [0x05,0x00,0x15,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_lerp_u8_e64_dpp v5, v1, v2, ttmp15 row_shr:15
+// GFX12: [0x05,0x00,0x15,0xd6,0xfa,0x04,0xee,0x01,0x01,0x1f,0x01,0xff]
+
+v_lerp_u8_e64_dpp v5, v1, v2, exec_hi row_ror:1
+// GFX12: [0x05,0x00,0x15,0xd6,0xfa,0x04,0xfe,0x01,0x01,0x21,0x01,0xff]
+
+v_lerp_u8_e64_dpp v5, v1, v2, exec_lo row_ror:15
+// GFX12: [0x05,0x00,0x15,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x2f,0x01,0xff]
+
+v_lerp_u8_e64_dpp v5, v1, v2, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x00,0x15,0xd6,0xfa,0x04,0xf2,0x01,0x01,0x50,0x01,0xff]
+
+v_lerp_u8_e64_dpp v5, v1, v2, -1 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x00,0x15,0xd6,0xfa,0x04,0x06,0x03,0x01,0x5f,0x01,0x01]
+
+v_lerp_u8_e64_dpp v5, v1, v2, 0.5 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: [0x05,0x00,0x15,0xd6,0xfa,0x04,0xc2,0x03,0x01,0x60,0x09,0x13]
+
+v_lerp_u8_e64_dpp v255, v255, v255, src_scc row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: [0xff,0x00,0x15,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x05,0x30]
+
+v_lshl_add_u32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x46,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_lshl_add_u32_e64_dpp v5, v1, s2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x46,0xd6,0xfa,0x04,0x0c,0x04,0x01,0x1b,0x00,0xff]
+
+v_lshl_add_u32_e64_dpp v5, v1, 10, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x46,0xd6,0xfa,0x14,0x0d,0x04,0x01,0x1b,0x00,0xff]
+
+v_lshl_add_u32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX12: [0x05,0x00,0x46,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_lshl_add_u32_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX12: [0x05,0x00,0x46,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_lshl_add_u32_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX12: [0x05,0x00,0x46,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_lshl_add_u32_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX12: [0x05,0x00,0x46,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_lshl_add_u32_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX12: [0x05,0x00,0x46,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_lshl_add_u32_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX12: [0x05,0x00,0x46,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_lshl_add_u32_e64_dpp v5, v1, v2, ttmp15 row_shr:15
+// GFX12: [0x05,0x00,0x46,0xd6,0xfa,0x04,0xee,0x01,0x01,0x1f,0x01,0xff]
+
+v_lshl_add_u32_e64_dpp v5, v1, v2, exec_hi row_ror:1
+// GFX12: [0x05,0x00,0x46,0xd6,0xfa,0x04,0xfe,0x01,0x01,0x21,0x01,0xff]
+
+v_lshl_add_u32_e64_dpp v5, v1, v2, exec_lo row_ror:15
+// GFX12: [0x05,0x00,0x46,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x2f,0x01,0xff]
+
+v_lshl_add_u32_e64_dpp v5, v1, v2, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x00,0x46,0xd6,0xfa,0x04,0xf2,0x01,0x01,0x50,0x01,0xff]
+
+v_lshl_add_u32_e64_dpp v5, v1, v2, -1 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x00,0x46,0xd6,0xfa,0x04,0x06,0x03,0x01,0x5f,0x01,0x01]
+
+v_lshl_add_u32_e64_dpp v5, v1, v2, 0.5 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: [0x05,0x00,0x46,0xd6,0xfa,0x04,0xc2,0x03,0x01,0x60,0x09,0x13]
+
+v_lshl_add_u32_e64_dpp v255, v255, v255, src_scc row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: [0xff,0x00,0x46,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x05,0x30]
+
+v_lshl_or_b32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x56,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_lshl_or_b32_e64_dpp v5, v1, s2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x56,0xd6,0xfa,0x04,0x0c,0x04,0x01,0x1b,0x00,0xff]
+
+v_lshl_or_b32_e64_dpp v5, v1, 10, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x56,0xd6,0xfa,0x14,0x0d,0x04,0x01,0x1b,0x00,0xff]
+
+v_lshl_or_b32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX12: [0x05,0x00,0x56,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_lshl_or_b32_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX12: [0x05,0x00,0x56,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_lshl_or_b32_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX12: [0x05,0x00,0x56,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_lshl_or_b32_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX12: [0x05,0x00,0x56,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_lshl_or_b32_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX12: [0x05,0x00,0x56,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_lshl_or_b32_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX12: [0x05,0x00,0x56,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_lshl_or_b32_e64_dpp v5, v1, v2, ttmp15 row_shr:15
+// GFX12: [0x05,0x00,0x56,0xd6,0xfa,0x04,0xee,0x01,0x01,0x1f,0x01,0xff]
+
+v_lshl_or_b32_e64_dpp v5, v1, v2, exec_hi row_ror:1
+// GFX12: [0x05,0x00,0x56,0xd6,0xfa,0x04,0xfe,0x01,0x01,0x21,0x01,0xff]
+
+v_lshl_or_b32_e64_dpp v5, v1, v2, exec_lo row_ror:15
+// GFX12: [0x05,0x00,0x56,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x2f,0x01,0xff]
+
+v_lshl_or_b32_e64_dpp v5, v1, v2, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x00,0x56,0xd6,0xfa,0x04,0xf2,0x01,0x01,0x50,0x01,0xff]
+
+v_lshl_or_b32_e64_dpp v5, v1, v2, -1 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x00,0x56,0xd6,0xfa,0x04,0x06,0x03,0x01,0x5f,0x01,0x01]
+
+v_lshl_or_b32_e64_dpp v5, v1, v2, 0.5 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: [0x05,0x00,0x56,0xd6,0xfa,0x04,0xc2,0x03,0x01,0x60,0x09,0x13]
+
+v_lshl_or_b32_e64_dpp v255, v255, v255, src_scc row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: [0xff,0x00,0x56,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x05,0x30]
+
+v_lshlrev_b16_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x38,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+
+v_lshlrev_b16_e64_dpp v5, v1, v2 quad_perm:[0,1,2,3]
+// GFX12: [0x05,0x00,0x38,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+
+v_lshlrev_b16_e64_dpp v5, v1, v2 row_mirror
+// GFX12: [0x05,0x00,0x38,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+
+v_lshlrev_b16_e64_dpp v5, v1, v2 row_half_mirror
+// GFX12: [0x05,0x00,0x38,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+
+v_lshlrev_b16_e64_dpp v5, v1, v2 row_shl:1
+// GFX12: [0x05,0x00,0x38,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+
+v_lshlrev_b16_e64_dpp v5, v1, v2 row_shl:15
+// GFX12: [0x05,0x00,0x38,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+
+v_lshlrev_b16_e64_dpp v5, v1, v2 row_shr:1
+// GFX12: [0x05,0x00,0x38,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+
+v_lshlrev_b16_e64_dpp v5, v1, v2 row_shr:15
+// GFX12: [0x05,0x00,0x38,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+
+v_lshlrev_b16_e64_dpp v5, v1, v2 row_ror:1
+// GFX12: [0x05,0x00,0x38,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+
+v_lshlrev_b16_e64_dpp v5, v1, v2 row_ror:15
+// GFX12: [0x05,0x00,0x38,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+
+v_lshlrev_b16_e64_dpp v5, v1, v2 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x00,0x38,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+
+v_lshlrev_b16_e64_dpp v5, v1, v2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x00,0x38,0xd7,0xfa,0x04,0x02,0x00,0x01,0x5f,0x01,0x01]
+
+v_lshlrev_b16_e64_dpp v5, v1, v2 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: [0x05,0x00,0x38,0xd7,0xfa,0x04,0x02,0x00,0x01,0x60,0x09,0x13]
+
+v_lshlrev_b16_e64_dpp v255, v255, v255 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: [0xff,0x00,0x38,0xd7,0xfa,0xfe,0x03,0x00,0xff,0x6f,0x05,0x30]
+
+v_lshrrev_b16_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x39,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+
+v_lshrrev_b16_e64_dpp v5, v1, v2 quad_perm:[0,1,2,3]
+// GFX12: [0x05,0x00,0x39,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+
+v_lshrrev_b16_e64_dpp v5, v1, v2 row_mirror
+// GFX12: [0x05,0x00,0x39,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+
+v_lshrrev_b16_e64_dpp v5, v1, v2 row_half_mirror
+// GFX12: [0x05,0x00,0x39,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+
+v_lshrrev_b16_e64_dpp v5, v1, v2 row_shl:1
+// GFX12: [0x05,0x00,0x39,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+
+v_lshrrev_b16_e64_dpp v5, v1, v2 row_shl:15
+// GFX12: [0x05,0x00,0x39,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+
+v_lshrrev_b16_e64_dpp v5, v1, v2 row_shr:1
+// GFX12: [0x05,0x00,0x39,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+
+v_lshrrev_b16_e64_dpp v5, v1, v2 row_shr:15
+// GFX12: [0x05,0x00,0x39,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+
+v_lshrrev_b16_e64_dpp v5, v1, v2 row_ror:1
+// GFX12: [0x05,0x00,0x39,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+
+v_lshrrev_b16_e64_dpp v5, v1, v2 row_ror:15
+// GFX12: [0x05,0x00,0x39,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+
+v_lshrrev_b16_e64_dpp v5, v1, v2 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x00,0x39,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+
+v_lshrrev_b16_e64_dpp v5, v1, v2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x00,0x39,0xd7,0xfa,0x04,0x02,0x00,0x01,0x5f,0x01,0x01]
+
+v_lshrrev_b16_e64_dpp v5, v1, v2 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: [0x05,0x00,0x39,0xd7,0xfa,0x04,0x02,0x00,0x01,0x60,0x09,0x13]
+
+v_lshrrev_b16_e64_dpp v255, v255, v255 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: [0xff,0x00,0x39,0xd7,0xfa,0xfe,0x03,0x00,0xff,0x6f,0x05,0x30]
+
+v_mad_i16_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x53,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_mad_i16_e64_dpp v5, v1, s2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x53,0xd6,0xfa,0x04,0x0c,0x04,0x01,0x1b,0x00,0xff]
+
+v_mad_i16_e64_dpp v5, v1, 10, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x53,0xd6,0xfa,0x14,0x0d,0x04,0x01,0x1b,0x00,0xff]
+
+v_mad_i16_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX12: [0x05,0x00,0x53,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_mad_i16_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX12: [0x05,0x00,0x53,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_mad_i16_e64_dpp v5, v1, v2, v3 row_half_mirror
+// GFX12: [0x05,0x00,0x53,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x41,0x01,0xff]
+
+v_mad_i16_e64_dpp v5, v1, v2, v255 row_shl:1
+// GFX12: [0x05,0x00,0x53,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x01,0x01,0xff]
+
+v_mad_i16_e64_dpp v5, v1, v2, s105 row_shl:15
+// GFX12: [0x05,0x00,0x53,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x0f,0x01,0xff]
+
+v_mad_i16_e64_dpp v5, v1, v2, vcc_hi row_shr:1
+// GFX12: [0x05,0x00,0x53,0xd6,0xfa,0x04,0xae,0x01,0x01,0x11,0x01,0xff]
+
+v_mad_i16_e64_dpp v5, v1, v2, vcc_lo row_shr:15
+// GFX12: [0x05,0x00,0x53,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x1f,0x01,0xff]
+
+v_mad_i16_e64_dpp v5, v1, v2, ttmp15 row_ror:1
+// GFX12: [0x05,0x00,0x53,0xd6,0xfa,0x04,0xee,0x01,0x01,0x21,0x01,0xff]
+
+v_mad_i16_e64_dpp v5, v1, v2, exec_hi row_ror:15
+// GFX12: [0x05,0x00,0x53,0xd6,0xfa,0x04,0xfe,0x01,0x01,0x2f,0x01,0xff]
+
+v_mad_i16_e64_dpp v5, v1, v2, exec_lo row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x00,0x53,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x50,0x01,0xff]
+
+v_mad_i16_e64_dpp v5, v1, v2, null row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x00,0x53,0xd6,0xfa,0x04,0xf2,0x01,0x01,0x5f,0x01,0x01]
+
+v_mad_i16_e64_dpp v5, v1, v2, -1 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: [0x05,0x00,0x53,0xd6,0xfa,0x04,0x06,0x03,0x01,0x60,0x09,0x13]
+
+v_mad_i16_e64_dpp v255, v255, v255, src_scc clamp row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: [0xff,0x80,0x53,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x05,0x30]
+
+v_mad_i32_i16_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x5a,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_mad_i32_i16_e64_dpp v5, v1, s2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x5a,0xd6,0xfa,0x04,0x0c,0x04,0x01,0x1b,0x00,0xff]
+
+v_mad_i32_i16_e64_dpp v5, v1, 10, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x5a,0xd6,0xfa,0x14,0x0d,0x04,0x01,0x1b,0x00,0xff]
+
+v_mad_i32_i16_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX12: [0x05,0x00,0x5a,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_mad_i32_i16_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX12: [0x05,0x00,0x5a,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_mad_i32_i16_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX12: [0x05,0x00,0x5a,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_mad_i32_i16_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX12: [0x05,0x00,0x5a,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_mad_i32_i16_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX12: [0x05,0x00,0x5a,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_mad_i32_i16_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX12: [0x05,0x00,0x5a,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_mad_i32_i16_e64_dpp v5, v1, v2, ttmp15 row_shr:15
+// GFX12: [0x05,0x00,0x5a,0xd6,0xfa,0x04,0xee,0x01,0x01,0x1f,0x01,0xff]
+
+v_mad_i32_i16_e64_dpp v5, v1, v2, exec_hi row_ror:1
+// GFX12: [0x05,0x00,0x5a,0xd6,0xfa,0x04,0xfe,0x01,0x01,0x21,0x01,0xff]
+
+v_mad_i32_i16_e64_dpp v5, v1, v2, exec_lo row_ror:15
+// GFX12: [0x05,0x00,0x5a,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x2f,0x01,0xff]
+
+v_mad_i32_i16_e64_dpp v5, v1, v2, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x00,0x5a,0xd6,0xfa,0x04,0xf2,0x01,0x01,0x50,0x01,0xff]
+
+v_mad_i32_i16_e64_dpp v5, v1, v2, -1 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x00,0x5a,0xd6,0xfa,0x04,0x06,0x03,0x01,0x5f,0x01,0x01]
+
+v_mad_i32_i16_e64_dpp v5, v1, v2, 0.5 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: [0x05,0x00,0x5a,0xd6,0xfa,0x04,0xc2,0x03,0x01,0x60,0x09,0x13]
+
+v_mad_i32_i16_e64_dpp v255, v255, v255, src_scc clamp row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: [0xff,0x80,0x5a,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x05,0x30]
+
+v_mad_i32_i24_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x0a,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_mad_i32_i24_e64_dpp v5, v1, s2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x0a,0xd6,0xfa,0x04,0x0c,0x04,0x01,0x1b,0x00,0xff]
+
+v_mad_i32_i24_e64_dpp v5, v1, 10, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x0a,0xd6,0xfa,0x14,0x0d,0x04,0x01,0x1b,0x00,0xff]
+
+v_mad_i32_i24_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX12: [0x05,0x00,0x0a,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_mad_i32_i24_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX12: [0x05,0x00,0x0a,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_mad_i32_i24_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX12: [0x05,0x00,0x0a,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_mad_i32_i24_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX12: [0x05,0x00,0x0a,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_mad_i32_i24_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX12: [0x05,0x00,0x0a,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_mad_i32_i24_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX12: [0x05,0x00,0x0a,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_mad_i32_i24_e64_dpp v5, v1, v2, ttmp15 row_shr:15
+// GFX12: [0x05,0x00,0x0a,0xd6,0xfa,0x04,0xee,0x01,0x01,0x1f,0x01,0xff]
+
+v_mad_i32_i24_e64_dpp v5, v1, v2, exec_hi row_ror:1
+// GFX12: [0x05,0x00,0x0a,0xd6,0xfa,0x04,0xfe,0x01,0x01,0x21,0x01,0xff]
+
+v_mad_i32_i24_e64_dpp v5, v1, v2, exec_lo row_ror:15
+// GFX12: [0x05,0x00,0x0a,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x2f,0x01,0xff]
+
+v_mad_i32_i24_e64_dpp v5, v1, v2, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x00,0x0a,0xd6,0xfa,0x04,0xf2,0x01,0x01,0x50,0x01,0xff]
+
+v_mad_i32_i24_e64_dpp v5, v1, v2, -1 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x00,0x0a,0xd6,0xfa,0x04,0x06,0x03,0x01,0x5f,0x01,0x01]
+
+v_mad_i32_i24_e64_dpp v5, v1, v2, 0.5 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: [0x05,0x00,0x0a,0xd6,0xfa,0x04,0xc2,0x03,0x01,0x60,0x09,0x13]
+
+v_mad_i32_i24_e64_dpp v255, v255, v255, src_scc clamp row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: [0xff,0x80,0x0a,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x05,0x30]
+
+v_mad_u16_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x41,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_mad_u16_e64_dpp v5, v1, s2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x41,0xd6,0xfa,0x04,0x0c,0x04,0x01,0x1b,0x00,0xff]
+
+v_mad_u16_e64_dpp v5, v1, 10, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x41,0xd6,0xfa,0x14,0x0d,0x04,0x01,0x1b,0x00,0xff]
+
+v_mad_u16_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX12: [0x05,0x00,0x41,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_mad_u16_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX12: [0x05,0x00,0x41,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_mad_u16_e64_dpp v5, v1, v2, v3 row_half_mirror
+// GFX12: [0x05,0x00,0x41,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x41,0x01,0xff]
+
+v_mad_u16_e64_dpp v5, v1, v2, v255 row_shl:1
+// GFX12: [0x05,0x00,0x41,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x01,0x01,0xff]
+
+v_mad_u16_e64_dpp v5, v1, v2, s105 row_shl:15
+// GFX12: [0x05,0x00,0x41,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x0f,0x01,0xff]
+
+v_mad_u16_e64_dpp v5, v1, v2, vcc_hi row_shr:1
+// GFX12: [0x05,0x00,0x41,0xd6,0xfa,0x04,0xae,0x01,0x01,0x11,0x01,0xff]
+
+v_mad_u16_e64_dpp v5, v1, v2, vcc_lo row_shr:15
+// GFX12: [0x05,0x00,0x41,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x1f,0x01,0xff]
+
+v_mad_u16_e64_dpp v5, v1, v2, ttmp15 row_ror:1
+// GFX12: [0x05,0x00,0x41,0xd6,0xfa,0x04,0xee,0x01,0x01,0x21,0x01,0xff]
+
+v_mad_u16_e64_dpp v5, v1, v2, exec_hi row_ror:15
+// GFX12: [0x05,0x00,0x41,0xd6,0xfa,0x04,0xfe,0x01,0x01,0x2f,0x01,0xff]
+
+v_mad_u16_e64_dpp v5, v1, v2, exec_lo row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x00,0x41,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x50,0x01,0xff]
+
+v_mad_u16_e64_dpp v5, v1, v2, null row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x00,0x41,0xd6,0xfa,0x04,0xf2,0x01,0x01,0x5f,0x01,0x01]
+
+v_mad_u16_e64_dpp v5, v1, v2, -1 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: [0x05,0x00,0x41,0xd6,0xfa,0x04,0x06,0x03,0x01,0x60,0x09,0x13]
+
+v_mad_u16_e64_dpp v255, v255, v255, src_scc clamp row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: [0xff,0x80,0x41,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x05,0x30]
+
+v_mad_u32_u16_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x59,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_mad_u32_u16_e64_dpp v5, v1, s2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x59,0xd6,0xfa,0x04,0x0c,0x04,0x01,0x1b,0x00,0xff]
+
+v_mad_u32_u16_e64_dpp v5, v1, 10, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x59,0xd6,0xfa,0x14,0x0d,0x04,0x01,0x1b,0x00,0xff]
+
+v_mad_u32_u16_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX12: [0x05,0x00,0x59,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_mad_u32_u16_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX12: [0x05,0x00,0x59,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_mad_u32_u16_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX12: [0x05,0x00,0x59,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_mad_u32_u16_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX12: [0x05,0x00,0x59,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_mad_u32_u16_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX12: [0x05,0x00,0x59,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_mad_u32_u16_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX12: [0x05,0x00,0x59,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_mad_u32_u16_e64_dpp v5, v1, v2, ttmp15 row_shr:15
+// GFX12: [0x05,0x00,0x59,0xd6,0xfa,0x04,0xee,0x01,0x01,0x1f,0x01,0xff]
+
+v_mad_u32_u16_e64_dpp v5, v1, v2, exec_hi row_ror:1
+// GFX12: [0x05,0x00,0x59,0xd6,0xfa,0x04,0xfe,0x01,0x01,0x21,0x01,0xff]
+
+v_mad_u32_u16_e64_dpp v5, v1, v2, exec_lo row_ror:15
+// GFX12: [0x05,0x00,0x59,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x2f,0x01,0xff]
+
+v_mad_u32_u16_e64_dpp v5, v1, v2, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x00,0x59,0xd6,0xfa,0x04,0xf2,0x01,0x01,0x50,0x01,0xff]
+
+v_mad_u32_u16_e64_dpp v5, v1, v2, -1 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x00,0x59,0xd6,0xfa,0x04,0x06,0x03,0x01,0x5f,0x01,0x01]
+
+v_mad_u32_u16_e64_dpp v5, v1, v2, 0.5 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: [0x05,0x00,0x59,0xd6,0xfa,0x04,0xc2,0x03,0x01,0x60,0x09,0x13]
+
+v_mad_u32_u16_e64_dpp v255, v255, v255, src_scc clamp row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: [0xff,0x80,0x59,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x05,0x30]
+
+v_mad_u32_u24_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x0b,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_mad_u32_u24_e64_dpp v5, v1, s2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x0b,0xd6,0xfa,0x04,0x0c,0x04,0x01,0x1b,0x00,0xff]
+
+v_mad_u32_u24_e64_dpp v5, v1, 10, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x0b,0xd6,0xfa,0x14,0x0d,0x04,0x01,0x1b,0x00,0xff]
+
+v_mad_u32_u24_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX12: [0x05,0x00,0x0b,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_mad_u32_u24_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX12: [0x05,0x00,0x0b,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_mad_u32_u24_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX12: [0x05,0x00,0x0b,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_mad_u32_u24_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX12: [0x05,0x00,0x0b,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_mad_u32_u24_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX12: [0x05,0x00,0x0b,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_mad_u32_u24_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX12: [0x05,0x00,0x0b,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_mad_u32_u24_e64_dpp v5, v1, v2, ttmp15 row_shr:15
+// GFX12: [0x05,0x00,0x0b,0xd6,0xfa,0x04,0xee,0x01,0x01,0x1f,0x01,0xff]
+
+v_mad_u32_u24_e64_dpp v5, v1, v2, exec_hi row_ror:1
+// GFX12: [0x05,0x00,0x0b,0xd6,0xfa,0x04,0xfe,0x01,0x01,0x21,0x01,0xff]
+
+v_mad_u32_u24_e64_dpp v5, v1, v2, exec_lo row_ror:15
+// GFX12: [0x05,0x00,0x0b,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x2f,0x01,0xff]
+
+v_mad_u32_u24_e64_dpp v5, v1, v2, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x00,0x0b,0xd6,0xfa,0x04,0xf2,0x01,0x01,0x50,0x01,0xff]
+
+v_mad_u32_u24_e64_dpp v5, v1, v2, -1 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x00,0x0b,0xd6,0xfa,0x04,0x06,0x03,0x01,0x5f,0x01,0x01]
+
+v_mad_u32_u24_e64_dpp v5, v1, v2, 0.5 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: [0x05,0x00,0x0b,0xd6,0xfa,0x04,0xc2,0x03,0x01,0x60,0x09,0x13]
+
+v_mad_u32_u24_e64_dpp v255, v255, v255, src_scc clamp row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: [0xff,0x80,0x0b,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x05,0x30]
+
+v_max3_num_f16_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x2c,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_max3_num_f16_e64_dpp v5, v1, s2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x2c,0xd6,0xfa,0x04,0x0c,0x04,0x01,0x1b,0x00,0xff]
+
+v_max3_num_f16_e64_dpp v5, v1, 2.0, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x2c,0xd6,0xfa,0xe8,0x0d,0x04,0x01,0x1b,0x00,0xff]
+
+v_max3_num_f16_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX12: [0x05,0x00,0x2c,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_max3_num_f16_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX12: [0x05,0x00,0x2c,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_max3_num_f16_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX12: [0x05,0x00,0x2c,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_max3_num_f16_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX12: [0x05,0x00,0x2c,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_max3_num_f16_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX12: [0x05,0x00,0x2c,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_max3_num_f16_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX12: [0x05,0x00,0x2c,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_max3_num_f16_e64_dpp v5, |v1|, v2, -ttmp15 row_shr:15
+// GFX12: [0x05,0x01,0x2c,0xd6,0xfa,0x04,0xee,0x81,0x01,0x1f,0x01,0xff]
+
+v_max3_num_f16_e64_dpp v5, v1, -|v2|, exec_hi row_ror:1
+// GFX12: [0x05,0x02,0x2c,0xd6,0xfa,0x04,0xfe,0x41,0x01,0x21,0x01,0xff]
+
+v_max3_num_f16_e64_dpp v5, -v1, v2, |exec_lo| row_ror:15
+// GFX12: [0x05,0x04,0x2c,0xd6,0xfa,0x04,0xfa,0x21,0x01,0x2f,0x01,0xff]
+
+v_max3_num_f16_e64_dpp v5, -|v1|, -|v2|, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x03,0x2c,0xd6,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff]
+
+v_max3_num_f16_e64_dpp v5, -|v1|, v2, -|-1| row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x05,0x2c,0xd6,0xfa,0x04,0x06,0xa3,0x01,0x5f,0x01,0x01]
+
+v_max3_num_f16_e64_dpp v5, v1, -|v2|, -|0.5| row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: [0x05,0x06,0x2c,0xd6,0xfa,0x04,0xc2,0xc3,0x01,0x60,0x09,0x13]
+
+v_max3_num_f16_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: [0xff,0x87,0x2c,0xd6,0xfa,0xfe,0xf7,0xe3,0xff,0x6f,0x05,0x30]
+
+v_max3_num_f32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x2a,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_max3_num_f32_e64_dpp v5, v1, s2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x2a,0xd6,0xfa,0x04,0x0c,0x04,0x01,0x1b,0x00,0xff]
+
+v_max3_num_f32_e64_dpp v5, v1, 2.0, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x2a,0xd6,0xfa,0xe8,0x0d,0x04,0x01,0x1b,0x00,0xff]
+
+v_max3_num_f32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX12: [0x05,0x00,0x2a,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_max3_num_f32_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX12: [0x05,0x00,0x2a,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_max3_num_f32_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX12: [0x05,0x00,0x2a,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_max3_num_f32_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX12: [0x05,0x00,0x2a,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_max3_num_f32_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX12: [0x05,0x00,0x2a,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_max3_num_f32_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX12: [0x05,0x00,0x2a,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_max3_num_f32_e64_dpp v5, |v1|, v2, -ttmp15 row_shr:15
+// GFX12: [0x05,0x01,0x2a,0xd6,0xfa,0x04,0xee,0x81,0x01,0x1f,0x01,0xff]
+
+v_max3_num_f32_e64_dpp v5, v1, -|v2|, exec_hi row_ror:1
+// GFX12: [0x05,0x02,0x2a,0xd6,0xfa,0x04,0xfe,0x41,0x01,0x21,0x01,0xff]
+
+v_max3_num_f32_e64_dpp v5, -v1, v2, |exec_lo| row_ror:15
+// GFX12: [0x05,0x04,0x2a,0xd6,0xfa,0x04,0xfa,0x21,0x01,0x2f,0x01,0xff]
+
+v_max3_num_f32_e64_dpp v5, -|v1|, -|v2|, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x03,0x2a,0xd6,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff]
+
+v_max3_num_f32_e64_dpp v5, -|v1|, v2, -|-1| mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x05,0x2a,0xd6,0xfa,0x04,0x06,0xab,0x01,0x5f,0x01,0x01]
+
+v_max3_num_f32_e64_dpp v5, v1, -|v2|, -|0.5| mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: [0x05,0x06,0x2a,0xd6,0xfa,0x04,0xc2,0xd3,0x01,0x60,0x09,0x13]
+
+v_max3_num_f32_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: [0xff,0x87,0x2a,0xd6,0xfa,0xfe,0xf7,0xfb,0xff,0x6f,0x05,0x30]
+
+v_max3_i16_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x4d,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_max3_i16_e64_dpp v5, v1, s2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x4d,0xd6,0xfa,0x04,0x0c,0x04,0x01,0x1b,0x00,0xff]
+
+v_max3_i16_e64_dpp v5, v1, 10, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x4d,0xd6,0xfa,0x14,0x0d,0x04,0x01,0x1b,0x00,0xff]
+
+v_max3_i16_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX12: [0x05,0x00,0x4d,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_max3_i16_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX12: [0x05,0x00,0x4d,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_max3_i16_e64_dpp v5, v1, v2, v3 row_half_mirror
+// GFX12: [0x05,0x00,0x4d,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x41,0x01,0xff]
+
+v_max3_i16_e64_dpp v5, v1, v2, v255 row_shl:1
+// GFX12: [0x05,0x00,0x4d,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x01,0x01,0xff]
+
+v_max3_i16_e64_dpp v5, v1, v2, s105 row_shl:15
+// GFX12: [0x05,0x00,0x4d,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x0f,0x01,0xff]
+
+v_max3_i16_e64_dpp v5, v1, v2, vcc_hi row_shr:1
+// GFX12: [0x05,0x00,0x4d,0xd6,0xfa,0x04,0xae,0x01,0x01,0x11,0x01,0xff]
+
+v_max3_i16_e64_dpp v5, v1, v2, vcc_lo row_shr:15
+// GFX12: [0x05,0x00,0x4d,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x1f,0x01,0xff]
+
+v_max3_i16_e64_dpp v5, v1, v2, ttmp15 row_ror:1
+// GFX12: [0x05,0x00,0x4d,0xd6,0xfa,0x04,0xee,0x01,0x01,0x21,0x01,0xff]
+
+v_max3_i16_e64_dpp v5, v1, v2, exec_hi row_ror:15
+// GFX12: [0x05,0x00,0x4d,0xd6,0xfa,0x04,0xfe,0x01,0x01,0x2f,0x01,0xff]
+
+v_max3_i16_e64_dpp v5, v1, v2, exec_lo row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x00,0x4d,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x50,0x01,0xff]
+
+v_max3_i16_e64_dpp v5, v1, v2, null row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x00,0x4d,0xd6,0xfa,0x04,0xf2,0x01,0x01,0x5f,0x01,0x01]
+
+v_max3_i16_e64_dpp v5, v1, v2, -1 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: [0x05,0x00,0x4d,0xd6,0xfa,0x04,0x06,0x03,0x01,0x60,0x09,0x13]
+
+v_max3_i16_e64_dpp v255, v255, v255, src_scc row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: [0xff,0x00,0x4d,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x05,0x30]
+
+v_max3_i32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x1d,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_max3_i32_e64_dpp v5, v1, s2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x1d,0xd6,0xfa,0x04,0x0c,0x04,0x01,0x1b,0x00,0xff]
+
+v_max3_i32_e64_dpp v5, v1, 10, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x1d,0xd6,0xfa,0x14,0x0d,0x04,0x01,0x1b,0x00,0xff]
+
+v_max3_i32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX12: [0x05,0x00,0x1d,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_max3_i32_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX12: [0x05,0x00,0x1d,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_max3_i32_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX12: [0x05,0x00,0x1d,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_max3_i32_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX12: [0x05,0x00,0x1d,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_max3_i32_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX12: [0x05,0x00,0x1d,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_max3_i32_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX12: [0x05,0x00,0x1d,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_max3_i32_e64_dpp v5, v1, v2, ttmp15 row_shr:15
+// GFX12: [0x05,0x00,0x1d,0xd6,0xfa,0x04,0xee,0x01,0x01,0x1f,0x01,0xff]
+
+v_max3_i32_e64_dpp v5, v1, v2, exec_hi row_ror:1
+// GFX12: [0x05,0x00,0x1d,0xd6,0xfa,0x04,0xfe,0x01,0x01,0x21,0x01,0xff]
+
+v_max3_i32_e64_dpp v5, v1, v2, exec_lo row_ror:15
+// GFX12: [0x05,0x00,0x1d,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x2f,0x01,0xff]
+
+v_max3_i32_e64_dpp v5, v1, v2, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x00,0x1d,0xd6,0xfa,0x04,0xf2,0x01,0x01,0x50,0x01,0xff]
+
+v_max3_i32_e64_dpp v5, v1, v2, -1 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x00,0x1d,0xd6,0xfa,0x04,0x06,0x03,0x01,0x5f,0x01,0x01]
+
+v_max3_i32_e64_dpp v5, v1, v2, 0.5 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: [0x05,0x00,0x1d,0xd6,0xfa,0x04,0xc2,0x03,0x01,0x60,0x09,0x13]
+
+v_max3_i32_e64_dpp v255, v255, v255, src_scc row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: [0xff,0x00,0x1d,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x05,0x30]
+
+v_max3_u16_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x4e,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_max3_u16_e64_dpp v5, v1, s2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x4e,0xd6,0xfa,0x04,0x0c,0x04,0x01,0x1b,0x00,0xff]
+
+v_max3_u16_e64_dpp v5, v1, 10, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x4e,0xd6,0xfa,0x14,0x0d,0x04,0x01,0x1b,0x00,0xff]
+
+v_max3_u16_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX12: [0x05,0x00,0x4e,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_max3_u16_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX12: [0x05,0x00,0x4e,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_max3_u16_e64_dpp v5, v1, v2, v3 row_half_mirror
+// GFX12: [0x05,0x00,0x4e,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x41,0x01,0xff]
+
+v_max3_u16_e64_dpp v5, v1, v2, v255 row_shl:1
+// GFX12: [0x05,0x00,0x4e,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x01,0x01,0xff]
+
+v_max3_u16_e64_dpp v5, v1, v2, s105 row_shl:15
+// GFX12: [0x05,0x00,0x4e,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x0f,0x01,0xff]
+
+v_max3_u16_e64_dpp v5, v1, v2, vcc_hi row_shr:1
+// GFX12: [0x05,0x00,0x4e,0xd6,0xfa,0x04,0xae,0x01,0x01,0x11,0x01,0xff]
+
+v_max3_u16_e64_dpp v5, v1, v2, vcc_lo row_shr:15
+// GFX12: [0x05,0x00,0x4e,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x1f,0x01,0xff]
+
+v_max3_u16_e64_dpp v5, v1, v2, ttmp15 row_ror:1
+// GFX12: [0x05,0x00,0x4e,0xd6,0xfa,0x04,0xee,0x01,0x01,0x21,0x01,0xff]
+
+v_max3_u16_e64_dpp v5, v1, v2, exec_hi row_ror:15
+// GFX12: [0x05,0x00,0x4e,0xd6,0xfa,0x04,0xfe,0x01,0x01,0x2f,0x01,0xff]
+
+v_max3_u16_e64_dpp v5, v1, v2, exec_lo row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x00,0x4e,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x50,0x01,0xff]
+
+v_max3_u16_e64_dpp v5, v1, v2, null row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x00,0x4e,0xd6,0xfa,0x04,0xf2,0x01,0x01,0x5f,0x01,0x01]
+
+v_max3_u16_e64_dpp v5, v1, v2, -1 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: [0x05,0x00,0x4e,0xd6,0xfa,0x04,0x06,0x03,0x01,0x60,0x09,0x13]
+
+v_max3_u16_e64_dpp v255, v255, v255, src_scc row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: [0xff,0x00,0x4e,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x05,0x30]
+
+v_max3_u32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x1e,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_max3_u32_e64_dpp v5, v1, s2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x1e,0xd6,0xfa,0x04,0x0c,0x04,0x01,0x1b,0x00,0xff]
+
+v_max3_u32_e64_dpp v5, v1, 10, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x1e,0xd6,0xfa,0x14,0x0d,0x04,0x01,0x1b,0x00,0xff]
+
+v_max3_u32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX12: [0x05,0x00,0x1e,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_max3_u32_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX12: [0x05,0x00,0x1e,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_max3_u32_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX12: [0x05,0x00,0x1e,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_max3_u32_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX12: [0x05,0x00,0x1e,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_max3_u32_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX12: [0x05,0x00,0x1e,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_max3_u32_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX12: [0x05,0x00,0x1e,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_max3_u32_e64_dpp v5, v1, v2, ttmp15 row_shr:15
+// GFX12: [0x05,0x00,0x1e,0xd6,0xfa,0x04,0xee,0x01,0x01,0x1f,0x01,0xff]
+
+v_max3_u32_e64_dpp v5, v1, v2, exec_hi row_ror:1
+// GFX12: [0x05,0x00,0x1e,0xd6,0xfa,0x04,0xfe,0x01,0x01,0x21,0x01,0xff]
+
+v_max3_u32_e64_dpp v5, v1, v2, exec_lo row_ror:15
+// GFX12: [0x05,0x00,0x1e,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x2f,0x01,0xff]
+
+v_max3_u32_e64_dpp v5, v1, v2, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x00,0x1e,0xd6,0xfa,0x04,0xf2,0x01,0x01,0x50,0x01,0xff]
+
+v_max3_u32_e64_dpp v5, v1, v2, -1 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x00,0x1e,0xd6,0xfa,0x04,0x06,0x03,0x01,0x5f,0x01,0x01]
+
+v_max3_u32_e64_dpp v5, v1, v2, 0.5 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: [0x05,0x00,0x1e,0xd6,0xfa,0x04,0xc2,0x03,0x01,0x60,0x09,0x13]
+
+v_max3_u32_e64_dpp v255, v255, v255, src_scc row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: [0xff,0x00,0x1e,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x05,0x30]
+
+v_max_i16_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x0a,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+
+v_max_i16_e64_dpp v5, v1, v2 quad_perm:[0,1,2,3]
+// GFX12: [0x05,0x00,0x0a,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+
+v_max_i16_e64_dpp v5, v1, v2 row_mirror
+// GFX12: [0x05,0x00,0x0a,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+
+v_max_i16_e64_dpp v5, v1, v2 row_half_mirror
+// GFX12: [0x05,0x00,0x0a,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+
+v_max_i16_e64_dpp v5, v1, v2 row_shl:1
+// GFX12: [0x05,0x00,0x0a,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+
+v_max_i16_e64_dpp v5, v1, v2 row_shl:15
+// GFX12: [0x05,0x00,0x0a,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+
+v_max_i16_e64_dpp v5, v1, v2 row_shr:1
+// GFX12: [0x05,0x00,0x0a,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+
+v_max_i16_e64_dpp v5, v1, v2 row_shr:15
+// GFX12: [0x05,0x00,0x0a,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+
+v_max_i16_e64_dpp v5, v1, v2 row_ror:1
+// GFX12: [0x05,0x00,0x0a,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+
+v_max_i16_e64_dpp v5, v1, v2 row_ror:15
+// GFX12: [0x05,0x00,0x0a,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+
+v_max_i16_e64_dpp v5, v1, v2 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x00,0x0a,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+
+v_max_i16_e64_dpp v5, v1, v2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x00,0x0a,0xd7,0xfa,0x04,0x02,0x00,0x01,0x5f,0x01,0x01]
+
+v_max_i16_e64_dpp v5, v1, v2 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: [0x05,0x00,0x0a,0xd7,0xfa,0x04,0x02,0x00,0x01,0x60,0x09,0x13]
+
+v_max_i16_e64_dpp v255, v255, v255 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: [0xff,0x00,0x0a,0xd7,0xfa,0xfe,0x03,0x00,0xff,0x6f,0x05,0x30]
+
+v_max_u16_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x09,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+
+v_max_u16_e64_dpp v5, v1, v2 quad_perm:[0,1,2,3]
+// GFX12: [0x05,0x00,0x09,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+
+v_max_u16_e64_dpp v5, v1, v2 row_mirror
+// GFX12: [0x05,0x00,0x09,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+
+v_max_u16_e64_dpp v5, v1, v2 row_half_mirror
+// GFX12: [0x05,0x00,0x09,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+
+v_max_u16_e64_dpp v5, v1, v2 row_shl:1
+// GFX12: [0x05,0x00,0x09,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+
+v_max_u16_e64_dpp v5, v1, v2 row_shl:15
+// GFX12: [0x05,0x00,0x09,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+
+v_max_u16_e64_dpp v5, v1, v2 row_shr:1
+// GFX12: [0x05,0x00,0x09,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+
+v_max_u16_e64_dpp v5, v1, v2 row_shr:15
+// GFX12: [0x05,0x00,0x09,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+
+v_max_u16_e64_dpp v5, v1, v2 row_ror:1
+// GFX12: [0x05,0x00,0x09,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+
+v_max_u16_e64_dpp v5, v1, v2 row_ror:15
+// GFX12: [0x05,0x00,0x09,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+
+v_max_u16_e64_dpp v5, v1, v2 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x00,0x09,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+
+v_max_u16_e64_dpp v5, v1, v2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x00,0x09,0xd7,0xfa,0x04,0x02,0x00,0x01,0x5f,0x01,0x01]
+
+v_max_u16_e64_dpp v5, v1, v2 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: [0x05,0x00,0x09,0xd7,0xfa,0x04,0x02,0x00,0x01,0x60,0x09,0x13]
+
+v_max_u16_e64_dpp v255, v255, v255 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: [0xff,0x00,0x09,0xd7,0xfa,0xfe,0x03,0x00,0xff,0x6f,0x05,0x30]
+
+v_maxmin_num_f16_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x6b,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_maxmin_num_f16_e64_dpp v5, v1, s2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x6b,0xd6,0xfa,0x04,0x0c,0x04,0x01,0x1b,0x00,0xff]
+
+v_maxmin_num_f16_e64_dpp v5, v1, 2.0, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x6b,0xd6,0xfa,0xe8,0x0d,0x04,0x01,0x1b,0x00,0xff]
+
+v_maxmin_num_f16_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX12: [0x05,0x00,0x6b,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_maxmin_num_f16_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX12: [0x05,0x00,0x6b,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_maxmin_num_f16_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX12: [0x05,0x00,0x6b,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_maxmin_num_f16_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX12: [0x05,0x00,0x6b,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_maxmin_num_f16_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX12: [0x05,0x00,0x6b,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_maxmin_num_f16_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX12: [0x05,0x00,0x6b,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_maxmin_num_f16_e64_dpp v5, |v1|, v2, -ttmp15 row_shr:15
+// GFX12: [0x05,0x01,0x6b,0xd6,0xfa,0x04,0xee,0x81,0x01,0x1f,0x01,0xff]
+
+v_maxmin_num_f16_e64_dpp v5, v1, -|v2|, exec_hi row_ror:1
+// GFX12: [0x05,0x02,0x6b,0xd6,0xfa,0x04,0xfe,0x41,0x01,0x21,0x01,0xff]
+
+v_maxmin_num_f16_e64_dpp v5, -v1, v2, |exec_lo| row_ror:15
+// GFX12: [0x05,0x04,0x6b,0xd6,0xfa,0x04,0xfa,0x21,0x01,0x2f,0x01,0xff]
+
+v_maxmin_num_f16_e64_dpp v5, -|v1|, -|v2|, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x03,0x6b,0xd6,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff]
+
+v_maxmin_num_f16_e64_dpp v5, -|v1|, v2, -|-1| mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x05,0x6b,0xd6,0xfa,0x04,0x06,0xab,0x01,0x5f,0x01,0x01]
+
+v_maxmin_num_f16_e64_dpp v5, v1, -|v2|, -|0.5| mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: [0x05,0x06,0x6b,0xd6,0xfa,0x04,0xc2,0xd3,0x01,0x60,0x09,0x13]
+
+v_maxmin_num_f16_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: [0xff,0x87,0x6b,0xd6,0xfa,0xfe,0xf7,0xfb,0xff,0x6f,0x05,0x30]
+
+v_maxmin_num_f32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x69,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_maxmin_num_f32_e64_dpp v5, v1, s2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x69,0xd6,0xfa,0x04,0x0c,0x04,0x01,0x1b,0x00,0xff]
+
+v_maxmin_num_f32_e64_dpp v5, v1, 2.0, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x69,0xd6,0xfa,0xe8,0x0d,0x04,0x01,0x1b,0x00,0xff]
+
+v_maxmin_num_f32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX12: [0x05,0x00,0x69,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_maxmin_num_f32_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX12: [0x05,0x00,0x69,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_maxmin_num_f32_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX12: [0x05,0x00,0x69,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_maxmin_num_f32_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX12: [0x05,0x00,0x69,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_maxmin_num_f32_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX12: [0x05,0x00,0x69,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_maxmin_num_f32_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX12: [0x05,0x00,0x69,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_maxmin_num_f32_e64_dpp v5, |v1|, v2, -ttmp15 row_shr:15
+// GFX12: [0x05,0x01,0x69,0xd6,0xfa,0x04,0xee,0x81,0x01,0x1f,0x01,0xff]
+
+v_maxmin_num_f32_e64_dpp v5, v1, -|v2|, exec_hi row_ror:1
+// GFX12: [0x05,0x02,0x69,0xd6,0xfa,0x04,0xfe,0x41,0x01,0x21,0x01,0xff]
+
+v_maxmin_num_f32_e64_dpp v5, -v1, v2, |exec_lo| row_ror:15
+// GFX12: [0x05,0x04,0x69,0xd6,0xfa,0x04,0xfa,0x21,0x01,0x2f,0x01,0xff]
+
+v_maxmin_num_f32_e64_dpp v5, -|v1|, -|v2|, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x03,0x69,0xd6,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff]
+
+v_maxmin_num_f32_e64_dpp v5, -|v1|, v2, -|-1| mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x05,0x69,0xd6,0xfa,0x04,0x06,0xab,0x01,0x5f,0x01,0x01]
+
+v_maxmin_num_f32_e64_dpp v5, v1, -|v2|, -|0.5| mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: [0x05,0x06,0x69,0xd6,0xfa,0x04,0xc2,0xd3,0x01,0x60,0x09,0x13]
+
+v_maxmin_num_f32_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: [0xff,0x87,0x69,0xd6,0xfa,0xfe,0xf7,0xfb,0xff,0x6f,0x05,0x30]
+
+v_maxmin_i32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x64,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_maxmin_i32_e64_dpp v5, v1, s2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x64,0xd6,0xfa,0x04,0x0c,0x04,0x01,0x1b,0x00,0xff]
+
+v_maxmin_i32_e64_dpp v5, v1, 10, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x64,0xd6,0xfa,0x14,0x0d,0x04,0x01,0x1b,0x00,0xff]
+
+v_maxmin_i32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX12: [0x05,0x00,0x64,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_maxmin_i32_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX12: [0x05,0x00,0x64,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_maxmin_i32_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX12: [0x05,0x00,0x64,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_maxmin_i32_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX12: [0x05,0x00,0x64,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_maxmin_i32_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX12: [0x05,0x00,0x64,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_maxmin_i32_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX12: [0x05,0x00,0x64,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_maxmin_i32_e64_dpp v5, v1, v2, ttmp15 row_shr:15
+// GFX12: [0x05,0x00,0x64,0xd6,0xfa,0x04,0xee,0x01,0x01,0x1f,0x01,0xff]
+
+v_maxmin_i32_e64_dpp v5, v1, v2, exec_hi row_ror:1
+// GFX12: [0x05,0x00,0x64,0xd6,0xfa,0x04,0xfe,0x01,0x01,0x21,0x01,0xff]
+
+v_maxmin_i32_e64_dpp v5, v1, v2, exec_lo row_ror:15
+// GFX12: [0x05,0x00,0x64,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x2f,0x01,0xff]
+
+v_maxmin_i32_e64_dpp v5, v1, v2, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x00,0x64,0xd6,0xfa,0x04,0xf2,0x01,0x01,0x50,0x01,0xff]
+
+v_maxmin_i32_e64_dpp v5, v1, v2, -1 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x00,0x64,0xd6,0xfa,0x04,0x06,0x03,0x01,0x5f,0x01,0x01]
+
+v_maxmin_i32_e64_dpp v5, v1, v2, 0.5 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: [0x05,0x00,0x64,0xd6,0xfa,0x04,0xc2,0x03,0x01,0x60,0x09,0x13]
+
+v_maxmin_i32_e64_dpp v255, v255, v255, src_scc row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: [0xff,0x00,0x64,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x05,0x30]
+
+v_maxmin_u32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x62,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_maxmin_u32_e64_dpp v5, v1, s2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x62,0xd6,0xfa,0x04,0x0c,0x04,0x01,0x1b,0x00,0xff]
+
+v_maxmin_u32_e64_dpp v5, v1, 10, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x62,0xd6,0xfa,0x14,0x0d,0x04,0x01,0x1b,0x00,0xff]
+
+v_maxmin_u32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX12: [0x05,0x00,0x62,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_maxmin_u32_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX12: [0x05,0x00,0x62,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_maxmin_u32_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX12: [0x05,0x00,0x62,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_maxmin_u32_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX12: [0x05,0x00,0x62,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_maxmin_u32_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX12: [0x05,0x00,0x62,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_maxmin_u32_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX12: [0x05,0x00,0x62,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_maxmin_u32_e64_dpp v5, v1, v2, ttmp15 row_shr:15
+// GFX12: [0x05,0x00,0x62,0xd6,0xfa,0x04,0xee,0x01,0x01,0x1f,0x01,0xff]
+
+v_maxmin_u32_e64_dpp v5, v1, v2, exec_hi row_ror:1
+// GFX12: [0x05,0x00,0x62,0xd6,0xfa,0x04,0xfe,0x01,0x01,0x21,0x01,0xff]
+
+v_maxmin_u32_e64_dpp v5, v1, v2, exec_lo row_ror:15
+// GFX12: [0x05,0x00,0x62,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x2f,0x01,0xff]
+
+v_maxmin_u32_e64_dpp v5, v1, v2, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x00,0x62,0xd6,0xfa,0x04,0xf2,0x01,0x01,0x50,0x01,0xff]
+
+v_maxmin_u32_e64_dpp v5, v1, v2, -1 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x00,0x62,0xd6,0xfa,0x04,0x06,0x03,0x01,0x5f,0x01,0x01]
+
+v_maxmin_u32_e64_dpp v5, v1, v2, 0.5 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: [0x05,0x00,0x62,0xd6,0xfa,0x04,0xc2,0x03,0x01,0x60,0x09,0x13]
+
+v_maxmin_u32_e64_dpp v255, v255, v255, src_scc row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: [0xff,0x00,0x62,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x05,0x30]
+
+v_mbcnt_hi_u32_b32_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x20,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+
+v_mbcnt_hi_u32_b32_e64_dpp v5, v1, v2 quad_perm:[0,1,2,3]
+// GFX12: [0x05,0x00,0x20,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+
+v_mbcnt_hi_u32_b32_e64_dpp v5, v1, v2 row_mirror
+// GFX12: [0x05,0x00,0x20,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+
+v_mbcnt_hi_u32_b32_e64_dpp v5, v1, v2 row_half_mirror
+// GFX12: [0x05,0x00,0x20,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+
+v_mbcnt_hi_u32_b32_e64_dpp v5, v1, v2 row_shl:1
+// GFX12: [0x05,0x00,0x20,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+
+v_mbcnt_hi_u32_b32_e64_dpp v5, v1, v2 row_shl:15
+// GFX12: [0x05,0x00,0x20,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+
+v_mbcnt_hi_u32_b32_e64_dpp v5, v1, v2 row_shr:1
+// GFX12: [0x05,0x00,0x20,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+
+v_mbcnt_hi_u32_b32_e64_dpp v5, v1, v2 row_shr:15
+// GFX12: [0x05,0x00,0x20,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+
+v_mbcnt_hi_u32_b32_e64_dpp v5, v1, v2 row_ror:1
+// GFX12: [0x05,0x00,0x20,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+
+v_mbcnt_hi_u32_b32_e64_dpp v5, v1, v2 row_ror:15
+// GFX12: [0x05,0x00,0x20,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+
+v_mbcnt_hi_u32_b32_e64_dpp v5, v1, v2 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x00,0x20,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+
+v_mbcnt_hi_u32_b32_e64_dpp v5, v1, v2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x00,0x20,0xd7,0xfa,0x04,0x02,0x00,0x01,0x5f,0x01,0x01]
+
+v_mbcnt_hi_u32_b32_e64_dpp v5, v1, v2 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: [0x05,0x00,0x20,0xd7,0xfa,0x04,0x02,0x00,0x01,0x60,0x09,0x13]
+
+v_mbcnt_hi_u32_b32_e64_dpp v255, v255, v255 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: [0xff,0x00,0x20,0xd7,0xfa,0xfe,0x03,0x00,0xff,0x6f,0x05,0x30]
+
+v_mbcnt_lo_u32_b32_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x1f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+
+v_mbcnt_lo_u32_b32_e64_dpp v5, v1, v2 quad_perm:[0,1,2,3]
+// GFX12: [0x05,0x00,0x1f,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+
+v_mbcnt_lo_u32_b32_e64_dpp v5, v1, v2 row_mirror
+// GFX12: [0x05,0x00,0x1f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+
+v_mbcnt_lo_u32_b32_e64_dpp v5, v1, v2 row_half_mirror
+// GFX12: [0x05,0x00,0x1f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+
+v_mbcnt_lo_u32_b32_e64_dpp v5, v1, v2 row_shl:1
+// GFX12: [0x05,0x00,0x1f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+
+v_mbcnt_lo_u32_b32_e64_dpp v5, v1, v2 row_shl:15
+// GFX12: [0x05,0x00,0x1f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+
+v_mbcnt_lo_u32_b32_e64_dpp v5, v1, v2 row_shr:1
+// GFX12: [0x05,0x00,0x1f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+
+v_mbcnt_lo_u32_b32_e64_dpp v5, v1, v2 row_shr:15
+// GFX12: [0x05,0x00,0x1f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+
+v_mbcnt_lo_u32_b32_e64_dpp v5, v1, v2 row_ror:1
+// GFX12: [0x05,0x00,0x1f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+
+v_mbcnt_lo_u32_b32_e64_dpp v5, v1, v2 row_ror:15
+// GFX12: [0x05,0x00,0x1f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+
+v_mbcnt_lo_u32_b32_e64_dpp v5, v1, v2 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x00,0x1f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+
+v_mbcnt_lo_u32_b32_e64_dpp v5, v1, v2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x00,0x1f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x5f,0x01,0x01]
+
+v_mbcnt_lo_u32_b32_e64_dpp v5, v1, v2 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: [0x05,0x00,0x1f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x60,0x09,0x13]
+
+v_mbcnt_lo_u32_b32_e64_dpp v255, v255, v255 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: [0xff,0x00,0x1f,0xd7,0xfa,0xfe,0x03,0x00,0xff,0x6f,0x05,0x30]
+
+v_med3_num_f16_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x32,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_med3_num_f16_e64_dpp v5, v1, s2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x32,0xd6,0xfa,0x04,0x0c,0x04,0x01,0x1b,0x00,0xff]
+
+v_med3_num_f16_e64_dpp v5, v1, 2.0, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x32,0xd6,0xfa,0xe8,0x0d,0x04,0x01,0x1b,0x00,0xff]
+
+v_med3_num_f16_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX12: [0x05,0x00,0x32,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_med3_num_f16_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX12: [0x05,0x00,0x32,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_med3_num_f16_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX12: [0x05,0x00,0x32,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_med3_num_f16_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX12: [0x05,0x00,0x32,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_med3_num_f16_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX12: [0x05,0x00,0x32,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_med3_num_f16_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX12: [0x05,0x00,0x32,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_med3_num_f16_e64_dpp v5, |v1|, v2, -ttmp15 row_shr:15
+// GFX12: [0x05,0x01,0x32,0xd6,0xfa,0x04,0xee,0x81,0x01,0x1f,0x01,0xff]
+
+v_med3_num_f16_e64_dpp v5, v1, -|v2|, exec_hi row_ror:1
+// GFX12: [0x05,0x02,0x32,0xd6,0xfa,0x04,0xfe,0x41,0x01,0x21,0x01,0xff]
+
+v_med3_num_f16_e64_dpp v5, -v1, v2, |exec_lo| row_ror:15
+// GFX12: [0x05,0x04,0x32,0xd6,0xfa,0x04,0xfa,0x21,0x01,0x2f,0x01,0xff]
+
+v_med3_num_f16_e64_dpp v5, -|v1|, -|v2|, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x03,0x32,0xd6,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff]
+
+v_med3_num_f16_e64_dpp v5, -|v1|, v2, -|-1| row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x05,0x32,0xd6,0xfa,0x04,0x06,0xa3,0x01,0x5f,0x01,0x01]
+
+v_med3_num_f16_e64_dpp v5, v1, -|v2|, -|0.5| row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: [0x05,0x06,0x32,0xd6,0xfa,0x04,0xc2,0xc3,0x01,0x60,0x09,0x13]
+
+v_med3_num_f16_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: [0xff,0x87,0x32,0xd6,0xfa,0xfe,0xf7,0xe3,0xff,0x6f,0x05,0x30]
+
+v_med3_num_f32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x31,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_med3_num_f32_e64_dpp v5, v1, s2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x31,0xd6,0xfa,0x04,0x0c,0x04,0x01,0x1b,0x00,0xff]
+
+v_med3_num_f32_e64_dpp v5, v1, 2.0, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x31,0xd6,0xfa,0xe8,0x0d,0x04,0x01,0x1b,0x00,0xff]
+
+v_med3_num_f32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX12: [0x05,0x00,0x31,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_med3_num_f32_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX12: [0x05,0x00,0x31,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_med3_num_f32_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX12: [0x05,0x00,0x31,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_med3_num_f32_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX12: [0x05,0x00,0x31,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_med3_num_f32_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX12: [0x05,0x00,0x31,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_med3_num_f32_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX12: [0x05,0x00,0x31,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_med3_num_f32_e64_dpp v5, |v1|, v2, -ttmp15 row_shr:15
+// GFX12: [0x05,0x01,0x31,0xd6,0xfa,0x04,0xee,0x81,0x01,0x1f,0x01,0xff]
+
+v_med3_num_f32_e64_dpp v5, v1, -|v2|, exec_hi row_ror:1
+// GFX12: [0x05,0x02,0x31,0xd6,0xfa,0x04,0xfe,0x41,0x01,0x21,0x01,0xff]
+
+v_med3_num_f32_e64_dpp v5, -v1, v2, |exec_lo| row_ror:15
+// GFX12: [0x05,0x04,0x31,0xd6,0xfa,0x04,0xfa,0x21,0x01,0x2f,0x01,0xff]
+
+v_med3_num_f32_e64_dpp v5, -|v1|, -|v2|, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x03,0x31,0xd6,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff]
+
+v_med3_num_f32_e64_dpp v5, -|v1|, v2, -|-1| mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x05,0x31,0xd6,0xfa,0x04,0x06,0xab,0x01,0x5f,0x01,0x01]
+
+v_med3_num_f32_e64_dpp v5, v1, -|v2|, -|0.5| mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: [0x05,0x06,0x31,0xd6,0xfa,0x04,0xc2,0xd3,0x01,0x60,0x09,0x13]
+
+v_med3_num_f32_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: [0xff,0x87,0x31,0xd6,0xfa,0xfe,0xf7,0xfb,0xff,0x6f,0x05,0x30]
+
+v_med3_i16_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x50,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_med3_i16_e64_dpp v5, v1, s2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x50,0xd6,0xfa,0x04,0x0c,0x04,0x01,0x1b,0x00,0xff]
+
+v_med3_i16_e64_dpp v5, v1, 10, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x50,0xd6,0xfa,0x14,0x0d,0x04,0x01,0x1b,0x00,0xff]
+
+v_med3_i16_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX12: [0x05,0x00,0x50,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_med3_i16_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX12: [0x05,0x00,0x50,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_med3_i16_e64_dpp v5, v1, v2, v3 row_half_mirror
+// GFX12: [0x05,0x00,0x50,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x41,0x01,0xff]
+
+v_med3_i16_e64_dpp v5, v1, v2, v255 row_shl:1
+// GFX12: [0x05,0x00,0x50,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x01,0x01,0xff]
+
+v_med3_i16_e64_dpp v5, v1, v2, s105 row_shl:15
+// GFX12: [0x05,0x00,0x50,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x0f,0x01,0xff]
+
+v_med3_i16_e64_dpp v5, v1, v2, vcc_hi row_shr:1
+// GFX12: [0x05,0x00,0x50,0xd6,0xfa,0x04,0xae,0x01,0x01,0x11,0x01,0xff]
+
+v_med3_i16_e64_dpp v5, v1, v2, vcc_lo row_shr:15
+// GFX12: [0x05,0x00,0x50,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x1f,0x01,0xff]
+
+v_med3_i16_e64_dpp v5, v1, v2, ttmp15 row_ror:1
+// GFX12: [0x05,0x00,0x50,0xd6,0xfa,0x04,0xee,0x01,0x01,0x21,0x01,0xff]
+
+v_med3_i16_e64_dpp v5, v1, v2, exec_hi row_ror:15
+// GFX12: [0x05,0x00,0x50,0xd6,0xfa,0x04,0xfe,0x01,0x01,0x2f,0x01,0xff]
+
+v_med3_i16_e64_dpp v5, v1, v2, exec_lo row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x00,0x50,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x50,0x01,0xff]
+
+v_med3_i16_e64_dpp v5, v1, v2, null row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x00,0x50,0xd6,0xfa,0x04,0xf2,0x01,0x01,0x5f,0x01,0x01]
+
+v_med3_i16_e64_dpp v5, v1, v2, -1 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: [0x05,0x00,0x50,0xd6,0xfa,0x04,0x06,0x03,0x01,0x60,0x09,0x13]
+
+v_med3_i16_e64_dpp v255, v255, v255, src_scc row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: [0xff,0x00,0x50,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x05,0x30]
+
+v_med3_i32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x20,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_med3_i32_e64_dpp v5, v1, s2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x20,0xd6,0xfa,0x04,0x0c,0x04,0x01,0x1b,0x00,0xff]
+
+v_med3_i32_e64_dpp v5, v1, 10, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x20,0xd6,0xfa,0x14,0x0d,0x04,0x01,0x1b,0x00,0xff]
+
+v_med3_i32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX12: [0x05,0x00,0x20,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_med3_i32_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX12: [0x05,0x00,0x20,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_med3_i32_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX12: [0x05,0x00,0x20,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_med3_i32_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX12: [0x05,0x00,0x20,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_med3_i32_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX12: [0x05,0x00,0x20,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_med3_i32_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX12: [0x05,0x00,0x20,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_med3_i32_e64_dpp v5, v1, v2, ttmp15 row_shr:15
+// GFX12: [0x05,0x00,0x20,0xd6,0xfa,0x04,0xee,0x01,0x01,0x1f,0x01,0xff]
+
+v_med3_i32_e64_dpp v5, v1, v2, exec_hi row_ror:1
+// GFX12: [0x05,0x00,0x20,0xd6,0xfa,0x04,0xfe,0x01,0x01,0x21,0x01,0xff]
+
+v_med3_i32_e64_dpp v5, v1, v2, exec_lo row_ror:15
+// GFX12: [0x05,0x00,0x20,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x2f,0x01,0xff]
+
+v_med3_i32_e64_dpp v5, v1, v2, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x00,0x20,0xd6,0xfa,0x04,0xf2,0x01,0x01,0x50,0x01,0xff]
+
+v_med3_i32_e64_dpp v5, v1, v2, -1 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x00,0x20,0xd6,0xfa,0x04,0x06,0x03,0x01,0x5f,0x01,0x01]
+
+v_med3_i32_e64_dpp v5, v1, v2, 0.5 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: [0x05,0x00,0x20,0xd6,0xfa,0x04,0xc2,0x03,0x01,0x60,0x09,0x13]
+
+v_med3_i32_e64_dpp v255, v255, v255, src_scc row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: [0xff,0x00,0x20,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x05,0x30]
+
+v_med3_u16_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x51,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_med3_u16_e64_dpp v5, v1, s2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x51,0xd6,0xfa,0x04,0x0c,0x04,0x01,0x1b,0x00,0xff]
+
+v_med3_u16_e64_dpp v5, v1, 10, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x51,0xd6,0xfa,0x14,0x0d,0x04,0x01,0x1b,0x00,0xff]
+
+v_med3_u16_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX12: [0x05,0x00,0x51,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_med3_u16_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX12: [0x05,0x00,0x51,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_med3_u16_e64_dpp v5, v1, v2, v3 row_half_mirror
+// GFX12: [0x05,0x00,0x51,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x41,0x01,0xff]
+
+v_med3_u16_e64_dpp v5, v1, v2, v255 row_shl:1
+// GFX12: [0x05,0x00,0x51,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x01,0x01,0xff]
+
+v_med3_u16_e64_dpp v5, v1, v2, s105 row_shl:15
+// GFX12: [0x05,0x00,0x51,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x0f,0x01,0xff]
+
+v_med3_u16_e64_dpp v5, v1, v2, vcc_hi row_shr:1
+// GFX12: [0x05,0x00,0x51,0xd6,0xfa,0x04,0xae,0x01,0x01,0x11,0x01,0xff]
+
+v_med3_u16_e64_dpp v5, v1, v2, vcc_lo row_shr:15
+// GFX12: [0x05,0x00,0x51,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x1f,0x01,0xff]
+
+v_med3_u16_e64_dpp v5, v1, v2, ttmp15 row_ror:1
+// GFX12: [0x05,0x00,0x51,0xd6,0xfa,0x04,0xee,0x01,0x01,0x21,0x01,0xff]
+
+v_med3_u16_e64_dpp v5, v1, v2, exec_hi row_ror:15
+// GFX12: [0x05,0x00,0x51,0xd6,0xfa,0x04,0xfe,0x01,0x01,0x2f,0x01,0xff]
+
+v_med3_u16_e64_dpp v5, v1, v2, exec_lo row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x00,0x51,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x50,0x01,0xff]
+
+v_med3_u16_e64_dpp v5, v1, v2, null row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x00,0x51,0xd6,0xfa,0x04,0xf2,0x01,0x01,0x5f,0x01,0x01]
+
+v_med3_u16_e64_dpp v5, v1, v2, -1 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: [0x05,0x00,0x51,0xd6,0xfa,0x04,0x06,0x03,0x01,0x60,0x09,0x13]
+
+v_med3_u16_e64_dpp v255, v255, v255, src_scc row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: [0xff,0x00,0x51,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x05,0x30]
+
+v_med3_u32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x21,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_med3_u32_e64_dpp v5, v1, s2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x21,0xd6,0xfa,0x04,0x0c,0x04,0x01,0x1b,0x00,0xff]
+
+v_med3_u32_e64_dpp v5, v1, 10, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x21,0xd6,0xfa,0x14,0x0d,0x04,0x01,0x1b,0x00,0xff]
+
+v_med3_u32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX12: [0x05,0x00,0x21,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_med3_u32_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX12: [0x05,0x00,0x21,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_med3_u32_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX12: [0x05,0x00,0x21,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_med3_u32_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX12: [0x05,0x00,0x21,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_med3_u32_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX12: [0x05,0x00,0x21,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_med3_u32_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX12: [0x05,0x00,0x21,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_med3_u32_e64_dpp v5, v1, v2, ttmp15 row_shr:15
+// GFX12: [0x05,0x00,0x21,0xd6,0xfa,0x04,0xee,0x01,0x01,0x1f,0x01,0xff]
+
+v_med3_u32_e64_dpp v5, v1, v2, exec_hi row_ror:1
+// GFX12: [0x05,0x00,0x21,0xd6,0xfa,0x04,0xfe,0x01,0x01,0x21,0x01,0xff]
+
+v_med3_u32_e64_dpp v5, v1, v2, exec_lo row_ror:15
+// GFX12: [0x05,0x00,0x21,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x2f,0x01,0xff]
+
+v_med3_u32_e64_dpp v5, v1, v2, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x00,0x21,0xd6,0xfa,0x04,0xf2,0x01,0x01,0x50,0x01,0xff]
+
+v_med3_u32_e64_dpp v5, v1, v2, -1 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x00,0x21,0xd6,0xfa,0x04,0x06,0x03,0x01,0x5f,0x01,0x01]
+
+v_med3_u32_e64_dpp v5, v1, v2, 0.5 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: [0x05,0x00,0x21,0xd6,0xfa,0x04,0xc2,0x03,0x01,0x60,0x09,0x13]
+
+v_med3_u32_e64_dpp v255, v255, v255, src_scc row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: [0xff,0x00,0x21,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x05,0x30]
+
+v_min3_num_f16_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x2b,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_min3_num_f16_e64_dpp v5, v1, s2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x2b,0xd6,0xfa,0x04,0x0c,0x04,0x01,0x1b,0x00,0xff]
+
+v_min3_num_f16_e64_dpp v5, v1, 2.0, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x2b,0xd6,0xfa,0xe8,0x0d,0x04,0x01,0x1b,0x00,0xff]
+
+v_min3_num_f16_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX12: [0x05,0x00,0x2b,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_min3_num_f16_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX12: [0x05,0x00,0x2b,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_min3_num_f16_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX12: [0x05,0x00,0x2b,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_min3_num_f16_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX12: [0x05,0x00,0x2b,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_min3_num_f16_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX12: [0x05,0x00,0x2b,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_min3_num_f16_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX12: [0x05,0x00,0x2b,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_min3_num_f16_e64_dpp v5, |v1|, v2, -ttmp15 row_shr:15
+// GFX12: [0x05,0x01,0x2b,0xd6,0xfa,0x04,0xee,0x81,0x01,0x1f,0x01,0xff]
+
+v_min3_num_f16_e64_dpp v5, v1, -|v2|, exec_hi row_ror:1
+// GFX12: [0x05,0x02,0x2b,0xd6,0xfa,0x04,0xfe,0x41,0x01,0x21,0x01,0xff]
+
+v_min3_num_f16_e64_dpp v5, -v1, v2, |exec_lo| row_ror:15
+// GFX12: [0x05,0x04,0x2b,0xd6,0xfa,0x04,0xfa,0x21,0x01,0x2f,0x01,0xff]
+
+v_min3_num_f16_e64_dpp v5, -|v1|, -|v2|, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x03,0x2b,0xd6,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff]
+
+v_min3_num_f16_e64_dpp v5, -|v1|, v2, -|-1| row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x05,0x2b,0xd6,0xfa,0x04,0x06,0xa3,0x01,0x5f,0x01,0x01]
+
+v_min3_num_f16_e64_dpp v5, v1, -|v2|, -|0.5| row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: [0x05,0x06,0x2b,0xd6,0xfa,0x04,0xc2,0xc3,0x01,0x60,0x09,0x13]
+
+v_min3_num_f16_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: [0xff,0x87,0x2b,0xd6,0xfa,0xfe,0xf7,0xe3,0xff,0x6f,0x05,0x30]
+
+v_min3_num_f32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x29,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_min3_num_f32_e64_dpp v5, v1, s2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x29,0xd6,0xfa,0x04,0x0c,0x04,0x01,0x1b,0x00,0xff]
+
+v_min3_num_f32_e64_dpp v5, v1, 2.0, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x29,0xd6,0xfa,0xe8,0x0d,0x04,0x01,0x1b,0x00,0xff]
+
+v_min3_num_f32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX12: [0x05,0x00,0x29,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_min3_num_f32_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX12: [0x05,0x00,0x29,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_min3_num_f32_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX12: [0x05,0x00,0x29,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_min3_num_f32_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX12: [0x05,0x00,0x29,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_min3_num_f32_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX12: [0x05,0x00,0x29,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_min3_num_f32_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX12: [0x05,0x00,0x29,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_min3_num_f32_e64_dpp v5, |v1|, v2, -ttmp15 row_shr:15
+// GFX12: [0x05,0x01,0x29,0xd6,0xfa,0x04,0xee,0x81,0x01,0x1f,0x01,0xff]
+
+v_min3_num_f32_e64_dpp v5, v1, -|v2|, exec_hi row_ror:1
+// GFX12: [0x05,0x02,0x29,0xd6,0xfa,0x04,0xfe,0x41,0x01,0x21,0x01,0xff]
+
+v_min3_num_f32_e64_dpp v5, -v1, v2, |exec_lo| row_ror:15
+// GFX12: [0x05,0x04,0x29,0xd6,0xfa,0x04,0xfa,0x21,0x01,0x2f,0x01,0xff]
+
+v_min3_num_f32_e64_dpp v5, -|v1|, -|v2|, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x03,0x29,0xd6,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff]
+
+v_min3_num_f32_e64_dpp v5, -|v1|, v2, -|-1| mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x05,0x29,0xd6,0xfa,0x04,0x06,0xab,0x01,0x5f,0x01,0x01]
+
+v_min3_num_f32_e64_dpp v5, v1, -|v2|, -|0.5| mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: [0x05,0x06,0x29,0xd6,0xfa,0x04,0xc2,0xd3,0x01,0x60,0x09,0x13]
+
+v_min3_num_f32_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: [0xff,0x87,0x29,0xd6,0xfa,0xfe,0xf7,0xfb,0xff,0x6f,0x05,0x30]
+
+v_min3_i16_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x4a,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_min3_i16_e64_dpp v5, v1, s2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x4a,0xd6,0xfa,0x04,0x0c,0x04,0x01,0x1b,0x00,0xff]
+
+v_min3_i16_e64_dpp v5, v1, 10, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x4a,0xd6,0xfa,0x14,0x0d,0x04,0x01,0x1b,0x00,0xff]
+
+v_min3_i16_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX12: [0x05,0x00,0x4a,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_min3_i16_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX12: [0x05,0x00,0x4a,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_min3_i16_e64_dpp v5, v1, v2, v3 row_half_mirror
+// GFX12: [0x05,0x00,0x4a,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x41,0x01,0xff]
+
+v_min3_i16_e64_dpp v5, v1, v2, v255 row_shl:1
+// GFX12: [0x05,0x00,0x4a,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x01,0x01,0xff]
+
+v_min3_i16_e64_dpp v5, v1, v2, s105 row_shl:15
+// GFX12: [0x05,0x00,0x4a,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x0f,0x01,0xff]
+
+v_min3_i16_e64_dpp v5, v1, v2, vcc_hi row_shr:1
+// GFX12: [0x05,0x00,0x4a,0xd6,0xfa,0x04,0xae,0x01,0x01,0x11,0x01,0xff]
+
+v_min3_i16_e64_dpp v5, v1, v2, vcc_lo row_shr:15
+// GFX12: [0x05,0x00,0x4a,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x1f,0x01,0xff]
+
+v_min3_i16_e64_dpp v5, v1, v2, ttmp15 row_ror:1
+// GFX12: [0x05,0x00,0x4a,0xd6,0xfa,0x04,0xee,0x01,0x01,0x21,0x01,0xff]
+
+v_min3_i16_e64_dpp v5, v1, v2, exec_hi row_ror:15
+// GFX12: [0x05,0x00,0x4a,0xd6,0xfa,0x04,0xfe,0x01,0x01,0x2f,0x01,0xff]
+
+v_min3_i16_e64_dpp v5, v1, v2, exec_lo row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x00,0x4a,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x50,0x01,0xff]
+
+v_min3_i16_e64_dpp v5, v1, v2, null row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x00,0x4a,0xd6,0xfa,0x04,0xf2,0x01,0x01,0x5f,0x01,0x01]
+
+v_min3_i16_e64_dpp v5, v1, v2, -1 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: [0x05,0x00,0x4a,0xd6,0xfa,0x04,0x06,0x03,0x01,0x60,0x09,0x13]
+
+v_min3_i16_e64_dpp v255, v255, v255, src_scc row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: [0xff,0x00,0x4a,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x05,0x30]
+
+v_min3_i32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x1a,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_min3_i32_e64_dpp v5, v1, s2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x1a,0xd6,0xfa,0x04,0x0c,0x04,0x01,0x1b,0x00,0xff]
+
+v_min3_i32_e64_dpp v5, v1, 10, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x1a,0xd6,0xfa,0x14,0x0d,0x04,0x01,0x1b,0x00,0xff]
+
+v_min3_i32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX12: [0x05,0x00,0x1a,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_min3_i32_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX12: [0x05,0x00,0x1a,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_min3_i32_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX12: [0x05,0x00,0x1a,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_min3_i32_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX12: [0x05,0x00,0x1a,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_min3_i32_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX12: [0x05,0x00,0x1a,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_min3_i32_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX12: [0x05,0x00,0x1a,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_min3_i32_e64_dpp v5, v1, v2, ttmp15 row_shr:15
+// GFX12: [0x05,0x00,0x1a,0xd6,0xfa,0x04,0xee,0x01,0x01,0x1f,0x01,0xff]
+
+v_min3_i32_e64_dpp v5, v1, v2, exec_hi row_ror:1
+// GFX12: [0x05,0x00,0x1a,0xd6,0xfa,0x04,0xfe,0x01,0x01,0x21,0x01,0xff]
+
+v_min3_i32_e64_dpp v5, v1, v2, exec_lo row_ror:15
+// GFX12: [0x05,0x00,0x1a,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x2f,0x01,0xff]
+
+v_min3_i32_e64_dpp v5, v1, v2, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x00,0x1a,0xd6,0xfa,0x04,0xf2,0x01,0x01,0x50,0x01,0xff]
+
+v_min3_i32_e64_dpp v5, v1, v2, -1 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x00,0x1a,0xd6,0xfa,0x04,0x06,0x03,0x01,0x5f,0x01,0x01]
+
+v_min3_i32_e64_dpp v5, v1, v2, 0.5 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: [0x05,0x00,0x1a,0xd6,0xfa,0x04,0xc2,0x03,0x01,0x60,0x09,0x13]
+
+v_min3_i32_e64_dpp v255, v255, v255, src_scc row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: [0xff,0x00,0x1a,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x05,0x30]
+
+v_min3_u16_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x4b,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_min3_u16_e64_dpp v5, v1, s2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x4b,0xd6,0xfa,0x04,0x0c,0x04,0x01,0x1b,0x00,0xff]
+
+v_min3_u16_e64_dpp v5, v1, 10, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x4b,0xd6,0xfa,0x14,0x0d,0x04,0x01,0x1b,0x00,0xff]
+
+v_min3_u16_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX12: [0x05,0x00,0x4b,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_min3_u16_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX12: [0x05,0x00,0x4b,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_min3_u16_e64_dpp v5, v1, v2, v3 row_half_mirror
+// GFX12: [0x05,0x00,0x4b,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x41,0x01,0xff]
+
+v_min3_u16_e64_dpp v5, v1, v2, v255 row_shl:1
+// GFX12: [0x05,0x00,0x4b,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x01,0x01,0xff]
+
+v_min3_u16_e64_dpp v5, v1, v2, s105 row_shl:15
+// GFX12: [0x05,0x00,0x4b,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x0f,0x01,0xff]
+
+v_min3_u16_e64_dpp v5, v1, v2, vcc_hi row_shr:1
+// GFX12: [0x05,0x00,0x4b,0xd6,0xfa,0x04,0xae,0x01,0x01,0x11,0x01,0xff]
+
+v_min3_u16_e64_dpp v5, v1, v2, vcc_lo row_shr:15
+// GFX12: [0x05,0x00,0x4b,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x1f,0x01,0xff]
+
+v_min3_u16_e64_dpp v5, v1, v2, ttmp15 row_ror:1
+// GFX12: [0x05,0x00,0x4b,0xd6,0xfa,0x04,0xee,0x01,0x01,0x21,0x01,0xff]
+
+v_min3_u16_e64_dpp v5, v1, v2, exec_hi row_ror:15
+// GFX12: [0x05,0x00,0x4b,0xd6,0xfa,0x04,0xfe,0x01,0x01,0x2f,0x01,0xff]
+
+v_min3_u16_e64_dpp v5, v1, v2, exec_lo row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x00,0x4b,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x50,0x01,0xff]
+
+v_min3_u16_e64_dpp v5, v1, v2, null row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x00,0x4b,0xd6,0xfa,0x04,0xf2,0x01,0x01,0x5f,0x01,0x01]
+
+v_min3_u16_e64_dpp v5, v1, v2, -1 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: [0x05,0x00,0x4b,0xd6,0xfa,0x04,0x06,0x03,0x01,0x60,0x09,0x13]
+
+v_min3_u16_e64_dpp v255, v255, v255, src_scc row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: [0xff,0x00,0x4b,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x05,0x30]
+
+v_min3_u32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x1b,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_min3_u32_e64_dpp v5, v1, s2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x1b,0xd6,0xfa,0x04,0x0c,0x04,0x01,0x1b,0x00,0xff]
+
+v_min3_u32_e64_dpp v5, v1, 10, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x1b,0xd6,0xfa,0x14,0x0d,0x04,0x01,0x1b,0x00,0xff]
+
+v_min3_u32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX12: [0x05,0x00,0x1b,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_min3_u32_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX12: [0x05,0x00,0x1b,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_min3_u32_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX12: [0x05,0x00,0x1b,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_min3_u32_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX12: [0x05,0x00,0x1b,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_min3_u32_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX12: [0x05,0x00,0x1b,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_min3_u32_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX12: [0x05,0x00,0x1b,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_min3_u32_e64_dpp v5, v1, v2, ttmp15 row_shr:15
+// GFX12: [0x05,0x00,0x1b,0xd6,0xfa,0x04,0xee,0x01,0x01,0x1f,0x01,0xff]
+
+v_min3_u32_e64_dpp v5, v1, v2, exec_hi row_ror:1
+// GFX12: [0x05,0x00,0x1b,0xd6,0xfa,0x04,0xfe,0x01,0x01,0x21,0x01,0xff]
+
+v_min3_u32_e64_dpp v5, v1, v2, exec_lo row_ror:15
+// GFX12: [0x05,0x00,0x1b,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x2f,0x01,0xff]
+
+v_min3_u32_e64_dpp v5, v1, v2, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x00,0x1b,0xd6,0xfa,0x04,0xf2,0x01,0x01,0x50,0x01,0xff]
+
+v_min3_u32_e64_dpp v5, v1, v2, -1 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x00,0x1b,0xd6,0xfa,0x04,0x06,0x03,0x01,0x5f,0x01,0x01]
+
+v_min3_u32_e64_dpp v5, v1, v2, 0.5 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: [0x05,0x00,0x1b,0xd6,0xfa,0x04,0xc2,0x03,0x01,0x60,0x09,0x13]
+
+v_min3_u32_e64_dpp v255, v255, v255, src_scc row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: [0xff,0x00,0x1b,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x05,0x30]
+
+v_min_i16_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x0c,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+
+v_min_i16_e64_dpp v5, v1, v2 quad_perm:[0,1,2,3]
+// GFX12: [0x05,0x00,0x0c,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+
+v_min_i16_e64_dpp v5, v1, v2 row_mirror
+// GFX12: [0x05,0x00,0x0c,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+
+v_min_i16_e64_dpp v5, v1, v2 row_half_mirror
+// GFX12: [0x05,0x00,0x0c,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+
+v_min_i16_e64_dpp v5, v1, v2 row_shl:1
+// GFX12: [0x05,0x00,0x0c,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+
+v_min_i16_e64_dpp v5, v1, v2 row_shl:15
+// GFX12: [0x05,0x00,0x0c,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+
+v_min_i16_e64_dpp v5, v1, v2 row_shr:1
+// GFX12: [0x05,0x00,0x0c,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+
+v_min_i16_e64_dpp v5, v1, v2 row_shr:15
+// GFX12: [0x05,0x00,0x0c,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+
+v_min_i16_e64_dpp v5, v1, v2 row_ror:1
+// GFX12: [0x05,0x00,0x0c,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+
+v_min_i16_e64_dpp v5, v1, v2 row_ror:15
+// GFX12: [0x05,0x00,0x0c,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+
+v_min_i16_e64_dpp v5, v1, v2 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x00,0x0c,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+
+v_min_i16_e64_dpp v5, v1, v2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x00,0x0c,0xd7,0xfa,0x04,0x02,0x00,0x01,0x5f,0x01,0x01]
+
+v_min_i16_e64_dpp v5, v1, v2 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: [0x05,0x00,0x0c,0xd7,0xfa,0x04,0x02,0x00,0x01,0x60,0x09,0x13]
+
+v_min_i16_e64_dpp v255, v255, v255 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: [0xff,0x00,0x0c,0xd7,0xfa,0xfe,0x03,0x00,0xff,0x6f,0x05,0x30]
+
+v_min_u16_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x0b,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+
+v_min_u16_e64_dpp v5, v1, v2 quad_perm:[0,1,2,3]
+// GFX12: [0x05,0x00,0x0b,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+
+v_min_u16_e64_dpp v5, v1, v2 row_mirror
+// GFX12: [0x05,0x00,0x0b,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+
+v_min_u16_e64_dpp v5, v1, v2 row_half_mirror
+// GFX12: [0x05,0x00,0x0b,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+
+v_min_u16_e64_dpp v5, v1, v2 row_shl:1
+// GFX12: [0x05,0x00,0x0b,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+
+v_min_u16_e64_dpp v5, v1, v2 row_shl:15
+// GFX12: [0x05,0x00,0x0b,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+
+v_min_u16_e64_dpp v5, v1, v2 row_shr:1
+// GFX12: [0x05,0x00,0x0b,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+
+v_min_u16_e64_dpp v5, v1, v2 row_shr:15
+// GFX12: [0x05,0x00,0x0b,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+
+v_min_u16_e64_dpp v5, v1, v2 row_ror:1
+// GFX12: [0x05,0x00,0x0b,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+
+v_min_u16_e64_dpp v5, v1, v2 row_ror:15
+// GFX12: [0x05,0x00,0x0b,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+
+v_min_u16_e64_dpp v5, v1, v2 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x00,0x0b,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+
+v_min_u16_e64_dpp v5, v1, v2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x00,0x0b,0xd7,0xfa,0x04,0x02,0x00,0x01,0x5f,0x01,0x01]
+
+v_min_u16_e64_dpp v5, v1, v2 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: [0x05,0x00,0x0b,0xd7,0xfa,0x04,0x02,0x00,0x01,0x60,0x09,0x13]
+
+v_min_u16_e64_dpp v255, v255, v255 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: [0xff,0x00,0x0b,0xd7,0xfa,0xfe,0x03,0x00,0xff,0x6f,0x05,0x30]
+
+v_minmax_num_f16_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x6a,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_minmax_num_f16_e64_dpp v5, v1, s2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x6a,0xd6,0xfa,0x04,0x0c,0x04,0x01,0x1b,0x00,0xff]
+
+v_minmax_num_f16_e64_dpp v5, v1, 2.0, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x6a,0xd6,0xfa,0xe8,0x0d,0x04,0x01,0x1b,0x00,0xff]
+
+v_minmax_num_f16_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX12: [0x05,0x00,0x6a,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_minmax_num_f16_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX12: [0x05,0x00,0x6a,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_minmax_num_f16_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX12: [0x05,0x00,0x6a,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_minmax_num_f16_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX12: [0x05,0x00,0x6a,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_minmax_num_f16_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX12: [0x05,0x00,0x6a,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_minmax_num_f16_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX12: [0x05,0x00,0x6a,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_minmax_num_f16_e64_dpp v5, |v1|, v2, -ttmp15 row_shr:15
+// GFX12: [0x05,0x01,0x6a,0xd6,0xfa,0x04,0xee,0x81,0x01,0x1f,0x01,0xff]
+
+v_minmax_num_f16_e64_dpp v5, v1, -|v2|, exec_hi row_ror:1
+// GFX12: [0x05,0x02,0x6a,0xd6,0xfa,0x04,0xfe,0x41,0x01,0x21,0x01,0xff]
+
+v_minmax_num_f16_e64_dpp v5, -v1, v2, |exec_lo| row_ror:15
+// GFX12: [0x05,0x04,0x6a,0xd6,0xfa,0x04,0xfa,0x21,0x01,0x2f,0x01,0xff]
+
+v_minmax_num_f16_e64_dpp v5, -|v1|, -|v2|, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x03,0x6a,0xd6,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff]
+
+v_minmax_num_f16_e64_dpp v5, -|v1|, v2, -|-1| mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x05,0x6a,0xd6,0xfa,0x04,0x06,0xab,0x01,0x5f,0x01,0x01]
+
+v_minmax_num_f16_e64_dpp v5, v1, -|v2|, -|0.5| mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: [0x05,0x06,0x6a,0xd6,0xfa,0x04,0xc2,0xd3,0x01,0x60,0x09,0x13]
+
+v_minmax_num_f16_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: [0xff,0x87,0x6a,0xd6,0xfa,0xfe,0xf7,0xfb,0xff,0x6f,0x05,0x30]
+
+v_minmax_num_f32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x68,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_minmax_num_f32_e64_dpp v5, v1, s2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x68,0xd6,0xfa,0x04,0x0c,0x04,0x01,0x1b,0x00,0xff]
+
+v_minmax_num_f32_e64_dpp v5, v1, 2.0, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x68,0xd6,0xfa,0xe8,0x0d,0x04,0x01,0x1b,0x00,0xff]
+
+v_minmax_num_f32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX12: [0x05,0x00,0x68,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_minmax_num_f32_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX12: [0x05,0x00,0x68,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_minmax_num_f32_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX12: [0x05,0x00,0x68,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_minmax_num_f32_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX12: [0x05,0x00,0x68,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_minmax_num_f32_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX12: [0x05,0x00,0x68,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_minmax_num_f32_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX12: [0x05,0x00,0x68,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_minmax_num_f32_e64_dpp v5, |v1|, v2, -ttmp15 row_shr:15
+// GFX12: [0x05,0x01,0x68,0xd6,0xfa,0x04,0xee,0x81,0x01,0x1f,0x01,0xff]
+
+v_minmax_num_f32_e64_dpp v5, v1, -|v2|, exec_hi row_ror:1
+// GFX12: [0x05,0x02,0x68,0xd6,0xfa,0x04,0xfe,0x41,0x01,0x21,0x01,0xff]
+
+v_minmax_num_f32_e64_dpp v5, -v1, v2, |exec_lo| row_ror:15
+// GFX12: [0x05,0x04,0x68,0xd6,0xfa,0x04,0xfa,0x21,0x01,0x2f,0x01,0xff]
+
+v_minmax_num_f32_e64_dpp v5, -|v1|, -|v2|, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x03,0x68,0xd6,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff]
+
+v_minmax_num_f32_e64_dpp v5, -|v1|, v2, -|-1| mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x05,0x68,0xd6,0xfa,0x04,0x06,0xab,0x01,0x5f,0x01,0x01]
+
+v_minmax_num_f32_e64_dpp v5, v1, -|v2|, -|0.5| mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: [0x05,0x06,0x68,0xd6,0xfa,0x04,0xc2,0xd3,0x01,0x60,0x09,0x13]
+
+v_minmax_num_f32_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: [0xff,0x87,0x68,0xd6,0xfa,0xfe,0xf7,0xfb,0xff,0x6f,0x05,0x30]
+
+v_minmax_i32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x65,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_minmax_i32_e64_dpp v5, v1, s2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x65,0xd6,0xfa,0x04,0x0c,0x04,0x01,0x1b,0x00,0xff]
+
+v_minmax_i32_e64_dpp v5, v1, 10, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x65,0xd6,0xfa,0x14,0x0d,0x04,0x01,0x1b,0x00,0xff]
+
+v_minmax_i32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX12: [0x05,0x00,0x65,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_minmax_i32_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX12: [0x05,0x00,0x65,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_minmax_i32_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX12: [0x05,0x00,0x65,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_minmax_i32_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX12: [0x05,0x00,0x65,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_minmax_i32_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX12: [0x05,0x00,0x65,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_minmax_i32_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX12: [0x05,0x00,0x65,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_minmax_i32_e64_dpp v5, v1, v2, ttmp15 row_shr:15
+// GFX12: [0x05,0x00,0x65,0xd6,0xfa,0x04,0xee,0x01,0x01,0x1f,0x01,0xff]
+
+v_minmax_i32_e64_dpp v5, v1, v2, exec_hi row_ror:1
+// GFX12: [0x05,0x00,0x65,0xd6,0xfa,0x04,0xfe,0x01,0x01,0x21,0x01,0xff]
+
+v_minmax_i32_e64_dpp v5, v1, v2, exec_lo row_ror:15
+// GFX12: [0x05,0x00,0x65,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x2f,0x01,0xff]
+
+v_minmax_i32_e64_dpp v5, v1, v2, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x00,0x65,0xd6,0xfa,0x04,0xf2,0x01,0x01,0x50,0x01,0xff]
+
+v_minmax_i32_e64_dpp v5, v1, v2, -1 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x00,0x65,0xd6,0xfa,0x04,0x06,0x03,0x01,0x5f,0x01,0x01]
+
+v_minmax_i32_e64_dpp v5, v1, v2, 0.5 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: [0x05,0x00,0x65,0xd6,0xfa,0x04,0xc2,0x03,0x01,0x60,0x09,0x13]
+
+v_minmax_i32_e64_dpp v255, v255, v255, src_scc row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: [0xff,0x00,0x65,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x05,0x30]
+
+v_minmax_u32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x63,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_minmax_u32_e64_dpp v5, v1, s2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x63,0xd6,0xfa,0x04,0x0c,0x04,0x01,0x1b,0x00,0xff]
+
+v_minmax_u32_e64_dpp v5, v1, 10, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x63,0xd6,0xfa,0x14,0x0d,0x04,0x01,0x1b,0x00,0xff]
+
+v_minmax_u32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX12: [0x05,0x00,0x63,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_minmax_u32_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX12: [0x05,0x00,0x63,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_minmax_u32_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX12: [0x05,0x00,0x63,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_minmax_u32_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX12: [0x05,0x00,0x63,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_minmax_u32_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX12: [0x05,0x00,0x63,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_minmax_u32_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX12: [0x05,0x00,0x63,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_minmax_u32_e64_dpp v5, v1, v2, ttmp15 row_shr:15
+// GFX12: [0x05,0x00,0x63,0xd6,0xfa,0x04,0xee,0x01,0x01,0x1f,0x01,0xff]
+
+v_minmax_u32_e64_dpp v5, v1, v2, exec_hi row_ror:1
+// GFX12: [0x05,0x00,0x63,0xd6,0xfa,0x04,0xfe,0x01,0x01,0x21,0x01,0xff]
+
+v_minmax_u32_e64_dpp v5, v1, v2, exec_lo row_ror:15
+// GFX12: [0x05,0x00,0x63,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x2f,0x01,0xff]
+
+v_minmax_u32_e64_dpp v5, v1, v2, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x00,0x63,0xd6,0xfa,0x04,0xf2,0x01,0x01,0x50,0x01,0xff]
+
+v_minmax_u32_e64_dpp v5, v1, v2, -1 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x00,0x63,0xd6,0xfa,0x04,0x06,0x03,0x01,0x5f,0x01,0x01]
+
+v_minmax_u32_e64_dpp v5, v1, v2, 0.5 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: [0x05,0x00,0x63,0xd6,0xfa,0x04,0xc2,0x03,0x01,0x60,0x09,0x13]
+
+v_minmax_u32_e64_dpp v255, v255, v255, src_scc row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: [0xff,0x00,0x63,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x05,0x30]
+
+v_msad_u8_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x39,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_msad_u8_e64_dpp v5, v1, s2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x39,0xd6,0xfa,0x04,0x0c,0x04,0x01,0x1b,0x00,0xff]
+
+v_msad_u8_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX12: [0x05,0x00,0x39,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_msad_u8_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX12: [0x05,0x00,0x39,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_msad_u8_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX12: [0x05,0x00,0x39,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_msad_u8_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX12: [0x05,0x00,0x39,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_msad_u8_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX12: [0x05,0x00,0x39,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_msad_u8_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX12: [0x05,0x00,0x39,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_msad_u8_e64_dpp v5, v1, v2, ttmp15 row_shr:15
+// GFX12: [0x05,0x00,0x39,0xd6,0xfa,0x04,0xee,0x01,0x01,0x1f,0x01,0xff]
+
+v_msad_u8_e64_dpp v5, v1, v2, exec_hi row_ror:1
+// GFX12: [0x05,0x00,0x39,0xd6,0xfa,0x04,0xfe,0x01,0x01,0x21,0x01,0xff]
+
+v_msad_u8_e64_dpp v5, v1, v2, exec_lo row_ror:15
+// GFX12: [0x05,0x00,0x39,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x2f,0x01,0xff]
+
+v_msad_u8_e64_dpp v5, v1, v2, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x00,0x39,0xd6,0xfa,0x04,0xf2,0x01,0x01,0x50,0x01,0xff]
+
+v_msad_u8_e64_dpp v5, v1, v2, -1 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x00,0x39,0xd6,0xfa,0x04,0x06,0x03,0x01,0x5f,0x01,0x01]
+
+v_msad_u8_e64_dpp v5, v1, v2, 0.5 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: [0x05,0x00,0x39,0xd6,0xfa,0x04,0xc2,0x03,0x01,0x60,0x09,0x13]
+
+v_msad_u8_e64_dpp v255, v255, v255, src_scc clamp row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: [0xff,0x80,0x39,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x05,0x30]
+
+v_mul_lo_u16_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x05,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+
+v_mul_lo_u16_e64_dpp v5, v1, v2 quad_perm:[0,1,2,3]
+// GFX12: [0x05,0x00,0x05,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+
+v_mul_lo_u16_e64_dpp v5, v1, v2 row_mirror
+// GFX12: [0x05,0x00,0x05,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+
+v_mul_lo_u16_e64_dpp v5, v1, v2 row_half_mirror
+// GFX12: [0x05,0x00,0x05,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+
+v_mul_lo_u16_e64_dpp v5, v1, v2 row_shl:1
+// GFX12: [0x05,0x00,0x05,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+
+v_mul_lo_u16_e64_dpp v5, v1, v2 row_shl:15
+// GFX12: [0x05,0x00,0x05,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+
+v_mul_lo_u16_e64_dpp v5, v1, v2 row_shr:1
+// GFX12: [0x05,0x00,0x05,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+
+v_mul_lo_u16_e64_dpp v5, v1, v2 row_shr:15
+// GFX12: [0x05,0x00,0x05,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+
+v_mul_lo_u16_e64_dpp v5, v1, v2 row_ror:1
+// GFX12: [0x05,0x00,0x05,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+
+v_mul_lo_u16_e64_dpp v5, v1, v2 row_ror:15
+// GFX12: [0x05,0x00,0x05,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+
+v_mul_lo_u16_e64_dpp v5, v1, v2 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x00,0x05,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+
+v_mul_lo_u16_e64_dpp v5, v1, v2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x00,0x05,0xd7,0xfa,0x04,0x02,0x00,0x01,0x5f,0x01,0x01]
+
+v_mul_lo_u16_e64_dpp v5, v1, v2 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: [0x05,0x00,0x05,0xd7,0xfa,0x04,0x02,0x00,0x01,0x60,0x09,0x13]
+
+v_mul_lo_u16_e64_dpp v255, v255, v255 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: [0xff,0x00,0x05,0xd7,0xfa,0xfe,0x03,0x00,0xff,0x6f,0x05,0x30]
+
+v_mullit_f32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x18,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_mullit_f32_e64_dpp v5, v1, s2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x18,0xd6,0xfa,0x04,0x0c,0x04,0x01,0x1b,0x00,0xff]
+
+v_mullit_f32_e64_dpp v5, v1, 2.0, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x18,0xd6,0xfa,0xe8,0x0d,0x04,0x01,0x1b,0x00,0xff]
+
+v_mullit_f32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX12: [0x05,0x00,0x18,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_mullit_f32_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX12: [0x05,0x00,0x18,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_mullit_f32_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX12: [0x05,0x00,0x18,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_mullit_f32_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX12: [0x05,0x00,0x18,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_mullit_f32_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX12: [0x05,0x00,0x18,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_mullit_f32_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX12: [0x05,0x00,0x18,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_mullit_f32_e64_dpp v5, |v1|, v2, -ttmp15 row_shr:15
+// GFX12: [0x05,0x01,0x18,0xd6,0xfa,0x04,0xee,0x81,0x01,0x1f,0x01,0xff]
+
+v_mullit_f32_e64_dpp v5, v1, -|v2|, exec_hi row_ror:1
+// GFX12: [0x05,0x02,0x18,0xd6,0xfa,0x04,0xfe,0x41,0x01,0x21,0x01,0xff]
+
+v_mullit_f32_e64_dpp v5, -v1, v2, |exec_lo| row_ror:15
+// GFX12: [0x05,0x04,0x18,0xd6,0xfa,0x04,0xfa,0x21,0x01,0x2f,0x01,0xff]
+
+v_mullit_f32_e64_dpp v5, -|v1|, -|v2|, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x03,0x18,0xd6,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff]
+
+v_mullit_f32_e64_dpp v5, -|v1|, v2, -|-1| mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x05,0x18,0xd6,0xfa,0x04,0x06,0xab,0x01,0x5f,0x01,0x01]
+
+v_mullit_f32_e64_dpp v5, v1, -|v2|, -|0.5| mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: [0x05,0x06,0x18,0xd6,0xfa,0x04,0xc2,0xd3,0x01,0x60,0x09,0x13]
+
+v_mullit_f32_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: [0xff,0x87,0x18,0xd6,0xfa,0xfe,0xf7,0xfb,0xff,0x6f,0x05,0x30]
+
+v_or3_b32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x58,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_or3_b32_e64_dpp v5, v1, s2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x58,0xd6,0xfa,0x04,0x0c,0x04,0x01,0x1b,0x00,0xff]
+
+v_or3_b32_e64_dpp v5, v1, 10, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x58,0xd6,0xfa,0x14,0x0d,0x04,0x01,0x1b,0x00,0xff]
+
+v_or3_b32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX12: [0x05,0x00,0x58,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_or3_b32_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX12: [0x05,0x00,0x58,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_or3_b32_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX12: [0x05,0x00,0x58,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_or3_b32_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX12: [0x05,0x00,0x58,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_or3_b32_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX12: [0x05,0x00,0x58,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_or3_b32_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX12: [0x05,0x00,0x58,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_or3_b32_e64_dpp v5, v1, v2, ttmp15 row_shr:15
+// GFX12: [0x05,0x00,0x58,0xd6,0xfa,0x04,0xee,0x01,0x01,0x1f,0x01,0xff]
+
+v_or3_b32_e64_dpp v5, v1, v2, exec_hi row_ror:1
+// GFX12: [0x05,0x00,0x58,0xd6,0xfa,0x04,0xfe,0x01,0x01,0x21,0x01,0xff]
+
+v_or3_b32_e64_dpp v5, v1, v2, exec_lo row_ror:15
+// GFX12: [0x05,0x00,0x58,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x2f,0x01,0xff]
+
+v_or3_b32_e64_dpp v5, v1, v2, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x00,0x58,0xd6,0xfa,0x04,0xf2,0x01,0x01,0x50,0x01,0xff]
+
+v_or3_b32_e64_dpp v5, v1, v2, -1 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x00,0x58,0xd6,0xfa,0x04,0x06,0x03,0x01,0x5f,0x01,0x01]
+
+v_or3_b32_e64_dpp v5, v1, v2, 0.5 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: [0x05,0x00,0x58,0xd6,0xfa,0x04,0xc2,0x03,0x01,0x60,0x09,0x13]
+
+v_or3_b32_e64_dpp v255, v255, v255, src_scc row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: [0xff,0x00,0x58,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x05,0x30]
+
+v_or_b16_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+
+v_or_b16_e64_dpp v5, v1, v2 quad_perm:[0,1,2,3]
+// GFX12: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+
+v_or_b16_e64_dpp v5, v1, v2 row_mirror
+// GFX12: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+
+v_or_b16_e64_dpp v5, v1, v2 row_half_mirror
+// GFX12: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+
+v_or_b16_e64_dpp v5, v1, v2 row_shl:1
+// GFX12: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+
+v_or_b16_e64_dpp v5, v1, v2 row_shl:15
+// GFX12: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+
+v_or_b16_e64_dpp v5, v1, v2 row_shr:1
+// GFX12: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+
+v_or_b16_e64_dpp v5, v1, v2 row_shr:15
+// GFX12: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+
+v_or_b16_e64_dpp v5, v1, v2 row_ror:1
+// GFX12: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+
+v_or_b16_e64_dpp v5, v1, v2 row_ror:15
+// GFX12: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+
+v_or_b16_e64_dpp v5, v1, v2 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+
+v_or_b16_e64_dpp v5, v1, v2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x5f,0x01,0x01]
+
+v_or_b16_e64_dpp v5, v1, v2 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x60,0x09,0x13]
+
+v_or_b16_e64_dpp v255, v255, v255 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: [0xff,0x00,0x63,0xd7,0xfa,0xfe,0x03,0x00,0xff,0x6f,0x05,0x30]
+
+v_pack_b32_f16_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x11,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+
+v_pack_b32_f16_e64_dpp v5, v1, v2 quad_perm:[0,1,2,3]
+// GFX12: [0x05,0x00,0x11,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+
+v_pack_b32_f16_e64_dpp v5, v1, v2 row_mirror
+// GFX12: [0x05,0x00,0x11,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+
+v_pack_b32_f16_e64_dpp v5, v1, v2 row_half_mirror
+// GFX12: [0x05,0x00,0x11,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+
+v_pack_b32_f16_e64_dpp v5, v1, v2 row_shl:1
+// GFX12: [0x05,0x00,0x11,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+
+v_pack_b32_f16_e64_dpp v5, v1, v2 row_shl:15
+// GFX12: [0x05,0x00,0x11,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+
+v_pack_b32_f16_e64_dpp v5, v1, v2 row_shr:1
+// GFX12: [0x05,0x00,0x11,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+
+v_pack_b32_f16_e64_dpp v5, v1, v2 row_shr:15
+// GFX12: [0x05,0x00,0x11,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+
+v_pack_b32_f16_e64_dpp v5, v1, v2 row_ror:1
+// GFX12: [0x05,0x00,0x11,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+
+v_pack_b32_f16_e64_dpp v5, v1, v2 row_ror:15
+// GFX12: [0x05,0x00,0x11,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+
+v_pack_b32_f16_e64_dpp v5, v1, v2 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x00,0x11,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+
+v_pack_b32_f16_e64_dpp v5, |v1|, -v2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x01,0x11,0xd7,0xfa,0x04,0x02,0x40,0x01,0x5f,0x01,0x01]
+
+v_pack_b32_f16_e64_dpp v5, -v1, |v2| row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: [0x05,0x02,0x11,0xd7,0xfa,0x04,0x02,0x20,0x01,0x60,0x09,0x13]
+
+v_pack_b32_f16_e64_dpp v255, -|v255|, -|v255| row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: [0xff,0x03,0x11,0xd7,0xfa,0xfe,0x03,0x60,0xff,0x6f,0x05,0x30]
+
+v_perm_b32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x44,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_perm_b32_e64_dpp v5, v1, s2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x44,0xd6,0xfa,0x04,0x0c,0x04,0x01,0x1b,0x00,0xff]
+
+v_perm_b32_e64_dpp v5, v1, 10, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x44,0xd6,0xfa,0x14,0x0d,0x04,0x01,0x1b,0x00,0xff]
+
+v_perm_b32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX12: [0x05,0x00,0x44,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_perm_b32_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX12: [0x05,0x00,0x44,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_perm_b32_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX12: [0x05,0x00,0x44,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_perm_b32_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX12: [0x05,0x00,0x44,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_perm_b32_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX12: [0x05,0x00,0x44,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_perm_b32_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX12: [0x05,0x00,0x44,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_perm_b32_e64_dpp v5, v1, v2, ttmp15 row_shr:15
+// GFX12: [0x05,0x00,0x44,0xd6,0xfa,0x04,0xee,0x01,0x01,0x1f,0x01,0xff]
+
+v_perm_b32_e64_dpp v5, v1, v2, exec_hi row_ror:1
+// GFX12: [0x05,0x00,0x44,0xd6,0xfa,0x04,0xfe,0x01,0x01,0x21,0x01,0xff]
+
+v_perm_b32_e64_dpp v5, v1, v2, exec_lo row_ror:15
+// GFX12: [0x05,0x00,0x44,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x2f,0x01,0xff]
+
+v_perm_b32_e64_dpp v5, v1, v2, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x00,0x44,0xd6,0xfa,0x04,0xf2,0x01,0x01,0x50,0x01,0xff]
+
+v_perm_b32_e64_dpp v5, v1, v2, -1 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x00,0x44,0xd6,0xfa,0x04,0x06,0x03,0x01,0x5f,0x01,0x01]
+
+v_perm_b32_e64_dpp v5, v1, v2, 0.5 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: [0x05,0x00,0x44,0xd6,0xfa,0x04,0xc2,0x03,0x01,0x60,0x09,0x13]
+
+v_perm_b32_e64_dpp v255, v255, v255, src_scc row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: [0xff,0x00,0x44,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x05,0x30]
+
+v_sad_hi_u8_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x23,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_sad_hi_u8_e64_dpp v5, v1, s2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x23,0xd6,0xfa,0x04,0x0c,0x04,0x01,0x1b,0x00,0xff]
+
+v_sad_hi_u8_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX12: [0x05,0x00,0x23,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_sad_hi_u8_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX12: [0x05,0x00,0x23,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_sad_hi_u8_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX12: [0x05,0x00,0x23,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_sad_hi_u8_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX12: [0x05,0x00,0x23,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_sad_hi_u8_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX12: [0x05,0x00,0x23,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_sad_hi_u8_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX12: [0x05,0x00,0x23,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_sad_hi_u8_e64_dpp v5, v1, v2, ttmp15 row_shr:15
+// GFX12: [0x05,0x00,0x23,0xd6,0xfa,0x04,0xee,0x01,0x01,0x1f,0x01,0xff]
+
+v_sad_hi_u8_e64_dpp v5, v1, v2, exec_hi row_ror:1
+// GFX12: [0x05,0x00,0x23,0xd6,0xfa,0x04,0xfe,0x01,0x01,0x21,0x01,0xff]
+
+v_sad_hi_u8_e64_dpp v5, v1, v2, exec_lo row_ror:15
+// GFX12: [0x05,0x00,0x23,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x2f,0x01,0xff]
+
+v_sad_hi_u8_e64_dpp v5, v1, v2, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x00,0x23,0xd6,0xfa,0x04,0xf2,0x01,0x01,0x50,0x01,0xff]
+
+v_sad_hi_u8_e64_dpp v5, v1, v2, -1 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x00,0x23,0xd6,0xfa,0x04,0x06,0x03,0x01,0x5f,0x01,0x01]
+
+v_sad_hi_u8_e64_dpp v5, v1, v2, 0.5 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: [0x05,0x00,0x23,0xd6,0xfa,0x04,0xc2,0x03,0x01,0x60,0x09,0x13]
+
+v_sad_hi_u8_e64_dpp v255, v255, v255, src_scc clamp row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: [0xff,0x80,0x23,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x05,0x30]
+
+v_sad_u16_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x24,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_sad_u16_e64_dpp v5, v1, s2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x24,0xd6,0xfa,0x04,0x0c,0x04,0x01,0x1b,0x00,0xff]
+
+v_sad_u16_e64_dpp v5, v1, 10, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x24,0xd6,0xfa,0x14,0x0d,0x04,0x01,0x1b,0x00,0xff]
+
+v_sad_u16_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX12: [0x05,0x00,0x24,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_sad_u16_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX12: [0x05,0x00,0x24,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_sad_u16_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX12: [0x05,0x00,0x24,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_sad_u16_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX12: [0x05,0x00,0x24,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_sad_u16_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX12: [0x05,0x00,0x24,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_sad_u16_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX12: [0x05,0x00,0x24,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_sad_u16_e64_dpp v5, v1, v2, ttmp15 row_shr:15
+// GFX12: [0x05,0x00,0x24,0xd6,0xfa,0x04,0xee,0x01,0x01,0x1f,0x01,0xff]
+
+v_sad_u16_e64_dpp v5, v1, v2, exec_hi row_ror:1
+// GFX12: [0x05,0x00,0x24,0xd6,0xfa,0x04,0xfe,0x01,0x01,0x21,0x01,0xff]
+
+v_sad_u16_e64_dpp v5, v1, v2, exec_lo row_ror:15
+// GFX12: [0x05,0x00,0x24,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x2f,0x01,0xff]
+
+v_sad_u16_e64_dpp v5, v1, v2, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x00,0x24,0xd6,0xfa,0x04,0xf2,0x01,0x01,0x50,0x01,0xff]
+
+v_sad_u16_e64_dpp v5, v1, v2, -1 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x00,0x24,0xd6,0xfa,0x04,0x06,0x03,0x01,0x5f,0x01,0x01]
+
+v_sad_u16_e64_dpp v5, v1, v2, 0.5 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: [0x05,0x00,0x24,0xd6,0xfa,0x04,0xc2,0x03,0x01,0x60,0x09,0x13]
+
+v_sad_u16_e64_dpp v255, v255, v255, src_scc clamp row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: [0xff,0x80,0x24,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x05,0x30]
+
+v_sad_u32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x25,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_sad_u32_e64_dpp v5, v1, s2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x25,0xd6,0xfa,0x04,0x0c,0x04,0x01,0x1b,0x00,0xff]
+
+v_sad_u32_e64_dpp v5, v1, 10, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x25,0xd6,0xfa,0x14,0x0d,0x04,0x01,0x1b,0x00,0xff]
+
+v_sad_u32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX12: [0x05,0x00,0x25,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_sad_u32_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX12: [0x05,0x00,0x25,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_sad_u32_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX12: [0x05,0x00,0x25,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_sad_u32_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX12: [0x05,0x00,0x25,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_sad_u32_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX12: [0x05,0x00,0x25,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_sad_u32_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX12: [0x05,0x00,0x25,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_sad_u32_e64_dpp v5, v1, v2, ttmp15 row_shr:15
+// GFX12: [0x05,0x00,0x25,0xd6,0xfa,0x04,0xee,0x01,0x01,0x1f,0x01,0xff]
+
+v_sad_u32_e64_dpp v5, v1, v2, exec_hi row_ror:1
+// GFX12: [0x05,0x00,0x25,0xd6,0xfa,0x04,0xfe,0x01,0x01,0x21,0x01,0xff]
+
+v_sad_u32_e64_dpp v5, v1, v2, exec_lo row_ror:15
+// GFX12: [0x05,0x00,0x25,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x2f,0x01,0xff]
+
+v_sad_u32_e64_dpp v5, v1, v2, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x00,0x25,0xd6,0xfa,0x04,0xf2,0x01,0x01,0x50,0x01,0xff]
+
+v_sad_u32_e64_dpp v5, v1, v2, -1 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x00,0x25,0xd6,0xfa,0x04,0x06,0x03,0x01,0x5f,0x01,0x01]
+
+v_sad_u32_e64_dpp v5, v1, v2, 0.5 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: [0x05,0x00,0x25,0xd6,0xfa,0x04,0xc2,0x03,0x01,0x60,0x09,0x13]
+
+v_sad_u32_e64_dpp v255, v255, v255, src_scc clamp row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: [0xff,0x80,0x25,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x05,0x30]
+
+v_sad_u8_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x22,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_sad_u8_e64_dpp v5, v1, s2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x22,0xd6,0xfa,0x04,0x0c,0x04,0x01,0x1b,0x00,0xff]
+
+v_sad_u8_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX12: [0x05,0x00,0x22,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_sad_u8_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX12: [0x05,0x00,0x22,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_sad_u8_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX12: [0x05,0x00,0x22,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_sad_u8_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX12: [0x05,0x00,0x22,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_sad_u8_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX12: [0x05,0x00,0x22,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_sad_u8_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX12: [0x05,0x00,0x22,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_sad_u8_e64_dpp v5, v1, v2, ttmp15 row_shr:15
+// GFX12: [0x05,0x00,0x22,0xd6,0xfa,0x04,0xee,0x01,0x01,0x1f,0x01,0xff]
+
+v_sad_u8_e64_dpp v5, v1, v2, exec_hi row_ror:1
+// GFX12: [0x05,0x00,0x22,0xd6,0xfa,0x04,0xfe,0x01,0x01,0x21,0x01,0xff]
+
+v_sad_u8_e64_dpp v5, v1, v2, exec_lo row_ror:15
+// GFX12: [0x05,0x00,0x22,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x2f,0x01,0xff]
+
+v_sad_u8_e64_dpp v5, v1, v2, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x00,0x22,0xd6,0xfa,0x04,0xf2,0x01,0x01,0x50,0x01,0xff]
+
+v_sad_u8_e64_dpp v5, v1, v2, -1 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x00,0x22,0xd6,0xfa,0x04,0x06,0x03,0x01,0x5f,0x01,0x01]
+
+v_sad_u8_e64_dpp v5, v1, v2, 0.5 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: [0x05,0x00,0x22,0xd6,0xfa,0x04,0xc2,0x03,0x01,0x60,0x09,0x13]
+
+v_sad_u8_e64_dpp v255, v255, v255, src_scc clamp row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: [0xff,0x80,0x22,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x05,0x30]
+
+v_sub_co_u32_e64_dpp v5, s6, v1, v2 quad_perm:[3,2,1,0]
+// W32: [0x05,0x06,0x01,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32_e64_dpp v5, s6, v1, v2 quad_perm:[0,1,2,3]
+// W32: [0x05,0x06,0x01,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32_e64_dpp v5, s6, v1, v2 row_mirror
+// W32: [0x05,0x06,0x01,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32_e64_dpp v5, s6, v1, s2 row_mirror
+// W32: [0x05,0x06,0x01,0xd7,0xfa,0x04,0x00,0x00,0x01,0x40,0x01,0xff]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32_e64_dpp v5, s6, v1, v2 row_half_mirror
+// W32: [0x05,0x06,0x01,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32_e64_dpp v5, s6, v1, v2 row_shl:1
+// W32: [0x05,0x06,0x01,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32_e64_dpp v5, s6, v1, v2 row_shl:15
+// W32: [0x05,0x06,0x01,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32_e64_dpp v5, s6, v1, v2 row_shr:1
+// W32: [0x05,0x06,0x01,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32_e64_dpp v5, s6, v1, v2 row_shr:15
+// W32: [0x05,0x06,0x01,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32_e64_dpp v5, s6, v1, v2 row_ror:1
+// W32: [0x05,0x06,0x01,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32_e64_dpp v5, s105, v1, v2 row_ror:15
+// W32: [0x05,0x69,0x01,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32_e64_dpp v5, vcc_lo, v1, v2 row_share:0 row_mask:0xf bank_mask:0xf
+// W32: [0x05,0x6a,0x01,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32_e64_dpp v5, vcc_hi, v1, v2 row_share:15 row_mask:0x0 bank_mask:0x1
+// W32: [0x05,0x6b,0x01,0xd7,0xfa,0x04,0x02,0x00,0x01,0x5f,0x01,0x01]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32_e64_dpp v5, ttmp15, v1, v2 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// W32: [0x05,0x7b,0x01,0xd7,0xfa,0x04,0x02,0x00,0x01,0x60,0x09,0x13]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32_e64_dpp v5, s[12:13], v1, v2 quad_perm:[3,2,1,0]
+// W64: [0x05,0x0c,0x01,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32_e64_dpp v5, s[12:13], v1, v2 quad_perm:[0,1,2,3]
+// W64: [0x05,0x0c,0x01,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32_e64_dpp v5, s[12:13], v1, v2 row_mirror
+// W64: [0x05,0x0c,0x01,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32_e64_dpp v5, s[12:13], v1, v2 row_half_mirror
+// W64: [0x05,0x0c,0x01,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32_e64_dpp v5, s[12:13], v1, s2 row_half_mirror
+// W64: [0x05,0x0c,0x01,0xd7,0xfa,0x04,0x00,0x00,0x01,0x41,0x01,0xff]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32_e64_dpp v5, s[12:13], v1, v2 row_shl:1
+// W64: [0x05,0x0c,0x01,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32_e64_dpp v5, s[12:13], v1, v2 row_shl:15
+// W64: [0x05,0x0c,0x01,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32_e64_dpp v5, s[12:13], v1, v2 row_shr:1
+// W64: [0x05,0x0c,0x01,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32_e64_dpp v5, s[12:13], v1, v2 row_shr:15
+// W64: [0x05,0x0c,0x01,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32_e64_dpp v5, s[12:13], v1, v2 row_ror:1
+// W64: [0x05,0x0c,0x01,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32_e64_dpp v5, s[12:13], v1, v2 row_ror:15
+// W64: [0x05,0x0c,0x01,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32_e64_dpp v5, s[104:105], v1, v2 row_share:0 row_mask:0xf bank_mask:0xf
+// W64: [0x05,0x68,0x01,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32_e64_dpp v5, vcc, v1, v2 row_share:15 row_mask:0x0 bank_mask:0x1
+// W64: [0x05,0x6a,0x01,0xd7,0xfa,0x04,0x02,0x00,0x01,0x5f,0x01,0x01]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32_e64_dpp v5, ttmp[14:15], v1, v2 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// W64: [0x05,0x7a,0x01,0xd7,0xfa,0x04,0x02,0x00,0x01,0x60,0x09,0x13]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32_e64_dpp v255, null, v255, v255 clamp row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: [0xff,0xfc,0x01,0xd7,0xfa,0xfe,0x03,0x00,0xff,0x6f,0x05,0x30]
+
+v_sub_nc_i16_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x0e,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+
+v_sub_nc_i16_e64_dpp v5, v1, v2 quad_perm:[0,1,2,3]
+// GFX12: [0x05,0x00,0x0e,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+
+v_sub_nc_i16_e64_dpp v5, v1, v2 row_mirror
+// GFX12: [0x05,0x00,0x0e,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+
+v_sub_nc_i16_e64_dpp v5, v1, v2 row_half_mirror
+// GFX12: [0x05,0x00,0x0e,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+
+v_sub_nc_i16_e64_dpp v5, v1, v2 row_shl:1
+// GFX12: [0x05,0x00,0x0e,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+
+v_sub_nc_i16_e64_dpp v5, v1, v2 row_shl:15
+// GFX12: [0x05,0x00,0x0e,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+
+v_sub_nc_i16_e64_dpp v5, v1, v2 row_shr:1
+// GFX12: [0x05,0x00,0x0e,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+
+v_sub_nc_i16_e64_dpp v5, v1, v2 row_shr:15
+// GFX12: [0x05,0x00,0x0e,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+
+v_sub_nc_i16_e64_dpp v5, v1, v2 row_ror:1
+// GFX12: [0x05,0x00,0x0e,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+
+v_sub_nc_i16_e64_dpp v5, v1, v2 row_ror:15
+// GFX12: [0x05,0x00,0x0e,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+
+v_sub_nc_i16_e64_dpp v5, v1, v2 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x00,0x0e,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+
+v_sub_nc_i16_e64_dpp v5, v1, v2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x00,0x0e,0xd7,0xfa,0x04,0x02,0x00,0x01,0x5f,0x01,0x01]
+
+v_sub_nc_i16_e64_dpp v5, v1, v2 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: [0x05,0x00,0x0e,0xd7,0xfa,0x04,0x02,0x00,0x01,0x60,0x09,0x13]
+
+v_sub_nc_i16_e64_dpp v255, v255, v255 clamp row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: [0xff,0x80,0x0e,0xd7,0xfa,0xfe,0x03,0x00,0xff,0x6f,0x05,0x30]
+
+v_sub_nc_i32_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x25,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+
+v_sub_nc_i32_e64_dpp v5, v1, v2 quad_perm:[0,1,2,3]
+// GFX12: [0x05,0x00,0x25,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+
+v_sub_nc_i32_e64_dpp v5, v1, v2 row_mirror
+// GFX12: [0x05,0x00,0x25,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+
+v_sub_nc_i32_e64_dpp v5, v1, v2 row_half_mirror
+// GFX12: [0x05,0x00,0x25,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+
+v_sub_nc_i32_e64_dpp v5, v1, v2 row_shl:1
+// GFX12: [0x05,0x00,0x25,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+
+v_sub_nc_i32_e64_dpp v5, v1, v2 row_shl:15
+// GFX12: [0x05,0x00,0x25,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+
+v_sub_nc_i32_e64_dpp v5, v1, v2 row_shr:1
+// GFX12: [0x05,0x00,0x25,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+
+v_sub_nc_i32_e64_dpp v5, v1, v2 row_shr:15
+// GFX12: [0x05,0x00,0x25,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+
+v_sub_nc_i32_e64_dpp v5, v1, v2 row_ror:1
+// GFX12: [0x05,0x00,0x25,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+
+v_sub_nc_i32_e64_dpp v5, v1, v2 row_ror:15
+// GFX12: [0x05,0x00,0x25,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+
+v_sub_nc_i32_e64_dpp v5, v1, v2 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x00,0x25,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+
+v_sub_nc_i32_e64_dpp v5, v1, v2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x00,0x25,0xd7,0xfa,0x04,0x02,0x00,0x01,0x5f,0x01,0x01]
+
+v_sub_nc_i32_e64_dpp v5, v1, v2 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: [0x05,0x00,0x25,0xd7,0xfa,0x04,0x02,0x00,0x01,0x60,0x09,0x13]
+
+v_sub_nc_i32_e64_dpp v255, v255, v255 clamp row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: [0xff,0x80,0x25,0xd7,0xfa,0xfe,0x03,0x00,0xff,0x6f,0x05,0x30]
+
+v_sub_nc_u16_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x04,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+
+v_sub_nc_u16_e64_dpp v5, v1, v2 quad_perm:[0,1,2,3]
+// GFX12: [0x05,0x00,0x04,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+
+v_sub_nc_u16_e64_dpp v5, v1, v2 row_mirror
+// GFX12: [0x05,0x00,0x04,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+
+v_sub_nc_u16_e64_dpp v5, v1, v2 row_half_mirror
+// GFX12: [0x05,0x00,0x04,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+
+v_sub_nc_u16_e64_dpp v5, v1, v2 row_shl:1
+// GFX12: [0x05,0x00,0x04,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+
+v_sub_nc_u16_e64_dpp v5, v1, v2 row_shl:15
+// GFX12: [0x05,0x00,0x04,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+
+v_sub_nc_u16_e64_dpp v5, v1, v2 row_shr:1
+// GFX12: [0x05,0x00,0x04,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+
+v_sub_nc_u16_e64_dpp v5, v1, v2 row_shr:15
+// GFX12: [0x05,0x00,0x04,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+
+v_sub_nc_u16_e64_dpp v5, v1, v2 row_ror:1
+// GFX12: [0x05,0x00,0x04,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+
+v_sub_nc_u16_e64_dpp v5, v1, v2 row_ror:15
+// GFX12: [0x05,0x00,0x04,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+
+v_sub_nc_u16_e64_dpp v5, v1, v2 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x00,0x04,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+
+v_sub_nc_u16_e64_dpp v5, v1, v2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x00,0x04,0xd7,0xfa,0x04,0x02,0x00,0x01,0x5f,0x01,0x01]
+
+v_sub_nc_u16_e64_dpp v5, v1, v2 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: [0x05,0x00,0x04,0xd7,0xfa,0x04,0x02,0x00,0x01,0x60,0x09,0x13]
+
+v_sub_nc_u16_e64_dpp v255, v255, v255 clamp row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: [0xff,0x80,0x04,0xd7,0xfa,0xfe,0x03,0x00,0xff,0x6f,0x05,0x30]
+
+v_subrev_co_u32_e64_dpp v5, s6, v1, v2 quad_perm:[3,2,1,0]
+// W32: [0x05,0x06,0x02,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32_e64_dpp v5, s6, v1, v2 quad_perm:[0,1,2,3]
+// W32: [0x05,0x06,0x02,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32_e64_dpp v5, s6, v1, v2 row_mirror
+// W32: [0x05,0x06,0x02,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32_e64_dpp v5, s6, v1, s2 row_mirror
+// W32: [0x05,0x06,0x02,0xd7,0xfa,0x04,0x00,0x00,0x01,0x40,0x01,0xff]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32_e64_dpp v5, s6, v1, v2 row_half_mirror
+// W32: [0x05,0x06,0x02,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32_e64_dpp v5, s6, v1, v2 row_shl:1
+// W32: [0x05,0x06,0x02,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32_e64_dpp v5, s6, v1, v2 row_shl:15
+// W32: [0x05,0x06,0x02,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32_e64_dpp v5, s6, v1, v2 row_shr:1
+// W32: [0x05,0x06,0x02,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32_e64_dpp v5, s6, v1, v2 row_shr:15
+// W32: [0x05,0x06,0x02,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32_e64_dpp v5, s6, v1, v2 row_ror:1
+// W32: [0x05,0x06,0x02,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32_e64_dpp v5, s105, v1, v2 row_ror:15
+// W32: [0x05,0x69,0x02,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32_e64_dpp v5, vcc_lo, v1, v2 row_share:0 row_mask:0xf bank_mask:0xf
+// W32: [0x05,0x6a,0x02,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32_e64_dpp v5, vcc_hi, v1, v2 row_share:15 row_mask:0x0 bank_mask:0x1
+// W32: [0x05,0x6b,0x02,0xd7,0xfa,0x04,0x02,0x00,0x01,0x5f,0x01,0x01]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32_e64_dpp v5, ttmp15, v1, v2 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// W32: [0x05,0x7b,0x02,0xd7,0xfa,0x04,0x02,0x00,0x01,0x60,0x09,0x13]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32_e64_dpp v5, s[12:13], v1, v2 quad_perm:[3,2,1,0]
+// W64: [0x05,0x0c,0x02,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32_e64_dpp v5, s[12:13], v1, v2 quad_perm:[0,1,2,3]
+// W64: [0x05,0x0c,0x02,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32_e64_dpp v5, s[12:13], v1, v2 row_mirror
+// W64: [0x05,0x0c,0x02,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32_e64_dpp v5, s[12:13], v1, v2 row_half_mirror
+// W64: [0x05,0x0c,0x02,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32_e64_dpp v5, s[12:13], v1, s2 row_half_mirror
+// W64: [0x05,0x0c,0x02,0xd7,0xfa,0x04,0x00,0x00,0x01,0x41,0x01,0xff]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32_e64_dpp v5, s[12:13], v1, v2 row_shl:1
+// W64: [0x05,0x0c,0x02,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32_e64_dpp v5, s[12:13], v1, v2 row_shl:15
+// W64: [0x05,0x0c,0x02,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32_e64_dpp v5, s[12:13], v1, v2 row_shr:1
+// W64: [0x05,0x0c,0x02,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32_e64_dpp v5, s[12:13], v1, v2 row_shr:15
+// W64: [0x05,0x0c,0x02,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32_e64_dpp v5, s[12:13], v1, v2 row_ror:1
+// W64: [0x05,0x0c,0x02,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32_e64_dpp v5, s[12:13], v1, v2 row_ror:15
+// W64: [0x05,0x0c,0x02,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32_e64_dpp v5, s[104:105], v1, v2 row_share:0 row_mask:0xf bank_mask:0xf
+// W64: [0x05,0x68,0x02,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32_e64_dpp v5, vcc, v1, v2 row_share:15 row_mask:0x0 bank_mask:0x1
+// W64: [0x05,0x6a,0x02,0xd7,0xfa,0x04,0x02,0x00,0x01,0x5f,0x01,0x01]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32_e64_dpp v5, ttmp[14:15], v1, v2 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// W64: [0x05,0x7a,0x02,0xd7,0xfa,0x04,0x02,0x00,0x01,0x60,0x09,0x13]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32_e64_dpp v255, null, v255, v255 clamp row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: [0xff,0xfc,0x02,0xd7,0xfa,0xfe,0x03,0x00,0xff,0x6f,0x05,0x30]
+
+v_xad_u32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x45,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_xad_u32_e64_dpp v5, v1, s2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x45,0xd6,0xfa,0x04,0x0c,0x04,0x01,0x1b,0x00,0xff]
+
+v_xad_u32_e64_dpp v5, v1, 10, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x45,0xd6,0xfa,0x14,0x0d,0x04,0x01,0x1b,0x00,0xff]
+
+v_xad_u32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX12: [0x05,0x00,0x45,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_xad_u32_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX12: [0x05,0x00,0x45,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_xad_u32_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX12: [0x05,0x00,0x45,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_xad_u32_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX12: [0x05,0x00,0x45,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_xad_u32_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX12: [0x05,0x00,0x45,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_xad_u32_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX12: [0x05,0x00,0x45,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_xad_u32_e64_dpp v5, v1, v2, ttmp15 row_shr:15
+// GFX12: [0x05,0x00,0x45,0xd6,0xfa,0x04,0xee,0x01,0x01,0x1f,0x01,0xff]
+
+v_xad_u32_e64_dpp v5, v1, v2, exec_hi row_ror:1
+// GFX12: [0x05,0x00,0x45,0xd6,0xfa,0x04,0xfe,0x01,0x01,0x21,0x01,0xff]
+
+v_xad_u32_e64_dpp v5, v1, v2, exec_lo row_ror:15
+// GFX12: [0x05,0x00,0x45,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x2f,0x01,0xff]
+
+v_xad_u32_e64_dpp v5, v1, v2, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x00,0x45,0xd6,0xfa,0x04,0xf2,0x01,0x01,0x50,0x01,0xff]
+
+v_xad_u32_e64_dpp v5, v1, v2, -1 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x00,0x45,0xd6,0xfa,0x04,0x06,0x03,0x01,0x5f,0x01,0x01]
+
+v_xad_u32_e64_dpp v5, v1, v2, 0.5 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: [0x05,0x00,0x45,0xd6,0xfa,0x04,0xc2,0x03,0x01,0x60,0x09,0x13]
+
+v_xad_u32_e64_dpp v255, v255, v255, src_scc row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: [0xff,0x00,0x45,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x05,0x30]
+
+v_xor3_b32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x40,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_xor3_b32_e64_dpp v5, v1, s2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x40,0xd6,0xfa,0x04,0x0c,0x04,0x01,0x1b,0x00,0xff]
+
+v_xor3_b32_e64_dpp v5, v1, 10, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x40,0xd6,0xfa,0x14,0x0d,0x04,0x01,0x1b,0x00,0xff]
+
+v_xor3_b32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX12: [0x05,0x00,0x40,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_xor3_b32_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX12: [0x05,0x00,0x40,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_xor3_b32_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX12: [0x05,0x00,0x40,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_xor3_b32_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX12: [0x05,0x00,0x40,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_xor3_b32_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX12: [0x05,0x00,0x40,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_xor3_b32_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX12: [0x05,0x00,0x40,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_xor3_b32_e64_dpp v5, v1, v2, ttmp15 row_shr:15
+// GFX12: [0x05,0x00,0x40,0xd6,0xfa,0x04,0xee,0x01,0x01,0x1f,0x01,0xff]
+
+v_xor3_b32_e64_dpp v5, v1, v2, exec_hi row_ror:1
+// GFX12: [0x05,0x00,0x40,0xd6,0xfa,0x04,0xfe,0x01,0x01,0x21,0x01,0xff]
+
+v_xor3_b32_e64_dpp v5, v1, v2, exec_lo row_ror:15
+// GFX12: [0x05,0x00,0x40,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x2f,0x01,0xff]
+
+v_xor3_b32_e64_dpp v5, v1, v2, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x00,0x40,0xd6,0xfa,0x04,0xf2,0x01,0x01,0x50,0x01,0xff]
+
+v_xor3_b32_e64_dpp v5, v1, v2, -1 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x00,0x40,0xd6,0xfa,0x04,0x06,0x03,0x01,0x5f,0x01,0x01]
+
+v_xor3_b32_e64_dpp v5, v1, v2, 0.5 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: [0x05,0x00,0x40,0xd6,0xfa,0x04,0xc2,0x03,0x01,0x60,0x09,0x13]
+
+v_xor3_b32_e64_dpp v255, v255, v255, src_scc row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: [0xff,0x00,0x40,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x05,0x30]
+
+v_xor_b16_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+
+v_xor_b16_e64_dpp v5, v1, v2 quad_perm:[0,1,2,3]
+// GFX12: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+
+v_xor_b16_e64_dpp v5, v1, v2 row_mirror
+// GFX12: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+
+v_xor_b16_e64_dpp v5, v1, v2 row_half_mirror
+// GFX12: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+
+v_xor_b16_e64_dpp v5, v1, v2 row_shl:1
+// GFX12: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+
+v_xor_b16_e64_dpp v5, v1, v2 row_shl:15
+// GFX12: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+
+v_xor_b16_e64_dpp v5, v1, v2 row_shr:1
+// GFX12: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+
+v_xor_b16_e64_dpp v5, v1, v2 row_shr:15
+// GFX12: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+
+v_xor_b16_e64_dpp v5, v1, v2 row_ror:1
+// GFX12: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+
+v_xor_b16_e64_dpp v5, v1, v2 row_ror:15
+// GFX12: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+
+v_xor_b16_e64_dpp v5, v1, v2 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+
+v_xor_b16_e64_dpp v5, v1, v2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x5f,0x01,0x01]
+
+v_xor_b16_e64_dpp v5, v1, v2 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x60,0x09,0x13]
+
+v_xor_b16_e64_dpp v255, v255, v255 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: [0xff,0x00,0x64,0xd7,0xfa,0xfe,0x03,0x00,0xff,0x6f,0x05,0x30]
+
+v_add_nc_i16_e64_dpp v5, v1, v2 op_sel:[1,1,1] row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x58,0x0d,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+
+v_add_nc_i16_e64_dpp v5, v1, v2 op_sel:[1,0,0] row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x08,0x0d,0xd7,0xfa,0x04,0x02,0x00,0x01,0x5f,0x01,0x01]
+
+v_add_nc_i16_e64_dpp v5, v1, v2 op_sel:[0,1,0] row_xmask:0 row_mask:0x1 bank_mask:0x3
+// GFX12: [0x05,0x10,0x0d,0xd7,0xfa,0x04,0x02,0x00,0x01,0x60,0x01,0x13]
+
+v_add_nc_i16_e64_dpp v255, v255, v255 op_sel:[0,0,1] clamp row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1
+// GFX12: [0xff,0xc0,0x0d,0xd7,0xfa,0xfe,0x03,0x00,0xff,0x6f,0x0d,0x30]
+
+v_add_nc_u16_e64_dpp v5, v1, v2 op_sel:[1,1,1] row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x58,0x03,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+
+v_add_nc_u16_e64_dpp v5, v1, v2 op_sel:[1,0,0] row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x08,0x03,0xd7,0xfa,0x04,0x02,0x00,0x01,0x5f,0x01,0x01]
+
+v_add_nc_u16_e64_dpp v5, v1, v2 op_sel:[0,1,0] row_xmask:0 row_mask:0x1 bank_mask:0x3
+// GFX12: [0x05,0x10,0x03,0xd7,0xfa,0x04,0x02,0x00,0x01,0x60,0x01,0x13]
+
+v_add_nc_u16_e64_dpp v255, v255, v255 op_sel:[0,0,1] clamp row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1
+// GFX12: [0xff,0xc0,0x03,0xd7,0xfa,0xfe,0x03,0x00,0xff,0x6f,0x0d,0x30]
+
+v_cvt_pk_norm_i16_f16_e64_dpp v5, -v1, |v2| op_sel:[1,0,0] row_xmask:0 row_mask:0x1 bank_mask:0x3
+// GFX12: [0x05,0x0a,0x12,0xd7,0xfa,0x04,0x02,0x20,0x01,0x60,0x01,0x13]
+
+v_cvt_pk_norm_i16_f16_e64_dpp v255, -|v255|, -|v255| op_sel:[0,1,0] row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1
+// GFX12: [0xff,0x13,0x12,0xd7,0xfa,0xfe,0x03,0x60,0xff,0x6f,0x0d,0x30]
+
+v_cvt_pk_norm_u16_f16_e64_dpp v5, -v1, |v2| op_sel:[1,0,0] row_xmask:0 row_mask:0x1 bank_mask:0x3
+// GFX12: [0x05,0x0a,0x13,0xd7,0xfa,0x04,0x02,0x20,0x01,0x60,0x01,0x13]
+
+v_cvt_pk_norm_u16_f16_e64_dpp v255, -|v255|, -|v255| op_sel:[0,1,0] row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1
+// GFX12: [0xff,0x13,0x13,0xd7,0xfa,0xfe,0x03,0x60,0xff,0x6f,0x0d,0x30]
+
+v_div_fixup_f16_e64_dpp v5, -v1, v2, |exec_lo| op_sel:[1,1,1,1] row_ror:15 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x7c,0x54,0xd6,0xfa,0x04,0xfa,0x21,0x01,0x2f,0x01,0xff]
+
+v_div_fixup_f16_e64_dpp v5, -|v1|, -|v2|, null op_sel:[1,0,0,0] row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x0b,0x54,0xd6,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff]
+
+v_div_fixup_f16_e64_dpp v5, -|v1|, v2, -|-1| op_sel:[0,1,0,0] row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x15,0x54,0xd6,0xfa,0x04,0x06,0xa3,0x01,0x5f,0x01,0x01]
+
+v_div_fixup_f16_e64_dpp v5, v1, -|v2|, -|0.5| op_sel:[0,0,1,0] row_xmask:0 row_mask:0x1 bank_mask:0x3
+// GFX12: [0x05,0x26,0x54,0xd6,0xfa,0x04,0xc2,0xc3,0x01,0x60,0x01,0x13]
+
+v_div_fixup_f16_e64_dpp v255, -|v255|, -|v255|, -|src_scc| op_sel:[0,0,0,1] clamp row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1
+// GFX12: [0xff,0xc7,0x54,0xd6,0xfa,0xfe,0xf7,0xe3,0xff,0x6f,0x0d,0x30]
+
+v_fma_f16_e64_dpp v5, -v1, v2, |exec_lo| op_sel:[1,1,1,1] row_ror:15 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x7c,0x48,0xd6,0xfa,0x04,0xfa,0x21,0x01,0x2f,0x01,0xff]
+
+v_fma_f16_e64_dpp v5, -|v1|, -|v2|, null op_sel:[1,0,0,0] row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x0b,0x48,0xd6,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff]
+
+v_fma_f16_e64_dpp v5, -|v1|, v2, -|-1| op_sel:[0,1,0,0] row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x15,0x48,0xd6,0xfa,0x04,0x06,0xa3,0x01,0x5f,0x01,0x01]
+
+v_fma_f16_e64_dpp v5, v1, -|v2|, -|0.5| op_sel:[0,0,1,0] row_xmask:0 row_mask:0x1 bank_mask:0x3
+// GFX12: [0x05,0x26,0x48,0xd6,0xfa,0x04,0xc2,0xc3,0x01,0x60,0x01,0x13]
+
+v_fma_f16_e64_dpp v255, -|v255|, -|v255|, -|src_scc| op_sel:[0,0,0,1] clamp row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1
+// GFX12: [0xff,0xc7,0x48,0xd6,0xfa,0xfe,0xf7,0xe3,0xff,0x6f,0x0d,0x30]
+
+v_mad_i16_e64_dpp v5, v1, v2, exec_hi op_sel:[1,1,1,1] row_ror:15 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x78,0x53,0xd6,0xfa,0x04,0xfe,0x01,0x01,0x2f,0x01,0xff]
+
+v_mad_i16_e64_dpp v5, v1, v2, exec_lo op_sel:[1,0,0,0] row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x08,0x53,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x50,0x01,0xff]
+
+v_mad_i16_e64_dpp v5, v1, v2, null op_sel:[0,1,0,0] row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x10,0x53,0xd6,0xfa,0x04,0xf2,0x01,0x01,0x5f,0x01,0x01]
+
+v_mad_i16_e64_dpp v5, v1, v2, -1 op_sel:[0,0,1,0] row_xmask:0 row_mask:0x1 bank_mask:0x3
+// GFX12: [0x05,0x20,0x53,0xd6,0xfa,0x04,0x06,0x03,0x01,0x60,0x01,0x13]
+
+v_mad_i16_e64_dpp v255, v255, v255, src_scc op_sel:[0,0,0,1] clamp row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1
+// GFX12: [0xff,0xc0,0x53,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x0d,0x30]
+
+v_mad_i32_i16_e64_dpp v5, v1, v2, 0.5 op_sel:[1,0,0,0] row_xmask:0 row_mask:0x1 bank_mask:0x3
+// GFX12: [0x05,0x08,0x5a,0xd6,0xfa,0x04,0xc2,0x03,0x01,0x60,0x01,0x13]
+
+v_mad_i32_i16_e64_dpp v255, v255, v255, src_scc op_sel:[0,1,0,0] clamp row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1
+// GFX12: [0xff,0x90,0x5a,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x0d,0x30]
+
+v_mad_u16_e64_dpp v5, v1, v2, exec_hi op_sel:[1,1,1,1] row_ror:15 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x78,0x41,0xd6,0xfa,0x04,0xfe,0x01,0x01,0x2f,0x01,0xff]
+
+v_mad_u16_e64_dpp v5, v1, v2, exec_lo op_sel:[1,0,0,0] row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x08,0x41,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x50,0x01,0xff]
+
+v_mad_u16_e64_dpp v5, v1, v2, null op_sel:[0,1,0,0] row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x10,0x41,0xd6,0xfa,0x04,0xf2,0x01,0x01,0x5f,0x01,0x01]
+
+v_mad_u16_e64_dpp v5, v1, v2, -1 op_sel:[0,0,1,0] row_xmask:0 row_mask:0x1 bank_mask:0x3
+// GFX12: [0x05,0x20,0x41,0xd6,0xfa,0x04,0x06,0x03,0x01,0x60,0x01,0x13]
+
+v_mad_u16_e64_dpp v255, v255, v255, src_scc op_sel:[0,0,0,1] clamp row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1
+// GFX12: [0xff,0xc0,0x41,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x0d,0x30]
+
+v_mad_u32_u16_e64_dpp v5, v1, v2, 0.5 op_sel:[1,0,0,0] row_xmask:0 row_mask:0x1 bank_mask:0x3
+// GFX12: [0x05,0x08,0x59,0xd6,0xfa,0x04,0xc2,0x03,0x01,0x60,0x01,0x13]
+
+v_mad_u32_u16_e64_dpp v255, v255, v255, src_scc op_sel:[0,1,0,0] clamp row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1
+// GFX12: [0xff,0x90,0x59,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x0d,0x30]
+
+v_max3_num_f16_e64_dpp v5, -v1, v2, |exec_lo| op_sel:[1,1,1,1] row_ror:15 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x7c,0x2c,0xd6,0xfa,0x04,0xfa,0x21,0x01,0x2f,0x01,0xff]
+
+v_max3_num_f16_e64_dpp v5, -|v1|, -|v2|, null op_sel:[1,0,0,0] row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x0b,0x2c,0xd6,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff]
+
+v_max3_num_f16_e64_dpp v5, -|v1|, v2, -|-1| op_sel:[0,1,0,0] row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x15,0x2c,0xd6,0xfa,0x04,0x06,0xa3,0x01,0x5f,0x01,0x01]
+
+v_max3_num_f16_e64_dpp v5, v1, -|v2|, -|0.5| op_sel:[0,0,1,0] row_xmask:0 row_mask:0x1 bank_mask:0x3
+// GFX12: [0x05,0x26,0x2c,0xd6,0xfa,0x04,0xc2,0xc3,0x01,0x60,0x01,0x13]
+
+v_max3_num_f16_e64_dpp v255, -|v255|, -|v255|, -|src_scc| op_sel:[0,0,0,1] clamp row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1
+// GFX12: [0xff,0xc7,0x2c,0xd6,0xfa,0xfe,0xf7,0xe3,0xff,0x6f,0x0d,0x30]
+
+v_max3_i16_e64_dpp v5, v1, v2, exec_hi op_sel:[1,1,1,1] row_ror:15 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x78,0x4d,0xd6,0xfa,0x04,0xfe,0x01,0x01,0x2f,0x01,0xff]
+
+v_max3_i16_e64_dpp v5, v1, v2, exec_lo op_sel:[1,0,0,0] row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x08,0x4d,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x50,0x01,0xff]
+
+v_max3_i16_e64_dpp v5, v1, v2, null op_sel:[0,1,0,0] row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x10,0x4d,0xd6,0xfa,0x04,0xf2,0x01,0x01,0x5f,0x01,0x01]
+
+v_max3_i16_e64_dpp v5, v1, v2, -1 op_sel:[0,0,1,0] row_xmask:0 row_mask:0x1 bank_mask:0x3
+// GFX12: [0x05,0x20,0x4d,0xd6,0xfa,0x04,0x06,0x03,0x01,0x60,0x01,0x13]
+
+v_max3_i16_e64_dpp v255, v255, v255, src_scc op_sel:[0,0,0,1] row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1
+// GFX12: [0xff,0x40,0x4d,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x0d,0x30]
+
+v_max3_u16_e64_dpp v5, v1, v2, exec_hi op_sel:[1,1,1,1] row_ror:15 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x78,0x4e,0xd6,0xfa,0x04,0xfe,0x01,0x01,0x2f,0x01,0xff]
+
+v_max3_u16_e64_dpp v5, v1, v2, exec_lo op_sel:[1,0,0,0] row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x08,0x4e,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x50,0x01,0xff]
+
+v_max3_u16_e64_dpp v5, v1, v2, null op_sel:[0,1,0,0] row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x10,0x4e,0xd6,0xfa,0x04,0xf2,0x01,0x01,0x5f,0x01,0x01]
+
+v_max3_u16_e64_dpp v5, v1, v2, -1 op_sel:[0,0,1,0] row_xmask:0 row_mask:0x1 bank_mask:0x3
+// GFX12: [0x05,0x20,0x4e,0xd6,0xfa,0x04,0x06,0x03,0x01,0x60,0x01,0x13]
+
+v_max3_u16_e64_dpp v255, v255, v255, src_scc op_sel:[0,0,0,1] row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1
+// GFX12: [0xff,0x40,0x4e,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x0d,0x30]
+
+v_med3_num_f16_e64_dpp v5, -v1, v2, |exec_lo| op_sel:[1,1,1,1] row_ror:15 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x7c,0x32,0xd6,0xfa,0x04,0xfa,0x21,0x01,0x2f,0x01,0xff]
+
+v_med3_num_f16_e64_dpp v5, -|v1|, -|v2|, null op_sel:[1,0,0,0] row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x0b,0x32,0xd6,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff]
+
+v_med3_num_f16_e64_dpp v5, -|v1|, v2, -|-1| op_sel:[0,1,0,0] row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x15,0x32,0xd6,0xfa,0x04,0x06,0xa3,0x01,0x5f,0x01,0x01]
+
+v_med3_num_f16_e64_dpp v5, v1, -|v2|, -|0.5| op_sel:[0,0,1,0] row_xmask:0 row_mask:0x1 bank_mask:0x3
+// GFX12: [0x05,0x26,0x32,0xd6,0xfa,0x04,0xc2,0xc3,0x01,0x60,0x01,0x13]
+
+v_med3_num_f16_e64_dpp v255, -|v255|, -|v255|, -|src_scc| op_sel:[0,0,0,1] clamp row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1
+// GFX12: [0xff,0xc7,0x32,0xd6,0xfa,0xfe,0xf7,0xe3,0xff,0x6f,0x0d,0x30]
+
+v_med3_i16_e64_dpp v5, v1, v2, exec_hi op_sel:[1,1,1,1] row_ror:15 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x78,0x50,0xd6,0xfa,0x04,0xfe,0x01,0x01,0x2f,0x01,0xff]
+
+v_med3_i16_e64_dpp v5, v1, v2, exec_lo op_sel:[1,0,0,0] row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x08,0x50,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x50,0x01,0xff]
+
+v_med3_i16_e64_dpp v5, v1, v2, null op_sel:[0,1,0,0] row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x10,0x50,0xd6,0xfa,0x04,0xf2,0x01,0x01,0x5f,0x01,0x01]
+
+v_med3_i16_e64_dpp v5, v1, v2, -1 op_sel:[0,0,1,0] row_xmask:0 row_mask:0x1 bank_mask:0x3
+// GFX12: [0x05,0x20,0x50,0xd6,0xfa,0x04,0x06,0x03,0x01,0x60,0x01,0x13]
+
+v_med3_i16_e64_dpp v255, v255, v255, src_scc op_sel:[0,0,0,1] row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1
+// GFX12: [0xff,0x40,0x50,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x0d,0x30]
+
+v_med3_u16_e64_dpp v5, v1, v2, exec_hi op_sel:[1,1,1,1] row_ror:15 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x78,0x51,0xd6,0xfa,0x04,0xfe,0x01,0x01,0x2f,0x01,0xff]
+
+v_med3_u16_e64_dpp v5, v1, v2, exec_lo op_sel:[1,0,0,0] row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x08,0x51,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x50,0x01,0xff]
+
+v_med3_u16_e64_dpp v5, v1, v2, null op_sel:[0,1,0,0] row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x10,0x51,0xd6,0xfa,0x04,0xf2,0x01,0x01,0x5f,0x01,0x01]
+
+v_med3_u16_e64_dpp v5, v1, v2, -1 op_sel:[0,0,1,0] row_xmask:0 row_mask:0x1 bank_mask:0x3
+// GFX12: [0x05,0x20,0x51,0xd6,0xfa,0x04,0x06,0x03,0x01,0x60,0x01,0x13]
+
+v_med3_u16_e64_dpp v255, v255, v255, src_scc op_sel:[0,0,0,1] row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1
+// GFX12: [0xff,0x40,0x51,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x0d,0x30]
+
+v_min3_num_f16_e64_dpp v5, -v1, v2, |exec_lo| op_sel:[1,1,1,1] row_ror:15 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x7c,0x2b,0xd6,0xfa,0x04,0xfa,0x21,0x01,0x2f,0x01,0xff]
+
+v_min3_num_f16_e64_dpp v5, -|v1|, -|v2|, null op_sel:[1,0,0,0] row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x0b,0x2b,0xd6,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff]
+
+v_min3_num_f16_e64_dpp v5, -|v1|, v2, -|-1| op_sel:[0,1,0,0] row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x15,0x2b,0xd6,0xfa,0x04,0x06,0xa3,0x01,0x5f,0x01,0x01]
+
+v_min3_num_f16_e64_dpp v5, v1, -|v2|, -|0.5| op_sel:[0,0,1,0] row_xmask:0 row_mask:0x1 bank_mask:0x3
+// GFX12: [0x05,0x26,0x2b,0xd6,0xfa,0x04,0xc2,0xc3,0x01,0x60,0x01,0x13]
+
+v_min3_num_f16_e64_dpp v255, -|v255|, -|v255|, -|src_scc| op_sel:[0,0,0,1] clamp row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1
+// GFX12: [0xff,0xc7,0x2b,0xd6,0xfa,0xfe,0xf7,0xe3,0xff,0x6f,0x0d,0x30]
+
+v_min3_i16_e64_dpp v5, v1, v2, exec_hi op_sel:[1,1,1,1] row_ror:15 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x78,0x4a,0xd6,0xfa,0x04,0xfe,0x01,0x01,0x2f,0x01,0xff]
+
+v_min3_i16_e64_dpp v5, v1, v2, exec_lo op_sel:[1,0,0,0] row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x08,0x4a,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x50,0x01,0xff]
+
+v_min3_i16_e64_dpp v5, v1, v2, null op_sel:[0,1,0,0] row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x10,0x4a,0xd6,0xfa,0x04,0xf2,0x01,0x01,0x5f,0x01,0x01]
+
+v_min3_i16_e64_dpp v5, v1, v2, -1 op_sel:[0,0,1,0] row_xmask:0 row_mask:0x1 bank_mask:0x3
+// GFX12: [0x05,0x20,0x4a,0xd6,0xfa,0x04,0x06,0x03,0x01,0x60,0x01,0x13]
+
+v_min3_i16_e64_dpp v255, v255, v255, src_scc op_sel:[0,0,0,1] row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1
+// GFX12: [0xff,0x40,0x4a,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x0d,0x30]
+
+v_min3_u16_e64_dpp v5, v1, v2, exec_hi op_sel:[1,1,1,1] row_ror:15 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x78,0x4b,0xd6,0xfa,0x04,0xfe,0x01,0x01,0x2f,0x01,0xff]
+
+v_min3_u16_e64_dpp v5, v1, v2, exec_lo op_sel:[1,0,0,0] row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x08,0x4b,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x50,0x01,0xff]
+
+v_min3_u16_e64_dpp v5, v1, v2, null op_sel:[0,1,0,0] row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x10,0x4b,0xd6,0xfa,0x04,0xf2,0x01,0x01,0x5f,0x01,0x01]
+
+v_min3_u16_e64_dpp v5, v1, v2, -1 op_sel:[0,0,1,0] row_xmask:0 row_mask:0x1 bank_mask:0x3
+// GFX12: [0x05,0x20,0x4b,0xd6,0xfa,0x04,0x06,0x03,0x01,0x60,0x01,0x13]
+
+v_min3_u16_e64_dpp v255, v255, v255, src_scc op_sel:[0,0,0,1] row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1
+// GFX12: [0xff,0x40,0x4b,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x0d,0x30]
+
+v_pack_b32_f16_e64_dpp v5, -v1, |v2| op_sel:[1,0,0] row_xmask:0 row_mask:0x1 bank_mask:0x3
+// GFX12: [0x05,0x0a,0x11,0xd7,0xfa,0x04,0x02,0x20,0x01,0x60,0x01,0x13]
+
+v_pack_b32_f16_e64_dpp v255, -|v255|, -|v255| op_sel:[0,1,0] row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1
+// GFX12: [0xff,0x13,0x11,0xd7,0xfa,0xfe,0x03,0x60,0xff,0x6f,0x0d,0x30]
+
+v_sub_nc_i16_e64_dpp v5, v1, v2 op_sel:[1,1,1] row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x58,0x0e,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+
+v_sub_nc_i16_e64_dpp v5, v1, v2 op_sel:[1,0,0] row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x08,0x0e,0xd7,0xfa,0x04,0x02,0x00,0x01,0x5f,0x01,0x01]
+
+v_sub_nc_i16_e64_dpp v5, v1, v2 op_sel:[0,1,0] row_xmask:0 row_mask:0x1 bank_mask:0x3
+// GFX12: [0x05,0x10,0x0e,0xd7,0xfa,0x04,0x02,0x00,0x01,0x60,0x01,0x13]
+
+v_sub_nc_i16_e64_dpp v255, v255, v255 op_sel:[0,0,1] clamp row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1
+// GFX12: [0xff,0xc0,0x0e,0xd7,0xfa,0xfe,0x03,0x00,0xff,0x6f,0x0d,0x30]
+
+v_sub_nc_u16_e64_dpp v5, v1, v2 op_sel:[1,1,1] row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x58,0x04,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+
+v_sub_nc_u16_e64_dpp v5, v1, v2 op_sel:[1,0,0] row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x08,0x04,0xd7,0xfa,0x04,0x02,0x00,0x01,0x5f,0x01,0x01]
+
+v_sub_nc_u16_e64_dpp v5, v1, v2 op_sel:[0,1,0] row_xmask:0 row_mask:0x1 bank_mask:0x3
+// GFX12: [0x05,0x10,0x04,0xd7,0xfa,0x04,0x02,0x00,0x01,0x60,0x01,0x13]
+
+v_sub_nc_u16_e64_dpp v255, v255, v255 op_sel:[0,0,1] clamp row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1
+// GFX12: [0xff,0xc0,0x04,0xd7,0xfa,0xfe,0x03,0x00,0xff,0x6f,0x0d,0x30]
+
+v_dot2_f16_f16_e64_dpp v0, v1, v2, v3 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 fi:1
+// GFX12: [0x00,0x00,0x66,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x04,0x00]
+
+v_dot2_f16_f16_e64_dpp v0, v1, v2, v3 op_sel:[1,1,0,0] quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 fi:1
+// GFX12-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid op_sel operand
+
+v_dot2_f16_f16_e64_dpp v0, s1, v2, v3 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 fi:1
+// GFX12-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_dot2_f16_f16_e64_dpp v0, v1, s2, v3 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 fi:1
+// GFX12: [0x00,0x00,0x66,0xd6,0xfa,0x04,0x0c,0x04,0x01,0xe4,0x04,0x00]
+
+v_dot2_f16_f16_e64_dpp v0, v1, v2, v3 op_sel:[0,0,1,1] quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 fi:1
+// GFX12: [0x00,0x60,0x66,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x04,0x00]
+
+v_dot2_f16_f16_e64_dpp v0, |v1|, -v2, -|s3| op_sel:[0,0,1,1] quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 fi:1
+// GFX12: [0x00,0x65,0x66,0xd6,0xfa,0x04,0x0e,0xc0,0x01,0xe4,0x04,0x00]
+
+v_dot2_f16_f16_e64_dpp v5, v1, v2, 0.5 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x00,0x66,0xd6,0xfa,0x04,0xc2,0x03,0x01,0x1b,0x00,0xff]
+
+v_dot2_bf16_bf16_e64_dpp v0, v1, v2, v3 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 fi:1
+// GFX12: [0x00,0x00,0x67,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x04,0x00]
+
+v_dot2_bf16_bf16_e64_dpp v0, v1, v2, v3 op_sel:[1,1,0,0] quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 fi:1
+// GFX12-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid op_sel operand
+
+v_dot2_bf16_bf16_e64_dpp v0, s1, v2, v3 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// GFX12-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_dot2_bf16_bf16_e64_dpp v0, v1, s2, v3 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// GFX12: [0x00,0x00,0x67,0xd6,0xfa,0x04,0x0c,0x04,0x01,0xe4,0x00,0x00]
+
+v_dot2_bf16_bf16_e64_dpp v0, v1, v2, v3 op_sel:[0,0,1,1] quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 fi:1
+// GFX12: [0x00,0x60,0x67,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x04,0x00]
+
+v_dot2_bf16_bf16_e64_dpp v0, |v1|, -v2, -|s3| op_sel:[0,0,1,1] quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 fi:1
+// GFX12: [0x00,0x65,0x67,0xd6,0xfa,0x04,0x0e,0xc0,0x01,0xe4,0x04,0x00]
+
+v_dot2_bf16_bf16_e64_dpp v5, v1, v2, 0 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x00,0x67,0xd6,0xfa,0x04,0x02,0x02,0x01,0x1b,0x00,0xff]
+
+v_minimum_f32 v5, v1, v2 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x65,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+
+v_minimum_f32 v5, v1, v2 quad_perm:[0,1,2,3]
+// GFX12: [0x05,0x00,0x65,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+
+v_minimum_f32 v5, v1, v2 row_mirror
+// GFX12: [0x05,0x00,0x65,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+
+v_minimum_f32 v5, v1, v2 row_half_mirror
+// GFX12: [0x05,0x00,0x65,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+
+v_minimum_f32 v5, v1, v2 row_shl:1
+// GFX12: [0x05,0x00,0x65,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+
+v_minimum_f32 v5, v1, v2 row_shl:15
+// GFX12: [0x05,0x00,0x65,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+
+v_minimum_f32 v5, v1, v2 row_shr:1
+// GFX12: [0x05,0x00,0x65,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+
+v_minimum_f32 v5, v1, v2 row_shr:15
+// GFX12: [0x05,0x00,0x65,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+
+v_minimum_f32 v5, v1, v2 row_ror:1
+// GFX12: [0x05,0x00,0x65,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+
+v_minimum_f32 v5, v1, v2 row_ror:15
+// GFX12: [0x05,0x00,0x65,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+
+v_minimum_f32 v5, v1, v2 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x00,0x65,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+
+v_minimum_f32 v5, |v1|, -v2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x01,0x65,0xd7,0xfa,0x04,0x02,0x40,0x01,0x5f,0x01,0x01]
+
+v_minimum_f32 v5, -v1, |v2| row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: [0x05,0x02,0x65,0xd7,0xfa,0x04,0x02,0x20,0x01,0x60,0x09,0x13]
+
+v_minimum_f32 v255, -|v255|, -|v255| row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: [0xff,0x03,0x65,0xd7,0xfa,0xfe,0x03,0x60,0xff,0x6f,0x05,0x30]
+
+v_maximum_f32 v5, v1, v2 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x66,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+
+v_maximum_f32 v5, v1, v2 quad_perm:[0,1,2,3]
+// GFX12: [0x05,0x00,0x66,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+
+v_maximum_f32 v5, v1, v2 row_mirror
+// GFX12: [0x05,0x00,0x66,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+
+v_maximum_f32 v5, v1, v2 row_half_mirror
+// GFX12: [0x05,0x00,0x66,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+
+v_maximum_f32 v5, v1, v2 row_shl:1
+// GFX12: [0x05,0x00,0x66,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+
+v_maximum_f32 v5, v1, v2 row_shl:15
+// GFX12: [0x05,0x00,0x66,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+
+v_maximum_f32 v5, v1, v2 row_shr:1
+// GFX12: [0x05,0x00,0x66,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+
+v_maximum_f32 v5, v1, v2 row_shr:15
+// GFX12: [0x05,0x00,0x66,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+
+v_maximum_f32 v5, v1, v2 row_ror:1
+// GFX12: [0x05,0x00,0x66,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+
+v_maximum_f32 v5, v1, v2 row_ror:15
+// GFX12: [0x05,0x00,0x66,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+
+v_maximum_f32 v5, v1, v2 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x00,0x66,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+
+v_maximum_f32 v5, |v1|, -v2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x01,0x66,0xd7,0xfa,0x04,0x02,0x40,0x01,0x5f,0x01,0x01]
+
+v_maximum_f32 v5, -v1, |v2| row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: [0x05,0x02,0x66,0xd7,0xfa,0x04,0x02,0x20,0x01,0x60,0x09,0x13]
+
+v_maximum_f32 v255, -|v255|, -|v255| row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: [0xff,0x03,0x66,0xd7,0xfa,0xfe,0x03,0x60,0xff,0x6f,0x05,0x30]
+
+v_minimum_f16 v5, v1, v2 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x67,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+
+v_minimum_f16 v5, v1, v2 quad_perm:[0,1,2,3]
+// GFX12: [0x05,0x00,0x67,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+
+v_minimum_f16 v5, v1, v2 row_mirror
+// GFX12: [0x05,0x00,0x67,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+
+v_minimum_f16 v5, v1, v2 row_half_mirror
+// GFX12: [0x05,0x00,0x67,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+
+v_minimum_f16 v5, v1, v2 row_shl:1
+// GFX12: [0x05,0x00,0x67,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+
+v_minimum_f16 v5, v1, v2 row_shl:15
+// GFX12: [0x05,0x00,0x67,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+
+v_minimum_f16 v5, v1, v2 row_shr:1
+// GFX12: [0x05,0x00,0x67,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+
+v_minimum_f16 v5, v1, v2 row_shr:15
+// GFX12: [0x05,0x00,0x67,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+
+v_minimum_f16 v5, v1, v2 row_ror:1
+// GFX12: [0x05,0x00,0x67,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+
+v_minimum_f16 v5, v1, v2 row_ror:15
+// GFX12: [0x05,0x00,0x67,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+
+v_minimum_f16 v5, v1, v2 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x00,0x67,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+
+v_minimum_f16 v5, |v1|, -v2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x01,0x67,0xd7,0xfa,0x04,0x02,0x40,0x01,0x5f,0x01,0x01]
+
+v_minimum_f16 v5, -v1, |v2| row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: [0x05,0x02,0x67,0xd7,0xfa,0x04,0x02,0x20,0x01,0x60,0x09,0x13]
+
+v_minimum_f16 v255, -|v255|, -|v255| row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: [0xff,0x03,0x67,0xd7,0xfa,0xfe,0x03,0x60,0xff,0x6f,0x05,0x30]
+
+v_maximum_f16 v5, v1, v2 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x68,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+
+v_maximum_f16 v5, v1, v2 quad_perm:[0,1,2,3]
+// GFX12: [0x05,0x00,0x68,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+
+v_maximum_f16 v5, v1, v2 row_mirror
+// GFX12: [0x05,0x00,0x68,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+
+v_maximum_f16 v5, v1, v2 row_half_mirror
+// GFX12: [0x05,0x00,0x68,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+
+v_maximum_f16 v5, v1, v2 row_shl:1
+// GFX12: [0x05,0x00,0x68,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+
+v_maximum_f16 v5, v1, v2 row_shl:15
+// GFX12: [0x05,0x00,0x68,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+
+v_maximum_f16 v5, v1, v2 row_shr:1
+// GFX12: [0x05,0x00,0x68,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+
+v_maximum_f16 v5, v1, v2 row_shr:15
+// GFX12: [0x05,0x00,0x68,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+
+v_maximum_f16 v5, v1, v2 row_ror:1
+// GFX12: [0x05,0x00,0x68,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+
+v_maximum_f16 v5, v1, v2 row_ror:15
+// GFX12: [0x05,0x00,0x68,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+
+v_maximum_f16 v5, v1, v2 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x00,0x68,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+
+v_maximum_f16 v5, |v1|, -v2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x01,0x68,0xd7,0xfa,0x04,0x02,0x40,0x01,0x5f,0x01,0x01]
+
+v_maximum_f16 v5, -v1, |v2| row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: [0x05,0x02,0x68,0xd7,0xfa,0x04,0x02,0x20,0x01,0x60,0x09,0x13]
+
+v_maximum_f16 v255, -|v255|, -|v255| row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: [0xff,0x03,0x68,0xd7,0xfa,0xfe,0x03,0x60,0xff,0x6f,0x05,0x30]
+
+v_minimum3_f32 v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x2d,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_minimum3_f32 v5, v1, s2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x2d,0xd6,0xfa,0x04,0x0c,0x04,0x01,0x1b,0x00,0xff]
+
+v_minimum3_f32 v5, v1, 2.0, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x2d,0xd6,0xfa,0xe8,0x0d,0x04,0x01,0x1b,0x00,0xff]
+
+v_minimum3_f32 v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX12: [0x05,0x00,0x2d,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_minimum3_f32 v5, v1, v2, v3 row_mirror
+// GFX12: [0x05,0x00,0x2d,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_minimum3_f32 v5, v1, v2, v255 row_half_mirror
+// GFX12: [0x05,0x00,0x2d,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_minimum3_f32 v5, v1, v2, s105 row_shl:1
+// GFX12: [0x05,0x00,0x2d,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_minimum3_f32 v5, v1, v2, vcc_hi row_shl:15
+// GFX12: [0x05,0x00,0x2d,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_minimum3_f32 v5, v1, v2, vcc_lo row_shr:1
+// GFX12: [0x05,0x00,0x2d,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_minimum3_f32 v5, |v1|, v2, -ttmp15 row_shr:15
+// GFX12: [0x05,0x01,0x2d,0xd6,0xfa,0x04,0xee,0x81,0x01,0x1f,0x01,0xff]
+
+v_minimum3_f32 v5, v1, -|v2|, exec_hi row_ror:1
+// GFX12: [0x05,0x02,0x2d,0xd6,0xfa,0x04,0xfe,0x41,0x01,0x21,0x01,0xff]
+
+v_minimum3_f32 v5, -v1, v2, |exec_lo| row_ror:15
+// GFX12: [0x05,0x04,0x2d,0xd6,0xfa,0x04,0xfa,0x21,0x01,0x2f,0x01,0xff]
+
+v_minimum3_f32 v5, -|v1|, -|v2|, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x03,0x2d,0xd6,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff]
+
+v_minimum3_f32 v5, -|v1|, v2, -|-1| mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x05,0x2d,0xd6,0xfa,0x04,0x06,0xab,0x01,0x5f,0x01,0x01]
+
+v_minimum3_f32 v5, v1, -|v2|, -|0.5| mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: [0x05,0x06,0x2d,0xd6,0xfa,0x04,0xc2,0xd3,0x01,0x60,0x09,0x13]
+
+v_minimum3_f32 v255, -|v255|, -|v255|, -|src_scc| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: [0xff,0x87,0x2d,0xd6,0xfa,0xfe,0xf7,0xfb,0xff,0x6f,0x05,0x30]
+
+v_maximum3_f32 v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x2e,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_maximum3_f32 v5, v1, s2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x2e,0xd6,0xfa,0x04,0x0c,0x04,0x01,0x1b,0x00,0xff]
+
+v_maximum3_f32 v5, v1, 2.0, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x2e,0xd6,0xfa,0xe8,0x0d,0x04,0x01,0x1b,0x00,0xff]
+
+v_maximum3_f32 v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX12: [0x05,0x00,0x2e,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_maximum3_f32 v5, v1, v2, v3 row_mirror
+// GFX12: [0x05,0x00,0x2e,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_maximum3_f32 v5, v1, v2, v255 row_half_mirror
+// GFX12: [0x05,0x00,0x2e,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_maximum3_f32 v5, v1, v2, s105 row_shl:1
+// GFX12: [0x05,0x00,0x2e,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_maximum3_f32 v5, v1, v2, vcc_hi row_shl:15
+// GFX12: [0x05,0x00,0x2e,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_maximum3_f32 v5, v1, v2, vcc_lo row_shr:1
+// GFX12: [0x05,0x00,0x2e,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_maximum3_f32 v5, |v1|, v2, -ttmp15 row_shr:15
+// GFX12: [0x05,0x01,0x2e,0xd6,0xfa,0x04,0xee,0x81,0x01,0x1f,0x01,0xff]
+
+v_maximum3_f32 v5, v1, -|v2|, exec_hi row_ror:1
+// GFX12: [0x05,0x02,0x2e,0xd6,0xfa,0x04,0xfe,0x41,0x01,0x21,0x01,0xff]
+
+v_maximum3_f32 v5, -v1, v2, |exec_lo| row_ror:15
+// GFX12: [0x05,0x04,0x2e,0xd6,0xfa,0x04,0xfa,0x21,0x01,0x2f,0x01,0xff]
+
+v_maximum3_f32 v5, -|v1|, -|v2|, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x03,0x2e,0xd6,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff]
+
+v_maximum3_f32 v5, -|v1|, v2, -|-1| mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x05,0x2e,0xd6,0xfa,0x04,0x06,0xab,0x01,0x5f,0x01,0x01]
+
+v_maximum3_f32 v5, v1, -|v2|, -|0.5| mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: [0x05,0x06,0x2e,0xd6,0xfa,0x04,0xc2,0xd3,0x01,0x60,0x09,0x13]
+
+v_maximum3_f32 v255, -|v255|, -|v255|, -|src_scc| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: [0xff,0x87,0x2e,0xd6,0xfa,0xfe,0xf7,0xfb,0xff,0x6f,0x05,0x30]
+
+v_minimum3_f16 v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x2f,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_minimum3_f16 v5, v1, s2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x2f,0xd6,0xfa,0x04,0x0c,0x04,0x01,0x1b,0x00,0xff]
+
+v_minimum3_f16 v5, v1, 2.0, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x2f,0xd6,0xfa,0xe8,0x0d,0x04,0x01,0x1b,0x00,0xff]
+
+v_minimum3_f16 v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX12: [0x05,0x00,0x2f,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_minimum3_f16 v5, v1, v2, v3 row_mirror
+// GFX12: [0x05,0x00,0x2f,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_minimum3_f16 v5, v1, v2, v255 row_half_mirror
+// GFX12: [0x05,0x00,0x2f,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_minimum3_f16 v5, v1, v2, s105 row_shl:1
+// GFX12: [0x05,0x00,0x2f,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_minimum3_f16 v5, v1, v2, vcc_hi row_shl:15
+// GFX12: [0x05,0x00,0x2f,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_minimum3_f16 v5, v1, v2, vcc_lo row_shr:1
+// GFX12: [0x05,0x00,0x2f,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_minimum3_f16 v5, |v1|, v2, -ttmp15 row_shr:15
+// GFX12: [0x05,0x01,0x2f,0xd6,0xfa,0x04,0xee,0x81,0x01,0x1f,0x01,0xff]
+
+v_minimum3_f16 v5, v1, -|v2|, exec_hi row_ror:1
+// GFX12: [0x05,0x02,0x2f,0xd6,0xfa,0x04,0xfe,0x41,0x01,0x21,0x01,0xff]
+
+v_minimum3_f16 v5, -v1, v2, |exec_lo| row_ror:15
+// GFX12: [0x05,0x04,0x2f,0xd6,0xfa,0x04,0xfa,0x21,0x01,0x2f,0x01,0xff]
+
+v_minimum3_f16 v5, -|v1|, -|v2|, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x03,0x2f,0xd6,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff]
+
+v_minimum3_f16 v5, -|v1|, v2, -|-1| row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x05,0x2f,0xd6,0xfa,0x04,0x06,0xa3,0x01,0x5f,0x01,0x01]
+
+v_minimum3_f16 v5, v1, -|v2|, -|0.5| row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: [0x05,0x06,0x2f,0xd6,0xfa,0x04,0xc2,0xc3,0x01,0x60,0x09,0x13]
+
+v_minimum3_f16 v255, -|v255|, -|v255|, -|src_scc| clamp row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: [0xff,0x87,0x2f,0xd6,0xfa,0xfe,0xf7,0xe3,0xff,0x6f,0x05,0x30]
+
+v_maximum3_f16 v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x30,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_maximum3_f16 v5, v1, s2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x30,0xd6,0xfa,0x04,0x0c,0x04,0x01,0x1b,0x00,0xff]
+
+v_maximum3_f16 v5, v1, 2.0, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x30,0xd6,0xfa,0xe8,0x0d,0x04,0x01,0x1b,0x00,0xff]
+
+v_maximum3_f16 v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX12: [0x05,0x00,0x30,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_maximum3_f16 v5, v1, v2, v3 row_mirror
+// GFX12: [0x05,0x00,0x30,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_maximum3_f16 v5, v1, v2, v255 row_half_mirror
+// GFX12: [0x05,0x00,0x30,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_maximum3_f16 v5, v1, v2, s105 row_shl:1
+// GFX12: [0x05,0x00,0x30,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_maximum3_f16 v5, v1, v2, vcc_hi row_shl:15
+// GFX12: [0x05,0x00,0x30,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_maximum3_f16 v5, v1, v2, vcc_lo row_shr:1
+// GFX12: [0x05,0x00,0x30,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_maximum3_f16 v5, |v1|, v2, -ttmp15 row_shr:15
+// GFX12: [0x05,0x01,0x30,0xd6,0xfa,0x04,0xee,0x81,0x01,0x1f,0x01,0xff]
+
+v_maximum3_f16 v5, v1, -|v2|, exec_hi row_ror:1
+// GFX12: [0x05,0x02,0x30,0xd6,0xfa,0x04,0xfe,0x41,0x01,0x21,0x01,0xff]
+
+v_maximum3_f16 v5, -v1, v2, |exec_lo| row_ror:15
+// GFX12: [0x05,0x04,0x30,0xd6,0xfa,0x04,0xfa,0x21,0x01,0x2f,0x01,0xff]
+
+v_maximum3_f16 v5, -|v1|, -|v2|, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x03,0x30,0xd6,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff]
+
+v_maximum3_f16 v5, -|v1|, v2, -|-1| row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x05,0x30,0xd6,0xfa,0x04,0x06,0xa3,0x01,0x5f,0x01,0x01]
+
+v_maximum3_f16 v5, v1, -|v2|, -|0.5| row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: [0x05,0x06,0x30,0xd6,0xfa,0x04,0xc2,0xc3,0x01,0x60,0x09,0x13]
+
+v_maximum3_f16 v255, -|v255|, -|v255|, -|src_scc| clamp row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: [0xff,0x87,0x30,0xd6,0xfa,0xfe,0xf7,0xe3,0xff,0x6f,0x05,0x30]
+
+v_maximumminimum_f32 v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX12: [0x05,0x00,0x6d,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_maximumminimum_f32 v5, v1, v2, v3 row_mirror
+// GFX12: [0x05,0x00,0x6d,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_maximumminimum_f32 v5, v1, v2, v255 row_half_mirror
+// GFX12: [0x05,0x00,0x6d,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_maximumminimum_f32 v5, v1, v2, s105 row_shl:1
+// GFX12: [0x05,0x00,0x6d,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_maximumminimum_f32 v5, v1, v2, vcc_hi row_shl:15
+// GFX12: [0x05,0x00,0x6d,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_maximumminimum_f32 v5, v1, v2, vcc_lo row_shr:1
+// GFX12: [0x05,0x00,0x6d,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_maximumminimum_f32 v5, |v1|, v2, -ttmp15 row_shr:15
+// GFX12: [0x05,0x01,0x6d,0xd6,0xfa,0x04,0xee,0x81,0x01,0x1f,0x01,0xff]
+
+v_maximumminimum_f32 v5, v1, -|v2|, exec_hi row_ror:1
+// GFX12: [0x05,0x02,0x6d,0xd6,0xfa,0x04,0xfe,0x41,0x01,0x21,0x01,0xff]
+
+v_maximumminimum_f32 v5, -v1, v2, |exec_lo| row_ror:15
+// GFX12: [0x05,0x04,0x6d,0xd6,0xfa,0x04,0xfa,0x21,0x01,0x2f,0x01,0xff]
+
+v_maximumminimum_f32 v5, -|v1|, -|v2|, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x03,0x6d,0xd6,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff]
+
+v_maximumminimum_f32 v5, -|v1|, v2, -|-1| mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x05,0x6d,0xd6,0xfa,0x04,0x06,0xab,0x01,0x5f,0x01,0x01]
+
+v_maximumminimum_f32 v5, v1, -|v2|, -|0.5| mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: [0x05,0x06,0x6d,0xd6,0xfa,0x04,0xc2,0xd3,0x01,0x60,0x09,0x13]
+
+v_maximumminimum_f32 v255, -|v255|, -|v255|, -|src_scc| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: [0xff,0x87,0x6d,0xd6,0xfa,0xfe,0xf7,0xfb,0xff,0x6f,0x05,0x30]
+
+v_minimummaximum_f32 v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x6c,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_minimummaximum_f32 v5, v1, s2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x6c,0xd6,0xfa,0x04,0x0c,0x04,0x01,0x1b,0x00,0xff]
+
+v_minimummaximum_f32 v5, v1, 2.0, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x6c,0xd6,0xfa,0xe8,0x0d,0x04,0x01,0x1b,0x00,0xff]
+
+v_minimummaximum_f32 v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX12: [0x05,0x00,0x6c,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_minimummaximum_f32 v5, v1, v2, v3 row_mirror
+// GFX12: [0x05,0x00,0x6c,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_minimummaximum_f32 v5, v1, v2, v255 row_half_mirror
+// GFX12: [0x05,0x00,0x6c,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_minimummaximum_f32 v5, v1, v2, s105 row_shl:1
+// GFX12: [0x05,0x00,0x6c,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_minimummaximum_f32 v5, v1, v2, vcc_hi row_shl:15
+// GFX12: [0x05,0x00,0x6c,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_minimummaximum_f32 v5, v1, v2, vcc_lo row_shr:1
+// GFX12: [0x05,0x00,0x6c,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_minimummaximum_f32 v5, |v1|, v2, -ttmp15 row_shr:15
+// GFX12: [0x05,0x01,0x6c,0xd6,0xfa,0x04,0xee,0x81,0x01,0x1f,0x01,0xff]
+
+v_minimummaximum_f32 v5, v1, -|v2|, exec_hi row_ror:1
+// GFX12: [0x05,0x02,0x6c,0xd6,0xfa,0x04,0xfe,0x41,0x01,0x21,0x01,0xff]
+
+v_minimummaximum_f32 v5, -v1, v2, |exec_lo| row_ror:15
+// GFX12: [0x05,0x04,0x6c,0xd6,0xfa,0x04,0xfa,0x21,0x01,0x2f,0x01,0xff]
+
+v_minimummaximum_f32 v5, -|v1|, -|v2|, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x03,0x6c,0xd6,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff]
+
+v_minimummaximum_f32 v5, -|v1|, v2, -|-1| mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x05,0x6c,0xd6,0xfa,0x04,0x06,0xab,0x01,0x5f,0x01,0x01]
+
+v_minimummaximum_f32 v5, v1, -|v2|, -|0.5| mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: [0x05,0x06,0x6c,0xd6,0xfa,0x04,0xc2,0xd3,0x01,0x60,0x09,0x13]
+
+v_minimummaximum_f32 v255, -|v255|, -|v255|, -|src_scc| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: [0xff,0x87,0x6c,0xd6,0xfa,0xfe,0xf7,0xfb,0xff,0x6f,0x05,0x30]
+
+v_maximumminimum_f16 v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x6f,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_maximumminimum_f16 v5, v1, s2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x6f,0xd6,0xfa,0x04,0x0c,0x04,0x01,0x1b,0x00,0xff]
+
+v_maximumminimum_f16 v5, v1, 2.0, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x6f,0xd6,0xfa,0xe8,0x0d,0x04,0x01,0x1b,0x00,0xff]
+
+v_maximumminimum_f16 v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX12: [0x05,0x00,0x6f,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_maximumminimum_f16 v5, v1, v2, v3 row_mirror
+// GFX12: [0x05,0x00,0x6f,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_maximumminimum_f16 v5, v1, v2, v255 row_half_mirror
+// GFX12: [0x05,0x00,0x6f,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_maximumminimum_f16 v5, v1, v2, s105 row_shl:1
+// GFX12: [0x05,0x00,0x6f,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_maximumminimum_f16 v5, v1, v2, vcc_hi row_shl:15
+// GFX12: [0x05,0x00,0x6f,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_maximumminimum_f16 v5, v1, v2, vcc_lo row_shr:1
+// GFX12: [0x05,0x00,0x6f,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_maximumminimum_f16 v5, |v1|, v2, -ttmp15 row_shr:15
+// GFX12: [0x05,0x01,0x6f,0xd6,0xfa,0x04,0xee,0x81,0x01,0x1f,0x01,0xff]
+
+v_maximumminimum_f16 v5, v1, -|v2|, exec_hi row_ror:1
+// GFX12: [0x05,0x02,0x6f,0xd6,0xfa,0x04,0xfe,0x41,0x01,0x21,0x01,0xff]
+
+v_maximumminimum_f16 v5, -v1, v2, |exec_lo| row_ror:15
+// GFX12: [0x05,0x04,0x6f,0xd6,0xfa,0x04,0xfa,0x21,0x01,0x2f,0x01,0xff]
+
+v_maximumminimum_f16 v5, -|v1|, -|v2|, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x03,0x6f,0xd6,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff]
+
+v_maximumminimum_f16 v5, -|v1|, v2, -|-1| row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x05,0x6f,0xd6,0xfa,0x04,0x06,0xa3,0x01,0x5f,0x01,0x01]
+
+v_maximumminimum_f16 v5, v1, -|v2|, -|0.5| row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: [0x05,0x06,0x6f,0xd6,0xfa,0x04,0xc2,0xc3,0x01,0x60,0x09,0x13]
+
+v_maximumminimum_f16 v255, -|v255|, -|v255|, -|src_scc| clamp row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: [0xff,0x87,0x6f,0xd6,0xfa,0xfe,0xf7,0xe3,0xff,0x6f,0x05,0x30]
+
+v_minimummaximum_f16 v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x6e,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_minimummaximum_f16 v5, v1, s2, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x6e,0xd6,0xfa,0x04,0x0c,0x04,0x01,0x1b,0x00,0xff]
+
+v_minimummaximum_f16 v5, v1, 2.0, v3 quad_perm:[3,2,1,0]
+// GFX12: [0x05,0x00,0x6e,0xd6,0xfa,0xe8,0x0d,0x04,0x01,0x1b,0x00,0xff]
+
+v_minimummaximum_f16 v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX12: [0x05,0x00,0x6e,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_minimummaximum_f16 v5, v1, v2, v3 row_mirror
+// GFX12: [0x05,0x00,0x6e,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_minimummaximum_f16 v5, v1, v2, v255 row_half_mirror
+// GFX12: [0x05,0x00,0x6e,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_minimummaximum_f16 v5, v1, v2, s105 row_shl:1
+// GFX12: [0x05,0x00,0x6e,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_minimummaximum_f16 v5, v1, v2, vcc_hi row_shl:15
+// GFX12: [0x05,0x00,0x6e,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_minimummaximum_f16 v5, v1, v2, vcc_lo row_shr:1
+// GFX12: [0x05,0x00,0x6e,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_minimummaximum_f16 v5, |v1|, v2, -ttmp15 row_shr:15
+// GFX12: [0x05,0x01,0x6e,0xd6,0xfa,0x04,0xee,0x81,0x01,0x1f,0x01,0xff]
+
+v_minimummaximum_f16 v5, v1, -|v2|, exec_hi row_ror:1
+// GFX12: [0x05,0x02,0x6e,0xd6,0xfa,0x04,0xfe,0x41,0x01,0x21,0x01,0xff]
+
+v_minimummaximum_f16 v5, -v1, v2, |exec_lo| row_ror:15
+// GFX12: [0x05,0x04,0x6e,0xd6,0xfa,0x04,0xfa,0x21,0x01,0x2f,0x01,0xff]
+
+v_minimummaximum_f16 v5, -|v1|, -|v2|, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: [0x05,0x03,0x6e,0xd6,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff]
+
+v_minimummaximum_f16 v5, -|v1|, v2, -|-1| row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: [0x05,0x05,0x6e,0xd6,0xfa,0x04,0x06,0xa3,0x01,0x5f,0x01,0x01]
+
+v_minimummaximum_f16 v5, v1, -|v2|, -|0.5| row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: [0x05,0x06,0x6e,0xd6,0xfa,0x04,0xc2,0xc3,0x01,0x60,0x09,0x13]
+
+v_minimummaximum_f16 v255, -|v255|, -|v255|, -|src_scc| clamp row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: [0xff,0x87,0x6e,0xd6,0xfa,0xfe,0xf7,0xe3,0xff,0x6f,0x05,0x30]
diff --git a/llvm/test/MC/AMDGPU/gfx12_asm_vop3_dpp16.s b/llvm/test/MC/AMDGPU/gfx12_asm_vop3_dpp16.s
index 91817b9..14b489e 100644
--- a/llvm/test/MC/AMDGPU/gfx12_asm_vop3_dpp16.s
+++ b/llvm/test/MC/AMDGPU/gfx12_asm_vop3_dpp16.s
@@ -1,7 +1,7 @@
-// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1200 -mattr=+wavefrontsize32 -show-encoding %s | FileCheck --check-prefixes=GFX12,W32 %s
-// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1200 -mattr=+wavefrontsize64 -show-encoding %s | FileCheck --check-prefixes=GFX12,W64 %s
-// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1200 -mattr=+wavefrontsize32 %s 2>&1 | FileCheck --check-prefixes=GFX12-ERR,W32-ERR --implicit-check-not=error: %s
-// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1200 -mattr=+wavefrontsize64 %s 2>&1 | FileCheck --check-prefixes=GFX12-ERR,W64-ERR --implicit-check-not=error: %s
+// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1200 -mattr=+wavefrontsize32,+real-true16 -show-encoding %s | FileCheck --check-prefixes=GFX12,W32 %s
+// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1200 -mattr=+wavefrontsize64,+real-true16 -show-encoding %s | FileCheck --check-prefixes=GFX12,W64 %s
+// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1200 -mattr=+wavefrontsize32,+real-true16 %s 2>&1 | FileCheck --check-prefixes=GFX12-ERR,W32-ERR --implicit-check-not=error: %s
+// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1200 -mattr=+wavefrontsize64,+real-true16 %s 2>&1 | FileCheck --check-prefixes=GFX12-ERR,W64-ERR --implicit-check-not=error: %s
v_add3_u32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
// GFX12: [0x05,0x00,0x55,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
diff --git a/llvm/test/MC/AMDGPU/gfx12_asm_vop3_dpp8-fake16.s b/llvm/test/MC/AMDGPU/gfx12_asm_vop3_dpp8-fake16.s
new file mode 100644
index 0000000..4622797
--- /dev/null
+++ b/llvm/test/MC/AMDGPU/gfx12_asm_vop3_dpp8-fake16.s
@@ -0,0 +1,3814 @@
+// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1200 -mattr=+wavefrontsize32,-real-true16 -show-encoding %s | FileCheck --check-prefixes=GFX12,W32 %s
+// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1200 -mattr=+wavefrontsize64,-real-true16 -show-encoding %s | FileCheck --check-prefixes=GFX12,W64 %s
+// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1200 -mattr=+wavefrontsize32,-real-true16 %s 2>&1 | FileCheck --check-prefixes=GFX12-ERR,W32-ERR --implicit-check-not=error: %s
+// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1200 -mattr=+wavefrontsize64,-real-true16 %s 2>&1 | FileCheck --check-prefixes=GFX12-ERR,W64-ERR --implicit-check-not=error: %s
+
+v_add3_u32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x55,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_add3_u32_e64_dpp v5, v1, s2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x55,0xd6,0xe9,0x04,0x0c,0x04,0x01,0x77,0x39,0x05]
+
+v_add3_u32_e64_dpp v5, v1, 10, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x55,0xd6,0xe9,0x14,0x0d,0x04,0x01,0x77,0x39,0x05]
+
+v_add3_u32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x55,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_add3_u32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x55,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_add3_u32_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x55,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_add3_u32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x55,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_add3_u32_e64_dpp v5, v1, v2, ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x55,0xd6,0xe9,0x04,0xee,0x01,0x01,0x77,0x39,0x05]
+
+v_add3_u32_e64_dpp v5, v1, v2, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x55,0xd6,0xe9,0x04,0xfe,0x01,0x01,0x77,0x39,0x05]
+
+v_add3_u32_e64_dpp v5, v1, v2, exec_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x55,0xd6,0xe9,0x04,0xfa,0x01,0x01,0x77,0x39,0x05]
+
+v_add3_u32_e64_dpp v5, v1, v2, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x55,0xd6,0xe9,0x04,0xf2,0x01,0x01,0x77,0x39,0x05]
+
+v_add3_u32_e64_dpp v5, v1, v2, -1 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x55,0xd6,0xe9,0x04,0x06,0x03,0x01,0x77,0x39,0x05]
+
+v_add3_u32_e64_dpp v5, v1, v2, 0.5 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: [0x05,0x00,0x55,0xd6,0xea,0x04,0xc2,0x03,0x01,0x77,0x39,0x05]
+
+v_add3_u32_e64_dpp v255, v255, v255, src_scc dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX12: [0xff,0x00,0x55,0xd6,0xe9,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
+
+v_add_co_u32_e64_dpp v5, s6, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// W32: [0x05,0x06,0x00,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32_e64_dpp v5, s105, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// W32: [0x05,0x69,0x00,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32_e64_dpp v5, s105, v1, s2 dpp8:[7,6,5,4,3,2,1,0]
+// W32: [0x05,0x69,0x00,0xd7,0xe9,0x04,0x00,0x00,0x01,0x77,0x39,0x05]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32_e64_dpp v5, vcc_lo, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// W32: [0x05,0x6a,0x00,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32_e64_dpp v5, vcc_hi, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// W32: [0x05,0x6b,0x00,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32_e64_dpp v5, ttmp15, v1, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// W32: [0x05,0x7b,0x00,0xd7,0xea,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32_e64_dpp v5, s[12:13], v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// W64: [0x05,0x0c,0x00,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32_e64_dpp v5, s[104:105], v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// W64: [0x05,0x68,0x00,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32_e64_dpp v5, s[104:105], v1, s2 dpp8:[7,6,5,4,3,2,1,0]
+// W64: [0x05,0x68,0x00,0xd7,0xe9,0x04,0x00,0x00,0x01,0x77,0x39,0x05]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32_e64_dpp v5, vcc, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// W64: [0x05,0x6a,0x00,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32_e64_dpp v5, ttmp[14:15], v1, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// W64: [0x05,0x7a,0x00,0xd7,0xea,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_add_co_u32_e64_dpp v255, null, v255, v255 clamp dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX12: [0xff,0xfc,0x00,0xd7,0xe9,0xfe,0x03,0x00,0xff,0x00,0x00,0x00]
+
+v_add_lshl_u32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x47,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_add_lshl_u32_e64_dpp v5, v1, s2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x47,0xd6,0xe9,0x04,0x0c,0x04,0x01,0x77,0x39,0x05]
+
+v_add_lshl_u32_e64_dpp v5, v1, 10, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x47,0xd6,0xe9,0x14,0x0d,0x04,0x01,0x77,0x39,0x05]
+
+v_add_lshl_u32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x47,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_add_lshl_u32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x47,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_add_lshl_u32_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x47,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_add_lshl_u32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x47,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_add_lshl_u32_e64_dpp v5, v1, v2, ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x47,0xd6,0xe9,0x04,0xee,0x01,0x01,0x77,0x39,0x05]
+
+v_add_lshl_u32_e64_dpp v5, v1, v2, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x47,0xd6,0xe9,0x04,0xfe,0x01,0x01,0x77,0x39,0x05]
+
+v_add_lshl_u32_e64_dpp v5, v1, v2, exec_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x47,0xd6,0xe9,0x04,0xfa,0x01,0x01,0x77,0x39,0x05]
+
+v_add_lshl_u32_e64_dpp v5, v1, v2, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x47,0xd6,0xe9,0x04,0xf2,0x01,0x01,0x77,0x39,0x05]
+
+v_add_lshl_u32_e64_dpp v5, v1, v2, -1 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x47,0xd6,0xe9,0x04,0x06,0x03,0x01,0x77,0x39,0x05]
+
+v_add_lshl_u32_e64_dpp v5, v1, v2, 0.5 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: [0x05,0x00,0x47,0xd6,0xea,0x04,0xc2,0x03,0x01,0x77,0x39,0x05]
+
+v_add_lshl_u32_e64_dpp v255, v255, v255, src_scc dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX12: [0xff,0x00,0x47,0xd6,0xe9,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
+
+v_add_nc_i16_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x0d,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_add_nc_i16_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: [0x05,0x00,0x0d,0xd7,0xea,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_add_nc_i16_e64_dpp v255, v255, v255 clamp dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX12: [0xff,0x80,0x0d,0xd7,0xe9,0xfe,0x03,0x00,0xff,0x00,0x00,0x00]
+
+v_add_nc_i32_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x26,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_add_nc_i32_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: [0x05,0x00,0x26,0xd7,0xea,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_add_nc_i32_e64_dpp v255, v255, v255 clamp dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX12: [0xff,0x80,0x26,0xd7,0xe9,0xfe,0x03,0x00,0xff,0x00,0x00,0x00]
+
+v_add_nc_u16_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x03,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_add_nc_u16_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: [0x05,0x00,0x03,0xd7,0xea,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_add_nc_u16_e64_dpp v255, v255, v255 clamp dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX12: [0xff,0x80,0x03,0xd7,0xe9,0xfe,0x03,0x00,0xff,0x00,0x00,0x00]
+
+v_alignbit_b32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x16,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_alignbit_b32_e64_dpp v5, v1, s2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x16,0xd6,0xe9,0x04,0x0c,0x04,0x01,0x77,0x39,0x05]
+
+v_alignbit_b32_e64_dpp v5, v1, 10, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x16,0xd6,0xe9,0x14,0x0d,0x04,0x01,0x77,0x39,0x05]
+
+v_alignbit_b32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x16,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_alignbit_b32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x16,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_alignbit_b32_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x16,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_alignbit_b32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x16,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_alignbit_b32_e64_dpp v5, v1, v2, ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x16,0xd6,0xe9,0x04,0xee,0x01,0x01,0x77,0x39,0x05]
+
+v_alignbit_b32_e64_dpp v5, v1, v2, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x16,0xd6,0xe9,0x04,0xfe,0x01,0x01,0x77,0x39,0x05]
+
+v_alignbit_b32_e64_dpp v5, v1, v2, exec_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x16,0xd6,0xe9,0x04,0xfa,0x01,0x01,0x77,0x39,0x05]
+
+v_alignbit_b32_e64_dpp v5, v1, v2, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x16,0xd6,0xe9,0x04,0xf2,0x01,0x01,0x77,0x39,0x05]
+
+v_alignbit_b32_e64_dpp v5, v1, v2, -1 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: [0x05,0x00,0x16,0xd6,0xea,0x04,0x06,0x03,0x01,0x77,0x39,0x05]
+
+v_alignbit_b32_e64_dpp v255, v255, v255, src_scc dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX12: [0xff,0x00,0x16,0xd6,0xe9,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
+
+v_alignbyte_b32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x17,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_alignbyte_b32_e64_dpp v5, v1, s2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x17,0xd6,0xe9,0x04,0x0c,0x04,0x01,0x77,0x39,0x05]
+
+v_alignbyte_b32_e64_dpp v5, v1, 10, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x17,0xd6,0xe9,0x14,0x0d,0x04,0x01,0x77,0x39,0x05]
+
+v_alignbyte_b32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x17,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_alignbyte_b32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x17,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_alignbyte_b32_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x17,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_alignbyte_b32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x17,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_alignbyte_b32_e64_dpp v5, v1, v2, ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x17,0xd6,0xe9,0x04,0xee,0x01,0x01,0x77,0x39,0x05]
+
+v_alignbyte_b32_e64_dpp v5, v1, v2, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x17,0xd6,0xe9,0x04,0xfe,0x01,0x01,0x77,0x39,0x05]
+
+v_alignbyte_b32_e64_dpp v5, v1, v2, exec_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x17,0xd6,0xe9,0x04,0xfa,0x01,0x01,0x77,0x39,0x05]
+
+v_alignbyte_b32_e64_dpp v5, v1, v2, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x17,0xd6,0xe9,0x04,0xf2,0x01,0x01,0x77,0x39,0x05]
+
+v_alignbyte_b32_e64_dpp v5, v1, v2, -1 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: [0x05,0x00,0x17,0xd6,0xea,0x04,0x06,0x03,0x01,0x77,0x39,0x05]
+
+v_alignbyte_b32_e64_dpp v255, v255, v255, src_scc dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX12: [0xff,0x00,0x17,0xd6,0xe9,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
+
+v_and_b16_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x62,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_and_b16_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: [0x05,0x00,0x62,0xd7,0xea,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_and_b16_e64_dpp v255, v255, v255 dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX12: [0xff,0x00,0x62,0xd7,0xe9,0xfe,0x03,0x00,0xff,0x00,0x00,0x00]
+
+v_and_or_b32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x57,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_and_or_b32_e64_dpp v5, v1, s2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x57,0xd6,0xe9,0x04,0x0c,0x04,0x01,0x77,0x39,0x05]
+
+v_and_or_b32_e64_dpp v5, v1, 10, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x57,0xd6,0xe9,0x14,0x0d,0x04,0x01,0x77,0x39,0x05]
+
+v_and_or_b32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x57,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_and_or_b32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x57,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_and_or_b32_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x57,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_and_or_b32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x57,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_and_or_b32_e64_dpp v5, v1, v2, ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x57,0xd6,0xe9,0x04,0xee,0x01,0x01,0x77,0x39,0x05]
+
+v_and_or_b32_e64_dpp v5, v1, v2, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x57,0xd6,0xe9,0x04,0xfe,0x01,0x01,0x77,0x39,0x05]
+
+v_and_or_b32_e64_dpp v5, v1, v2, exec_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x57,0xd6,0xe9,0x04,0xfa,0x01,0x01,0x77,0x39,0x05]
+
+v_and_or_b32_e64_dpp v5, v1, v2, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x57,0xd6,0xe9,0x04,0xf2,0x01,0x01,0x77,0x39,0x05]
+
+v_and_or_b32_e64_dpp v5, v1, v2, -1 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x57,0xd6,0xe9,0x04,0x06,0x03,0x01,0x77,0x39,0x05]
+
+v_and_or_b32_e64_dpp v5, v1, v2, 0.5 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: [0x05,0x00,0x57,0xd6,0xea,0x04,0xc2,0x03,0x01,0x77,0x39,0x05]
+
+v_and_or_b32_e64_dpp v255, v255, v255, src_scc dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX12: [0xff,0x00,0x57,0xd6,0xe9,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
+
+v_ashrrev_i16_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x3a,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_ashrrev_i16_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: [0x05,0x00,0x3a,0xd7,0xea,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_ashrrev_i16_e64_dpp v255, v255, v255 dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX12: [0xff,0x00,0x3a,0xd7,0xe9,0xfe,0x03,0x00,0xff,0x00,0x00,0x00]
+
+v_bcnt_u32_b32_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x1e,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_bcnt_u32_b32_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: [0x05,0x00,0x1e,0xd7,0xea,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_bcnt_u32_b32_e64_dpp v255, v255, v255 dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX12: [0xff,0x00,0x1e,0xd7,0xe9,0xfe,0x03,0x00,0xff,0x00,0x00,0x00]
+
+v_bfe_i32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x11,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_bfe_i32_e64_dpp v5, v1, s2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x11,0xd6,0xe9,0x04,0x0c,0x04,0x01,0x77,0x39,0x05]
+
+v_bfe_i32_e64_dpp v5, v1, 10, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x11,0xd6,0xe9,0x14,0x0d,0x04,0x01,0x77,0x39,0x05]
+
+v_bfe_i32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x11,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_bfe_i32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x11,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_bfe_i32_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x11,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_bfe_i32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x11,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_bfe_i32_e64_dpp v5, v1, v2, ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x11,0xd6,0xe9,0x04,0xee,0x01,0x01,0x77,0x39,0x05]
+
+v_bfe_i32_e64_dpp v5, v1, v2, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x11,0xd6,0xe9,0x04,0xfe,0x01,0x01,0x77,0x39,0x05]
+
+v_bfe_i32_e64_dpp v5, v1, v2, exec_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x11,0xd6,0xe9,0x04,0xfa,0x01,0x01,0x77,0x39,0x05]
+
+v_bfe_i32_e64_dpp v5, v1, v2, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x11,0xd6,0xe9,0x04,0xf2,0x01,0x01,0x77,0x39,0x05]
+
+v_bfe_i32_e64_dpp v5, v1, v2, -1 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x11,0xd6,0xe9,0x04,0x06,0x03,0x01,0x77,0x39,0x05]
+
+v_bfe_i32_e64_dpp v5, v1, v2, 0.5 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: [0x05,0x00,0x11,0xd6,0xea,0x04,0xc2,0x03,0x01,0x77,0x39,0x05]
+
+v_bfe_i32_e64_dpp v255, v255, v255, src_scc dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX12: [0xff,0x00,0x11,0xd6,0xe9,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
+
+v_bfe_u32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x10,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_bfe_u32_e64_dpp v5, v1, s2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x10,0xd6,0xe9,0x04,0x0c,0x04,0x01,0x77,0x39,0x05]
+
+v_bfe_u32_e64_dpp v5, v1, 10, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x10,0xd6,0xe9,0x14,0x0d,0x04,0x01,0x77,0x39,0x05]
+
+v_bfe_u32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x10,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_bfe_u32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x10,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_bfe_u32_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x10,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_bfe_u32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x10,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_bfe_u32_e64_dpp v5, v1, v2, ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x10,0xd6,0xe9,0x04,0xee,0x01,0x01,0x77,0x39,0x05]
+
+v_bfe_u32_e64_dpp v5, v1, v2, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x10,0xd6,0xe9,0x04,0xfe,0x01,0x01,0x77,0x39,0x05]
+
+v_bfe_u32_e64_dpp v5, v1, v2, exec_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x10,0xd6,0xe9,0x04,0xfa,0x01,0x01,0x77,0x39,0x05]
+
+v_bfe_u32_e64_dpp v5, v1, v2, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x10,0xd6,0xe9,0x04,0xf2,0x01,0x01,0x77,0x39,0x05]
+
+v_bfe_u32_e64_dpp v5, v1, v2, -1 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x10,0xd6,0xe9,0x04,0x06,0x03,0x01,0x77,0x39,0x05]
+
+v_bfe_u32_e64_dpp v5, v1, v2, 0.5 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: [0x05,0x00,0x10,0xd6,0xea,0x04,0xc2,0x03,0x01,0x77,0x39,0x05]
+
+v_bfe_u32_e64_dpp v255, v255, v255, src_scc dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX12: [0xff,0x00,0x10,0xd6,0xe9,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
+
+v_bfi_b32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x12,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_bfi_b32_e64_dpp v5, v1, s2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x12,0xd6,0xe9,0x04,0x0c,0x04,0x01,0x77,0x39,0x05]
+
+v_bfi_b32_e64_dpp v5, v1, 10, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x12,0xd6,0xe9,0x14,0x0d,0x04,0x01,0x77,0x39,0x05]
+
+v_bfi_b32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x12,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_bfi_b32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x12,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_bfi_b32_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x12,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_bfi_b32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x12,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_bfi_b32_e64_dpp v5, v1, v2, ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x12,0xd6,0xe9,0x04,0xee,0x01,0x01,0x77,0x39,0x05]
+
+v_bfi_b32_e64_dpp v5, v1, v2, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x12,0xd6,0xe9,0x04,0xfe,0x01,0x01,0x77,0x39,0x05]
+
+v_bfi_b32_e64_dpp v5, v1, v2, exec_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x12,0xd6,0xe9,0x04,0xfa,0x01,0x01,0x77,0x39,0x05]
+
+v_bfi_b32_e64_dpp v5, v1, v2, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x12,0xd6,0xe9,0x04,0xf2,0x01,0x01,0x77,0x39,0x05]
+
+v_bfi_b32_e64_dpp v5, v1, v2, -1 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x12,0xd6,0xe9,0x04,0x06,0x03,0x01,0x77,0x39,0x05]
+
+v_bfi_b32_e64_dpp v5, v1, v2, 0.5 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: [0x05,0x00,0x12,0xd6,0xea,0x04,0xc2,0x03,0x01,0x77,0x39,0x05]
+
+v_bfi_b32_e64_dpp v255, v255, v255, src_scc dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX12: [0xff,0x00,0x12,0xd6,0xe9,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
+
+v_bfm_b32_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x1d,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_bfm_b32_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: [0x05,0x00,0x1d,0xd7,0xea,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_bfm_b32_e64_dpp v255, v255, v255 dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX12: [0xff,0x00,0x1d,0xd7,0xe9,0xfe,0x03,0x00,0xff,0x00,0x00,0x00]
+
+v_cndmask_b16_e64_dpp v5, v1, v2, s3 dpp8:[7,6,5,4,3,2,1,0]
+// W32: [0x05,0x00,0x5d,0xd6,0xe9,0x04,0x0e,0x00,0x01,0x77,0x39,0x05]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16_e64_dpp v5, v1, s2, s3 dpp8:[7,6,5,4,3,2,1,0]
+// W32: [0x05,0x00,0x5d,0xd6,0xe9,0x04,0x0c,0x00,0x01,0x77,0x39,0x05]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16_e64_dpp v5, v1, 10, s3 dpp8:[7,6,5,4,3,2,1,0]
+// W32: [0x05,0x00,0x5d,0xd6,0xe9,0x14,0x0d,0x00,0x01,0x77,0x39,0x05]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// W32: [0x05,0x00,0x5d,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// W32: [0x05,0x00,0x5d,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16_e64_dpp v5, |v1|, -v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// W32: [0x05,0x01,0x5d,0xd6,0xe9,0x04,0xaa,0x41,0x01,0x77,0x39,0x05]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16_e64_dpp v5, -v1, |v2|, ttmp15 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// W32: [0x05,0x02,0x5d,0xd6,0xea,0x04,0xee,0x21,0x01,0x77,0x39,0x05]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16_e64_dpp v5, v1, v2, s[6:7] dpp8:[7,6,5,4,3,2,1,0]
+// W64: [0x05,0x00,0x5d,0xd6,0xe9,0x04,0x1a,0x00,0x01,0x77,0x39,0x05]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16_e64_dpp v5, v1, v2, s[104:105] dpp8:[7,6,5,4,3,2,1,0]
+// W64: [0x05,0x00,0x5d,0xd6,0xe9,0x04,0xa2,0x01,0x01,0x77,0x39,0x05]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16_e64_dpp v5, |v1|, -v2, vcc dpp8:[7,6,5,4,3,2,1,0]
+// W64: [0x05,0x01,0x5d,0xd6,0xe9,0x04,0xaa,0x41,0x01,0x77,0x39,0x05]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16_e64_dpp v5, -v1, |v2|, ttmp[14:15] dpp8:[7,6,5,4,3,2,1,0] fi:1
+// W64: [0x05,0x02,0x5d,0xd6,0xea,0x04,0xea,0x21,0x01,0x77,0x39,0x05]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16_e64_dpp v5, -v1, |s2|, ttmp[14:15] dpp8:[7,6,5,4,3,2,1,0] fi:1
+// W64: [0x05,0x02,0x5d,0xd6,0xea,0x04,0xe8,0x21,0x01,0x77,0x39,0x05]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_cndmask_b16_e64_dpp v255, -|v255|, -|v255|, null dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX12: [0xff,0x03,0x5d,0xd6,0xe9,0xfe,0xf3,0x61,0xff,0x00,0x00,0x00]
+
+v_cubeid_f32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x0c,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_cubeid_f32_e64_dpp v5, v1, s2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x0c,0xd6,0xe9,0x04,0x0c,0x04,0x01,0x77,0x39,0x05]
+
+v_cubeid_f32_e64_dpp v5, v1, 2.0, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x0c,0xd6,0xe9,0xe8,0x0d,0x04,0x01,0x77,0x39,0x05]
+
+v_cubeid_f32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x0c,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_cubeid_f32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x0c,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_cubeid_f32_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x0c,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_cubeid_f32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x0c,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_cubeid_f32_e64_dpp v5, |v1|, v2, -ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x01,0x0c,0xd6,0xe9,0x04,0xee,0x81,0x01,0x77,0x39,0x05]
+
+v_cubeid_f32_e64_dpp v5, v1, -|v2|, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x02,0x0c,0xd6,0xe9,0x04,0xfe,0x41,0x01,0x77,0x39,0x05]
+
+v_cubeid_f32_e64_dpp v5, -v1, v2, |exec_lo| dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x04,0x0c,0xd6,0xe9,0x04,0xfa,0x21,0x01,0x77,0x39,0x05]
+
+v_cubeid_f32_e64_dpp v5, -|v1|, -|v2|, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x03,0x0c,0xd6,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05]
+
+v_cubeid_f32_e64_dpp v5, -|v1|, v2, -|-1| mul:2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x05,0x0c,0xd6,0xe9,0x04,0x06,0xab,0x01,0x77,0x39,0x05]
+
+v_cubeid_f32_e64_dpp v5, v1, -|v2|, -|0.5| mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: [0x05,0x06,0x0c,0xd6,0xea,0x04,0xc2,0xd3,0x01,0x77,0x39,0x05]
+
+v_cubeid_f32_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX12: [0xff,0x87,0x0c,0xd6,0xe9,0xfe,0xf7,0xfb,0xff,0x00,0x00,0x00]
+
+v_cubema_f32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x0f,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_cubema_f32_e64_dpp v5, v1, s2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x0f,0xd6,0xe9,0x04,0x0c,0x04,0x01,0x77,0x39,0x05]
+
+v_cubema_f32_e64_dpp v5, v1, 2.0, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x0f,0xd6,0xe9,0xe8,0x0d,0x04,0x01,0x77,0x39,0x05]
+
+v_cubema_f32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x0f,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_cubema_f32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x0f,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_cubema_f32_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x0f,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_cubema_f32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x0f,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_cubema_f32_e64_dpp v5, |v1|, v2, -ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x01,0x0f,0xd6,0xe9,0x04,0xee,0x81,0x01,0x77,0x39,0x05]
+
+v_cubema_f32_e64_dpp v5, v1, -|v2|, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x02,0x0f,0xd6,0xe9,0x04,0xfe,0x41,0x01,0x77,0x39,0x05]
+
+v_cubema_f32_e64_dpp v5, -v1, v2, |exec_lo| dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x04,0x0f,0xd6,0xe9,0x04,0xfa,0x21,0x01,0x77,0x39,0x05]
+
+v_cubema_f32_e64_dpp v5, -|v1|, -|v2|, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x03,0x0f,0xd6,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05]
+
+v_cubema_f32_e64_dpp v5, -|v1|, v2, -|-1| mul:2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x05,0x0f,0xd6,0xe9,0x04,0x06,0xab,0x01,0x77,0x39,0x05]
+
+v_cubema_f32_e64_dpp v5, v1, -|v2|, -|0.5| mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: [0x05,0x06,0x0f,0xd6,0xea,0x04,0xc2,0xd3,0x01,0x77,0x39,0x05]
+
+v_cubema_f32_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX12: [0xff,0x87,0x0f,0xd6,0xe9,0xfe,0xf7,0xfb,0xff,0x00,0x00,0x00]
+
+v_cubesc_f32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x0d,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_cubesc_f32_e64_dpp v5, v1, s2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x0d,0xd6,0xe9,0x04,0x0c,0x04,0x01,0x77,0x39,0x05]
+
+v_cubesc_f32_e64_dpp v5, v1, 2.0, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x0d,0xd6,0xe9,0xe8,0x0d,0x04,0x01,0x77,0x39,0x05]
+
+v_cubesc_f32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x0d,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_cubesc_f32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x0d,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_cubesc_f32_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x0d,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_cubesc_f32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x0d,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_cubesc_f32_e64_dpp v5, |v1|, v2, -ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x01,0x0d,0xd6,0xe9,0x04,0xee,0x81,0x01,0x77,0x39,0x05]
+
+v_cubesc_f32_e64_dpp v5, v1, -|v2|, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x02,0x0d,0xd6,0xe9,0x04,0xfe,0x41,0x01,0x77,0x39,0x05]
+
+v_cubesc_f32_e64_dpp v5, -v1, v2, |exec_lo| dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x04,0x0d,0xd6,0xe9,0x04,0xfa,0x21,0x01,0x77,0x39,0x05]
+
+v_cubesc_f32_e64_dpp v5, -|v1|, -|v2|, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x03,0x0d,0xd6,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05]
+
+v_cubesc_f32_e64_dpp v5, -|v1|, v2, -|-1| mul:2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x05,0x0d,0xd6,0xe9,0x04,0x06,0xab,0x01,0x77,0x39,0x05]
+
+v_cubesc_f32_e64_dpp v5, v1, -|v2|, -|0.5| mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: [0x05,0x06,0x0d,0xd6,0xea,0x04,0xc2,0xd3,0x01,0x77,0x39,0x05]
+
+v_cubesc_f32_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX12: [0xff,0x87,0x0d,0xd6,0xe9,0xfe,0xf7,0xfb,0xff,0x00,0x00,0x00]
+
+v_cubetc_f32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x0e,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_cubetc_f32_e64_dpp v5, v1, s2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x0e,0xd6,0xe9,0x04,0x0c,0x04,0x01,0x77,0x39,0x05]
+
+v_cubetc_f32_e64_dpp v5, v1, 2.0, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x0e,0xd6,0xe9,0xe8,0x0d,0x04,0x01,0x77,0x39,0x05]
+
+v_cubetc_f32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x0e,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_cubetc_f32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x0e,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_cubetc_f32_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x0e,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_cubetc_f32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x0e,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_cubetc_f32_e64_dpp v5, |v1|, v2, -ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x01,0x0e,0xd6,0xe9,0x04,0xee,0x81,0x01,0x77,0x39,0x05]
+
+v_cubetc_f32_e64_dpp v5, v1, -|v2|, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x02,0x0e,0xd6,0xe9,0x04,0xfe,0x41,0x01,0x77,0x39,0x05]
+
+v_cubetc_f32_e64_dpp v5, -v1, v2, |exec_lo| dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x04,0x0e,0xd6,0xe9,0x04,0xfa,0x21,0x01,0x77,0x39,0x05]
+
+v_cubetc_f32_e64_dpp v5, -|v1|, -|v2|, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x03,0x0e,0xd6,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05]
+
+v_cubetc_f32_e64_dpp v5, -|v1|, v2, -|-1| mul:2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x05,0x0e,0xd6,0xe9,0x04,0x06,0xab,0x01,0x77,0x39,0x05]
+
+v_cubetc_f32_e64_dpp v5, v1, -|v2|, -|0.5| mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: [0x05,0x06,0x0e,0xd6,0xea,0x04,0xc2,0xd3,0x01,0x77,0x39,0x05]
+
+v_cubetc_f32_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX12: [0xff,0x87,0x0e,0xd6,0xe9,0xfe,0xf7,0xfb,0xff,0x00,0x00,0x00]
+
+v_cvt_pk_fp8_f32_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,2,3,0,1]
+// GFX12: encoding: [0x05,0x00,0x69,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0xa9,0x21]
+
+v_cvt_pk_fp8_f32_e64_dpp v5, |v1|, -v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: encoding: [0x05,0x01,0x69,0xd7,0xe9,0x04,0x02,0x40,0x01,0x77,0x39,0x05]
+
+v_cvt_pk_fp8_f32_e64_dpp v5, -v1, |v2| dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: encoding: [0x05,0x02,0x69,0xd7,0xea,0x04,0x02,0x20,0x01,0x77,0x39,0x05]
+
+v_cvt_pk_fp8_f32_e64_dpp v255, -|v255|, -|v255| dpp8:[0,0,0,0,0,0,0,0]
+// GFX12: encoding: [0xff,0x03,0x69,0xd7,0xe9,0xfe,0x03,0x60,0xff,0x00,0x00,0x00]
+
+v_cvt_pk_bf8_f32_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: encoding: [0x05,0x00,0x6a,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_cvt_pk_bf8_f32_e64_dpp v5, |v1|, -v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: encoding: [0x05,0x01,0x6a,0xd7,0xe9,0x04,0x02,0x40,0x01,0x77,0x39,0x05]
+
+v_cvt_pk_bf8_f32_e64_dpp v5, -v1, |v2| dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: encoding: [0x05,0x02,0x6a,0xd7,0xea,0x04,0x02,0x20,0x01,0x77,0x39,0x05]
+
+v_cvt_pk_bf8_f32_e64_dpp v255, -|v255|, -|v255| dpp8:[0,0,0,0,0,0,0,0]
+// GFX12: encoding: [0xff,0x03,0x6a,0xd7,0xe9,0xfe,0x03,0x60,0xff,0x00,0x00,0x00]
+
+v_cvt_sr_fp8_f32_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: encoding: [0x05,0x00,0x6b,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_cvt_sr_fp8_f32_e64_dpp v5, |v1|, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: encoding: [0x05,0x01,0x6b,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_cvt_sr_fp8_f32_e64_dpp v5, -v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: encoding: [0x05,0x00,0x6b,0xd7,0xe9,0x04,0x02,0x20,0x01,0x77,0x39,0x05]
+
+v_cvt_sr_fp8_f32_e64_dpp v255, -|v255|, v255 dpp8:[0,0,0,0,0,0,0,0]
+// GFX12: encoding: [0xff,0x01,0x6b,0xd7,0xe9,0xfe,0x03,0x20,0xff,0x00,0x00,0x00]
+
+v_cvt_sr_fp8_f32 v1, v2, v3 byte_sel:0 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: v_cvt_sr_fp8_f32_e64_dpp v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x01,0x00,0x6b,0xd7,0xe9,0x06,0x02,0x00,0x02,0x77,0x39,0x05]
+
+v_cvt_sr_fp8_f32 v1, v2, v3 byte_sel:1 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: v_cvt_sr_fp8_f32_e64_dpp v1, v2, v3 byte_sel:1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x01,0x20,0x6b,0xd7,0xe9,0x06,0x02,0x00,0x02,0x77,0x39,0x05]
+
+v_cvt_sr_fp8_f32 v1, v2, v3 byte_sel:2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: v_cvt_sr_fp8_f32_e64_dpp v1, v2, v3 byte_sel:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x01,0x40,0x6b,0xd7,0xe9,0x06,0x02,0x00,0x02,0x77,0x39,0x05]
+
+v_cvt_sr_fp8_f32 v1, v2, v3 byte_sel:3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: v_cvt_sr_fp8_f32_e64_dpp v1, v2, v3 byte_sel:3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x01,0x60,0x6b,0xd7,0xe9,0x06,0x02,0x00,0x02,0x77,0x39,0x05]
+
+v_cvt_sr_bf8_f32_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: encoding: [0x05,0x00,0x6c,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_cvt_sr_bf8_f32_e64_dpp v5, |v1|, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: encoding: [0x05,0x01,0x6c,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_cvt_sr_bf8_f32_e64_dpp v5, -v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: encoding: [0x05,0x00,0x6c,0xd7,0xe9,0x04,0x02,0x20,0x01,0x77,0x39,0x05]
+
+v_cvt_sr_bf8_f32_e64_dpp v255, -|v255|, v255 dpp8:[0,0,0,0,0,0,0,0]
+// GFX12: encoding: [0xff,0x01,0x6c,0xd7,0xe9,0xfe,0x03,0x20,0xff,0x00,0x00,0x00]
+
+v_cvt_sr_bf8_f32 v1, v2, v3 byte_sel:0 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: v_cvt_sr_bf8_f32_e64_dpp v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x01,0x00,0x6c,0xd7,0xe9,0x06,0x02,0x00,0x02,0x77,0x39,0x05]
+
+v_cvt_sr_bf8_f32 v1, v2, v3 byte_sel:1 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: v_cvt_sr_bf8_f32_e64_dpp v1, v2, v3 byte_sel:1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x01,0x20,0x6c,0xd7,0xe9,0x06,0x02,0x00,0x02,0x77,0x39,0x05]
+
+v_cvt_sr_bf8_f32 v1, v2, v3 byte_sel:2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: v_cvt_sr_bf8_f32_e64_dpp v1, v2, v3 byte_sel:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x01,0x40,0x6c,0xd7,0xe9,0x06,0x02,0x00,0x02,0x77,0x39,0x05]
+
+v_cvt_sr_bf8_f32 v1, v2, v3 byte_sel:3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: v_cvt_sr_bf8_f32_e64_dpp v1, v2, v3 byte_sel:3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x01,0x60,0x6c,0xd7,0xe9,0x06,0x02,0x00,0x02,0x77,0x39,0x05]
+
+v_cvt_pk_i16_f32_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x06,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_cvt_pk_i16_f32_e64_dpp v5, |v1|, -v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x01,0x06,0xd7,0xe9,0x04,0x02,0x40,0x01,0x77,0x39,0x05]
+
+v_cvt_pk_i16_f32_e64_dpp v5, -v1, |v2| dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: [0x05,0x02,0x06,0xd7,0xea,0x04,0x02,0x20,0x01,0x77,0x39,0x05]
+
+v_cvt_pk_i16_f32_e64_dpp v255, -|v255|, -|v255| dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX12: [0xff,0x03,0x06,0xd7,0xe9,0xfe,0x03,0x60,0xff,0x00,0x00,0x00]
+
+v_cvt_pk_i16_i32_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x24,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_cvt_pk_i16_i32_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: [0x05,0x00,0x24,0xd7,0xea,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_cvt_pk_i16_i32_e64_dpp v255, v255, v255 dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX12: [0xff,0x00,0x24,0xd7,0xe9,0xfe,0x03,0x00,0xff,0x00,0x00,0x00]
+
+v_cvt_pk_norm_i16_f16_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x12,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_cvt_pk_norm_i16_f16_e64_dpp v5, |v1|, -v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x01,0x12,0xd7,0xe9,0x04,0x02,0x40,0x01,0x77,0x39,0x05]
+
+v_cvt_pk_norm_i16_f16_e64_dpp v5, -v1, |v2| dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: [0x05,0x02,0x12,0xd7,0xea,0x04,0x02,0x20,0x01,0x77,0x39,0x05]
+
+v_cvt_pk_norm_i16_f16_e64_dpp v255, -|v255|, -|v255| dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX12: [0xff,0x03,0x12,0xd7,0xe9,0xfe,0x03,0x60,0xff,0x00,0x00,0x00]
+
+v_cvt_pk_norm_u16_f16_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x13,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_cvt_pk_norm_u16_f16_e64_dpp v5, |v1|, -v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x01,0x13,0xd7,0xe9,0x04,0x02,0x40,0x01,0x77,0x39,0x05]
+
+v_cvt_pk_norm_u16_f16_e64_dpp v5, -v1, |v2| dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: [0x05,0x02,0x13,0xd7,0xea,0x04,0x02,0x20,0x01,0x77,0x39,0x05]
+
+v_cvt_pk_norm_u16_f16_e64_dpp v255, -|v255|, -|v255| dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX12: [0xff,0x03,0x13,0xd7,0xe9,0xfe,0x03,0x60,0xff,0x00,0x00,0x00]
+
+v_cvt_pk_u16_f32_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x07,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_cvt_pk_u16_f32_e64_dpp v5, |v1|, -v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x01,0x07,0xd7,0xe9,0x04,0x02,0x40,0x01,0x77,0x39,0x05]
+
+v_cvt_pk_u16_f32_e64_dpp v5, -v1, |v2| dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: [0x05,0x02,0x07,0xd7,0xea,0x04,0x02,0x20,0x01,0x77,0x39,0x05]
+
+v_cvt_pk_u16_f32_e64_dpp v255, -|v255|, -|v255| dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX12: [0xff,0x03,0x07,0xd7,0xe9,0xfe,0x03,0x60,0xff,0x00,0x00,0x00]
+
+v_cvt_pk_u16_u32_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x23,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_cvt_pk_u16_u32_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: [0x05,0x00,0x23,0xd7,0xea,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_cvt_pk_u16_u32_e64_dpp v255, v255, v255 dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX12: [0xff,0x00,0x23,0xd7,0xe9,0xfe,0x03,0x00,0xff,0x00,0x00,0x00]
+
+v_cvt_pk_u8_f32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x26,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_cvt_pk_u8_f32_e64_dpp v5, v1, s2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x26,0xd6,0xe9,0x04,0x0c,0x04,0x01,0x77,0x39,0x05]
+
+v_cvt_pk_u8_f32_e64_dpp v5, v1, 2.0, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x26,0xd6,0xe9,0xe8,0x0d,0x04,0x01,0x77,0x39,0x05]
+
+v_cvt_pk_u8_f32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x26,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_cvt_pk_u8_f32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x26,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_cvt_pk_u8_f32_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x26,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_cvt_pk_u8_f32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x26,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_cvt_pk_u8_f32_e64_dpp v5, v1, v2, ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x26,0xd6,0xe9,0x04,0xee,0x01,0x01,0x77,0x39,0x05]
+
+v_cvt_pk_u8_f32_e64_dpp v5, v1, v2, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x26,0xd6,0xe9,0x04,0xfe,0x01,0x01,0x77,0x39,0x05]
+
+v_cvt_pk_u8_f32_e64_dpp v5, v1, v2, exec_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x26,0xd6,0xe9,0x04,0xfa,0x01,0x01,0x77,0x39,0x05]
+
+v_cvt_pk_u8_f32_e64_dpp v5, v1, v2, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x26,0xd6,0xe9,0x04,0xf2,0x01,0x01,0x77,0x39,0x05]
+
+v_cvt_pk_u8_f32_e64_dpp v5, v1, v2, -1 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x26,0xd6,0xe9,0x04,0x06,0x03,0x01,0x77,0x39,0x05]
+
+v_cvt_pk_u8_f32_e64_dpp v5, v1, v2, 0.5 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: [0x05,0x00,0x26,0xd6,0xea,0x04,0xc2,0x03,0x01,0x77,0x39,0x05]
+
+v_cvt_pk_u8_f32_e64_dpp v255, -|v255|, v255, src_scc dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX12: [0xff,0x01,0x26,0xd6,0xe9,0xfe,0xf7,0x23,0xff,0x00,0x00,0x00]
+
+v_cvt_pk_norm_i16_f16_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x12,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_cvt_pk_norm_i16_f16_e64_dpp v5, |v1|, -v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x01,0x12,0xd7,0xe9,0x04,0x02,0x40,0x01,0x77,0x39,0x05]
+
+v_cvt_pk_norm_i16_f16_e64_dpp v5, -v1, |v2| dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: [0x05,0x02,0x12,0xd7,0xea,0x04,0x02,0x20,0x01,0x77,0x39,0x05]
+
+v_cvt_pk_norm_i16_f16_e64_dpp v255, -|v255|, -|v255| dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX12: [0xff,0x03,0x12,0xd7,0xe9,0xfe,0x03,0x60,0xff,0x00,0x00,0x00]
+
+v_cvt_pk_norm_i16_f32_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x21,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_cvt_pk_norm_i16_f32_e64_dpp v5, |v1|, -v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x01,0x21,0xd7,0xe9,0x04,0x02,0x40,0x01,0x77,0x39,0x05]
+
+v_cvt_pk_norm_i16_f32_e64_dpp v5, -v1, |v2| dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: [0x05,0x02,0x21,0xd7,0xea,0x04,0x02,0x20,0x01,0x77,0x39,0x05]
+
+v_cvt_pk_norm_i16_f32_e64_dpp v255, -|v255|, -|v255| dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX12: [0xff,0x03,0x21,0xd7,0xe9,0xfe,0x03,0x60,0xff,0x00,0x00,0x00]
+
+v_cvt_pk_norm_u16_f16_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x13,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_cvt_pk_norm_u16_f16_e64_dpp v5, |v1|, -v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x01,0x13,0xd7,0xe9,0x04,0x02,0x40,0x01,0x77,0x39,0x05]
+
+v_cvt_pk_norm_u16_f16_e64_dpp v5, -v1, |v2| dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: [0x05,0x02,0x13,0xd7,0xea,0x04,0x02,0x20,0x01,0x77,0x39,0x05]
+
+v_cvt_pk_norm_u16_f16_e64_dpp v255, -|v255|, -|v255| dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX12: [0xff,0x03,0x13,0xd7,0xe9,0xfe,0x03,0x60,0xff,0x00,0x00,0x00]
+
+v_cvt_pk_norm_u16_f32_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x22,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_cvt_pk_norm_u16_f32_e64_dpp v5, |v1|, -v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x01,0x22,0xd7,0xe9,0x04,0x02,0x40,0x01,0x77,0x39,0x05]
+
+v_cvt_pk_norm_u16_f32_e64_dpp v5, -v1, |v2| dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: [0x05,0x02,0x22,0xd7,0xea,0x04,0x02,0x20,0x01,0x77,0x39,0x05]
+
+v_cvt_pk_norm_u16_f32_e64_dpp v255, -|v255|, -|v255| dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX12: [0xff,0x03,0x22,0xd7,0xe9,0xfe,0x03,0x60,0xff,0x00,0x00,0x00]
+
+v_div_fixup_f16_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x54,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_div_fixup_f16_e64_dpp v5, v1, s2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x54,0xd6,0xe9,0x04,0x0c,0x04,0x01,0x77,0x39,0x05]
+
+v_div_fixup_f16_e64_dpp v5, v1, 2.0, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x54,0xd6,0xe9,0xe8,0x0d,0x04,0x01,0x77,0x39,0x05]
+
+v_div_fixup_f16_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x54,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_div_fixup_f16_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x54,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_div_fixup_f16_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x54,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_div_fixup_f16_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x54,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_div_fixup_f16_e64_dpp v5, |v1|, v2, -ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x01,0x54,0xd6,0xe9,0x04,0xee,0x81,0x01,0x77,0x39,0x05]
+
+v_div_fixup_f16_e64_dpp v5, v1, -|v2|, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x02,0x54,0xd6,0xe9,0x04,0xfe,0x41,0x01,0x77,0x39,0x05]
+
+v_div_fixup_f16_e64_dpp v5, -v1, v2, |exec_lo| dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x04,0x54,0xd6,0xe9,0x04,0xfa,0x21,0x01,0x77,0x39,0x05]
+
+v_div_fixup_f16_e64_dpp v5, -|v1|, -|v2|, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x03,0x54,0xd6,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05]
+
+v_div_fixup_f16_e64_dpp v5, -|v1|, v2, -|-1| dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x05,0x54,0xd6,0xe9,0x04,0x06,0xa3,0x01,0x77,0x39,0x05]
+
+v_div_fixup_f16_e64_dpp v5, v1, -|v2|, -|0.5| dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: [0x05,0x06,0x54,0xd6,0xea,0x04,0xc2,0xc3,0x01,0x77,0x39,0x05]
+
+v_div_fixup_f16_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX12: [0xff,0x87,0x54,0xd6,0xe9,0xfe,0xf7,0xe3,0xff,0x00,0x00,0x00]
+
+v_fma_f16_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x48,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_fma_f16_e64_dpp v5, v1, s2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x48,0xd6,0xe9,0x04,0x0c,0x04,0x01,0x77,0x39,0x05]
+
+v_fma_f16_e64_dpp v5, v1, 2.0, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x48,0xd6,0xe9,0xe8,0x0d,0x04,0x01,0x77,0x39,0x05]
+
+v_fma_f16_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x48,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_fma_f16_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x48,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_fma_f16_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x48,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_fma_f16_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x48,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_fma_f16_e64_dpp v5, |v1|, v2, -ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x01,0x48,0xd6,0xe9,0x04,0xee,0x81,0x01,0x77,0x39,0x05]
+
+v_fma_f16_e64_dpp v5, v1, -|v2|, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x02,0x48,0xd6,0xe9,0x04,0xfe,0x41,0x01,0x77,0x39,0x05]
+
+v_fma_f16_e64_dpp v5, -v1, v2, |exec_lo| dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x04,0x48,0xd6,0xe9,0x04,0xfa,0x21,0x01,0x77,0x39,0x05]
+
+v_fma_f16_e64_dpp v5, -|v1|, -|v2|, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x03,0x48,0xd6,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05]
+
+v_fma_f16_e64_dpp v5, -|v1|, v2, -|-1| dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x05,0x48,0xd6,0xe9,0x04,0x06,0xa3,0x01,0x77,0x39,0x05]
+
+v_fma_f16_e64_dpp v5, v1, -|v2|, -|0.5| dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: [0x05,0x06,0x48,0xd6,0xea,0x04,0xc2,0xc3,0x01,0x77,0x39,0x05]
+
+v_fma_f16_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX12: [0xff,0x87,0x48,0xd6,0xe9,0xfe,0xf7,0xe3,0xff,0x00,0x00,0x00]
+
+v_fma_f32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x13,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_fma_f32_e64_dpp v5, v1, s2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x13,0xd6,0xe9,0x04,0x0c,0x04,0x01,0x77,0x39,0x05]
+
+v_fma_f32_e64_dpp v5, v1, 2.0, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x13,0xd6,0xe9,0xe8,0x0d,0x04,0x01,0x77,0x39,0x05]
+
+v_fma_f32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x13,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_fma_f32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x13,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_fma_f32_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x13,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_fma_f32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x13,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_fma_f32_e64_dpp v5, |v1|, v2, -ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x01,0x13,0xd6,0xe9,0x04,0xee,0x81,0x01,0x77,0x39,0x05]
+
+v_fma_f32_e64_dpp v5, v1, -|v2|, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x02,0x13,0xd6,0xe9,0x04,0xfe,0x41,0x01,0x77,0x39,0x05]
+
+v_fma_f32_e64_dpp v5, -v1, v2, |exec_lo| dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x04,0x13,0xd6,0xe9,0x04,0xfa,0x21,0x01,0x77,0x39,0x05]
+
+v_fma_f32_e64_dpp v5, -|v1|, -|v2|, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x03,0x13,0xd6,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05]
+
+v_fma_f32_e64_dpp v5, -|v1|, v2, -|-1| mul:2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x05,0x13,0xd6,0xe9,0x04,0x06,0xab,0x01,0x77,0x39,0x05]
+
+v_fma_f32_e64_dpp v5, v1, -|v2|, -|0.5| mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: [0x05,0x06,0x13,0xd6,0xea,0x04,0xc2,0xd3,0x01,0x77,0x39,0x05]
+
+v_fma_f32_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX12: [0xff,0x87,0x13,0xd6,0xe9,0xfe,0xf7,0xfb,0xff,0x00,0x00,0x00]
+
+v_ldexp_f32_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x1c,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_ldexp_f32_e64_dpp v5, v1, v2 mul:2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x1c,0xd7,0xe9,0x04,0x02,0x08,0x01,0x77,0x39,0x05]
+
+v_ldexp_f32_e64_dpp v5, v1, v2 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: [0x05,0x00,0x1c,0xd7,0xea,0x04,0x02,0x10,0x01,0x77,0x39,0x05]
+
+v_ldexp_f32_e64_dpp v255, -|v255|, v255 clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX12: [0xff,0x81,0x1c,0xd7,0xe9,0xfe,0x03,0x38,0xff,0x00,0x00,0x00]
+
+v_lerp_u8_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x15,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_lerp_u8_e64_dpp v5, v1, s2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x15,0xd6,0xe9,0x04,0x0c,0x04,0x01,0x77,0x39,0x05]
+
+v_lerp_u8_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x15,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_lerp_u8_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x15,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_lerp_u8_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x15,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_lerp_u8_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x15,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_lerp_u8_e64_dpp v5, v1, v2, ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x15,0xd6,0xe9,0x04,0xee,0x01,0x01,0x77,0x39,0x05]
+
+v_lerp_u8_e64_dpp v5, v1, v2, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x15,0xd6,0xe9,0x04,0xfe,0x01,0x01,0x77,0x39,0x05]
+
+v_lerp_u8_e64_dpp v5, v1, v2, exec_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x15,0xd6,0xe9,0x04,0xfa,0x01,0x01,0x77,0x39,0x05]
+
+v_lerp_u8_e64_dpp v5, v1, v2, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x15,0xd6,0xe9,0x04,0xf2,0x01,0x01,0x77,0x39,0x05]
+
+v_lerp_u8_e64_dpp v5, v1, v2, -1 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x15,0xd6,0xe9,0x04,0x06,0x03,0x01,0x77,0x39,0x05]
+
+v_lerp_u8_e64_dpp v5, v1, v2, 0.5 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: [0x05,0x00,0x15,0xd6,0xea,0x04,0xc2,0x03,0x01,0x77,0x39,0x05]
+
+v_lerp_u8_e64_dpp v255, v255, v255, src_scc dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX12: [0xff,0x00,0x15,0xd6,0xe9,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
+
+v_lshl_add_u32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x46,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_lshl_add_u32_e64_dpp v5, v1, s2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x46,0xd6,0xe9,0x04,0x0c,0x04,0x01,0x77,0x39,0x05]
+
+v_lshl_add_u32_e64_dpp v5, v1, 10, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x46,0xd6,0xe9,0x14,0x0d,0x04,0x01,0x77,0x39,0x05]
+
+v_lshl_add_u32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x46,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_lshl_add_u32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x46,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_lshl_add_u32_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x46,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_lshl_add_u32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x46,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_lshl_add_u32_e64_dpp v5, v1, v2, ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x46,0xd6,0xe9,0x04,0xee,0x01,0x01,0x77,0x39,0x05]
+
+v_lshl_add_u32_e64_dpp v5, v1, v2, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x46,0xd6,0xe9,0x04,0xfe,0x01,0x01,0x77,0x39,0x05]
+
+v_lshl_add_u32_e64_dpp v5, v1, v2, exec_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x46,0xd6,0xe9,0x04,0xfa,0x01,0x01,0x77,0x39,0x05]
+
+v_lshl_add_u32_e64_dpp v5, v1, v2, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x46,0xd6,0xe9,0x04,0xf2,0x01,0x01,0x77,0x39,0x05]
+
+v_lshl_add_u32_e64_dpp v5, v1, v2, -1 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x46,0xd6,0xe9,0x04,0x06,0x03,0x01,0x77,0x39,0x05]
+
+v_lshl_add_u32_e64_dpp v5, v1, v2, 0.5 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: [0x05,0x00,0x46,0xd6,0xea,0x04,0xc2,0x03,0x01,0x77,0x39,0x05]
+
+v_lshl_add_u32_e64_dpp v255, v255, v255, src_scc dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX12: [0xff,0x00,0x46,0xd6,0xe9,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
+
+v_lshl_or_b32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x56,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_lshl_or_b32_e64_dpp v5, v1, s2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x56,0xd6,0xe9,0x04,0x0c,0x04,0x01,0x77,0x39,0x05]
+
+v_lshl_or_b32_e64_dpp v5, v1, 10, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x56,0xd6,0xe9,0x14,0x0d,0x04,0x01,0x77,0x39,0x05]
+
+v_lshl_or_b32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x56,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_lshl_or_b32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x56,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_lshl_or_b32_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x56,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_lshl_or_b32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x56,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_lshl_or_b32_e64_dpp v5, v1, v2, ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x56,0xd6,0xe9,0x04,0xee,0x01,0x01,0x77,0x39,0x05]
+
+v_lshl_or_b32_e64_dpp v5, v1, v2, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x56,0xd6,0xe9,0x04,0xfe,0x01,0x01,0x77,0x39,0x05]
+
+v_lshl_or_b32_e64_dpp v5, v1, v2, exec_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x56,0xd6,0xe9,0x04,0xfa,0x01,0x01,0x77,0x39,0x05]
+
+v_lshl_or_b32_e64_dpp v5, v1, v2, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x56,0xd6,0xe9,0x04,0xf2,0x01,0x01,0x77,0x39,0x05]
+
+v_lshl_or_b32_e64_dpp v5, v1, v2, -1 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x56,0xd6,0xe9,0x04,0x06,0x03,0x01,0x77,0x39,0x05]
+
+v_lshl_or_b32_e64_dpp v5, v1, v2, 0.5 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: [0x05,0x00,0x56,0xd6,0xea,0x04,0xc2,0x03,0x01,0x77,0x39,0x05]
+
+v_lshl_or_b32_e64_dpp v255, v255, v255, src_scc dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX12: [0xff,0x00,0x56,0xd6,0xe9,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
+
+v_lshlrev_b16_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x38,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_lshlrev_b16_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: [0x05,0x00,0x38,0xd7,0xea,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_lshlrev_b16_e64_dpp v255, v255, v255 dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX12: [0xff,0x00,0x38,0xd7,0xe9,0xfe,0x03,0x00,0xff,0x00,0x00,0x00]
+
+v_lshrrev_b16_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x39,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_lshrrev_b16_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: [0x05,0x00,0x39,0xd7,0xea,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_lshrrev_b16_e64_dpp v255, v255, v255 dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX12: [0xff,0x00,0x39,0xd7,0xe9,0xfe,0x03,0x00,0xff,0x00,0x00,0x00]
+
+v_mad_i16_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x53,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_mad_i16_e64_dpp v5, v1, s2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x53,0xd6,0xe9,0x04,0x0c,0x04,0x01,0x77,0x39,0x05]
+
+v_mad_i16_e64_dpp v5, v1, 10, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x53,0xd6,0xe9,0x14,0x0d,0x04,0x01,0x77,0x39,0x05]
+
+v_mad_i16_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x53,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_mad_i16_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x53,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_mad_i16_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x53,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_mad_i16_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x53,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_mad_i16_e64_dpp v5, v1, v2, ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x53,0xd6,0xe9,0x04,0xee,0x01,0x01,0x77,0x39,0x05]
+
+v_mad_i16_e64_dpp v5, v1, v2, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x53,0xd6,0xe9,0x04,0xfe,0x01,0x01,0x77,0x39,0x05]
+
+v_mad_i16_e64_dpp v5, v1, v2, exec_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x53,0xd6,0xe9,0x04,0xfa,0x01,0x01,0x77,0x39,0x05]
+
+v_mad_i16_e64_dpp v5, v1, v2, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x53,0xd6,0xe9,0x04,0xf2,0x01,0x01,0x77,0x39,0x05]
+
+v_mad_i16_e64_dpp v5, v1, v2, -1 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: [0x05,0x00,0x53,0xd6,0xea,0x04,0x06,0x03,0x01,0x77,0x39,0x05]
+
+v_mad_i16_e64_dpp v255, v255, v255, src_scc clamp dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX12: [0xff,0x80,0x53,0xd6,0xe9,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
+
+v_mad_i32_i16_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x5a,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_mad_i32_i16_e64_dpp v5, v1, s2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x5a,0xd6,0xe9,0x04,0x0c,0x04,0x01,0x77,0x39,0x05]
+
+v_mad_i32_i16_e64_dpp v5, v1, 10, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x5a,0xd6,0xe9,0x14,0x0d,0x04,0x01,0x77,0x39,0x05]
+
+v_mad_i32_i16_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x5a,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_mad_i32_i16_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x5a,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_mad_i32_i16_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x5a,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_mad_i32_i16_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x5a,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_mad_i32_i16_e64_dpp v5, v1, v2, ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x5a,0xd6,0xe9,0x04,0xee,0x01,0x01,0x77,0x39,0x05]
+
+v_mad_i32_i16_e64_dpp v5, v1, v2, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x5a,0xd6,0xe9,0x04,0xfe,0x01,0x01,0x77,0x39,0x05]
+
+v_mad_i32_i16_e64_dpp v5, v1, v2, exec_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x5a,0xd6,0xe9,0x04,0xfa,0x01,0x01,0x77,0x39,0x05]
+
+v_mad_i32_i16_e64_dpp v5, v1, v2, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x5a,0xd6,0xe9,0x04,0xf2,0x01,0x01,0x77,0x39,0x05]
+
+v_mad_i32_i16_e64_dpp v5, v1, v2, -1 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x5a,0xd6,0xe9,0x04,0x06,0x03,0x01,0x77,0x39,0x05]
+
+v_mad_i32_i16_e64_dpp v5, v1, v2, 0.5 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: [0x05,0x00,0x5a,0xd6,0xea,0x04,0xc2,0x03,0x01,0x77,0x39,0x05]
+
+v_mad_i32_i16_e64_dpp v255, v255, v255, src_scc clamp dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX12: [0xff,0x80,0x5a,0xd6,0xe9,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
+
+v_mad_i32_i24_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x0a,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_mad_i32_i24_e64_dpp v5, v1, s2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x0a,0xd6,0xe9,0x04,0x0c,0x04,0x01,0x77,0x39,0x05]
+
+v_mad_i32_i24_e64_dpp v5, v1, 10, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x0a,0xd6,0xe9,0x14,0x0d,0x04,0x01,0x77,0x39,0x05]
+
+v_mad_i32_i24_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x0a,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_mad_i32_i24_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x0a,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_mad_i32_i24_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x0a,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_mad_i32_i24_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x0a,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_mad_i32_i24_e64_dpp v5, v1, v2, ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x0a,0xd6,0xe9,0x04,0xee,0x01,0x01,0x77,0x39,0x05]
+
+v_mad_i32_i24_e64_dpp v5, v1, v2, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x0a,0xd6,0xe9,0x04,0xfe,0x01,0x01,0x77,0x39,0x05]
+
+v_mad_i32_i24_e64_dpp v5, v1, v2, exec_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x0a,0xd6,0xe9,0x04,0xfa,0x01,0x01,0x77,0x39,0x05]
+
+v_mad_i32_i24_e64_dpp v5, v1, v2, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x0a,0xd6,0xe9,0x04,0xf2,0x01,0x01,0x77,0x39,0x05]
+
+v_mad_i32_i24_e64_dpp v5, v1, v2, -1 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x0a,0xd6,0xe9,0x04,0x06,0x03,0x01,0x77,0x39,0x05]
+
+v_mad_i32_i24_e64_dpp v5, v1, v2, 0.5 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: [0x05,0x00,0x0a,0xd6,0xea,0x04,0xc2,0x03,0x01,0x77,0x39,0x05]
+
+v_mad_i32_i24_e64_dpp v255, v255, v255, src_scc clamp dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX12: [0xff,0x80,0x0a,0xd6,0xe9,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
+
+v_mad_u16_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x41,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_mad_u16_e64_dpp v5, v1, s2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x41,0xd6,0xe9,0x04,0x0c,0x04,0x01,0x77,0x39,0x05]
+
+v_mad_u16_e64_dpp v5, v1, 10, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x41,0xd6,0xe9,0x14,0x0d,0x04,0x01,0x77,0x39,0x05]
+
+v_mad_u16_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x41,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_mad_u16_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x41,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_mad_u16_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x41,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_mad_u16_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x41,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_mad_u16_e64_dpp v5, v1, v2, ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x41,0xd6,0xe9,0x04,0xee,0x01,0x01,0x77,0x39,0x05]
+
+v_mad_u16_e64_dpp v5, v1, v2, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x41,0xd6,0xe9,0x04,0xfe,0x01,0x01,0x77,0x39,0x05]
+
+v_mad_u16_e64_dpp v5, v1, v2, exec_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x41,0xd6,0xe9,0x04,0xfa,0x01,0x01,0x77,0x39,0x05]
+
+v_mad_u16_e64_dpp v5, v1, v2, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x41,0xd6,0xe9,0x04,0xf2,0x01,0x01,0x77,0x39,0x05]
+
+v_mad_u16_e64_dpp v5, v1, v2, -1 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: [0x05,0x00,0x41,0xd6,0xea,0x04,0x06,0x03,0x01,0x77,0x39,0x05]
+
+v_mad_u16_e64_dpp v255, v255, v255, src_scc clamp dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX12: [0xff,0x80,0x41,0xd6,0xe9,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
+
+v_mad_u32_u16_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x59,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_mad_u32_u16_e64_dpp v5, v1, s2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x59,0xd6,0xe9,0x04,0x0c,0x04,0x01,0x77,0x39,0x05]
+
+v_mad_u32_u16_e64_dpp v5, v1, 10, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x59,0xd6,0xe9,0x14,0x0d,0x04,0x01,0x77,0x39,0x05]
+
+v_mad_u32_u16_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x59,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_mad_u32_u16_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x59,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_mad_u32_u16_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x59,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_mad_u32_u16_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x59,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_mad_u32_u16_e64_dpp v5, v1, v2, ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x59,0xd6,0xe9,0x04,0xee,0x01,0x01,0x77,0x39,0x05]
+
+v_mad_u32_u16_e64_dpp v5, v1, v2, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x59,0xd6,0xe9,0x04,0xfe,0x01,0x01,0x77,0x39,0x05]
+
+v_mad_u32_u16_e64_dpp v5, v1, v2, exec_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x59,0xd6,0xe9,0x04,0xfa,0x01,0x01,0x77,0x39,0x05]
+
+v_mad_u32_u16_e64_dpp v5, v1, v2, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x59,0xd6,0xe9,0x04,0xf2,0x01,0x01,0x77,0x39,0x05]
+
+v_mad_u32_u16_e64_dpp v5, v1, v2, -1 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x59,0xd6,0xe9,0x04,0x06,0x03,0x01,0x77,0x39,0x05]
+
+v_mad_u32_u16_e64_dpp v5, v1, v2, 0.5 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: [0x05,0x00,0x59,0xd6,0xea,0x04,0xc2,0x03,0x01,0x77,0x39,0x05]
+
+v_mad_u32_u16_e64_dpp v255, v255, v255, src_scc clamp dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX12: [0xff,0x80,0x59,0xd6,0xe9,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
+
+v_mad_u32_u24_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x0b,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_mad_u32_u24_e64_dpp v5, v1, s2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x0b,0xd6,0xe9,0x04,0x0c,0x04,0x01,0x77,0x39,0x05]
+
+v_mad_u32_u24_e64_dpp v5, v1, 10, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x0b,0xd6,0xe9,0x14,0x0d,0x04,0x01,0x77,0x39,0x05]
+
+v_mad_u32_u24_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x0b,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_mad_u32_u24_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x0b,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_mad_u32_u24_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x0b,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_mad_u32_u24_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x0b,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_mad_u32_u24_e64_dpp v5, v1, v2, ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x0b,0xd6,0xe9,0x04,0xee,0x01,0x01,0x77,0x39,0x05]
+
+v_mad_u32_u24_e64_dpp v5, v1, v2, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x0b,0xd6,0xe9,0x04,0xfe,0x01,0x01,0x77,0x39,0x05]
+
+v_mad_u32_u24_e64_dpp v5, v1, v2, exec_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x0b,0xd6,0xe9,0x04,0xfa,0x01,0x01,0x77,0x39,0x05]
+
+v_mad_u32_u24_e64_dpp v5, v1, v2, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x0b,0xd6,0xe9,0x04,0xf2,0x01,0x01,0x77,0x39,0x05]
+
+v_mad_u32_u24_e64_dpp v5, v1, v2, -1 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x0b,0xd6,0xe9,0x04,0x06,0x03,0x01,0x77,0x39,0x05]
+
+v_mad_u32_u24_e64_dpp v5, v1, v2, 0.5 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: [0x05,0x00,0x0b,0xd6,0xea,0x04,0xc2,0x03,0x01,0x77,0x39,0x05]
+
+v_mad_u32_u24_e64_dpp v255, v255, v255, src_scc clamp dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX12: [0xff,0x80,0x0b,0xd6,0xe9,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
+
+v_max3_num_f16_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x2c,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_max3_num_f16_e64_dpp v5, v1, s2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x2c,0xd6,0xe9,0x04,0x0c,0x04,0x01,0x77,0x39,0x05]
+
+v_max3_num_f16_e64_dpp v5, v1, 2.0, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x2c,0xd6,0xe9,0xe8,0x0d,0x04,0x01,0x77,0x39,0x05]
+
+v_max3_num_f16_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x2c,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_max3_num_f16_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x2c,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_max3_num_f16_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x2c,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_max3_num_f16_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x2c,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_max3_num_f16_e64_dpp v5, |v1|, v2, -ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x01,0x2c,0xd6,0xe9,0x04,0xee,0x81,0x01,0x77,0x39,0x05]
+
+v_max3_num_f16_e64_dpp v5, v1, -|v2|, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x02,0x2c,0xd6,0xe9,0x04,0xfe,0x41,0x01,0x77,0x39,0x05]
+
+v_max3_num_f16_e64_dpp v5, -v1, v2, |exec_lo| dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x04,0x2c,0xd6,0xe9,0x04,0xfa,0x21,0x01,0x77,0x39,0x05]
+
+v_max3_num_f16_e64_dpp v5, -|v1|, -|v2|, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x03,0x2c,0xd6,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05]
+
+v_max3_num_f16_e64_dpp v5, -|v1|, v2, -|-1| dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x05,0x2c,0xd6,0xe9,0x04,0x06,0xa3,0x01,0x77,0x39,0x05]
+
+v_max3_num_f16_e64_dpp v5, v1, -|v2|, -|0.5| dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: [0x05,0x06,0x2c,0xd6,0xea,0x04,0xc2,0xc3,0x01,0x77,0x39,0x05]
+
+v_max3_num_f16_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp dpp8:[0,0,0,0,0,0,0,0]
+// GFX12: [0xff,0x87,0x2c,0xd6,0xe9,0xfe,0xf7,0xe3,0xff,0x00,0x00,0x00]
+
+v_max3_num_f32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x2a,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_max3_num_f32_e64_dpp v5, v1, s2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x2a,0xd6,0xe9,0x04,0x0c,0x04,0x01,0x77,0x39,0x05]
+
+v_max3_num_f32_e64_dpp v5, v1, 2.0, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x2a,0xd6,0xe9,0xe8,0x0d,0x04,0x01,0x77,0x39,0x05]
+
+v_max3_num_f32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x2a,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_max3_num_f32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x2a,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_max3_num_f32_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x2a,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_max3_num_f32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x2a,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_max3_num_f32_e64_dpp v5, |v1|, v2, -ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x01,0x2a,0xd6,0xe9,0x04,0xee,0x81,0x01,0x77,0x39,0x05]
+
+v_max3_num_f32_e64_dpp v5, v1, -|v2|, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x02,0x2a,0xd6,0xe9,0x04,0xfe,0x41,0x01,0x77,0x39,0x05]
+
+v_max3_num_f32_e64_dpp v5, -v1, v2, |exec_lo| dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x04,0x2a,0xd6,0xe9,0x04,0xfa,0x21,0x01,0x77,0x39,0x05]
+
+v_max3_num_f32_e64_dpp v5, -|v1|, -|v2|, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x03,0x2a,0xd6,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05]
+
+v_max3_num_f32_e64_dpp v5, -|v1|, v2, -|-1| mul:2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x05,0x2a,0xd6,0xe9,0x04,0x06,0xab,0x01,0x77,0x39,0x05]
+
+v_max3_num_f32_e64_dpp v5, v1, -|v2|, -|0.5| mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: [0x05,0x06,0x2a,0xd6,0xea,0x04,0xc2,0xd3,0x01,0x77,0x39,0x05]
+
+v_max3_num_f32_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp div:2 dpp8:[0,0,0,0,0,0,0,0]
+// GFX12: [0xff,0x87,0x2a,0xd6,0xe9,0xfe,0xf7,0xfb,0xff,0x00,0x00,0x00]
+
+v_max3_i16_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x4d,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_max3_i16_e64_dpp v5, v1, s2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x4d,0xd6,0xe9,0x04,0x0c,0x04,0x01,0x77,0x39,0x05]
+
+v_max3_i16_e64_dpp v5, v1, 10, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x4d,0xd6,0xe9,0x14,0x0d,0x04,0x01,0x77,0x39,0x05]
+
+v_max3_i16_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x4d,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_max3_i16_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x4d,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_max3_i16_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x4d,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_max3_i16_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x4d,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_max3_i16_e64_dpp v5, v1, v2, ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x4d,0xd6,0xe9,0x04,0xee,0x01,0x01,0x77,0x39,0x05]
+
+v_max3_i16_e64_dpp v5, v1, v2, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x4d,0xd6,0xe9,0x04,0xfe,0x01,0x01,0x77,0x39,0x05]
+
+v_max3_i16_e64_dpp v5, v1, v2, exec_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x4d,0xd6,0xe9,0x04,0xfa,0x01,0x01,0x77,0x39,0x05]
+
+v_max3_i16_e64_dpp v5, v1, v2, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x4d,0xd6,0xe9,0x04,0xf2,0x01,0x01,0x77,0x39,0x05]
+
+v_max3_i16_e64_dpp v5, v1, v2, -1 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: [0x05,0x00,0x4d,0xd6,0xea,0x04,0x06,0x03,0x01,0x77,0x39,0x05]
+
+v_max3_i16_e64_dpp v255, v255, v255, src_scc dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX12: [0xff,0x00,0x4d,0xd6,0xe9,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
+
+v_max3_i32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x1d,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_max3_i32_e64_dpp v5, v1, s2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x1d,0xd6,0xe9,0x04,0x0c,0x04,0x01,0x77,0x39,0x05]
+
+v_max3_i32_e64_dpp v5, v1, 10, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x1d,0xd6,0xe9,0x14,0x0d,0x04,0x01,0x77,0x39,0x05]
+
+v_max3_i32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x1d,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_max3_i32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x1d,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_max3_i32_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x1d,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_max3_i32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x1d,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_max3_i32_e64_dpp v5, v1, v2, ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x1d,0xd6,0xe9,0x04,0xee,0x01,0x01,0x77,0x39,0x05]
+
+v_max3_i32_e64_dpp v5, v1, v2, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x1d,0xd6,0xe9,0x04,0xfe,0x01,0x01,0x77,0x39,0x05]
+
+v_max3_i32_e64_dpp v5, v1, v2, exec_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x1d,0xd6,0xe9,0x04,0xfa,0x01,0x01,0x77,0x39,0x05]
+
+v_max3_i32_e64_dpp v5, v1, v2, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x1d,0xd6,0xe9,0x04,0xf2,0x01,0x01,0x77,0x39,0x05]
+
+v_max3_i32_e64_dpp v5, v1, v2, -1 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x1d,0xd6,0xe9,0x04,0x06,0x03,0x01,0x77,0x39,0x05]
+
+v_max3_i32_e64_dpp v5, v1, v2, 0.5 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: [0x05,0x00,0x1d,0xd6,0xea,0x04,0xc2,0x03,0x01,0x77,0x39,0x05]
+
+v_max3_i32_e64_dpp v255, v255, v255, src_scc dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX12: [0xff,0x00,0x1d,0xd6,0xe9,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
+
+v_max3_u16_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x4e,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_max3_u16_e64_dpp v5, v1, s2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x4e,0xd6,0xe9,0x04,0x0c,0x04,0x01,0x77,0x39,0x05]
+
+v_max3_u16_e64_dpp v5, v1, 10, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x4e,0xd6,0xe9,0x14,0x0d,0x04,0x01,0x77,0x39,0x05]
+
+v_max3_u16_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x4e,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_max3_u16_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x4e,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_max3_u16_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x4e,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_max3_u16_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x4e,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_max3_u16_e64_dpp v5, v1, v2, ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x4e,0xd6,0xe9,0x04,0xee,0x01,0x01,0x77,0x39,0x05]
+
+v_max3_u16_e64_dpp v5, v1, v2, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x4e,0xd6,0xe9,0x04,0xfe,0x01,0x01,0x77,0x39,0x05]
+
+v_max3_u16_e64_dpp v5, v1, v2, exec_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x4e,0xd6,0xe9,0x04,0xfa,0x01,0x01,0x77,0x39,0x05]
+
+v_max3_u16_e64_dpp v5, v1, v2, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x4e,0xd6,0xe9,0x04,0xf2,0x01,0x01,0x77,0x39,0x05]
+
+v_max3_u16_e64_dpp v5, v1, v2, -1 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: [0x05,0x00,0x4e,0xd6,0xea,0x04,0x06,0x03,0x01,0x77,0x39,0x05]
+
+v_max3_u16_e64_dpp v255, v255, v255, src_scc dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX12: [0xff,0x00,0x4e,0xd6,0xe9,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
+
+v_max3_u32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x1e,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_max3_u32_e64_dpp v5, v1, s2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x1e,0xd6,0xe9,0x04,0x0c,0x04,0x01,0x77,0x39,0x05]
+
+v_max3_u32_e64_dpp v5, v1, 10, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x1e,0xd6,0xe9,0x14,0x0d,0x04,0x01,0x77,0x39,0x05]
+
+v_max3_u32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x1e,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_max3_u32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x1e,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_max3_u32_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x1e,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_max3_u32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x1e,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_max3_u32_e64_dpp v5, v1, v2, ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x1e,0xd6,0xe9,0x04,0xee,0x01,0x01,0x77,0x39,0x05]
+
+v_max3_u32_e64_dpp v5, v1, v2, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x1e,0xd6,0xe9,0x04,0xfe,0x01,0x01,0x77,0x39,0x05]
+
+v_max3_u32_e64_dpp v5, v1, v2, exec_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x1e,0xd6,0xe9,0x04,0xfa,0x01,0x01,0x77,0x39,0x05]
+
+v_max3_u32_e64_dpp v5, v1, v2, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x1e,0xd6,0xe9,0x04,0xf2,0x01,0x01,0x77,0x39,0x05]
+
+v_max3_u32_e64_dpp v5, v1, v2, -1 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x1e,0xd6,0xe9,0x04,0x06,0x03,0x01,0x77,0x39,0x05]
+
+v_max3_u32_e64_dpp v5, v1, v2, 0.5 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: [0x05,0x00,0x1e,0xd6,0xea,0x04,0xc2,0x03,0x01,0x77,0x39,0x05]
+
+v_max3_u32_e64_dpp v255, v255, v255, src_scc dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX12: [0xff,0x00,0x1e,0xd6,0xe9,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
+
+v_max_i16_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x0a,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_max_i16_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: [0x05,0x00,0x0a,0xd7,0xea,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_max_i16_e64_dpp v255, v255, v255 dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX12: [0xff,0x00,0x0a,0xd7,0xe9,0xfe,0x03,0x00,0xff,0x00,0x00,0x00]
+
+v_max_u16_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x09,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_max_u16_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: [0x05,0x00,0x09,0xd7,0xea,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_max_u16_e64_dpp v255, v255, v255 dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX12: [0xff,0x00,0x09,0xd7,0xe9,0xfe,0x03,0x00,0xff,0x00,0x00,0x00]
+
+v_maxmin_num_f16_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x6b,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_maxmin_num_f16_e64_dpp v5, v1, s2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x6b,0xd6,0xe9,0x04,0x0c,0x04,0x01,0x77,0x39,0x05]
+
+v_maxmin_num_f16_e64_dpp v5, v1, 2.0, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x6b,0xd6,0xe9,0xe8,0x0d,0x04,0x01,0x77,0x39,0x05]
+
+v_maxmin_num_f16_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x6b,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_maxmin_num_f16_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x6b,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_maxmin_num_f16_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x6b,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_maxmin_num_f16_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x6b,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_maxmin_num_f16_e64_dpp v5, |v1|, v2, -ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x01,0x6b,0xd6,0xe9,0x04,0xee,0x81,0x01,0x77,0x39,0x05]
+
+v_maxmin_num_f16_e64_dpp v5, v1, -|v2|, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x02,0x6b,0xd6,0xe9,0x04,0xfe,0x41,0x01,0x77,0x39,0x05]
+
+v_maxmin_num_f16_e64_dpp v5, -v1, v2, |exec_lo| dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x04,0x6b,0xd6,0xe9,0x04,0xfa,0x21,0x01,0x77,0x39,0x05]
+
+v_maxmin_num_f16_e64_dpp v5, -|v1|, -|v2|, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x03,0x6b,0xd6,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05]
+
+v_maxmin_num_f16_e64_dpp v5, -|v1|, v2, -|-1| mul:2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x05,0x6b,0xd6,0xe9,0x04,0x06,0xab,0x01,0x77,0x39,0x05]
+
+v_maxmin_num_f16_e64_dpp v5, v1, -|v2|, -|0.5| mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: [0x05,0x06,0x6b,0xd6,0xea,0x04,0xc2,0xd3,0x01,0x77,0x39,0x05]
+
+v_maxmin_num_f16_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp div:2 dpp8:[0,0,0,0,0,0,0,0]
+// GFX12: [0xff,0x87,0x6b,0xd6,0xe9,0xfe,0xf7,0xfb,0xff,0x00,0x00,0x00]
+
+v_maxmin_num_f32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x69,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_maxmin_num_f32_e64_dpp v5, v1, s2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x69,0xd6,0xe9,0x04,0x0c,0x04,0x01,0x77,0x39,0x05]
+
+v_maxmin_num_f32_e64_dpp v5, v1, 2.0, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x69,0xd6,0xe9,0xe8,0x0d,0x04,0x01,0x77,0x39,0x05]
+
+v_maxmin_num_f32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x69,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_maxmin_num_f32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x69,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_maxmin_num_f32_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x69,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_maxmin_num_f32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x69,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_maxmin_num_f32_e64_dpp v5, |v1|, v2, -ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x01,0x69,0xd6,0xe9,0x04,0xee,0x81,0x01,0x77,0x39,0x05]
+
+v_maxmin_num_f32_e64_dpp v5, v1, -|v2|, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x02,0x69,0xd6,0xe9,0x04,0xfe,0x41,0x01,0x77,0x39,0x05]
+
+v_maxmin_num_f32_e64_dpp v5, -v1, v2, |exec_lo| dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x04,0x69,0xd6,0xe9,0x04,0xfa,0x21,0x01,0x77,0x39,0x05]
+
+v_maxmin_num_f32_e64_dpp v5, -|v1|, -|v2|, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x03,0x69,0xd6,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05]
+
+v_maxmin_num_f32_e64_dpp v5, -|v1|, v2, -|-1| mul:2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x05,0x69,0xd6,0xe9,0x04,0x06,0xab,0x01,0x77,0x39,0x05]
+
+v_maxmin_num_f32_e64_dpp v5, v1, -|v2|, -|0.5| mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: [0x05,0x06,0x69,0xd6,0xea,0x04,0xc2,0xd3,0x01,0x77,0x39,0x05]
+
+v_maxmin_num_f32_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp div:2 dpp8:[0,0,0,0,0,0,0,0]
+// GFX12: [0xff,0x87,0x69,0xd6,0xe9,0xfe,0xf7,0xfb,0xff,0x00,0x00,0x00]
+
+v_maxmin_i32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x64,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_maxmin_i32_e64_dpp v5, v1, s2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x64,0xd6,0xe9,0x04,0x0c,0x04,0x01,0x77,0x39,0x05]
+
+v_maxmin_i32_e64_dpp v5, v1, 10, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x64,0xd6,0xe9,0x14,0x0d,0x04,0x01,0x77,0x39,0x05]
+
+v_maxmin_i32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x64,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_maxmin_i32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x64,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_maxmin_i32_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x64,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_maxmin_i32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x64,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_maxmin_i32_e64_dpp v5, v1, v2, ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x64,0xd6,0xe9,0x04,0xee,0x01,0x01,0x77,0x39,0x05]
+
+v_maxmin_i32_e64_dpp v5, v1, v2, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x64,0xd6,0xe9,0x04,0xfe,0x01,0x01,0x77,0x39,0x05]
+
+v_maxmin_i32_e64_dpp v5, v1, v2, exec_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x64,0xd6,0xe9,0x04,0xfa,0x01,0x01,0x77,0x39,0x05]
+
+v_maxmin_i32_e64_dpp v5, v1, v2, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x64,0xd6,0xe9,0x04,0xf2,0x01,0x01,0x77,0x39,0x05]
+
+v_maxmin_i32_e64_dpp v5, v1, v2, -1 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x64,0xd6,0xe9,0x04,0x06,0x03,0x01,0x77,0x39,0x05]
+
+v_maxmin_i32_e64_dpp v5, v1, v2, 0.5 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: [0x05,0x00,0x64,0xd6,0xea,0x04,0xc2,0x03,0x01,0x77,0x39,0x05]
+
+v_maxmin_i32_e64_dpp v255, v255, v255, src_scc dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX12: [0xff,0x00,0x64,0xd6,0xe9,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
+
+v_maxmin_u32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x62,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_maxmin_u32_e64_dpp v5, v1, s2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x62,0xd6,0xe9,0x04,0x0c,0x04,0x01,0x77,0x39,0x05]
+
+v_maxmin_u32_e64_dpp v5, v1, 10, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x62,0xd6,0xe9,0x14,0x0d,0x04,0x01,0x77,0x39,0x05]
+
+v_maxmin_u32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x62,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_maxmin_u32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x62,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_maxmin_u32_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x62,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_maxmin_u32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x62,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_maxmin_u32_e64_dpp v5, v1, v2, ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x62,0xd6,0xe9,0x04,0xee,0x01,0x01,0x77,0x39,0x05]
+
+v_maxmin_u32_e64_dpp v5, v1, v2, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x62,0xd6,0xe9,0x04,0xfe,0x01,0x01,0x77,0x39,0x05]
+
+v_maxmin_u32_e64_dpp v5, v1, v2, exec_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x62,0xd6,0xe9,0x04,0xfa,0x01,0x01,0x77,0x39,0x05]
+
+v_maxmin_u32_e64_dpp v5, v1, v2, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x62,0xd6,0xe9,0x04,0xf2,0x01,0x01,0x77,0x39,0x05]
+
+v_maxmin_u32_e64_dpp v5, v1, v2, -1 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x62,0xd6,0xe9,0x04,0x06,0x03,0x01,0x77,0x39,0x05]
+
+v_maxmin_u32_e64_dpp v5, v1, v2, 0.5 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: [0x05,0x00,0x62,0xd6,0xea,0x04,0xc2,0x03,0x01,0x77,0x39,0x05]
+
+v_maxmin_u32_e64_dpp v255, v255, v255, src_scc dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX12: [0xff,0x00,0x62,0xd6,0xe9,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
+
+v_mbcnt_hi_u32_b32_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x20,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_mbcnt_hi_u32_b32_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: [0x05,0x00,0x20,0xd7,0xea,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_mbcnt_hi_u32_b32_e64_dpp v255, v255, v255 dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX12: [0xff,0x00,0x20,0xd7,0xe9,0xfe,0x03,0x00,0xff,0x00,0x00,0x00]
+
+v_mbcnt_lo_u32_b32_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x1f,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_mbcnt_lo_u32_b32_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: [0x05,0x00,0x1f,0xd7,0xea,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_mbcnt_lo_u32_b32_e64_dpp v255, v255, v255 dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX12: [0xff,0x00,0x1f,0xd7,0xe9,0xfe,0x03,0x00,0xff,0x00,0x00,0x00]
+
+v_med3_num_f16_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x32,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_med3_num_f16_e64_dpp v5, v1, s2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x32,0xd6,0xe9,0x04,0x0c,0x04,0x01,0x77,0x39,0x05]
+
+v_med3_num_f16_e64_dpp v5, v1, 2.0, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x32,0xd6,0xe9,0xe8,0x0d,0x04,0x01,0x77,0x39,0x05]
+
+v_med3_num_f16_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x32,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_med3_num_f16_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x32,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_med3_num_f16_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x32,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_med3_num_f16_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x32,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_med3_num_f16_e64_dpp v5, |v1|, v2, -ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x01,0x32,0xd6,0xe9,0x04,0xee,0x81,0x01,0x77,0x39,0x05]
+
+v_med3_num_f16_e64_dpp v5, v1, -|v2|, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x02,0x32,0xd6,0xe9,0x04,0xfe,0x41,0x01,0x77,0x39,0x05]
+
+v_med3_num_f16_e64_dpp v5, -v1, v2, |exec_lo| dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x04,0x32,0xd6,0xe9,0x04,0xfa,0x21,0x01,0x77,0x39,0x05]
+
+v_med3_num_f16_e64_dpp v5, -|v1|, -|v2|, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x03,0x32,0xd6,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05]
+
+v_med3_num_f16_e64_dpp v5, -|v1|, v2, -|-1| dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x05,0x32,0xd6,0xe9,0x04,0x06,0xa3,0x01,0x77,0x39,0x05]
+
+v_med3_num_f16_e64_dpp v5, v1, -|v2|, -|0.5| dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: [0x05,0x06,0x32,0xd6,0xea,0x04,0xc2,0xc3,0x01,0x77,0x39,0x05]
+
+v_med3_num_f16_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp dpp8:[0,0,0,0,0,0,0,0]
+// GFX12: [0xff,0x87,0x32,0xd6,0xe9,0xfe,0xf7,0xe3,0xff,0x00,0x00,0x00]
+
+v_med3_num_f32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x31,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_med3_num_f32_e64_dpp v5, v1, s2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x31,0xd6,0xe9,0x04,0x0c,0x04,0x01,0x77,0x39,0x05]
+
+v_med3_num_f32_e64_dpp v5, v1, 2.0, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x31,0xd6,0xe9,0xe8,0x0d,0x04,0x01,0x77,0x39,0x05]
+
+v_med3_num_f32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x31,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_med3_num_f32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x31,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_med3_num_f32_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x31,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_med3_num_f32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x31,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_med3_num_f32_e64_dpp v5, |v1|, v2, -ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x01,0x31,0xd6,0xe9,0x04,0xee,0x81,0x01,0x77,0x39,0x05]
+
+v_med3_num_f32_e64_dpp v5, v1, -|v2|, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x02,0x31,0xd6,0xe9,0x04,0xfe,0x41,0x01,0x77,0x39,0x05]
+
+v_med3_num_f32_e64_dpp v5, -v1, v2, |exec_lo| dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x04,0x31,0xd6,0xe9,0x04,0xfa,0x21,0x01,0x77,0x39,0x05]
+
+v_med3_num_f32_e64_dpp v5, -|v1|, -|v2|, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x03,0x31,0xd6,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05]
+
+v_med3_num_f32_e64_dpp v5, -|v1|, v2, -|-1| mul:2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x05,0x31,0xd6,0xe9,0x04,0x06,0xab,0x01,0x77,0x39,0x05]
+
+v_med3_num_f32_e64_dpp v5, v1, -|v2|, -|0.5| mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: [0x05,0x06,0x31,0xd6,0xea,0x04,0xc2,0xd3,0x01,0x77,0x39,0x05]
+
+v_med3_num_f32_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp div:2 dpp8:[0,0,0,0,0,0,0,0]
+// GFX12: [0xff,0x87,0x31,0xd6,0xe9,0xfe,0xf7,0xfb,0xff,0x00,0x00,0x00]
+
+v_med3_i16_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x50,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_med3_i16_e64_dpp v5, v1, s2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x50,0xd6,0xe9,0x04,0x0c,0x04,0x01,0x77,0x39,0x05]
+
+v_med3_i16_e64_dpp v5, v1, 10, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x50,0xd6,0xe9,0x14,0x0d,0x04,0x01,0x77,0x39,0x05]
+
+v_med3_i16_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x50,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_med3_i16_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x50,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_med3_i16_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x50,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_med3_i16_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x50,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_med3_i16_e64_dpp v5, v1, v2, ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x50,0xd6,0xe9,0x04,0xee,0x01,0x01,0x77,0x39,0x05]
+
+v_med3_i16_e64_dpp v5, v1, v2, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x50,0xd6,0xe9,0x04,0xfe,0x01,0x01,0x77,0x39,0x05]
+
+v_med3_i16_e64_dpp v5, v1, v2, exec_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x50,0xd6,0xe9,0x04,0xfa,0x01,0x01,0x77,0x39,0x05]
+
+v_med3_i16_e64_dpp v5, v1, v2, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x50,0xd6,0xe9,0x04,0xf2,0x01,0x01,0x77,0x39,0x05]
+
+v_med3_i16_e64_dpp v5, v1, v2, -1 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: [0x05,0x00,0x50,0xd6,0xea,0x04,0x06,0x03,0x01,0x77,0x39,0x05]
+
+v_med3_i16_e64_dpp v255, v255, v255, src_scc dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX12: [0xff,0x00,0x50,0xd6,0xe9,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
+
+v_med3_i32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x20,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_med3_i32_e64_dpp v5, v1, s2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x20,0xd6,0xe9,0x04,0x0c,0x04,0x01,0x77,0x39,0x05]
+
+v_med3_i32_e64_dpp v5, v1, 10, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x20,0xd6,0xe9,0x14,0x0d,0x04,0x01,0x77,0x39,0x05]
+
+v_med3_i32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x20,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_med3_i32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x20,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_med3_i32_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x20,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_med3_i32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x20,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_med3_i32_e64_dpp v5, v1, v2, ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x20,0xd6,0xe9,0x04,0xee,0x01,0x01,0x77,0x39,0x05]
+
+v_med3_i32_e64_dpp v5, v1, v2, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x20,0xd6,0xe9,0x04,0xfe,0x01,0x01,0x77,0x39,0x05]
+
+v_med3_i32_e64_dpp v5, v1, v2, exec_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x20,0xd6,0xe9,0x04,0xfa,0x01,0x01,0x77,0x39,0x05]
+
+v_med3_i32_e64_dpp v5, v1, v2, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x20,0xd6,0xe9,0x04,0xf2,0x01,0x01,0x77,0x39,0x05]
+
+v_med3_i32_e64_dpp v5, v1, v2, -1 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x20,0xd6,0xe9,0x04,0x06,0x03,0x01,0x77,0x39,0x05]
+
+v_med3_i32_e64_dpp v5, v1, v2, 0.5 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: [0x05,0x00,0x20,0xd6,0xea,0x04,0xc2,0x03,0x01,0x77,0x39,0x05]
+
+v_med3_i32_e64_dpp v255, v255, v255, src_scc dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX12: [0xff,0x00,0x20,0xd6,0xe9,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
+
+v_med3_u16_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x51,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_med3_u16_e64_dpp v5, v1, s2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x51,0xd6,0xe9,0x04,0x0c,0x04,0x01,0x77,0x39,0x05]
+
+v_med3_u16_e64_dpp v5, v1, 10, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x51,0xd6,0xe9,0x14,0x0d,0x04,0x01,0x77,0x39,0x05]
+
+v_med3_u16_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x51,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_med3_u16_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x51,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_med3_u16_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x51,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_med3_u16_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x51,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_med3_u16_e64_dpp v5, v1, v2, ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x51,0xd6,0xe9,0x04,0xee,0x01,0x01,0x77,0x39,0x05]
+
+v_med3_u16_e64_dpp v5, v1, v2, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x51,0xd6,0xe9,0x04,0xfe,0x01,0x01,0x77,0x39,0x05]
+
+v_med3_u16_e64_dpp v5, v1, v2, exec_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x51,0xd6,0xe9,0x04,0xfa,0x01,0x01,0x77,0x39,0x05]
+
+v_med3_u16_e64_dpp v5, v1, v2, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x51,0xd6,0xe9,0x04,0xf2,0x01,0x01,0x77,0x39,0x05]
+
+v_med3_u16_e64_dpp v5, v1, v2, -1 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: [0x05,0x00,0x51,0xd6,0xea,0x04,0x06,0x03,0x01,0x77,0x39,0x05]
+
+v_med3_u16_e64_dpp v255, v255, v255, src_scc dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX12: [0xff,0x00,0x51,0xd6,0xe9,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
+
+v_med3_u32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x21,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_med3_u32_e64_dpp v5, v1, s2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x21,0xd6,0xe9,0x04,0x0c,0x04,0x01,0x77,0x39,0x05]
+
+v_med3_u32_e64_dpp v5, v1, 10, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x21,0xd6,0xe9,0x14,0x0d,0x04,0x01,0x77,0x39,0x05]
+
+v_med3_u32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x21,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_med3_u32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x21,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_med3_u32_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x21,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_med3_u32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x21,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_med3_u32_e64_dpp v5, v1, v2, ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x21,0xd6,0xe9,0x04,0xee,0x01,0x01,0x77,0x39,0x05]
+
+v_med3_u32_e64_dpp v5, v1, v2, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x21,0xd6,0xe9,0x04,0xfe,0x01,0x01,0x77,0x39,0x05]
+
+v_med3_u32_e64_dpp v5, v1, v2, exec_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x21,0xd6,0xe9,0x04,0xfa,0x01,0x01,0x77,0x39,0x05]
+
+v_med3_u32_e64_dpp v5, v1, v2, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x21,0xd6,0xe9,0x04,0xf2,0x01,0x01,0x77,0x39,0x05]
+
+v_med3_u32_e64_dpp v5, v1, v2, -1 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x21,0xd6,0xe9,0x04,0x06,0x03,0x01,0x77,0x39,0x05]
+
+v_med3_u32_e64_dpp v5, v1, v2, 0.5 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: [0x05,0x00,0x21,0xd6,0xea,0x04,0xc2,0x03,0x01,0x77,0x39,0x05]
+
+v_med3_u32_e64_dpp v255, v255, v255, src_scc dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX12: [0xff,0x00,0x21,0xd6,0xe9,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
+
+v_min3_num_f16_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x2b,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_min3_num_f16_e64_dpp v5, v1, s2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x2b,0xd6,0xe9,0x04,0x0c,0x04,0x01,0x77,0x39,0x05]
+
+v_min3_num_f16_e64_dpp v5, v1, 2.0, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x2b,0xd6,0xe9,0xe8,0x0d,0x04,0x01,0x77,0x39,0x05]
+
+v_min3_num_f16_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x2b,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_min3_num_f16_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x2b,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_min3_num_f16_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x2b,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_min3_num_f16_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x2b,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_min3_num_f16_e64_dpp v5, |v1|, v2, -ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x01,0x2b,0xd6,0xe9,0x04,0xee,0x81,0x01,0x77,0x39,0x05]
+
+v_min3_num_f16_e64_dpp v5, v1, -|v2|, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x02,0x2b,0xd6,0xe9,0x04,0xfe,0x41,0x01,0x77,0x39,0x05]
+
+v_min3_num_f16_e64_dpp v5, -v1, v2, |exec_lo| dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x04,0x2b,0xd6,0xe9,0x04,0xfa,0x21,0x01,0x77,0x39,0x05]
+
+v_min3_num_f16_e64_dpp v5, -|v1|, -|v2|, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x03,0x2b,0xd6,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05]
+
+v_min3_num_f16_e64_dpp v5, -|v1|, v2, -|-1| dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x05,0x2b,0xd6,0xe9,0x04,0x06,0xa3,0x01,0x77,0x39,0x05]
+
+v_min3_num_f16_e64_dpp v5, v1, -|v2|, -|0.5| dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: [0x05,0x06,0x2b,0xd6,0xea,0x04,0xc2,0xc3,0x01,0x77,0x39,0x05]
+
+v_min3_num_f16_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp dpp8:[0,0,0,0,0,0,0,0]
+// GFX12: [0xff,0x87,0x2b,0xd6,0xe9,0xfe,0xf7,0xe3,0xff,0x00,0x00,0x00]
+
+v_min3_num_f32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x29,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_min3_num_f32_e64_dpp v5, v1, s2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x29,0xd6,0xe9,0x04,0x0c,0x04,0x01,0x77,0x39,0x05]
+
+v_min3_num_f32_e64_dpp v5, v1, 2.0, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x29,0xd6,0xe9,0xe8,0x0d,0x04,0x01,0x77,0x39,0x05]
+
+v_min3_num_f32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x29,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_min3_num_f32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x29,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_min3_num_f32_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x29,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_min3_num_f32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x29,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_min3_num_f32_e64_dpp v5, |v1|, v2, -ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x01,0x29,0xd6,0xe9,0x04,0xee,0x81,0x01,0x77,0x39,0x05]
+
+v_min3_num_f32_e64_dpp v5, v1, -|v2|, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x02,0x29,0xd6,0xe9,0x04,0xfe,0x41,0x01,0x77,0x39,0x05]
+
+v_min3_num_f32_e64_dpp v5, -v1, v2, |exec_lo| dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x04,0x29,0xd6,0xe9,0x04,0xfa,0x21,0x01,0x77,0x39,0x05]
+
+v_min3_num_f32_e64_dpp v5, -|v1|, -|v2|, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x03,0x29,0xd6,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05]
+
+v_min3_num_f32_e64_dpp v5, -|v1|, v2, -|-1| mul:2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x05,0x29,0xd6,0xe9,0x04,0x06,0xab,0x01,0x77,0x39,0x05]
+
+v_min3_num_f32_e64_dpp v5, v1, -|v2|, -|0.5| mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: [0x05,0x06,0x29,0xd6,0xea,0x04,0xc2,0xd3,0x01,0x77,0x39,0x05]
+
+v_min3_num_f32_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp div:2 dpp8:[0,0,0,0,0,0,0,0]
+// GFX12: [0xff,0x87,0x29,0xd6,0xe9,0xfe,0xf7,0xfb,0xff,0x00,0x00,0x00]
+
+v_min3_i16_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x4a,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_min3_i16_e64_dpp v5, v1, s2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x4a,0xd6,0xe9,0x04,0x0c,0x04,0x01,0x77,0x39,0x05]
+
+v_min3_i16_e64_dpp v5, v1, 10, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x4a,0xd6,0xe9,0x14,0x0d,0x04,0x01,0x77,0x39,0x05]
+
+v_min3_i16_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x4a,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_min3_i16_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x4a,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_min3_i16_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x4a,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_min3_i16_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x4a,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_min3_i16_e64_dpp v5, v1, v2, ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x4a,0xd6,0xe9,0x04,0xee,0x01,0x01,0x77,0x39,0x05]
+
+v_min3_i16_e64_dpp v5, v1, v2, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x4a,0xd6,0xe9,0x04,0xfe,0x01,0x01,0x77,0x39,0x05]
+
+v_min3_i16_e64_dpp v5, v1, v2, exec_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x4a,0xd6,0xe9,0x04,0xfa,0x01,0x01,0x77,0x39,0x05]
+
+v_min3_i16_e64_dpp v5, v1, v2, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x4a,0xd6,0xe9,0x04,0xf2,0x01,0x01,0x77,0x39,0x05]
+
+v_min3_i16_e64_dpp v5, v1, v2, -1 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: [0x05,0x00,0x4a,0xd6,0xea,0x04,0x06,0x03,0x01,0x77,0x39,0x05]
+
+v_min3_i16_e64_dpp v255, v255, v255, src_scc dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX12: [0xff,0x00,0x4a,0xd6,0xe9,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
+
+v_min3_i32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x1a,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_min3_i32_e64_dpp v5, v1, s2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x1a,0xd6,0xe9,0x04,0x0c,0x04,0x01,0x77,0x39,0x05]
+
+v_min3_i32_e64_dpp v5, v1, 10, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x1a,0xd6,0xe9,0x14,0x0d,0x04,0x01,0x77,0x39,0x05]
+
+v_min3_i32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x1a,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_min3_i32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x1a,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_min3_i32_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x1a,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_min3_i32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x1a,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_min3_i32_e64_dpp v5, v1, v2, ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x1a,0xd6,0xe9,0x04,0xee,0x01,0x01,0x77,0x39,0x05]
+
+v_min3_i32_e64_dpp v5, v1, v2, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x1a,0xd6,0xe9,0x04,0xfe,0x01,0x01,0x77,0x39,0x05]
+
+v_min3_i32_e64_dpp v5, v1, v2, exec_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x1a,0xd6,0xe9,0x04,0xfa,0x01,0x01,0x77,0x39,0x05]
+
+v_min3_i32_e64_dpp v5, v1, v2, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x1a,0xd6,0xe9,0x04,0xf2,0x01,0x01,0x77,0x39,0x05]
+
+v_min3_i32_e64_dpp v5, v1, v2, -1 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x1a,0xd6,0xe9,0x04,0x06,0x03,0x01,0x77,0x39,0x05]
+
+v_min3_i32_e64_dpp v5, v1, v2, 0.5 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: [0x05,0x00,0x1a,0xd6,0xea,0x04,0xc2,0x03,0x01,0x77,0x39,0x05]
+
+v_min3_i32_e64_dpp v255, v255, v255, src_scc dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX12: [0xff,0x00,0x1a,0xd6,0xe9,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
+
+v_min3_u16_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x4b,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_min3_u16_e64_dpp v5, v1, s2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x4b,0xd6,0xe9,0x04,0x0c,0x04,0x01,0x77,0x39,0x05]
+
+v_min3_u16_e64_dpp v5, v1, 10, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x4b,0xd6,0xe9,0x14,0x0d,0x04,0x01,0x77,0x39,0x05]
+
+v_min3_u16_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x4b,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_min3_u16_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x4b,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_min3_u16_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x4b,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_min3_u16_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x4b,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_min3_u16_e64_dpp v5, v1, v2, ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x4b,0xd6,0xe9,0x04,0xee,0x01,0x01,0x77,0x39,0x05]
+
+v_min3_u16_e64_dpp v5, v1, v2, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x4b,0xd6,0xe9,0x04,0xfe,0x01,0x01,0x77,0x39,0x05]
+
+v_min3_u16_e64_dpp v5, v1, v2, exec_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x4b,0xd6,0xe9,0x04,0xfa,0x01,0x01,0x77,0x39,0x05]
+
+v_min3_u16_e64_dpp v5, v1, v2, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x4b,0xd6,0xe9,0x04,0xf2,0x01,0x01,0x77,0x39,0x05]
+
+v_min3_u16_e64_dpp v5, v1, v2, -1 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: [0x05,0x00,0x4b,0xd6,0xea,0x04,0x06,0x03,0x01,0x77,0x39,0x05]
+
+v_min3_u16_e64_dpp v255, v255, v255, src_scc dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX12: [0xff,0x00,0x4b,0xd6,0xe9,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
+
+v_min3_u32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x1b,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_min3_u32_e64_dpp v5, v1, s2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x1b,0xd6,0xe9,0x04,0x0c,0x04,0x01,0x77,0x39,0x05]
+
+v_min3_u32_e64_dpp v5, v1, 10, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x1b,0xd6,0xe9,0x14,0x0d,0x04,0x01,0x77,0x39,0x05]
+
+v_min3_u32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x1b,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_min3_u32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x1b,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_min3_u32_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x1b,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_min3_u32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x1b,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_min3_u32_e64_dpp v5, v1, v2, ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x1b,0xd6,0xe9,0x04,0xee,0x01,0x01,0x77,0x39,0x05]
+
+v_min3_u32_e64_dpp v5, v1, v2, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x1b,0xd6,0xe9,0x04,0xfe,0x01,0x01,0x77,0x39,0x05]
+
+v_min3_u32_e64_dpp v5, v1, v2, exec_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x1b,0xd6,0xe9,0x04,0xfa,0x01,0x01,0x77,0x39,0x05]
+
+v_min3_u32_e64_dpp v5, v1, v2, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x1b,0xd6,0xe9,0x04,0xf2,0x01,0x01,0x77,0x39,0x05]
+
+v_min3_u32_e64_dpp v5, v1, v2, -1 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x1b,0xd6,0xe9,0x04,0x06,0x03,0x01,0x77,0x39,0x05]
+
+v_min3_u32_e64_dpp v5, v1, v2, 0.5 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: [0x05,0x00,0x1b,0xd6,0xea,0x04,0xc2,0x03,0x01,0x77,0x39,0x05]
+
+v_min3_u32_e64_dpp v255, v255, v255, src_scc dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX12: [0xff,0x00,0x1b,0xd6,0xe9,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
+
+v_min_i16_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x0c,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_min_i16_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: [0x05,0x00,0x0c,0xd7,0xea,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_min_i16_e64_dpp v255, v255, v255 dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX12: [0xff,0x00,0x0c,0xd7,0xe9,0xfe,0x03,0x00,0xff,0x00,0x00,0x00]
+
+v_min_u16_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x0b,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_min_u16_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: [0x05,0x00,0x0b,0xd7,0xea,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_min_u16_e64_dpp v255, v255, v255 dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX12: [0xff,0x00,0x0b,0xd7,0xe9,0xfe,0x03,0x00,0xff,0x00,0x00,0x00]
+
+v_minmax_num_f16_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x6a,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_minmax_num_f16_e64_dpp v5, v1, s2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x6a,0xd6,0xe9,0x04,0x0c,0x04,0x01,0x77,0x39,0x05]
+
+v_minmax_num_f16_e64_dpp v5, v1, 2.0, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x6a,0xd6,0xe9,0xe8,0x0d,0x04,0x01,0x77,0x39,0x05]
+
+v_minmax_num_f16_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x6a,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_minmax_num_f16_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x6a,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_minmax_num_f16_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x6a,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_minmax_num_f16_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x6a,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_minmax_num_f16_e64_dpp v5, |v1|, v2, -ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x01,0x6a,0xd6,0xe9,0x04,0xee,0x81,0x01,0x77,0x39,0x05]
+
+v_minmax_num_f16_e64_dpp v5, v1, -|v2|, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x02,0x6a,0xd6,0xe9,0x04,0xfe,0x41,0x01,0x77,0x39,0x05]
+
+v_minmax_num_f16_e64_dpp v5, -v1, v2, |exec_lo| dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x04,0x6a,0xd6,0xe9,0x04,0xfa,0x21,0x01,0x77,0x39,0x05]
+
+v_minmax_num_f16_e64_dpp v5, -|v1|, -|v2|, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x03,0x6a,0xd6,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05]
+
+v_minmax_num_f16_e64_dpp v5, -|v1|, v2, -|-1| mul:2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x05,0x6a,0xd6,0xe9,0x04,0x06,0xab,0x01,0x77,0x39,0x05]
+
+v_minmax_num_f16_e64_dpp v5, v1, -|v2|, -|0.5| mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: [0x05,0x06,0x6a,0xd6,0xea,0x04,0xc2,0xd3,0x01,0x77,0x39,0x05]
+
+v_minmax_num_f16_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp div:2 dpp8:[0,0,0,0,0,0,0,0]
+// GFX12: [0xff,0x87,0x6a,0xd6,0xe9,0xfe,0xf7,0xfb,0xff,0x00,0x00,0x00]
+
+v_minmax_num_f32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x68,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_minmax_num_f32_e64_dpp v5, v1, s2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x68,0xd6,0xe9,0x04,0x0c,0x04,0x01,0x77,0x39,0x05]
+
+v_minmax_num_f32_e64_dpp v5, v1, 2.0, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x68,0xd6,0xe9,0xe8,0x0d,0x04,0x01,0x77,0x39,0x05]
+
+v_minmax_num_f32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x68,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_minmax_num_f32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x68,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_minmax_num_f32_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x68,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_minmax_num_f32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x68,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_minmax_num_f32_e64_dpp v5, |v1|, v2, -ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x01,0x68,0xd6,0xe9,0x04,0xee,0x81,0x01,0x77,0x39,0x05]
+
+v_minmax_num_f32_e64_dpp v5, v1, -|v2|, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x02,0x68,0xd6,0xe9,0x04,0xfe,0x41,0x01,0x77,0x39,0x05]
+
+v_minmax_num_f32_e64_dpp v5, -v1, v2, |exec_lo| dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x04,0x68,0xd6,0xe9,0x04,0xfa,0x21,0x01,0x77,0x39,0x05]
+
+v_minmax_num_f32_e64_dpp v5, -|v1|, -|v2|, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x03,0x68,0xd6,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05]
+
+v_minmax_num_f32_e64_dpp v5, -|v1|, v2, -|-1| mul:2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x05,0x68,0xd6,0xe9,0x04,0x06,0xab,0x01,0x77,0x39,0x05]
+
+v_minmax_num_f32_e64_dpp v5, v1, -|v2|, -|0.5| mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: [0x05,0x06,0x68,0xd6,0xea,0x04,0xc2,0xd3,0x01,0x77,0x39,0x05]
+
+v_minmax_num_f32_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp div:2 dpp8:[0,0,0,0,0,0,0,0]
+// GFX12: [0xff,0x87,0x68,0xd6,0xe9,0xfe,0xf7,0xfb,0xff,0x00,0x00,0x00]
+
+v_minmax_i32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x65,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_minmax_i32_e64_dpp v5, v1, s2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x65,0xd6,0xe9,0x04,0x0c,0x04,0x01,0x77,0x39,0x05]
+
+v_minmax_i32_e64_dpp v5, v1, 10, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x65,0xd6,0xe9,0x14,0x0d,0x04,0x01,0x77,0x39,0x05]
+
+v_minmax_i32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x65,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_minmax_i32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x65,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_minmax_i32_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x65,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_minmax_i32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x65,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_minmax_i32_e64_dpp v5, v1, v2, ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x65,0xd6,0xe9,0x04,0xee,0x01,0x01,0x77,0x39,0x05]
+
+v_minmax_i32_e64_dpp v5, v1, v2, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x65,0xd6,0xe9,0x04,0xfe,0x01,0x01,0x77,0x39,0x05]
+
+v_minmax_i32_e64_dpp v5, v1, v2, exec_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x65,0xd6,0xe9,0x04,0xfa,0x01,0x01,0x77,0x39,0x05]
+
+v_minmax_i32_e64_dpp v5, v1, v2, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x65,0xd6,0xe9,0x04,0xf2,0x01,0x01,0x77,0x39,0x05]
+
+v_minmax_i32_e64_dpp v5, v1, v2, -1 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x65,0xd6,0xe9,0x04,0x06,0x03,0x01,0x77,0x39,0x05]
+
+v_minmax_i32_e64_dpp v5, v1, v2, 0.5 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: [0x05,0x00,0x65,0xd6,0xea,0x04,0xc2,0x03,0x01,0x77,0x39,0x05]
+
+v_minmax_i32_e64_dpp v255, v255, v255, src_scc dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX12: [0xff,0x00,0x65,0xd6,0xe9,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
+
+v_minmax_u32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x63,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_minmax_u32_e64_dpp v5, v1, s2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x63,0xd6,0xe9,0x04,0x0c,0x04,0x01,0x77,0x39,0x05]
+
+v_minmax_u32_e64_dpp v5, v1, 10, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x63,0xd6,0xe9,0x14,0x0d,0x04,0x01,0x77,0x39,0x05]
+
+v_minmax_u32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x63,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_minmax_u32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x63,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_minmax_u32_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x63,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_minmax_u32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x63,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_minmax_u32_e64_dpp v5, v1, v2, ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x63,0xd6,0xe9,0x04,0xee,0x01,0x01,0x77,0x39,0x05]
+
+v_minmax_u32_e64_dpp v5, v1, v2, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x63,0xd6,0xe9,0x04,0xfe,0x01,0x01,0x77,0x39,0x05]
+
+v_minmax_u32_e64_dpp v5, v1, v2, exec_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x63,0xd6,0xe9,0x04,0xfa,0x01,0x01,0x77,0x39,0x05]
+
+v_minmax_u32_e64_dpp v5, v1, v2, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x63,0xd6,0xe9,0x04,0xf2,0x01,0x01,0x77,0x39,0x05]
+
+v_minmax_u32_e64_dpp v5, v1, v2, -1 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x63,0xd6,0xe9,0x04,0x06,0x03,0x01,0x77,0x39,0x05]
+
+v_minmax_u32_e64_dpp v5, v1, v2, 0.5 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: [0x05,0x00,0x63,0xd6,0xea,0x04,0xc2,0x03,0x01,0x77,0x39,0x05]
+
+v_minmax_u32_e64_dpp v255, v255, v255, src_scc dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX12: [0xff,0x00,0x63,0xd6,0xe9,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
+
+v_msad_u8_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x39,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_msad_u8_e64_dpp v5, v1, s2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x39,0xd6,0xe9,0x04,0x0c,0x04,0x01,0x77,0x39,0x05]
+
+v_msad_u8_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x39,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_msad_u8_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x39,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_msad_u8_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x39,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_msad_u8_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x39,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_msad_u8_e64_dpp v5, v1, v2, ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x39,0xd6,0xe9,0x04,0xee,0x01,0x01,0x77,0x39,0x05]
+
+v_msad_u8_e64_dpp v5, v1, v2, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x39,0xd6,0xe9,0x04,0xfe,0x01,0x01,0x77,0x39,0x05]
+
+v_msad_u8_e64_dpp v5, v1, v2, exec_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x39,0xd6,0xe9,0x04,0xfa,0x01,0x01,0x77,0x39,0x05]
+
+v_msad_u8_e64_dpp v5, v1, v2, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x39,0xd6,0xe9,0x04,0xf2,0x01,0x01,0x77,0x39,0x05]
+
+v_msad_u8_e64_dpp v5, v1, v2, -1 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x39,0xd6,0xe9,0x04,0x06,0x03,0x01,0x77,0x39,0x05]
+
+v_msad_u8_e64_dpp v5, v1, v2, 0.5 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: [0x05,0x00,0x39,0xd6,0xea,0x04,0xc2,0x03,0x01,0x77,0x39,0x05]
+
+v_msad_u8_e64_dpp v255, v255, v255, src_scc clamp dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX12: [0xff,0x80,0x39,0xd6,0xe9,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
+
+v_mul_lo_u16_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x05,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_mul_lo_u16_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: [0x05,0x00,0x05,0xd7,0xea,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_mul_lo_u16_e64_dpp v255, v255, v255 dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX12: [0xff,0x00,0x05,0xd7,0xe9,0xfe,0x03,0x00,0xff,0x00,0x00,0x00]
+
+v_mullit_f32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x18,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_mullit_f32_e64_dpp v5, v1, s2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x18,0xd6,0xe9,0x04,0x0c,0x04,0x01,0x77,0x39,0x05]
+
+v_mullit_f32_e64_dpp v5, v1, 2.0, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x18,0xd6,0xe9,0xe8,0x0d,0x04,0x01,0x77,0x39,0x05]
+
+v_mullit_f32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x18,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_mullit_f32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x18,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_mullit_f32_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x18,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_mullit_f32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x18,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_mullit_f32_e64_dpp v5, |v1|, v2, -ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x01,0x18,0xd6,0xe9,0x04,0xee,0x81,0x01,0x77,0x39,0x05]
+
+v_mullit_f32_e64_dpp v5, v1, -|v2|, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x02,0x18,0xd6,0xe9,0x04,0xfe,0x41,0x01,0x77,0x39,0x05]
+
+v_mullit_f32_e64_dpp v5, -v1, v2, |exec_lo| dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x04,0x18,0xd6,0xe9,0x04,0xfa,0x21,0x01,0x77,0x39,0x05]
+
+v_mullit_f32_e64_dpp v5, -|v1|, -|v2|, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x03,0x18,0xd6,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05]
+
+v_mullit_f32_e64_dpp v5, -|v1|, v2, -|-1| mul:2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x05,0x18,0xd6,0xe9,0x04,0x06,0xab,0x01,0x77,0x39,0x05]
+
+v_mullit_f32_e64_dpp v5, v1, -|v2|, -|0.5| mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: [0x05,0x06,0x18,0xd6,0xea,0x04,0xc2,0xd3,0x01,0x77,0x39,0x05]
+
+v_mullit_f32_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX12: [0xff,0x87,0x18,0xd6,0xe9,0xfe,0xf7,0xfb,0xff,0x00,0x00,0x00]
+
+v_or3_b32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x58,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_or3_b32_e64_dpp v5, v1, s2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x58,0xd6,0xe9,0x04,0x0c,0x04,0x01,0x77,0x39,0x05]
+
+v_or3_b32_e64_dpp v5, v1, 10, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x58,0xd6,0xe9,0x14,0x0d,0x04,0x01,0x77,0x39,0x05]
+
+v_or3_b32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x58,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_or3_b32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x58,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_or3_b32_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x58,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_or3_b32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x58,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_or3_b32_e64_dpp v5, v1, v2, ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x58,0xd6,0xe9,0x04,0xee,0x01,0x01,0x77,0x39,0x05]
+
+v_or3_b32_e64_dpp v5, v1, v2, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x58,0xd6,0xe9,0x04,0xfe,0x01,0x01,0x77,0x39,0x05]
+
+v_or3_b32_e64_dpp v5, v1, v2, exec_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x58,0xd6,0xe9,0x04,0xfa,0x01,0x01,0x77,0x39,0x05]
+
+v_or3_b32_e64_dpp v5, v1, v2, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x58,0xd6,0xe9,0x04,0xf2,0x01,0x01,0x77,0x39,0x05]
+
+v_or3_b32_e64_dpp v5, v1, v2, -1 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x58,0xd6,0xe9,0x04,0x06,0x03,0x01,0x77,0x39,0x05]
+
+v_or3_b32_e64_dpp v5, v1, v2, 0.5 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: [0x05,0x00,0x58,0xd6,0xea,0x04,0xc2,0x03,0x01,0x77,0x39,0x05]
+
+v_or3_b32_e64_dpp v255, v255, v255, src_scc dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX12: [0xff,0x00,0x58,0xd6,0xe9,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
+
+v_or_b16_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x63,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_or_b16_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: [0x05,0x00,0x63,0xd7,0xea,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_or_b16_e64_dpp v255, v255, v255 dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX12: [0xff,0x00,0x63,0xd7,0xe9,0xfe,0x03,0x00,0xff,0x00,0x00,0x00]
+
+v_pack_b32_f16_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x11,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_pack_b32_f16_e64_dpp v5, |v1|, -v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x01,0x11,0xd7,0xe9,0x04,0x02,0x40,0x01,0x77,0x39,0x05]
+
+v_pack_b32_f16_e64_dpp v5, -v1, |v2| dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: [0x05,0x02,0x11,0xd7,0xea,0x04,0x02,0x20,0x01,0x77,0x39,0x05]
+
+v_pack_b32_f16_e64_dpp v255, -|v255|, -|v255| dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX12: [0xff,0x03,0x11,0xd7,0xe9,0xfe,0x03,0x60,0xff,0x00,0x00,0x00]
+
+v_perm_b32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x44,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_perm_b32_e64_dpp v5, v1, s2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x44,0xd6,0xe9,0x04,0x0c,0x04,0x01,0x77,0x39,0x05]
+
+v_perm_b32_e64_dpp v5, v1, 10, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x44,0xd6,0xe9,0x14,0x0d,0x04,0x01,0x77,0x39,0x05]
+
+v_perm_b32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x44,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_perm_b32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x44,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_perm_b32_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x44,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_perm_b32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x44,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_perm_b32_e64_dpp v5, v1, v2, ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x44,0xd6,0xe9,0x04,0xee,0x01,0x01,0x77,0x39,0x05]
+
+v_perm_b32_e64_dpp v5, v1, v2, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x44,0xd6,0xe9,0x04,0xfe,0x01,0x01,0x77,0x39,0x05]
+
+v_perm_b32_e64_dpp v5, v1, v2, exec_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x44,0xd6,0xe9,0x04,0xfa,0x01,0x01,0x77,0x39,0x05]
+
+v_perm_b32_e64_dpp v5, v1, v2, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x44,0xd6,0xe9,0x04,0xf2,0x01,0x01,0x77,0x39,0x05]
+
+v_perm_b32_e64_dpp v5, v1, v2, -1 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x44,0xd6,0xe9,0x04,0x06,0x03,0x01,0x77,0x39,0x05]
+
+v_perm_b32_e64_dpp v5, v1, v2, 0.5 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: [0x05,0x00,0x44,0xd6,0xea,0x04,0xc2,0x03,0x01,0x77,0x39,0x05]
+
+v_perm_b32_e64_dpp v255, v255, v255, src_scc dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX12: [0xff,0x00,0x44,0xd6,0xe9,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
+
+v_sad_hi_u8_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x23,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_sad_hi_u8_e64_dpp v5, v1, s2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x23,0xd6,0xe9,0x04,0x0c,0x04,0x01,0x77,0x39,0x05]
+
+v_sad_hi_u8_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x23,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_sad_hi_u8_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x23,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_sad_hi_u8_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x23,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_sad_hi_u8_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x23,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_sad_hi_u8_e64_dpp v5, v1, v2, ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x23,0xd6,0xe9,0x04,0xee,0x01,0x01,0x77,0x39,0x05]
+
+v_sad_hi_u8_e64_dpp v5, v1, v2, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x23,0xd6,0xe9,0x04,0xfe,0x01,0x01,0x77,0x39,0x05]
+
+v_sad_hi_u8_e64_dpp v5, v1, v2, exec_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x23,0xd6,0xe9,0x04,0xfa,0x01,0x01,0x77,0x39,0x05]
+
+v_sad_hi_u8_e64_dpp v5, v1, v2, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x23,0xd6,0xe9,0x04,0xf2,0x01,0x01,0x77,0x39,0x05]
+
+v_sad_hi_u8_e64_dpp v5, v1, v2, -1 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x23,0xd6,0xe9,0x04,0x06,0x03,0x01,0x77,0x39,0x05]
+
+v_sad_hi_u8_e64_dpp v5, v1, v2, 0.5 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: [0x05,0x00,0x23,0xd6,0xea,0x04,0xc2,0x03,0x01,0x77,0x39,0x05]
+
+v_sad_hi_u8_e64_dpp v255, v255, v255, src_scc clamp dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX12: [0xff,0x80,0x23,0xd6,0xe9,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
+
+v_sad_u16_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x24,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_sad_u16_e64_dpp v5, v1, s2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x24,0xd6,0xe9,0x04,0x0c,0x04,0x01,0x77,0x39,0x05]
+
+v_sad_u16_e64_dpp v5, v1, 10, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x24,0xd6,0xe9,0x14,0x0d,0x04,0x01,0x77,0x39,0x05]
+
+v_sad_u16_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x24,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_sad_u16_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x24,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_sad_u16_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x24,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_sad_u16_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x24,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_sad_u16_e64_dpp v5, v1, v2, ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x24,0xd6,0xe9,0x04,0xee,0x01,0x01,0x77,0x39,0x05]
+
+v_sad_u16_e64_dpp v5, v1, v2, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x24,0xd6,0xe9,0x04,0xfe,0x01,0x01,0x77,0x39,0x05]
+
+v_sad_u16_e64_dpp v5, v1, v2, exec_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x24,0xd6,0xe9,0x04,0xfa,0x01,0x01,0x77,0x39,0x05]
+
+v_sad_u16_e64_dpp v5, v1, v2, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x24,0xd6,0xe9,0x04,0xf2,0x01,0x01,0x77,0x39,0x05]
+
+v_sad_u16_e64_dpp v5, v1, v2, -1 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x24,0xd6,0xe9,0x04,0x06,0x03,0x01,0x77,0x39,0x05]
+
+v_sad_u16_e64_dpp v5, v1, v2, 0.5 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: [0x05,0x00,0x24,0xd6,0xea,0x04,0xc2,0x03,0x01,0x77,0x39,0x05]
+
+v_sad_u16_e64_dpp v255, v255, v255, src_scc clamp dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX12: [0xff,0x80,0x24,0xd6,0xe9,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
+
+v_sad_u32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x25,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_sad_u32_e64_dpp v5, v1, s2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x25,0xd6,0xe9,0x04,0x0c,0x04,0x01,0x77,0x39,0x05]
+
+v_sad_u32_e64_dpp v5, v1, 10, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x25,0xd6,0xe9,0x14,0x0d,0x04,0x01,0x77,0x39,0x05]
+
+v_sad_u32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x25,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_sad_u32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x25,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_sad_u32_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x25,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_sad_u32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x25,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_sad_u32_e64_dpp v5, v1, v2, ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x25,0xd6,0xe9,0x04,0xee,0x01,0x01,0x77,0x39,0x05]
+
+v_sad_u32_e64_dpp v5, v1, v2, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x25,0xd6,0xe9,0x04,0xfe,0x01,0x01,0x77,0x39,0x05]
+
+v_sad_u32_e64_dpp v5, v1, v2, exec_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x25,0xd6,0xe9,0x04,0xfa,0x01,0x01,0x77,0x39,0x05]
+
+v_sad_u32_e64_dpp v5, v1, v2, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x25,0xd6,0xe9,0x04,0xf2,0x01,0x01,0x77,0x39,0x05]
+
+v_sad_u32_e64_dpp v5, v1, v2, -1 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x25,0xd6,0xe9,0x04,0x06,0x03,0x01,0x77,0x39,0x05]
+
+v_sad_u32_e64_dpp v5, v1, v2, 0.5 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: [0x05,0x00,0x25,0xd6,0xea,0x04,0xc2,0x03,0x01,0x77,0x39,0x05]
+
+v_sad_u32_e64_dpp v255, v255, v255, src_scc clamp dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX12: [0xff,0x80,0x25,0xd6,0xe9,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
+
+v_sad_u8_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x22,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_sad_u8_e64_dpp v5, v1, s2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x22,0xd6,0xe9,0x04,0x0c,0x04,0x01,0x77,0x39,0x05]
+
+v_sad_u8_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x22,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_sad_u8_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x22,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_sad_u8_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x22,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_sad_u8_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x22,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_sad_u8_e64_dpp v5, v1, v2, ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x22,0xd6,0xe9,0x04,0xee,0x01,0x01,0x77,0x39,0x05]
+
+v_sad_u8_e64_dpp v5, v1, v2, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x22,0xd6,0xe9,0x04,0xfe,0x01,0x01,0x77,0x39,0x05]
+
+v_sad_u8_e64_dpp v5, v1, v2, exec_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x22,0xd6,0xe9,0x04,0xfa,0x01,0x01,0x77,0x39,0x05]
+
+v_sad_u8_e64_dpp v5, v1, v2, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x22,0xd6,0xe9,0x04,0xf2,0x01,0x01,0x77,0x39,0x05]
+
+v_sad_u8_e64_dpp v5, v1, v2, -1 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x22,0xd6,0xe9,0x04,0x06,0x03,0x01,0x77,0x39,0x05]
+
+v_sad_u8_e64_dpp v5, v1, v2, 0.5 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: [0x05,0x00,0x22,0xd6,0xea,0x04,0xc2,0x03,0x01,0x77,0x39,0x05]
+
+v_sad_u8_e64_dpp v255, v255, v255, src_scc clamp dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX12: [0xff,0x80,0x22,0xd6,0xe9,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
+
+v_sub_co_u32_e64_dpp v5, s6, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// W32: [0x05,0x06,0x01,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32_e64_dpp v5, s6, v1, s2 dpp8:[7,6,5,4,3,2,1,0]
+// W32: [0x05,0x06,0x01,0xd7,0xe9,0x04,0x00,0x00,0x01,0x77,0x39,0x05]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32_e64_dpp v5, s105, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// W32: [0x05,0x69,0x01,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32_e64_dpp v5, vcc_lo, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// W32: [0x05,0x6a,0x01,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32_e64_dpp v5, vcc_hi, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// W32: [0x05,0x6b,0x01,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32_e64_dpp v5, ttmp15, v1, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// W32: [0x05,0x7b,0x01,0xd7,0xea,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32_e64_dpp v5, s[12:13], v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// W64: [0x05,0x0c,0x01,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32_e64_dpp v5, s[12:13], v1, s2 dpp8:[7,6,5,4,3,2,1,0]
+// W64: [0x05,0x0c,0x01,0xd7,0xe9,0x04,0x00,0x00,0x01,0x77,0x39,0x05]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32_e64_dpp v5, s[104:105], v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// W64: [0x05,0x68,0x01,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32_e64_dpp v5, vcc, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// W64: [0x05,0x6a,0x01,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32_e64_dpp v5, ttmp[14:15], v1, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// W64: [0x05,0x7a,0x01,0xd7,0xea,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_sub_co_u32_e64_dpp v255, null, v255, v255 clamp dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX12: [0xff,0xfc,0x01,0xd7,0xe9,0xfe,0x03,0x00,0xff,0x00,0x00,0x00]
+
+v_sub_nc_i16_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x0e,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_sub_nc_i16_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: [0x05,0x00,0x0e,0xd7,0xea,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_sub_nc_i16_e64_dpp v255, v255, v255 clamp dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX12: [0xff,0x80,0x0e,0xd7,0xe9,0xfe,0x03,0x00,0xff,0x00,0x00,0x00]
+
+v_sub_nc_i32_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x25,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_sub_nc_i32_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: [0x05,0x00,0x25,0xd7,0xea,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_sub_nc_i32_e64_dpp v255, v255, v255 clamp dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX12: [0xff,0x80,0x25,0xd7,0xe9,0xfe,0x03,0x00,0xff,0x00,0x00,0x00]
+
+v_sub_nc_u16_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x04,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_sub_nc_u16_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: [0x05,0x00,0x04,0xd7,0xea,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_sub_nc_u16_e64_dpp v255, v255, v255 clamp dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX12: [0xff,0x80,0x04,0xd7,0xe9,0xfe,0x03,0x00,0xff,0x00,0x00,0x00]
+
+v_subrev_co_u32_e64_dpp v5, s6, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// W32: [0x05,0x06,0x02,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32_e64_dpp v5, s6, v1, s2 dpp8:[7,6,5,4,3,2,1,0]
+// W32: [0x05,0x06,0x02,0xd7,0xe9,0x04,0x00,0x00,0x01,0x77,0x39,0x05]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32_e64_dpp v5, s105, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// W32: [0x05,0x69,0x02,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32_e64_dpp v5, vcc_lo, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// W32: [0x05,0x6a,0x02,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32_e64_dpp v5, vcc_hi, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// W32: [0x05,0x6b,0x02,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32_e64_dpp v5, ttmp15, v1, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// W32: [0x05,0x7b,0x02,0xd7,0xea,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+// W64-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32_e64_dpp v5, s[12:13], v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// W64: [0x05,0x0c,0x02,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32_e64_dpp v5, s[104:105], v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// W64: [0x05,0x68,0x02,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32_e64_dpp v5, s[104:105], v1, s2 dpp8:[7,6,5,4,3,2,1,0]
+// W64: [0x05,0x68,0x02,0xd7,0xe9,0x04,0x00,0x00,0x01,0x77,0x39,0x05]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32_e64_dpp v5, vcc, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// W64: [0x05,0x6a,0x02,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32_e64_dpp v5, ttmp[14:15], v1, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// W64: [0x05,0x7a,0x02,0xd7,0xea,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+// W32-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_subrev_co_u32_e64_dpp v255, null, v255, v255 clamp dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX12: [0xff,0xfc,0x02,0xd7,0xe9,0xfe,0x03,0x00,0xff,0x00,0x00,0x00]
+
+v_xad_u32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x45,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_xad_u32_e64_dpp v5, v1, s2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x45,0xd6,0xe9,0x04,0x0c,0x04,0x01,0x77,0x39,0x05]
+
+v_xad_u32_e64_dpp v5, v1, 10, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x45,0xd6,0xe9,0x14,0x0d,0x04,0x01,0x77,0x39,0x05]
+
+v_xad_u32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x45,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_xad_u32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x45,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_xad_u32_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x45,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_xad_u32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x45,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_xad_u32_e64_dpp v5, v1, v2, ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x45,0xd6,0xe9,0x04,0xee,0x01,0x01,0x77,0x39,0x05]
+
+v_xad_u32_e64_dpp v5, v1, v2, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x45,0xd6,0xe9,0x04,0xfe,0x01,0x01,0x77,0x39,0x05]
+
+v_xad_u32_e64_dpp v5, v1, v2, exec_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x45,0xd6,0xe9,0x04,0xfa,0x01,0x01,0x77,0x39,0x05]
+
+v_xad_u32_e64_dpp v5, v1, v2, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x45,0xd6,0xe9,0x04,0xf2,0x01,0x01,0x77,0x39,0x05]
+
+v_xad_u32_e64_dpp v5, v1, v2, -1 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x45,0xd6,0xe9,0x04,0x06,0x03,0x01,0x77,0x39,0x05]
+
+v_xad_u32_e64_dpp v5, v1, v2, 0.5 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: [0x05,0x00,0x45,0xd6,0xea,0x04,0xc2,0x03,0x01,0x77,0x39,0x05]
+
+v_xad_u32_e64_dpp v255, v255, v255, src_scc dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX12: [0xff,0x00,0x45,0xd6,0xe9,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
+
+v_xor3_b32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x40,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_xor3_b32_e64_dpp v5, v1, s2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x40,0xd6,0xe9,0x04,0x0c,0x04,0x01,0x77,0x39,0x05]
+
+v_xor3_b32_e64_dpp v5, v1, 10, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x40,0xd6,0xe9,0x14,0x0d,0x04,0x01,0x77,0x39,0x05]
+
+v_xor3_b32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x40,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_xor3_b32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x40,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_xor3_b32_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x40,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_xor3_b32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x40,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_xor3_b32_e64_dpp v5, v1, v2, ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x40,0xd6,0xe9,0x04,0xee,0x01,0x01,0x77,0x39,0x05]
+
+v_xor3_b32_e64_dpp v5, v1, v2, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x40,0xd6,0xe9,0x04,0xfe,0x01,0x01,0x77,0x39,0x05]
+
+v_xor3_b32_e64_dpp v5, v1, v2, exec_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x40,0xd6,0xe9,0x04,0xfa,0x01,0x01,0x77,0x39,0x05]
+
+v_xor3_b32_e64_dpp v5, v1, v2, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x40,0xd6,0xe9,0x04,0xf2,0x01,0x01,0x77,0x39,0x05]
+
+v_xor3_b32_e64_dpp v5, v1, v2, -1 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x40,0xd6,0xe9,0x04,0x06,0x03,0x01,0x77,0x39,0x05]
+
+v_xor3_b32_e64_dpp v5, v1, v2, 0.5 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: [0x05,0x00,0x40,0xd6,0xea,0x04,0xc2,0x03,0x01,0x77,0x39,0x05]
+
+v_xor3_b32_e64_dpp v255, v255, v255, src_scc dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX12: [0xff,0x00,0x40,0xd6,0xe9,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
+
+v_xor_b16_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x64,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_xor_b16_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: [0x05,0x00,0x64,0xd7,0xea,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_xor_b16_e64_dpp v255, v255, v255 dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX12: [0xff,0x00,0x64,0xd7,0xe9,0xfe,0x03,0x00,0xff,0x00,0x00,0x00]
+
+v_add_nc_i16_e64_dpp v5, v1, v2 op_sel:[1,1,1] dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x58,0x0d,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_add_nc_i16_e64_dpp v5, v1, v2 op_sel:[1,0,0] dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x08,0x0d,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_add_nc_i16_e64_dpp v5, v1, v2 op_sel:[0,1,0] dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x10,0x0d,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_add_nc_i16_e64_dpp v255, v255, v255 op_sel:[0,0,1] clamp dpp8:[0,0,0,0,0,0,0,0] fi:1
+// GFX12: [0xff,0xc0,0x0d,0xd7,0xea,0xfe,0x03,0x00,0xff,0x00,0x00,0x00]
+
+v_add_nc_u16_e64_dpp v5, v1, v2 op_sel:[1,1,1] dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x58,0x03,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_add_nc_u16_e64_dpp v5, v1, v2 op_sel:[1,0,0] dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x08,0x03,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_add_nc_u16_e64_dpp v5, v1, v2 op_sel:[0,1,0] dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x10,0x03,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_add_nc_u16_e64_dpp v255, v255, v255 op_sel:[0,0,1] clamp dpp8:[0,0,0,0,0,0,0,0] fi:1
+// GFX12: [0xff,0xc0,0x03,0xd7,0xea,0xfe,0x03,0x00,0xff,0x00,0x00,0x00]
+
+v_cvt_pk_norm_i16_f16_e64_dpp v5, -v1, |v2| op_sel:[1,0,0] dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x0a,0x12,0xd7,0xe9,0x04,0x02,0x20,0x01,0x77,0x39,0x05]
+
+v_cvt_pk_norm_i16_f16_e64_dpp v255, -|v255|, -|v255| op_sel:[0,1,0] dpp8:[0,0,0,0,0,0,0,0] fi:1
+// GFX12: [0xff,0x13,0x12,0xd7,0xea,0xfe,0x03,0x60,0xff,0x00,0x00,0x00]
+
+v_cvt_pk_norm_u16_f16_e64_dpp v5, -v1, |v2| op_sel:[1,0,0] dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x0a,0x13,0xd7,0xe9,0x04,0x02,0x20,0x01,0x77,0x39,0x05]
+
+v_cvt_pk_norm_u16_f16_e64_dpp v255, -|v255|, -|v255| op_sel:[0,1,0] dpp8:[0,0,0,0,0,0,0,0] fi:1
+// GFX12: [0xff,0x13,0x13,0xd7,0xea,0xfe,0x03,0x60,0xff,0x00,0x00,0x00]
+
+v_div_fixup_f16_e64_dpp v5, -v1, v2, |exec_lo| op_sel:[1,1,1,1] dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x7c,0x54,0xd6,0xe9,0x04,0xfa,0x21,0x01,0x77,0x39,0x05]
+
+v_div_fixup_f16_e64_dpp v5, -|v1|, -|v2|, null op_sel:[1,0,0,0] dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x0b,0x54,0xd6,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05]
+
+v_div_fixup_f16_e64_dpp v5, -|v1|, v2, -|-1| op_sel:[0,1,0,0] dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x15,0x54,0xd6,0xe9,0x04,0x06,0xa3,0x01,0x77,0x39,0x05]
+
+v_div_fixup_f16_e64_dpp v5, v1, -|v2|, -|0.5| op_sel:[0,0,1,0] dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x26,0x54,0xd6,0xe9,0x04,0xc2,0xc3,0x01,0x77,0x39,0x05]
+
+v_div_fixup_f16_e64_dpp v255, -|v255|, -|v255|, -|src_scc| op_sel:[0,0,0,1] clamp dpp8:[0,0,0,0,0,0,0,0] fi:1
+// GFX12: [0xff,0xc7,0x54,0xd6,0xea,0xfe,0xf7,0xe3,0xff,0x00,0x00,0x00]
+
+v_fma_f16_e64_dpp v5, -v1, v2, |exec_lo| op_sel:[1,1,1,1] dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x7c,0x48,0xd6,0xe9,0x04,0xfa,0x21,0x01,0x77,0x39,0x05]
+
+v_fma_f16_e64_dpp v5, -|v1|, -|v2|, null op_sel:[1,0,0,0] dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x0b,0x48,0xd6,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05]
+
+v_fma_f16_e64_dpp v5, -|v1|, v2, -|-1| op_sel:[0,1,0,0] dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x15,0x48,0xd6,0xe9,0x04,0x06,0xa3,0x01,0x77,0x39,0x05]
+
+v_fma_f16_e64_dpp v5, v1, -|v2|, -|0.5| op_sel:[0,0,1,0] dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x26,0x48,0xd6,0xe9,0x04,0xc2,0xc3,0x01,0x77,0x39,0x05]
+
+v_fma_f16_e64_dpp v255, -|v255|, -|v255|, -|src_scc| op_sel:[0,0,0,1] clamp dpp8:[0,0,0,0,0,0,0,0] fi:1
+// GFX12: [0xff,0xc7,0x48,0xd6,0xea,0xfe,0xf7,0xe3,0xff,0x00,0x00,0x00]
+
+v_mad_i16_e64_dpp v5, v1, v2, exec_hi op_sel:[1,1,1,1] dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x78,0x53,0xd6,0xe9,0x04,0xfe,0x01,0x01,0x77,0x39,0x05]
+
+v_mad_i16_e64_dpp v5, v1, v2, exec_lo op_sel:[1,0,0,0] dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x08,0x53,0xd6,0xe9,0x04,0xfa,0x01,0x01,0x77,0x39,0x05]
+
+v_mad_i16_e64_dpp v5, v1, v2, null op_sel:[0,1,0,0] dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x10,0x53,0xd6,0xe9,0x04,0xf2,0x01,0x01,0x77,0x39,0x05]
+
+v_mad_i16_e64_dpp v5, v1, v2, -1 op_sel:[0,0,1,0] dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x20,0x53,0xd6,0xe9,0x04,0x06,0x03,0x01,0x77,0x39,0x05]
+
+v_mad_i16_e64_dpp v255, v255, v255, src_scc op_sel:[0,0,0,1] clamp dpp8:[0,0,0,0,0,0,0,0] fi:1
+// GFX12: [0xff,0xc0,0x53,0xd6,0xea,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
+
+v_mad_i32_i16_e64_dpp v5, v1, v2, 0.5 op_sel:[1,0,0,0] dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x08,0x5a,0xd6,0xe9,0x04,0xc2,0x03,0x01,0x77,0x39,0x05]
+
+v_mad_i32_i16_e64_dpp v255, v255, v255, src_scc op_sel:[0,1,0,0] clamp dpp8:[0,0,0,0,0,0,0,0] fi:1
+// GFX12: [0xff,0x90,0x5a,0xd6,0xea,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
+
+v_mad_u16_e64_dpp v5, v1, v2, exec_hi op_sel:[1,1,1,1] dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x78,0x41,0xd6,0xe9,0x04,0xfe,0x01,0x01,0x77,0x39,0x05]
+
+v_mad_u16_e64_dpp v5, v1, v2, exec_lo op_sel:[1,0,0,0] dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x08,0x41,0xd6,0xe9,0x04,0xfa,0x01,0x01,0x77,0x39,0x05]
+
+v_mad_u16_e64_dpp v5, v1, v2, null op_sel:[0,1,0,0] dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x10,0x41,0xd6,0xe9,0x04,0xf2,0x01,0x01,0x77,0x39,0x05]
+
+v_mad_u16_e64_dpp v5, v1, v2, -1 op_sel:[0,0,1,0] dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x20,0x41,0xd6,0xe9,0x04,0x06,0x03,0x01,0x77,0x39,0x05]
+
+v_mad_u16_e64_dpp v255, v255, v255, src_scc op_sel:[0,0,0,1] clamp dpp8:[0,0,0,0,0,0,0,0] fi:1
+// GFX12: [0xff,0xc0,0x41,0xd6,0xea,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
+
+v_mad_u32_u16_e64_dpp v5, v1, v2, 0.5 op_sel:[1,0,0,0] dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x08,0x59,0xd6,0xe9,0x04,0xc2,0x03,0x01,0x77,0x39,0x05]
+
+v_mad_u32_u16_e64_dpp v255, v255, v255, src_scc op_sel:[0,1,0,0] clamp dpp8:[0,0,0,0,0,0,0,0] fi:1
+// GFX12: [0xff,0x90,0x59,0xd6,0xea,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
+
+v_max3_num_f16_e64_dpp v5, -v1, v2, |exec_lo| op_sel:[1,1,1,1] dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x7c,0x2c,0xd6,0xe9,0x04,0xfa,0x21,0x01,0x77,0x39,0x05]
+
+v_max3_num_f16_e64_dpp v5, -|v1|, -|v2|, null op_sel:[1,0,0,0] dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x0b,0x2c,0xd6,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05]
+
+v_max3_num_f16_e64_dpp v5, -|v1|, v2, -|-1| op_sel:[0,1,0,0] dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x15,0x2c,0xd6,0xe9,0x04,0x06,0xa3,0x01,0x77,0x39,0x05]
+
+v_max3_num_f16_e64_dpp v5, v1, -|v2|, -|0.5| op_sel:[0,0,1,0] dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x26,0x2c,0xd6,0xe9,0x04,0xc2,0xc3,0x01,0x77,0x39,0x05]
+
+v_max3_num_f16_e64_dpp v255, -|v255|, -|v255|, -|src_scc| op_sel:[0,0,0,1] clamp dpp8:[0,0,0,0,0,0,0,0] fi:1
+// GFX12: [0xff,0xc7,0x2c,0xd6,0xea,0xfe,0xf7,0xe3,0xff,0x00,0x00,0x00]
+
+v_max3_i16_e64_dpp v5, v1, v2, exec_hi op_sel:[1,1,1,1] dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x78,0x4d,0xd6,0xe9,0x04,0xfe,0x01,0x01,0x77,0x39,0x05]
+
+v_max3_i16_e64_dpp v5, v1, v2, exec_lo op_sel:[1,0,0,0] dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x08,0x4d,0xd6,0xe9,0x04,0xfa,0x01,0x01,0x77,0x39,0x05]
+
+v_max3_i16_e64_dpp v5, v1, v2, null op_sel:[0,1,0,0] dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x10,0x4d,0xd6,0xe9,0x04,0xf2,0x01,0x01,0x77,0x39,0x05]
+
+v_max3_i16_e64_dpp v5, v1, v2, -1 op_sel:[0,0,1,0] dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x20,0x4d,0xd6,0xe9,0x04,0x06,0x03,0x01,0x77,0x39,0x05]
+
+v_max3_i16_e64_dpp v255, v255, v255, src_scc op_sel:[0,0,0,1] dpp8:[0,0,0,0,0,0,0,0] fi:1
+// GFX12: [0xff,0x40,0x4d,0xd6,0xea,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
+
+v_max3_u16_e64_dpp v5, v1, v2, exec_hi op_sel:[1,1,1,1] dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x78,0x4e,0xd6,0xe9,0x04,0xfe,0x01,0x01,0x77,0x39,0x05]
+
+v_max3_u16_e64_dpp v5, v1, v2, exec_lo op_sel:[1,0,0,0] dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x08,0x4e,0xd6,0xe9,0x04,0xfa,0x01,0x01,0x77,0x39,0x05]
+
+v_max3_u16_e64_dpp v5, v1, v2, null op_sel:[0,1,0,0] dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x10,0x4e,0xd6,0xe9,0x04,0xf2,0x01,0x01,0x77,0x39,0x05]
+
+v_max3_u16_e64_dpp v5, v1, v2, -1 op_sel:[0,0,1,0] dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x20,0x4e,0xd6,0xe9,0x04,0x06,0x03,0x01,0x77,0x39,0x05]
+
+v_max3_u16_e64_dpp v255, v255, v255, src_scc op_sel:[0,0,0,1] dpp8:[0,0,0,0,0,0,0,0] fi:1
+// GFX12: [0xff,0x40,0x4e,0xd6,0xea,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
+
+v_med3_num_f16_e64_dpp v5, -v1, v2, |exec_lo| op_sel:[1,1,1,1] dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x7c,0x32,0xd6,0xe9,0x04,0xfa,0x21,0x01,0x77,0x39,0x05]
+
+v_med3_num_f16_e64_dpp v5, -|v1|, -|v2|, null op_sel:[1,0,0,0] dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x0b,0x32,0xd6,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05]
+
+v_med3_num_f16_e64_dpp v5, -|v1|, v2, -|-1| op_sel:[0,1,0,0] dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x15,0x32,0xd6,0xe9,0x04,0x06,0xa3,0x01,0x77,0x39,0x05]
+
+v_med3_num_f16_e64_dpp v5, v1, -|v2|, -|0.5| op_sel:[0,0,1,0] dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x26,0x32,0xd6,0xe9,0x04,0xc2,0xc3,0x01,0x77,0x39,0x05]
+
+v_med3_num_f16_e64_dpp v255, -|v255|, -|v255|, -|src_scc| op_sel:[0,0,0,1] clamp dpp8:[0,0,0,0,0,0,0,0] fi:1
+// GFX12: [0xff,0xc7,0x32,0xd6,0xea,0xfe,0xf7,0xe3,0xff,0x00,0x00,0x00]
+
+v_med3_i16_e64_dpp v5, v1, v2, exec_hi op_sel:[1,1,1,1] dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x78,0x50,0xd6,0xe9,0x04,0xfe,0x01,0x01,0x77,0x39,0x05]
+
+v_med3_i16_e64_dpp v5, v1, v2, exec_lo op_sel:[1,0,0,0] dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x08,0x50,0xd6,0xe9,0x04,0xfa,0x01,0x01,0x77,0x39,0x05]
+
+v_med3_i16_e64_dpp v5, v1, v2, null op_sel:[0,1,0,0] dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x10,0x50,0xd6,0xe9,0x04,0xf2,0x01,0x01,0x77,0x39,0x05]
+
+v_med3_i16_e64_dpp v5, v1, v2, -1 op_sel:[0,0,1,0] dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x20,0x50,0xd6,0xe9,0x04,0x06,0x03,0x01,0x77,0x39,0x05]
+
+v_med3_i16_e64_dpp v255, v255, v255, src_scc op_sel:[0,0,0,1] dpp8:[0,0,0,0,0,0,0,0] fi:1
+// GFX12: [0xff,0x40,0x50,0xd6,0xea,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
+
+v_med3_u16_e64_dpp v5, v1, v2, exec_hi op_sel:[1,1,1,1] dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x78,0x51,0xd6,0xe9,0x04,0xfe,0x01,0x01,0x77,0x39,0x05]
+
+v_med3_u16_e64_dpp v5, v1, v2, exec_lo op_sel:[1,0,0,0] dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x08,0x51,0xd6,0xe9,0x04,0xfa,0x01,0x01,0x77,0x39,0x05]
+
+v_med3_u16_e64_dpp v5, v1, v2, null op_sel:[0,1,0,0] dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x10,0x51,0xd6,0xe9,0x04,0xf2,0x01,0x01,0x77,0x39,0x05]
+
+v_med3_u16_e64_dpp v5, v1, v2, -1 op_sel:[0,0,1,0] dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x20,0x51,0xd6,0xe9,0x04,0x06,0x03,0x01,0x77,0x39,0x05]
+
+v_med3_u16_e64_dpp v255, v255, v255, src_scc op_sel:[0,0,0,1] dpp8:[0,0,0,0,0,0,0,0] fi:1
+// GFX12: [0xff,0x40,0x51,0xd6,0xea,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
+
+v_min3_num_f16_e64_dpp v5, -v1, v2, |exec_lo| op_sel:[1,1,1,1] dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x7c,0x2b,0xd6,0xe9,0x04,0xfa,0x21,0x01,0x77,0x39,0x05]
+
+v_min3_num_f16_e64_dpp v5, -|v1|, -|v2|, null op_sel:[1,0,0,0] dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x0b,0x2b,0xd6,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05]
+
+v_min3_num_f16_e64_dpp v5, -|v1|, v2, -|-1| op_sel:[0,1,0,0] dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x15,0x2b,0xd6,0xe9,0x04,0x06,0xa3,0x01,0x77,0x39,0x05]
+
+v_min3_num_f16_e64_dpp v5, v1, -|v2|, -|0.5| op_sel:[0,0,1,0] dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x26,0x2b,0xd6,0xe9,0x04,0xc2,0xc3,0x01,0x77,0x39,0x05]
+
+v_min3_num_f16_e64_dpp v255, -|v255|, -|v255|, -|src_scc| op_sel:[0,0,0,1] clamp dpp8:[0,0,0,0,0,0,0,0] fi:1
+// GFX12: [0xff,0xc7,0x2b,0xd6,0xea,0xfe,0xf7,0xe3,0xff,0x00,0x00,0x00]
+
+v_min3_i16_e64_dpp v5, v1, v2, exec_hi op_sel:[1,1,1,1] dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x78,0x4a,0xd6,0xe9,0x04,0xfe,0x01,0x01,0x77,0x39,0x05]
+
+v_min3_i16_e64_dpp v5, v1, v2, exec_lo op_sel:[1,0,0,0] dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x08,0x4a,0xd6,0xe9,0x04,0xfa,0x01,0x01,0x77,0x39,0x05]
+
+v_min3_i16_e64_dpp v5, v1, v2, null op_sel:[0,1,0,0] dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x10,0x4a,0xd6,0xe9,0x04,0xf2,0x01,0x01,0x77,0x39,0x05]
+
+v_min3_i16_e64_dpp v5, v1, v2, -1 op_sel:[0,0,1,0] dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x20,0x4a,0xd6,0xe9,0x04,0x06,0x03,0x01,0x77,0x39,0x05]
+
+v_min3_i16_e64_dpp v255, v255, v255, src_scc op_sel:[0,0,0,1] dpp8:[0,0,0,0,0,0,0,0] fi:1
+// GFX12: [0xff,0x40,0x4a,0xd6,0xea,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
+
+v_min3_u16_e64_dpp v5, v1, v2, exec_hi op_sel:[1,1,1,1] dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x78,0x4b,0xd6,0xe9,0x04,0xfe,0x01,0x01,0x77,0x39,0x05]
+
+v_min3_u16_e64_dpp v5, v1, v2, exec_lo op_sel:[1,0,0,0] dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x08,0x4b,0xd6,0xe9,0x04,0xfa,0x01,0x01,0x77,0x39,0x05]
+
+v_min3_u16_e64_dpp v5, v1, v2, null op_sel:[0,1,0,0] dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x10,0x4b,0xd6,0xe9,0x04,0xf2,0x01,0x01,0x77,0x39,0x05]
+
+v_min3_u16_e64_dpp v5, v1, v2, -1 op_sel:[0,0,1,0] dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x20,0x4b,0xd6,0xe9,0x04,0x06,0x03,0x01,0x77,0x39,0x05]
+
+v_min3_u16_e64_dpp v255, v255, v255, src_scc op_sel:[0,0,0,1] dpp8:[0,0,0,0,0,0,0,0] fi:1
+// GFX12: [0xff,0x40,0x4b,0xd6,0xea,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
+
+v_pack_b32_f16_e64_dpp v5, -v1, |v2| op_sel:[1,0,0] dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x0a,0x11,0xd7,0xe9,0x04,0x02,0x20,0x01,0x77,0x39,0x05]
+
+v_pack_b32_f16_e64_dpp v255, -|v255|, -|v255| op_sel:[0,1,0] dpp8:[0,0,0,0,0,0,0,0] fi:1
+// GFX12: [0xff,0x13,0x11,0xd7,0xea,0xfe,0x03,0x60,0xff,0x00,0x00,0x00]
+
+v_sub_nc_i16_e64_dpp v5, v1, v2 op_sel:[1,1,1] dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x58,0x0e,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_sub_nc_i16_e64_dpp v5, v1, v2 op_sel:[1,0,0] dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x08,0x0e,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_sub_nc_i16_e64_dpp v5, v1, v2 op_sel:[0,1,0] dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x10,0x0e,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_sub_nc_i16_e64_dpp v255, v255, v255 op_sel:[0,0,1] clamp dpp8:[0,0,0,0,0,0,0,0] fi:1
+// GFX12: [0xff,0xc0,0x0e,0xd7,0xea,0xfe,0x03,0x00,0xff,0x00,0x00,0x00]
+
+v_sub_nc_u16_e64_dpp v5, v1, v2 op_sel:[1,1,1] dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x58,0x04,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_sub_nc_u16_e64_dpp v5, v1, v2 op_sel:[1,0,0] dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x08,0x04,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_sub_nc_u16_e64_dpp v5, v1, v2 op_sel:[0,1,0] dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x10,0x04,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_sub_nc_u16_e64_dpp v255, v255, v255 op_sel:[0,0,1] clamp dpp8:[0,0,0,0,0,0,0,0] fi:1
+// GFX12: [0xff,0xc0,0x04,0xd7,0xea,0xfe,0x03,0x00,0xff,0x00,0x00,0x00]
+
+v_dot2_f16_f16_e64_dpp v0, v1, v2, v3 dpp8:[0,1,2,3,4,4,4,4]
+// GFX12: [0x00,0x00,0x66,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x88,0x46,0x92]
+
+v_dot2_f16_f16_e64_dpp v0, v1, v2, v3 op_sel:[1,1,0,0] dpp8:[0,1,2,3,4,4,4,4]
+// GFX12-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid op_sel operand
+
+v_dot2_f16_f16_e64_dpp v0, s1, v2, v3 dpp8:[0,1,2,3,4,4,4,4]
+// GFX12-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_dot2_f16_f16_e64_dpp v0, v1, s2, v3 dpp8:[0,1,2,3,4,4,4,4]
+// GFX12: [0x00,0x00,0x66,0xd6,0xe9,0x04,0x0c,0x04,0x01,0x88,0x46,0x92]
+
+v_dot2_f16_f16_e64_dpp v0, v1, v2, v3 op_sel:[0,0,1,1] dpp8:[0,1,2,3,4,4,4,4]
+// GFX12: [0x00,0x60,0x66,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x88,0x46,0x92]
+
+v_dot2_f16_f16_e64_dpp v0, |v1|, -v2, -|s3| op_sel:[0,0,1,1] dpp8:[0,1,2,3,4,4,4,4]
+// GFX12: [0x00,0x65,0x66,0xd6,0xe9,0x04,0x0e,0xc0,0x01,0x88,0x46,0x92]
+
+v_dot2_f16_f16_e64_dpp v5, v1, v2, 0.5 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x66,0xd6,0xe9,0x04,0xc2,0x03,0x01,0x77,0x39,0x05]
+
+v_dot2_bf16_bf16_e64_dpp v0, v1, v2, v3 dpp8:[0,1,2,3,4,4,4,4]
+// GFX12: [0x00,0x00,0x67,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x88,0x46,0x92]
+
+v_dot2_bf16_bf16_e64_dpp v0, v1, v2, v3 op_sel:[1,1,0,0] dpp8:[0,1,2,3,4,4,4,4]
+// GFX12-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid op_sel operand
+
+v_dot2_bf16_bf16_e64_dpp v0, s1, v2, v3 dpp8:[0,1,2,3,4,4,4,4]
+// GFX12-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
+
+v_dot2_bf16_bf16_e64_dpp v0, v1, s2, v3 dpp8:[0,1,2,3,4,4,4,4]
+// GFX12: [0x00,0x00,0x67,0xd6,0xe9,0x04,0x0c,0x04,0x01,0x88,0x46,0x92]
+
+v_dot2_bf16_bf16_e64_dpp v0, v1, v2, v3 op_sel:[0,0,1,1] dpp8:[0,1,2,3,4,4,4,4]
+// GFX12: [0x00,0x60,0x67,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x88,0x46,0x92]
+
+v_dot2_bf16_bf16_e64_dpp v0, |v1|, -v2, -|s3| op_sel:[0,0,1,1] dpp8:[0,1,2,3,4,4,4,4]
+// GFX12: [0x00,0x65,0x67,0xd6,0xe9,0x04,0x0e,0xc0,0x01,0x88,0x46,0x92]
+
+v_dot2_bf16_bf16_e64_dpp v5, v1, v2, 0 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x67,0xd6,0xe9,0x04,0x02,0x02,0x01,0x77,0x39,0x05]
+
+v_minimum_f32 v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x65,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_minimum_f32 v5, |v1|, -v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x01,0x65,0xd7,0xe9,0x04,0x02,0x40,0x01,0x77,0x39,0x05]
+
+v_minimum_f32 v5, -v1, |v2| dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: [0x05,0x02,0x65,0xd7,0xea,0x04,0x02,0x20,0x01,0x77,0x39,0x05]
+
+v_minimum_f32 v255, -|v255|, -|v255| dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX12: [0xff,0x03,0x65,0xd7,0xe9,0xfe,0x03,0x60,0xff,0x00,0x00,0x00]
+
+v_maximum_f32 v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x66,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_maximum_f32 v5, |v1|, -v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x01,0x66,0xd7,0xe9,0x04,0x02,0x40,0x01,0x77,0x39,0x05]
+
+v_maximum_f32 v5, -v1, |v2| dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: [0x05,0x02,0x66,0xd7,0xea,0x04,0x02,0x20,0x01,0x77,0x39,0x05]
+
+v_maximum_f32 v255, -|v255|, -|v255| dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX12: [0xff,0x03,0x66,0xd7,0xe9,0xfe,0x03,0x60,0xff,0x00,0x00,0x00]
+
+v_minimum_f16 v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x67,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_minimum_f16 v5, |v1|, -v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x01,0x67,0xd7,0xe9,0x04,0x02,0x40,0x01,0x77,0x39,0x05]
+
+v_minimum_f16 v5, -v1, |v2| dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: [0x05,0x02,0x67,0xd7,0xea,0x04,0x02,0x20,0x01,0x77,0x39,0x05]
+
+v_minimum_f16 v255, -|v255|, -|v255| dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX12: [0xff,0x03,0x67,0xd7,0xe9,0xfe,0x03,0x60,0xff,0x00,0x00,0x00]
+
+v_maximum_f16 v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x68,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+v_maximum_f16 v5, |v1|, -v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x01,0x68,0xd7,0xe9,0x04,0x02,0x40,0x01,0x77,0x39,0x05]
+
+v_maximum_f16 v5, -v1, |v2| dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: [0x05,0x02,0x68,0xd7,0xea,0x04,0x02,0x20,0x01,0x77,0x39,0x05]
+
+v_maximum_f16 v255, -|v255|, -|v255| dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX12: [0xff,0x03,0x68,0xd7,0xe9,0xfe,0x03,0x60,0xff,0x00,0x00,0x00]
+
+v_minimum3_f32 v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x2d,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_minimum3_f32 v5, v1, s2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x2d,0xd6,0xe9,0x04,0x0c,0x04,0x01,0x77,0x39,0x05]
+
+v_minimum3_f32 v5, v1, 2.0, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x2d,0xd6,0xe9,0xe8,0x0d,0x04,0x01,0x77,0x39,0x05]
+
+v_minimum3_f32 v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x2d,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_minimum3_f32 v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x2d,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_minimum3_f32 v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x2d,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_minimum3_f32 v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x2d,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_minimum3_f32 v5, |v1|, v2, -ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x01,0x2d,0xd6,0xe9,0x04,0xee,0x81,0x01,0x77,0x39,0x05]
+
+v_minimum3_f32 v5, v1, -|v2|, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x02,0x2d,0xd6,0xe9,0x04,0xfe,0x41,0x01,0x77,0x39,0x05]
+
+v_minimum3_f32 v5, -v1, v2, |exec_lo| dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x04,0x2d,0xd6,0xe9,0x04,0xfa,0x21,0x01,0x77,0x39,0x05]
+
+v_minimum3_f32 v5, -|v1|, -|v2|, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x03,0x2d,0xd6,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05]
+
+v_minimum3_f32 v5, -|v1|, v2, -|-1| mul:2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x05,0x2d,0xd6,0xe9,0x04,0x06,0xab,0x01,0x77,0x39,0x05]
+
+v_minimum3_f32 v5, v1, -|v2|, -|0.5| mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: [0x05,0x06,0x2d,0xd6,0xea,0x04,0xc2,0xd3,0x01,0x77,0x39,0x05]
+
+v_minimum3_f32 v255, -|v255|, -|v255|, -|src_scc| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX12: [0xff,0x87,0x2d,0xd6,0xe9,0xfe,0xf7,0xfb,0xff,0x00,0x00,0x00]
+
+v_maximum3_f32 v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x2e,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_maximum3_f32 v5, v1, s2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x2e,0xd6,0xe9,0x04,0x0c,0x04,0x01,0x77,0x39,0x05]
+
+v_maximum3_f32 v5, v1, 2.0, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x2e,0xd6,0xe9,0xe8,0x0d,0x04,0x01,0x77,0x39,0x05]
+
+v_maximum3_f32 v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x2e,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_maximum3_f32 v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x2e,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_maximum3_f32 v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x2e,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_maximum3_f32 v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x2e,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_maximum3_f32 v5, |v1|, v2, -ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x01,0x2e,0xd6,0xe9,0x04,0xee,0x81,0x01,0x77,0x39,0x05]
+
+v_maximum3_f32 v5, v1, -|v2|, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x02,0x2e,0xd6,0xe9,0x04,0xfe,0x41,0x01,0x77,0x39,0x05]
+
+v_maximum3_f32 v5, -v1, v2, |exec_lo| dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x04,0x2e,0xd6,0xe9,0x04,0xfa,0x21,0x01,0x77,0x39,0x05]
+
+v_maximum3_f32 v5, -|v1|, -|v2|, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x03,0x2e,0xd6,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05]
+
+v_maximum3_f32 v5, -|v1|, v2, -|-1| mul:2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x05,0x2e,0xd6,0xe9,0x04,0x06,0xab,0x01,0x77,0x39,0x05]
+
+v_maximum3_f32 v5, v1, -|v2|, -|0.5| mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: [0x05,0x06,0x2e,0xd6,0xea,0x04,0xc2,0xd3,0x01,0x77,0x39,0x05]
+
+v_maximum3_f32 v255, -|v255|, -|v255|, -|src_scc| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX12: [0xff,0x87,0x2e,0xd6,0xe9,0xfe,0xf7,0xfb,0xff,0x00,0x00,0x00]
+
+v_minimum3_f16 v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x2f,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_minimum3_f16 v5, v1, s2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x2f,0xd6,0xe9,0x04,0x0c,0x04,0x01,0x77,0x39,0x05]
+
+v_minimum3_f16 v5, v1, 2.0, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x2f,0xd6,0xe9,0xe8,0x0d,0x04,0x01,0x77,0x39,0x05]
+
+v_minimum3_f16 v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x2f,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_minimum3_f16 v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x2f,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_minimum3_f16 v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x2f,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_minimum3_f16 v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x2f,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_minimum3_f16 v5, |v1|, v2, -ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x01,0x2f,0xd6,0xe9,0x04,0xee,0x81,0x01,0x77,0x39,0x05]
+
+v_minimum3_f16 v5, v1, -|v2|, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x02,0x2f,0xd6,0xe9,0x04,0xfe,0x41,0x01,0x77,0x39,0x05]
+
+v_minimum3_f16 v5, -v1, v2, |exec_lo| dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x04,0x2f,0xd6,0xe9,0x04,0xfa,0x21,0x01,0x77,0x39,0x05]
+
+v_minimum3_f16 v5, -|v1|, -|v2|, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x03,0x2f,0xd6,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05]
+
+v_minimum3_f16 v5, -|v1|, v2, -|-1| dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x05,0x2f,0xd6,0xe9,0x04,0x06,0xa3,0x01,0x77,0x39,0x05]
+
+v_minimum3_f16 v5, v1, -|v2|, -|0.5| dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: [0x05,0x06,0x2f,0xd6,0xea,0x04,0xc2,0xc3,0x01,0x77,0x39,0x05]
+
+v_minimum3_f16 v255, -|v255|, -|v255|, -|src_scc| clamp dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX12: [0xff,0x87,0x2f,0xd6,0xe9,0xfe,0xf7,0xe3,0xff,0x00,0x00,0x00]
+
+v_maximum3_f16 v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x30,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_maximum3_f16 v5, v1, s2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x30,0xd6,0xe9,0x04,0x0c,0x04,0x01,0x77,0x39,0x05]
+
+v_maximum3_f16 v5, v1, 2.0, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x30,0xd6,0xe9,0xe8,0x0d,0x04,0x01,0x77,0x39,0x05]
+
+v_maximum3_f16 v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x30,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_maximum3_f16 v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x30,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_maximum3_f16 v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x30,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_maximum3_f16 v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x30,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_maximum3_f16 v5, |v1|, v2, -ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x01,0x30,0xd6,0xe9,0x04,0xee,0x81,0x01,0x77,0x39,0x05]
+
+v_maximum3_f16 v5, v1, -|v2|, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x02,0x30,0xd6,0xe9,0x04,0xfe,0x41,0x01,0x77,0x39,0x05]
+
+v_maximum3_f16 v5, -v1, v2, |exec_lo| dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x04,0x30,0xd6,0xe9,0x04,0xfa,0x21,0x01,0x77,0x39,0x05]
+
+v_maximum3_f16 v5, -|v1|, -|v2|, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x03,0x30,0xd6,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05]
+
+v_maximum3_f16 v5, -|v1|, v2, -|-1| dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x05,0x30,0xd6,0xe9,0x04,0x06,0xa3,0x01,0x77,0x39,0x05]
+
+v_maximum3_f16 v5, v1, -|v2|, -|0.5| dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: [0x05,0x06,0x30,0xd6,0xea,0x04,0xc2,0xc3,0x01,0x77,0x39,0x05]
+
+v_maximum3_f16 v255, -|v255|, -|v255|, -|src_scc| clamp dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX12: [0xff,0x87,0x30,0xd6,0xe9,0xfe,0xf7,0xe3,0xff,0x00,0x00,0x00]
+
+v_maximumminimum_f32 v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x6d,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_maximumminimum_f32 v5, v1, s2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x6d,0xd6,0xe9,0x04,0x0c,0x04,0x01,0x77,0x39,0x05]
+
+v_maximumminimum_f32 v5, v1, 2.0, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x6d,0xd6,0xe9,0xe8,0x0d,0x04,0x01,0x77,0x39,0x05]
+
+v_maximumminimum_f32 v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x6d,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_maximumminimum_f32 v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x6d,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_maximumminimum_f32 v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x6d,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_maximumminimum_f32 v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x6d,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_maximumminimum_f32 v5, |v1|, v2, -ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x01,0x6d,0xd6,0xe9,0x04,0xee,0x81,0x01,0x77,0x39,0x05]
+
+v_maximumminimum_f32 v5, v1, -|v2|, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x02,0x6d,0xd6,0xe9,0x04,0xfe,0x41,0x01,0x77,0x39,0x05]
+
+v_maximumminimum_f32 v5, -v1, v2, |exec_lo| dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x04,0x6d,0xd6,0xe9,0x04,0xfa,0x21,0x01,0x77,0x39,0x05]
+
+v_maximumminimum_f32 v5, -|v1|, -|v2|, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x03,0x6d,0xd6,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05]
+
+v_maximumminimum_f32 v5, -|v1|, v2, -|-1| mul:2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x05,0x6d,0xd6,0xe9,0x04,0x06,0xab,0x01,0x77,0x39,0x05]
+
+v_maximumminimum_f32 v5, v1, -|v2|, -|0.5| mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: [0x05,0x06,0x6d,0xd6,0xea,0x04,0xc2,0xd3,0x01,0x77,0x39,0x05]
+
+v_maximumminimum_f32 v255, -|v255|, -|v255|, -|src_scc| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX12: [0xff,0x87,0x6d,0xd6,0xe9,0xfe,0xf7,0xfb,0xff,0x00,0x00,0x00]
+
+v_minimummaximum_f32 v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x6c,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_minimummaximum_f32 v5, v1, s2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x6c,0xd6,0xe9,0x04,0x0c,0x04,0x01,0x77,0x39,0x05]
+
+v_minimummaximum_f32 v5, v1, 2.0, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x6c,0xd6,0xe9,0xe8,0x0d,0x04,0x01,0x77,0x39,0x05]
+
+v_minimummaximum_f32 v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x6c,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_minimummaximum_f32 v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x6c,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_minimummaximum_f32 v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x6c,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_minimummaximum_f32 v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x6c,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_minimummaximum_f32 v5, |v1|, v2, -ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x01,0x6c,0xd6,0xe9,0x04,0xee,0x81,0x01,0x77,0x39,0x05]
+
+v_minimummaximum_f32 v5, v1, -|v2|, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x02,0x6c,0xd6,0xe9,0x04,0xfe,0x41,0x01,0x77,0x39,0x05]
+
+v_minimummaximum_f32 v5, -v1, v2, |exec_lo| dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x04,0x6c,0xd6,0xe9,0x04,0xfa,0x21,0x01,0x77,0x39,0x05]
+
+v_minimummaximum_f32 v5, -|v1|, -|v2|, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x03,0x6c,0xd6,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05]
+
+v_minimummaximum_f32 v5, -|v1|, v2, -|-1| mul:2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x05,0x6c,0xd6,0xe9,0x04,0x06,0xab,0x01,0x77,0x39,0x05]
+
+v_minimummaximum_f32 v5, v1, -|v2|, -|0.5| mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: [0x05,0x06,0x6c,0xd6,0xea,0x04,0xc2,0xd3,0x01,0x77,0x39,0x05]
+
+v_minimummaximum_f32 v255, -|v255|, -|v255|, -|src_scc| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX12: [0xff,0x87,0x6c,0xd6,0xe9,0xfe,0xf7,0xfb,0xff,0x00,0x00,0x00]
+
+v_maximumminimum_f16 v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x6f,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_maximumminimum_f16 v5, v1, s2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x6f,0xd6,0xe9,0x04,0x0c,0x04,0x01,0x77,0x39,0x05]
+
+v_maximumminimum_f16 v5, v1, 2.0, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x6f,0xd6,0xe9,0xe8,0x0d,0x04,0x01,0x77,0x39,0x05]
+
+v_maximumminimum_f16 v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x6f,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_maximumminimum_f16 v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x6f,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_maximumminimum_f16 v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x6f,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_maximumminimum_f16 v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x6f,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_maximumminimum_f16 v5, |v1|, v2, -ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x01,0x6f,0xd6,0xe9,0x04,0xee,0x81,0x01,0x77,0x39,0x05]
+
+v_maximumminimum_f16 v5, v1, -|v2|, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x02,0x6f,0xd6,0xe9,0x04,0xfe,0x41,0x01,0x77,0x39,0x05]
+
+v_maximumminimum_f16 v5, -v1, v2, |exec_lo| dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x04,0x6f,0xd6,0xe9,0x04,0xfa,0x21,0x01,0x77,0x39,0x05]
+
+v_maximumminimum_f16 v5, -|v1|, -|v2|, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x03,0x6f,0xd6,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05]
+
+v_maximumminimum_f16 v5, -|v1|, v2, -|-1| dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x05,0x6f,0xd6,0xe9,0x04,0x06,0xa3,0x01,0x77,0x39,0x05]
+
+v_maximumminimum_f16 v5, v1, -|v2|, -|0.5| dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: [0x05,0x06,0x6f,0xd6,0xea,0x04,0xc2,0xc3,0x01,0x77,0x39,0x05]
+
+v_maximumminimum_f16 v255, -|v255|, -|v255|, -|src_scc| clamp dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX12: [0xff,0x87,0x6f,0xd6,0xe9,0xfe,0xf7,0xe3,0xff,0x00,0x00,0x00]
+
+v_minimummaximum_f16 v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x6e,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_minimummaximum_f16 v5, v1, s2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x6e,0xd6,0xe9,0x04,0x0c,0x04,0x01,0x77,0x39,0x05]
+
+v_minimummaximum_f16 v5, v1, 2.0, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x6e,0xd6,0xe9,0xe8,0x0d,0x04,0x01,0x77,0x39,0x05]
+
+v_minimummaximum_f16 v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x6e,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_minimummaximum_f16 v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x6e,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_minimummaximum_f16 v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x6e,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_minimummaximum_f16 v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x00,0x6e,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_minimummaximum_f16 v5, |v1|, v2, -ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x01,0x6e,0xd6,0xe9,0x04,0xee,0x81,0x01,0x77,0x39,0x05]
+
+v_minimummaximum_f16 v5, v1, -|v2|, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x02,0x6e,0xd6,0xe9,0x04,0xfe,0x41,0x01,0x77,0x39,0x05]
+
+v_minimummaximum_f16 v5, -v1, v2, |exec_lo| dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x04,0x6e,0xd6,0xe9,0x04,0xfa,0x21,0x01,0x77,0x39,0x05]
+
+v_minimummaximum_f16 v5, -|v1|, -|v2|, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x03,0x6e,0xd6,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05]
+
+v_minimummaximum_f16 v5, -|v1|, v2, -|-1| dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: [0x05,0x05,0x6e,0xd6,0xe9,0x04,0x06,0xa3,0x01,0x77,0x39,0x05]
+
+v_minimummaximum_f16 v5, v1, -|v2|, -|0.5| dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: [0x05,0x06,0x6e,0xd6,0xea,0x04,0xc2,0xc3,0x01,0x77,0x39,0x05]
+
+v_minimummaximum_f16 v255, -|v255|, -|v255|, -|src_scc| clamp dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX12: [0xff,0x87,0x6e,0xd6,0xe9,0xfe,0xf7,0xe3,0xff,0x00,0x00,0x00]
diff --git a/llvm/test/MC/AMDGPU/gfx12_asm_vop3_dpp8.s b/llvm/test/MC/AMDGPU/gfx12_asm_vop3_dpp8.s
index 3003d72..2d912a4 100644
--- a/llvm/test/MC/AMDGPU/gfx12_asm_vop3_dpp8.s
+++ b/llvm/test/MC/AMDGPU/gfx12_asm_vop3_dpp8.s
@@ -1,7 +1,7 @@
-// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1200 -mattr=+wavefrontsize32 -show-encoding %s | FileCheck --check-prefixes=GFX12,W32 %s
-// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1200 -mattr=+wavefrontsize64 -show-encoding %s | FileCheck --check-prefixes=GFX12,W64 %s
-// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1200 -mattr=+wavefrontsize32 %s 2>&1 | FileCheck --check-prefixes=GFX12-ERR,W32-ERR --implicit-check-not=error: %s
-// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1200 -mattr=+wavefrontsize64 %s 2>&1 | FileCheck --check-prefixes=GFX12-ERR,W64-ERR --implicit-check-not=error: %s
+// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1200 -mattr=+wavefrontsize32,+real-true16 -show-encoding %s | FileCheck --check-prefixes=GFX12,W32 %s
+// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1200 -mattr=+wavefrontsize64,+real-true16 -show-encoding %s | FileCheck --check-prefixes=GFX12,W64 %s
+// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1200 -mattr=+wavefrontsize32,+real-true16 %s 2>&1 | FileCheck --check-prefixes=GFX12-ERR,W32-ERR --implicit-check-not=error: %s
+// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1200 -mattr=+wavefrontsize64,+real-true16 %s 2>&1 | FileCheck --check-prefixes=GFX12-ERR,W64-ERR --implicit-check-not=error: %s
v_add3_u32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
// GFX12: [0x05,0x00,0x55,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
diff --git a/llvm/test/MC/AMDGPU/gfx12_flat_instructions_err.s b/llvm/test/MC/AMDGPU/gfx12_flat_instructions_err.s
new file mode 100644
index 0000000..5e31002
--- /dev/null
+++ b/llvm/test/MC/AMDGPU/gfx12_flat_instructions_err.s
@@ -0,0 +1,289 @@
+// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1200 %s 2>&1 | FileCheck --check-prefixes=GFX12 --implicit-check-not=error: %s
+
+global_atomic_add_f32 v0, v2, null
+// GFX12: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
+
+global_atomic_add_f32 v0, v2, v4, null glc
+// GFX12: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
+
+global_atomic_add_u32 v0, v2, null
+// GFX12: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
+
+global_atomic_add_u32 v0, v2, v4, null glc
+// GFX12: :[[@LINE-1]]:35: error: invalid operand for instruction
+
+global_atomic_add_u64 v0, v[2:3], null
+// GFX12: :[[@LINE-1]]:35: error: invalid operand for instruction
+
+global_atomic_add_u64 v[0:1], v2, v[4:5], null
+// GFX12: :[[@LINE-1]]:43: error: invalid operand for instruction
+
+global_atomic_and_b32 v0, v2, null
+// GFX12: :[[@LINE-1]]:31: error: invalid operand for instruction
+
+global_atomic_and_b32 v0, v2, v4, null
+// GFX12: :[[@LINE-1]]:35: error: invalid operand for instruction
+
+global_atomic_and_b64 v0, v[2:3], null
+// GFX12: :[[@LINE-1]]:35: error: invalid operand for instruction
+
+global_atomic_and_b64 v[0:1], v2, v[4:5], null
+// GFX12: :[[@LINE-1]]:43: error: invalid operand for instruction
+
+global_atomic_cmpswap_b32 v0, v[2:3], null
+// GFX12: :[[@LINE-1]]:39: error: invalid operand for instruction
+
+global_atomic_cmpswap_b32 v0, v2, v[4:5], null
+// GFX12: :[[@LINE-1]]:43: error: invalid operand for instruction
+
+global_atomic_cmpswap_b64 v0, v[2:5], null
+// GFX12: :[[@LINE-1]]:39: error: invalid operand for instruction
+
+global_atomic_cmpswap_b64 v[0:1], v2, v[4:7], null
+// GFX12: :[[@LINE-1]]:47: error: invalid operand for instruction
+
+global_atomic_cond_sub_u32 v0, v2, null
+// GFX12: :[[@LINE-1]]:36: error: invalid operand for instruction
+
+global_atomic_cond_sub_u32 v0, v2, v4, null
+// GFX12: :[[@LINE-1]]:40: error: invalid operand for instruction
+
+global_atomic_dec_u32 v0, v2, null
+// GFX12: :[[@LINE-1]]:31: error: invalid operand for instruction
+
+global_atomic_dec_u32 v0, v2, v4, null
+// GFX12: :[[@LINE-1]]:35: error: invalid operand for instruction
+
+global_atomic_dec_u64 v0, v[2:3], null
+// GFX12: :[[@LINE-1]]:35: error: invalid operand for instruction
+
+global_atomic_dec_u64 v[0:1], v2, v[4:5], null
+// GFX12: :[[@LINE-1]]:43: error: invalid operand for instruction
+
+global_atomic_inc_u32 v0, v2, null
+// GFX12: :[[@LINE-1]]:31: error: invalid operand for instruction
+
+global_atomic_inc_u32 v0, v2, v4, null
+// GFX12: :[[@LINE-1]]:35: error: invalid operand for instruction
+
+global_atomic_inc_u64 v0, v[2:3], null
+// GFX12: :[[@LINE-1]]:35: error: invalid operand for instruction
+
+global_atomic_inc_u64 v[0:1], v2, v[4:5], null
+// GFX12: :[[@LINE-1]]:43: error: invalid operand for instruction
+
+global_atomic_max_i32 v0, v2, null
+// GFX12: :[[@LINE-1]]:31: error: invalid operand for instruction
+
+global_atomic_max_i32 v0, v2, v4, null
+// GFX12: :[[@LINE-1]]:35: error: invalid operand for instruction
+
+global_atomic_max_i64 v0, v[2:3], null
+// GFX12: :[[@LINE-1]]:35: error: invalid operand for instruction
+
+global_atomic_max_i64 v[0:1], v2, v[4:5], null
+// GFX12: :[[@LINE-1]]:43: error: invalid operand for instruction
+
+global_atomic_max_num_f32 v0, v2, null
+// GFX12: :[[@LINE-1]]:35: error: invalid operand for instruction
+
+global_atomic_max_num_f32 v0, v2, v4, null
+// GFX12: :[[@LINE-1]]:39: error: invalid operand for instruction
+
+global_atomic_max_u32 v0, v2, null
+// GFX12: :[[@LINE-1]]:31: error: invalid operand for instruction
+
+global_atomic_max_u32 v0, v2, v4, null
+// GFX12: :[[@LINE-1]]:35: error: invalid operand for instruction
+
+global_atomic_max_u64 v0, v[2:3], null
+// GFX12: :[[@LINE-1]]:35: error: invalid operand for instruction
+
+global_atomic_max_u64 v[0:1], v2, v[4:5], null
+// GFX12: :[[@LINE-1]]:43: error: invalid operand for instruction
+
+global_atomic_min_i32 v0, v2, v4, null
+// GFX12: :[[@LINE-1]]:35: error: invalid operand for instruction
+
+global_atomic_min_i32 v0, v2, null
+// GFX12: :[[@LINE-1]]:31: error: invalid operand for instruction
+
+global_atomic_min_i64 v0, v[2:3], null
+// GFX12: :[[@LINE-1]]:35: error: invalid operand for instruction
+
+global_atomic_min_i64 v[0:1], v2, v[4:5], null
+// GFX12: :[[@LINE-1]]:43: error: invalid operand for instruction
+
+global_atomic_min_num_f32 v0, v2, null
+// GFX12: :[[@LINE-1]]:35: error: invalid operand for instruction
+
+global_atomic_min_num_f32 v0, v2, v4, null
+// GFX12: :[[@LINE-1]]:39: error: invalid operand for instruction
+
+global_atomic_min_u32 v0, v2, null
+// GFX12: :[[@LINE-1]]:31: error: invalid operand for instruction
+
+global_atomic_min_u32 v0, v2, v4, null
+// GFX12: :[[@LINE-1]]:35: error: invalid operand for instruction
+
+global_atomic_min_u64 v0, v[2:3], null
+// GFX12: :[[@LINE-1]]:35: error: invalid operand for instruction
+
+global_atomic_min_u64 v[0:1], v2, v[4:5], null
+// GFX12: :[[@LINE-1]]:43: error: invalid operand for instruction
+
+global_atomic_or_b32 v0, v2, null
+// GFX12: :[[@LINE-1]]:30: error: invalid operand for instruction
+
+global_atomic_or_b32 v0, v2, v4, null
+// GFX12: :[[@LINE-1]]:34: error: invalid operand for instruction
+
+global_atomic_or_b64 v0, v[2:3], null
+// GFX12: :[[@LINE-1]]:34: error: invalid operand for instruction
+
+global_atomic_or_b64 v[0:1], v2, v[4:5], null
+// GFX12: :[[@LINE-1]]:42: error: invalid operand for instruction
+
+global_atomic_ordered_add_b64 v0, v[2:3], null
+// GFX12: :[[@LINE-1]]:43: error: invalid operand for instruction
+
+global_atomic_ordered_add_b64 v[0:1], v2, v[4:5], null
+// GFX12: :[[@LINE-1]]:51: error: invalid operand for instruction
+
+global_atomic_pk_add_bf16 v0, v2, null
+// GFX12: :[[@LINE-1]]:35: error: invalid operand for instruction
+
+global_atomic_pk_add_bf16 v0, v2, v4, null
+// GFX12: :[[@LINE-1]]:39: error: invalid operand for instruction
+
+global_atomic_pk_add_f16 v0, v2, null
+// GFX12: :[[@LINE-1]]:34: error: invalid operand for instruction
+
+global_atomic_pk_add_f16 v0, v2, v4, null
+// GFX12: :[[@LINE-1]]:38: error: invalid operand for instruction
+
+global_atomic_sub_clamp_u32 v0, v2, null
+// GFX12: :[[@LINE-1]]:37: error: invalid operand for instruction
+
+global_atomic_sub_clamp_u32 v0, v2, v4, null
+// GFX12: :[[@LINE-1]]:41: error: invalid operand for instruction
+
+global_atomic_sub_u32 v0, v2, null
+// GFX12: :[[@LINE-1]]:31: error: invalid operand for instruction
+
+global_atomic_sub_u32 v0, v2, v4, null
+// GFX12: :[[@LINE-1]]:35: error: invalid operand for instruction
+
+global_atomic_sub_u64 v0, v[2:3], null
+// GFX12: :[[@LINE-1]]:35: error: invalid operand for instruction
+
+global_atomic_sub_u64 v[0:1], v2, v[4:5], null
+// GFX12: :[[@LINE-1]]:43: error: invalid operand for instruction
+
+global_atomic_swap_b32 v0, v2, null
+// GFX12: :[[@LINE-1]]:32: error: invalid operand for instruction
+
+global_atomic_swap_b32 v0, v2, v4, null
+// GFX12: :[[@LINE-1]]:36: error: invalid operand for instruction
+
+global_atomic_swap_b64 v0, v[2:3], null
+// GFX12: :[[@LINE-1]]:36: error: invalid operand for instruction
+
+global_atomic_swap_b64 v[0:1], v2, v[4:5], null
+// GFX12: :[[@LINE-1]]:44: error: invalid operand for instruction
+
+global_atomic_xor_b32 v0, v2, null
+// GFX12: :[[@LINE-1]]:31: error: invalid operand for instruction
+
+global_atomic_xor_b32 v0, v2, v4, null
+// GFX12: :[[@LINE-1]]:35: error: invalid operand for instruction
+
+global_atomic_xor_b64 v0, v[2:3], null
+// GFX12: :[[@LINE-1]]:35: error: invalid operand for instruction
+
+global_atomic_xor_b64 v[0:1], v2, v[4:5], null
+// GFX12: :[[@LINE-1]]:43: error: invalid operand for instruction
+
+global_load_b128 v[0:3], v4, null
+// GFX12: :[[@LINE-1]]:30: error: invalid operand for instruction
+
+global_load_b32 v0, v4, null
+// GFX12: :[[@LINE-1]]:25: error: invalid operand for instruction
+
+global_load_b64 v[0:1], v4, null
+// GFX12: :[[@LINE-1]]:29: error: invalid operand for instruction
+
+global_load_b96 v[0:2], v4, null
+// GFX12: :[[@LINE-1]]:29: error: invalid operand for instruction
+
+global_load_block v[0:31], v32, null
+// GFX12: :[[@LINE-1]]:33: error: invalid operand for instruction
+
+global_load_d16_b16 v0, v2, null
+// GFX12: :[[@LINE-1]]:29: error: invalid operand for instruction
+
+global_load_d16_hi_b16 v0, v2, null
+// GFX12: :[[@LINE-1]]:32: error: invalid operand for instruction
+
+global_load_d16_hi_i8 v0, v2, null
+// GFX12: :[[@LINE-1]]:31: error: invalid operand for instruction
+
+global_load_d16_hi_u8 v0, v2, null
+// GFX12: :[[@LINE-1]]:31: error: invalid operand for instruction
+
+global_load_d16_i8 v0, v2, null
+// GFX12: :[[@LINE-1]]:28: error: invalid operand for instruction
+
+global_load_d16_u8 v0, v2, null
+// GFX12: :[[@LINE-1]]:28: error: invalid operand for instruction
+
+global_load_i16 v0, v2, null
+// GFX12: :[[@LINE-1]]:25: error: invalid operand for instruction
+
+global_load_i8 v0, v2, null
+// GFX12: :[[@LINE-1]]:24: error: invalid operand for instruction
+
+global_load_tr_b128 v[0:3], v4, null
+// GFX12: :[[@LINE-1]]:33: error: invalid operand for instruction
+
+global_load_tr_b128 v[0:1], v4, null
+// GFX12: :[[@LINE-1]]:33: error: invalid operand for instruction
+
+global_load_tr_b64 v[0:1], v4, null
+// GFX12: :[[@LINE-1]]:32: error: invalid operand for instruction
+
+global_load_tr_b64 v0, v4, null
+// GFX12: :[[@LINE-1]]:28: error: invalid operand for instruction
+
+global_load_u16 v0, v2, null
+// GFX12: :[[@LINE-1]]:25: error: invalid operand for instruction
+
+global_load_u8 v0, v2, null
+// GFX12: :[[@LINE-1]]:24: error: invalid operand for instruction
+
+global_store_b128 v0, v[2:5], null
+// GFX12: :[[@LINE-1]]:31: error: invalid operand for instruction
+
+global_store_b16 v0, v2, null
+// GFX12: :[[@LINE-1]]:26: error: invalid operand for instruction
+
+global_store_b32 v0, v2, null
+// GFX12: :[[@LINE-1]]:26: error: invalid operand for instruction
+
+global_store_b64 v0, v[2:3], null
+// GFX12: :[[@LINE-1]]:30: error: invalid operand for instruction
+
+global_store_b8 v0, v2, null
+// GFX12: :[[@LINE-1]]:25: error: invalid operand for instruction
+
+global_store_b96 v0, v[2:4], null
+// GFX12: :[[@LINE-1]]:30: error: invalid operand for instruction
+
+global_store_block v32, v[0:31], null
+// GFX12: :[[@LINE-1]]:34: error: invalid operand for instruction
+
+global_store_d16_hi_b16 v0, v2, null
+// GFX12: :[[@LINE-1]]:33: error: invalid operand for instruction
+
+global_store_d16_hi_b8 v0, v2, null
+// GFX12: :[[@LINE-1]]:32: error: invalid operand for instruction
diff --git a/llvm/test/MC/AMDGPU/gfx12_unsupported.s b/llvm/test/MC/AMDGPU/gfx12_unsupported.s
index f0debbf..c34cb9e 100644
--- a/llvm/test/MC/AMDGPU/gfx12_unsupported.s
+++ b/llvm/test/MC/AMDGPU/gfx12_unsupported.s
@@ -232,9 +232,15 @@ buffer_gl0_inv
buffer_gl1_inv
// CHECK: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+buffer_store_lds_dword s[4:7], -1 offset:4095 lds
+// CHECK: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
buffer_wbinvl1
// CHECK: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+buffer_wbinvl1_vol
+// CHECK: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
flat_atomic_csub v1, v[0:1], v2 offset:64 th:TH_ATOMIC_RETURN
// CHECK: :[[@LINE-1]]:{{[0-9]+}}: error: invalid instruction
diff --git a/llvm/test/MC/AMDGPU/gfx940_unsupported.s b/llvm/test/MC/AMDGPU/gfx940_unsupported.s
new file mode 100644
index 0000000..4ef53c7
--- /dev/null
+++ b/llvm/test/MC/AMDGPU/gfx940_unsupported.s
@@ -0,0 +1,11 @@
+// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx940 %s 2>&1 | FileCheck --check-prefixes=CHECK --implicit-check-not=error: %s
+
+buffer_store_lds_dword s[4:7], -1 offset:4095 lds
+// CHECK: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+buffer_wbinvl1
+// CHECK: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+buffer_wbinvl1_vol
+// CHECK: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
diff --git a/llvm/test/MC/Disassembler/AMDGPU/decode-err.txt b/llvm/test/MC/Disassembler/AMDGPU/decode-err.txt
index d6e8b7e..f819a61 100644
--- a/llvm/test/MC/Disassembler/AMDGPU/decode-err.txt
+++ b/llvm/test/MC/Disassembler/AMDGPU/decode-err.txt
@@ -1,16 +1,11 @@
# RUN: llvm-mc -triple=amdgcn -mcpu=gfx900 -disassemble -show-encoding -filetype=null < %s 2>&1 | FileCheck -check-prefix=GCN-ERR %s
# RUN: llvm-mc -triple=amdgcn -mcpu=gfx1100 -disassemble -show-encoding < %s 2>&1 | FileCheck -check-prefixes=W32 %s
# RUN: llvm-mc -triple=amdgcn -mcpu=gfx1100 -mattr=+wavefrontsize64 -disassemble -show-encoding < %s 2>&1 | FileCheck -check-prefixes=W64 %s
-# RUN: llvm-mc -triple=amdgcn -mcpu=gfx1100 -disassemble -show-encoding -filetype=null < %s 2>&1 | FileCheck -check-prefix=GFX11-ERR %s
# RUN: llvm-mc -triple=amdgcn -mcpu=gfx1200 -disassemble -show-encoding -filetype=null < %s 2>&1 | FileCheck -check-prefix=GFX12-ERR %s
# GCN-ERR: [[@LINE+1]]:1: warning: invalid instruction encoding
0xdf,0x00,0x00,0x02
-# this is s_singleuse_vdst 0x1234, which is only valid on gfx1150
-# GFX11-ERR: [[@LINE+1]]:1: warning: invalid instruction encoding
-0x34,0x12,0x93,0xbf
-
# this is s_waitcnt_vscnt exec_hi, 0x1234, which is valid on gfx11, but not on gfx12
# GFX12-ERR: [[@LINE+1]]:1: warning: invalid instruction encoding
0x34,0x12,0x7f,0xbc
diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx10_flat.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx10_flat.txt
index f8d3129..7e5366b 100644
--- a/llvm/test/MC/Disassembler/AMDGPU/gfx10_flat.txt
+++ b/llvm/test/MC/Disassembler/AMDGPU/gfx10_flat.txt
@@ -761,9 +761,6 @@
# GFX10: global_store_byte_d16_hi v[3:4], v1, off ; encoding: [0x00,0x80,0x64,0xdc,0x03,0x01,0x7d,0x00]
0x00,0x80,0x64,0xdc,0x03,0x01,0x7d,0x00
-# GFX10: global_store_dword v3, v1, exec ; encoding: [0x00,0x80,0x70,0xdc,0x03,0x01,0x7e,0x00]
-0x00,0x80,0x70,0xdc,0x03,0x01,0x7e,0x00
-
# GFX10: global_store_dword v[3:4], v1, off ; encoding: [0x00,0x80,0x70,0xdc,0x03,0x01,0x7d,0x00]
0x00,0x80,0x70,0xdc,0x03,0x01,0x7d,0x00
diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx1150_dasm_sopp.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx1150_dasm_sopp.txt
deleted file mode 100644
index 8fa266a..0000000
--- a/llvm/test/MC/Disassembler/AMDGPU/gfx1150_dasm_sopp.txt
+++ /dev/null
@@ -1,10 +0,0 @@
-# RUN: llvm-mc -triple=amdgcn -mcpu=gfx1150 -disassemble -show-encoding < %s | FileCheck -check-prefixes=GFX1150 %s
-
-# GFX1150: s_singleuse_vdst 0x0 ; encoding: [0x00,0x00,0x93,0xbf]
-0x00,0x00,0x93,0xbf
-
-# GFX1150: s_singleuse_vdst 0xffff ; encoding: [0xff,0xff,0x93,0xbf]
-0xff,0xff,0x93,0xbf
-
-# GFX1150: s_singleuse_vdst 0x1234 ; encoding: [0x34,0x12,0x93,0xbf]
-0x34,0x12,0x93,0xbf
diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx11_dasm_vop3.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx11_dasm_vop3.txt
index 0c1d538a..c58b696 100644
--- a/llvm/test/MC/Disassembler/AMDGPU/gfx11_dasm_vop3.txt
+++ b/llvm/test/MC/Disassembler/AMDGPU/gfx11_dasm_vop3.txt
@@ -1,5 +1,8 @@
-# RUN: llvm-mc -triple=amdgcn -mcpu=gfx1100 -disassemble -show-encoding < %s | FileCheck -check-prefixes=GFX11,W32 %s
-# RUN: llvm-mc -triple=amdgcn -mcpu=gfx1100 -mattr=+wavefrontsize64 -disassemble -show-encoding < %s | FileCheck -check-prefixes=GFX11,W64 %s
+; NOTE: Assertions have been autogenerated by utils/update_mc_test_checks.py UTC_ARGS: --version 5
+# RUN: llvm-mc -triple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 -disassemble -show-encoding < %s | FileCheck -check-prefixes=GFX11,W32,W32-REAL16 %s
+# RUN: llvm-mc -triple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 -disassemble -show-encoding < %s | FileCheck -check-prefixes=GFX11,W32,W32-FAKE16 %s
+# RUN: llvm-mc -triple=amdgcn -mcpu=gfx1100 -mattr=+wavefrontsize64,+real-true16 -disassemble -show-encoding < %s | FileCheck -check-prefixes=GFX11,W64,W64-REAL16 %s
+# RUN: llvm-mc -triple=amdgcn -mcpu=gfx1100 -mattr=+wavefrontsize64,-real-true16 -disassemble -show-encoding < %s | FileCheck -check-prefixes=GFX11,W64,W64-FAKE16 %s
# GFX11: v_add3_u32 v5, v1, v2, s3 ; encoding: [0x05,0x00,0x55,0xd6,0x01,0x05,0x0e,0x00]
0x05,0x00,0x55,0xd6,0x01,0x05,0x0e,0x00
@@ -411,49 +414,94 @@
# GFX11: v_alignbyte_b32 v255, 0xaf123456, vcc_hi, null ; encoding: [0xff,0x00,0x17,0xd6,0xff,0xd6,0xf0,0x01,0x56,0x34,0x12,0xaf]
0xff,0x00,0x17,0xd6,0xff,0xd6,0xf0,0x01,0x56,0x34,0x12,0xaf
-# GFX11: v_and_b16 v5, v1, v2 ; encoding: [0x05,0x00,0x62,0xd7,0x01,0x05,0x02,0x00]
+# W32-REAL16: v_and_b16 v5.l, v1.l, v2.l ; encoding: [0x05,0x00,0x62,0xd7,0x01,0x05,0x02,0x00]
+# W32-FAKE16: v_and_b16 v5, v1, v2 ; encoding: [0x05,0x00,0x62,0xd7,0x01,0x05,0x02,0x00]
+# W64-REAL16: v_and_b16 v5.l, v1.l, v2.l ; encoding: [0x05,0x00,0x62,0xd7,0x01,0x05,0x02,0x00]
+# W64-FAKE16: v_and_b16 v5, v1, v2 ; encoding: [0x05,0x00,0x62,0xd7,0x01,0x05,0x02,0x00]
0x05,0x00,0x62,0xd7,0x01,0x05,0x02,0x00
-# GFX11: v_and_b16 v5, v255, v255 ; encoding: [0x05,0x00,0x62,0xd7,0xff,0xff,0x03,0x00]
+# W32-REAL16: v_and_b16 v5.l, v255.l, v255.l ; encoding: [0x05,0x00,0x62,0xd7,0xff,0xff,0x03,0x00]
+# W32-FAKE16: v_and_b16 v5, v255, v255 ; encoding: [0x05,0x00,0x62,0xd7,0xff,0xff,0x03,0x00]
+# W64-REAL16: v_and_b16 v5.l, v255.l, v255.l ; encoding: [0x05,0x00,0x62,0xd7,0xff,0xff,0x03,0x00]
+# W64-FAKE16: v_and_b16 v5, v255, v255 ; encoding: [0x05,0x00,0x62,0xd7,0xff,0xff,0x03,0x00]
0x05,0x00,0x62,0xd7,0xff,0xff,0x03,0x00
-# GFX11: v_and_b16 v5, s1, s2 ; encoding: [0x05,0x00,0x62,0xd7,0x01,0x04,0x00,0x00]
+# W32-REAL16: v_and_b16 v5.l, s1, s2 ; encoding: [0x05,0x00,0x62,0xd7,0x01,0x04,0x00,0x00]
+# W32-FAKE16: v_and_b16 v5, s1, s2 ; encoding: [0x05,0x00,0x62,0xd7,0x01,0x04,0x00,0x00]
+# W64-REAL16: v_and_b16 v5.l, s1, s2 ; encoding: [0x05,0x00,0x62,0xd7,0x01,0x04,0x00,0x00]
+# W64-FAKE16: v_and_b16 v5, s1, s2 ; encoding: [0x05,0x00,0x62,0xd7,0x01,0x04,0x00,0x00]
0x05,0x00,0x62,0xd7,0x01,0x04,0x00,0x00
-# GFX11: v_and_b16 v5, s105, s105 ; encoding: [0x05,0x00,0x62,0xd7,0x69,0xd2,0x00,0x00]
+# W32-REAL16: v_and_b16 v5.l, s105, s105 ; encoding: [0x05,0x00,0x62,0xd7,0x69,0xd2,0x00,0x00]
+# W32-FAKE16: v_and_b16 v5, s105, s105 ; encoding: [0x05,0x00,0x62,0xd7,0x69,0xd2,0x00,0x00]
+# W64-REAL16: v_and_b16 v5.l, s105, s105 ; encoding: [0x05,0x00,0x62,0xd7,0x69,0xd2,0x00,0x00]
+# W64-FAKE16: v_and_b16 v5, s105, s105 ; encoding: [0x05,0x00,0x62,0xd7,0x69,0xd2,0x00,0x00]
0x05,0x00,0x62,0xd7,0x69,0xd2,0x00,0x00
-# GFX11: v_and_b16 v5, vcc_lo, ttmp15 ; encoding: [0x05,0x00,0x62,0xd7,0x6a,0xf6,0x00,0x00]
+# W32-REAL16: v_and_b16 v5.l, vcc_lo, ttmp15 ; encoding: [0x05,0x00,0x62,0xd7,0x6a,0xf6,0x00,0x00]
+# W32-FAKE16: v_and_b16 v5, vcc_lo, ttmp15 ; encoding: [0x05,0x00,0x62,0xd7,0x6a,0xf6,0x00,0x00]
+# W64-REAL16: v_and_b16 v5.l, vcc_lo, ttmp15 ; encoding: [0x05,0x00,0x62,0xd7,0x6a,0xf6,0x00,0x00]
+# W64-FAKE16: v_and_b16 v5, vcc_lo, ttmp15 ; encoding: [0x05,0x00,0x62,0xd7,0x6a,0xf6,0x00,0x00]
0x05,0x00,0x62,0xd7,0x6a,0xf6,0x00,0x00
-# GFX11: v_and_b16 v5, vcc_hi, 0xfe0b ; encoding: [0x05,0x00,0x62,0xd7,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+# W32-REAL16: v_and_b16 v5.l, vcc_hi, 0xfe0b ; encoding: [0x05,0x00,0x62,0xd7,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+# W32-FAKE16: v_and_b16 v5, vcc_hi, 0xfe0b ; encoding: [0x05,0x00,0x62,0xd7,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+# W64-REAL16: v_and_b16 v5.l, vcc_hi, 0xfe0b ; encoding: [0x05,0x00,0x62,0xd7,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+# W64-FAKE16: v_and_b16 v5, vcc_hi, 0xfe0b ; encoding: [0x05,0x00,0x62,0xd7,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
0x05,0x00,0x62,0xd7,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00
-# GFX11: v_and_b16 v5, ttmp15, src_scc ; encoding: [0x05,0x00,0x62,0xd7,0x7b,0xfa,0x01,0x00]
+# W32-REAL16: v_and_b16 v5.l, ttmp15, src_scc ; encoding: [0x05,0x00,0x62,0xd7,0x7b,0xfa,0x01,0x00]
+# W32-FAKE16: v_and_b16 v5, ttmp15, src_scc ; encoding: [0x05,0x00,0x62,0xd7,0x7b,0xfa,0x01,0x00]
+# W64-REAL16: v_and_b16 v5.l, ttmp15, src_scc ; encoding: [0x05,0x00,0x62,0xd7,0x7b,0xfa,0x01,0x00]
+# W64-FAKE16: v_and_b16 v5, ttmp15, src_scc ; encoding: [0x05,0x00,0x62,0xd7,0x7b,0xfa,0x01,0x00]
0x05,0x00,0x62,0xd7,0x7b,0xfa,0x01,0x00
-# GFX11: v_and_b16 v5, m0, 0x3800
+# W32-REAL16: v_and_b16 v5.l, m0, 0x3800 ; encoding: [0x05,0x00,0x62,0xd7,0x7d,0xfe,0x01,0x00,0x00,0x38,0x00,0x00]
+# W32-FAKE16: v_and_b16 v5, m0, 0x3800 ; encoding: [0x05,0x00,0x62,0xd7,0x7d,0xfe,0x01,0x00,0x00,0x38,0x00,0x00]
+# W64-REAL16: v_and_b16 v5.l, m0, 0x3800 ; encoding: [0x05,0x00,0x62,0xd7,0x7d,0xfe,0x01,0x00,0x00,0x38,0x00,0x00]
+# W64-FAKE16: v_and_b16 v5, m0, 0x3800 ; encoding: [0x05,0x00,0x62,0xd7,0x7d,0xfe,0x01,0x00,0x00,0x38,0x00,0x00]
0x05,0x00,0x62,0xd7,0x7d,0xe0,0x01,0x00
-# GFX11: v_and_b16 v5, exec_lo, -1 ; encoding: [0x05,0x00,0x62,0xd7,0x7e,0x82,0x01,0x00]
+# W32-REAL16: v_and_b16 v5.l, exec_lo, -1 ; encoding: [0x05,0x00,0x62,0xd7,0x7e,0x82,0x01,0x00]
+# W32-FAKE16: v_and_b16 v5, exec_lo, -1 ; encoding: [0x05,0x00,0x62,0xd7,0x7e,0x82,0x01,0x00]
+# W64-REAL16: v_and_b16 v5.l, exec_lo, -1 ; encoding: [0x05,0x00,0x62,0xd7,0x7e,0x82,0x01,0x00]
+# W64-FAKE16: v_and_b16 v5, exec_lo, -1 ; encoding: [0x05,0x00,0x62,0xd7,0x7e,0x82,0x01,0x00]
0x05,0x00,0x62,0xd7,0x7e,0x82,0x01,0x00
-# GFX11: v_and_b16 v5, exec_hi, null ; encoding: [0x05,0x00,0x62,0xd7,0x7f,0xf8,0x00,0x00]
+# W32-REAL16: v_and_b16 v5.l, exec_hi, null ; encoding: [0x05,0x00,0x62,0xd7,0x7f,0xf8,0x00,0x00]
+# W32-FAKE16: v_and_b16 v5, exec_hi, null ; encoding: [0x05,0x00,0x62,0xd7,0x7f,0xf8,0x00,0x00]
+# W64-REAL16: v_and_b16 v5.l, exec_hi, null ; encoding: [0x05,0x00,0x62,0xd7,0x7f,0xf8,0x00,0x00]
+# W64-FAKE16: v_and_b16 v5, exec_hi, null ; encoding: [0x05,0x00,0x62,0xd7,0x7f,0xf8,0x00,0x00]
0x05,0x00,0x62,0xd7,0x7f,0xf8,0x00,0x00
-# GFX11: v_and_b16 v5, null, exec_lo ; encoding: [0x05,0x00,0x62,0xd7,0x7c,0xfc,0x00,0x00]
+# W32-REAL16: v_and_b16 v5.l, null, exec_lo ; encoding: [0x05,0x00,0x62,0xd7,0x7c,0xfc,0x00,0x00]
+# W32-FAKE16: v_and_b16 v5, null, exec_lo ; encoding: [0x05,0x00,0x62,0xd7,0x7c,0xfc,0x00,0x00]
+# W64-REAL16: v_and_b16 v5.l, null, exec_lo ; encoding: [0x05,0x00,0x62,0xd7,0x7c,0xfc,0x00,0x00]
+# W64-FAKE16: v_and_b16 v5, null, exec_lo ; encoding: [0x05,0x00,0x62,0xd7,0x7c,0xfc,0x00,0x00]
0x05,0x00,0x62,0xd7,0x7c,0xfc,0x00,0x00
-# GFX11: v_and_b16 v5, -1, exec_hi ; encoding: [0x05,0x00,0x62,0xd7,0xc1,0xfe,0x00,0x00]
+# W32-REAL16: v_and_b16 v5.l, -1, exec_hi ; encoding: [0x05,0x00,0x62,0xd7,0xc1,0xfe,0x00,0x00]
+# W32-FAKE16: v_and_b16 v5, -1, exec_hi ; encoding: [0x05,0x00,0x62,0xd7,0xc1,0xfe,0x00,0x00]
+# W64-REAL16: v_and_b16 v5.l, -1, exec_hi ; encoding: [0x05,0x00,0x62,0xd7,0xc1,0xfe,0x00,0x00]
+# W64-FAKE16: v_and_b16 v5, -1, exec_hi ; encoding: [0x05,0x00,0x62,0xd7,0xc1,0xfe,0x00,0x00]
0x05,0x00,0x62,0xd7,0xc1,0xfe,0x00,0x00
-# GFX11: v_and_b16 v5, 0x3800, m0
+# W32-REAL16: v_and_b16 v5.l, 0x3800, m0 ; encoding: [0x05,0x00,0x62,0xd7,0xff,0xfa,0x00,0x00,0x00,0x38,0x00,0x00]
+# W32-FAKE16: v_and_b16 v5, 0x3800, m0 ; encoding: [0x05,0x00,0x62,0xd7,0xff,0xfa,0x00,0x00,0x00,0x38,0x00,0x00]
+# W64-REAL16: v_and_b16 v5.l, 0x3800, m0 ; encoding: [0x05,0x00,0x62,0xd7,0xff,0xfa,0x00,0x00,0x00,0x38,0x00,0x00]
+# W64-FAKE16: v_and_b16 v5, 0x3800, m0 ; encoding: [0x05,0x00,0x62,0xd7,0xff,0xfa,0x00,0x00,0x00,0x38,0x00,0x00]
0x05,0x00,0x62,0xd7,0xf0,0xfa,0x00,0x00
-# GFX11: v_and_b16 v5, src_scc, vcc_lo ; encoding: [0x05,0x00,0x62,0xd7,0xfd,0xd4,0x00,0x00]
+# W32-REAL16: v_and_b16 v5.l, src_scc, vcc_lo ; encoding: [0x05,0x00,0x62,0xd7,0xfd,0xd4,0x00,0x00]
+# W32-FAKE16: v_and_b16 v5, src_scc, vcc_lo ; encoding: [0x05,0x00,0x62,0xd7,0xfd,0xd4,0x00,0x00]
+# W64-REAL16: v_and_b16 v5.l, src_scc, vcc_lo ; encoding: [0x05,0x00,0x62,0xd7,0xfd,0xd4,0x00,0x00]
+# W64-FAKE16: v_and_b16 v5, src_scc, vcc_lo ; encoding: [0x05,0x00,0x62,0xd7,0xfd,0xd4,0x00,0x00]
0x05,0x00,0x62,0xd7,0xfd,0xd4,0x00,0x00
-# GFX11: v_and_b16 v255, 0xfe0b, vcc_hi ; encoding: [0xff,0x00,0x62,0xd7,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
+# W32-REAL16: v_and_b16 v255.l, 0xfe0b, vcc_hi ; encoding: [0xff,0x00,0x62,0xd7,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
+# W32-FAKE16: v_and_b16 v255, 0xfe0b, vcc_hi ; encoding: [0xff,0x00,0x62,0xd7,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
+# W64-REAL16: v_and_b16 v255.l, 0xfe0b, vcc_hi ; encoding: [0xff,0x00,0x62,0xd7,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
+# W64-FAKE16: v_and_b16 v255, 0xfe0b, vcc_hi ; encoding: [0xff,0x00,0x62,0xd7,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
0xff,0x00,0x62,0xd7,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00
# GFX11: v_and_or_b32 v5, v1, v2, s3 ; encoding: [0x05,0x00,0x57,0xd6,0x01,0x05,0x0e,0x00]
@@ -4738,49 +4786,94 @@
# GFX11: v_or3_b32 v255, 0xaf123456, vcc_hi, null ; encoding: [0xff,0x00,0x58,0xd6,0xff,0xd6,0xf0,0x01,0x56,0x34,0x12,0xaf]
0xff,0x00,0x58,0xd6,0xff,0xd6,0xf0,0x01,0x56,0x34,0x12,0xaf
-# GFX11: v_or_b16 v5, v1, v2 ; encoding: [0x05,0x00,0x63,0xd7,0x01,0x05,0x02,0x00]
+# W32-REAL16: v_or_b16 v5.l, v1.l, v2.l ; encoding: [0x05,0x00,0x63,0xd7,0x01,0x05,0x02,0x00]
+# W32-FAKE16: v_or_b16 v5, v1, v2 ; encoding: [0x05,0x00,0x63,0xd7,0x01,0x05,0x02,0x00]
+# W64-REAL16: v_or_b16 v5.l, v1.l, v2.l ; encoding: [0x05,0x00,0x63,0xd7,0x01,0x05,0x02,0x00]
+# W64-FAKE16: v_or_b16 v5, v1, v2 ; encoding: [0x05,0x00,0x63,0xd7,0x01,0x05,0x02,0x00]
0x05,0x00,0x63,0xd7,0x01,0x05,0x02,0x00
-# GFX11: v_or_b16 v5, v255, v255 ; encoding: [0x05,0x00,0x63,0xd7,0xff,0xff,0x03,0x00]
+# W32-REAL16: v_or_b16 v5.l, v255.l, v255.l ; encoding: [0x05,0x00,0x63,0xd7,0xff,0xff,0x03,0x00]
+# W32-FAKE16: v_or_b16 v5, v255, v255 ; encoding: [0x05,0x00,0x63,0xd7,0xff,0xff,0x03,0x00]
+# W64-REAL16: v_or_b16 v5.l, v255.l, v255.l ; encoding: [0x05,0x00,0x63,0xd7,0xff,0xff,0x03,0x00]
+# W64-FAKE16: v_or_b16 v5, v255, v255 ; encoding: [0x05,0x00,0x63,0xd7,0xff,0xff,0x03,0x00]
0x05,0x00,0x63,0xd7,0xff,0xff,0x03,0x00
-# GFX11: v_or_b16 v5, s1, s2 ; encoding: [0x05,0x00,0x63,0xd7,0x01,0x04,0x00,0x00]
+# W32-REAL16: v_or_b16 v5.l, s1, s2 ; encoding: [0x05,0x00,0x63,0xd7,0x01,0x04,0x00,0x00]
+# W32-FAKE16: v_or_b16 v5, s1, s2 ; encoding: [0x05,0x00,0x63,0xd7,0x01,0x04,0x00,0x00]
+# W64-REAL16: v_or_b16 v5.l, s1, s2 ; encoding: [0x05,0x00,0x63,0xd7,0x01,0x04,0x00,0x00]
+# W64-FAKE16: v_or_b16 v5, s1, s2 ; encoding: [0x05,0x00,0x63,0xd7,0x01,0x04,0x00,0x00]
0x05,0x00,0x63,0xd7,0x01,0x04,0x00,0x00
-# GFX11: v_or_b16 v5, s105, s105 ; encoding: [0x05,0x00,0x63,0xd7,0x69,0xd2,0x00,0x00]
+# W32-REAL16: v_or_b16 v5.l, s105, s105 ; encoding: [0x05,0x00,0x63,0xd7,0x69,0xd2,0x00,0x00]
+# W32-FAKE16: v_or_b16 v5, s105, s105 ; encoding: [0x05,0x00,0x63,0xd7,0x69,0xd2,0x00,0x00]
+# W64-REAL16: v_or_b16 v5.l, s105, s105 ; encoding: [0x05,0x00,0x63,0xd7,0x69,0xd2,0x00,0x00]
+# W64-FAKE16: v_or_b16 v5, s105, s105 ; encoding: [0x05,0x00,0x63,0xd7,0x69,0xd2,0x00,0x00]
0x05,0x00,0x63,0xd7,0x69,0xd2,0x00,0x00
-# GFX11: v_or_b16 v5, vcc_lo, ttmp15 ; encoding: [0x05,0x00,0x63,0xd7,0x6a,0xf6,0x00,0x00]
+# W32-REAL16: v_or_b16 v5.l, vcc_lo, ttmp15 ; encoding: [0x05,0x00,0x63,0xd7,0x6a,0xf6,0x00,0x00]
+# W32-FAKE16: v_or_b16 v5, vcc_lo, ttmp15 ; encoding: [0x05,0x00,0x63,0xd7,0x6a,0xf6,0x00,0x00]
+# W64-REAL16: v_or_b16 v5.l, vcc_lo, ttmp15 ; encoding: [0x05,0x00,0x63,0xd7,0x6a,0xf6,0x00,0x00]
+# W64-FAKE16: v_or_b16 v5, vcc_lo, ttmp15 ; encoding: [0x05,0x00,0x63,0xd7,0x6a,0xf6,0x00,0x00]
0x05,0x00,0x63,0xd7,0x6a,0xf6,0x00,0x00
-# GFX11: v_or_b16 v5, vcc_hi, 0xfe0b ; encoding: [0x05,0x00,0x63,0xd7,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+# W32-REAL16: v_or_b16 v5.l, vcc_hi, 0xfe0b ; encoding: [0x05,0x00,0x63,0xd7,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+# W32-FAKE16: v_or_b16 v5, vcc_hi, 0xfe0b ; encoding: [0x05,0x00,0x63,0xd7,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+# W64-REAL16: v_or_b16 v5.l, vcc_hi, 0xfe0b ; encoding: [0x05,0x00,0x63,0xd7,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+# W64-FAKE16: v_or_b16 v5, vcc_hi, 0xfe0b ; encoding: [0x05,0x00,0x63,0xd7,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
0x05,0x00,0x63,0xd7,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00
-# GFX11: v_or_b16 v5, ttmp15, src_scc ; encoding: [0x05,0x00,0x63,0xd7,0x7b,0xfa,0x01,0x00]
+# W32-REAL16: v_or_b16 v5.l, ttmp15, src_scc ; encoding: [0x05,0x00,0x63,0xd7,0x7b,0xfa,0x01,0x00]
+# W32-FAKE16: v_or_b16 v5, ttmp15, src_scc ; encoding: [0x05,0x00,0x63,0xd7,0x7b,0xfa,0x01,0x00]
+# W64-REAL16: v_or_b16 v5.l, ttmp15, src_scc ; encoding: [0x05,0x00,0x63,0xd7,0x7b,0xfa,0x01,0x00]
+# W64-FAKE16: v_or_b16 v5, ttmp15, src_scc ; encoding: [0x05,0x00,0x63,0xd7,0x7b,0xfa,0x01,0x00]
0x05,0x00,0x63,0xd7,0x7b,0xfa,0x01,0x00
-# GFX11: v_or_b16 v5, m0, 0x3800
+# W32-REAL16: v_or_b16 v5.l, m0, 0x3800 ; encoding: [0x05,0x00,0x63,0xd7,0x7d,0xfe,0x01,0x00,0x00,0x38,0x00,0x00]
+# W32-FAKE16: v_or_b16 v5, m0, 0x3800 ; encoding: [0x05,0x00,0x63,0xd7,0x7d,0xfe,0x01,0x00,0x00,0x38,0x00,0x00]
+# W64-REAL16: v_or_b16 v5.l, m0, 0x3800 ; encoding: [0x05,0x00,0x63,0xd7,0x7d,0xfe,0x01,0x00,0x00,0x38,0x00,0x00]
+# W64-FAKE16: v_or_b16 v5, m0, 0x3800 ; encoding: [0x05,0x00,0x63,0xd7,0x7d,0xfe,0x01,0x00,0x00,0x38,0x00,0x00]
0x05,0x00,0x63,0xd7,0x7d,0xe0,0x01,0x00
-# GFX11: v_or_b16 v5, exec_lo, -1 ; encoding: [0x05,0x00,0x63,0xd7,0x7e,0x82,0x01,0x00]
+# W32-REAL16: v_or_b16 v5.l, exec_lo, -1 ; encoding: [0x05,0x00,0x63,0xd7,0x7e,0x82,0x01,0x00]
+# W32-FAKE16: v_or_b16 v5, exec_lo, -1 ; encoding: [0x05,0x00,0x63,0xd7,0x7e,0x82,0x01,0x00]
+# W64-REAL16: v_or_b16 v5.l, exec_lo, -1 ; encoding: [0x05,0x00,0x63,0xd7,0x7e,0x82,0x01,0x00]
+# W64-FAKE16: v_or_b16 v5, exec_lo, -1 ; encoding: [0x05,0x00,0x63,0xd7,0x7e,0x82,0x01,0x00]
0x05,0x00,0x63,0xd7,0x7e,0x82,0x01,0x00
-# GFX11: v_or_b16 v5, exec_hi, null ; encoding: [0x05,0x00,0x63,0xd7,0x7f,0xf8,0x00,0x00]
+# W32-REAL16: v_or_b16 v5.l, exec_hi, null ; encoding: [0x05,0x00,0x63,0xd7,0x7f,0xf8,0x00,0x00]
+# W32-FAKE16: v_or_b16 v5, exec_hi, null ; encoding: [0x05,0x00,0x63,0xd7,0x7f,0xf8,0x00,0x00]
+# W64-REAL16: v_or_b16 v5.l, exec_hi, null ; encoding: [0x05,0x00,0x63,0xd7,0x7f,0xf8,0x00,0x00]
+# W64-FAKE16: v_or_b16 v5, exec_hi, null ; encoding: [0x05,0x00,0x63,0xd7,0x7f,0xf8,0x00,0x00]
0x05,0x00,0x63,0xd7,0x7f,0xf8,0x00,0x00
-# GFX11: v_or_b16 v5, null, exec_lo ; encoding: [0x05,0x00,0x63,0xd7,0x7c,0xfc,0x00,0x00]
+# W32-REAL16: v_or_b16 v5.l, null, exec_lo ; encoding: [0x05,0x00,0x63,0xd7,0x7c,0xfc,0x00,0x00]
+# W32-FAKE16: v_or_b16 v5, null, exec_lo ; encoding: [0x05,0x00,0x63,0xd7,0x7c,0xfc,0x00,0x00]
+# W64-REAL16: v_or_b16 v5.l, null, exec_lo ; encoding: [0x05,0x00,0x63,0xd7,0x7c,0xfc,0x00,0x00]
+# W64-FAKE16: v_or_b16 v5, null, exec_lo ; encoding: [0x05,0x00,0x63,0xd7,0x7c,0xfc,0x00,0x00]
0x05,0x00,0x63,0xd7,0x7c,0xfc,0x00,0x00
-# GFX11: v_or_b16 v5, -1, exec_hi ; encoding: [0x05,0x00,0x63,0xd7,0xc1,0xfe,0x00,0x00]
+# W32-REAL16: v_or_b16 v5.l, -1, exec_hi ; encoding: [0x05,0x00,0x63,0xd7,0xc1,0xfe,0x00,0x00]
+# W32-FAKE16: v_or_b16 v5, -1, exec_hi ; encoding: [0x05,0x00,0x63,0xd7,0xc1,0xfe,0x00,0x00]
+# W64-REAL16: v_or_b16 v5.l, -1, exec_hi ; encoding: [0x05,0x00,0x63,0xd7,0xc1,0xfe,0x00,0x00]
+# W64-FAKE16: v_or_b16 v5, -1, exec_hi ; encoding: [0x05,0x00,0x63,0xd7,0xc1,0xfe,0x00,0x00]
0x05,0x00,0x63,0xd7,0xc1,0xfe,0x00,0x00
-# GFX11: v_or_b16 v5, 0x3800, m0
+# W32-REAL16: v_or_b16 v5.l, 0x3800, m0 ; encoding: [0x05,0x00,0x63,0xd7,0xff,0xfa,0x00,0x00,0x00,0x38,0x00,0x00]
+# W32-FAKE16: v_or_b16 v5, 0x3800, m0 ; encoding: [0x05,0x00,0x63,0xd7,0xff,0xfa,0x00,0x00,0x00,0x38,0x00,0x00]
+# W64-REAL16: v_or_b16 v5.l, 0x3800, m0 ; encoding: [0x05,0x00,0x63,0xd7,0xff,0xfa,0x00,0x00,0x00,0x38,0x00,0x00]
+# W64-FAKE16: v_or_b16 v5, 0x3800, m0 ; encoding: [0x05,0x00,0x63,0xd7,0xff,0xfa,0x00,0x00,0x00,0x38,0x00,0x00]
0x05,0x00,0x63,0xd7,0xf0,0xfa,0x00,0x00
-# GFX11: v_or_b16 v5, src_scc, vcc_lo ; encoding: [0x05,0x00,0x63,0xd7,0xfd,0xd4,0x00,0x00]
+# W32-REAL16: v_or_b16 v5.l, src_scc, vcc_lo ; encoding: [0x05,0x00,0x63,0xd7,0xfd,0xd4,0x00,0x00]
+# W32-FAKE16: v_or_b16 v5, src_scc, vcc_lo ; encoding: [0x05,0x00,0x63,0xd7,0xfd,0xd4,0x00,0x00]
+# W64-REAL16: v_or_b16 v5.l, src_scc, vcc_lo ; encoding: [0x05,0x00,0x63,0xd7,0xfd,0xd4,0x00,0x00]
+# W64-FAKE16: v_or_b16 v5, src_scc, vcc_lo ; encoding: [0x05,0x00,0x63,0xd7,0xfd,0xd4,0x00,0x00]
0x05,0x00,0x63,0xd7,0xfd,0xd4,0x00,0x00
-# GFX11: v_or_b16 v255, 0xfe0b, vcc_hi ; encoding: [0xff,0x00,0x63,0xd7,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
+# W32-REAL16: v_or_b16 v255.l, 0xfe0b, vcc_hi ; encoding: [0xff,0x00,0x63,0xd7,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
+# W32-FAKE16: v_or_b16 v255, 0xfe0b, vcc_hi ; encoding: [0xff,0x00,0x63,0xd7,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
+# W64-REAL16: v_or_b16 v255.l, 0xfe0b, vcc_hi ; encoding: [0xff,0x00,0x63,0xd7,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
+# W64-FAKE16: v_or_b16 v255, 0xfe0b, vcc_hi ; encoding: [0xff,0x00,0x63,0xd7,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
0xff,0x00,0x63,0xd7,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00
# GFX11: v_pack_b32_f16 v5, v1, v2 ; encoding: [0x05,0x00,0x11,0xd7,0x01,0x05,0x02,0x00]
@@ -5642,47 +5735,92 @@
# GFX11: v_xor3_b32 v255, 0xaf123456, vcc_hi, null ; encoding: [0xff,0x00,0x40,0xd6,0xff,0xd6,0xf0,0x01,0x56,0x34,0x12,0xaf]
0xff,0x00,0x40,0xd6,0xff,0xd6,0xf0,0x01,0x56,0x34,0x12,0xaf
-# GFX11: v_xor_b16 v5, v1, v2 ; encoding: [0x05,0x00,0x64,0xd7,0x01,0x05,0x02,0x00]
+# W32-REAL16: v_xor_b16 v5.l, v1.l, v2.l ; encoding: [0x05,0x00,0x64,0xd7,0x01,0x05,0x02,0x00]
+# W32-FAKE16: v_xor_b16 v5, v1, v2 ; encoding: [0x05,0x00,0x64,0xd7,0x01,0x05,0x02,0x00]
+# W64-REAL16: v_xor_b16 v5.l, v1.l, v2.l ; encoding: [0x05,0x00,0x64,0xd7,0x01,0x05,0x02,0x00]
+# W64-FAKE16: v_xor_b16 v5, v1, v2 ; encoding: [0x05,0x00,0x64,0xd7,0x01,0x05,0x02,0x00]
0x05,0x00,0x64,0xd7,0x01,0x05,0x02,0x00
-# GFX11: v_xor_b16 v5, v255, v255 ; encoding: [0x05,0x00,0x64,0xd7,0xff,0xff,0x03,0x00]
+# W32-REAL16: v_xor_b16 v5.l, v255.l, v255.l ; encoding: [0x05,0x00,0x64,0xd7,0xff,0xff,0x03,0x00]
+# W32-FAKE16: v_xor_b16 v5, v255, v255 ; encoding: [0x05,0x00,0x64,0xd7,0xff,0xff,0x03,0x00]
+# W64-REAL16: v_xor_b16 v5.l, v255.l, v255.l ; encoding: [0x05,0x00,0x64,0xd7,0xff,0xff,0x03,0x00]
+# W64-FAKE16: v_xor_b16 v5, v255, v255 ; encoding: [0x05,0x00,0x64,0xd7,0xff,0xff,0x03,0x00]
0x05,0x00,0x64,0xd7,0xff,0xff,0x03,0x00
-# GFX11: v_xor_b16 v5, s1, s2 ; encoding: [0x05,0x00,0x64,0xd7,0x01,0x04,0x00,0x00]
+# W32-REAL16: v_xor_b16 v5.l, s1, s2 ; encoding: [0x05,0x00,0x64,0xd7,0x01,0x04,0x00,0x00]
+# W32-FAKE16: v_xor_b16 v5, s1, s2 ; encoding: [0x05,0x00,0x64,0xd7,0x01,0x04,0x00,0x00]
+# W64-REAL16: v_xor_b16 v5.l, s1, s2 ; encoding: [0x05,0x00,0x64,0xd7,0x01,0x04,0x00,0x00]
+# W64-FAKE16: v_xor_b16 v5, s1, s2 ; encoding: [0x05,0x00,0x64,0xd7,0x01,0x04,0x00,0x00]
0x05,0x00,0x64,0xd7,0x01,0x04,0x00,0x00
-# GFX11: v_xor_b16 v5, s105, s105 ; encoding: [0x05,0x00,0x64,0xd7,0x69,0xd2,0x00,0x00]
+# W32-REAL16: v_xor_b16 v5.l, s105, s105 ; encoding: [0x05,0x00,0x64,0xd7,0x69,0xd2,0x00,0x00]
+# W32-FAKE16: v_xor_b16 v5, s105, s105 ; encoding: [0x05,0x00,0x64,0xd7,0x69,0xd2,0x00,0x00]
+# W64-REAL16: v_xor_b16 v5.l, s105, s105 ; encoding: [0x05,0x00,0x64,0xd7,0x69,0xd2,0x00,0x00]
+# W64-FAKE16: v_xor_b16 v5, s105, s105 ; encoding: [0x05,0x00,0x64,0xd7,0x69,0xd2,0x00,0x00]
0x05,0x00,0x64,0xd7,0x69,0xd2,0x00,0x00
-# GFX11: v_xor_b16 v5, vcc_lo, ttmp15 ; encoding: [0x05,0x00,0x64,0xd7,0x6a,0xf6,0x00,0x00]
+# W32-REAL16: v_xor_b16 v5.l, vcc_lo, ttmp15 ; encoding: [0x05,0x00,0x64,0xd7,0x6a,0xf6,0x00,0x00]
+# W32-FAKE16: v_xor_b16 v5, vcc_lo, ttmp15 ; encoding: [0x05,0x00,0x64,0xd7,0x6a,0xf6,0x00,0x00]
+# W64-REAL16: v_xor_b16 v5.l, vcc_lo, ttmp15 ; encoding: [0x05,0x00,0x64,0xd7,0x6a,0xf6,0x00,0x00]
+# W64-FAKE16: v_xor_b16 v5, vcc_lo, ttmp15 ; encoding: [0x05,0x00,0x64,0xd7,0x6a,0xf6,0x00,0x00]
0x05,0x00,0x64,0xd7,0x6a,0xf6,0x00,0x00
-# GFX11: v_xor_b16 v5, vcc_hi, 0xfe0b ; encoding: [0x05,0x00,0x64,0xd7,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+# W32-REAL16: v_xor_b16 v5.l, vcc_hi, 0xfe0b ; encoding: [0x05,0x00,0x64,0xd7,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+# W32-FAKE16: v_xor_b16 v5, vcc_hi, 0xfe0b ; encoding: [0x05,0x00,0x64,0xd7,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+# W64-REAL16: v_xor_b16 v5.l, vcc_hi, 0xfe0b ; encoding: [0x05,0x00,0x64,0xd7,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+# W64-FAKE16: v_xor_b16 v5, vcc_hi, 0xfe0b ; encoding: [0x05,0x00,0x64,0xd7,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
0x05,0x00,0x64,0xd7,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00
-# GFX11: v_xor_b16 v5, ttmp15, src_scc ; encoding: [0x05,0x00,0x64,0xd7,0x7b,0xfa,0x01,0x00]
+# W32-REAL16: v_xor_b16 v5.l, ttmp15, src_scc ; encoding: [0x05,0x00,0x64,0xd7,0x7b,0xfa,0x01,0x00]
+# W32-FAKE16: v_xor_b16 v5, ttmp15, src_scc ; encoding: [0x05,0x00,0x64,0xd7,0x7b,0xfa,0x01,0x00]
+# W64-REAL16: v_xor_b16 v5.l, ttmp15, src_scc ; encoding: [0x05,0x00,0x64,0xd7,0x7b,0xfa,0x01,0x00]
+# W64-FAKE16: v_xor_b16 v5, ttmp15, src_scc ; encoding: [0x05,0x00,0x64,0xd7,0x7b,0xfa,0x01,0x00]
0x05,0x00,0x64,0xd7,0x7b,0xfa,0x01,0x00
-# GFX11: v_xor_b16 v5, m0, 0x3800
+# W32-REAL16: v_xor_b16 v5.l, m0, 0x3800 ; encoding: [0x05,0x00,0x64,0xd7,0x7d,0xfe,0x01,0x00,0x00,0x38,0x00,0x00]
+# W32-FAKE16: v_xor_b16 v5, m0, 0x3800 ; encoding: [0x05,0x00,0x64,0xd7,0x7d,0xfe,0x01,0x00,0x00,0x38,0x00,0x00]
+# W64-REAL16: v_xor_b16 v5.l, m0, 0x3800 ; encoding: [0x05,0x00,0x64,0xd7,0x7d,0xfe,0x01,0x00,0x00,0x38,0x00,0x00]
+# W64-FAKE16: v_xor_b16 v5, m0, 0x3800 ; encoding: [0x05,0x00,0x64,0xd7,0x7d,0xfe,0x01,0x00,0x00,0x38,0x00,0x00]
0x05,0x00,0x64,0xd7,0x7d,0xe0,0x01,0x00
-# GFX11: v_xor_b16 v5, exec_lo, -1 ; encoding: [0x05,0x00,0x64,0xd7,0x7e,0x82,0x01,0x00]
+# W32-REAL16: v_xor_b16 v5.l, exec_lo, -1 ; encoding: [0x05,0x00,0x64,0xd7,0x7e,0x82,0x01,0x00]
+# W32-FAKE16: v_xor_b16 v5, exec_lo, -1 ; encoding: [0x05,0x00,0x64,0xd7,0x7e,0x82,0x01,0x00]
+# W64-REAL16: v_xor_b16 v5.l, exec_lo, -1 ; encoding: [0x05,0x00,0x64,0xd7,0x7e,0x82,0x01,0x00]
+# W64-FAKE16: v_xor_b16 v5, exec_lo, -1 ; encoding: [0x05,0x00,0x64,0xd7,0x7e,0x82,0x01,0x00]
0x05,0x00,0x64,0xd7,0x7e,0x82,0x01,0x00
-# GFX11: v_xor_b16 v5, exec_hi, null ; encoding: [0x05,0x00,0x64,0xd7,0x7f,0xf8,0x00,0x00]
+# W32-REAL16: v_xor_b16 v5.l, exec_hi, null ; encoding: [0x05,0x00,0x64,0xd7,0x7f,0xf8,0x00,0x00]
+# W32-FAKE16: v_xor_b16 v5, exec_hi, null ; encoding: [0x05,0x00,0x64,0xd7,0x7f,0xf8,0x00,0x00]
+# W64-REAL16: v_xor_b16 v5.l, exec_hi, null ; encoding: [0x05,0x00,0x64,0xd7,0x7f,0xf8,0x00,0x00]
+# W64-FAKE16: v_xor_b16 v5, exec_hi, null ; encoding: [0x05,0x00,0x64,0xd7,0x7f,0xf8,0x00,0x00]
0x05,0x00,0x64,0xd7,0x7f,0xf8,0x00,0x00
-# GFX11: v_xor_b16 v5, null, exec_lo ; encoding: [0x05,0x00,0x64,0xd7,0x7c,0xfc,0x00,0x00]
+# W32-REAL16: v_xor_b16 v5.l, null, exec_lo ; encoding: [0x05,0x00,0x64,0xd7,0x7c,0xfc,0x00,0x00]
+# W32-FAKE16: v_xor_b16 v5, null, exec_lo ; encoding: [0x05,0x00,0x64,0xd7,0x7c,0xfc,0x00,0x00]
+# W64-REAL16: v_xor_b16 v5.l, null, exec_lo ; encoding: [0x05,0x00,0x64,0xd7,0x7c,0xfc,0x00,0x00]
+# W64-FAKE16: v_xor_b16 v5, null, exec_lo ; encoding: [0x05,0x00,0x64,0xd7,0x7c,0xfc,0x00,0x00]
0x05,0x00,0x64,0xd7,0x7c,0xfc,0x00,0x00
-# GFX11: v_xor_b16 v5, -1, exec_hi ; encoding: [0x05,0x00,0x64,0xd7,0xc1,0xfe,0x00,0x00]
+# W32-REAL16: v_xor_b16 v5.l, -1, exec_hi ; encoding: [0x05,0x00,0x64,0xd7,0xc1,0xfe,0x00,0x00]
+# W32-FAKE16: v_xor_b16 v5, -1, exec_hi ; encoding: [0x05,0x00,0x64,0xd7,0xc1,0xfe,0x00,0x00]
+# W64-REAL16: v_xor_b16 v5.l, -1, exec_hi ; encoding: [0x05,0x00,0x64,0xd7,0xc1,0xfe,0x00,0x00]
+# W64-FAKE16: v_xor_b16 v5, -1, exec_hi ; encoding: [0x05,0x00,0x64,0xd7,0xc1,0xfe,0x00,0x00]
0x05,0x00,0x64,0xd7,0xc1,0xfe,0x00,0x00
-# GFX11: v_xor_b16 v5, 0x3800, m0
+# W32-REAL16: v_xor_b16 v5.l, 0x3800, m0 ; encoding: [0x05,0x00,0x64,0xd7,0xff,0xfa,0x00,0x00,0x00,0x38,0x00,0x00]
+# W32-FAKE16: v_xor_b16 v5, 0x3800, m0 ; encoding: [0x05,0x00,0x64,0xd7,0xff,0xfa,0x00,0x00,0x00,0x38,0x00,0x00]
+# W64-REAL16: v_xor_b16 v5.l, 0x3800, m0 ; encoding: [0x05,0x00,0x64,0xd7,0xff,0xfa,0x00,0x00,0x00,0x38,0x00,0x00]
+# W64-FAKE16: v_xor_b16 v5, 0x3800, m0 ; encoding: [0x05,0x00,0x64,0xd7,0xff,0xfa,0x00,0x00,0x00,0x38,0x00,0x00]
0x05,0x00,0x64,0xd7,0xf0,0xfa,0x00,0x00
-# GFX11: v_xor_b16 v5, src_scc, vcc_lo ; encoding: [0x05,0x00,0x64,0xd7,0xfd,0xd4,0x00,0x00]
+# W32-REAL16: v_xor_b16 v5.l, src_scc, vcc_lo ; encoding: [0x05,0x00,0x64,0xd7,0xfd,0xd4,0x00,0x00]
+# W32-FAKE16: v_xor_b16 v5, src_scc, vcc_lo ; encoding: [0x05,0x00,0x64,0xd7,0xfd,0xd4,0x00,0x00]
+# W64-REAL16: v_xor_b16 v5.l, src_scc, vcc_lo ; encoding: [0x05,0x00,0x64,0xd7,0xfd,0xd4,0x00,0x00]
+# W64-FAKE16: v_xor_b16 v5, src_scc, vcc_lo ; encoding: [0x05,0x00,0x64,0xd7,0xfd,0xd4,0x00,0x00]
0x05,0x00,0x64,0xd7,0xfd,0xd4,0x00,0x00
-# GFX11: v_xor_b16 v255, 0xfe0b, vcc_hi ; encoding: [0xff,0x00,0x64,0xd7,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
+# W32-REAL16: v_xor_b16 v255.l, 0xfe0b, vcc_hi ; encoding: [0xff,0x00,0x64,0xd7,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
+# W32-FAKE16: v_xor_b16 v255, 0xfe0b, vcc_hi ; encoding: [0xff,0x00,0x64,0xd7,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
+# W64-REAL16: v_xor_b16 v255.l, 0xfe0b, vcc_hi ; encoding: [0xff,0x00,0x64,0xd7,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
+# W64-FAKE16: v_xor_b16 v255, 0xfe0b, vcc_hi ; encoding: [0xff,0x00,0x64,0xd7,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
0xff,0x00,0x64,0xd7,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00
diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx11_dasm_vop3_dpp16.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx11_dasm_vop3_dpp16.txt
index 486243c..bf3fa3b 100644
--- a/llvm/test/MC/Disassembler/AMDGPU/gfx11_dasm_vop3_dpp16.txt
+++ b/llvm/test/MC/Disassembler/AMDGPU/gfx11_dasm_vop3_dpp16.txt
@@ -1,5 +1,8 @@
-# RUN: llvm-mc -triple=amdgcn -mcpu=gfx1100 -disassemble -show-encoding < %s | FileCheck -check-prefixes=GFX11,W32 %s
-# RUN: llvm-mc -triple=amdgcn -mcpu=gfx1100 -mattr=+wavefrontsize64 -disassemble -show-encoding < %s | FileCheck -check-prefixes=GFX11,W64 %s
+; NOTE: Assertions have been autogenerated by utils/update_mc_test_checks.py UTC_ARGS: --version 5
+# RUN: llvm-mc -triple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 -disassemble -show-encoding < %s | FileCheck -check-prefixes=GFX11,W32,W32-REAL16 %s
+# RUN: llvm-mc -triple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 -disassemble -show-encoding < %s | FileCheck -check-prefixes=GFX11,W32,W32-FAKE16 %s
+# RUN: llvm-mc -triple=amdgcn -mcpu=gfx1100 -mattr=+wavefrontsize64,+real-true16 -disassemble -show-encoding < %s | FileCheck -check-prefixes=GFX11,W64,W64-REAL16 %s
+# RUN: llvm-mc -triple=amdgcn -mcpu=gfx1100 -mattr=+wavefrontsize64,-real-true16 -disassemble -show-encoding < %s | FileCheck -check-prefixes=GFX11,W64,W64-FAKE16 %s
# GFX11: v_add3_u32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x55,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
0x05,0x00,0x55,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff
@@ -266,46 +269,88 @@
# GFX11: v_alignbyte_b32_e64_dpp v255, v255, v255, src_scc row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0x00,0x17,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x0d,0x30]
0xff,0x00,0x17,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x0d,0x30
-# GFX11: v_and_b16_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+# W32-REAL16: v_and_b16_e64_dpp v5.l, v1.l, v2.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+# W32-FAKE16: v_and_b16_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+# W64-REAL16: v_and_b16_e64_dpp v5.l, v1.l, v2.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+# W64-FAKE16: v_and_b16_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff
-# GFX11: v_and_b16_e64_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+# W32-REAL16: v_and_b16_e64_dpp v5.l, v1.l, v2.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+# W32-FAKE16: v_and_b16_e64_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+# W64-REAL16: v_and_b16_e64_dpp v5.l, v1.l, v2.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+# W64-FAKE16: v_and_b16_e64_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff
-# GFX11: v_and_b16_e64_dpp v5, v1, v2 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+# W32-REAL16: v_and_b16_e64_dpp v5.l, v1.l, v2.l row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+# W32-FAKE16: v_and_b16_e64_dpp v5, v1, v2 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+# W64-REAL16: v_and_b16_e64_dpp v5.l, v1.l, v2.l row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+# W64-FAKE16: v_and_b16_e64_dpp v5, v1, v2 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff
-# GFX11: v_and_b16_e64_dpp v5, v1, v2 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+# W32-REAL16: v_and_b16_e64_dpp v5.l, v1.l, v2.l row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+# W32-FAKE16: v_and_b16_e64_dpp v5, v1, v2 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+# W64-REAL16: v_and_b16_e64_dpp v5.l, v1.l, v2.l row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+# W64-FAKE16: v_and_b16_e64_dpp v5, v1, v2 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff
-# GFX11: v_and_b16_e64_dpp v5, v1, v2 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+# W32-REAL16: v_and_b16_e64_dpp v5.l, v1.l, v2.l row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+# W32-FAKE16: v_and_b16_e64_dpp v5, v1, v2 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+# W64-REAL16: v_and_b16_e64_dpp v5.l, v1.l, v2.l row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+# W64-FAKE16: v_and_b16_e64_dpp v5, v1, v2 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff
-# GFX11: v_and_b16_e64_dpp v5, v1, v2 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+# W32-REAL16: v_and_b16_e64_dpp v5.l, v1.l, v2.l row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+# W32-FAKE16: v_and_b16_e64_dpp v5, v1, v2 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+# W64-REAL16: v_and_b16_e64_dpp v5.l, v1.l, v2.l row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+# W64-FAKE16: v_and_b16_e64_dpp v5, v1, v2 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff
-# GFX11: v_and_b16_e64_dpp v5, v1, v2 row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+# W32-REAL16: v_and_b16_e64_dpp v5.l, v1.l, v2.l row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+# W32-FAKE16: v_and_b16_e64_dpp v5, v1, v2 row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+# W64-REAL16: v_and_b16_e64_dpp v5.l, v1.l, v2.l row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+# W64-FAKE16: v_and_b16_e64_dpp v5, v1, v2 row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff
-# GFX11: v_and_b16_e64_dpp v5, v1, v2 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+# W32-REAL16: v_and_b16_e64_dpp v5.l, v1.l, v2.l row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+# W32-FAKE16: v_and_b16_e64_dpp v5, v1, v2 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+# W64-REAL16: v_and_b16_e64_dpp v5.l, v1.l, v2.l row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+# W64-FAKE16: v_and_b16_e64_dpp v5, v1, v2 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff
-# GFX11: v_and_b16_e64_dpp v5, v1, v2 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+# W32-REAL16: v_and_b16_e64_dpp v5.l, v1.l, v2.l row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+# W32-FAKE16: v_and_b16_e64_dpp v5, v1, v2 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+# W64-REAL16: v_and_b16_e64_dpp v5.l, v1.l, v2.l row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+# W64-FAKE16: v_and_b16_e64_dpp v5, v1, v2 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff
-# GFX11: v_and_b16_e64_dpp v5, v1, v2 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+# W32-REAL16: v_and_b16_e64_dpp v5.l, v1.l, v2.l row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+# W32-FAKE16: v_and_b16_e64_dpp v5, v1, v2 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+# W64-REAL16: v_and_b16_e64_dpp v5.l, v1.l, v2.l row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+# W64-FAKE16: v_and_b16_e64_dpp v5, v1, v2 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff
-# GFX11: v_and_b16_e64_dpp v5, v1, v2 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+# W32-REAL16: v_and_b16_e64_dpp v5.l, v1.l, v2.l row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+# W32-FAKE16: v_and_b16_e64_dpp v5, v1, v2 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+# W64-REAL16: v_and_b16_e64_dpp v5.l, v1.l, v2.l row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+# W64-FAKE16: v_and_b16_e64_dpp v5, v1, v2 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff
-# GFX11: v_and_b16_e64_dpp v5, v1, v2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x5f,0x01,0x01]
+# W32-REAL16: v_and_b16_e64_dpp v5.l, v1.l, v2.l row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x5f,0x01,0x01]
+# W32-FAKE16: v_and_b16_e64_dpp v5, v1, v2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x5f,0x01,0x01]
+# W64-REAL16: v_and_b16_e64_dpp v5.l, v1.l, v2.l row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x5f,0x01,0x01]
+# W64-FAKE16: v_and_b16_e64_dpp v5, v1, v2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x5f,0x01,0x01]
0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x5f,0x01,0x01
-# GFX11: v_and_b16_e64_dpp v5, v1, v2 row_xmask:0 row_mask:0x1 bank_mask:0x3 ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x60,0x01,0x13]
+# W32-REAL16: v_and_b16_e64_dpp v5.l, v1.l, v2.l row_xmask:0 row_mask:0x1 bank_mask:0x3 ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x60,0x01,0x13]
+# W32-FAKE16: v_and_b16_e64_dpp v5, v1, v2 row_xmask:0 row_mask:0x1 bank_mask:0x3 ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x60,0x01,0x13]
+# W64-REAL16: v_and_b16_e64_dpp v5.l, v1.l, v2.l row_xmask:0 row_mask:0x1 bank_mask:0x3 ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x60,0x01,0x13]
+# W64-FAKE16: v_and_b16_e64_dpp v5, v1, v2 row_xmask:0 row_mask:0x1 bank_mask:0x3 ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x60,0x01,0x13]
0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x60,0x01,0x13
-# GFX11: v_and_b16_e64_dpp v255, v255, v255 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0x00,0x62,0xd7,0xfa,0xfe,0x03,0x00,0xff,0x6f,0x0d,0x30]
+# W32-REAL16: v_and_b16_e64_dpp v255.l, v255.l, v255.l row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0x00,0x62,0xd7,0xfa,0xfe,0x03,0x00,0xff,0x6f,0x0d,0x30]
+# W32-FAKE16: v_and_b16_e64_dpp v255, v255, v255 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0x00,0x62,0xd7,0xfa,0xfe,0x03,0x00,0xff,0x6f,0x0d,0x30]
+# W64-REAL16: v_and_b16_e64_dpp v255.l, v255.l, v255.l row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0x00,0x62,0xd7,0xfa,0xfe,0x03,0x00,0xff,0x6f,0x0d,0x30]
+# W64-FAKE16: v_and_b16_e64_dpp v255, v255, v255 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0x00,0x62,0xd7,0xfa,0xfe,0x03,0x00,0xff,0x6f,0x0d,0x30]
0xff,0x00,0x62,0xd7,0xfa,0xfe,0x03,0x00,0xff,0x6f,0x0d,0x30
# GFX11: v_and_or_b32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x57,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
@@ -2637,46 +2682,88 @@
# GFX11: v_or3_b32_e64_dpp v255, v255, v255, src_scc row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0x00,0x58,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x0d,0x30]
0xff,0x00,0x58,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x0d,0x30
-# GFX11: v_or_b16_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+# W32-REAL16: v_or_b16_e64_dpp v5.l, v1.l, v2.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+# W32-FAKE16: v_or_b16_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+# W64-REAL16: v_or_b16_e64_dpp v5.l, v1.l, v2.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+# W64-FAKE16: v_or_b16_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff
-# GFX11: v_or_b16_e64_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+# W32-REAL16: v_or_b16_e64_dpp v5.l, v1.l, v2.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+# W32-FAKE16: v_or_b16_e64_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+# W64-REAL16: v_or_b16_e64_dpp v5.l, v1.l, v2.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+# W64-FAKE16: v_or_b16_e64_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff
-# GFX11: v_or_b16_e64_dpp v5, v1, v2 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+# W32-REAL16: v_or_b16_e64_dpp v5.l, v1.l, v2.l row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+# W32-FAKE16: v_or_b16_e64_dpp v5, v1, v2 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+# W64-REAL16: v_or_b16_e64_dpp v5.l, v1.l, v2.l row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+# W64-FAKE16: v_or_b16_e64_dpp v5, v1, v2 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff
-# GFX11: v_or_b16_e64_dpp v5, v1, v2 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+# W32-REAL16: v_or_b16_e64_dpp v5.l, v1.l, v2.l row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+# W32-FAKE16: v_or_b16_e64_dpp v5, v1, v2 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+# W64-REAL16: v_or_b16_e64_dpp v5.l, v1.l, v2.l row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+# W64-FAKE16: v_or_b16_e64_dpp v5, v1, v2 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff
-# GFX11: v_or_b16_e64_dpp v5, v1, v2 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+# W32-REAL16: v_or_b16_e64_dpp v5.l, v1.l, v2.l row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+# W32-FAKE16: v_or_b16_e64_dpp v5, v1, v2 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+# W64-REAL16: v_or_b16_e64_dpp v5.l, v1.l, v2.l row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+# W64-FAKE16: v_or_b16_e64_dpp v5, v1, v2 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff
-# GFX11: v_or_b16_e64_dpp v5, v1, v2 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+# W32-REAL16: v_or_b16_e64_dpp v5.l, v1.l, v2.l row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+# W32-FAKE16: v_or_b16_e64_dpp v5, v1, v2 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+# W64-REAL16: v_or_b16_e64_dpp v5.l, v1.l, v2.l row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+# W64-FAKE16: v_or_b16_e64_dpp v5, v1, v2 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff
-# GFX11: v_or_b16_e64_dpp v5, v1, v2 row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+# W32-REAL16: v_or_b16_e64_dpp v5.l, v1.l, v2.l row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+# W32-FAKE16: v_or_b16_e64_dpp v5, v1, v2 row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+# W64-REAL16: v_or_b16_e64_dpp v5.l, v1.l, v2.l row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+# W64-FAKE16: v_or_b16_e64_dpp v5, v1, v2 row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff
-# GFX11: v_or_b16_e64_dpp v5, v1, v2 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+# W32-REAL16: v_or_b16_e64_dpp v5.l, v1.l, v2.l row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+# W32-FAKE16: v_or_b16_e64_dpp v5, v1, v2 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+# W64-REAL16: v_or_b16_e64_dpp v5.l, v1.l, v2.l row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+# W64-FAKE16: v_or_b16_e64_dpp v5, v1, v2 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff
-# GFX11: v_or_b16_e64_dpp v5, v1, v2 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+# W32-REAL16: v_or_b16_e64_dpp v5.l, v1.l, v2.l row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+# W32-FAKE16: v_or_b16_e64_dpp v5, v1, v2 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+# W64-REAL16: v_or_b16_e64_dpp v5.l, v1.l, v2.l row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+# W64-FAKE16: v_or_b16_e64_dpp v5, v1, v2 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff
-# GFX11: v_or_b16_e64_dpp v5, v1, v2 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+# W32-REAL16: v_or_b16_e64_dpp v5.l, v1.l, v2.l row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+# W32-FAKE16: v_or_b16_e64_dpp v5, v1, v2 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+# W64-REAL16: v_or_b16_e64_dpp v5.l, v1.l, v2.l row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+# W64-FAKE16: v_or_b16_e64_dpp v5, v1, v2 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff
-# GFX11: v_or_b16_e64_dpp v5, v1, v2 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+# W32-REAL16: v_or_b16_e64_dpp v5.l, v1.l, v2.l row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+# W32-FAKE16: v_or_b16_e64_dpp v5, v1, v2 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+# W64-REAL16: v_or_b16_e64_dpp v5.l, v1.l, v2.l row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+# W64-FAKE16: v_or_b16_e64_dpp v5, v1, v2 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff
-# GFX11: v_or_b16_e64_dpp v5, v1, v2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x5f,0x01,0x01]
+# W32-REAL16: v_or_b16_e64_dpp v5.l, v1.l, v2.l row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x5f,0x01,0x01]
+# W32-FAKE16: v_or_b16_e64_dpp v5, v1, v2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x5f,0x01,0x01]
+# W64-REAL16: v_or_b16_e64_dpp v5.l, v1.l, v2.l row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x5f,0x01,0x01]
+# W64-FAKE16: v_or_b16_e64_dpp v5, v1, v2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x5f,0x01,0x01]
0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x5f,0x01,0x01
-# GFX11: v_or_b16_e64_dpp v5, v1, v2 row_xmask:0 row_mask:0x1 bank_mask:0x3 ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x60,0x01,0x13]
+# W32-REAL16: v_or_b16_e64_dpp v5.l, v1.l, v2.l row_xmask:0 row_mask:0x1 bank_mask:0x3 ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x60,0x01,0x13]
+# W32-FAKE16: v_or_b16_e64_dpp v5, v1, v2 row_xmask:0 row_mask:0x1 bank_mask:0x3 ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x60,0x01,0x13]
+# W64-REAL16: v_or_b16_e64_dpp v5.l, v1.l, v2.l row_xmask:0 row_mask:0x1 bank_mask:0x3 ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x60,0x01,0x13]
+# W64-FAKE16: v_or_b16_e64_dpp v5, v1, v2 row_xmask:0 row_mask:0x1 bank_mask:0x3 ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x60,0x01,0x13]
0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x60,0x01,0x13
-# GFX11: v_or_b16_e64_dpp v255, v255, v255 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0x00,0x63,0xd7,0xfa,0xfe,0x03,0x00,0xff,0x6f,0x0d,0x30]
+# W32-REAL16: v_or_b16_e64_dpp v255.l, v255.l, v255.l row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0x00,0x63,0xd7,0xfa,0xfe,0x03,0x00,0xff,0x6f,0x0d,0x30]
+# W32-FAKE16: v_or_b16_e64_dpp v255, v255, v255 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0x00,0x63,0xd7,0xfa,0xfe,0x03,0x00,0xff,0x6f,0x0d,0x30]
+# W64-REAL16: v_or_b16_e64_dpp v255.l, v255.l, v255.l row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0x00,0x63,0xd7,0xfa,0xfe,0x03,0x00,0xff,0x6f,0x0d,0x30]
+# W64-FAKE16: v_or_b16_e64_dpp v255, v255, v255 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0x00,0x63,0xd7,0xfa,0xfe,0x03,0x00,0xff,0x6f,0x0d,0x30]
0xff,0x00,0x63,0xd7,0xfa,0xfe,0x03,0x00,0xff,0x6f,0x0d,0x30
# GFX11: v_perm_b32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x44,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
@@ -3125,46 +3212,88 @@
# GFX11: v_xor3_b32_e64_dpp v255, v255, v255, src_scc row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0x00,0x40,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x0d,0x30]
0xff,0x00,0x40,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x0d,0x30
-# GFX11: v_xor_b16_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+# W32-REAL16: v_xor_b16_e64_dpp v5.l, v1.l, v2.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+# W32-FAKE16: v_xor_b16_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+# W64-REAL16: v_xor_b16_e64_dpp v5.l, v1.l, v2.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+# W64-FAKE16: v_xor_b16_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff
-# GFX11: v_xor_b16_e64_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+# W32-REAL16: v_xor_b16_e64_dpp v5.l, v1.l, v2.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+# W32-FAKE16: v_xor_b16_e64_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+# W64-REAL16: v_xor_b16_e64_dpp v5.l, v1.l, v2.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+# W64-FAKE16: v_xor_b16_e64_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff
-# GFX11: v_xor_b16_e64_dpp v5, v1, v2 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+# W32-REAL16: v_xor_b16_e64_dpp v5.l, v1.l, v2.l row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+# W32-FAKE16: v_xor_b16_e64_dpp v5, v1, v2 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+# W64-REAL16: v_xor_b16_e64_dpp v5.l, v1.l, v2.l row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+# W64-FAKE16: v_xor_b16_e64_dpp v5, v1, v2 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff
-# GFX11: v_xor_b16_e64_dpp v5, v1, v2 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+# W32-REAL16: v_xor_b16_e64_dpp v5.l, v1.l, v2.l row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+# W32-FAKE16: v_xor_b16_e64_dpp v5, v1, v2 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+# W64-REAL16: v_xor_b16_e64_dpp v5.l, v1.l, v2.l row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+# W64-FAKE16: v_xor_b16_e64_dpp v5, v1, v2 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff
-# GFX11: v_xor_b16_e64_dpp v5, v1, v2 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+# W32-REAL16: v_xor_b16_e64_dpp v5.l, v1.l, v2.l row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+# W32-FAKE16: v_xor_b16_e64_dpp v5, v1, v2 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+# W64-REAL16: v_xor_b16_e64_dpp v5.l, v1.l, v2.l row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+# W64-FAKE16: v_xor_b16_e64_dpp v5, v1, v2 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff
-# GFX11: v_xor_b16_e64_dpp v5, v1, v2 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+# W32-REAL16: v_xor_b16_e64_dpp v5.l, v1.l, v2.l row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+# W32-FAKE16: v_xor_b16_e64_dpp v5, v1, v2 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+# W64-REAL16: v_xor_b16_e64_dpp v5.l, v1.l, v2.l row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+# W64-FAKE16: v_xor_b16_e64_dpp v5, v1, v2 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff
-# GFX11: v_xor_b16_e64_dpp v5, v1, v2 row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+# W32-REAL16: v_xor_b16_e64_dpp v5.l, v1.l, v2.l row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+# W32-FAKE16: v_xor_b16_e64_dpp v5, v1, v2 row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+# W64-REAL16: v_xor_b16_e64_dpp v5.l, v1.l, v2.l row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+# W64-FAKE16: v_xor_b16_e64_dpp v5, v1, v2 row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff
-# GFX11: v_xor_b16_e64_dpp v5, v1, v2 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+# W32-REAL16: v_xor_b16_e64_dpp v5.l, v1.l, v2.l row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+# W32-FAKE16: v_xor_b16_e64_dpp v5, v1, v2 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+# W64-REAL16: v_xor_b16_e64_dpp v5.l, v1.l, v2.l row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+# W64-FAKE16: v_xor_b16_e64_dpp v5, v1, v2 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff
-# GFX11: v_xor_b16_e64_dpp v5, v1, v2 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+# W32-REAL16: v_xor_b16_e64_dpp v5.l, v1.l, v2.l row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+# W32-FAKE16: v_xor_b16_e64_dpp v5, v1, v2 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+# W64-REAL16: v_xor_b16_e64_dpp v5.l, v1.l, v2.l row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+# W64-FAKE16: v_xor_b16_e64_dpp v5, v1, v2 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff
-# GFX11: v_xor_b16_e64_dpp v5, v1, v2 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+# W32-REAL16: v_xor_b16_e64_dpp v5.l, v1.l, v2.l row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+# W32-FAKE16: v_xor_b16_e64_dpp v5, v1, v2 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+# W64-REAL16: v_xor_b16_e64_dpp v5.l, v1.l, v2.l row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+# W64-FAKE16: v_xor_b16_e64_dpp v5, v1, v2 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff
-# GFX11: v_xor_b16_e64_dpp v5, v1, v2 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+# W32-REAL16: v_xor_b16_e64_dpp v5.l, v1.l, v2.l row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+# W32-FAKE16: v_xor_b16_e64_dpp v5, v1, v2 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+# W64-REAL16: v_xor_b16_e64_dpp v5.l, v1.l, v2.l row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+# W64-FAKE16: v_xor_b16_e64_dpp v5, v1, v2 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff
-# GFX11: v_xor_b16_e64_dpp v5, v1, v2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x5f,0x01,0x01]
+# W32-REAL16: v_xor_b16_e64_dpp v5.l, v1.l, v2.l row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x5f,0x01,0x01]
+# W32-FAKE16: v_xor_b16_e64_dpp v5, v1, v2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x5f,0x01,0x01]
+# W64-REAL16: v_xor_b16_e64_dpp v5.l, v1.l, v2.l row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x5f,0x01,0x01]
+# W64-FAKE16: v_xor_b16_e64_dpp v5, v1, v2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x5f,0x01,0x01]
0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x5f,0x01,0x01
-# GFX11: v_xor_b16_e64_dpp v5, v1, v2 row_xmask:0 row_mask:0x1 bank_mask:0x3 ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x60,0x01,0x13]
+# W32-REAL16: v_xor_b16_e64_dpp v5.l, v1.l, v2.l row_xmask:0 row_mask:0x1 bank_mask:0x3 ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x60,0x01,0x13]
+# W32-FAKE16: v_xor_b16_e64_dpp v5, v1, v2 row_xmask:0 row_mask:0x1 bank_mask:0x3 ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x60,0x01,0x13]
+# W64-REAL16: v_xor_b16_e64_dpp v5.l, v1.l, v2.l row_xmask:0 row_mask:0x1 bank_mask:0x3 ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x60,0x01,0x13]
+# W64-FAKE16: v_xor_b16_e64_dpp v5, v1, v2 row_xmask:0 row_mask:0x1 bank_mask:0x3 ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x60,0x01,0x13]
0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x60,0x01,0x13
-# GFX11: v_xor_b16_e64_dpp v255, v255, v255 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0x00,0x64,0xd7,0xfa,0xfe,0x03,0x00,0xff,0x6f,0x0d,0x30]
+# W32-REAL16: v_xor_b16_e64_dpp v255.l, v255.l, v255.l row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0x00,0x64,0xd7,0xfa,0xfe,0x03,0x00,0xff,0x6f,0x0d,0x30]
+# W32-FAKE16: v_xor_b16_e64_dpp v255, v255, v255 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0x00,0x64,0xd7,0xfa,0xfe,0x03,0x00,0xff,0x6f,0x0d,0x30]
+# W64-REAL16: v_xor_b16_e64_dpp v255.l, v255.l, v255.l row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0x00,0x64,0xd7,0xfa,0xfe,0x03,0x00,0xff,0x6f,0x0d,0x30]
+# W64-FAKE16: v_xor_b16_e64_dpp v255, v255, v255 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0x00,0x64,0xd7,0xfa,0xfe,0x03,0x00,0xff,0x6f,0x0d,0x30]
0xff,0x00,0x64,0xd7,0xfa,0xfe,0x03,0x00,0xff,0x6f,0x0d,0x30
# GFX11: v_add_nc_i16_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x0d,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx11_dasm_vop3_dpp8.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx11_dasm_vop3_dpp8.txt
index e88aad3..cdbf798 100644
--- a/llvm/test/MC/Disassembler/AMDGPU/gfx11_dasm_vop3_dpp8.txt
+++ b/llvm/test/MC/Disassembler/AMDGPU/gfx11_dasm_vop3_dpp8.txt
@@ -1,5 +1,8 @@
-# RUN: llvm-mc -triple=amdgcn -mcpu=gfx1100 -disassemble -show-encoding < %s | FileCheck -check-prefixes=GFX11,W32 %s
-# RUN: llvm-mc -triple=amdgcn -mcpu=gfx1100 -mattr=+wavefrontsize64 -disassemble -show-encoding < %s | FileCheck -check-prefixes=GFX11,W64 %s
+; NOTE: Assertions have been autogenerated by utils/update_mc_test_checks.py UTC_ARGS: --version 5
+# RUN: llvm-mc -triple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 -disassemble -show-encoding < %s | FileCheck -check-prefixes=GFX11,W32,W32-REAL16 %s
+# RUN: llvm-mc -triple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 -disassemble -show-encoding < %s | FileCheck -check-prefixes=GFX11,W32,W32-FAKE16 %s
+# RUN: llvm-mc -triple=amdgcn -mcpu=gfx1100 -mattr=+wavefrontsize64,+real-true16 -disassemble -show-encoding < %s | FileCheck -check-prefixes=GFX11,W64,W64-REAL16 %s
+# RUN: llvm-mc -triple=amdgcn -mcpu=gfx1100 -mattr=+wavefrontsize64,-real-true16 -disassemble -show-encoding < %s | FileCheck -check-prefixes=GFX11,W64,W64-FAKE16 %s
# GFX11: v_add3_u32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x55,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
0x05,0x00,0x55,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05
@@ -164,10 +167,16 @@
# GFX11: v_alignbyte_b32_e64_dpp v255, v255, v255, src_scc dpp8:[0,0,0,0,0,0,0,0] fi:1 ; encoding: [0xff,0x00,0x17,0xd6,0xea,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
0xff,0x00,0x17,0xd6,0xea,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00
-# GFX11: v_and_b16_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x62,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+# W32-REAL16: v_and_b16_e64_dpp v5.l, v1.l, v2.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x62,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+# W32-FAKE16: v_and_b16_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x62,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+# W64-REAL16: v_and_b16_e64_dpp v5.l, v1.l, v2.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x62,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+# W64-FAKE16: v_and_b16_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x62,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
0x05,0x00,0x62,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05
-# GFX11: v_and_b16_e64_dpp v255, v255, v255 dpp8:[0,0,0,0,0,0,0,0] fi:1 ; encoding: [0xff,0x00,0x62,0xd7,0xea,0xfe,0x03,0x00,0xff,0x00,0x00,0x00]
+# W32-REAL16: v_and_b16_e64_dpp v255.l, v255.l, v255.l dpp8:[0,0,0,0,0,0,0,0] fi:1 ; encoding: [0xff,0x00,0x62,0xd7,0xea,0xfe,0x03,0x00,0xff,0x00,0x00,0x00]
+# W32-FAKE16: v_and_b16_e64_dpp v255, v255, v255 dpp8:[0,0,0,0,0,0,0,0] fi:1 ; encoding: [0xff,0x00,0x62,0xd7,0xea,0xfe,0x03,0x00,0xff,0x00,0x00,0x00]
+# W64-REAL16: v_and_b16_e64_dpp v255.l, v255.l, v255.l dpp8:[0,0,0,0,0,0,0,0] fi:1 ; encoding: [0xff,0x00,0x62,0xd7,0xea,0xfe,0x03,0x00,0xff,0x00,0x00,0x00]
+# W64-FAKE16: v_and_b16_e64_dpp v255, v255, v255 dpp8:[0,0,0,0,0,0,0,0] fi:1 ; encoding: [0xff,0x00,0x62,0xd7,0xea,0xfe,0x03,0x00,0xff,0x00,0x00,0x00]
0xff,0x00,0x62,0xd7,0xea,0xfe,0x03,0x00,0xff,0x00,0x00,0x00
# GFX11: v_and_or_b32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x57,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
@@ -1599,10 +1608,16 @@
# GFX11: v_or3_b32_e64_dpp v255, v255, v255, src_scc dpp8:[0,0,0,0,0,0,0,0] fi:1 ; encoding: [0xff,0x00,0x58,0xd6,0xea,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
0xff,0x00,0x58,0xd6,0xea,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00
-# GFX11: v_or_b16_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x63,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+# W32-REAL16: v_or_b16_e64_dpp v5.l, v1.l, v2.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x63,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+# W32-FAKE16: v_or_b16_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x63,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+# W64-REAL16: v_or_b16_e64_dpp v5.l, v1.l, v2.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x63,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+# W64-FAKE16: v_or_b16_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x63,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
0x05,0x00,0x63,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05
-# GFX11: v_or_b16_e64_dpp v255, v255, v255 dpp8:[0,0,0,0,0,0,0,0] fi:1 ; encoding: [0xff,0x00,0x63,0xd7,0xea,0xfe,0x03,0x00,0xff,0x00,0x00,0x00]
+# W32-REAL16: v_or_b16_e64_dpp v255.l, v255.l, v255.l dpp8:[0,0,0,0,0,0,0,0] fi:1 ; encoding: [0xff,0x00,0x63,0xd7,0xea,0xfe,0x03,0x00,0xff,0x00,0x00,0x00]
+# W32-FAKE16: v_or_b16_e64_dpp v255, v255, v255 dpp8:[0,0,0,0,0,0,0,0] fi:1 ; encoding: [0xff,0x00,0x63,0xd7,0xea,0xfe,0x03,0x00,0xff,0x00,0x00,0x00]
+# W64-REAL16: v_or_b16_e64_dpp v255.l, v255.l, v255.l dpp8:[0,0,0,0,0,0,0,0] fi:1 ; encoding: [0xff,0x00,0x63,0xd7,0xea,0xfe,0x03,0x00,0xff,0x00,0x00,0x00]
+# W64-FAKE16: v_or_b16_e64_dpp v255, v255, v255 dpp8:[0,0,0,0,0,0,0,0] fi:1 ; encoding: [0xff,0x00,0x63,0xd7,0xea,0xfe,0x03,0x00,0xff,0x00,0x00,0x00]
0xff,0x00,0x63,0xd7,0xea,0xfe,0x03,0x00,0xff,0x00,0x00,0x00
# GFX11: v_perm_b32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x44,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
@@ -1901,10 +1916,16 @@
# GFX11: v_xor3_b32_e64_dpp v255, v255, v255, src_scc dpp8:[0,0,0,0,0,0,0,0] fi:1 ; encoding: [0xff,0x00,0x40,0xd6,0xea,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
0xff,0x00,0x40,0xd6,0xea,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00
-# GFX11: v_xor_b16_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x64,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+# W32-REAL16: v_xor_b16_e64_dpp v5.l, v1.l, v2.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x64,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+# W32-FAKE16: v_xor_b16_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x64,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+# W64-REAL16: v_xor_b16_e64_dpp v5.l, v1.l, v2.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x64,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+# W64-FAKE16: v_xor_b16_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x64,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
0x05,0x00,0x64,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05
-# GFX11: v_xor_b16_e64_dpp v255, v255, v255 dpp8:[0,0,0,0,0,0,0,0] fi:1 ; encoding: [0xff,0x00,0x64,0xd7,0xea,0xfe,0x03,0x00,0xff,0x00,0x00,0x00]
+# W32-REAL16: v_xor_b16_e64_dpp v255.l, v255.l, v255.l dpp8:[0,0,0,0,0,0,0,0] fi:1 ; encoding: [0xff,0x00,0x64,0xd7,0xea,0xfe,0x03,0x00,0xff,0x00,0x00,0x00]
+# W32-FAKE16: v_xor_b16_e64_dpp v255, v255, v255 dpp8:[0,0,0,0,0,0,0,0] fi:1 ; encoding: [0xff,0x00,0x64,0xd7,0xea,0xfe,0x03,0x00,0xff,0x00,0x00,0x00]
+# W64-REAL16: v_xor_b16_e64_dpp v255.l, v255.l, v255.l dpp8:[0,0,0,0,0,0,0,0] fi:1 ; encoding: [0xff,0x00,0x64,0xd7,0xea,0xfe,0x03,0x00,0xff,0x00,0x00,0x00]
+# W64-FAKE16: v_xor_b16_e64_dpp v255, v255, v255 dpp8:[0,0,0,0,0,0,0,0] fi:1 ; encoding: [0xff,0x00,0x64,0xd7,0xea,0xfe,0x03,0x00,0xff,0x00,0x00,0x00]
0xff,0x00,0x64,0xd7,0xea,0xfe,0x03,0x00,0xff,0x00,0x00,0x00
# GFX11: v_add_nc_i16_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x0d,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_sopp.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_sopp.txt
index d42f920..d698015 100644
--- a/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_sopp.txt
+++ b/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_sopp.txt
@@ -60,14 +60,6 @@
# GFX12: s_wait_storecnt_dscnt 0xc1d1 ; encoding: [0xd1,0xc1,0xc9,0xbf]
0xd1,0xc1,0xc9,0xbf
-# GFX12: s_singleuse_vdst 0x0 ; encoding: [0x00,0x00,0x93,0xbf]
-0x00,0x00,0x93,0xbf
-
-# GFX12: s_singleuse_vdst 0xffff ; encoding: [0xff,0xff,0x93,0xbf]
-0xff,0xff,0x93,0xbf
-
-# GFX12: s_singleuse_vdst 0x1234 ; encoding: [0x34,0x12,0x93,0xbf]
-0x34,0x12,0x93,0xbf
# GFX12: s_barrier_wait 0xffff ; encoding: [0xff,0xff,0x94,0xbf]
0xff,0xff,0x94,0xbf
diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_vop3.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_vop3.txt
index 44cbe5f..9b41b22 100644
--- a/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_vop3.txt
+++ b/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_vop3.txt
@@ -1,5 +1,8 @@
-# RUN: llvm-mc -triple=amdgcn -mcpu=gfx1200 -disassemble -show-encoding < %s | FileCheck -check-prefixes=GFX12,W32 %s
-# RUN: llvm-mc -triple=amdgcn -mcpu=gfx1200 -mattr=+wavefrontsize64 -disassemble -show-encoding < %s | FileCheck -check-prefixes=GFX12,W64 %s
+; NOTE: Assertions have been autogenerated by utils/update_mc_test_checks.py UTC_ARGS: --version 5
+# RUN: llvm-mc -triple=amdgcn -mcpu=gfx1200 -mattr=+real-true16 -disassemble -show-encoding < %s | FileCheck -check-prefixes=GFX12,W32,W32-REAL16 %s
+# RUN: llvm-mc -triple=amdgcn -mcpu=gfx1200 -mattr=-real-true16 -disassemble -show-encoding < %s | FileCheck -check-prefixes=GFX12,W32,W32-FAKE16 %s
+# RUN: llvm-mc -triple=amdgcn -mcpu=gfx1200 -mattr=+wavefrontsize64,+real-true16 -disassemble -show-encoding < %s | FileCheck -check-prefixes=GFX12,W64,W64-REAL16 %s
+# RUN: llvm-mc -triple=amdgcn -mcpu=gfx1200 -mattr=+wavefrontsize64,-real-true16 -disassemble -show-encoding < %s | FileCheck -check-prefixes=GFX12,W64,W64-FAKE16 %s
# GFX12: v_add3_u32 v5, v1, v2, s3 ; encoding: [0x05,0x00,0x55,0xd6,0x01,0x05,0x0e,0x00]
0x05,0x00,0x55,0xd6,0x01,0x05,0x0e,0x00
@@ -375,49 +378,94 @@
# GFX12: v_alignbyte_b32 v255, 0xaf123456, vcc_hi, null ; encoding: [0xff,0x00,0x17,0xd6,0xff,0xd6,0xf0,0x01,0x56,0x34,0x12,0xaf]
0xff,0x00,0x17,0xd6,0xff,0xd6,0xf0,0x01,0x56,0x34,0x12,0xaf
-# GFX12: v_and_b16 v5, v1, v2 ; encoding: [0x05,0x00,0x62,0xd7,0x01,0x05,0x02,0x00]
+# W32-REAL16: v_and_b16 v5.l, v1.l, v2.l ; encoding: [0x05,0x00,0x62,0xd7,0x01,0x05,0x02,0x00]
+# W32-FAKE16: v_and_b16 v5, v1, v2 ; encoding: [0x05,0x00,0x62,0xd7,0x01,0x05,0x02,0x00]
+# W64-REAL16: v_and_b16 v5.l, v1.l, v2.l ; encoding: [0x05,0x00,0x62,0xd7,0x01,0x05,0x02,0x00]
+# W64-FAKE16: v_and_b16 v5, v1, v2 ; encoding: [0x05,0x00,0x62,0xd7,0x01,0x05,0x02,0x00]
0x05,0x00,0x62,0xd7,0x01,0x05,0x02,0x00
-# GFX12: v_and_b16 v5, v255, v255 ; encoding: [0x05,0x00,0x62,0xd7,0xff,0xff,0x03,0x00]
+# W32-REAL16: v_and_b16 v5.l, v255.l, v255.l ; encoding: [0x05,0x00,0x62,0xd7,0xff,0xff,0x03,0x00]
+# W32-FAKE16: v_and_b16 v5, v255, v255 ; encoding: [0x05,0x00,0x62,0xd7,0xff,0xff,0x03,0x00]
+# W64-REAL16: v_and_b16 v5.l, v255.l, v255.l ; encoding: [0x05,0x00,0x62,0xd7,0xff,0xff,0x03,0x00]
+# W64-FAKE16: v_and_b16 v5, v255, v255 ; encoding: [0x05,0x00,0x62,0xd7,0xff,0xff,0x03,0x00]
0x05,0x00,0x62,0xd7,0xff,0xff,0x03,0x00
-# GFX12: v_and_b16 v5, s1, s2 ; encoding: [0x05,0x00,0x62,0xd7,0x01,0x04,0x00,0x00]
+# W32-REAL16: v_and_b16 v5.l, s1, s2 ; encoding: [0x05,0x00,0x62,0xd7,0x01,0x04,0x00,0x00]
+# W32-FAKE16: v_and_b16 v5, s1, s2 ; encoding: [0x05,0x00,0x62,0xd7,0x01,0x04,0x00,0x00]
+# W64-REAL16: v_and_b16 v5.l, s1, s2 ; encoding: [0x05,0x00,0x62,0xd7,0x01,0x04,0x00,0x00]
+# W64-FAKE16: v_and_b16 v5, s1, s2 ; encoding: [0x05,0x00,0x62,0xd7,0x01,0x04,0x00,0x00]
0x05,0x00,0x62,0xd7,0x01,0x04,0x00,0x00
-# GFX12: v_and_b16 v5, s105, s105 ; encoding: [0x05,0x00,0x62,0xd7,0x69,0xd2,0x00,0x00]
+# W32-REAL16: v_and_b16 v5.l, s105, s105 ; encoding: [0x05,0x00,0x62,0xd7,0x69,0xd2,0x00,0x00]
+# W32-FAKE16: v_and_b16 v5, s105, s105 ; encoding: [0x05,0x00,0x62,0xd7,0x69,0xd2,0x00,0x00]
+# W64-REAL16: v_and_b16 v5.l, s105, s105 ; encoding: [0x05,0x00,0x62,0xd7,0x69,0xd2,0x00,0x00]
+# W64-FAKE16: v_and_b16 v5, s105, s105 ; encoding: [0x05,0x00,0x62,0xd7,0x69,0xd2,0x00,0x00]
0x05,0x00,0x62,0xd7,0x69,0xd2,0x00,0x00
-# GFX12: v_and_b16 v5, vcc_lo, ttmp15 ; encoding: [0x05,0x00,0x62,0xd7,0x6a,0xf6,0x00,0x00]
+# W32-REAL16: v_and_b16 v5.l, vcc_lo, ttmp15 ; encoding: [0x05,0x00,0x62,0xd7,0x6a,0xf6,0x00,0x00]
+# W32-FAKE16: v_and_b16 v5, vcc_lo, ttmp15 ; encoding: [0x05,0x00,0x62,0xd7,0x6a,0xf6,0x00,0x00]
+# W64-REAL16: v_and_b16 v5.l, vcc_lo, ttmp15 ; encoding: [0x05,0x00,0x62,0xd7,0x6a,0xf6,0x00,0x00]
+# W64-FAKE16: v_and_b16 v5, vcc_lo, ttmp15 ; encoding: [0x05,0x00,0x62,0xd7,0x6a,0xf6,0x00,0x00]
0x05,0x00,0x62,0xd7,0x6a,0xf6,0x00,0x00
-# GFX12: v_and_b16 v5, vcc_hi, 0xfe0b ; encoding: [0x05,0x00,0x62,0xd7,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+# W32-REAL16: v_and_b16 v5.l, vcc_hi, 0xfe0b ; encoding: [0x05,0x00,0x62,0xd7,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+# W32-FAKE16: v_and_b16 v5, vcc_hi, 0xfe0b ; encoding: [0x05,0x00,0x62,0xd7,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+# W64-REAL16: v_and_b16 v5.l, vcc_hi, 0xfe0b ; encoding: [0x05,0x00,0x62,0xd7,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+# W64-FAKE16: v_and_b16 v5, vcc_hi, 0xfe0b ; encoding: [0x05,0x00,0x62,0xd7,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
0x05,0x00,0x62,0xd7,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00
-# GFX12: v_and_b16 v5, ttmp15, src_scc ; encoding: [0x05,0x00,0x62,0xd7,0x7b,0xfa,0x01,0x00]
+# W32-REAL16: v_and_b16 v5.l, ttmp15, src_scc ; encoding: [0x05,0x00,0x62,0xd7,0x7b,0xfa,0x01,0x00]
+# W32-FAKE16: v_and_b16 v5, ttmp15, src_scc ; encoding: [0x05,0x00,0x62,0xd7,0x7b,0xfa,0x01,0x00]
+# W64-REAL16: v_and_b16 v5.l, ttmp15, src_scc ; encoding: [0x05,0x00,0x62,0xd7,0x7b,0xfa,0x01,0x00]
+# W64-FAKE16: v_and_b16 v5, ttmp15, src_scc ; encoding: [0x05,0x00,0x62,0xd7,0x7b,0xfa,0x01,0x00]
0x05,0x00,0x62,0xd7,0x7b,0xfa,0x01,0x00
-# GFX12: v_and_b16 v5, m0, 0x3800
+# W32-REAL16: v_and_b16 v5.l, m0, 0x3800 ; encoding: [0x05,0x00,0x62,0xd7,0x7d,0xfe,0x01,0x00,0x00,0x38,0x00,0x00]
+# W32-FAKE16: v_and_b16 v5, m0, 0x3800 ; encoding: [0x05,0x00,0x62,0xd7,0x7d,0xfe,0x01,0x00,0x00,0x38,0x00,0x00]
+# W64-REAL16: v_and_b16 v5.l, m0, 0x3800 ; encoding: [0x05,0x00,0x62,0xd7,0x7d,0xfe,0x01,0x00,0x00,0x38,0x00,0x00]
+# W64-FAKE16: v_and_b16 v5, m0, 0x3800 ; encoding: [0x05,0x00,0x62,0xd7,0x7d,0xfe,0x01,0x00,0x00,0x38,0x00,0x00]
0x05,0x00,0x62,0xd7,0x7d,0xe0,0x01,0x00
-# GFX12: v_and_b16 v5, exec_lo, -1 ; encoding: [0x05,0x00,0x62,0xd7,0x7e,0x82,0x01,0x00]
+# W32-REAL16: v_and_b16 v5.l, exec_lo, -1 ; encoding: [0x05,0x00,0x62,0xd7,0x7e,0x82,0x01,0x00]
+# W32-FAKE16: v_and_b16 v5, exec_lo, -1 ; encoding: [0x05,0x00,0x62,0xd7,0x7e,0x82,0x01,0x00]
+# W64-REAL16: v_and_b16 v5.l, exec_lo, -1 ; encoding: [0x05,0x00,0x62,0xd7,0x7e,0x82,0x01,0x00]
+# W64-FAKE16: v_and_b16 v5, exec_lo, -1 ; encoding: [0x05,0x00,0x62,0xd7,0x7e,0x82,0x01,0x00]
0x05,0x00,0x62,0xd7,0x7e,0x82,0x01,0x00
-# GFX12: v_and_b16 v5, exec_hi, null ; encoding: [0x05,0x00,0x62,0xd7,0x7f,0xf8,0x00,0x00]
+# W32-REAL16: v_and_b16 v5.l, exec_hi, null ; encoding: [0x05,0x00,0x62,0xd7,0x7f,0xf8,0x00,0x00]
+# W32-FAKE16: v_and_b16 v5, exec_hi, null ; encoding: [0x05,0x00,0x62,0xd7,0x7f,0xf8,0x00,0x00]
+# W64-REAL16: v_and_b16 v5.l, exec_hi, null ; encoding: [0x05,0x00,0x62,0xd7,0x7f,0xf8,0x00,0x00]
+# W64-FAKE16: v_and_b16 v5, exec_hi, null ; encoding: [0x05,0x00,0x62,0xd7,0x7f,0xf8,0x00,0x00]
0x05,0x00,0x62,0xd7,0x7f,0xf8,0x00,0x00
-# GFX12: v_and_b16 v5, null, exec_lo ; encoding: [0x05,0x00,0x62,0xd7,0x7c,0xfc,0x00,0x00]
+# W32-REAL16: v_and_b16 v5.l, null, exec_lo ; encoding: [0x05,0x00,0x62,0xd7,0x7c,0xfc,0x00,0x00]
+# W32-FAKE16: v_and_b16 v5, null, exec_lo ; encoding: [0x05,0x00,0x62,0xd7,0x7c,0xfc,0x00,0x00]
+# W64-REAL16: v_and_b16 v5.l, null, exec_lo ; encoding: [0x05,0x00,0x62,0xd7,0x7c,0xfc,0x00,0x00]
+# W64-FAKE16: v_and_b16 v5, null, exec_lo ; encoding: [0x05,0x00,0x62,0xd7,0x7c,0xfc,0x00,0x00]
0x05,0x00,0x62,0xd7,0x7c,0xfc,0x00,0x00
-# GFX12: v_and_b16 v5, -1, exec_hi ; encoding: [0x05,0x00,0x62,0xd7,0xc1,0xfe,0x00,0x00]
+# W32-REAL16: v_and_b16 v5.l, -1, exec_hi ; encoding: [0x05,0x00,0x62,0xd7,0xc1,0xfe,0x00,0x00]
+# W32-FAKE16: v_and_b16 v5, -1, exec_hi ; encoding: [0x05,0x00,0x62,0xd7,0xc1,0xfe,0x00,0x00]
+# W64-REAL16: v_and_b16 v5.l, -1, exec_hi ; encoding: [0x05,0x00,0x62,0xd7,0xc1,0xfe,0x00,0x00]
+# W64-FAKE16: v_and_b16 v5, -1, exec_hi ; encoding: [0x05,0x00,0x62,0xd7,0xc1,0xfe,0x00,0x00]
0x05,0x00,0x62,0xd7,0xc1,0xfe,0x00,0x00
-# GFX12: v_and_b16 v5, 0x3800, m0
+# W32-REAL16: v_and_b16 v5.l, 0x3800, m0 ; encoding: [0x05,0x00,0x62,0xd7,0xff,0xfa,0x00,0x00,0x00,0x38,0x00,0x00]
+# W32-FAKE16: v_and_b16 v5, 0x3800, m0 ; encoding: [0x05,0x00,0x62,0xd7,0xff,0xfa,0x00,0x00,0x00,0x38,0x00,0x00]
+# W64-REAL16: v_and_b16 v5.l, 0x3800, m0 ; encoding: [0x05,0x00,0x62,0xd7,0xff,0xfa,0x00,0x00,0x00,0x38,0x00,0x00]
+# W64-FAKE16: v_and_b16 v5, 0x3800, m0 ; encoding: [0x05,0x00,0x62,0xd7,0xff,0xfa,0x00,0x00,0x00,0x38,0x00,0x00]
0x05,0x00,0x62,0xd7,0xf0,0xfa,0x00,0x00
-# GFX12: v_and_b16 v5, src_scc, vcc_lo ; encoding: [0x05,0x00,0x62,0xd7,0xfd,0xd4,0x00,0x00]
+# W32-REAL16: v_and_b16 v5.l, src_scc, vcc_lo ; encoding: [0x05,0x00,0x62,0xd7,0xfd,0xd4,0x00,0x00]
+# W32-FAKE16: v_and_b16 v5, src_scc, vcc_lo ; encoding: [0x05,0x00,0x62,0xd7,0xfd,0xd4,0x00,0x00]
+# W64-REAL16: v_and_b16 v5.l, src_scc, vcc_lo ; encoding: [0x05,0x00,0x62,0xd7,0xfd,0xd4,0x00,0x00]
+# W64-FAKE16: v_and_b16 v5, src_scc, vcc_lo ; encoding: [0x05,0x00,0x62,0xd7,0xfd,0xd4,0x00,0x00]
0x05,0x00,0x62,0xd7,0xfd,0xd4,0x00,0x00
-# GFX12: v_and_b16 v255, 0xfe0b, vcc_hi ; encoding: [0xff,0x00,0x62,0xd7,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
+# W32-REAL16: v_and_b16 v255.l, 0xfe0b, vcc_hi ; encoding: [0xff,0x00,0x62,0xd7,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
+# W32-FAKE16: v_and_b16 v255, 0xfe0b, vcc_hi ; encoding: [0xff,0x00,0x62,0xd7,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
+# W64-REAL16: v_and_b16 v255.l, 0xfe0b, vcc_hi ; encoding: [0xff,0x00,0x62,0xd7,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
+# W64-FAKE16: v_and_b16 v255, 0xfe0b, vcc_hi ; encoding: [0xff,0x00,0x62,0xd7,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
0xff,0x00,0x62,0xd7,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00
# GFX12: v_and_or_b32 v5, v1, v2, s3 ; encoding: [0x05,0x00,0x57,0xd6,0x01,0x05,0x0e,0x00]
@@ -4597,49 +4645,107 @@
# GFX12: v_or3_b32 v255, 0xaf123456, vcc_hi, null ; encoding: [0xff,0x00,0x58,0xd6,0xff,0xd6,0xf0,0x01,0x56,0x34,0x12,0xaf]
0xff,0x00,0x58,0xd6,0xff,0xd6,0xf0,0x01,0x56,0x34,0x12,0xaf
-# GFX12: v_or_b16 v5, v1, v2 ; encoding: [0x05,0x00,0x63,0xd7,0x01,0x05,0x02,0x00]
+
+# W32-REAL16: v_or_b16 v5.l, v1.l, v2.l ; encoding: [0x05,0x00,0x63,0xd7,0x01,0x05,0x02,0x00]
+# W32-FAKE16: v_or_b16 v5, v1, v2 ; encoding: [0x05,0x00,0x63,0xd7,0x01,0x05,0x02,0x00]
+# W64-REAL16: v_or_b16 v5.l, v1.l, v2.l ; encoding: [0x05,0x00,0x63,0xd7,0x01,0x05,0x02,0x00]
+# W64-FAKE16: v_or_b16 v5, v1, v2 ; encoding: [0x05,0x00,0x63,0xd7,0x01,0x05,0x02,0x00]
0x05,0x00,0x63,0xd7,0x01,0x05,0x02,0x00
-# GFX12: v_or_b16 v5, v255, v255 ; encoding: [0x05,0x00,0x63,0xd7,0xff,0xff,0x03,0x00]
+
+# W32-REAL16: v_or_b16 v5.l, v255.l, v255.l ; encoding: [0x05,0x00,0x63,0xd7,0xff,0xff,0x03,0x00]
+# W32-FAKE16: v_or_b16 v5, v255, v255 ; encoding: [0x05,0x00,0x63,0xd7,0xff,0xff,0x03,0x00]
+# W64-REAL16: v_or_b16 v5.l, v255.l, v255.l ; encoding: [0x05,0x00,0x63,0xd7,0xff,0xff,0x03,0x00]
+# W64-FAKE16: v_or_b16 v5, v255, v255 ; encoding: [0x05,0x00,0x63,0xd7,0xff,0xff,0x03,0x00]
0x05,0x00,0x63,0xd7,0xff,0xff,0x03,0x00
-# GFX12: v_or_b16 v5, s1, s2 ; encoding: [0x05,0x00,0x63,0xd7,0x01,0x04,0x00,0x00]
+
+# W32-REAL16: v_or_b16 v5.l, s1, s2 ; encoding: [0x05,0x00,0x63,0xd7,0x01,0x04,0x00,0x00]
+# W32-FAKE16: v_or_b16 v5, s1, s2 ; encoding: [0x05,0x00,0x63,0xd7,0x01,0x04,0x00,0x00]
+# W64-REAL16: v_or_b16 v5.l, s1, s2 ; encoding: [0x05,0x00,0x63,0xd7,0x01,0x04,0x00,0x00]
+# W64-FAKE16: v_or_b16 v5, s1, s2 ; encoding: [0x05,0x00,0x63,0xd7,0x01,0x04,0x00,0x00]
0x05,0x00,0x63,0xd7,0x01,0x04,0x00,0x00
-# GFX12: v_or_b16 v5, s105, s105 ; encoding: [0x05,0x00,0x63,0xd7,0x69,0xd2,0x00,0x00]
+
+# W32-REAL16: v_or_b16 v5.l, s105, s105 ; encoding: [0x05,0x00,0x63,0xd7,0x69,0xd2,0x00,0x00]
+# W32-FAKE16: v_or_b16 v5, s105, s105 ; encoding: [0x05,0x00,0x63,0xd7,0x69,0xd2,0x00,0x00]
+# W64-REAL16: v_or_b16 v5.l, s105, s105 ; encoding: [0x05,0x00,0x63,0xd7,0x69,0xd2,0x00,0x00]
+# W64-FAKE16: v_or_b16 v5, s105, s105 ; encoding: [0x05,0x00,0x63,0xd7,0x69,0xd2,0x00,0x00]
0x05,0x00,0x63,0xd7,0x69,0xd2,0x00,0x00
-# GFX12: v_or_b16 v5, vcc_lo, ttmp15 ; encoding: [0x05,0x00,0x63,0xd7,0x6a,0xf6,0x00,0x00]
+
+# W32-REAL16: v_or_b16 v5.l, vcc_lo, ttmp15 ; encoding: [0x05,0x00,0x63,0xd7,0x6a,0xf6,0x00,0x00]
+# W32-FAKE16: v_or_b16 v5, vcc_lo, ttmp15 ; encoding: [0x05,0x00,0x63,0xd7,0x6a,0xf6,0x00,0x00]
+# W64-REAL16: v_or_b16 v5.l, vcc_lo, ttmp15 ; encoding: [0x05,0x00,0x63,0xd7,0x6a,0xf6,0x00,0x00]
+# W64-FAKE16: v_or_b16 v5, vcc_lo, ttmp15 ; encoding: [0x05,0x00,0x63,0xd7,0x6a,0xf6,0x00,0x00]
0x05,0x00,0x63,0xd7,0x6a,0xf6,0x00,0x00
-# GFX12: v_or_b16 v5, vcc_hi, 0xfe0b ; encoding: [0x05,0x00,0x63,0xd7,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+# W32-REAL16: v_or_b16 v5.l, vcc_hi, 0xfe0b ; encoding: [0x05,0x00,0x63,0xd7,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+# W32-FAKE16: v_or_b16 v5, vcc_hi, 0xfe0b ; encoding: [0x05,0x00,0x63,0xd7,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+# W64-REAL16: v_or_b16 v5.l, vcc_hi, 0xfe0b ; encoding: [0x05,0x00,0x63,0xd7,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+# W64-FAKE16: v_or_b16 v5, vcc_hi, 0xfe0b ; encoding: [0x05,0x00,0x63,0xd7,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
0x05,0x00,0x63,0xd7,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00
-# GFX12: v_or_b16 v5, ttmp15, src_scc ; encoding: [0x05,0x00,0x63,0xd7,0x7b,0xfa,0x01,0x00]
+
+# W32-REAL16: v_or_b16 v5.l, ttmp15, src_scc ; encoding: [0x05,0x00,0x63,0xd7,0x7b,0xfa,0x01,0x00]
+# W32-FAKE16: v_or_b16 v5, ttmp15, src_scc ; encoding: [0x05,0x00,0x63,0xd7,0x7b,0xfa,0x01,0x00]
+# W64-REAL16: v_or_b16 v5.l, ttmp15, src_scc ; encoding: [0x05,0x00,0x63,0xd7,0x7b,0xfa,0x01,0x00]
+# W64-FAKE16: v_or_b16 v5, ttmp15, src_scc ; encoding: [0x05,0x00,0x63,0xd7,0x7b,0xfa,0x01,0x00]
0x05,0x00,0x63,0xd7,0x7b,0xfa,0x01,0x00
-# GFX12: v_or_b16 v5, m0, 0x3800
+# W32-REAL16: v_or_b16 v5.l, m0, 0x3800 ; encoding: [0x05,0x00,0x63,0xd7,0x7d,0xfe,0x01,0x00,0x00,0x38,0x00,0x00]
+# W32-FAKE16: v_or_b16 v5, m0, 0x3800 ; encoding: [0x05,0x00,0x63,0xd7,0x7d,0xfe,0x01,0x00,0x00,0x38,0x00,0x00]
+# W64-REAL16: v_or_b16 v5.l, m0, 0x3800 ; encoding: [0x05,0x00,0x63,0xd7,0x7d,0xfe,0x01,0x00,0x00,0x38,0x00,0x00]
+# W64-FAKE16: v_or_b16 v5, m0, 0x3800 ; encoding: [0x05,0x00,0x63,0xd7,0x7d,0xfe,0x01,0x00,0x00,0x38,0x00,0x00]
0x05,0x00,0x63,0xd7,0x7d,0xe0,0x01,0x00
-# GFX12: v_or_b16 v5, exec_lo, -1 ; encoding: [0x05,0x00,0x63,0xd7,0x7e,0x82,0x01,0x00]
+
+# W32-REAL16: v_or_b16 v5.l, exec_lo, -1 ; encoding: [0x05,0x00,0x63,0xd7,0x7e,0x82,0x01,0x00]
+# W32-FAKE16: v_or_b16 v5, exec_lo, -1 ; encoding: [0x05,0x00,0x63,0xd7,0x7e,0x82,0x01,0x00]
+# W64-REAL16: v_or_b16 v5.l, exec_lo, -1 ; encoding: [0x05,0x00,0x63,0xd7,0x7e,0x82,0x01,0x00]
+# W64-FAKE16: v_or_b16 v5, exec_lo, -1 ; encoding: [0x05,0x00,0x63,0xd7,0x7e,0x82,0x01,0x00]
0x05,0x00,0x63,0xd7,0x7e,0x82,0x01,0x00
-# GFX12: v_or_b16 v5, exec_hi, null ; encoding: [0x05,0x00,0x63,0xd7,0x7f,0xf8,0x00,0x00]
+
+# W32-REAL16: v_or_b16 v5.l, exec_hi, null ; encoding: [0x05,0x00,0x63,0xd7,0x7f,0xf8,0x00,0x00]
+# W32-FAKE16: v_or_b16 v5, exec_hi, null ; encoding: [0x05,0x00,0x63,0xd7,0x7f,0xf8,0x00,0x00]
+# W64-REAL16: v_or_b16 v5.l, exec_hi, null ; encoding: [0x05,0x00,0x63,0xd7,0x7f,0xf8,0x00,0x00]
+# W64-FAKE16: v_or_b16 v5, exec_hi, null ; encoding: [0x05,0x00,0x63,0xd7,0x7f,0xf8,0x00,0x00]
0x05,0x00,0x63,0xd7,0x7f,0xf8,0x00,0x00
-# GFX12: v_or_b16 v5, null, exec_lo ; encoding: [0x05,0x00,0x63,0xd7,0x7c,0xfc,0x00,0x00]
+
+# W32-REAL16: v_or_b16 v5.l, null, exec_lo ; encoding: [0x05,0x00,0x63,0xd7,0x7c,0xfc,0x00,0x00]
+# W32-FAKE16: v_or_b16 v5, null, exec_lo ; encoding: [0x05,0x00,0x63,0xd7,0x7c,0xfc,0x00,0x00]
+# W64-REAL16: v_or_b16 v5.l, null, exec_lo ; encoding: [0x05,0x00,0x63,0xd7,0x7c,0xfc,0x00,0x00]
+# W64-FAKE16: v_or_b16 v5, null, exec_lo ; encoding: [0x05,0x00,0x63,0xd7,0x7c,0xfc,0x00,0x00]
0x05,0x00,0x63,0xd7,0x7c,0xfc,0x00,0x00
-# GFX12: v_or_b16 v5, -1, exec_hi ; encoding: [0x05,0x00,0x63,0xd7,0xc1,0xfe,0x00,0x00]
+
+# W32-REAL16: v_or_b16 v5.l, -1, exec_hi ; encoding: [0x05,0x00,0x63,0xd7,0xc1,0xfe,0x00,0x00]
+# W32-FAKE16: v_or_b16 v5, -1, exec_hi ; encoding: [0x05,0x00,0x63,0xd7,0xc1,0xfe,0x00,0x00]
+# W64-REAL16: v_or_b16 v5.l, -1, exec_hi ; encoding: [0x05,0x00,0x63,0xd7,0xc1,0xfe,0x00,0x00]
+# W64-FAKE16: v_or_b16 v5, -1, exec_hi ; encoding: [0x05,0x00,0x63,0xd7,0xc1,0xfe,0x00,0x00]
0x05,0x00,0x63,0xd7,0xc1,0xfe,0x00,0x00
-# GFX12: v_or_b16 v5, 0x3800, m0
+# W32-REAL16: v_or_b16 v5.l, 0x3800, m0 ; encoding: [0x05,0x00,0x63,0xd7,0xff,0xfa,0x00,0x00,0x00,0x38,0x00,0x00]
+# W32-FAKE16: v_or_b16 v5, 0x3800, m0 ; encoding: [0x05,0x00,0x63,0xd7,0xff,0xfa,0x00,0x00,0x00,0x38,0x00,0x00]
+# W64-REAL16: v_or_b16 v5.l, 0x3800, m0 ; encoding: [0x05,0x00,0x63,0xd7,0xff,0xfa,0x00,0x00,0x00,0x38,0x00,0x00]
+# W64-FAKE16: v_or_b16 v5, 0x3800, m0 ; encoding: [0x05,0x00,0x63,0xd7,0xff,0xfa,0x00,0x00,0x00,0x38,0x00,0x00]
0x05,0x00,0x63,0xd7,0xf0,0xfa,0x00,0x00
-# GFX12: v_or_b16 v5, src_scc, vcc_lo ; encoding: [0x05,0x00,0x63,0xd7,0xfd,0xd4,0x00,0x00]
+
+# W32-REAL16: v_or_b16 v5.l, src_scc, vcc_lo ; encoding: [0x05,0x00,0x63,0xd7,0xfd,0xd4,0x00,0x00]
+# W32-FAKE16: v_or_b16 v5, src_scc, vcc_lo ; encoding: [0x05,0x00,0x63,0xd7,0xfd,0xd4,0x00,0x00]
+# W64-REAL16: v_or_b16 v5.l, src_scc, vcc_lo ; encoding: [0x05,0x00,0x63,0xd7,0xfd,0xd4,0x00,0x00]
+# W64-FAKE16: v_or_b16 v5, src_scc, vcc_lo ; encoding: [0x05,0x00,0x63,0xd7,0xfd,0xd4,0x00,0x00]
0x05,0x00,0x63,0xd7,0xfd,0xd4,0x00,0x00
-# GFX12: v_or_b16 v255, 0xfe0b, vcc_hi ; encoding: [0xff,0x00,0x63,0xd7,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
+
+# W32-REAL16: v_or_b16 v255.l, 0xfe0b, vcc_hi ; encoding: [0xff,0x00,0x63,0xd7,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
+# W32-FAKE16: v_or_b16 v255, 0xfe0b, vcc_hi ; encoding: [0xff,0x00,0x63,0xd7,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
+# W64-REAL16: v_or_b16 v255.l, 0xfe0b, vcc_hi ; encoding: [0xff,0x00,0x63,0xd7,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
+# W64-FAKE16: v_or_b16 v255, 0xfe0b, vcc_hi ; encoding: [0xff,0x00,0x63,0xd7,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
0xff,0x00,0x63,0xd7,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00
# GFX12: v_pack_b32_f16 v5, v1, v2 ; encoding: [0x05,0x00,0x11,0xd7,0x01,0x05,0x02,0x00]
@@ -5555,49 +5661,107 @@
# GFX12: v_xor3_b32 v255, 0xaf123456, vcc_hi, null ; encoding: [0xff,0x00,0x40,0xd6,0xff,0xd6,0xf0,0x01,0x56,0x34,0x12,0xaf]
0xff,0x00,0x40,0xd6,0xff,0xd6,0xf0,0x01,0x56,0x34,0x12,0xaf
-# GFX12: v_xor_b16 v5, v1, v2 ; encoding: [0x05,0x00,0x64,0xd7,0x01,0x05,0x02,0x00]
+
+# W32-REAL16: v_xor_b16 v5.l, v1.l, v2.l ; encoding: [0x05,0x00,0x64,0xd7,0x01,0x05,0x02,0x00]
+# W32-FAKE16: v_xor_b16 v5, v1, v2 ; encoding: [0x05,0x00,0x64,0xd7,0x01,0x05,0x02,0x00]
+# W64-REAL16: v_xor_b16 v5.l, v1.l, v2.l ; encoding: [0x05,0x00,0x64,0xd7,0x01,0x05,0x02,0x00]
+# W64-FAKE16: v_xor_b16 v5, v1, v2 ; encoding: [0x05,0x00,0x64,0xd7,0x01,0x05,0x02,0x00]
0x05,0x00,0x64,0xd7,0x01,0x05,0x02,0x00
-# GFX12: v_xor_b16 v5, v255, v255 ; encoding: [0x05,0x00,0x64,0xd7,0xff,0xff,0x03,0x00]
+
+# W32-REAL16: v_xor_b16 v5.l, v255.l, v255.l ; encoding: [0x05,0x00,0x64,0xd7,0xff,0xff,0x03,0x00]
+# W32-FAKE16: v_xor_b16 v5, v255, v255 ; encoding: [0x05,0x00,0x64,0xd7,0xff,0xff,0x03,0x00]
+# W64-REAL16: v_xor_b16 v5.l, v255.l, v255.l ; encoding: [0x05,0x00,0x64,0xd7,0xff,0xff,0x03,0x00]
+# W64-FAKE16: v_xor_b16 v5, v255, v255 ; encoding: [0x05,0x00,0x64,0xd7,0xff,0xff,0x03,0x00]
0x05,0x00,0x64,0xd7,0xff,0xff,0x03,0x00
-# GFX12: v_xor_b16 v5, s1, s2 ; encoding: [0x05,0x00,0x64,0xd7,0x01,0x04,0x00,0x00]
+
+# W32-REAL16: v_xor_b16 v5.l, s1, s2 ; encoding: [0x05,0x00,0x64,0xd7,0x01,0x04,0x00,0x00]
+# W32-FAKE16: v_xor_b16 v5, s1, s2 ; encoding: [0x05,0x00,0x64,0xd7,0x01,0x04,0x00,0x00]
+# W64-REAL16: v_xor_b16 v5.l, s1, s2 ; encoding: [0x05,0x00,0x64,0xd7,0x01,0x04,0x00,0x00]
+# W64-FAKE16: v_xor_b16 v5, s1, s2 ; encoding: [0x05,0x00,0x64,0xd7,0x01,0x04,0x00,0x00]
0x05,0x00,0x64,0xd7,0x01,0x04,0x00,0x00
-# GFX12: v_xor_b16 v5, s105, s105 ; encoding: [0x05,0x00,0x64,0xd7,0x69,0xd2,0x00,0x00]
+
+# W32-REAL16: v_xor_b16 v5.l, s105, s105 ; encoding: [0x05,0x00,0x64,0xd7,0x69,0xd2,0x00,0x00]
+# W32-FAKE16: v_xor_b16 v5, s105, s105 ; encoding: [0x05,0x00,0x64,0xd7,0x69,0xd2,0x00,0x00]
+# W64-REAL16: v_xor_b16 v5.l, s105, s105 ; encoding: [0x05,0x00,0x64,0xd7,0x69,0xd2,0x00,0x00]
+# W64-FAKE16: v_xor_b16 v5, s105, s105 ; encoding: [0x05,0x00,0x64,0xd7,0x69,0xd2,0x00,0x00]
0x05,0x00,0x64,0xd7,0x69,0xd2,0x00,0x00
-# GFX12: v_xor_b16 v5, vcc_lo, ttmp15 ; encoding: [0x05,0x00,0x64,0xd7,0x6a,0xf6,0x00,0x00]
+
+# W32-REAL16: v_xor_b16 v5.l, vcc_lo, ttmp15 ; encoding: [0x05,0x00,0x64,0xd7,0x6a,0xf6,0x00,0x00]
+# W32-FAKE16: v_xor_b16 v5, vcc_lo, ttmp15 ; encoding: [0x05,0x00,0x64,0xd7,0x6a,0xf6,0x00,0x00]
+# W64-REAL16: v_xor_b16 v5.l, vcc_lo, ttmp15 ; encoding: [0x05,0x00,0x64,0xd7,0x6a,0xf6,0x00,0x00]
+# W64-FAKE16: v_xor_b16 v5, vcc_lo, ttmp15 ; encoding: [0x05,0x00,0x64,0xd7,0x6a,0xf6,0x00,0x00]
0x05,0x00,0x64,0xd7,0x6a,0xf6,0x00,0x00
-# GFX12: v_xor_b16 v5, vcc_hi, 0xfe0b ; encoding: [0x05,0x00,0x64,0xd7,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+# W32-REAL16: v_xor_b16 v5.l, vcc_hi, 0xfe0b ; encoding: [0x05,0x00,0x64,0xd7,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+# W32-FAKE16: v_xor_b16 v5, vcc_hi, 0xfe0b ; encoding: [0x05,0x00,0x64,0xd7,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+# W64-REAL16: v_xor_b16 v5.l, vcc_hi, 0xfe0b ; encoding: [0x05,0x00,0x64,0xd7,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+# W64-FAKE16: v_xor_b16 v5, vcc_hi, 0xfe0b ; encoding: [0x05,0x00,0x64,0xd7,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
0x05,0x00,0x64,0xd7,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00
-# GFX12: v_xor_b16 v5, ttmp15, src_scc ; encoding: [0x05,0x00,0x64,0xd7,0x7b,0xfa,0x01,0x00]
+
+# W32-REAL16: v_xor_b16 v5.l, ttmp15, src_scc ; encoding: [0x05,0x00,0x64,0xd7,0x7b,0xfa,0x01,0x00]
+# W32-FAKE16: v_xor_b16 v5, ttmp15, src_scc ; encoding: [0x05,0x00,0x64,0xd7,0x7b,0xfa,0x01,0x00]
+# W64-REAL16: v_xor_b16 v5.l, ttmp15, src_scc ; encoding: [0x05,0x00,0x64,0xd7,0x7b,0xfa,0x01,0x00]
+# W64-FAKE16: v_xor_b16 v5, ttmp15, src_scc ; encoding: [0x05,0x00,0x64,0xd7,0x7b,0xfa,0x01,0x00]
0x05,0x00,0x64,0xd7,0x7b,0xfa,0x01,0x00
-# GFX12: v_xor_b16 v5, m0, 0x3800
+# W32-REAL16: v_xor_b16 v5.l, m0, 0x3800 ; encoding: [0x05,0x00,0x64,0xd7,0x7d,0xfe,0x01,0x00,0x00,0x38,0x00,0x00]
+# W32-FAKE16: v_xor_b16 v5, m0, 0x3800 ; encoding: [0x05,0x00,0x64,0xd7,0x7d,0xfe,0x01,0x00,0x00,0x38,0x00,0x00]
+# W64-REAL16: v_xor_b16 v5.l, m0, 0x3800 ; encoding: [0x05,0x00,0x64,0xd7,0x7d,0xfe,0x01,0x00,0x00,0x38,0x00,0x00]
+# W64-FAKE16: v_xor_b16 v5, m0, 0x3800 ; encoding: [0x05,0x00,0x64,0xd7,0x7d,0xfe,0x01,0x00,0x00,0x38,0x00,0x00]
0x05,0x00,0x64,0xd7,0x7d,0xe0,0x01,0x00
-# GFX12: v_xor_b16 v5, exec_lo, -1 ; encoding: [0x05,0x00,0x64,0xd7,0x7e,0x82,0x01,0x00]
+
+# W32-REAL16: v_xor_b16 v5.l, exec_lo, -1 ; encoding: [0x05,0x00,0x64,0xd7,0x7e,0x82,0x01,0x00]
+# W32-FAKE16: v_xor_b16 v5, exec_lo, -1 ; encoding: [0x05,0x00,0x64,0xd7,0x7e,0x82,0x01,0x00]
+# W64-REAL16: v_xor_b16 v5.l, exec_lo, -1 ; encoding: [0x05,0x00,0x64,0xd7,0x7e,0x82,0x01,0x00]
+# W64-FAKE16: v_xor_b16 v5, exec_lo, -1 ; encoding: [0x05,0x00,0x64,0xd7,0x7e,0x82,0x01,0x00]
0x05,0x00,0x64,0xd7,0x7e,0x82,0x01,0x00
-# GFX12: v_xor_b16 v5, exec_hi, null ; encoding: [0x05,0x00,0x64,0xd7,0x7f,0xf8,0x00,0x00]
+
+# W32-REAL16: v_xor_b16 v5.l, exec_hi, null ; encoding: [0x05,0x00,0x64,0xd7,0x7f,0xf8,0x00,0x00]
+# W32-FAKE16: v_xor_b16 v5, exec_hi, null ; encoding: [0x05,0x00,0x64,0xd7,0x7f,0xf8,0x00,0x00]
+# W64-REAL16: v_xor_b16 v5.l, exec_hi, null ; encoding: [0x05,0x00,0x64,0xd7,0x7f,0xf8,0x00,0x00]
+# W64-FAKE16: v_xor_b16 v5, exec_hi, null ; encoding: [0x05,0x00,0x64,0xd7,0x7f,0xf8,0x00,0x00]
0x05,0x00,0x64,0xd7,0x7f,0xf8,0x00,0x00
-# GFX12: v_xor_b16 v5, null, exec_lo ; encoding: [0x05,0x00,0x64,0xd7,0x7c,0xfc,0x00,0x00]
+
+# W32-REAL16: v_xor_b16 v5.l, null, exec_lo ; encoding: [0x05,0x00,0x64,0xd7,0x7c,0xfc,0x00,0x00]
+# W32-FAKE16: v_xor_b16 v5, null, exec_lo ; encoding: [0x05,0x00,0x64,0xd7,0x7c,0xfc,0x00,0x00]
+# W64-REAL16: v_xor_b16 v5.l, null, exec_lo ; encoding: [0x05,0x00,0x64,0xd7,0x7c,0xfc,0x00,0x00]
+# W64-FAKE16: v_xor_b16 v5, null, exec_lo ; encoding: [0x05,0x00,0x64,0xd7,0x7c,0xfc,0x00,0x00]
0x05,0x00,0x64,0xd7,0x7c,0xfc,0x00,0x00
-# GFX12: v_xor_b16 v5, -1, exec_hi ; encoding: [0x05,0x00,0x64,0xd7,0xc1,0xfe,0x00,0x00]
+
+# W32-REAL16: v_xor_b16 v5.l, -1, exec_hi ; encoding: [0x05,0x00,0x64,0xd7,0xc1,0xfe,0x00,0x00]
+# W32-FAKE16: v_xor_b16 v5, -1, exec_hi ; encoding: [0x05,0x00,0x64,0xd7,0xc1,0xfe,0x00,0x00]
+# W64-REAL16: v_xor_b16 v5.l, -1, exec_hi ; encoding: [0x05,0x00,0x64,0xd7,0xc1,0xfe,0x00,0x00]
+# W64-FAKE16: v_xor_b16 v5, -1, exec_hi ; encoding: [0x05,0x00,0x64,0xd7,0xc1,0xfe,0x00,0x00]
0x05,0x00,0x64,0xd7,0xc1,0xfe,0x00,0x00
-# GFX12: v_xor_b16 v5, 0x3800, m0
+# W32-REAL16: v_xor_b16 v5.l, 0x3800, m0 ; encoding: [0x05,0x00,0x64,0xd7,0xff,0xfa,0x00,0x00,0x00,0x38,0x00,0x00]
+# W32-FAKE16: v_xor_b16 v5, 0x3800, m0 ; encoding: [0x05,0x00,0x64,0xd7,0xff,0xfa,0x00,0x00,0x00,0x38,0x00,0x00]
+# W64-REAL16: v_xor_b16 v5.l, 0x3800, m0 ; encoding: [0x05,0x00,0x64,0xd7,0xff,0xfa,0x00,0x00,0x00,0x38,0x00,0x00]
+# W64-FAKE16: v_xor_b16 v5, 0x3800, m0 ; encoding: [0x05,0x00,0x64,0xd7,0xff,0xfa,0x00,0x00,0x00,0x38,0x00,0x00]
0x05,0x00,0x64,0xd7,0xf0,0xfa,0x00,0x00
-# GFX12: v_xor_b16 v5, src_scc, vcc_lo ; encoding: [0x05,0x00,0x64,0xd7,0xfd,0xd4,0x00,0x00]
+
+# W32-REAL16: v_xor_b16 v5.l, src_scc, vcc_lo ; encoding: [0x05,0x00,0x64,0xd7,0xfd,0xd4,0x00,0x00]
+# W32-FAKE16: v_xor_b16 v5, src_scc, vcc_lo ; encoding: [0x05,0x00,0x64,0xd7,0xfd,0xd4,0x00,0x00]
+# W64-REAL16: v_xor_b16 v5.l, src_scc, vcc_lo ; encoding: [0x05,0x00,0x64,0xd7,0xfd,0xd4,0x00,0x00]
+# W64-FAKE16: v_xor_b16 v5, src_scc, vcc_lo ; encoding: [0x05,0x00,0x64,0xd7,0xfd,0xd4,0x00,0x00]
0x05,0x00,0x64,0xd7,0xfd,0xd4,0x00,0x00
-# GFX12: v_xor_b16 v255, 0xfe0b, vcc_hi ; encoding: [0xff,0x00,0x64,0xd7,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
+
+# W32-REAL16: v_xor_b16 v255.l, 0xfe0b, vcc_hi ; encoding: [0xff,0x00,0x64,0xd7,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
+# W32-FAKE16: v_xor_b16 v255, 0xfe0b, vcc_hi ; encoding: [0xff,0x00,0x64,0xd7,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
+# W64-REAL16: v_xor_b16 v255.l, 0xfe0b, vcc_hi ; encoding: [0xff,0x00,0x64,0xd7,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
+# W64-FAKE16: v_xor_b16 v255, 0xfe0b, vcc_hi ; encoding: [0xff,0x00,0x64,0xd7,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
0xff,0x00,0x64,0xd7,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00
# GFX12: v_minimum_f32 v255, -|0xaf123456|, -|vcc_hi| ; encoding: [0xff,0x03,0x65,0xd7,0xff,0xd6,0x00,0x60,0x56,0x34,0x12,0xaf]
diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_vop3_dpp16.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_vop3_dpp16.txt
index f6bb2e4..f9efef4 100644
--- a/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_vop3_dpp16.txt
+++ b/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_vop3_dpp16.txt
@@ -1,5 +1,8 @@
-# RUN: llvm-mc -triple=amdgcn -mcpu=gfx1200 -disassemble -show-encoding < %s | FileCheck -check-prefixes=GFX12,W32 %s
-# RUN: llvm-mc -triple=amdgcn -mcpu=gfx1200 -mattr=+wavefrontsize64 -disassemble -show-encoding < %s | FileCheck -check-prefixes=GFX12,W64 %s
+; NOTE: Assertions have been autogenerated by utils/update_mc_test_checks.py UTC_ARGS: --version 5
+# RUN: llvm-mc -triple=amdgcn -mcpu=gfx1200 -mattr=+real-true16 -disassemble -show-encoding < %s | FileCheck -check-prefixes=GFX12,W32,W32-REAL16 %s
+# RUN: llvm-mc -triple=amdgcn -mcpu=gfx1200 -mattr=-real-true16 -disassemble -show-encoding < %s | FileCheck -check-prefixes=GFX12,W32,W32-FAKE16 %s
+# RUN: llvm-mc -triple=amdgcn -mcpu=gfx1200 -mattr=+wavefrontsize64,+real-true16 -disassemble -show-encoding < %s | FileCheck -check-prefixes=GFX12,W64,W64-REAL16 %s
+# RUN: llvm-mc -triple=amdgcn -mcpu=gfx1200 -mattr=+wavefrontsize64,-real-true16 -disassemble -show-encoding < %s | FileCheck -check-prefixes=GFX12,W64,W64-FAKE16 %s
# GFX12: v_add3_u32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x55,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
0x05,0x00,0x55,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff
@@ -281,46 +284,88 @@
# GFX12: v_alignbyte_b32_e64_dpp v255, v255, v255, src_scc row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0x00,0x17,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x0d,0x30]
0xff,0x00,0x17,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x0d,0x30
-# GFX12: v_and_b16_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+# W32-REAL16: v_and_b16_e64_dpp v5.l, v1.l, v2.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+# W32-FAKE16: v_and_b16_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+# W64-REAL16: v_and_b16_e64_dpp v5.l, v1.l, v2.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+# W64-FAKE16: v_and_b16_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff
-# GFX12: v_and_b16_e64_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+# W32-REAL16: v_and_b16_e64_dpp v5.l, v1.l, v2.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+# W32-FAKE16: v_and_b16_e64_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+# W64-REAL16: v_and_b16_e64_dpp v5.l, v1.l, v2.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+# W64-FAKE16: v_and_b16_e64_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff
-# GFX12: v_and_b16_e64_dpp v5, v1, v2 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+# W32-REAL16: v_and_b16_e64_dpp v5.l, v1.l, v2.l row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+# W32-FAKE16: v_and_b16_e64_dpp v5, v1, v2 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+# W64-REAL16: v_and_b16_e64_dpp v5.l, v1.l, v2.l row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+# W64-FAKE16: v_and_b16_e64_dpp v5, v1, v2 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff
-# GFX12: v_and_b16_e64_dpp v5, v1, v2 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+# W32-REAL16: v_and_b16_e64_dpp v5.l, v1.l, v2.l row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+# W32-FAKE16: v_and_b16_e64_dpp v5, v1, v2 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+# W64-REAL16: v_and_b16_e64_dpp v5.l, v1.l, v2.l row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+# W64-FAKE16: v_and_b16_e64_dpp v5, v1, v2 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff
-# GFX12: v_and_b16_e64_dpp v5, v1, v2 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+# W32-REAL16: v_and_b16_e64_dpp v5.l, v1.l, v2.l row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+# W32-FAKE16: v_and_b16_e64_dpp v5, v1, v2 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+# W64-REAL16: v_and_b16_e64_dpp v5.l, v1.l, v2.l row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+# W64-FAKE16: v_and_b16_e64_dpp v5, v1, v2 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff
-# GFX12: v_and_b16_e64_dpp v5, v1, v2 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+# W32-REAL16: v_and_b16_e64_dpp v5.l, v1.l, v2.l row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+# W32-FAKE16: v_and_b16_e64_dpp v5, v1, v2 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+# W64-REAL16: v_and_b16_e64_dpp v5.l, v1.l, v2.l row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+# W64-FAKE16: v_and_b16_e64_dpp v5, v1, v2 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff
-# GFX12: v_and_b16_e64_dpp v5, v1, v2 row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+# W32-REAL16: v_and_b16_e64_dpp v5.l, v1.l, v2.l row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+# W32-FAKE16: v_and_b16_e64_dpp v5, v1, v2 row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+# W64-REAL16: v_and_b16_e64_dpp v5.l, v1.l, v2.l row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+# W64-FAKE16: v_and_b16_e64_dpp v5, v1, v2 row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff
-# GFX12: v_and_b16_e64_dpp v5, v1, v2 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+# W32-REAL16: v_and_b16_e64_dpp v5.l, v1.l, v2.l row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+# W32-FAKE16: v_and_b16_e64_dpp v5, v1, v2 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+# W64-REAL16: v_and_b16_e64_dpp v5.l, v1.l, v2.l row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+# W64-FAKE16: v_and_b16_e64_dpp v5, v1, v2 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff
-# GFX12: v_and_b16_e64_dpp v5, v1, v2 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+# W32-REAL16: v_and_b16_e64_dpp v5.l, v1.l, v2.l row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+# W32-FAKE16: v_and_b16_e64_dpp v5, v1, v2 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+# W64-REAL16: v_and_b16_e64_dpp v5.l, v1.l, v2.l row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+# W64-FAKE16: v_and_b16_e64_dpp v5, v1, v2 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff
-# GFX12: v_and_b16_e64_dpp v5, v1, v2 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+# W32-REAL16: v_and_b16_e64_dpp v5.l, v1.l, v2.l row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+# W32-FAKE16: v_and_b16_e64_dpp v5, v1, v2 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+# W64-REAL16: v_and_b16_e64_dpp v5.l, v1.l, v2.l row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+# W64-FAKE16: v_and_b16_e64_dpp v5, v1, v2 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff
-# GFX12: v_and_b16_e64_dpp v5, v1, v2 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+# W32-REAL16: v_and_b16_e64_dpp v5.l, v1.l, v2.l row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+# W32-FAKE16: v_and_b16_e64_dpp v5, v1, v2 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+# W64-REAL16: v_and_b16_e64_dpp v5.l, v1.l, v2.l row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+# W64-FAKE16: v_and_b16_e64_dpp v5, v1, v2 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff
-# GFX12: v_and_b16_e64_dpp v5, v1, v2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x5f,0x01,0x01]
+# W32-REAL16: v_and_b16_e64_dpp v5.l, v1.l, v2.l row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x5f,0x01,0x01]
+# W32-FAKE16: v_and_b16_e64_dpp v5, v1, v2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x5f,0x01,0x01]
+# W64-REAL16: v_and_b16_e64_dpp v5.l, v1.l, v2.l row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x5f,0x01,0x01]
+# W64-FAKE16: v_and_b16_e64_dpp v5, v1, v2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x5f,0x01,0x01]
0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x5f,0x01,0x01
-# GFX12: v_and_b16_e64_dpp v5, v1, v2 row_xmask:0 row_mask:0x1 bank_mask:0x3 ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x60,0x01,0x13]
+# W32-REAL16: v_and_b16_e64_dpp v5.l, v1.l, v2.l row_xmask:0 row_mask:0x1 bank_mask:0x3 ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x60,0x01,0x13]
+# W32-FAKE16: v_and_b16_e64_dpp v5, v1, v2 row_xmask:0 row_mask:0x1 bank_mask:0x3 ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x60,0x01,0x13]
+# W64-REAL16: v_and_b16_e64_dpp v5.l, v1.l, v2.l row_xmask:0 row_mask:0x1 bank_mask:0x3 ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x60,0x01,0x13]
+# W64-FAKE16: v_and_b16_e64_dpp v5, v1, v2 row_xmask:0 row_mask:0x1 bank_mask:0x3 ; encoding: [0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x60,0x01,0x13]
0x05,0x00,0x62,0xd7,0xfa,0x04,0x02,0x00,0x01,0x60,0x01,0x13
-# GFX12: v_and_b16_e64_dpp v255, v255, v255 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0x00,0x62,0xd7,0xfa,0xfe,0x03,0x00,0xff,0x6f,0x0d,0x30]
+# W32-REAL16: v_and_b16_e64_dpp v255.l, v255.l, v255.l row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0x00,0x62,0xd7,0xfa,0xfe,0x03,0x00,0xff,0x6f,0x0d,0x30]
+# W32-FAKE16: v_and_b16_e64_dpp v255, v255, v255 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0x00,0x62,0xd7,0xfa,0xfe,0x03,0x00,0xff,0x6f,0x0d,0x30]
+# W64-REAL16: v_and_b16_e64_dpp v255.l, v255.l, v255.l row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0x00,0x62,0xd7,0xfa,0xfe,0x03,0x00,0xff,0x6f,0x0d,0x30]
+# W64-FAKE16: v_and_b16_e64_dpp v255, v255, v255 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0x00,0x62,0xd7,0xfa,0xfe,0x03,0x00,0xff,0x6f,0x0d,0x30]
0xff,0x00,0x62,0xd7,0xfa,0xfe,0x03,0x00,0xff,0x6f,0x0d,0x30
# GFX12: v_and_or_b32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x57,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
@@ -867,7 +912,7 @@
# GFX12: v_cvt_pk_bf8_f32_e64_dpp v1, -v2, |v3| quad_perm:[3,2,1,0] row_mask:0xe bank_mask:0xd ; encoding: [0x01,0x02,0x6a,0xd7,0xfa,0x06,0x02,0x20,0x02,0x1b,0x00,0xed]
0x01,0x02,0x6a,0xd7,0xfa,0x06,0x02,0x20,0x02,0x1b,0x00,0xed
-# GFX12: v_cvt_pk_bf8_f32 v1, -v2, |v3| ; encoding: [0x01,0x02,0x6a,0xd7,0x02,0x07,0x02,0x20]
+# GFX12: v_cvt_pk_bf8_f32 v1, -v2, |v3| ; encoding: [0x01,0x02,0x6a,0xd7,0x02,0x07,0x02,0x20]
0x01,0x02,0x6a,0xd7,0x02,0x07,0x02,0x20
# GFX12: v_cvt_pk_bf8_f32_e64_dpp v6, -v2, |v3| quad_perm:[3,2,1,0] row_mask:0xe bank_mask:0xd ; encoding: [0x06,0x02,0x6a,0xd7,0xfa,0x06,0x02,0x20,0x02,0x1b,0x00,0xed]
@@ -894,7 +939,7 @@
# GFX12: v_cvt_pk_fp8_f32_e64_dpp v1, -v2, |v3| quad_perm:[3,2,1,0] row_mask:0xe bank_mask:0xd ; encoding: [0x01,0x02,0x69,0xd7,0xfa,0x06,0x02,0x20,0x02,0x1b,0x00,0xed]
0x01,0x02,0x69,0xd7,0xfa,0x06,0x02,0x20,0x02,0x1b,0x00,0xed
-# GFX12: v_cvt_pk_fp8_f32 v1, -v2, |v3| ; encoding: [0x01,0x02,0x69,0xd7,0x02,0x07,0x02,0x20]
+# GFX12: v_cvt_pk_fp8_f32 v1, -v2, |v3| ; encoding: [0x01,0x02,0x69,0xd7,0x02,0x07,0x02,0x20]
0x01,0x02,0x69,0xd7,0x02,0x07,0x02,0x20
# GFX12: v_cvt_pk_fp8_f32_e64_dpp v6, -v2, |v3| quad_perm:[3,2,1,0] row_mask:0xe bank_mask:0xd ; encoding: [0x06,0x02,0x69,0xd7,0xfa,0x06,0x02,0x20,0x02,0x1b,0x00,0xed]
@@ -2883,46 +2928,88 @@
# GFX12: v_or3_b32_e64_dpp v255, v255, v255, src_scc row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0x00,0x58,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x0d,0x30]
0xff,0x00,0x58,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x0d,0x30
-# GFX12: v_or_b16_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+# W32-REAL16: v_or_b16_e64_dpp v5.l, v1.l, v2.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+# W32-FAKE16: v_or_b16_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+# W64-REAL16: v_or_b16_e64_dpp v5.l, v1.l, v2.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+# W64-FAKE16: v_or_b16_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff
-# GFX12: v_or_b16_e64_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+# W32-REAL16: v_or_b16_e64_dpp v5.l, v1.l, v2.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+# W32-FAKE16: v_or_b16_e64_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+# W64-REAL16: v_or_b16_e64_dpp v5.l, v1.l, v2.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+# W64-FAKE16: v_or_b16_e64_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff
-# GFX12: v_or_b16_e64_dpp v5, v1, v2 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+# W32-REAL16: v_or_b16_e64_dpp v5.l, v1.l, v2.l row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+# W32-FAKE16: v_or_b16_e64_dpp v5, v1, v2 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+# W64-REAL16: v_or_b16_e64_dpp v5.l, v1.l, v2.l row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+# W64-FAKE16: v_or_b16_e64_dpp v5, v1, v2 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff
-# GFX12: v_or_b16_e64_dpp v5, v1, v2 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+# W32-REAL16: v_or_b16_e64_dpp v5.l, v1.l, v2.l row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+# W32-FAKE16: v_or_b16_e64_dpp v5, v1, v2 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+# W64-REAL16: v_or_b16_e64_dpp v5.l, v1.l, v2.l row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+# W64-FAKE16: v_or_b16_e64_dpp v5, v1, v2 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff
-# GFX12: v_or_b16_e64_dpp v5, v1, v2 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+# W32-REAL16: v_or_b16_e64_dpp v5.l, v1.l, v2.l row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+# W32-FAKE16: v_or_b16_e64_dpp v5, v1, v2 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+# W64-REAL16: v_or_b16_e64_dpp v5.l, v1.l, v2.l row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+# W64-FAKE16: v_or_b16_e64_dpp v5, v1, v2 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff
-# GFX12: v_or_b16_e64_dpp v5, v1, v2 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+# W32-REAL16: v_or_b16_e64_dpp v5.l, v1.l, v2.l row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+# W32-FAKE16: v_or_b16_e64_dpp v5, v1, v2 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+# W64-REAL16: v_or_b16_e64_dpp v5.l, v1.l, v2.l row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+# W64-FAKE16: v_or_b16_e64_dpp v5, v1, v2 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff
-# GFX12: v_or_b16_e64_dpp v5, v1, v2 row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+# W32-REAL16: v_or_b16_e64_dpp v5.l, v1.l, v2.l row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+# W32-FAKE16: v_or_b16_e64_dpp v5, v1, v2 row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+# W64-REAL16: v_or_b16_e64_dpp v5.l, v1.l, v2.l row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+# W64-FAKE16: v_or_b16_e64_dpp v5, v1, v2 row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff
-# GFX12: v_or_b16_e64_dpp v5, v1, v2 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+# W32-REAL16: v_or_b16_e64_dpp v5.l, v1.l, v2.l row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+# W32-FAKE16: v_or_b16_e64_dpp v5, v1, v2 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+# W64-REAL16: v_or_b16_e64_dpp v5.l, v1.l, v2.l row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+# W64-FAKE16: v_or_b16_e64_dpp v5, v1, v2 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff
-# GFX12: v_or_b16_e64_dpp v5, v1, v2 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+# W32-REAL16: v_or_b16_e64_dpp v5.l, v1.l, v2.l row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+# W32-FAKE16: v_or_b16_e64_dpp v5, v1, v2 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+# W64-REAL16: v_or_b16_e64_dpp v5.l, v1.l, v2.l row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+# W64-FAKE16: v_or_b16_e64_dpp v5, v1, v2 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff
-# GFX12: v_or_b16_e64_dpp v5, v1, v2 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+# W32-REAL16: v_or_b16_e64_dpp v5.l, v1.l, v2.l row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+# W32-FAKE16: v_or_b16_e64_dpp v5, v1, v2 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+# W64-REAL16: v_or_b16_e64_dpp v5.l, v1.l, v2.l row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+# W64-FAKE16: v_or_b16_e64_dpp v5, v1, v2 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff
-# GFX12: v_or_b16_e64_dpp v5, v1, v2 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+# W32-REAL16: v_or_b16_e64_dpp v5.l, v1.l, v2.l row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+# W32-FAKE16: v_or_b16_e64_dpp v5, v1, v2 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+# W64-REAL16: v_or_b16_e64_dpp v5.l, v1.l, v2.l row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+# W64-FAKE16: v_or_b16_e64_dpp v5, v1, v2 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff
-# GFX12: v_or_b16_e64_dpp v5, v1, v2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x5f,0x01,0x01]
+# W32-REAL16: v_or_b16_e64_dpp v5.l, v1.l, v2.l row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x5f,0x01,0x01]
+# W32-FAKE16: v_or_b16_e64_dpp v5, v1, v2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x5f,0x01,0x01]
+# W64-REAL16: v_or_b16_e64_dpp v5.l, v1.l, v2.l row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x5f,0x01,0x01]
+# W64-FAKE16: v_or_b16_e64_dpp v5, v1, v2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x5f,0x01,0x01]
0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x5f,0x01,0x01
-# GFX12: v_or_b16_e64_dpp v5, v1, v2 row_xmask:0 row_mask:0x1 bank_mask:0x3 ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x60,0x01,0x13]
+# W32-REAL16: v_or_b16_e64_dpp v5.l, v1.l, v2.l row_xmask:0 row_mask:0x1 bank_mask:0x3 ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x60,0x01,0x13]
+# W32-FAKE16: v_or_b16_e64_dpp v5, v1, v2 row_xmask:0 row_mask:0x1 bank_mask:0x3 ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x60,0x01,0x13]
+# W64-REAL16: v_or_b16_e64_dpp v5.l, v1.l, v2.l row_xmask:0 row_mask:0x1 bank_mask:0x3 ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x60,0x01,0x13]
+# W64-FAKE16: v_or_b16_e64_dpp v5, v1, v2 row_xmask:0 row_mask:0x1 bank_mask:0x3 ; encoding: [0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x60,0x01,0x13]
0x05,0x00,0x63,0xd7,0xfa,0x04,0x02,0x00,0x01,0x60,0x01,0x13
-# GFX12: v_or_b16_e64_dpp v255, v255, v255 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0x00,0x63,0xd7,0xfa,0xfe,0x03,0x00,0xff,0x6f,0x0d,0x30]
+# W32-REAL16: v_or_b16_e64_dpp v255.l, v255.l, v255.l row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0x00,0x63,0xd7,0xfa,0xfe,0x03,0x00,0xff,0x6f,0x0d,0x30]
+# W32-FAKE16: v_or_b16_e64_dpp v255, v255, v255 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0x00,0x63,0xd7,0xfa,0xfe,0x03,0x00,0xff,0x6f,0x0d,0x30]
+# W64-REAL16: v_or_b16_e64_dpp v255.l, v255.l, v255.l row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0x00,0x63,0xd7,0xfa,0xfe,0x03,0x00,0xff,0x6f,0x0d,0x30]
+# W64-FAKE16: v_or_b16_e64_dpp v255, v255, v255 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0x00,0x63,0xd7,0xfa,0xfe,0x03,0x00,0xff,0x6f,0x0d,0x30]
0xff,0x00,0x63,0xd7,0xfa,0xfe,0x03,0x00,0xff,0x6f,0x0d,0x30
# GFX12: v_perm_b32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x44,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
@@ -3392,46 +3479,88 @@
# GFX12: v_xor3_b32_e64_dpp v255, v255, v255, src_scc row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0x00,0x40,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x0d,0x30]
0xff,0x00,0x40,0xd6,0xfa,0xfe,0xf7,0x03,0xff,0x6f,0x0d,0x30
-# GFX12: v_xor_b16_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+# W32-REAL16: v_xor_b16_e64_dpp v5.l, v1.l, v2.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+# W32-FAKE16: v_xor_b16_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+# W64-REAL16: v_xor_b16_e64_dpp v5.l, v1.l, v2.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+# W64-FAKE16: v_xor_b16_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff
-# GFX12: v_xor_b16_e64_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+# W32-REAL16: v_xor_b16_e64_dpp v5.l, v1.l, v2.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+# W32-FAKE16: v_xor_b16_e64_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+# W64-REAL16: v_xor_b16_e64_dpp v5.l, v1.l, v2.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+# W64-FAKE16: v_xor_b16_e64_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff
-# GFX12: v_xor_b16_e64_dpp v5, v1, v2 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+# W32-REAL16: v_xor_b16_e64_dpp v5.l, v1.l, v2.l row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+# W32-FAKE16: v_xor_b16_e64_dpp v5, v1, v2 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+# W64-REAL16: v_xor_b16_e64_dpp v5.l, v1.l, v2.l row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+# W64-FAKE16: v_xor_b16_e64_dpp v5, v1, v2 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff
-# GFX12: v_xor_b16_e64_dpp v5, v1, v2 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+# W32-REAL16: v_xor_b16_e64_dpp v5.l, v1.l, v2.l row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+# W32-FAKE16: v_xor_b16_e64_dpp v5, v1, v2 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+# W64-REAL16: v_xor_b16_e64_dpp v5.l, v1.l, v2.l row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+# W64-FAKE16: v_xor_b16_e64_dpp v5, v1, v2 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff
-# GFX12: v_xor_b16_e64_dpp v5, v1, v2 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+# W32-REAL16: v_xor_b16_e64_dpp v5.l, v1.l, v2.l row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+# W32-FAKE16: v_xor_b16_e64_dpp v5, v1, v2 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+# W64-REAL16: v_xor_b16_e64_dpp v5.l, v1.l, v2.l row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+# W64-FAKE16: v_xor_b16_e64_dpp v5, v1, v2 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff
-# GFX12: v_xor_b16_e64_dpp v5, v1, v2 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+# W32-REAL16: v_xor_b16_e64_dpp v5.l, v1.l, v2.l row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+# W32-FAKE16: v_xor_b16_e64_dpp v5, v1, v2 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+# W64-REAL16: v_xor_b16_e64_dpp v5.l, v1.l, v2.l row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+# W64-FAKE16: v_xor_b16_e64_dpp v5, v1, v2 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff
-# GFX12: v_xor_b16_e64_dpp v5, v1, v2 row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+# W32-REAL16: v_xor_b16_e64_dpp v5.l, v1.l, v2.l row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+# W32-FAKE16: v_xor_b16_e64_dpp v5, v1, v2 row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+# W64-REAL16: v_xor_b16_e64_dpp v5.l, v1.l, v2.l row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+# W64-FAKE16: v_xor_b16_e64_dpp v5, v1, v2 row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff
-# GFX12: v_xor_b16_e64_dpp v5, v1, v2 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+# W32-REAL16: v_xor_b16_e64_dpp v5.l, v1.l, v2.l row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+# W32-FAKE16: v_xor_b16_e64_dpp v5, v1, v2 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+# W64-REAL16: v_xor_b16_e64_dpp v5.l, v1.l, v2.l row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+# W64-FAKE16: v_xor_b16_e64_dpp v5, v1, v2 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff
-# GFX12: v_xor_b16_e64_dpp v5, v1, v2 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+# W32-REAL16: v_xor_b16_e64_dpp v5.l, v1.l, v2.l row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+# W32-FAKE16: v_xor_b16_e64_dpp v5, v1, v2 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+# W64-REAL16: v_xor_b16_e64_dpp v5.l, v1.l, v2.l row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+# W64-FAKE16: v_xor_b16_e64_dpp v5, v1, v2 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff
-# GFX12: v_xor_b16_e64_dpp v5, v1, v2 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+# W32-REAL16: v_xor_b16_e64_dpp v5.l, v1.l, v2.l row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+# W32-FAKE16: v_xor_b16_e64_dpp v5, v1, v2 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+# W64-REAL16: v_xor_b16_e64_dpp v5.l, v1.l, v2.l row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+# W64-FAKE16: v_xor_b16_e64_dpp v5, v1, v2 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff
-# GFX12: v_xor_b16_e64_dpp v5, v1, v2 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+# W32-REAL16: v_xor_b16_e64_dpp v5.l, v1.l, v2.l row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+# W32-FAKE16: v_xor_b16_e64_dpp v5, v1, v2 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+# W64-REAL16: v_xor_b16_e64_dpp v5.l, v1.l, v2.l row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+# W64-FAKE16: v_xor_b16_e64_dpp v5, v1, v2 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff
-# GFX12: v_xor_b16_e64_dpp v5, v1, v2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x5f,0x01,0x01]
+# W32-REAL16: v_xor_b16_e64_dpp v5.l, v1.l, v2.l row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x5f,0x01,0x01]
+# W32-FAKE16: v_xor_b16_e64_dpp v5, v1, v2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x5f,0x01,0x01]
+# W64-REAL16: v_xor_b16_e64_dpp v5.l, v1.l, v2.l row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x5f,0x01,0x01]
+# W64-FAKE16: v_xor_b16_e64_dpp v5, v1, v2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x5f,0x01,0x01]
0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x5f,0x01,0x01
-# GFX12: v_xor_b16_e64_dpp v5, v1, v2 row_xmask:0 row_mask:0x1 bank_mask:0x3 ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x60,0x01,0x13]
+# W32-REAL16: v_xor_b16_e64_dpp v5.l, v1.l, v2.l row_xmask:0 row_mask:0x1 bank_mask:0x3 ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x60,0x01,0x13]
+# W32-FAKE16: v_xor_b16_e64_dpp v5, v1, v2 row_xmask:0 row_mask:0x1 bank_mask:0x3 ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x60,0x01,0x13]
+# W64-REAL16: v_xor_b16_e64_dpp v5.l, v1.l, v2.l row_xmask:0 row_mask:0x1 bank_mask:0x3 ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x60,0x01,0x13]
+# W64-FAKE16: v_xor_b16_e64_dpp v5, v1, v2 row_xmask:0 row_mask:0x1 bank_mask:0x3 ; encoding: [0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x60,0x01,0x13]
0x05,0x00,0x64,0xd7,0xfa,0x04,0x02,0x00,0x01,0x60,0x01,0x13
-# GFX12: v_xor_b16_e64_dpp v255, v255, v255 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0x00,0x64,0xd7,0xfa,0xfe,0x03,0x00,0xff,0x6f,0x0d,0x30]
+# W32-REAL16: v_xor_b16_e64_dpp v255.l, v255.l, v255.l row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0x00,0x64,0xd7,0xfa,0xfe,0x03,0x00,0xff,0x6f,0x0d,0x30]
+# W32-FAKE16: v_xor_b16_e64_dpp v255, v255, v255 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0x00,0x64,0xd7,0xfa,0xfe,0x03,0x00,0xff,0x6f,0x0d,0x30]
+# W64-REAL16: v_xor_b16_e64_dpp v255.l, v255.l, v255.l row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0x00,0x64,0xd7,0xfa,0xfe,0x03,0x00,0xff,0x6f,0x0d,0x30]
+# W64-FAKE16: v_xor_b16_e64_dpp v255, v255, v255 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0x00,0x64,0xd7,0xfa,0xfe,0x03,0x00,0xff,0x6f,0x0d,0x30]
0xff,0x00,0x64,0xd7,0xfa,0xfe,0x03,0x00,0xff,0x6f,0x0d,0x30
# GFX12: v_add_nc_i16_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x0d,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_vop3_dpp8.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_vop3_dpp8.txt
index f291795..47611e0 100644
--- a/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_vop3_dpp8.txt
+++ b/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_vop3_dpp8.txt
@@ -1,5 +1,8 @@
-# RUN: llvm-mc -triple=amdgcn -mcpu=gfx1200 -disassemble -show-encoding < %s | FileCheck -check-prefixes=GFX12,W32 %s
-# RUN: llvm-mc -triple=amdgcn -mcpu=gfx1200 -mattr=+wavefrontsize64 -disassemble -show-encoding < %s | FileCheck -check-prefixes=GFX12,W64 %s
+; NOTE: Assertions have been autogenerated by utils/update_mc_test_checks.py UTC_ARGS: --version 5
+# RUN: llvm-mc -triple=amdgcn -mcpu=gfx1200 -mattr=+real-true16 -disassemble -show-encoding < %s | FileCheck -check-prefixes=GFX12,W32,W32-REAL16 %s
+# RUN: llvm-mc -triple=amdgcn -mcpu=gfx1200 -mattr=-real-true16 -disassemble -show-encoding < %s | FileCheck -check-prefixes=GFX12,W32,W32-FAKE16 %s
+# RUN: llvm-mc -triple=amdgcn -mcpu=gfx1200 -mattr=+wavefrontsize64,+real-true16 -disassemble -show-encoding < %s | FileCheck -check-prefixes=GFX12,W64,W64-REAL16 %s
+# RUN: llvm-mc -triple=amdgcn -mcpu=gfx1200 -mattr=+wavefrontsize64,-real-true16 -disassemble -show-encoding < %s | FileCheck -check-prefixes=GFX12,W64,W64-FAKE16 %s
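+# With +real-true16 (REAL16) 16-bit operands disassemble as VGPR halves such as v5.l;
+# with -real-true16 (FAKE16) they print as full 32-bit VGPRs.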
# GFX12: v_add3_u32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x55,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
0x05,0x00,0x55,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05
@@ -179,10 +182,16 @@
# GFX12: v_alignbyte_b32_e64_dpp v255, v255, v255, src_scc dpp8:[0,0,0,0,0,0,0,0] fi:1 ; encoding: [0xff,0x00,0x17,0xd6,0xea,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
0xff,0x00,0x17,0xd6,0xea,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00
-# GFX12: v_and_b16_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x62,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+# W32-REAL16: v_and_b16_e64_dpp v5.l, v1.l, v2.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x62,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+# W32-FAKE16: v_and_b16_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x62,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+# W64-REAL16: v_and_b16_e64_dpp v5.l, v1.l, v2.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x62,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+# W64-FAKE16: v_and_b16_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x62,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
0x05,0x00,0x62,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05
-# GFX12: v_and_b16_e64_dpp v255, v255, v255 dpp8:[0,0,0,0,0,0,0,0] fi:1 ; encoding: [0xff,0x00,0x62,0xd7,0xea,0xfe,0x03,0x00,0xff,0x00,0x00,0x00]
+# W32-REAL16: v_and_b16_e64_dpp v255.l, v255.l, v255.l dpp8:[0,0,0,0,0,0,0,0] fi:1 ; encoding: [0xff,0x00,0x62,0xd7,0xea,0xfe,0x03,0x00,0xff,0x00,0x00,0x00]
+# W32-FAKE16: v_and_b16_e64_dpp v255, v255, v255 dpp8:[0,0,0,0,0,0,0,0] fi:1 ; encoding: [0xff,0x00,0x62,0xd7,0xea,0xfe,0x03,0x00,0xff,0x00,0x00,0x00]
+# W64-REAL16: v_and_b16_e64_dpp v255.l, v255.l, v255.l dpp8:[0,0,0,0,0,0,0,0] fi:1 ; encoding: [0xff,0x00,0x62,0xd7,0xea,0xfe,0x03,0x00,0xff,0x00,0x00,0x00]
+# W64-FAKE16: v_and_b16_e64_dpp v255, v255, v255 dpp8:[0,0,0,0,0,0,0,0] fi:1 ; encoding: [0xff,0x00,0x62,0xd7,0xea,0xfe,0x03,0x00,0xff,0x00,0x00,0x00]
0xff,0x00,0x62,0xd7,0xea,0xfe,0x03,0x00,0xff,0x00,0x00,0x00
# GFX12: v_and_or_b32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x57,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
@@ -1779,10 +1788,16 @@
# GFX12: v_or3_b32_e64_dpp v255, v255, v255, src_scc dpp8:[0,0,0,0,0,0,0,0] fi:1 ; encoding: [0xff,0x00,0x58,0xd6,0xea,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
0xff,0x00,0x58,0xd6,0xea,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00
-# GFX12: v_or_b16_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x63,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+# W32-REAL16: v_or_b16_e64_dpp v5.l, v1.l, v2.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x63,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+# W32-FAKE16: v_or_b16_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x63,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+# W64-REAL16: v_or_b16_e64_dpp v5.l, v1.l, v2.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x63,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+# W64-FAKE16: v_or_b16_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x63,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
0x05,0x00,0x63,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05
-# GFX12: v_or_b16_e64_dpp v255, v255, v255 dpp8:[0,0,0,0,0,0,0,0] fi:1 ; encoding: [0xff,0x00,0x63,0xd7,0xea,0xfe,0x03,0x00,0xff,0x00,0x00,0x00]
+# W32-REAL16: v_or_b16_e64_dpp v255.l, v255.l, v255.l dpp8:[0,0,0,0,0,0,0,0] fi:1 ; encoding: [0xff,0x00,0x63,0xd7,0xea,0xfe,0x03,0x00,0xff,0x00,0x00,0x00]
+# W32-FAKE16: v_or_b16_e64_dpp v255, v255, v255 dpp8:[0,0,0,0,0,0,0,0] fi:1 ; encoding: [0xff,0x00,0x63,0xd7,0xea,0xfe,0x03,0x00,0xff,0x00,0x00,0x00]
+# W64-REAL16: v_or_b16_e64_dpp v255.l, v255.l, v255.l dpp8:[0,0,0,0,0,0,0,0] fi:1 ; encoding: [0xff,0x00,0x63,0xd7,0xea,0xfe,0x03,0x00,0xff,0x00,0x00,0x00]
+# W64-FAKE16: v_or_b16_e64_dpp v255, v255, v255 dpp8:[0,0,0,0,0,0,0,0] fi:1 ; encoding: [0xff,0x00,0x63,0xd7,0xea,0xfe,0x03,0x00,0xff,0x00,0x00,0x00]
0xff,0x00,0x63,0xd7,0xea,0xfe,0x03,0x00,0xff,0x00,0x00,0x00
# GFX12: v_perm_b32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x44,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
@@ -2102,10 +2117,16 @@
# GFX12: v_xor3_b32_e64_dpp v255, v255, v255, src_scc dpp8:[0,0,0,0,0,0,0,0] fi:1 ; encoding: [0xff,0x00,0x40,0xd6,0xea,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00]
0xff,0x00,0x40,0xd6,0xea,0xfe,0xf7,0x03,0xff,0x00,0x00,0x00
-# GFX12: v_xor_b16_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x64,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+# W32-REAL16: v_xor_b16_e64_dpp v5.l, v1.l, v2.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x64,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+# W32-FAKE16: v_xor_b16_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x64,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+# W64-REAL16: v_xor_b16_e64_dpp v5.l, v1.l, v2.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x64,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+# W64-FAKE16: v_xor_b16_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x64,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
0x05,0x00,0x64,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05
-# GFX12: v_xor_b16_e64_dpp v255, v255, v255 dpp8:[0,0,0,0,0,0,0,0] fi:1 ; encoding: [0xff,0x00,0x64,0xd7,0xea,0xfe,0x03,0x00,0xff,0x00,0x00,0x00]
+# W32-REAL16: v_xor_b16_e64_dpp v255.l, v255.l, v255.l dpp8:[0,0,0,0,0,0,0,0] fi:1 ; encoding: [0xff,0x00,0x64,0xd7,0xea,0xfe,0x03,0x00,0xff,0x00,0x00,0x00]
+# W32-FAKE16: v_xor_b16_e64_dpp v255, v255, v255 dpp8:[0,0,0,0,0,0,0,0] fi:1 ; encoding: [0xff,0x00,0x64,0xd7,0xea,0xfe,0x03,0x00,0xff,0x00,0x00,0x00]
+# W64-REAL16: v_xor_b16_e64_dpp v255.l, v255.l, v255.l dpp8:[0,0,0,0,0,0,0,0] fi:1 ; encoding: [0xff,0x00,0x64,0xd7,0xea,0xfe,0x03,0x00,0xff,0x00,0x00,0x00]
+# W64-FAKE16: v_xor_b16_e64_dpp v255, v255, v255 dpp8:[0,0,0,0,0,0,0,0] fi:1 ; encoding: [0xff,0x00,0x64,0xd7,0xea,0xfe,0x03,0x00,0xff,0x00,0x00,0x00]
0xff,0x00,0x64,0xd7,0xea,0xfe,0x03,0x00,0xff,0x00,0x00,0x00
# GFX12: v_add_nc_i16_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x0d,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
diff --git a/llvm/test/MC/Disassembler/X86/apx/kmov.txt b/llvm/test/MC/Disassembler/X86/apx/kmov.txt
index 45fedbd..ba77dda 100644
--- a/llvm/test/MC/Disassembler/X86/apx/kmov.txt
+++ b/llvm/test/MC/Disassembler/X86/apx/kmov.txt
@@ -17,20 +17,20 @@
# INTEL: {evex} kmovq k2, k1
0x62,0xf1,0xfc,0x08,0x90,0xd1
-# ATT: {evex} kmovb -16(%rax), %k0
-# INTEL: {evex} kmovb k0, byte ptr [rax - 16]
+# ATT: kmovb -16(%rax), %k0
+# INTEL: kmovb k0, byte ptr [rax - 16]
0x62,0xf1,0x7d,0x08,0x90,0x40,0xf0
-# ATT: {evex} kmovw -16(%rax), %k0
-# INTEL: {evex} kmovw k0, word ptr [rax - 16]
+# ATT: kmovw -16(%rax), %k0
+# INTEL: kmovw k0, word ptr [rax - 16]
0x62,0xf1,0x7c,0x08,0x90,0x40,0xf0
-# ATT: {evex} kmovd -16(%rax), %k0
-# INTEL: {evex} kmovd k0, dword ptr [rax - 16]
+# ATT: kmovd -16(%rax), %k0
+# INTEL: kmovd k0, dword ptr [rax - 16]
0x62,0xf1,0xfd,0x08,0x90,0x40,0xf0
-# ATT: {evex} kmovq -16(%rax), %k0
-# INTEL: {evex} kmovq k0, qword ptr [rax - 16]
+# ATT: kmovq -16(%rax), %k0
+# INTEL: kmovq k0, qword ptr [rax - 16]
0x62,0xf1,0xfc,0x08,0x90,0x40,0xf0
# ATT-NOT: {evex}
diff --git a/llvm/test/MC/ELF/relocation-alias.s b/llvm/test/MC/ELF/relocation-alias.s
index 51fb0c3..66bf2cee 100644
--- a/llvm/test/MC/ELF/relocation-alias.s
+++ b/llvm/test/MC/ELF/relocation-alias.s
@@ -16,7 +16,10 @@ movabsq $memcpy+2, %rax
# CHECK: movq (%rip), %rax
# CHECK-NEXT: R_X86_64_REX_GOTPCRELX abs-0x4
+# CHECK: movq (%rip), %r16
+# CHECK-NEXT: R_X86_64_REX2_GOTPCRELX abs-0x4
movq abs@GOTPCREL(%rip), %rax
+movq abs@GOTPCREL(%rip), %r16
abs = 42
# CHECK: movabsq $0, %rbx
diff --git a/llvm/test/MC/RISCV/machine-csr-names-invalid.s b/llvm/test/MC/RISCV/machine-csr-names-invalid.s
deleted file mode 100644
index 526c417..0000000
--- a/llvm/test/MC/RISCV/machine-csr-names-invalid.s
+++ /dev/null
@@ -1,40 +0,0 @@
-# RUN: not llvm-mc -triple riscv64 < %s 2>&1 \
-# RUN: | FileCheck -check-prefixes=CHECK-NEED-RV32 %s
-
-# These machine mode CSR register names are RV32 only.
-
-csrrs t1, pmpcfg1, zero # CHECK-NEED-RV32: :[[@LINE]]:11: error: system register 'pmpcfg1' is RV32 only
-csrrs t1, pmpcfg3, zero # CHECK-NEED-RV32: :[[@LINE]]:11: error: system register 'pmpcfg3' is RV32 only
-
-csrrs t1, mcycleh, zero # CHECK-NEED-RV32: :[[@LINE]]:11: error: system register 'mcycleh' is RV32 only
-csrrs t1, minstreth, zero # CHECK-NEED-RV32: :[[@LINE]]:11: error: system register 'minstreth' is RV32 only
-
-csrrs t1, mhpmcounter3h, zero # CHECK-NEED-RV32: :[[@LINE]]:11: error: system register 'mhpmcounter3h' is RV32 only
-csrrs t1, mhpmcounter4h, zero # CHECK-NEED-RV32: :[[@LINE]]:11: error: system register 'mhpmcounter4h' is RV32 only
-csrrs t1, mhpmcounter5h, zero # CHECK-NEED-RV32: :[[@LINE]]:11: error: system register 'mhpmcounter5h' is RV32 only
-csrrs t1, mhpmcounter6h, zero # CHECK-NEED-RV32: :[[@LINE]]:11: error: system register 'mhpmcounter6h' is RV32 only
-csrrs t1, mhpmcounter7h, zero # CHECK-NEED-RV32: :[[@LINE]]:11: error: system register 'mhpmcounter7h' is RV32 only
-csrrs t1, mhpmcounter8h, zero # CHECK-NEED-RV32: :[[@LINE]]:11: error: system register 'mhpmcounter8h' is RV32 only
-csrrs t1, mhpmcounter9h, zero # CHECK-NEED-RV32: :[[@LINE]]:11: error: system register 'mhpmcounter9h' is RV32 only
-csrrs t1, mhpmcounter10h, zero # CHECK-NEED-RV32: :[[@LINE]]:11: error: system register 'mhpmcounter10h' is RV32 only
-csrrs t1, mhpmcounter11h, zero # CHECK-NEED-RV32: :[[@LINE]]:11: error: system register 'mhpmcounter11h' is RV32 only
-csrrs t1, mhpmcounter12h, zero # CHECK-NEED-RV32: :[[@LINE]]:11: error: system register 'mhpmcounter12h' is RV32 only
-csrrs t1, mhpmcounter13h, zero # CHECK-NEED-RV32: :[[@LINE]]:11: error: system register 'mhpmcounter13h' is RV32 only
-csrrs t1, mhpmcounter14h, zero # CHECK-NEED-RV32: :[[@LINE]]:11: error: system register 'mhpmcounter14h' is RV32 only
-csrrs t1, mhpmcounter15h, zero # CHECK-NEED-RV32: :[[@LINE]]:11: error: system register 'mhpmcounter15h' is RV32 only
-csrrs t1, mhpmcounter16h, zero # CHECK-NEED-RV32: :[[@LINE]]:11: error: system register 'mhpmcounter16h' is RV32 only
-csrrs t1, mhpmcounter17h, zero # CHECK-NEED-RV32: :[[@LINE]]:11: error: system register 'mhpmcounter17h' is RV32 only
-csrrs t1, mhpmcounter18h, zero # CHECK-NEED-RV32: :[[@LINE]]:11: error: system register 'mhpmcounter18h' is RV32 only
-csrrs t1, mhpmcounter19h, zero # CHECK-NEED-RV32: :[[@LINE]]:11: error: system register 'mhpmcounter19h' is RV32 only
-csrrs t1, mhpmcounter20h, zero # CHECK-NEED-RV32: :[[@LINE]]:11: error: system register 'mhpmcounter20h' is RV32 only
-csrrs t1, mhpmcounter21h, zero # CHECK-NEED-RV32: :[[@LINE]]:11: error: system register 'mhpmcounter21h' is RV32 only
-csrrs t1, mhpmcounter22h, zero # CHECK-NEED-RV32: :[[@LINE]]:11: error: system register 'mhpmcounter22h' is RV32 only
-csrrs t1, mhpmcounter23h, zero # CHECK-NEED-RV32: :[[@LINE]]:11: error: system register 'mhpmcounter23h' is RV32 only
-csrrs t1, mhpmcounter24h, zero # CHECK-NEED-RV32: :[[@LINE]]:11: error: system register 'mhpmcounter24h' is RV32 only
-csrrs t1, mhpmcounter25h, zero # CHECK-NEED-RV32: :[[@LINE]]:11: error: system register 'mhpmcounter25h' is RV32 only
-csrrs t1, mhpmcounter26h, zero # CHECK-NEED-RV32: :[[@LINE]]:11: error: system register 'mhpmcounter26h' is RV32 only
-csrrs t1, mhpmcounter27h, zero # CHECK-NEED-RV32: :[[@LINE]]:11: error: system register 'mhpmcounter27h' is RV32 only
-csrrs t1, mhpmcounter28h, zero # CHECK-NEED-RV32: :[[@LINE]]:11: error: system register 'mhpmcounter28h' is RV32 only
-csrrs t1, mhpmcounter29h, zero # CHECK-NEED-RV32: :[[@LINE]]:11: error: system register 'mhpmcounter29h' is RV32 only
-csrrs t1, mhpmcounter30h, zero # CHECK-NEED-RV32: :[[@LINE]]:11: error: system register 'mhpmcounter30h' is RV32 only
-csrrs t1, mhpmcounter31h, zero # CHECK-NEED-RV32: :[[@LINE]]:11: error: system register 'mhpmcounter31h' is RV32 only
diff --git a/llvm/test/MC/RISCV/rv64-relax-all.s b/llvm/test/MC/RISCV/rv64-relax-all.s
index 70a3f77..6705d6e 100644
--- a/llvm/test/MC/RISCV/rv64-relax-all.s
+++ b/llvm/test/MC/RISCV/rv64-relax-all.s
@@ -14,3 +14,9 @@ c.beqz a0, NEAR
# INSTR: c.j 0x0 <NEAR>
# RELAX-INSTR: jal zero, 0x0 <NEAR>
c.j NEAR
+
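+# Forward branches to a local label defined below.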
+bnez s0, .foo
+j .foo
+beqz s0, .foo
+.foo:
+ret
diff --git a/llvm/test/MC/WebAssembly/type-checker-errors.s b/llvm/test/MC/WebAssembly/type-checker-errors.s
index e8b8274..3106fe76 100644
--- a/llvm/test/MC/WebAssembly/type-checker-errors.s
+++ b/llvm/test/MC/WebAssembly/type-checker-errors.s
@@ -93,12 +93,14 @@ global_set_type_mismatch:
table_get_expected_expression_operand:
.functype table_get_expected_expression_operand () -> ()
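+# Push the i32 table index so only the expected error below is reported.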
+ i32.const 0
# CHECK: :[[@LINE+1]]:13: error: expected expression operand
table.get 1
end_function
table_get_missing_tabletype:
.functype table_get_missing_tabletype () -> ()
+ i32.const 0
# CHECK: :[[@LINE+1]]:13: error: symbol foo: missing .tabletype
table.get foo
end_function
@@ -851,3 +853,23 @@ br_incorrect_func_signature:
drop
i32.const 1
end_function
+
+multiple_errors_in_function:
+ .functype multiple_errors_in_function () -> ()
+# CHECK: :[[@LINE+2]]:3: error: empty stack while popping i32
+# CHECK: :[[@LINE+1]]:13: error: expected expression operand
+ table.get 1
+
+# CHECK: :[[@LINE+3]]:3: error: empty stack while popping i32
+# CHECK: :[[@LINE+2]]:3: error: empty stack while popping externref
+# CHECK: :[[@LINE+1]]:3: error: empty stack while popping i32
+ table.fill valid_table
+
+ f32.const 0.0
+ ref.null_extern
+# CHECK: :[[@LINE+2]]:3: error: popped externref, expected i32
+# CHECK: :[[@LINE+1]]:3: error: popped f32, expected i32
+ i32.add
+ drop
+
+ end_function
diff --git a/llvm/test/MC/X86/gotpcrelx.s b/llvm/test/MC/X86/gotpcrelx.s
index e63e3e9..5a8ba45 100644
--- a/llvm/test/MC/X86/gotpcrelx.s
+++ b/llvm/test/MC/X86/gotpcrelx.s
@@ -37,6 +37,16 @@
# CHECK-NEXT: R_X86_64_REX_GOTPCRELX sbb
# CHECK-NEXT: R_X86_64_REX_GOTPCRELX sub
# CHECK-NEXT: R_X86_64_REX_GOTPCRELX xor
+# CHECK-NEXT: R_X86_64_REX2_GOTPCRELX mov
+# CHECK-NEXT: R_X86_64_REX2_GOTPCRELX test
+# CHECK-NEXT: R_X86_64_REX2_GOTPCRELX adc
+# CHECK-NEXT: R_X86_64_REX2_GOTPCRELX add
+# CHECK-NEXT: R_X86_64_REX2_GOTPCRELX and
+# CHECK-NEXT: R_X86_64_REX2_GOTPCRELX cmp
+# CHECK-NEXT: R_X86_64_REX2_GOTPCRELX or
+# CHECK-NEXT: R_X86_64_REX2_GOTPCRELX sbb
+# CHECK-NEXT: R_X86_64_REX2_GOTPCRELX sub
+# CHECK-NEXT: R_X86_64_REX2_GOTPCRELX xor
# CHECK-NEXT: }
# NORELAX-NEXT: R_X86_64_GOTPCREL mov
@@ -71,6 +81,16 @@
# NORELAX-NEXT: R_X86_64_GOTPCREL sbb
# NORELAX-NEXT: R_X86_64_GOTPCREL sub
# NORELAX-NEXT: R_X86_64_GOTPCREL xor
+# NORELAX-NEXT: R_X86_64_GOTPCREL mov
+# NORELAX-NEXT: R_X86_64_GOTPCREL test
+# NORELAX-NEXT: R_X86_64_GOTPCREL adc
+# NORELAX-NEXT: R_X86_64_GOTPCREL add
+# NORELAX-NEXT: R_X86_64_GOTPCREL and
+# NORELAX-NEXT: R_X86_64_GOTPCREL cmp
+# NORELAX-NEXT: R_X86_64_GOTPCREL or
+# NORELAX-NEXT: R_X86_64_GOTPCREL sbb
+# NORELAX-NEXT: R_X86_64_GOTPCREL sub
+# NORELAX-NEXT: R_X86_64_GOTPCREL xor
# NORELAX-NEXT: }
movl mov@GOTPCREL(%rip), %eax
@@ -108,10 +128,22 @@ sbb sbb@GOTPCREL(%rip), %rax
sub sub@GOTPCREL(%rip), %rax
xor xor@GOTPCREL(%rip), %rax
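+## The same accesses through the APX extended register %r16 need a REX2 prefix, so
+## they produce R_X86_64_REX2_GOTPCRELX (or plain R_X86_64_GOTPCREL when relaxation is disabled).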
+movq mov@GOTPCREL(%rip), %r16
+test %r16, test@GOTPCREL(%rip)
+adc adc@GOTPCREL(%rip), %r16
+add add@GOTPCREL(%rip), %r16
+and and@GOTPCREL(%rip), %r16
+cmp cmp@GOTPCREL(%rip), %r16
+or or@GOTPCREL(%rip), %r16
+sbb sbb@GOTPCREL(%rip), %r16
+sub sub@GOTPCREL(%rip), %r16
+xor xor@GOTPCREL(%rip), %r16
+
# COMMON-NEXT: Section ({{.*}}) .rela.norelax {
# COMMON-NEXT: R_X86_64_GOTPCREL mov 0x0
# COMMON-NEXT: R_X86_64_GOTPCREL mov 0xFFFFFFFFFFFFFFFD
# COMMON-NEXT: R_X86_64_GOTPCREL mov 0xFFFFFFFFFFFFFFFC
+# COMMON-NEXT: R_X86_64_GOTPCREL mov 0xFFFFFFFFFFFFFFFD
# COMMON-NEXT: }
# COMMON-NEXT: ]
@@ -123,3 +155,5 @@ movl mov@GOTPCREL+4(%rip), %eax
movq mov@GOTPCREL+1(%rip), %rax
## We could emit R_X86_64_GOTPCRELX, but it is probably unnecessary.
movl mov@GOTPCREL+0(%rip), %eax
+## Don't emit R_X86_64_GOTPCRELX.
+movq mov@GOTPCREL+1(%rip), %r16
diff --git a/llvm/test/MC/X86/reloc-directive-elf-64.s b/llvm/test/MC/X86/reloc-directive-elf-64.s
index 8f5d8c8..323603e 100644
--- a/llvm/test/MC/X86/reloc-directive-elf-64.s
+++ b/llvm/test/MC/X86/reloc-directive-elf-64.s
@@ -9,6 +9,7 @@
# PRINT-NEXT: .reloc 0, R_X86_64_64, .data+2
# PRINT-NEXT: .reloc 0, R_X86_64_GOTPCRELX, foo+3
# PRINT-NEXT: .reloc 0, R_X86_64_REX_GOTPCRELX, 5
+# PRINT-NEXT: .reloc 0, R_X86_64_REX2_GOTPCRELX, 7
# PRINT: .reloc 0, BFD_RELOC_NONE, 9
# PRINT-NEXT: .reloc 0, BFD_RELOC_8, 9
# PRINT-NEXT: .reloc 0, BFD_RELOC_16, 9
@@ -21,6 +22,7 @@
# CHECK-NEXT: 0x0 R_X86_64_64 .data 0x2
# CHECK-NEXT: 0x0 R_X86_64_GOTPCRELX foo 0x3
# CHECK-NEXT: 0x0 R_X86_64_REX_GOTPCRELX - 0x5
+# CHECK-NEXT: 0x0 R_X86_64_REX2_GOTPCRELX - 0x7
# CHECK-NEXT: 0x0 R_X86_64_NONE - 0x9
# CHECK-NEXT: 0x0 R_X86_64_8 - 0x9
# CHECK-NEXT: 0x0 R_X86_64_16 - 0x9
@@ -37,6 +39,7 @@
.reloc 0, R_X86_64_64, .data+2
.reloc 0, R_X86_64_GOTPCRELX, foo+3
.reloc 0, R_X86_64_REX_GOTPCRELX, 5
+ .reloc 0, R_X86_64_REX2_GOTPCRELX, 7
.reloc 0, BFD_RELOC_NONE, 9
.reloc 0, BFD_RELOC_8, 9
diff --git a/llvm/test/TableGen/listflatten-error.td b/llvm/test/TableGen/listflatten-error.td
new file mode 100644
index 0000000..2f13356
--- /dev/null
+++ b/llvm/test/TableGen/listflatten-error.td
@@ -0,0 +1,6 @@
+// RUN: not llvm-tblgen %s 2>&1 | FileCheck %s -DFILE=%s
+
+// CHECK: [[FILE]]:[[@LINE+2]]:33: error: expected list type argument in unary operator
+class Flatten<int A> {
+ list<int> F = !listflatten(A);
+}
diff --git a/llvm/test/TableGen/listflatten.td b/llvm/test/TableGen/listflatten.td
new file mode 100644
index 0000000..a76ac21
--- /dev/null
+++ b/llvm/test/TableGen/listflatten.td
@@ -0,0 +1,32 @@
+// RUN: llvm-tblgen %s | FileCheck %s
+
+class Flatten<list<int> A, list<int> B> {
+ list<int> Flat1 = !listflatten([A, B, [6], [7, 8]]);
+
+ list<list<int>> X = [A, B];
+ list<int> Flat2 = !listflatten(!listconcat(X, [[7]]));
+
+ // Generate a nested list of integers.
+ list<int> Y0 = [1, 2, 3, 4];
+ list<list<int>> Y1 = !foreach(elem, Y0, [elem]);
+ list<list<list<int>>> Y2 = !foreach(elem, Y1, [elem]);
+ list<list<list<list<int>>>> Y3 = !foreach(elem, Y2, [elem]);
+
+ // Flatten it completely.
+  list<int> Flat3 = !listflatten(!listflatten(!listflatten(Y3)));
+
+ // Flatten it partially.
+ list<list<list<int>>> Flat4 = !listflatten(Y3);
+ list<list<int>> Flat5 = !listflatten(!listflatten(Y3));
+
+  // Test no-op flattening of an already-flat list.
+ list<string> Flat6 = !listflatten(["a", "b"]);
+}
+
+// CHECK: list<int> Flat1 = [1, 2, 3, 4, 5, 6, 7, 8];
+// CHECK: list<int> Flat2 = [1, 2, 3, 4, 5, 7];
+// CHECK: list<int> Flat3 = [1, 2, 3, 4];
+// CHECK{LITERAL}: list<list<list<int>>> Flat4 = [[[1]], [[2]], [[3]], [[4]]];
+// CHECK: list<string> Flat6 = ["a", "b"];
+def F : Flatten<[1,2], [3,4,5]>;
+
diff --git a/llvm/test/Transforms/AggressiveInstCombine/inline-strcmp-debugloc.ll b/llvm/test/Transforms/AggressiveInstCombine/inline-strcmp-debugloc.ll
new file mode 100644
index 0000000..94c9128
--- /dev/null
+++ b/llvm/test/Transforms/AggressiveInstCombine/inline-strcmp-debugloc.ll
@@ -0,0 +1,56 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+;; Tests that when we replace a call to strcmp with compiler-generated inline
+;; code, we pass the strcmp call's dbg location to the inline code.
+; RUN: opt < %s -passes=aggressive-instcombine -S | FileCheck %s
+
+@.str = constant [3 x i8] c"-h\00"
+
+define i32 @main() {
+; CHECK-LABEL: define i32 @main() {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: br label %[[SUB_0:.*]], !dbg [[DBG4:![0-9]+]]
+; CHECK: [[SUB_0]]:
+; CHECK-NEXT: [[TMP0:%.*]] = load i8, ptr null, align 1, !dbg [[DBG4]]
+; CHECK-NEXT: [[TMP1:%.*]] = zext i8 [[TMP0]] to i32, !dbg [[DBG4]]
+; CHECK-NEXT: [[TMP2:%.*]] = sub i32 [[TMP1]], 45, !dbg [[DBG4]]
+; CHECK-NEXT: [[TMP3:%.*]] = icmp ne i32 [[TMP2]], 0, !dbg [[DBG4]]
+; CHECK-NEXT: br i1 [[TMP3]], label %[[NE:.*]], label %[[SUB_1:.*]], !dbg [[DBG4]]
+; CHECK: [[SUB_1]]:
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr inbounds (i8, ptr null, i64 1), align 1, !dbg [[DBG4]]
+; CHECK-NEXT: [[TMP5:%.*]] = zext i8 [[TMP4]] to i32, !dbg [[DBG4]]
+; CHECK-NEXT: [[TMP6:%.*]] = sub i32 [[TMP5]], 104, !dbg [[DBG4]]
+; CHECK-NEXT: [[TMP7:%.*]] = icmp ne i32 [[TMP6]], 0, !dbg [[DBG4]]
+; CHECK-NEXT: br i1 [[TMP7]], label %[[NE]], label %[[SUB_2:.*]], !dbg [[DBG4]]
+; CHECK: [[SUB_2]]:
+; CHECK-NEXT: br label %[[NE]], !dbg [[DBG4]]
+; CHECK: [[NE]]:
+; CHECK-NEXT: br label %[[ENTRY_TAIL:.*]], !dbg [[DBG4]]
+; CHECK: [[ENTRY_TAIL]]:
+; CHECK-NEXT: ret i32 0
+;
+entry:
+ %call.i = call i32 @strcmp(ptr null, ptr @.str), !dbg !4
+ %cmp.i.not = icmp eq i32 %call.i, 0
+ ret i32 0
+}
+
+declare i32 @strcmp(ptr, ptr)
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!3}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C11, file: !1, producer: "clang version 20.0.0git", isOptimized: true, emissionKind: FullDebug, enums: !2, retainedTypes: !2, globals: !2)
+!1 = !DIFile(filename: "test.c", directory: "/tmp")
+!2 = !{}
+!3 = !{i32 2, !"Debug Info Version", i32 3}
+!4 = !DILocation(line: 258, column: 10, scope: !5)
+!5 = distinct !DISubprogram(name: "streq", scope: !1, file: !1, line: 257, type: !7, scopeLine: 257, unit: !0, retainedNodes: !2)
+!7 = !DISubroutineType(types: !2)
+;.
+; CHECK: [[META0:![0-9]+]] = distinct !DICompileUnit(language: DW_LANG_C11, file: [[META1:![0-9]+]], producer: "{{.*}}clang version {{.*}}", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: [[META2:![0-9]+]], retainedTypes: [[META2]], globals: [[META2]])
+; CHECK: [[META1]] = !DIFile(filename: "test.c", directory: {{.*}})
+; CHECK: [[META2]] = !{}
+; CHECK: [[DBG4]] = !DILocation(line: 258, column: 10, scope: [[META5:![0-9]+]])
+; CHECK: [[META5]] = distinct !DISubprogram(name: "streq", scope: [[META1]], file: [[META1]], line: 257, type: [[META6:![0-9]+]], scopeLine: 257, spFlags: DISPFlagDefinition, unit: [[META0]], retainedNodes: [[META2]])
+; CHECK: [[META6]] = !DISubroutineType(types: [[META2]])
+;.
diff --git a/llvm/test/Transforms/DFAJumpThreading/dfa-jump-threading-transform.ll b/llvm/test/Transforms/DFAJumpThreading/dfa-jump-threading-transform.ll
index c38f81d..cba1ba8 100644
--- a/llvm/test/Transforms/DFAJumpThreading/dfa-jump-threading-transform.ll
+++ b/llvm/test/Transforms/DFAJumpThreading/dfa-jump-threading-transform.ll
@@ -300,3 +300,126 @@ define void @self-reference() {
end:
ret void
}
+
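+; Regression tests for PR106083: unfolding select instructions during DFA jump threading.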
+define void @pr106083_invalidBBarg_fold(i1 %cmp1, i1 %cmp2, i1 %not, ptr %d) {
+; CHECK-LABEL: @pr106083_invalidBBarg_fold(
+; CHECK-NEXT: bb:
+; CHECK-NEXT: br i1 [[CMP1:%.*]], label [[BB1:%.*]], label [[SEL_SI_UNFOLD_FALSE:%.*]]
+; CHECK: sel.si.unfold.false:
+; CHECK-NEXT: [[DOTSI_UNFOLD_PHI1:%.*]] = phi i32 [ 1, [[BB:%.*]] ]
+; CHECK-NEXT: br label [[BB1]]
+; CHECK: BB1:
+; CHECK-NEXT: [[I:%.*]] = phi i16 [ 0, [[BB1_BACKEDGE:%.*]] ], [ 0, [[BB]] ], [ 1, [[BB7:%.*]] ], [ 0, [[SEL_SI_UNFOLD_FALSE]] ], [ 1, [[BB7_JT0:%.*]] ]
+; CHECK-NEXT: [[SEL_SI_UNFOLD_PHI:%.*]] = phi i32 [ [[SEL_SI_UNFOLD_PHI]], [[BB1_BACKEDGE]] ], [ [[SEL_SI_UNFOLD_PHI]], [[BB7]] ], [ 0, [[BB]] ], [ [[DOTSI_UNFOLD_PHI1]], [[SEL_SI_UNFOLD_FALSE]] ], [ [[SEL_SI_UNFOLD_PHI]], [[BB7_JT0]] ]
+; CHECK-NEXT: br i1 [[NOT:%.*]], label [[BB7_JT0]], label [[BB2:%.*]]
+; CHECK: BB2:
+; CHECK-NEXT: store i16 0, ptr [[D:%.*]], align 2
+; CHECK-NEXT: br i1 [[CMP2:%.*]], label [[BB7]], label [[SPEC_SELECT_SI_UNFOLD_FALSE_JT0:%.*]]
+; CHECK: spec.select.si.unfold.false:
+; CHECK-NEXT: br label [[BB7]]
+; CHECK: spec.select.si.unfold.false.jt0:
+; CHECK-NEXT: [[DOTSI_UNFOLD_PHI_JT0:%.*]] = phi i32 [ 0, [[BB2]] ]
+; CHECK-NEXT: br label [[BB7_JT0]]
+; CHECK: BB7:
+; CHECK-NEXT: [[D_PROMOTED4:%.*]] = phi i16 [ 1, [[BB2]] ], [ 1, [[SPEC_SELECT_SI_UNFOLD_FALSE:%.*]] ]
+; CHECK-NEXT: [[_3:%.*]] = phi i32 [ [[SEL_SI_UNFOLD_PHI]], [[BB2]] ], [ poison, [[SPEC_SELECT_SI_UNFOLD_FALSE]] ]
+; CHECK-NEXT: switch i32 [[_3]], label [[BB1_BACKEDGE]] [
+; CHECK-NEXT: i32 0, label [[BB1]]
+; CHECK-NEXT: i32 1, label [[BB8:%.*]]
+; CHECK-NEXT: ]
+; CHECK: BB7.jt0:
+; CHECK-NEXT: [[D_PROMOTED4_JT0:%.*]] = phi i16 [ 0, [[BB1]] ], [ 1, [[SPEC_SELECT_SI_UNFOLD_FALSE_JT0]] ]
+; CHECK-NEXT: [[_3_JT0:%.*]] = phi i32 [ 0, [[BB1]] ], [ [[DOTSI_UNFOLD_PHI_JT0]], [[SPEC_SELECT_SI_UNFOLD_FALSE_JT0]] ]
+; CHECK-NEXT: br label [[BB1]]
+; CHECK: BB1.backedge:
+; CHECK-NEXT: br label [[BB1]]
+; CHECK: BB8:
+; CHECK-NEXT: ret void
+;
+bb:
+ %sel = select i1 %cmp1, i32 0, i32 1
+ br label %BB1
+
+BB1: ; preds = %BB1.backedge, %BB7, %bb
+ %i = phi i16 [ 0, %BB1.backedge ], [ 0, %bb ], [ 1, %BB7 ]
+ br i1 %not, label %BB7, label %BB2
+
+BB2: ; preds = %BB1
+ store i16 0, ptr %d, align 2
+ %spec.select = select i1 %cmp2, i32 %sel, i32 0
+ br label %BB7
+
+BB7: ; preds = %BB2, %BB1
+ %d.promoted4 = phi i16 [ 0, %BB1 ], [ 1, %BB2 ]
+ %_3 = phi i32 [ 0, %BB1 ], [ %spec.select, %BB2 ]
+ switch i32 %_3, label %BB1.backedge [
+ i32 0, label %BB1
+ i32 1, label %BB8
+ ]
+
+BB1.backedge: ; preds = %BB7
+ br label %BB1
+
+BB8: ; preds = %BB7
+ ret void
+}
+
+define void @pr106083_select_dead_uses(i1 %cmp1, i1 %not, ptr %p) {
+; CHECK-LABEL: @pr106083_select_dead_uses(
+; CHECK-NEXT: bb:
+; CHECK-NEXT: br i1 [[CMP1:%.*]], label [[DOTLOOPEXIT6:%.*]], label [[SPEC_SELECT_SI_UNFOLD_FALSE:%.*]]
+; CHECK: spec.select.si.unfold.false:
+; CHECK-NEXT: [[DOTSI_UNFOLD_PHI1:%.*]] = phi i32 [ 1, [[BB:%.*]] ]
+; CHECK-NEXT: br label [[DOTLOOPEXIT6]]
+; CHECK: .loopexit6:
+; CHECK-NEXT: [[SPEC_SELECT_SI_UNFOLD_PHI:%.*]] = phi i32 [ [[SPEC_SELECT_SI_UNFOLD_PHI]], [[SELECT_UNFOLD:%.*]] ], [ 0, [[BB]] ], [ [[DOTSI_UNFOLD_PHI1]], [[SPEC_SELECT_SI_UNFOLD_FALSE]] ]
+; CHECK-NEXT: br i1 [[NOT:%.*]], label [[SELECT_UNFOLD_JT0:%.*]], label [[BB1:%.*]]
+; CHECK: bb1:
+; CHECK-NEXT: [[I:%.*]] = load i32, ptr [[P:%.*]], align 4
+; CHECK-NEXT: [[NOT2:%.*]] = icmp eq i32 0, 0
+; CHECK-NEXT: br i1 [[NOT2]], label [[SELECT_UNFOLD]], label [[SPEC_SELECT7_SI_UNFOLD_FALSE_JT0:%.*]]
+; CHECK: spec.select7.si.unfold.false:
+; CHECK-NEXT: br label [[SELECT_UNFOLD]]
+; CHECK: spec.select7.si.unfold.false.jt0:
+; CHECK-NEXT: [[DOTSI_UNFOLD_PHI_JT0:%.*]] = phi i32 [ 0, [[BB1]] ]
+; CHECK-NEXT: br label [[SELECT_UNFOLD_JT0]]
+; CHECK: select.unfold:
+; CHECK-NEXT: [[_2:%.*]] = phi i32 [ [[SPEC_SELECT_SI_UNFOLD_PHI]], [[BB1]] ], [ poison, [[SPEC_SELECT7_SI_UNFOLD_FALSE:%.*]] ]
+; CHECK-NEXT: switch i32 [[_2]], label [[BB2:%.*]] [
+; CHECK-NEXT: i32 0, label [[DOTPREHEADER_PREHEADER:%.*]]
+; CHECK-NEXT: i32 1, label [[DOTLOOPEXIT6]]
+; CHECK-NEXT: ]
+; CHECK: select.unfold.jt0:
+; CHECK-NEXT: [[_2_JT0:%.*]] = phi i32 [ 0, [[DOTLOOPEXIT6]] ], [ [[DOTSI_UNFOLD_PHI_JT0]], [[SPEC_SELECT7_SI_UNFOLD_FALSE_JT0]] ]
+; CHECK-NEXT: br label [[DOTPREHEADER_PREHEADER]]
+; CHECK: .preheader.preheader:
+; CHECK-NEXT: ret void
+; CHECK: bb2:
+; CHECK-NEXT: unreachable
+;
+bb:
+ %spec.select = select i1 %cmp1, i32 0, i32 1
+ br label %.loopexit6
+
+.loopexit6: ; preds = %select.unfold, %bb
+ br i1 %not, label %select.unfold, label %bb1
+
+bb1: ; preds = %.loopexit6
+ %i = load i32, ptr %p, align 4
+ %not2 = icmp eq i32 0, 0
+ %spec.select7 = select i1 %not2, i32 %spec.select, i32 0
+ br label %select.unfold
+
+select.unfold: ; preds = %bb1, %.loopexit6
+ %_2 = phi i32 [ 0, %.loopexit6 ], [ %spec.select7, %bb1 ]
+ switch i32 %_2, label %bb2 [
+ i32 0, label %.preheader.preheader
+ i32 1, label %.loopexit6
+ ]
+
+.preheader.preheader: ; preds = %select.unfold
+ ret void
+
+bb2: ; preds = %select.unfold
+ unreachable
+}
diff --git a/llvm/test/Transforms/EliminateAvailableExternally/transform-to-local.ll b/llvm/test/Transforms/EliminateAvailableExternally/transform-to-local.ll
index 786cc26..4908fba 100644
--- a/llvm/test/Transforms/EliminateAvailableExternally/transform-to-local.ll
+++ b/llvm/test/Transforms/EliminateAvailableExternally/transform-to-local.ll
@@ -1,6 +1,16 @@
; REQUIRES: asserts
; RUN: opt -passes=elim-avail-extern -avail-extern-to-local -stats -S 2>&1 < %s | FileCheck %s
+;
+; RUN: echo '[{"Guid":1234, "Counters": [1]}]' | llvm-ctxprof-util fromJSON --input=- --output=%t_profile.ctxprofdata
+;
+; Because we pass a contextual profile with a root defined in this module, we expect the same outcome as if
+; we had passed -avail-extern-to-local: available_externally functions don't get elided and are instead converted to local linkage.
+; RUN: opt -passes='assign-guid,require<ctx-prof-analysis>,elim-avail-extern' -use-ctx-profile=%t_profile.ctxprofdata -stats -S 2>&1 < %s | FileCheck %s
+; If the profile doesn't apply to this module, available_externally functions won't be converted to internal linkage and will be
+; removed instead.
+; RUN: echo '[{"Guid":5678, "Counters": [1]}]' | llvm-ctxprof-util fromJSON --input=- --output=%t_profile_bad.ctxprofdata
+; RUN: opt -passes='assign-guid,require<ctx-prof-analysis>,elim-avail-extern' -use-ctx-profile=%t_profile_bad.ctxprofdata -stats -S 2>&1 < %s | FileCheck %s --check-prefix=NOOP
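+; The Guid 1234 profile matches the !guid attached to @hello below; Guid 5678 matches nothing in this module.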
declare void @call_out(ptr %fct)
@@ -12,13 +22,15 @@ define available_externally hidden void @g() {
ret void
}
-define void @hello(ptr %g) {
+define void @hello(ptr %g) !guid !0 {
call void @f()
%f = load ptr, ptr @f
call void @call_out(ptr %f)
ret void
}
+!0 = !{i64 1234}
+
; CHECK: define internal void @f.__uniq.{{[0-9|a-f]*}}()
; CHECK: declare hidden void @g()
; CHECK: call void @f.__uniq.{{[0-9|a-f]*}}()
@@ -26,4 +38,6 @@ define void @hello(ptr %g) {
; CHECK-NEXT: call void @call_out(ptr %f)
; CHECK: Statistics Collected
; CHECK: 1 elim-avail-extern - Number of functions converted
-; CHECK: 1 elim-avail-extern - Number of functions removed
\ No newline at end of file
+; CHECK: 1 elim-avail-extern - Number of functions removed
+
+; NOOP: 2 elim-avail-extern - Number of functions removed
\ No newline at end of file
diff --git a/llvm/test/Transforms/InstCombine/AArch64/sve-intrinsic-insr.ll b/llvm/test/Transforms/InstCombine/AArch64/sve-intrinsic-insr.ll
new file mode 100644
index 0000000..e8489c5
--- /dev/null
+++ b/llvm/test/Transforms/InstCombine/AArch64/sve-intrinsic-insr.ll
@@ -0,0 +1,73 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -S -passes=instcombine < %s | FileCheck %s
+
+target triple = "aarch64-unknown-linux-gnu"
+
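+;; sve.insr shifts the vector up by one element and inserts the scalar into lane 0,
+;; so inserting a value into a splat of that same value leaves the splat unchanged.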
+define <vscale x 16 x i8> @insr_val_into_splatted_val_int(i8 %a) #0 {
+; CHECK-LABEL: @insr_val_into_splatted_val_int(
+; CHECK-NEXT: [[T0:%.*]] = insertelement <vscale x 16 x i8> poison, i8 [[A:%.*]], i64 0
+; CHECK-NEXT: [[T1:%.*]] = shufflevector <vscale x 16 x i8> [[T0]], <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
+; CHECK-NEXT: ret <vscale x 16 x i8> [[T1]]
+;
+ %t0 = insertelement <vscale x 16 x i8> poison, i8 %a, i64 0
+ %t1 = shufflevector <vscale x 16 x i8> %t0, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
+ %t2 = tail call <vscale x 16 x i8> @llvm.aarch64.sve.insr.nxv16i8(<vscale x 16 x i8> %t1, i8 %a)
+ ret <vscale x 16 x i8> %t2
+}
+
+define <vscale x 8 x i16> @insr_five_into_fives() #0 {
+; CHECK-LABEL: @insr_five_into_fives(
+; CHECK-NEXT: ret <vscale x 8 x i16> shufflevector (<vscale x 8 x i16> insertelement (<vscale x 8 x i16> poison, i16 5, i64 0), <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer)
+;
+ %t1 = tail call <vscale x 8 x i16> @llvm.aarch64.sve.insr.nxv8i16(<vscale x 8 x i16> splat (i16 5), i16 5)
+ ret <vscale x 8 x i16> %t1
+}
+
+define <vscale x 4 x float> @insr_val_into_splatted_val_fp(float %a) #0 {
+; CHECK-LABEL: @insr_val_into_splatted_val_fp(
+; CHECK-NEXT: [[T0:%.*]] = insertelement <vscale x 4 x float> poison, float [[A:%.*]], i64 0
+; CHECK-NEXT: [[T1:%.*]] = shufflevector <vscale x 4 x float> [[T0]], <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
+; CHECK-NEXT: ret <vscale x 4 x float> [[T1]]
+;
+ %t0 = insertelement <vscale x 4 x float> poison, float %a, i64 0
+ %t1 = shufflevector <vscale x 4 x float> %t0, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
+ %t2 = tail call <vscale x 4 x float> @llvm.aarch64.sve.insr.nxv4f32(<vscale x 4 x float> %t1, float %a)
+ ret <vscale x 4 x float> %t2
+}
+
+define <vscale x 2 x double> @insr_zero_into_zero() #0 {
+; CHECK-LABEL: @insr_zero_into_zero(
+; CHECK-NEXT: ret <vscale x 2 x double> zeroinitializer
+;
+ %t1 = tail call <vscale x 2 x double> @llvm.aarch64.sve.insr.nxv2f64(<vscale x 2 x double> zeroinitializer, double zeroinitializer)
+ ret <vscale x 2 x double> %t1
+}
+
+define <vscale x 16 x i8> @insr_val_into_splatted_other(i8 %a, i8 %b) #0 {
+; CHECK-LABEL: @insr_val_into_splatted_other(
+; CHECK-NEXT: [[T0:%.*]] = insertelement <vscale x 16 x i8> poison, i8 [[B:%.*]], i64 0
+; CHECK-NEXT: [[T1:%.*]] = shufflevector <vscale x 16 x i8> [[T0]], <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
+; CHECK-NEXT: [[T2:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.insr.nxv16i8(<vscale x 16 x i8> [[T1]], i8 [[A:%.*]])
+; CHECK-NEXT: ret <vscale x 16 x i8> [[T2]]
+;
+ %t0 = insertelement <vscale x 16 x i8> poison, i8 %b, i64 0
+ %t1 = shufflevector <vscale x 16 x i8> %t0, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
+ %t2 = tail call <vscale x 16 x i8> @llvm.aarch64.sve.insr.nxv16i8(<vscale x 16 x i8> %t1, i8 %a)
+ ret <vscale x 16 x i8> %t2
+}
+
+define <vscale x 8 x i16> @insr_three_into_fives() #0 {
+; CHECK-LABEL: @insr_three_into_fives(
+; CHECK-NEXT: [[T1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.insr.nxv8i16(<vscale x 8 x i16> shufflevector (<vscale x 8 x i16> insertelement (<vscale x 8 x i16> poison, i16 5, i64 0), <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer), i16 3)
+; CHECK-NEXT: ret <vscale x 8 x i16> [[T1]]
+;
+ %t1 = tail call <vscale x 8 x i16> @llvm.aarch64.sve.insr.nxv8i16(<vscale x 8 x i16> splat (i16 5), i16 3)
+ ret <vscale x 8 x i16> %t1
+}
+
+declare <vscale x 16 x i8> @llvm.aarch64.sve.insr.nxv16i8(<vscale x 16 x i8>, i8)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.insr.nxv8i16(<vscale x 8 x i16>, i16)
+declare <vscale x 4 x float> @llvm.aarch64.sve.insr.nxv4f32(<vscale x 4 x float>, float)
+declare <vscale x 2 x double> @llvm.aarch64.sve.insr.nxv2f64(<vscale x 2 x double>, double)
+
+attributes #0 = { "target-features"="+sve" }
diff --git a/llvm/test/Transforms/InstCombine/compare-3way.ll b/llvm/test/Transforms/InstCombine/compare-3way.ll
index e206736..5d443cd 100644
--- a/llvm/test/Transforms/InstCombine/compare-3way.ll
+++ b/llvm/test/Transforms/InstCombine/compare-3way.ll
@@ -15,8 +15,7 @@ define void @test_low_sgt(i64 %a, i64 %b) {
; CHECK: normal:
; CHECK-NEXT: ret void
; CHECK: unreached:
-; CHECK-NEXT: [[EQ:%.*]] = icmp ne i64 [[A]], [[B]]
-; CHECK-NEXT: [[RESULT:%.*]] = zext i1 [[EQ]] to i32
+; CHECK-NEXT: [[RESULT:%.*]] = call i32 @llvm.scmp.i32.i64(i64 [[A]], i64 [[B]])
; CHECK-NEXT: call void @use(i32 [[RESULT]])
; CHECK-NEXT: ret void
;
@@ -62,10 +61,7 @@ define void @test_low_sge(i64 %a, i64 %b) {
; CHECK: normal:
; CHECK-NEXT: ret void
; CHECK: unreached:
-; CHECK-NEXT: [[EQ:%.*]] = icmp eq i64 [[A]], [[B]]
-; CHECK-NEXT: [[SLT:%.*]] = icmp slt i64 [[A]], [[B]]
-; CHECK-NEXT: [[DOT:%.*]] = select i1 [[SLT]], i32 -1, i32 1
-; CHECK-NEXT: [[RESULT:%.*]] = select i1 [[EQ]], i32 0, i32 [[DOT]]
+; CHECK-NEXT: [[RESULT:%.*]] = call i32 @llvm.scmp.i32.i64(i64 [[A]], i64 [[B]])
; CHECK-NEXT: call void @use(i32 [[RESULT]])
; CHECK-NEXT: ret void
;
@@ -114,8 +110,7 @@ define void @test_low_ne(i64 %a, i64 %b) {
; CHECK: normal:
; CHECK-NEXT: ret void
; CHECK: unreached:
-; CHECK-NEXT: [[EQ:%.*]] = icmp ne i64 [[A]], [[B]]
-; CHECK-NEXT: [[RESULT:%.*]] = zext i1 [[EQ]] to i32
+; CHECK-NEXT: [[RESULT:%.*]] = call i32 @llvm.scmp.i32.i64(i64 [[A]], i64 [[B]])
; CHECK-NEXT: call void @use(i32 [[RESULT]])
; CHECK-NEXT: ret void
;
@@ -212,8 +207,7 @@ define void @test_mid_sge(i64 %a, i64 %b) {
; CHECK: normal:
; CHECK-NEXT: ret void
; CHECK: unreached:
-; CHECK-NEXT: [[EQ:%.*]] = icmp ne i64 [[A]], [[B]]
-; CHECK-NEXT: [[RESULT:%.*]] = zext i1 [[EQ]] to i32
+; CHECK-NEXT: [[RESULT:%.*]] = call i32 @llvm.scmp.i32.i64(i64 [[A]], i64 [[B]])
; CHECK-NEXT: call void @use(i32 [[RESULT]])
; CHECK-NEXT: ret void
;
@@ -238,10 +232,7 @@ define void @test_mid_sle(i64 %a, i64 %b) {
; CHECK: normal:
; CHECK-NEXT: ret void
; CHECK: unreached:
-; CHECK-NEXT: [[EQ:%.*]] = icmp eq i64 [[A]], [[B]]
-; CHECK-NEXT: [[SLT:%.*]] = icmp slt i64 [[A]], [[B]]
-; CHECK-NEXT: [[DOT:%.*]] = select i1 [[SLT]], i32 -1, i32 1
-; CHECK-NEXT: [[RESULT:%.*]] = select i1 [[EQ]], i32 0, i32 [[DOT]]
+; CHECK-NEXT: [[RESULT:%.*]] = call i32 @llvm.scmp.i32.i64(i64 [[A]], i64 [[B]])
; CHECK-NEXT: call void @use(i32 [[RESULT]])
; CHECK-NEXT: ret void
;
@@ -266,9 +257,8 @@ define void @test_mid_ne(i64 %a, i64 %b) {
; CHECK: normal:
; CHECK-NEXT: ret void
; CHECK: unreached:
-; CHECK-NEXT: [[SLT:%.*]] = icmp slt i64 [[A]], [[B]]
-; CHECK-NEXT: [[DOT:%.*]] = select i1 [[SLT]], i32 -1, i32 1
-; CHECK-NEXT: call void @use(i32 [[DOT]])
+; CHECK-NEXT: [[RESULT:%.*]] = call i32 @llvm.scmp.i32.i64(i64 [[A]], i64 [[B]])
+; CHECK-NEXT: call void @use(i32 [[RESULT]])
; CHECK-NEXT: ret void
;
%eq = icmp eq i64 %a, %b
@@ -338,10 +328,7 @@ define void @test_high_slt(i64 %a, i64 %b) {
; CHECK: normal:
; CHECK-NEXT: ret void
; CHECK: unreached:
-; CHECK-NEXT: [[EQ:%.*]] = icmp eq i64 [[A]], [[B]]
-; CHECK-NEXT: [[SLT:%.*]] = icmp slt i64 [[A]], [[B]]
-; CHECK-NEXT: [[DOT:%.*]] = select i1 [[SLT]], i32 -1, i32 1
-; CHECK-NEXT: [[RESULT:%.*]] = select i1 [[EQ]], i32 0, i32 [[DOT]]
+; CHECK-NEXT: [[RESULT:%.*]] = call i32 @llvm.scmp.i32.i64(i64 [[A]], i64 [[B]])
; CHECK-NEXT: call void @use(i32 [[RESULT]])
; CHECK-NEXT: ret void
;
@@ -389,10 +376,7 @@ define void @test_high_sle(i64 %a, i64 %b) {
; CHECK: normal:
; CHECK-NEXT: ret void
; CHECK: unreached:
-; CHECK-NEXT: [[EQ:%.*]] = icmp eq i64 [[A]], [[B]]
-; CHECK-NEXT: [[SLT:%.*]] = icmp slt i64 [[A]], [[B]]
-; CHECK-NEXT: [[DOT:%.*]] = select i1 [[SLT]], i32 -1, i32 1
-; CHECK-NEXT: [[RESULT:%.*]] = select i1 [[EQ]], i32 0, i32 [[DOT]]
+; CHECK-NEXT: [[RESULT:%.*]] = call i32 @llvm.scmp.i32.i64(i64 [[A]], i64 [[B]])
; CHECK-NEXT: call void @use(i32 [[RESULT]])
; CHECK-NEXT: ret void
;
@@ -417,10 +401,7 @@ define void @test_high_ne(i64 %a, i64 %b) {
; CHECK: normal:
; CHECK-NEXT: ret void
; CHECK: unreached:
-; CHECK-NEXT: [[EQ:%.*]] = icmp eq i64 [[A]], [[B]]
-; CHECK-NEXT: [[SLT:%.*]] = icmp slt i64 [[A]], [[B]]
-; CHECK-NEXT: [[DOT:%.*]] = select i1 [[SLT]], i32 -1, i32 1
-; CHECK-NEXT: [[RESULT:%.*]] = select i1 [[EQ]], i32 0, i32 [[DOT]]
+; CHECK-NEXT: [[RESULT:%.*]] = call i32 @llvm.scmp.i32.i64(i64 [[A]], i64 [[B]])
; CHECK-NEXT: call void @use(i32 [[RESULT]])
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/InstCombine/icmp-inttoptr.ll b/llvm/test/Transforms/InstCombine/icmp-inttoptr.ll
new file mode 100644
index 0000000..dd731a9
--- /dev/null
+++ b/llvm/test/Transforms/InstCombine/icmp-inttoptr.ll
@@ -0,0 +1,97 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -passes=instcombine -S | FileCheck %s
+
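+;; An icmp of two inttoptr results can be performed directly on the source integers,
+;; truncating first when an integer is wider than the pointer.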
+declare void @use_ptr(ptr)
+
+define i1 @inttoptr(i64 %x, i64 %y) {
+; CHECK-LABEL: @inttoptr(
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i64 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %xptr = inttoptr i64 %x to ptr
+ %yptr = inttoptr i64 %y to ptr
+ %cmp = icmp eq ptr %xptr, %yptr
+ ret i1 %cmp
+}
+
+define i1 @inttoptr_constant(i64 %x) {
+; CHECK-LABEL: @inttoptr_constant(
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i64 [[X:%.*]], 42
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %xptr = inttoptr i64 %x to ptr
+ %cmp = icmp eq ptr %xptr, inttoptr (i64 42 to ptr)
+ ret i1 %cmp
+}
+
+define <2 x i1> @inttoptr_vector(<2 x i64> %x, <2 x i64> %y) {
+; CHECK-LABEL: @inttoptr_vector(
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq <2 x i64> [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: ret <2 x i1> [[CMP]]
+;
+ %xptr = inttoptr <2 x i64> %x to <2 x ptr>
+ %yptr = inttoptr <2 x i64> %y to <2 x ptr>
+ %cmp = icmp eq <2 x ptr> %xptr, %yptr
+ ret <2 x i1> %cmp
+}
+
+define <2 x i1> @inttoptr_vector_constant(<2 x i64> %x) {
+; CHECK-LABEL: @inttoptr_vector_constant(
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq <2 x i64> [[X:%.*]], <i64 42, i64 123>
+; CHECK-NEXT: ret <2 x i1> [[CMP]]
+;
+ %xptr = inttoptr <2 x i64> %x to <2 x ptr>
+ %cmp = icmp eq <2 x ptr> %xptr, inttoptr (<2 x i64> <i64 42, i64 123> to <2 x ptr>)
+ ret <2 x i1> %cmp
+}
+
+define i1 @inttoptr_size_mismatch(i200 %x, i64 %y) {
+; CHECK-LABEL: @inttoptr_size_mismatch(
+; CHECK-NEXT: [[TMP1:%.*]] = trunc i200 [[X:%.*]] to i64
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i64 [[Y:%.*]], [[TMP1]]
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %xptr = inttoptr i200 %x to ptr
+ %yptr = inttoptr i64 %y to ptr
+ %cmp = icmp eq ptr %xptr, %yptr
+ ret i1 %cmp
+}
+
+define <2 x i1> @inttoptr_vector_constant_size_mismatch(<2 x i64> %x) {
+; CHECK-LABEL: @inttoptr_vector_constant_size_mismatch(
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq <2 x i64> [[X:%.*]], <i64 42, i64 123>
+; CHECK-NEXT: ret <2 x i1> [[CMP]]
+;
+ %xptr = inttoptr <2 x i64> %x to <2 x ptr>
+  %cmp = icmp eq <2 x ptr> %xptr, inttoptr (<2 x i9> <i9 42, i9 123> to <2 x ptr>)
+ ret <2 x i1> %cmp
+}
+
+define i1 @inttoptr_oneside(i64 %x, ptr %y) {
+; CHECK-LABEL: @inttoptr_oneside(
+; CHECK-NEXT: [[XPTR:%.*]] = inttoptr i64 [[X:%.*]] to ptr
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq ptr [[Y:%.*]], [[XPTR]]
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %xptr = inttoptr i64 %x to ptr
+ %cmp = icmp eq ptr %xptr, %y
+ ret i1 %cmp
+}
+
+define i1 @inttoptr_used(i64 %x, i64 %y) {
+; CHECK-LABEL: @inttoptr_used(
+; CHECK-NEXT: [[XPTR:%.*]] = inttoptr i64 [[X:%.*]] to ptr
+; CHECK-NEXT: [[YPTR:%.*]] = inttoptr i64 [[Y:%.*]] to ptr
+; CHECK-NEXT: call void @use_ptr(ptr [[XPTR]])
+; CHECK-NEXT: call void @use_ptr(ptr [[YPTR]])
+; CHECK-NEXT: [[CMP:%.*]] = icmp ugt i64 [[X]], [[Y]]
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %xptr = inttoptr i64 %x to ptr
+ %yptr = inttoptr i64 %y to ptr
+ call void @use_ptr(ptr %xptr)
+ call void @use_ptr(ptr %yptr)
+ %cmp = icmp ugt ptr %xptr, %yptr
+ ret i1 %cmp
+}
+
diff --git a/llvm/test/Transforms/InstCombine/phi-with-multiple-unsimplifiable-values.ll b/llvm/test/Transforms/InstCombine/phi-with-multiple-unsimplifiable-values.ll
index 2b75d5c..cd40aa9 100644
--- a/llvm/test/Transforms/InstCombine/phi-with-multiple-unsimplifiable-values.ll
+++ b/llvm/test/Transforms/InstCombine/phi-with-multiple-unsimplifiable-values.ll
@@ -133,3 +133,35 @@ exit:
%r = icmp slt i8 %phi, 0
ret i1 %r
}
+
+; Same as the first transformation, but the phi node uses the result of scmp twice. This verifies that we don't clone values more than once per block.
+define i1 @icmp_of_phi_of_scmp_with_constant_one_user_two_uses(i8 %c, i16 %x, i16 %y, i8 %false_val) {
+; CHECK-LABEL: define i1 @icmp_of_phi_of_scmp_with_constant_one_user_two_uses(
+; CHECK-SAME: i8 [[C:%.*]], i16 [[X:%.*]], i16 [[Y:%.*]], i8 [[FALSE_VAL:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[TMP0:%.*]] = icmp slt i16 [[X]], [[Y]]
+; CHECK-NEXT: switch i8 [[C]], label %[[BB_2:.*]] [
+; CHECK-NEXT: i8 0, label %[[BB:.*]]
+; CHECK-NEXT: i8 1, label %[[BB]]
+; CHECK-NEXT: ]
+; CHECK: [[BB_2]]:
+; CHECK-NEXT: br label %[[BB]]
+; CHECK: [[BB]]:
+; CHECK-NEXT: [[R:%.*]] = phi i1 [ [[TMP0]], %[[ENTRY]] ], [ [[TMP0]], %[[ENTRY]] ], [ false, %[[BB_2]] ]
+; CHECK-NEXT: ret i1 [[R]]
+;
+entry:
+ %cmp = call i8 @llvm.scmp(i16 %x, i16 %y)
+ switch i8 %c, label %bb_2 [
+ i8 0, label %bb
+ i8 1, label %bb
+ ]
+
+bb_2:
+ br label %bb
+
+bb:
+ %phi = phi i8 [ %cmp, %entry ], [ %cmp, %entry ], [ 0, %bb_2 ]
+ %r = icmp slt i8 %phi, 0
+ ret i1 %r
+}
diff --git a/llvm/test/Transforms/InstCombine/scmp.ll b/llvm/test/Transforms/InstCombine/scmp.ll
index 123bc64..2140a59d 100644
--- a/llvm/test/Transforms/InstCombine/scmp.ll
+++ b/llvm/test/Transforms/InstCombine/scmp.ll
@@ -343,3 +343,133 @@ define i8 @scmp_from_select_gt_and_lt(i32 %x, i32 %y) {
%r = select i1 %gt, i8 1, i8 %lt
ret i8 %r
}
+
+; Fold (x == y) ? 0 : (x s> y ? 1 : -1) into scmp(x, y).
+define i8 @scmp_from_select_eq_and_gt(i32 %x, i32 %y) {
+; CHECK-LABEL: define i8 @scmp_from_select_eq_and_gt(
+; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) {
+; CHECK-NEXT: [[R:%.*]] = call i8 @llvm.scmp.i8.i32(i32 [[X]], i32 [[Y]])
+; CHECK-NEXT: ret i8 [[R]]
+;
+ %eq = icmp eq i32 %x, %y
+ %gt = icmp sgt i32 %x, %y
+ %sel1 = select i1 %gt, i8 1, i8 -1
+ %r = select i1 %eq, i8 0, i8 %sel1
+ ret i8 %r
+}
+
+define i8 @scmp_from_select_eq_and_gt_inverse(i32 %x, i32 %y) {
+; CHECK-LABEL: define i8 @scmp_from_select_eq_and_gt_inverse(
+; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) {
+; CHECK-NEXT: [[R:%.*]] = call i8 @llvm.scmp.i8.i32(i32 [[X]], i32 [[Y]])
+; CHECK-NEXT: ret i8 [[R]]
+;
+ %ne = icmp ne i32 %x, %y
+ %gt = icmp sgt i32 %x, %y
+ %sel1 = select i1 %gt, i8 1, i8 -1
+ %r = select i1 %ne, i8 %sel1, i8 0
+ ret i8 %r
+}
+
+define <4 x i8> @scmp_from_select_eq_and_gt_vec(<4 x i32> %x, <4 x i32> %y) {
+; CHECK-LABEL: define <4 x i8> @scmp_from_select_eq_and_gt_vec(
+; CHECK-SAME: <4 x i32> [[X:%.*]], <4 x i32> [[Y:%.*]]) {
+; CHECK-NEXT: [[R:%.*]] = call <4 x i8> @llvm.scmp.v4i8.v4i32(<4 x i32> [[X]], <4 x i32> [[Y]])
+; CHECK-NEXT: ret <4 x i8> [[R]]
+;
+ %eq = icmp eq <4 x i32> %x, %y
+ %gt = icmp sgt <4 x i32> %x, %y
+ %sel1 = select <4 x i1> %gt, <4 x i8> splat(i8 1), <4 x i8> splat(i8 -1)
+ %r = select <4 x i1> %eq, <4 x i8> splat(i8 0), <4 x i8> %sel1
+ ret <4 x i8> %r
+}
+
+define i8 @scmp_from_select_eq_and_gt_commuted1(i32 %x, i32 %y) {
+; CHECK-LABEL: define i8 @scmp_from_select_eq_and_gt_commuted1(
+; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) {
+; CHECK-NEXT: [[R:%.*]] = call i8 @llvm.scmp.i8.i32(i32 [[Y]], i32 [[X]])
+; CHECK-NEXT: ret i8 [[R]]
+;
+ %eq = icmp eq i32 %x, %y
+ %gt = icmp slt i32 %x, %y
+ %sel1 = select i1 %gt, i8 1, i8 -1
+ %r = select i1 %eq, i8 0, i8 %sel1
+ ret i8 %r
+}
+
+define i8 @scmp_from_select_eq_and_gt_commuted2(i32 %x, i32 %y) {
+; CHECK-LABEL: define i8 @scmp_from_select_eq_and_gt_commuted2(
+; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) {
+; CHECK-NEXT: [[R:%.*]] = call i8 @llvm.scmp.i8.i32(i32 [[Y]], i32 [[X]])
+; CHECK-NEXT: ret i8 [[R]]
+;
+ %eq = icmp eq i32 %x, %y
+ %gt = icmp sgt i32 %x, %y
+ %sel1 = select i1 %gt, i8 -1, i8 1
+ %r = select i1 %eq, i8 0, i8 %sel1
+ ret i8 %r
+}
+
+define i8 @scmp_from_select_eq_and_gt_commuted3(i32 %x, i32 %y) {
+; CHECK-LABEL: define i8 @scmp_from_select_eq_and_gt_commuted3(
+; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) {
+; CHECK-NEXT: [[R:%.*]] = call i8 @llvm.scmp.i8.i32(i32 [[X]], i32 [[Y]])
+; CHECK-NEXT: ret i8 [[R]]
+;
+ %eq = icmp eq i32 %x, %y
+ %gt = icmp slt i32 %x, %y
+ %sel1 = select i1 %gt, i8 -1, i8 1
+ %r = select i1 %eq, i8 0, i8 %sel1
+ ret i8 %r
+}
+
+; Negative test: true value of outer select is not zero
+define i8 @scmp_from_select_eq_and_gt_neg1(i32 %x, i32 %y) {
+; CHECK-LABEL: define i8 @scmp_from_select_eq_and_gt_neg1(
+; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) {
+; CHECK-NEXT: [[EQ:%.*]] = icmp eq i32 [[X]], [[Y]]
+; CHECK-NEXT: [[GT:%.*]] = icmp sgt i32 [[X]], [[Y]]
+; CHECK-NEXT: [[SEL1:%.*]] = select i1 [[GT]], i8 1, i8 -1
+; CHECK-NEXT: [[R:%.*]] = select i1 [[EQ]], i8 5, i8 [[SEL1]]
+; CHECK-NEXT: ret i8 [[R]]
+;
+ %eq = icmp eq i32 %x, %y
+ %gt = icmp sgt i32 %x, %y
+ %sel1 = select i1 %gt, i8 1, i8 -1
+ %r = select i1 %eq, i8 5, i8 %sel1
+ ret i8 %r
+}
+
+; Negative test: true value of inner select is not 1 or -1
+define i8 @scmp_from_select_eq_and_gt_neg2(i32 %x, i32 %y) {
+; CHECK-LABEL: define i8 @scmp_from_select_eq_and_gt_neg2(
+; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) {
+; CHECK-NEXT: [[EQ:%.*]] = icmp eq i32 [[X]], [[Y]]
+; CHECK-NEXT: [[GT:%.*]] = icmp sgt i32 [[X]], [[Y]]
+; CHECK-NEXT: [[SEL1:%.*]] = select i1 [[GT]], i8 2, i8 -1
+; CHECK-NEXT: [[R:%.*]] = select i1 [[EQ]], i8 0, i8 [[SEL1]]
+; CHECK-NEXT: ret i8 [[R]]
+;
+ %eq = icmp eq i32 %x, %y
+ %gt = icmp sgt i32 %x, %y
+ %sel1 = select i1 %gt, i8 2, i8 -1
+ %r = select i1 %eq, i8 0, i8 %sel1
+ ret i8 %r
+}
+
+; Negative test: false value of inner select is not 1 or -1
+define i8 @scmp_from_select_eq_and_gt_neg3(i32 %x, i32 %y) {
+; CHECK-LABEL: define i8 @scmp_from_select_eq_and_gt_neg3(
+; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) {
+; CHECK-NEXT: [[EQ:%.*]] = icmp eq i32 [[X]], [[Y]]
+; CHECK-NEXT: [[GT:%.*]] = icmp sgt i32 [[X]], [[Y]]
+; CHECK-NEXT: [[SEL1:%.*]] = select i1 [[GT]], i8 1, i8 22
+; CHECK-NEXT: [[R:%.*]] = select i1 [[EQ]], i8 0, i8 [[SEL1]]
+; CHECK-NEXT: ret i8 [[R]]
+;
+ %eq = icmp eq i32 %x, %y
+ %gt = icmp sgt i32 %x, %y
+ %sel1 = select i1 %gt, i8 1, i8 22
+ %r = select i1 %eq, i8 0, i8 %sel1
+ ret i8 %r
+}
diff --git a/llvm/test/Transforms/InstCombine/select-select.ll b/llvm/test/Transforms/InstCombine/select-select.ll
index 1feae5a..94e88c2 100644
--- a/llvm/test/Transforms/InstCombine/select-select.ll
+++ b/llvm/test/Transforms/InstCombine/select-select.ll
@@ -177,10 +177,7 @@ define <2 x i8> @sel_shuf_narrowing_commute2(<4 x i8> %x, <4 x i8> %y, <2 x i8>
define i8 @strong_order_cmp_slt_eq(i32 %a, i32 %b) {
; CHECK-LABEL: @strong_order_cmp_slt_eq(
-; CHECK-NEXT: [[CMP_LT:%.*]] = icmp slt i32 [[A:%.*]], [[B:%.*]]
-; CHECK-NEXT: [[SEL_LT:%.*]] = select i1 [[CMP_LT]], i8 -1, i8 1
-; CHECK-NEXT: [[CMP_EQ:%.*]] = icmp eq i32 [[A]], [[B]]
-; CHECK-NEXT: [[SEL_EQ:%.*]] = select i1 [[CMP_EQ]], i8 0, i8 [[SEL_LT]]
+; CHECK-NEXT: [[SEL_EQ:%.*]] = call i8 @llvm.scmp.i8.i32(i32 [[A:%.*]], i32 [[B:%.*]])
; CHECK-NEXT: ret i8 [[SEL_EQ]]
;
%cmp.lt = icmp slt i32 %a, %b
@@ -192,10 +189,7 @@ define i8 @strong_order_cmp_slt_eq(i32 %a, i32 %b) {
define i8 @strong_order_cmp_ult_eq(i32 %a, i32 %b) {
; CHECK-LABEL: @strong_order_cmp_ult_eq(
-; CHECK-NEXT: [[CMP_LT:%.*]] = icmp ult i32 [[A:%.*]], [[B:%.*]]
-; CHECK-NEXT: [[SEL_LT:%.*]] = select i1 [[CMP_LT]], i8 -1, i8 1
-; CHECK-NEXT: [[CMP_EQ:%.*]] = icmp eq i32 [[A]], [[B]]
-; CHECK-NEXT: [[SEL_EQ:%.*]] = select i1 [[CMP_EQ]], i8 0, i8 [[SEL_LT]]
+; CHECK-NEXT: [[SEL_EQ:%.*]] = call i8 @llvm.ucmp.i8.i32(i32 [[A:%.*]], i32 [[B:%.*]])
; CHECK-NEXT: ret i8 [[SEL_EQ]]
;
%cmp.lt = icmp ult i32 %a, %b
@@ -252,10 +246,7 @@ define i8 @strong_order_cmp_slt_ult_wrong_pred(i32 %a, i32 %b) {
define i8 @strong_order_cmp_sgt_eq(i32 %a, i32 %b) {
; CHECK-LABEL: @strong_order_cmp_sgt_eq(
-; CHECK-NEXT: [[CMP_GT:%.*]] = icmp sgt i32 [[A:%.*]], [[B:%.*]]
-; CHECK-NEXT: [[SEL_GT:%.*]] = select i1 [[CMP_GT]], i8 1, i8 -1
-; CHECK-NEXT: [[CMP_EQ:%.*]] = icmp eq i32 [[A]], [[B]]
-; CHECK-NEXT: [[SEL_EQ:%.*]] = select i1 [[CMP_EQ]], i8 0, i8 [[SEL_GT]]
+; CHECK-NEXT: [[SEL_EQ:%.*]] = call i8 @llvm.scmp.i8.i32(i32 [[A:%.*]], i32 [[B:%.*]])
; CHECK-NEXT: ret i8 [[SEL_EQ]]
;
%cmp.gt = icmp sgt i32 %a, %b
@@ -267,10 +258,7 @@ define i8 @strong_order_cmp_sgt_eq(i32 %a, i32 %b) {
define i8 @strong_order_cmp_ugt_eq(i32 %a, i32 %b) {
; CHECK-LABEL: @strong_order_cmp_ugt_eq(
-; CHECK-NEXT: [[CMP_GT:%.*]] = icmp ugt i32 [[A:%.*]], [[B:%.*]]
-; CHECK-NEXT: [[SEL_GT:%.*]] = select i1 [[CMP_GT]], i8 1, i8 -1
-; CHECK-NEXT: [[CMP_EQ:%.*]] = icmp eq i32 [[A]], [[B]]
-; CHECK-NEXT: [[SEL_EQ:%.*]] = select i1 [[CMP_EQ]], i8 0, i8 [[SEL_GT]]
+; CHECK-NEXT: [[SEL_EQ:%.*]] = call i8 @llvm.ucmp.i8.i32(i32 [[A:%.*]], i32 [[B:%.*]])
; CHECK-NEXT: ret i8 [[SEL_EQ]]
;
%cmp.gt = icmp ugt i32 %a, %b
@@ -395,9 +383,7 @@ define i8 @strong_order_cmp_slt_eq_slt_not_oneuse(i32 %a, i32 %b) {
; CHECK-LABEL: @strong_order_cmp_slt_eq_slt_not_oneuse(
; CHECK-NEXT: [[CMP_LT:%.*]] = icmp slt i32 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT: call void @use1(i1 [[CMP_LT]])
-; CHECK-NEXT: [[SEL_LT:%.*]] = select i1 [[CMP_LT]], i8 -1, i8 1
-; CHECK-NEXT: [[CMP_EQ:%.*]] = icmp eq i32 [[A]], [[B]]
-; CHECK-NEXT: [[SEL_EQ:%.*]] = select i1 [[CMP_EQ]], i8 0, i8 [[SEL_LT]]
+; CHECK-NEXT: [[SEL_EQ:%.*]] = call i8 @llvm.scmp.i8.i32(i32 [[A]], i32 [[B]])
; CHECK-NEXT: ret i8 [[SEL_EQ]]
;
%cmp.lt = icmp slt i32 %a, %b
@@ -410,11 +396,9 @@ define i8 @strong_order_cmp_slt_eq_slt_not_oneuse(i32 %a, i32 %b) {
define i8 @strong_order_cmp_sgt_eq_eq_not_oneuse(i32 %a, i32 %b) {
; CHECK-LABEL: @strong_order_cmp_sgt_eq_eq_not_oneuse(
-; CHECK-NEXT: [[CMP_GT:%.*]] = icmp sgt i32 [[A:%.*]], [[B:%.*]]
-; CHECK-NEXT: [[SEL_GT:%.*]] = select i1 [[CMP_GT]], i8 1, i8 -1
-; CHECK-NEXT: [[CMP_EQ:%.*]] = icmp eq i32 [[A]], [[B]]
+; CHECK-NEXT: [[CMP_EQ:%.*]] = icmp eq i32 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT: call void @use1(i1 [[CMP_EQ]])
-; CHECK-NEXT: [[SEL_EQ:%.*]] = select i1 [[CMP_EQ]], i8 0, i8 [[SEL_GT]]
+; CHECK-NEXT: [[SEL_EQ:%.*]] = call i8 @llvm.scmp.i8.i32(i32 [[A]], i32 [[B]])
; CHECK-NEXT: ret i8 [[SEL_EQ]]
;
%cmp.gt = icmp sgt i32 %a, %b
diff --git a/llvm/test/Transforms/InstCombine/sink_to_unreachable.ll b/llvm/test/Transforms/InstCombine/sink_to_unreachable.ll
index 01510f8..72aa6dc 100644
--- a/llvm/test/Transforms/InstCombine/sink_to_unreachable.ll
+++ b/llvm/test/Transforms/InstCombine/sink_to_unreachable.ll
@@ -10,8 +10,7 @@ define void @test_01(i32 %x, i32 %y) {
; CHECK-NEXT: [[C2:%.*]] = icmp slt i32 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: br i1 [[C2]], label [[EXIT:%.*]], label [[UNREACHED:%.*]]
; CHECK: unreached:
-; CHECK-NEXT: [[C1:%.*]] = icmp ne i32 [[X]], [[Y]]
-; CHECK-NEXT: [[COMPARATOR:%.*]] = zext i1 [[C1]] to i32
+; CHECK-NEXT: [[COMPARATOR:%.*]] = call i32 @llvm.scmp.i32.i32(i32 [[X]], i32 [[Y]])
; CHECK-NEXT: call void @use(i32 [[COMPARATOR]])
; CHECK-NEXT: unreachable
; CHECK: exit:
@@ -42,8 +41,7 @@ define void @test_02(i32 %x, i32 %y) {
; CHECK-NEXT: [[C3:%.*]] = icmp sgt i32 [[X]], [[Y]]
; CHECK-NEXT: br i1 [[C3]], label [[EXIT]], label [[UNREACHED:%.*]]
; CHECK: unreached:
-; CHECK-NEXT: [[C1:%.*]] = icmp ne i32 [[X]], [[Y]]
-; CHECK-NEXT: [[COMPARATOR:%.*]] = zext i1 [[C1]] to i32
+; CHECK-NEXT: [[COMPARATOR:%.*]] = call i32 @llvm.scmp.i32.i32(i32 [[X]], i32 [[Y]])
; CHECK-NEXT: call void @use(i32 [[COMPARATOR]])
; CHECK-NEXT: unreachable
; CHECK: exit:
@@ -77,8 +75,7 @@ define i32 @test_03(i32 %x, i32 %y) {
; CHECK-NEXT: [[C3:%.*]] = icmp sgt i32 [[X]], [[Y]]
; CHECK-NEXT: br i1 [[C3]], label [[EXIT]], label [[UNREACHED:%.*]]
; CHECK: unreached:
-; CHECK-NEXT: [[C1:%.*]] = icmp ne i32 [[X]], [[Y]]
-; CHECK-NEXT: [[COMPARATOR:%.*]] = zext i1 [[C1]] to i32
+; CHECK-NEXT: [[COMPARATOR:%.*]] = call i32 @llvm.scmp.i32.i32(i32 [[X]], i32 [[Y]])
; CHECK-NEXT: ret i32 [[COMPARATOR]]
; CHECK: exit:
; CHECK-NEXT: ret i32 0
diff --git a/llvm/test/Transforms/InstCombine/ucmp.ll b/llvm/test/Transforms/InstCombine/ucmp.ll
index 13755f1..2d50360 100644
--- a/llvm/test/Transforms/InstCombine/ucmp.ll
+++ b/llvm/test/Transforms/InstCombine/ucmp.ll
@@ -541,3 +541,17 @@ define i8 @ucmp_from_select_gt_and_lt(i32 %x, i32 %y) {
%r = select i1 %gt, i8 1, i8 %lt
ret i8 %r
}
+
+; Fold (x == y) ? 0 : (x u> y ? 1 : -1) into ucmp(x, y).
+define i8 @ucmp_from_select_eq_and_gt(i32 %x, i32 %y) {
+; CHECK-LABEL: define i8 @ucmp_from_select_eq_and_gt(
+; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) {
+; CHECK-NEXT: [[R:%.*]] = call i8 @llvm.ucmp.i8.i32(i32 [[X]], i32 [[Y]])
+; CHECK-NEXT: ret i8 [[R]]
+;
+ %eq = icmp eq i32 %x, %y
+ %gt = icmp ugt i32 %x, %y
+ %sel1 = select i1 %gt, i8 1, i8 -1
+ %r = select i1 %eq, i8 0, i8 %sel1
+ ret i8 %r
+}
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/scalable-call.ll b/llvm/test/Transforms/LoopVectorize/AArch64/scalable-call.ll
index 498f205..7797c0b 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/scalable-call.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/scalable-call.ll
@@ -54,9 +54,9 @@ for.end: ; preds = %for.body, %entry
define void @vec_ptr(i64 %N, ptr noalias %a, ptr readnone %b) {
; CHECK-LABEL: @vec_ptr
-; CHECK: vector.body:
-; CHECK: %[[LOAD:.*]] = load <vscale x 2 x ptr>, ptr
-; CHECK: call <vscale x 2 x i64> @bar_vec(<vscale x 2 x ptr> %[[LOAD]])
+; CHECK: for.body:
+; CHECK: %[[LOAD:.*]] = load ptr, ptr
+; CHECK: call i64 @bar(ptr %[[LOAD]])
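+; Annotation: @bar takes a pointer argument, so with pointer-argument
+; widening disabled the loop is expected to stay scalar; the call now
+; appears in for.body instead of the @bar_vec form in vector.body.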
entry:
%cmp7 = icmp sgt i64 %N, 0
br i1 %cmp7, label %for.body, label %for.end
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/scalable-fp-ext-trunc-illegal-type.ll b/llvm/test/Transforms/LoopVectorize/AArch64/scalable-fp-ext-trunc-illegal-type.ll
new file mode 100644
index 0000000..92b043a9
--- /dev/null
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/scalable-fp-ext-trunc-illegal-type.ll
@@ -0,0 +1,76 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 3
+; RUN: opt < %s -mattr=+sve -passes=loop-vectorize -debug-only=loop-vectorize -vectorizer-maximize-bandwidth -force-vector-interleave=1 -S 2>&1 | FileCheck %s
+; REQUIRES: asserts
+
+target triple = "aarch64-unknown-linux-gnu"
+
+;; Make sure we reject scalable vectors for fp128 types. We previously
+;; crashed before reaching the cost model, when computing the number of
+;; registers required for a <vscale x 4 x fp128> while trying to maximize
+;; vector bandwidth with SVE.
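+;;
+;; Annotation (not from the patch): SVE packs 8-, 16-, 32- and 64-bit
+;; elements, so a 128-bit fp128 element has no legal scalable container;
+;; at vscale = 2, for example, a <vscale x 4 x fp128> would span
+;; 2 * 4 * 128 = 1024 bits. The cost model therefore reports Invalid for
+;; the scalable VF, as the line below checks.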
+
+; CHECK: LV: Found an estimated cost of Invalid for VF vscale x 2 For instruction: %load.ext = fpext double %load.in to fp128
+
+define void @load_ext_trunc_store(ptr readonly %in, ptr noalias %out, i64 %N) {
+; CHECK-LABEL: define void @load_ext_trunc_store(
+; CHECK-SAME: ptr readonly [[IN:%.*]], ptr noalias [[OUT:%.*]], i64 [[N:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[UMAX:%.*]] = call i64 @llvm.umax.i64(i64 [[N]], i64 1)
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[UMAX]], 4
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[UMAX]], 4
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[UMAX]], [[N_MOD_VF]]
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 0
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds double, ptr [[IN]], i64 [[TMP0]]
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds double, ptr [[TMP2]], i32 0
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x double>, ptr [[TMP4]], align 8
+; CHECK-NEXT: [[TMP3:%.*]] = fpext <4 x double> [[WIDE_LOAD]] to <4 x fp128>
+; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds float, ptr [[OUT]], i64 [[TMP0]]
+; CHECK-NEXT: [[TMP5:%.*]] = fptrunc <4 x fp128> [[TMP3]] to <4 x float>
+; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds float, ptr [[TMP8]], i32 0
+; CHECK-NEXT: store <4 x float> [[TMP5]], ptr [[TMP12]], align 4
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
+; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[UMAX]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_EXIT:.*]], label %[[SCALAR_PH]]
+; CHECK: [[SCALAR_PH]]:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: br label %[[FOR_BODY:.*]]
+; CHECK: [[FOR_BODY]]:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[GEP_IN:%.*]] = getelementptr inbounds nuw double, ptr [[IN]], i64 [[IV]]
+; CHECK-NEXT: [[LOAD_IN:%.*]] = load double, ptr [[GEP_IN]], align 8
+; CHECK-NEXT: [[LOAD_EXT:%.*]] = fpext double [[LOAD_IN]] to fp128
+; CHECK-NEXT: [[GEP_OUT:%.*]] = getelementptr inbounds nuw float, ptr [[OUT]], i64 [[IV]]
+; CHECK-NEXT: [[TRUNC_OUT:%.*]] = fptrunc fp128 [[LOAD_EXT]] to float
+; CHECK-NEXT: store float [[TRUNC_OUT]], ptr [[GEP_OUT]], align 4
+; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ult i64 [[IV_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[EXITCOND]], label %[[FOR_BODY]], label %[[FOR_EXIT]], !llvm.loop [[LOOP3:![0-9]+]]
+; CHECK: [[FOR_EXIT]]:
+; CHECK-NEXT: ret void
+;
+entry:
+ br label %for.body
+
+for.body:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %gep.in = getelementptr inbounds nuw double, ptr %in, i64 %iv
+ %load.in = load double, ptr %gep.in, align 8
+ %load.ext = fpext double %load.in to fp128
+ %gep.out = getelementptr inbounds nuw float, ptr %out, i64 %iv
+ %trunc.out = fptrunc fp128 %load.ext to float
+ store float %trunc.out, ptr %gep.out, align 4
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond = icmp ult i64 %iv.next, %N
+ br i1 %exitcond, label %for.body, label %for.exit
+
+for.exit:
+ ret void
+}
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/veclib-function-calls.ll b/llvm/test/Transforms/LoopVectorize/AArch64/veclib-function-calls.ll
index d9cc630..41ccb3c 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/veclib-function-calls.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/veclib-function-calls.ll
@@ -2902,35 +2902,36 @@ define void @log2_f32(ptr noalias %in.ptr, ptr noalias %out.ptr) {
ret void
}
+; FIXME: Re-enable modf[f] vectorization once aliasing issues with output
+; pointers have been resolved.
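+;
+; A sketch of the hazard (our annotation, array roles assumed): each scalar
+; iteration performs roughly
+;   out[i] = modf(in[i], &intpart[i]);  // stores the integral part via ptr
+; so the vector variants (e.g. _ZGVnN2vl8_modf) receive a linear output
+; pointer, and widening is only safe once LAA can prove that pointer does
+; not alias the loop's other memory accesses.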
+
declare double @modf(double, ptr)
declare float @modff(float, ptr)
define void @modf_f64(ptr noalias %a, ptr noalias %b, ptr noalias %c) {
; SLEEF-NEON-LABEL: define void @modf_f64
; SLEEF-NEON-SAME: (ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]]) #[[ATTR0]] {
-; SLEEF-NEON: [[TMP5:%.*]] = call <2 x double> @_ZGVnN2vl8_modf(<2 x double> [[WIDE_LOAD:%.*]], ptr [[TMP4:%.*]])
+; SLEEF-NEON: [[DATA:%.*]] = call double @modf(double [[NUM:%.*]], ptr [[GEPB:%.*]])
;
; SLEEF-SVE-LABEL: define void @modf_f64
; SLEEF-SVE-SAME: (ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]]) #[[ATTR0]] {
-; SLEEF-SVE: [[DATA:%.*]] = call double @modf(double [[NUM:%.*]], ptr [[GEPB:%.*]]) #[[ATTR4:[0-9]+]]
+; SLEEF-SVE: [[DATA:%.*]] = call double @modf(double [[NUM:%.*]], ptr [[GEPB:%.*]])
;
; SLEEF-SVE-NOPRED-LABEL: define void @modf_f64
; SLEEF-SVE-NOPRED-SAME: (ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]]) #[[ATTR0]] {
-; SLEEF-SVE-NOPRED: [[TMP17:%.*]] = call <vscale x 2 x double> @_ZGVsNxvl8_modf(<vscale x 2 x double> [[WIDE_LOAD:%.*]], ptr [[TMP16:%.*]])
-; SLEEF-SVE-NOPRED: [[DATA:%.*]] = call double @modf(double [[NUM:%.*]], ptr [[GEPB:%.*]]) #[[ATTR64:[0-9]+]]
+; SLEEF-SVE-NOPRED: [[DATA:%.*]] = call double @modf(double [[NUM:%.*]], ptr [[GEPB:%.*]])
;
; ARMPL-NEON-LABEL: define void @modf_f64
; ARMPL-NEON-SAME: (ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]]) #[[ATTR0]] {
-; ARMPL-NEON: [[TMP5:%.*]] = call <2 x double> @armpl_vmodfq_f64(<2 x double> [[WIDE_LOAD:%.*]], ptr [[TMP4:%.*]])
+; ARMPL-NEON: [[DATA:%.*]] = call double @modf(double [[NUM:%.*]], ptr [[GEPB:%.*]])
;
; ARMPL-SVE-LABEL: define void @modf_f64
; ARMPL-SVE-SAME: (ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]]) #[[ATTR0]] {
-; ARMPL-SVE: [[TMP23:%.*]] = call <vscale x 2 x double> @armpl_svmodf_f64_x(<vscale x 2 x double> [[WIDE_MASKED_LOAD:%.*]], ptr [[TMP22:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
+; ARMPL-SVE: [[DATA:%.*]] = call double @modf(double [[NUM:%.*]], ptr [[GEPB:%.*]])
;
; ARMPL-SVE-NOPRED-LABEL: define void @modf_f64
; ARMPL-SVE-NOPRED-SAME: (ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]]) #[[ATTR0]] {
-; ARMPL-SVE-NOPRED: [[TMP17:%.*]] = call <vscale x 2 x double> @armpl_svmodf_f64_x(<vscale x 2 x double> [[WIDE_LOAD:%.*]], ptr [[TMP16:%.*]], <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> poison, i1 true, i64 0), <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer))
-; ARMPL-SVE-NOPRED: [[DATA:%.*]] = call double @modf(double [[NUM:%.*]], ptr [[GEPB:%.*]]) #[[ATTR64:[0-9]+]]
+; ARMPL-SVE-NOPRED: [[DATA:%.*]] = call double @modf(double [[NUM:%.*]], ptr [[GEPB:%.*]])
;
entry:
br label %for.body
@@ -2954,29 +2955,27 @@ for.cond.cleanup:
define void @modf_f32(ptr noalias %a, ptr noalias %b, ptr noalias %c) {
; SLEEF-NEON-LABEL: define void @modf_f32
; SLEEF-NEON-SAME: (ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]]) #[[ATTR0]] {
-; SLEEF-NEON: [[TMP5:%.*]] = call <4 x float> @_ZGVnN4vl4_modff(<4 x float> [[WIDE_LOAD:%.*]], ptr [[TMP4:%.*]])
+; SLEEF-NEON: [[DATA:%.*]] = call float @modff(float [[NUM:%.*]], ptr [[GEPB:%.*]])
;
; SLEEF-SVE-LABEL: define void @modf_f32
; SLEEF-SVE-SAME: (ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]]) #[[ATTR0]] {
-; SLEEF-SVE: [[DATA:%.*]] = call float @modff(float [[NUM:%.*]], ptr [[GEPB:%.*]]) #[[ATTR5:[0-9]+]]
+; SLEEF-SVE: [[DATA:%.*]] = call float @modff(float [[NUM:%.*]], ptr [[GEPB:%.*]])
;
; SLEEF-SVE-NOPRED-LABEL: define void @modf_f32
; SLEEF-SVE-NOPRED-SAME: (ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]]) #[[ATTR0]] {
-; SLEEF-SVE-NOPRED: [[TMP17:%.*]] = call <vscale x 4 x float> @_ZGVsNxvl4_modff(<vscale x 4 x float> [[WIDE_LOAD:%.*]], ptr [[TMP16:%.*]])
-; SLEEF-SVE-NOPRED: [[DATA:%.*]] = call float @modff(float [[NUM:%.*]], ptr [[GEPB:%.*]]) #[[ATTR65:[0-9]+]]
+; SLEEF-SVE-NOPRED: [[DATA:%.*]] = call float @modff(float [[NUM:%.*]], ptr [[GEPB:%.*]])
;
; ARMPL-NEON-LABEL: define void @modf_f32
; ARMPL-NEON-SAME: (ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]]) #[[ATTR0]] {
-; ARMPL-NEON: [[TMP5:%.*]] = call <4 x float> @armpl_vmodfq_f32(<4 x float> [[WIDE_LOAD:%.*]], ptr [[TMP4:%.*]])
+; ARMPL-NEON: [[DATA:%.*]] = call float @modff(float [[NUM:%.*]], ptr [[GEPB:%.*]])
;
; ARMPL-SVE-LABEL: define void @modf_f32
; ARMPL-SVE-SAME: (ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]]) #[[ATTR0]] {
-; ARMPL-SVE: [[TMP23:%.*]] = call <vscale x 4 x float> @armpl_svmodf_f32_x(<vscale x 4 x float> [[WIDE_MASKED_LOAD:%.*]], ptr [[TMP22:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
+; ARMPL-SVE: [[DATA:%.*]] = call float @modff(float [[NUM:%.*]], ptr [[GEPB:%.*]])
;
; ARMPL-SVE-NOPRED-LABEL: define void @modf_f32
; ARMPL-SVE-NOPRED-SAME: (ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]]) #[[ATTR0]] {
-; ARMPL-SVE-NOPRED: [[TMP17:%.*]] = call <vscale x 4 x float> @armpl_svmodf_f32_x(<vscale x 4 x float> [[WIDE_LOAD:%.*]], ptr [[TMP16:%.*]], <vscale x 4 x i1> shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 true, i64 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer))
-; ARMPL-SVE-NOPRED: [[DATA:%.*]] = call float @modff(float [[NUM:%.*]], ptr [[GEPB:%.*]]) #[[ATTR65:[0-9]+]]
+; ARMPL-SVE-NOPRED: [[DATA:%.*]] = call float @modff(float [[NUM:%.*]], ptr [[GEPB:%.*]])
;
entry:
br label %for.body
@@ -3276,35 +3275,36 @@ define void @sin_f32(ptr noalias %in.ptr, ptr noalias %out.ptr) {
ret void
}
+; FIXME: Re-enable sincos[f] vectorization once aliasing issues with output
+; pointers have been resolved.
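+;
+; Annotation: sincos stores through two output pointers per call, so the
+; same aliasing proof as for modf above is needed for both pointers before
+; the widened variants (e.g. _ZGVnN2vl8l8_sincos) can be reinstated.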
+
declare void @sincos(double, ptr, ptr)
declare void @sincosf(float, ptr, ptr)
define void @sincos_f64(ptr noalias %a, ptr noalias %b, ptr noalias %c) {
; SLEEF-NEON-LABEL: define void @sincos_f64
; SLEEF-NEON-SAME: (ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]]) #[[ATTR0]] {
-; SLEEF-NEON: call void @_ZGVnN2vl8l8_sincos(<2 x double> [[WIDE_LOAD:%.*]], ptr [[TMP5:%.*]], ptr [[TMP6:%.*]])
+; SLEEF-NEON: call void @sincos(double [[NUM:%.*]], ptr [[GEPB:%.*]], ptr [[GEPC:%.*]])
;
; SLEEF-SVE-LABEL: define void @sincos_f64
; SLEEF-SVE-SAME: (ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]]) #[[ATTR0]] {
-; SLEEF-SVE: call void @sincos(double [[NUM:%.*]], ptr [[GEPB:%.*]], ptr [[GEPC:%.*]]) #[[ATTR6:[0-9]+]]
+; SLEEF-SVE: call void @sincos(double [[NUM:%.*]], ptr [[GEPB:%.*]], ptr [[GEPC:%.*]])
;
; SLEEF-SVE-NOPRED-LABEL: define void @sincos_f64
; SLEEF-SVE-NOPRED-SAME: (ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]]) #[[ATTR0]] {
-; SLEEF-SVE-NOPRED: call void @_ZGVsNxvl8l8_sincos(<vscale x 2 x double> [[WIDE_LOAD:%.*]], ptr [[TMP17:%.*]], ptr [[TMP18:%.*]])
-; SLEEF-SVE-NOPRED: call void @sincos(double [[NUM:%.*]], ptr [[GEPB:%.*]], ptr [[GEPC:%.*]]) #[[ATTR72:[0-9]+]]
+; SLEEF-SVE-NOPRED: call void @sincos(double [[NUM:%.*]], ptr [[GEPB:%.*]], ptr [[GEPC:%.*]])
;
; ARMPL-NEON-LABEL: define void @sincos_f64
; ARMPL-NEON-SAME: (ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]]) #[[ATTR0]] {
-; ARMPL-NEON: call void @armpl_vsincosq_f64(<2 x double> [[WIDE_LOAD:%.*]], ptr [[TMP5:%.*]], ptr [[TMP6:%.*]])
+; ARMPL-NEON: call void @sincos(double [[NUM:%.*]], ptr [[GEPB:%.*]], ptr [[GEPC:%.*]])
;
; ARMPL-SVE-LABEL: define void @sincos_f64
; ARMPL-SVE-SAME: (ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]]) #[[ATTR0]] {
-; ARMPL-SVE: call void @armpl_svsincos_f64_x(<vscale x 2 x double> [[WIDE_MASKED_LOAD:%.*]], ptr [[TMP23:%.*]], ptr [[TMP24:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
+; ARMPL-SVE: call void @sincos(double [[NUM:%.*]], ptr [[GEPB:%.*]], ptr [[GEPC:%.*]])
;
; ARMPL-SVE-NOPRED-LABEL: define void @sincos_f64
; ARMPL-SVE-NOPRED-SAME: (ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]]) #[[ATTR0]] {
-; ARMPL-SVE-NOPRED: call void @armpl_svsincos_f64_x(<vscale x 2 x double> [[WIDE_LOAD:%.*]], ptr [[TMP17:%.*]], ptr [[TMP18:%.*]], <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> poison, i1 true, i64 0), <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer))
-; ARMPL-SVE-NOPRED: call void @sincos(double [[NUM:%.*]], ptr [[GEPB:%.*]], ptr [[GEPC:%.*]]) #[[ATTR72:[0-9]+]]
+; ARMPL-SVE-NOPRED: call void @sincos(double [[NUM:%.*]], ptr [[GEPB:%.*]], ptr [[GEPC:%.*]])
;
entry:
br label %for.body
@@ -3327,29 +3327,27 @@ for.cond.cleanup:
define void @sincos_f32(ptr noalias %a, ptr noalias %b, ptr noalias %c) {
; SLEEF-NEON-LABEL: define void @sincos_f32
; SLEEF-NEON-SAME: (ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]]) #[[ATTR0]] {
-; SLEEF-NEON: call void @_ZGVnN4vl4l4_sincosf(<4 x float> [[WIDE_LOAD:%.*]], ptr [[TMP5:%.*]], ptr [[TMP6:%.*]])
+; SLEEF-NEON: call void @sincosf(float [[NUM:%.*]], ptr [[GEPB:%.*]], ptr [[GEPC:%.*]])
;
; SLEEF-SVE-LABEL: define void @sincos_f32
; SLEEF-SVE-SAME: (ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]]) #[[ATTR0]] {
-; SLEEF-SVE: call void @sincosf(float [[NUM:%.*]], ptr [[GEPB:%.*]], ptr [[GEPC:%.*]]) #[[ATTR7:[0-9]+]]
+; SLEEF-SVE: call void @sincosf(float [[NUM:%.*]], ptr [[GEPB:%.*]], ptr [[GEPC:%.*]])
;
; SLEEF-SVE-NOPRED-LABEL: define void @sincos_f32
; SLEEF-SVE-NOPRED-SAME: (ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]]) #[[ATTR0]] {
-; SLEEF-SVE-NOPRED: call void @_ZGVsNxvl4l4_sincosf(<vscale x 4 x float> [[WIDE_LOAD:%.*]], ptr [[TMP17:%.*]], ptr [[TMP18:%.*]])
-; SLEEF-SVE-NOPRED: call void @sincosf(float [[NUM:%.*]], ptr [[GEPB:%.*]], ptr [[GEPC:%.*]]) #[[ATTR73:[0-9]+]]
+; SLEEF-SVE-NOPRED: call void @sincosf(float [[NUM:%.*]], ptr [[GEPB:%.*]], ptr [[GEPC:%.*]])
;
; ARMPL-NEON-LABEL: define void @sincos_f32
; ARMPL-NEON-SAME: (ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]]) #[[ATTR0]] {
-; ARMPL-NEON: call void @armpl_vsincosq_f32(<4 x float> [[WIDE_LOAD:%.*]], ptr [[TMP5:%.*]], ptr [[TMP6:%.*]])
+; ARMPL-NEON: call void @sincosf(float [[NUM:%.*]], ptr [[GEPB:%.*]], ptr [[GEPC:%.*]])
;
; ARMPL-SVE-LABEL: define void @sincos_f32
; ARMPL-SVE-SAME: (ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]]) #[[ATTR0]] {
-; ARMPL-SVE: call void @armpl_svsincos_f32_x(<vscale x 4 x float> [[WIDE_MASKED_LOAD:%.*]], ptr [[TMP23:%.*]], ptr [[TMP24:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
+; ARMPL-SVE: call void @sincosf(float [[NUM:%.*]], ptr [[GEPB:%.*]], ptr [[GEPC:%.*]])
;
; ARMPL-SVE-NOPRED-LABEL: define void @sincos_f32
; ARMPL-SVE-NOPRED-SAME: (ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]]) #[[ATTR0]] {
-; ARMPL-SVE-NOPRED: call void @armpl_svsincos_f32_x(<vscale x 4 x float> [[WIDE_LOAD:%.*]], ptr [[TMP17:%.*]], ptr [[TMP18:%.*]], <vscale x 4 x i1> shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 true, i64 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer))
-; ARMPL-SVE-NOPRED: call void @sincosf(float [[NUM:%.*]], ptr [[GEPB:%.*]], ptr [[GEPC:%.*]]) #[[ATTR73:[0-9]+]]
+; ARMPL-SVE-NOPRED: call void @sincosf(float [[NUM:%.*]], ptr [[GEPB:%.*]], ptr [[GEPC:%.*]])
;
entry:
br label %for.body
@@ -3369,35 +3367,36 @@ for.cond.cleanup:
ret void
}
+; FIXME: Re-enable sincospi[f] vectorization once aliasing issues with output
+; pointers have been resolved.
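+;
+; Annotation: as with sincos above, each sincospi call writes through two
+; output pointers.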
+
declare void @sincospi(double, ptr, ptr)
declare void @sincospif(float, ptr, ptr)
define void @sincospi_f64(ptr noalias %a, ptr noalias %b, ptr noalias %c) {
; SLEEF-NEON-LABEL: define void @sincospi_f64
; SLEEF-NEON-SAME: (ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]]) #[[ATTR0]] {
-; SLEEF-NEON: call void @_ZGVnN2vl8l8_sincospi(<2 x double> [[WIDE_LOAD:%.*]], ptr [[TMP5:%.*]], ptr [[TMP6:%.*]])
+; SLEEF-NEON: call void @sincospi(double [[NUM:%.*]], ptr [[GEPB:%.*]], ptr [[GEPC:%.*]])
;
; SLEEF-SVE-LABEL: define void @sincospi_f64
; SLEEF-SVE-SAME: (ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]]) #[[ATTR0]] {
-; SLEEF-SVE: call void @sincospi(double [[NUM:%.*]], ptr [[GEPB:%.*]], ptr [[GEPC:%.*]]) #[[ATTR8:[0-9]+]]
+; SLEEF-SVE: call void @sincospi(double [[NUM:%.*]], ptr [[GEPB:%.*]], ptr [[GEPC:%.*]])
;
; SLEEF-SVE-NOPRED-LABEL: define void @sincospi_f64
; SLEEF-SVE-NOPRED-SAME: (ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]]) #[[ATTR0]] {
-; SLEEF-SVE-NOPRED: call void @_ZGVsNxvl8l8_sincospi(<vscale x 2 x double> [[WIDE_LOAD:%.*]], ptr [[TMP17:%.*]], ptr [[TMP18:%.*]])
-; SLEEF-SVE-NOPRED: call void @sincospi(double [[NUM:%.*]], ptr [[GEPB:%.*]], ptr [[GEPC:%.*]]) #[[ATTR74:[0-9]+]]
+; SLEEF-SVE-NOPRED: call void @sincospi(double [[NUM:%.*]], ptr [[GEPB:%.*]], ptr [[GEPC:%.*]])
;
; ARMPL-NEON-LABEL: define void @sincospi_f64
; ARMPL-NEON-SAME: (ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]]) #[[ATTR0]] {
-; ARMPL-NEON: call void @armpl_vsincospiq_f64(<2 x double> [[WIDE_LOAD:%.*]], ptr [[TMP5:%.*]], ptr [[TMP6:%.*]])
+; ARMPL-NEON: call void @sincospi(double [[NUM:%.*]], ptr [[GEPB:%.*]], ptr [[GEPC:%.*]])
;
; ARMPL-SVE-LABEL: define void @sincospi_f64
; ARMPL-SVE-SAME: (ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]]) #[[ATTR0]] {
-; ARMPL-SVE: call void @armpl_svsincospi_f64_x(<vscale x 2 x double> [[WIDE_MASKED_LOAD:%.*]], ptr [[TMP23:%.*]], ptr [[TMP24:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
+; ARMPL-SVE: call void @sincospi(double [[NUM:%.*]], ptr [[GEPB:%.*]], ptr [[GEPC:%.*]])
;
; ARMPL-SVE-NOPRED-LABEL: define void @sincospi_f64
; ARMPL-SVE-NOPRED-SAME: (ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]]) #[[ATTR0]] {
-; ARMPL-SVE-NOPRED: call void @armpl_svsincospi_f64_x(<vscale x 2 x double> [[WIDE_LOAD:%.*]], ptr [[TMP17:%.*]], ptr [[TMP18:%.*]], <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> poison, i1 true, i64 0), <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer))
-; ARMPL-SVE-NOPRED: call void @sincospi(double [[NUM:%.*]], ptr [[GEPB:%.*]], ptr [[GEPC:%.*]]) #[[ATTR74:[0-9]+]]
+; ARMPL-SVE-NOPRED: call void @sincospi(double [[NUM:%.*]], ptr [[GEPB:%.*]], ptr [[GEPC:%.*]])
;
entry:
br label %for.body
@@ -3420,29 +3419,27 @@ for.cond.cleanup:
define void @sincospi_f32(ptr noalias %a, ptr noalias %b, ptr noalias %c) {
; SLEEF-NEON-LABEL: define void @sincospi_f32
; SLEEF-NEON-SAME: (ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]]) #[[ATTR0]] {
-; SLEEF-NEON: call void @_ZGVnN4vl4l4_sincospif(<4 x float> [[WIDE_LOAD:%.*]], ptr [[TMP5:%.*]], ptr [[TMP6:%.*]])
+; SLEEF-NEON: call void @sincospif(float [[NUM:%.*]], ptr [[GEPB:%.*]], ptr [[GEPC:%.*]])
;
; SLEEF-SVE-LABEL: define void @sincospi_f32
; SLEEF-SVE-SAME: (ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]]) #[[ATTR0]] {
-; SLEEF-SVE: call void @sincospif(float [[NUM:%.*]], ptr [[GEPB:%.*]], ptr [[GEPC:%.*]]) #[[ATTR9:[0-9]+]]
+; SLEEF-SVE: call void @sincospif(float [[NUM:%.*]], ptr [[GEPB:%.*]], ptr [[GEPC:%.*]])
;
; SLEEF-SVE-NOPRED-LABEL: define void @sincospi_f32
; SLEEF-SVE-NOPRED-SAME: (ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]]) #[[ATTR0]] {
-; SLEEF-SVE-NOPRED: call void @_ZGVsNxvl4l4_sincospif(<vscale x 4 x float> [[WIDE_LOAD:%.*]], ptr [[TMP17:%.*]], ptr [[TMP18:%.*]])
-; SLEEF-SVE-NOPRED: call void @sincospif(float [[NUM:%.*]], ptr [[GEPB:%.*]], ptr [[GEPC:%.*]]) #[[ATTR75:[0-9]+]]
+; SLEEF-SVE-NOPRED: call void @sincospif(float [[NUM:%.*]], ptr [[GEPB:%.*]], ptr [[GEPC:%.*]])
;
; ARMPL-NEON-LABEL: define void @sincospi_f32
; ARMPL-NEON-SAME: (ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]]) #[[ATTR0]] {
-; ARMPL-NEON: call void @armpl_vsincospiq_f32(<4 x float> [[WIDE_LOAD:%.*]], ptr [[TMP5:%.*]], ptr [[TMP6:%.*]])
+; ARMPL-NEON: call void @sincospif(float [[NUM:%.*]], ptr [[GEPB:%.*]], ptr [[GEPC:%.*]])
;
; ARMPL-SVE-LABEL: define void @sincospi_f32
; ARMPL-SVE-SAME: (ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]]) #[[ATTR0]] {
-; ARMPL-SVE: call void @armpl_svsincospi_f32_x(<vscale x 4 x float> [[WIDE_MASKED_LOAD:%.*]], ptr [[TMP23:%.*]], ptr [[TMP24:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
+; ARMPL-SVE: call void @sincospif(float [[NUM:%.*]], ptr [[GEPB:%.*]], ptr [[GEPC:%.*]])
;
; ARMPL-SVE-NOPRED-LABEL: define void @sincospi_f32
; ARMPL-SVE-NOPRED-SAME: (ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]]) #[[ATTR0]] {
-; ARMPL-SVE-NOPRED: call void @armpl_svsincospi_f32_x(<vscale x 4 x float> [[WIDE_LOAD:%.*]], ptr [[TMP17:%.*]], ptr [[TMP18:%.*]], <vscale x 4 x i1> shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 true, i64 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer))
-; ARMPL-SVE-NOPRED: call void @sincospif(float [[NUM:%.*]], ptr [[GEPB:%.*]], ptr [[GEPC:%.*]]) #[[ATTR75:[0-9]+]]
+; ARMPL-SVE-NOPRED: call void @sincospif(float [[NUM:%.*]], ptr [[GEPB:%.*]], ptr [[GEPC:%.*]])
;
entry:
br label %for.body
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/vector-call-linear-args.ll b/llvm/test/Transforms/LoopVectorize/AArch64/vector-call-linear-args.ll
index f60ab5e..29904a7 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/vector-call-linear-args.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/vector-call-linear-args.ll
@@ -13,48 +13,27 @@ target triple = "aarch64-unknown-linux-gnu"
define void @test_linear8(ptr noalias %a, ptr readnone %b, i64 %n) {
; NEON-LABEL: define void @test_linear8
; NEON-SAME: (ptr noalias [[A:%.*]], ptr readnone [[B:%.*]], i64 [[N:%.*]]) {
-; NEON: [[TMP2:%.*]] = extractelement <2 x ptr> [[TMP1:%.*]], i32 0
-; NEON: [[TMP3:%.*]] = call <2 x i64> @vec_foo_linear8_nomask_neon(ptr [[TMP2]])
; NEON: [[DATA:%.*]] = call i64 @foo(ptr [[GEPB:%.*]]) #[[ATTR0:[0-9]+]]
;
; NEON_INTERLEAVE-LABEL: define void @test_linear8
; NEON_INTERLEAVE-SAME: (ptr noalias [[A:%.*]], ptr readnone [[B:%.*]], i64 [[N:%.*]]) {
-; NEON_INTERLEAVE: [[TMP4:%.*]] = extractelement <2 x ptr> [[TMP2:%.*]], i32 0
-; NEON_INTERLEAVE: [[TMP5:%.*]] = call <2 x i64> @vec_foo_linear8_nomask_neon(ptr [[TMP4]])
-; NEON_INTERLEAVE: [[TMP6:%.*]] = extractelement <2 x ptr> [[TMP3:%.*]], i32 0
-; NEON_INTERLEAVE: [[TMP7:%.*]] = call <2 x i64> @vec_foo_linear8_nomask_neon(ptr [[TMP6]])
; NEON_INTERLEAVE: [[DATA:%.*]] = call i64 @foo(ptr [[GEPB:%.*]]) #[[ATTR0:[0-9]+]]
;
; SVE_OR_NEON-LABEL: define void @test_linear8
; SVE_OR_NEON-SAME: (ptr noalias [[A:%.*]], ptr readnone [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0:[0-9]+]] {
-; SVE_OR_NEON: [[TMP14:%.*]] = extractelement <vscale x 2 x ptr> [[TMP13:%.*]], i32 0
-; SVE_OR_NEON: [[TMP15:%.*]] = call <vscale x 2 x i64> @vec_foo_linear8_nomask_sve(ptr [[TMP14]])
; SVE_OR_NEON: [[DATA:%.*]] = call i64 @foo(ptr [[GEPB:%.*]]) #[[ATTR2:[0-9]+]]
;
; SVE_OR_NEON_INTERLEAVE-LABEL: define void @test_linear8
; SVE_OR_NEON_INTERLEAVE-SAME: (ptr noalias [[A:%.*]], ptr readnone [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0:[0-9]+]] {
-; SVE_OR_NEON_INTERLEAVE: [[TMP33:%.*]] = extractelement <vscale x 2 x ptr> [[TMP31:%.*]], i32 0
-; SVE_OR_NEON_INTERLEAVE: [[TMP34:%.*]] = call <vscale x 2 x i64> @vec_foo_linear8_mask_sve(ptr [[TMP33]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
-; SVE_OR_NEON_INTERLEAVE: [[TMP35:%.*]] = extractelement <vscale x 2 x ptr> [[TMP32:%.*]], i32 0
-; SVE_OR_NEON_INTERLEAVE: [[TMP36:%.*]] = call <vscale x 2 x i64> @vec_foo_linear8_mask_sve(ptr [[TMP35]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK2:%.*]])
-; SVE_OR_NEON_INTERLEAVE: [[TMP48:%.*]] = extractelement <vscale x 2 x i1> [[TMP46:%.*]], i32 0
-; SVE_OR_NEON_INTERLEAVE: [[DATA:%.*]] = call i64 @foo(ptr [[GEPB:%.*]]) #[[ATTR4:[0-9]+]]
+; SVE_OR_NEON_INTERLEAVE: [[DATA:%.*]] = call i64 @foo(ptr [[GEPB:%.*]]) #[[ATTR1:[0-9]+]]
;
; SVE_TF-LABEL: define void @test_linear8
; SVE_TF-SAME: (ptr noalias [[A:%.*]], ptr readnone [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0:[0-9]+]] {
-; SVE_TF: [[TMP20:%.*]] = extractelement <vscale x 2 x ptr> [[TMP19:%.*]], i32 0
-; SVE_TF: [[TMP21:%.*]] = call <vscale x 2 x i64> @vec_foo_linear8_mask_sve(ptr [[TMP20]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
-; SVE_TF: [[TMP25:%.*]] = extractelement <vscale x 2 x i1> [[TMP24:%.*]], i32 0
-; SVE_TF: [[DATA:%.*]] = call i64 @foo(ptr [[GEPB:%.*]]) #[[ATTR4:[0-9]+]]
+; SVE_TF: [[DATA:%.*]] = call i64 @foo(ptr [[GEPB:%.*]]) #[[ATTR1:[0-9]+]]
;
; SVE_TF_INTERLEAVE-LABEL: define void @test_linear8
; SVE_TF_INTERLEAVE-SAME: (ptr noalias [[A:%.*]], ptr readnone [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0:[0-9]+]] {
-; SVE_TF_INTERLEAVE: [[TMP33:%.*]] = extractelement <vscale x 2 x ptr> [[TMP31:%.*]], i32 0
-; SVE_TF_INTERLEAVE: [[TMP34:%.*]] = call <vscale x 2 x i64> @vec_foo_linear8_mask_sve(ptr [[TMP33]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
-; SVE_TF_INTERLEAVE: [[TMP35:%.*]] = extractelement <vscale x 2 x ptr> [[TMP32:%.*]], i32 0
-; SVE_TF_INTERLEAVE: [[TMP36:%.*]] = call <vscale x 2 x i64> @vec_foo_linear8_mask_sve(ptr [[TMP35]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK2:%.*]])
-; SVE_TF_INTERLEAVE: [[TMP48:%.*]] = extractelement <vscale x 2 x i1> [[TMP46:%.*]], i32 0
-; SVE_TF_INTERLEAVE: [[DATA:%.*]] = call i64 @foo(ptr [[GEPB:%.*]]) #[[ATTR4:[0-9]+]]
+; SVE_TF_INTERLEAVE: [[DATA:%.*]] = call i64 @foo(ptr [[GEPB:%.*]]) #[[ATTR1:[0-9]+]]
;
entry:
br label %for.body
@@ -76,35 +55,27 @@ for.cond.cleanup:
define void @test_vector_linear4(ptr noalias %a, ptr readnone %b, ptr readonly %c, i64 %n) {
; NEON-LABEL: define void @test_vector_linear4
; NEON-SAME: (ptr noalias [[A:%.*]], ptr readnone [[B:%.*]], ptr readonly [[C:%.*]], i64 [[N:%.*]]) {
-; NEON: [[TMP4:%.*]] = extractelement <4 x ptr> [[TMP3:%.*]], i32 0
-; NEON: [[TMP5:%.*]] = call <4 x i32> @vec_baz_vector_linear4_nomask_neon(<4 x i32> [[WIDE_LOAD:%.*]], ptr [[TMP4]])
; NEON: [[DATA:%.*]] = call i32 @baz(i32 [[INPUT:%.*]], ptr [[GEPB:%.*]]) #[[ATTR1:[0-9]+]]
;
; NEON_INTERLEAVE-LABEL: define void @test_vector_linear4
; NEON_INTERLEAVE-SAME: (ptr noalias [[A:%.*]], ptr readnone [[B:%.*]], ptr readonly [[C:%.*]], i64 [[N:%.*]]) {
-; NEON_INTERLEAVE: [[TMP8:%.*]] = extractelement <4 x ptr> [[TMP6:%.*]], i32 0
-; NEON_INTERLEAVE: [[TMP9:%.*]] = call <4 x i32> @vec_baz_vector_linear4_nomask_neon(<4 x i32> [[WIDE_LOAD:%.*]], ptr [[TMP8]])
-; NEON_INTERLEAVE: [[TMP10:%.*]] = extractelement <4 x ptr> [[TMP7:%.*]], i32 0
-; NEON_INTERLEAVE: [[TMP11:%.*]] = call <4 x i32> @vec_baz_vector_linear4_nomask_neon(<4 x i32> [[WIDE_LOAD2:%.*]], ptr [[TMP10]])
; NEON_INTERLEAVE: [[DATA:%.*]] = call i32 @baz(i32 [[INPUT:%.*]], ptr [[GEPB:%.*]]) #[[ATTR1:[0-9]+]]
;
; SVE_OR_NEON-LABEL: define void @test_vector_linear4
; SVE_OR_NEON-SAME: (ptr noalias [[A:%.*]], ptr readnone [[B:%.*]], ptr readonly [[C:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
-; SVE_OR_NEON: [[TMP16:%.*]] = extractelement <vscale x 4 x ptr> [[TMP15:%.*]], i32 0
-; SVE_OR_NEON: [[TMP17:%.*]] = call <vscale x 4 x i32> @vec_baz_vector_linear4_nomask_sve(<vscale x 4 x i32> [[WIDE_LOAD:%.*]], ptr [[TMP16]])
; SVE_OR_NEON: [[DATA:%.*]] = call i32 @baz(i32 [[INPUT:%.*]], ptr [[GEPB:%.*]]) #[[ATTR3:[0-9]+]]
;
; SVE_OR_NEON_INTERLEAVE-LABEL: define void @test_vector_linear4
; SVE_OR_NEON_INTERLEAVE-SAME: (ptr noalias [[A:%.*]], ptr readnone [[B:%.*]], ptr readonly [[C:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
-; SVE_OR_NEON_INTERLEAVE: [[DATA:%.*]] = call i32 @baz(i32 [[INPUT:%.*]], ptr [[GEPB:%.*]]) #[[ATTR5:[0-9]+]]
+; SVE_OR_NEON_INTERLEAVE: [[DATA:%.*]] = call i32 @baz(i32 [[INPUT:%.*]], ptr [[GEPB:%.*]]) #[[ATTR2:[0-9]+]]
;
; SVE_TF-LABEL: define void @test_vector_linear4
; SVE_TF-SAME: (ptr noalias [[A:%.*]], ptr readnone [[B:%.*]], ptr readonly [[C:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
-; SVE_TF: [[DATA:%.*]] = call i32 @baz(i32 [[INPUT:%.*]], ptr [[GEPB:%.*]]) #[[ATTR5:[0-9]+]]
+; SVE_TF: [[DATA:%.*]] = call i32 @baz(i32 [[INPUT:%.*]], ptr [[GEPB:%.*]]) #[[ATTR2:[0-9]+]]
;
; SVE_TF_INTERLEAVE-LABEL: define void @test_vector_linear4
; SVE_TF_INTERLEAVE-SAME: (ptr noalias [[A:%.*]], ptr readnone [[B:%.*]], ptr readonly [[C:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
-; SVE_TF_INTERLEAVE: [[DATA:%.*]] = call i32 @baz(i32 [[INPUT:%.*]], ptr [[GEPB:%.*]]) #[[ATTR5:[0-9]+]]
+; SVE_TF_INTERLEAVE: [[DATA:%.*]] = call i32 @baz(i32 [[INPUT:%.*]], ptr [[GEPB:%.*]]) #[[ATTR2:[0-9]+]]
;
entry:
br label %for.body
@@ -132,9 +103,7 @@ define void @test_linear8_bad_stride(ptr noalias %a, ptr readnone %b, i64 %n) {
;
; NEON_INTERLEAVE-LABEL: define void @test_linear8_bad_stride
; NEON_INTERLEAVE-SAME: (ptr noalias [[A:%.*]], ptr readnone [[B:%.*]], i64 [[N:%.*]]) {
-; NEON_INTERLEAVE: [[TMP4:%.*]] = call i64 @foo(ptr [[TMP2:%.*]]) #[[ATTR2:[0-9]+]]
-; NEON_INTERLEAVE: [[TMP5:%.*]] = call i64 @foo(ptr [[TMP3:%.*]]) #[[ATTR2]]
-; NEON_INTERLEAVE: [[DATA:%.*]] = call i64 @foo(ptr [[GEPB:%.*]]) #[[ATTR2]]
+; NEON_INTERLEAVE: [[DATA:%.*]] = call i64 @foo(ptr [[GEPB:%.*]]) #[[ATTR2:[0-9]+]]
;
; SVE_OR_NEON-LABEL: define void @test_linear8_bad_stride
; SVE_OR_NEON-SAME: (ptr noalias [[A:%.*]], ptr readnone [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
@@ -142,15 +111,15 @@ define void @test_linear8_bad_stride(ptr noalias %a, ptr readnone %b, i64 %n) {
;
; SVE_OR_NEON_INTERLEAVE-LABEL: define void @test_linear8_bad_stride
; SVE_OR_NEON_INTERLEAVE-SAME: (ptr noalias [[A:%.*]], ptr readnone [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
-; SVE_OR_NEON_INTERLEAVE: [[DATA:%.*]] = call i64 @foo(ptr [[GEPB:%.*]]) #[[ATTR6:[0-9]+]]
+; SVE_OR_NEON_INTERLEAVE: [[DATA:%.*]] = call i64 @foo(ptr [[GEPB:%.*]]) #[[ATTR3:[0-9]+]]
;
; SVE_TF-LABEL: define void @test_linear8_bad_stride
; SVE_TF-SAME: (ptr noalias [[A:%.*]], ptr readnone [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
-; SVE_TF: [[DATA:%.*]] = call i64 @foo(ptr [[GEPB:%.*]]) #[[ATTR6:[0-9]+]]
+; SVE_TF: [[DATA:%.*]] = call i64 @foo(ptr [[GEPB:%.*]]) #[[ATTR3:[0-9]+]]
;
; SVE_TF_INTERLEAVE-LABEL: define void @test_linear8_bad_stride
; SVE_TF_INTERLEAVE-SAME: (ptr noalias [[A:%.*]], ptr readnone [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
-; SVE_TF_INTERLEAVE: [[DATA:%.*]] = call i64 @foo(ptr [[GEPB:%.*]]) #[[ATTR6:[0-9]+]]
+; SVE_TF_INTERLEAVE: [[DATA:%.*]] = call i64 @foo(ptr [[GEPB:%.*]]) #[[ATTR3:[0-9]+]]
;
entry:
br label %for.body
@@ -172,35 +141,27 @@ for.cond.cleanup:
define void @test_linear16_wide_stride(ptr noalias %a, ptr readnone %b, i64 %n) {
; NEON-LABEL: define void @test_linear16_wide_stride
; NEON-SAME: (ptr noalias [[A:%.*]], ptr readnone [[B:%.*]], i64 [[N:%.*]]) {
-; NEON: [[TMP3:%.*]] = extractelement <2 x ptr> [[TMP2:%.*]], i32 0
-; NEON: [[TMP4:%.*]] = call <2 x i64> @vec_foo_linear16_nomask_neon(ptr [[TMP3]])
; NEON: [[DATA:%.*]] = call i64 @foo(ptr [[GEPB:%.*]]) #[[ATTR2]]
;
; NEON_INTERLEAVE-LABEL: define void @test_linear16_wide_stride
; NEON_INTERLEAVE-SAME: (ptr noalias [[A:%.*]], ptr readnone [[B:%.*]], i64 [[N:%.*]]) {
-; NEON_INTERLEAVE: [[TMP6:%.*]] = extractelement <2 x ptr> [[TMP4:%.*]], i32 0
-; NEON_INTERLEAVE: [[TMP7:%.*]] = call <2 x i64> @vec_foo_linear16_nomask_neon(ptr [[TMP6]])
-; NEON_INTERLEAVE: [[TMP8:%.*]] = extractelement <2 x ptr> [[TMP5:%.*]], i32 0
-; NEON_INTERLEAVE: [[TMP9:%.*]] = call <2 x i64> @vec_foo_linear16_nomask_neon(ptr [[TMP8]])
; NEON_INTERLEAVE: [[DATA:%.*]] = call i64 @foo(ptr [[GEPB:%.*]]) #[[ATTR2]]
;
; SVE_OR_NEON-LABEL: define void @test_linear16_wide_stride
; SVE_OR_NEON-SAME: (ptr noalias [[A:%.*]], ptr readnone [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
-; SVE_OR_NEON: [[TMP15:%.*]] = extractelement <vscale x 2 x ptr> [[TMP14:%.*]], i32 0
-; SVE_OR_NEON: [[TMP16:%.*]] = call <vscale x 2 x i64> @vec_foo_linear16_nomask_sve(ptr [[TMP15]])
; SVE_OR_NEON: [[DATA:%.*]] = call i64 @foo(ptr [[GEPB:%.*]]) #[[ATTR4]]
;
; SVE_OR_NEON_INTERLEAVE-LABEL: define void @test_linear16_wide_stride
; SVE_OR_NEON_INTERLEAVE-SAME: (ptr noalias [[A:%.*]], ptr readnone [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
-; SVE_OR_NEON_INTERLEAVE: [[DATA:%.*]] = call i64 @foo(ptr [[GEPB:%.*]]) #[[ATTR6]]
+; SVE_OR_NEON_INTERLEAVE: [[DATA:%.*]] = call i64 @foo(ptr [[GEPB:%.*]]) #[[ATTR3]]
;
; SVE_TF-LABEL: define void @test_linear16_wide_stride
; SVE_TF-SAME: (ptr noalias [[A:%.*]], ptr readnone [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
-; SVE_TF: [[DATA:%.*]] = call i64 @foo(ptr [[GEPB:%.*]]) #[[ATTR6]]
+; SVE_TF: [[DATA:%.*]] = call i64 @foo(ptr [[GEPB:%.*]]) #[[ATTR3]]
;
; SVE_TF_INTERLEAVE-LABEL: define void @test_linear16_wide_stride
; SVE_TF_INTERLEAVE-SAME: (ptr noalias [[A:%.*]], ptr readnone [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
-; SVE_TF_INTERLEAVE: [[DATA:%.*]] = call i64 @foo(ptr [[GEPB:%.*]]) #[[ATTR6]]
+; SVE_TF_INTERLEAVE: [[DATA:%.*]] = call i64 @foo(ptr [[GEPB:%.*]]) #[[ATTR3]]
;
entry:
br label %for.body
@@ -223,57 +184,27 @@ for.cond.cleanup:
define void @test_linear4_linear8(ptr noalias %a, ptr readnone %b, ptr readonly %c, i64 %n) {
; NEON-LABEL: define void @test_linear4_linear8
; NEON-SAME: (ptr noalias [[A:%.*]], ptr readnone [[B:%.*]], ptr readonly [[C:%.*]], i64 [[N:%.*]]) {
-; NEON: [[TMP3:%.*]] = extractelement <4 x ptr> [[TMP1:%.*]], i32 0
-; NEON: [[TMP4:%.*]] = extractelement <4 x ptr> [[TMP2:%.*]], i32 0
-; NEON: [[TMP5:%.*]] = call <4 x i32> @vec_quux_linear4_linear8_nomask_neon(ptr [[TMP3]], ptr [[TMP4]])
; NEON: [[DATA:%.*]] = call i32 @quux(ptr [[GEPC:%.*]], ptr [[GEPB:%.*]]) #[[ATTR3:[0-9]+]]
;
; NEON_INTERLEAVE-LABEL: define void @test_linear4_linear8
; NEON_INTERLEAVE-SAME: (ptr noalias [[A:%.*]], ptr readnone [[B:%.*]], ptr readonly [[C:%.*]], i64 [[N:%.*]]) {
-; NEON_INTERLEAVE: [[TMP6:%.*]] = extractelement <4 x ptr> [[TMP2:%.*]], i32 0
-; NEON_INTERLEAVE: [[TMP7:%.*]] = extractelement <4 x ptr> [[TMP4:%.*]], i32 0
-; NEON_INTERLEAVE: [[TMP8:%.*]] = call <4 x i32> @vec_quux_linear4_linear8_nomask_neon(ptr [[TMP6]], ptr [[TMP7]])
-; NEON_INTERLEAVE: [[TMP9:%.*]] = extractelement <4 x ptr> [[TMP3:%.*]], i32 0
-; NEON_INTERLEAVE: [[TMP10:%.*]] = extractelement <4 x ptr> [[TMP5:%.*]], i32 0
-; NEON_INTERLEAVE: [[TMP11:%.*]] = call <4 x i32> @vec_quux_linear4_linear8_nomask_neon(ptr [[TMP9]], ptr [[TMP10]])
; NEON_INTERLEAVE: [[DATA:%.*]] = call i32 @quux(ptr [[GEPC:%.*]], ptr [[GEPB:%.*]]) #[[ATTR3:[0-9]+]]
;
; SVE_OR_NEON-LABEL: define void @test_linear4_linear8
; SVE_OR_NEON-SAME: (ptr noalias [[A:%.*]], ptr readnone [[B:%.*]], ptr readonly [[C:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
-; SVE_OR_NEON: [[TMP15:%.*]] = extractelement <vscale x 4 x ptr> [[TMP13:%.*]], i32 0
-; SVE_OR_NEON: [[TMP16:%.*]] = extractelement <vscale x 4 x ptr> [[TMP14:%.*]], i32 0
-; SVE_OR_NEON: [[TMP17:%.*]] = call <vscale x 4 x i32> @vec_quux_linear4_linear8_mask_sve(ptr [[TMP15]], ptr [[TMP16]], <vscale x 4 x i1> shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 true, i64 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer))
; SVE_OR_NEON: [[DATA:%.*]] = call i32 @quux(ptr [[GEPC:%.*]], ptr [[GEPB:%.*]]) #[[ATTR5:[0-9]+]]
;
; SVE_OR_NEON_INTERLEAVE-LABEL: define void @test_linear4_linear8
; SVE_OR_NEON_INTERLEAVE-SAME: (ptr noalias [[A:%.*]], ptr readnone [[B:%.*]], ptr readonly [[C:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
-; SVE_OR_NEON_INTERLEAVE: [[TMP35:%.*]] = extractelement <vscale x 4 x ptr> [[TMP31:%.*]], i32 0
-; SVE_OR_NEON_INTERLEAVE: [[TMP36:%.*]] = extractelement <vscale x 4 x ptr> [[TMP33:%.*]], i32 0
-; SVE_OR_NEON_INTERLEAVE: [[TMP37:%.*]] = call <vscale x 4 x i32> @vec_quux_linear4_linear8_mask_sve(ptr [[TMP35]], ptr [[TMP36]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
-; SVE_OR_NEON_INTERLEAVE: [[TMP38:%.*]] = extractelement <vscale x 4 x ptr> [[TMP32:%.*]], i32 0
-; SVE_OR_NEON_INTERLEAVE: [[TMP39:%.*]] = extractelement <vscale x 4 x ptr> [[TMP34:%.*]], i32 0
-; SVE_OR_NEON_INTERLEAVE: [[TMP40:%.*]] = call <vscale x 4 x i32> @vec_quux_linear4_linear8_mask_sve(ptr [[TMP38]], ptr [[TMP39]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK2:%.*]])
-; SVE_OR_NEON_INTERLEAVE: [[TMP52:%.*]] = extractelement <vscale x 4 x i1> [[TMP50:%.*]], i32 0
-; SVE_OR_NEON_INTERLEAVE: [[DATA:%.*]] = call i32 @quux(ptr [[GEPC:%.*]], ptr [[GEPB:%.*]]) #[[ATTR7:[0-9]+]]
+; SVE_OR_NEON_INTERLEAVE: [[DATA:%.*]] = call i32 @quux(ptr [[GEPC:%.*]], ptr [[GEPB:%.*]]) #[[ATTR4:[0-9]+]]
;
; SVE_TF-LABEL: define void @test_linear4_linear8
; SVE_TF-SAME: (ptr noalias [[A:%.*]], ptr readnone [[B:%.*]], ptr readonly [[C:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
-; SVE_TF: [[TMP21:%.*]] = extractelement <vscale x 4 x ptr> [[TMP19:%.*]], i32 0
-; SVE_TF: [[TMP22:%.*]] = extractelement <vscale x 4 x ptr> [[TMP20:%.*]], i32 0
-; SVE_TF: [[TMP23:%.*]] = call <vscale x 4 x i32> @vec_quux_linear4_linear8_mask_sve(ptr [[TMP21]], ptr [[TMP22]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
-; SVE_TF: [[TMP27:%.*]] = extractelement <vscale x 4 x i1> [[TMP26:%.*]], i32 0
-; SVE_TF: [[DATA:%.*]] = call i32 @quux(ptr [[GEPC:%.*]], ptr [[GEPB:%.*]]) #[[ATTR7:[0-9]+]]
+; SVE_TF: [[DATA:%.*]] = call i32 @quux(ptr [[GEPC:%.*]], ptr [[GEPB:%.*]]) #[[ATTR4:[0-9]+]]
;
; SVE_TF_INTERLEAVE-LABEL: define void @test_linear4_linear8
; SVE_TF_INTERLEAVE-SAME: (ptr noalias [[A:%.*]], ptr readnone [[B:%.*]], ptr readonly [[C:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
-; SVE_TF_INTERLEAVE: [[TMP35:%.*]] = extractelement <vscale x 4 x ptr> [[TMP31:%.*]], i32 0
-; SVE_TF_INTERLEAVE: [[TMP36:%.*]] = extractelement <vscale x 4 x ptr> [[TMP33:%.*]], i32 0
-; SVE_TF_INTERLEAVE: [[TMP37:%.*]] = call <vscale x 4 x i32> @vec_quux_linear4_linear8_mask_sve(ptr [[TMP35]], ptr [[TMP36]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
-; SVE_TF_INTERLEAVE: [[TMP38:%.*]] = extractelement <vscale x 4 x ptr> [[TMP32:%.*]], i32 0
-; SVE_TF_INTERLEAVE: [[TMP39:%.*]] = extractelement <vscale x 4 x ptr> [[TMP34:%.*]], i32 0
-; SVE_TF_INTERLEAVE: [[TMP40:%.*]] = call <vscale x 4 x i32> @vec_quux_linear4_linear8_mask_sve(ptr [[TMP38]], ptr [[TMP39]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK2:%.*]])
-; SVE_TF_INTERLEAVE: [[TMP52:%.*]] = extractelement <vscale x 4 x i1> [[TMP50:%.*]], i32 0
-; SVE_TF_INTERLEAVE: [[DATA:%.*]] = call i32 @quux(ptr [[GEPC:%.*]], ptr [[GEPB:%.*]]) #[[ATTR7:[0-9]+]]
+; SVE_TF_INTERLEAVE: [[DATA:%.*]] = call i32 @quux(ptr [[GEPC:%.*]], ptr [[GEPB:%.*]]) #[[ATTR4:[0-9]+]]
;
entry:
br label %for.body
@@ -310,21 +241,21 @@ define void @test_linear3_non_ptr(ptr noalias %a, i64 %n) {
;
; SVE_OR_NEON-LABEL: define void @test_linear3_non_ptr
; SVE_OR_NEON-SAME: (ptr noalias [[A:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
-; SVE_OR_NEON: [[TMP14:%.*]] = extractelement <vscale x 4 x i32> [[TMP13:%.*]], i32 0
-; SVE_OR_NEON: [[TMP15:%.*]] = call <vscale x 4 x i32> @vec_bar_linear3_nomask_sve(i32 [[TMP14]])
+; SVE_OR_NEON: [[TMP13:%.*]] = extractelement <vscale x 4 x i32> [[TMP12:%.*]], i32 0
+; SVE_OR_NEON: [[TMP14:%.*]] = call <vscale x 4 x i32> @vec_bar_linear3_nomask_sve(i32 [[TMP13]])
; SVE_OR_NEON: [[DATA:%.*]] = call i32 @bar(i32 [[TREBLED:%.*]]) #[[ATTR6:[0-9]+]]
;
; SVE_OR_NEON_INTERLEAVE-LABEL: define void @test_linear3_non_ptr
; SVE_OR_NEON_INTERLEAVE-SAME: (ptr noalias [[A:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
-; SVE_OR_NEON_INTERLEAVE: [[DATA:%.*]] = call i32 @bar(i32 [[TREBLED:%.*]]) #[[ATTR8:[0-9]+]]
+; SVE_OR_NEON_INTERLEAVE: [[DATA:%.*]] = call i32 @bar(i32 [[TREBLED:%.*]]) #[[ATTR5:[0-9]+]]
;
; SVE_TF-LABEL: define void @test_linear3_non_ptr
; SVE_TF-SAME: (ptr noalias [[A:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
-; SVE_TF: [[DATA:%.*]] = call i32 @bar(i32 [[TREBLED:%.*]]) #[[ATTR8:[0-9]+]]
+; SVE_TF: [[DATA:%.*]] = call i32 @bar(i32 [[TREBLED:%.*]]) #[[ATTR5:[0-9]+]]
;
; SVE_TF_INTERLEAVE-LABEL: define void @test_linear3_non_ptr
; SVE_TF_INTERLEAVE-SAME: (ptr noalias [[A:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
-; SVE_TF_INTERLEAVE: [[DATA:%.*]] = call i32 @bar(i32 [[TREBLED:%.*]]) #[[ATTR8:[0-9]+]]
+; SVE_TF_INTERLEAVE: [[DATA:%.*]] = call i32 @bar(i32 [[TREBLED:%.*]]) #[[ATTR5:[0-9]+]]
;
entry:
br label %for.body
@@ -361,21 +292,21 @@ define void @test_linearn5_non_ptr_neg_stride(ptr noalias %a, i64 %n) {
;
; SVE_OR_NEON-LABEL: define void @test_linearn5_non_ptr_neg_stride
; SVE_OR_NEON-SAME: (ptr noalias [[A:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
-; SVE_OR_NEON: [[TMP14:%.*]] = extractelement <vscale x 4 x i32> [[TMP13:%.*]], i32 0
-; SVE_OR_NEON: [[TMP15:%.*]] = call <vscale x 4 x i32> @vec_bar_linearn5_nomask_sve(i32 [[TMP14]])
+; SVE_OR_NEON: [[TMP13:%.*]] = extractelement <vscale x 4 x i32> [[TMP12:%.*]], i32 0
+; SVE_OR_NEON: [[TMP14:%.*]] = call <vscale x 4 x i32> @vec_bar_linearn5_nomask_sve(i32 [[TMP13]])
; SVE_OR_NEON: [[DATA:%.*]] = call i32 @bar(i32 [[NEGSTRIDE:%.*]]) #[[ATTR7:[0-9]+]]
;
; SVE_OR_NEON_INTERLEAVE-LABEL: define void @test_linearn5_non_ptr_neg_stride
; SVE_OR_NEON_INTERLEAVE-SAME: (ptr noalias [[A:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
-; SVE_OR_NEON_INTERLEAVE: [[DATA:%.*]] = call i32 @bar(i32 [[NEGSTRIDE:%.*]]) #[[ATTR9:[0-9]+]]
+; SVE_OR_NEON_INTERLEAVE: [[DATA:%.*]] = call i32 @bar(i32 [[NEGSTRIDE:%.*]]) #[[ATTR6:[0-9]+]]
;
; SVE_TF-LABEL: define void @test_linearn5_non_ptr_neg_stride
; SVE_TF-SAME: (ptr noalias [[A:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
-; SVE_TF: [[DATA:%.*]] = call i32 @bar(i32 [[NEGSTRIDE:%.*]]) #[[ATTR9:[0-9]+]]
+; SVE_TF: [[DATA:%.*]] = call i32 @bar(i32 [[NEGSTRIDE:%.*]]) #[[ATTR6:[0-9]+]]
;
; SVE_TF_INTERLEAVE-LABEL: define void @test_linearn5_non_ptr_neg_stride
; SVE_TF_INTERLEAVE-SAME: (ptr noalias [[A:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
-; SVE_TF_INTERLEAVE: [[DATA:%.*]] = call i32 @bar(i32 [[NEGSTRIDE:%.*]]) #[[ATTR9:[0-9]+]]
+; SVE_TF_INTERLEAVE: [[DATA:%.*]] = call i32 @bar(i32 [[NEGSTRIDE:%.*]]) #[[ATTR6:[0-9]+]]
;
entry:
br label %for.body
@@ -398,48 +329,27 @@ for.cond.cleanup:
define void @test_linear8_return_void(ptr noalias %in, ptr noalias %out, i64 %n) {
; NEON-LABEL: define void @test_linear8_return_void
; NEON-SAME: (ptr noalias [[IN:%.*]], ptr noalias [[OUT:%.*]], i64 [[N:%.*]]) {
-; NEON: [[TMP4:%.*]] = extractelement <2 x ptr> [[TMP3:%.*]], i32 0
-; NEON: call void @vec_goo_linear8_nomask_neon(<2 x i64> [[WIDE_LOAD:%.*]], ptr [[TMP4]])
; NEON: call void @goo(i64 [[NUM:%.*]], ptr [[GEP_OUT:%.*]]) #[[ATTR6:[0-9]+]]
;
; NEON_INTERLEAVE-LABEL: define void @test_linear8_return_void
; NEON_INTERLEAVE-SAME: (ptr noalias [[IN:%.*]], ptr noalias [[OUT:%.*]], i64 [[N:%.*]]) {
-; NEON_INTERLEAVE: [[TMP8:%.*]] = extractelement <2 x ptr> [[TMP6:%.*]], i32 0
-; NEON_INTERLEAVE: call void @vec_goo_linear8_nomask_neon(<2 x i64> [[WIDE_LOAD:%.*]], ptr [[TMP8]])
-; NEON_INTERLEAVE: [[TMP9:%.*]] = extractelement <2 x ptr> [[TMP7:%.*]], i32 0
-; NEON_INTERLEAVE: call void @vec_goo_linear8_nomask_neon(<2 x i64> [[WIDE_LOAD2:%.*]], ptr [[TMP9]])
; NEON_INTERLEAVE: call void @goo(i64 [[NUM:%.*]], ptr [[GEP_OUT:%.*]]) #[[ATTR6:[0-9]+]]
;
; SVE_OR_NEON-LABEL: define void @test_linear8_return_void
; SVE_OR_NEON-SAME: (ptr noalias [[IN:%.*]], ptr noalias [[OUT:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
-; SVE_OR_NEON: [[TMP16:%.*]] = extractelement <vscale x 2 x ptr> [[TMP15:%.*]], i32 0
-; SVE_OR_NEON: call void @vec_goo_linear8_nomask_sve(<vscale x 2 x i64> [[WIDE_LOAD:%.*]], ptr [[TMP16]])
; SVE_OR_NEON: call void @goo(i64 [[NUM:%.*]], ptr [[GEP_OUT:%.*]]) #[[ATTR8:[0-9]+]]
;
; SVE_OR_NEON_INTERLEAVE-LABEL: define void @test_linear8_return_void
; SVE_OR_NEON_INTERLEAVE-SAME: (ptr noalias [[IN:%.*]], ptr noalias [[OUT:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
-; SVE_OR_NEON_INTERLEAVE: [[TMP39:%.*]] = extractelement <vscale x 2 x ptr> [[TMP37:%.*]], i32 0
-; SVE_OR_NEON_INTERLEAVE: call void @vec_goo_linear8_mask_sve(<vscale x 2 x i64> [[WIDE_MASKED_LOAD:%.*]], ptr [[TMP39]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
-; SVE_OR_NEON_INTERLEAVE: [[TMP40:%.*]] = extractelement <vscale x 2 x ptr> [[TMP38:%.*]], i32 0
-; SVE_OR_NEON_INTERLEAVE: call void @vec_goo_linear8_mask_sve(<vscale x 2 x i64> [[WIDE_MASKED_LOAD4:%.*]], ptr [[TMP40]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK2:%.*]])
-; SVE_OR_NEON_INTERLEAVE: [[TMP46:%.*]] = extractelement <vscale x 2 x i1> [[TMP44:%.*]], i32 0
-; SVE_OR_NEON_INTERLEAVE: call void @goo(i64 [[NUM:%.*]], ptr [[GEP_OUT:%.*]]) #[[ATTR10:[0-9]+]]
+; SVE_OR_NEON_INTERLEAVE: call void @goo(i64 [[NUM:%.*]], ptr [[GEP_OUT:%.*]]) #[[ATTR7:[0-9]+]]
;
; SVE_TF-LABEL: define void @test_linear8_return_void
; SVE_TF-SAME: (ptr noalias [[IN:%.*]], ptr noalias [[OUT:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
-; SVE_TF: [[TMP22:%.*]] = extractelement <vscale x 2 x ptr> [[TMP21:%.*]], i32 0
-; SVE_TF: call void @vec_goo_linear8_mask_sve(<vscale x 2 x i64> [[WIDE_MASKED_LOAD:%.*]], ptr [[TMP22]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
-; SVE_TF: [[TMP24:%.*]] = extractelement <vscale x 2 x i1> [[TMP23:%.*]], i32 0
-; SVE_TF: call void @goo(i64 [[NUM:%.*]], ptr [[GEP_OUT:%.*]]) #[[ATTR10:[0-9]+]]
+; SVE_TF: call void @goo(i64 [[NUM:%.*]], ptr [[GEP_OUT:%.*]]) #[[ATTR7:[0-9]+]]
;
; SVE_TF_INTERLEAVE-LABEL: define void @test_linear8_return_void
; SVE_TF_INTERLEAVE-SAME: (ptr noalias [[IN:%.*]], ptr noalias [[OUT:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
-; SVE_TF_INTERLEAVE: [[TMP39:%.*]] = extractelement <vscale x 2 x ptr> [[TMP37:%.*]], i32 0
-; SVE_TF_INTERLEAVE: call void @vec_goo_linear8_mask_sve(<vscale x 2 x i64> [[WIDE_MASKED_LOAD:%.*]], ptr [[TMP39]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
-; SVE_TF_INTERLEAVE: [[TMP40:%.*]] = extractelement <vscale x 2 x ptr> [[TMP38:%.*]], i32 0
-; SVE_TF_INTERLEAVE: call void @vec_goo_linear8_mask_sve(<vscale x 2 x i64> [[WIDE_MASKED_LOAD4:%.*]], ptr [[TMP40]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK2:%.*]])
-; SVE_TF_INTERLEAVE: [[TMP46:%.*]] = extractelement <vscale x 2 x i1> [[TMP44:%.*]], i32 0
-; SVE_TF_INTERLEAVE: call void @goo(i64 [[NUM:%.*]], ptr [[GEP_OUT:%.*]]) #[[ATTR10:[0-9]+]]
+; SVE_TF_INTERLEAVE: call void @goo(i64 [[NUM:%.*]], ptr [[GEP_OUT:%.*]]) #[[ATTR7:[0-9]+]]
;
entry:
br label %for.body
@@ -458,6 +368,9 @@ for.cond.cleanup:
ret void
}
+; Note: Vectorizing calls with pointer arguments is currently disabled, as
+; LAA cannot detect aliasing between such output/input pointers.
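+;
+; A minimal sketch of what is disabled (our annotation): for a call such as
+;   %data = call i64 @foo(ptr %gepb)        ; %gepb advances 8 bytes/lane
+; the linear(8) variants (e.g. @vec_foo_linear8_nomask_neon) used to be
+; handed the lane-0 pointer extracted from the vector of GEPs; that
+; widening is now rejected because LAA cannot prove the pointed-to memory
+; is independent of the loop's other accesses.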
+
declare i64 @foo(ptr)
declare i32 @baz(i32, ptr)
declare i32 @quux(ptr, ptr)
diff --git a/llvm/test/Transforms/LoopVectorize/float-induction.ll b/llvm/test/Transforms/LoopVectorize/float-induction.ll
index 9091b2c..cedaf01 100644
--- a/llvm/test/Transforms/LoopVectorize/float-induction.ll
+++ b/llvm/test/Transforms/LoopVectorize/float-induction.ll
@@ -1640,3 +1640,112 @@ for.inc:
for.end:
ret void
}
+
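+; Annotation: the test below (reduced from bbi-99425.c, per its debug
+; metadata) attaches a !dbg location to the float induction; it presumably
+; guards against debug info changing how the reassoc fadd induction chain
+; is vectorized.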
+define i32 @float_induction_with_dbg_on_fadd(ptr %dst) {
+; VEC4_INTERL1-LABEL: @float_induction_with_dbg_on_fadd(
+; VEC4_INTERL1-NEXT: entry:
+; VEC4_INTERL1-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; VEC4_INTERL1: vector.ph:
+; VEC4_INTERL1-NEXT: br label [[VECTOR_BODY:%.*]]
+; VEC4_INTERL1: vector.body:
+; VEC4_INTERL1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; VEC4_INTERL1-NEXT: [[TMP0:%.*]] = getelementptr float, ptr null, i64 [[INDEX]]
+; VEC4_INTERL1-NEXT: store <4 x float> poison, ptr [[TMP0]], align 8
+; VEC4_INTERL1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
+; VEC4_INTERL1-NEXT: [[TMP1:%.*]] = icmp eq i64 [[INDEX_NEXT]], 200
+; VEC4_INTERL1-NEXT: br i1 [[TMP1]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
+; VEC4_INTERL1: middle.block:
+; VEC4_INTERL1-NEXT: br i1 true, label [[EXIT:%.*]], label [[SCALAR_PH]]
+; VEC4_INTERL1: scalar.ph:
+; VEC4_INTERL1-NEXT: br label [[LOOP:%.*]]
+; VEC4_INTERL1: loop:
+; VEC4_INTERL1-NEXT: br i1 poison, label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP16:![0-9]+]]
+; VEC4_INTERL1: exit:
+; VEC4_INTERL1-NEXT: ret i32 0
+;
+; VEC4_INTERL2-LABEL: @float_induction_with_dbg_on_fadd(
+; VEC4_INTERL2-NEXT: entry:
+; VEC4_INTERL2-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; VEC4_INTERL2: vector.ph:
+; VEC4_INTERL2-NEXT: br label [[VECTOR_BODY:%.*]]
+; VEC4_INTERL2: vector.body:
+; VEC4_INTERL2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; VEC4_INTERL2-NEXT: [[TMP0:%.*]] = getelementptr float, ptr null, i64 [[INDEX]]
+; VEC4_INTERL2-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[TMP0]], i64 16
+; VEC4_INTERL2-NEXT: store <4 x float> poison, ptr [[TMP0]], align 8
+; VEC4_INTERL2-NEXT: store <4 x float> zeroinitializer, ptr [[TMP1]], align 8
+; VEC4_INTERL2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
+; VEC4_INTERL2-NEXT: [[TMP2:%.*]] = icmp eq i64 [[INDEX_NEXT]], 200
+; VEC4_INTERL2-NEXT: br i1 [[TMP2]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
+; VEC4_INTERL2: middle.block:
+; VEC4_INTERL2-NEXT: br i1 true, label [[EXIT:%.*]], label [[SCALAR_PH]]
+; VEC4_INTERL2: scalar.ph:
+; VEC4_INTERL2-NEXT: br label [[LOOP:%.*]]
+; VEC4_INTERL2: loop:
+; VEC4_INTERL2-NEXT: br i1 poison, label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP16:![0-9]+]]
+; VEC4_INTERL2: exit:
+; VEC4_INTERL2-NEXT: ret i32 0
+;
+; VEC1_INTERL2-LABEL: @float_induction_with_dbg_on_fadd(
+; VEC1_INTERL2-NEXT: entry:
+; VEC1_INTERL2-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; VEC1_INTERL2: vector.ph:
+; VEC1_INTERL2-NEXT: br label [[VECTOR_BODY:%.*]]
+; VEC1_INTERL2: vector.body:
+; VEC1_INTERL2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; VEC1_INTERL2-NEXT: [[TMP0:%.*]] = or disjoint i64 [[INDEX]], 1
+; VEC1_INTERL2-NEXT: [[TMP1:%.*]] = getelementptr float, ptr null, i64 [[INDEX]]
+; VEC1_INTERL2-NEXT: [[TMP2:%.*]] = getelementptr float, ptr null, i64 [[TMP0]]
+; VEC1_INTERL2-NEXT: store float poison, ptr [[TMP1]], align 8
+; VEC1_INTERL2-NEXT: store float poison, ptr [[TMP2]], align 8
+; VEC1_INTERL2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
+; VEC1_INTERL2-NEXT: [[TMP3:%.*]] = icmp eq i64 [[INDEX_NEXT]], 200
+; VEC1_INTERL2-NEXT: br i1 [[TMP3]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
+; VEC1_INTERL2: middle.block:
+; VEC1_INTERL2-NEXT: br i1 true, label [[EXIT:%.*]], label [[SCALAR_PH]]
+; VEC1_INTERL2: scalar.ph:
+; VEC1_INTERL2-NEXT: br label [[LOOP:%.*]]
+; VEC1_INTERL2: loop:
+; VEC1_INTERL2-NEXT: br i1 poison, label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP16:![0-9]+]]
+; VEC1_INTERL2: exit:
+; VEC1_INTERL2-NEXT: ret i32 0
+;
+; VEC2_INTERL1_PRED_STORE-LABEL: @float_induction_with_dbg_on_fadd(
+; VEC2_INTERL1_PRED_STORE-NEXT: entry:
+; VEC2_INTERL1_PRED_STORE-NEXT: br label [[VECTOR_BODY:%.*]]
+; VEC2_INTERL1_PRED_STORE: vector.body:
+; VEC2_INTERL1_PRED_STORE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; VEC2_INTERL1_PRED_STORE-NEXT: [[TMP0:%.*]] = getelementptr float, ptr null, i64 [[INDEX]]
+; VEC2_INTERL1_PRED_STORE-NEXT: store <2 x float> poison, ptr [[TMP0]], align 8
+; VEC2_INTERL1_PRED_STORE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
+; VEC2_INTERL1_PRED_STORE-NEXT: [[TMP1:%.*]] = icmp eq i64 [[INDEX_NEXT]], 200
+; VEC2_INTERL1_PRED_STORE-NEXT: br i1 [[TMP1]], label [[EXIT:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
+; VEC2_INTERL1_PRED_STORE: exit:
+; VEC2_INTERL1_PRED_STORE-NEXT: ret i32 0
+;
+entry:
+ br label %loop
+
+loop:
+ %fp.iv = phi float [ 0.000000e+00, %entry ], [ %fp.iv.next, %loop ], !dbg !4
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
+ %fp.iv.next = fadd reassoc float %fp.iv, 0.000000e+00
+ %gep = getelementptr float, ptr null, i64 %iv
+ store float %fp.iv.next, ptr %gep, align 8
+ %iv.next = add i64 %iv, 1
+ %exitcond.not = icmp eq i64 %iv.next, 200
+ br i1 %exitcond.not, label %exit, label %loop
+
+exit:
+ ret i32 0
+}
+
+!llvm.module.flags = !{!3}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C11, file: !1)
+!1 = !DIFile(filename: "bbi-99425.c", directory: "/tmp")
+!2 = !{}
+!3 = !{i32 2, !"Debug Info Version", i32 3}
+!4 = !DILocation(line: 5, column: 12, scope: !8)
+!8 = distinct !DISubprogram(name: "main", scope: !1, file: !1, line: 3, type: !9, unit: !0, retainedNodes: !2)
+!9 = !DISubroutineType(types: !2)
diff --git a/llvm/test/Transforms/LoopVectorize/load-deref-pred-align.ll b/llvm/test/Transforms/LoopVectorize/load-deref-pred-align.ll
index a7c9a18..1ef01e3 100644
--- a/llvm/test/Transforms/LoopVectorize/load-deref-pred-align.ll
+++ b/llvm/test/Transforms/LoopVectorize/load-deref-pred-align.ll
@@ -296,3 +296,130 @@ latch:
loop_exit:
ret i8 %accum.next
}
+
+
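+; Annotation: the i8 induction %ind below can wrap once the trip count
+; exceeds 256, so vectorization requires a vector.scevcheck predicate
+; (note the icmp ugt ... 255 guard in the checks) before the predicated
+; vector body may execute.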
+define i32 @loop_requires_scev_predicate(ptr %dest, i32 %end) {
+; CHECK-LABEL: @loop_requires_scev_predicate(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[P1:%.*]] = alloca [1024 x i32], align 4
+; CHECK-NEXT: [[P2:%.*]] = alloca [1024 x i32], align 4
+; CHECK-NEXT: call void @init(ptr [[P1]])
+; CHECK-NEXT: call void @init(ptr [[P2]])
+; CHECK-NEXT: [[END_CLAMPED:%.*]] = and i32 [[END:%.*]], 1023
+; CHECK-NEXT: [[TMP0:%.*]] = trunc i32 [[END]] to i10
+; CHECK-NEXT: [[TMP1:%.*]] = zext i10 [[TMP0]] to i64
+; CHECK-NEXT: [[UMAX1:%.*]] = call i64 @llvm.umax.i64(i64 [[TMP1]], i64 1)
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[UMAX1]], 2
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_SCEVCHECK:%.*]]
+; CHECK: vector.scevcheck:
+; CHECK-NEXT: [[UMAX:%.*]] = call i32 @llvm.umax.i32(i32 [[END_CLAMPED]], i32 1)
+; CHECK-NEXT: [[TMP2:%.*]] = add nsw i32 [[UMAX]], -1
+; CHECK-NEXT: [[TMP3:%.*]] = trunc i32 [[TMP2]] to i8
+; CHECK-NEXT: [[TMP4:%.*]] = add i8 1, [[TMP3]]
+; CHECK-NEXT: [[TMP5:%.*]] = icmp ult i8 [[TMP4]], 1
+; CHECK-NEXT: [[TMP6:%.*]] = icmp ugt i32 [[TMP2]], 255
+; CHECK-NEXT: [[TMP7:%.*]] = or i1 [[TMP5]], [[TMP6]]
+; CHECK-NEXT: br i1 [[TMP7]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
+; CHECK: vector.ph:
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[UMAX1]], 2
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[UMAX1]], [[N_MOD_VF]]
+; CHECK-NEXT: [[IND_END:%.*]] = trunc i64 [[N_VEC]] to i8
+; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
+; CHECK: vector.body:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE5:%.*]] ]
+; CHECK-NEXT: [[TMP8:%.*]] = add i64 [[INDEX]], 0
+; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, ptr [[P1]], i64 [[TMP8]]
+; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i32, ptr [[TMP9]], i32 0
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x i32>, ptr [[TMP10]], align 4
+; CHECK-NEXT: [[TMP11:%.*]] = icmp ne <2 x i32> [[WIDE_LOAD]], zeroinitializer
+; CHECK-NEXT: [[TMP12:%.*]] = getelementptr i32, ptr [[P2]], i64 [[TMP8]]
+; CHECK-NEXT: [[TMP13:%.*]] = getelementptr i32, ptr [[TMP12]], i32 0
+; CHECK-NEXT: [[WIDE_LOAD3:%.*]] = load <2 x i32>, ptr [[TMP13]], align 4
+; CHECK-NEXT: [[TMP14:%.*]] = extractelement <2 x i1> [[TMP11]], i32 0
+; CHECK-NEXT: br i1 [[TMP14]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]]
+; CHECK: pred.store.if:
+; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds i32, ptr [[DEST:%.*]], i64 [[TMP8]]
+; CHECK-NEXT: [[TMP16:%.*]] = extractelement <2 x i32> [[WIDE_LOAD]], i32 0
+; CHECK-NEXT: [[TMP17:%.*]] = extractelement <2 x i32> [[WIDE_LOAD3]], i32 0
+; CHECK-NEXT: [[TMP18:%.*]] = add i32 [[TMP16]], [[TMP17]]
+; CHECK-NEXT: store i32 [[TMP18]], ptr [[TMP15]], align 4
+; CHECK-NEXT: br label [[PRED_STORE_CONTINUE]]
+; CHECK: pred.store.continue:
+; CHECK-NEXT: [[TMP19:%.*]] = extractelement <2 x i1> [[TMP11]], i32 1
+; CHECK-NEXT: br i1 [[TMP19]], label [[PRED_STORE_IF4:%.*]], label [[PRED_STORE_CONTINUE5]]
+; CHECK: pred.store.if4:
+; CHECK-NEXT: [[TMP20:%.*]] = add i64 [[INDEX]], 1
+; CHECK-NEXT: [[TMP21:%.*]] = getelementptr inbounds i32, ptr [[DEST]], i64 [[TMP20]]
+; CHECK-NEXT: [[TMP22:%.*]] = extractelement <2 x i32> [[WIDE_LOAD]], i32 1
+; CHECK-NEXT: [[TMP23:%.*]] = extractelement <2 x i32> [[WIDE_LOAD3]], i32 1
+; CHECK-NEXT: [[TMP24:%.*]] = add i32 [[TMP22]], [[TMP23]]
+; CHECK-NEXT: store i32 [[TMP24]], ptr [[TMP21]], align 4
+; CHECK-NEXT: br label [[PRED_STORE_CONTINUE5]]
+; CHECK: pred.store.continue5:
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
+; CHECK-NEXT: [[TMP25:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP25]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; CHECK: middle.block:
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[UMAX1]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
+; CHECK: scalar.ph:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i8 [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_SCEVCHECK]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL2:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ], [ 0, [[VECTOR_SCEVCHECK]] ]
+; CHECK-NEXT: br label [[FOR_BODY:%.*]]
+; CHECK: for.body:
+; CHECK-NEXT: [[IND:%.*]] = phi i8 [ [[IND_NEXT:%.*]], [[FOR_INC:%.*]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
+; CHECK-NEXT: [[GEP_IND:%.*]] = phi i64 [ [[GEP_IND_NEXT:%.*]], [[FOR_INC]] ], [ [[BC_RESUME_VAL2]], [[SCALAR_PH]] ]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[P1]], i64 [[GEP_IND]]
+; CHECK-NEXT: [[TMP26:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[DOWORK:%.*]] = icmp ne i32 [[TMP26]], 0
+; CHECK-NEXT: br i1 [[DOWORK]], label [[FOR_DOWORK:%.*]], label [[FOR_INC]]
+; CHECK: for.dowork:
+; CHECK-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds i32, ptr [[P2]], i64 [[GEP_IND]]
+; CHECK-NEXT: [[TMP27:%.*]] = load i32, ptr [[ARRAYIDX3]], align 4
+; CHECK-NEXT: [[ADD:%.*]] = add i32 [[TMP26]], [[TMP27]]
+; CHECK-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds i32, ptr [[DEST]], i64 [[GEP_IND]]
+; CHECK-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX5]], align 4
+; CHECK-NEXT: br label [[FOR_INC]]
+; CHECK: for.inc:
+; CHECK-NEXT: [[IND_NEXT]] = add i8 [[IND]], 1
+; CHECK-NEXT: [[CONV:%.*]] = zext i8 [[IND_NEXT]] to i32
+; CHECK-NEXT: [[GEP_IND_NEXT]] = add i64 [[GEP_IND]], 1
+; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[CONV]], [[END_CLAMPED]]
+; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[EXIT]], !llvm.loop [[LOOP9:![0-9]+]]
+; CHECK: exit:
+; CHECK-NEXT: ret i32 0
+;
+entry:
+ %p1 = alloca [1024 x i32]
+ %p2 = alloca [1024 x i32]
+ call void @init(ptr %p1)
+ call void @init(ptr %p2)
+ %end.clamped = and i32 %end, 1023
+ br label %for.body
+
+for.body:
+ %ind = phi i8 [ %ind.next, %for.inc ], [ 0, %entry ]
+ %gep.ind = phi i64 [ %gep.ind.next, %for.inc ], [ 0, %entry ]
+ %arrayidx = getelementptr inbounds i32, ptr %p1, i64 %gep.ind
+ %0 = load i32, ptr %arrayidx, align 4
+ %dowork = icmp ne i32 %0, 0
+ br i1 %dowork, label %for.dowork, label %for.inc
+
+for.dowork:
+ %arrayidx3 = getelementptr inbounds i32, ptr %p2, i64 %gep.ind
+ %1 = load i32, ptr %arrayidx3, align 4
+ %add = add i32 %0, %1
+ %arrayidx5 = getelementptr inbounds i32, ptr %dest, i64 %gep.ind
+ store i32 %add, ptr %arrayidx5, align 4
+ br label %for.inc
+
+for.inc:
+ %ind.next = add i8 %ind, 1
+ %conv = zext i8 %ind.next to i32
+ %gep.ind.next = add i64 %gep.ind, 1
+ %cmp = icmp ult i32 %conv, %end.clamped
+ br i1 %cmp, label %for.body, label %exit
+
+exit:
+ ret i32 0
+}
diff --git a/llvm/test/Transforms/LoopVectorize/simple_early_exit.ll b/llvm/test/Transforms/LoopVectorize/simple_early_exit.ll
index dcf5c9d..49454ae 100644
--- a/llvm/test/Transforms/LoopVectorize/simple_early_exit.ll
+++ b/llvm/test/Transforms/LoopVectorize/simple_early_exit.ll
@@ -7,10 +7,9 @@ declare void @init_mem(ptr, i64);
define i64 @same_exit_block_pre_inc_use1() {
; DEBUG-LABEL: LV: Checking a loop in 'same_exit_block_pre_inc_use1'
-; DEBUG: LV: Found an early exit. Retrying with speculative exit count.
-; DEBUG-NEXT: LV: Found speculative backedge taken count: 63
+; DEBUG: LV: Found an early exit loop with symbolic max backedge taken count: 63
; DEBUG-NEXT: LV: We can vectorize this loop!
-; DEBUG-NEXT: LV: Not vectorizing: Auto-vectorization of early exit loops is not yet supported.
+; DEBUG-NEXT: LV: Not vectorizing: Auto-vectorization of loops with uncountable early exit is not yet supported.
; CHECK-LABEL: define i64 @same_exit_block_pre_inc_use1() {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[P1:%.*]] = alloca [1024 x i8], align 1
@@ -1089,8 +1088,7 @@ loop.end:
define i64 @loop_contains_safe_call() {
; DEBUG-LABEL: LV: Checking a loop in 'loop_contains_safe_call'
-; DEBUG: LV: Found an early exit. Retrying with speculative exit count.
-; DEBUG-NEXT: LV: Found speculative backedge taken count: 63
+; DEBUG: LV: Found an early exit loop with symbolic max backedge taken count: 63
; DEBUG-NEXT: LV: We can vectorize this loop!
; CHECK-LABEL: define i64 @loop_contains_safe_call() {
; CHECK-NEXT: entry:
@@ -1193,8 +1191,7 @@ loop.end:
define i64 @loop_contains_safe_div() {
; DEBUG-LABEL: LV: Checking a loop in 'loop_contains_safe_div'
-; DEBUG: LV: Found an early exit. Retrying with speculative exit count.
-; DEBUG-NEXT: LV: Found speculative backedge taken count: 63
+; DEBUG: LV: Found an early exit loop with symbolic max backedge taken count: 63
; DEBUG-NEXT: LV: We can vectorize this loop!
; CHECK-LABEL: define i64 @loop_contains_safe_div() {
; CHECK-NEXT: entry:
@@ -1347,10 +1344,9 @@ loop.end:
define i64 @loop_contains_load_after_early_exit(ptr dereferenceable(1024) align(8) %p2) {
; DEBUG-LABEL: LV: Checking a loop in 'loop_contains_load_after_early_exit'
-; DEBUG: LV: Found an early exit. Retrying with speculative exit count.
-; DEBUG-NEXT: LV: Found speculative backedge taken count: 63
+; DEBUG: LV: Found an early exit loop with symbolic max backedge taken count: 63
; DEBUG-NEXT: LV: We can vectorize this loop!
-; DEBUG-NEXT: LV: Not vectorizing: Auto-vectorization of early exit loops is not yet supported.
+; DEBUG-NEXT: LV: Not vectorizing: Auto-vectorization of loops with uncountable early exit is not yet supported.
; CHECK-LABEL: define i64 @loop_contains_load_after_early_exit(
; CHECK-SAME: ptr align 8 dereferenceable(1024) [[P2:%.*]]) {
; CHECK-NEXT: entry:
@@ -1621,12 +1617,11 @@ loop.end:
; The form of the induction variables requires SCEV predicates.
-; TODO: We should fix isDereferenceableAndAlignedInLoop and
-; getSmallConstantMaxTripCount to cope with SCEV predicates when
-; requesting the small constant max trip count.
define i32 @diff_exit_block_needs_scev_check(i32 %end) {
; DEBUG-LABEL: LV: Checking a loop in 'diff_exit_block_needs_scev_check'
-; DEBUG: LV: Not vectorizing: Loop may fault.
+; DEBUG: LV: Found an early exit loop with symbolic max backedge taken count: (-1 + (1 umax (zext i10 (trunc i32 %end to i10) to i32)))<nsw>
+; DEBUG-NEXT: LV: We can vectorize this loop!
+; DEBUG-NEXT: LV: Not vectorizing: Auto-vectorization of loops with uncountable early exit is not yet supported.
; CHECK-LABEL: define i32 @diff_exit_block_needs_scev_check(
; CHECK-SAME: i32 [[END:%.*]]) {
; CHECK-NEXT: entry:
@@ -1695,9 +1690,8 @@ declare void @abort()
; early is loop invariant.
define i32 @diff_blocks_invariant_early_exit_cond(ptr %s) {
; DEBUG-LABEL: LV: Checking a loop in 'diff_blocks_invariant_early_exit_cond'
-; DEBUG: LV: Found an early exit. Retrying with speculative exit count.
-; DEBUG-NEXT: LV: Found speculative backedge taken count: 275
-; DEBUG: LV: Not vectorizing: Auto-vectorization of early exit loops is not yet supported.
+; DEBUG: LV: Found an early exit loop with symbolic max backedge taken count: 275
+; DEBUG: LV: Not vectorizing: Auto-vectorization of loops with uncountable early exit is not yet supported.
; CHECK-LABEL: define i32 @diff_blocks_invariant_early_exit_cond(
; CHECK-SAME: ptr [[S:%.*]]) {
; CHECK-NEXT: entry:
diff --git a/llvm/test/Transforms/MemCpyOpt/fca2memcpy.ll b/llvm/test/Transforms/MemCpyOpt/fca2memcpy.ll
index 51fad82..61e349e 100644
--- a/llvm/test/Transforms/MemCpyOpt/fca2memcpy.ll
+++ b/llvm/test/Transforms/MemCpyOpt/fca2memcpy.ll
@@ -141,4 +141,19 @@ define void @throwing_call(ptr noalias %src, ptr %dst) {
ret void
}
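+; A loop-carried MemoryPhi must not block the FCA load/store pair below from
+; being converted to a memmove.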
+define void @loop_memoryphi(ptr %a, ptr %b) {
+; CHECK-LABEL: @loop_memoryphi(
+; CHECK-NEXT: br label [[LOOP:%.*]]
+; CHECK: loop:
+; CHECK-NEXT: call void @llvm.memmove.p0.p0.i64(ptr align 8 [[B:%.*]], ptr align 8 [[A:%.*]], i64 16, i1 false)
+; CHECK-NEXT: br label [[LOOP]]
+;
+ br label %loop
+
+loop:
+ %v = load { i64, i64 }, ptr %a
+ store { i64, i64 } %v, ptr %b
+ br label %loop
+}
+
declare void @call()
diff --git a/llvm/test/Transforms/SLPVectorizer/AArch64/reduce-fadd.ll b/llvm/test/Transforms/SLPVectorizer/AArch64/reduce-fadd.ll
index edc0381..6dceabe 100644
--- a/llvm/test/Transforms/SLPVectorizer/AArch64/reduce-fadd.ll
+++ b/llvm/test/Transforms/SLPVectorizer/AArch64/reduce-fadd.ll
@@ -1,8 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
-; RUN: opt < %s -S -passes=slp-vectorizer -mtriple=aarch64-unknown-linux \
-; RUN: -mattr=-fullfp16 | FileCheck %s --check-prefixes=CHECK,NOFP16
-; RUN: opt < %s -S -passes=slp-vectorizer -mtriple=aarch64-unknown-linux \
-; RUN: -mattr=+fullfp16 | FileCheck %s --check-prefixes=CHECK,FP16
+; RUN: opt < %s -S -passes=slp-vectorizer -mtriple=aarch64-unknown-linux -mattr=-fullfp16 | FileCheck %s --check-prefixes=CHECK,NOFP16
+; RUN: opt < %s -S -passes=slp-vectorizer -mtriple=aarch64-unknown-linux -mattr=+fullfp16 | FileCheck %s --check-prefixes=CHECK,FULLFP16
define half @reduce_fast_half2(<2 x half> %vec2) {
; CHECK-LABEL: define half @reduce_fast_half2(
@@ -79,20 +77,26 @@ entry:
}
define half @reduce_fast_half8(<8 x half> %vec8) {
-; CHECK-LABEL: define half @reduce_fast_half8(
-; CHECK-SAME: <8 x half> [[VEC8:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[ENTRY:.*:]]
-; CHECK-NEXT: [[ELT4:%.*]] = extractelement <8 x half> [[VEC8]], i64 4
-; CHECK-NEXT: [[ELT5:%.*]] = extractelement <8 x half> [[VEC8]], i64 5
-; CHECK-NEXT: [[ELT6:%.*]] = extractelement <8 x half> [[VEC8]], i64 6
-; CHECK-NEXT: [[ELT7:%.*]] = extractelement <8 x half> [[VEC8]], i64 7
-; CHECK-NEXT: [[TMP0:%.*]] = shufflevector <8 x half> [[VEC8]], <8 x half> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-; CHECK-NEXT: [[TMP1:%.*]] = call fast half @llvm.vector.reduce.fadd.v4f16(half 0xH0000, <4 x half> [[TMP0]])
-; CHECK-NEXT: [[OP_RDX:%.*]] = fadd fast half [[TMP1]], [[ELT4]]
-; CHECK-NEXT: [[OP_RDX1:%.*]] = fadd fast half [[ELT5]], [[ELT6]]
-; CHECK-NEXT: [[OP_RDX2:%.*]] = fadd fast half [[OP_RDX]], [[OP_RDX1]]
-; CHECK-NEXT: [[OP_RDX3:%.*]] = fadd fast half [[OP_RDX2]], [[ELT7]]
-; CHECK-NEXT: ret half [[OP_RDX3]]
+; NOFP16-LABEL: define half @reduce_fast_half8(
+; NOFP16-SAME: <8 x half> [[VEC8:%.*]]) #[[ATTR0]] {
+; NOFP16-NEXT: [[ENTRY:.*:]]
+; NOFP16-NEXT: [[ELT4:%.*]] = extractelement <8 x half> [[VEC8]], i64 4
+; NOFP16-NEXT: [[ELT5:%.*]] = extractelement <8 x half> [[VEC8]], i64 5
+; NOFP16-NEXT: [[ELT6:%.*]] = extractelement <8 x half> [[VEC8]], i64 6
+; NOFP16-NEXT: [[ELT7:%.*]] = extractelement <8 x half> [[VEC8]], i64 7
+; NOFP16-NEXT: [[TMP0:%.*]] = shufflevector <8 x half> [[VEC8]], <8 x half> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; NOFP16-NEXT: [[TMP1:%.*]] = call fast half @llvm.vector.reduce.fadd.v4f16(half 0xH0000, <4 x half> [[TMP0]])
+; NOFP16-NEXT: [[OP_RDX:%.*]] = fadd fast half [[TMP1]], [[ELT4]]
+; NOFP16-NEXT: [[OP_RDX1:%.*]] = fadd fast half [[ELT5]], [[ELT6]]
+; NOFP16-NEXT: [[OP_RDX2:%.*]] = fadd fast half [[OP_RDX]], [[OP_RDX1]]
+; NOFP16-NEXT: [[OP_RDX3:%.*]] = fadd fast half [[OP_RDX2]], [[ELT7]]
+; NOFP16-NEXT: ret half [[OP_RDX3]]
+;
+; FULLFP16-LABEL: define half @reduce_fast_half8(
+; FULLFP16-SAME: <8 x half> [[VEC8:%.*]]) #[[ATTR0]] {
+; FULLFP16-NEXT: [[ENTRY:.*:]]
+; FULLFP16-NEXT: [[TMP0:%.*]] = call fast half @llvm.vector.reduce.fadd.v8f16(half 0xH0000, <8 x half> [[VEC8]])
+; FULLFP16-NEXT: ret half [[TMP0]]
;
entry:
%elt0 = extractelement <8 x half> %vec8, i64 0
@@ -154,37 +158,11 @@ entry:
}
define half @reduce_fast_half16(<16 x half> %vec16) {
-; NOFP16-LABEL: define half @reduce_fast_half16(
-; NOFP16-SAME: <16 x half> [[VEC16:%.*]]) #[[ATTR0]] {
-; NOFP16-NEXT: [[ENTRY:.*:]]
-; NOFP16-NEXT: [[TMP0:%.*]] = call fast half @llvm.vector.reduce.fadd.v16f16(half 0xH0000, <16 x half> [[VEC16]])
-; NOFP16-NEXT: ret half [[TMP0]]
-;
-; FP16-LABEL: define half @reduce_fast_half16(
-; FP16-SAME: <16 x half> [[VEC16:%.*]]) #[[ATTR0]] {
-; FP16-NEXT: [[ENTRY:.*:]]
-; FP16-NEXT: [[ELT4:%.*]] = extractelement <16 x half> [[VEC16]], i64 4
-; FP16-NEXT: [[ELT5:%.*]] = extractelement <16 x half> [[VEC16]], i64 5
-; FP16-NEXT: [[ELT6:%.*]] = extractelement <16 x half> [[VEC16]], i64 6
-; FP16-NEXT: [[ELT7:%.*]] = extractelement <16 x half> [[VEC16]], i64 7
-; FP16-NEXT: [[ELT12:%.*]] = extractelement <16 x half> [[VEC16]], i64 12
-; FP16-NEXT: [[ELT13:%.*]] = extractelement <16 x half> [[VEC16]], i64 13
-; FP16-NEXT: [[ELT14:%.*]] = extractelement <16 x half> [[VEC16]], i64 14
-; FP16-NEXT: [[ELT15:%.*]] = extractelement <16 x half> [[VEC16]], i64 15
-; FP16-NEXT: [[TMP0:%.*]] = shufflevector <16 x half> [[VEC16]], <16 x half> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-; FP16-NEXT: [[TMP1:%.*]] = call fast half @llvm.vector.reduce.fadd.v4f16(half 0xH0000, <4 x half> [[TMP0]])
-; FP16-NEXT: [[TMP2:%.*]] = shufflevector <16 x half> [[VEC16]], <16 x half> poison, <4 x i32> <i32 8, i32 9, i32 10, i32 11>
-; FP16-NEXT: [[TMP3:%.*]] = call fast half @llvm.vector.reduce.fadd.v4f16(half 0xH0000, <4 x half> [[TMP2]])
-; FP16-NEXT: [[OP_RDX:%.*]] = fadd fast half [[TMP1]], [[TMP3]]
-; FP16-NEXT: [[OP_RDX1:%.*]] = fadd fast half [[OP_RDX]], [[ELT4]]
-; FP16-NEXT: [[OP_RDX2:%.*]] = fadd fast half [[ELT5]], [[ELT6]]
-; FP16-NEXT: [[OP_RDX3:%.*]] = fadd fast half [[ELT7]], [[ELT12]]
-; FP16-NEXT: [[OP_RDX4:%.*]] = fadd fast half [[ELT13]], [[ELT14]]
-; FP16-NEXT: [[OP_RDX5:%.*]] = fadd fast half [[OP_RDX1]], [[OP_RDX2]]
-; FP16-NEXT: [[OP_RDX6:%.*]] = fadd fast half [[OP_RDX3]], [[OP_RDX4]]
-; FP16-NEXT: [[OP_RDX7:%.*]] = fadd fast half [[OP_RDX5]], [[OP_RDX6]]
-; FP16-NEXT: [[OP_RDX8:%.*]] = fadd fast half [[OP_RDX7]], [[ELT15]]
-; FP16-NEXT: ret half [[OP_RDX8]]
+; CHECK-LABEL: define half @reduce_fast_half16(
+; CHECK-SAME: <16 x half> [[VEC16:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[TMP0:%.*]] = call fast half @llvm.vector.reduce.fadd.v16f16(half 0xH0000, <16 x half> [[VEC16]])
+; CHECK-NEXT: ret half [[TMP0]]
;
entry:
%elt0 = extractelement <16 x half> %vec16, i64 0
@@ -512,19 +490,11 @@ define float @reduce_fast_float_case1(ptr %a) {
; CHECK-LABEL: define float @reduce_fast_float_case1(
; CHECK-SAME: ptr [[A:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*:]]
-; CHECK-NEXT: [[LOAD:%.*]] = load float, ptr [[A]], align 4
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 4
-; CHECK-NEXT: [[LOAD1:%.*]] = load float, ptr [[GEP]], align 4
-; CHECK-NEXT: [[ADD1:%.*]] = fadd fast float [[LOAD1]], [[LOAD]]
-; CHECK-NEXT: [[GEP2:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 8
-; CHECK-NEXT: [[LOAD2:%.*]] = load float, ptr [[GEP2]], align 4
-; CHECK-NEXT: [[ADD2:%.*]] = fadd fast float [[LOAD2]], [[ADD1]]
-; CHECK-NEXT: [[GEP3:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 12
-; CHECK-NEXT: [[LOAD3:%.*]] = load float, ptr [[GEP3]], align 4
-; CHECK-NEXT: [[ADD3:%.*]] = fadd fast float [[LOAD3]], [[ADD2]]
+; CHECK-NEXT: [[TMP0:%.*]] = load <4 x float>, ptr [[A]], align 4
; CHECK-NEXT: [[GEP4:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 16
; CHECK-NEXT: [[LOAD4:%.*]] = load float, ptr [[GEP4]], align 4
-; CHECK-NEXT: [[ADD4:%.*]] = fadd fast float [[LOAD4]], [[ADD3]]
+; CHECK-NEXT: [[TMP1:%.*]] = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> [[TMP0]])
+; CHECK-NEXT: [[ADD4:%.*]] = fadd fast float [[TMP1]], [[LOAD4]]
; CHECK-NEXT: ret float [[ADD4]]
;
entry:
@@ -586,24 +556,11 @@ define float @reduce_fast_float_case2(ptr %a, ptr %b) {
; CHECK-LABEL: define float @reduce_fast_float_case2(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*:]]
-; CHECK-NEXT: [[GEPA2:%.*]] = getelementptr inbounds float, ptr [[A]], i32 2
-; CHECK-NEXT: [[GEPA3:%.*]] = getelementptr inbounds float, ptr [[A]], i32 3
-; CHECK-NEXT: [[GEPB2:%.*]] = getelementptr inbounds float, ptr [[B]], i32 2
-; CHECK-NEXT: [[GEPB3:%.*]] = getelementptr inbounds float, ptr [[B]], i32 3
-; CHECK-NEXT: [[LOADA2:%.*]] = load float, ptr [[GEPA2]], align 4
-; CHECK-NEXT: [[LOADA3:%.*]] = load float, ptr [[GEPA3]], align 4
-; CHECK-NEXT: [[LOADB2:%.*]] = load float, ptr [[GEPB2]], align 4
-; CHECK-NEXT: [[LOADB3:%.*]] = load float, ptr [[GEPB3]], align 4
-; CHECK-NEXT: [[TMP0:%.*]] = load <2 x float>, ptr [[A]], align 4
-; CHECK-NEXT: [[TMP1:%.*]] = load <2 x float>, ptr [[B]], align 4
-; CHECK-NEXT: [[TMP2:%.*]] = fadd fast <2 x float> [[TMP0]], [[TMP1]]
-; CHECK-NEXT: [[ADD2:%.*]] = fadd fast float [[LOADA3]], [[LOADB2]]
-; CHECK-NEXT: [[ADD3:%.*]] = fadd fast float [[LOADA2]], [[LOADB3]]
-; CHECK-NEXT: [[TMP3:%.*]] = extractelement <2 x float> [[TMP2]], i32 0
-; CHECK-NEXT: [[TMP4:%.*]] = extractelement <2 x float> [[TMP2]], i32 1
-; CHECK-NEXT: [[RED1:%.*]] = fadd fast float [[TMP3]], [[TMP4]]
-; CHECK-NEXT: [[RED2:%.*]] = fadd fast float [[ADD2]], [[RED1]]
-; CHECK-NEXT: [[RED3:%.*]] = fadd fast float [[ADD3]], [[RED2]]
+; CHECK-NEXT: [[TMP0:%.*]] = load <4 x float>, ptr [[A]], align 4
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x float>, ptr [[B]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = call <8 x float> @llvm.vector.insert.v8f32.v4f32(<8 x float> poison, <4 x float> [[TMP1]], i64 0)
+; CHECK-NEXT: [[TMP3:%.*]] = call <8 x float> @llvm.vector.insert.v8f32.v4f32(<8 x float> [[TMP2]], <4 x float> [[TMP0]], i64 4)
+; CHECK-NEXT: [[RED3:%.*]] = call fast float @llvm.vector.reduce.fadd.v8f32(float 0.000000e+00, <8 x float> [[TMP3]])
; CHECK-NEXT: ret float [[RED3]]
;
entry:
diff --git a/llvm/test/Transforms/SLPVectorizer/RISCV/revec-getGatherCost.ll b/llvm/test/Transforms/SLPVectorizer/RISCV/revec-getGatherCost.ll
new file mode 100644
index 0000000..887f59b
--- /dev/null
+++ b/llvm/test/Transforms/SLPVectorizer/RISCV/revec-getGatherCost.ll
@@ -0,0 +1,42 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -mtriple=riscv64 -mcpu=sifive-x280 -passes=slp-vectorizer -S -slp-revec -slp-max-reg-size=1024 -slp-threshold=-10 -pass-remarks-output=%t %s | FileCheck %s
+; RUN: FileCheck --input-file=%t --check-prefix=YAML %s
+
+; YAML: --- !Passed
+; YAML: Pass: slp-vectorizer
+; YAML: Name: StoresVectorized
+; YAML: Function: test
+; YAML: Args:
+; YAML: - String: 'Stores SLP vectorized with cost '
+; YAML: - Cost: '6'
+; YAML: - String: ' and with tree size '
+; YAML: - TreeSize: '5'
+
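+; The two <4 x float> fmuladds below share the %load17 operand; with -slp-revec
+; they are revectorized into a single <8 x float> fmuladd, so the gather cost
+; of the duplicated operand must be accounted for (cost 6 in the remark above).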
+define void @test(<4 x float> %load6, <4 x float> %load7, <4 x float> %load8, <4 x float> %load17, <4 x float> %fmuladd7, <4 x float> %fmuladd16, ptr %out_ptr) {
+; CHECK-LABEL: @test(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[VEXT165_I:%.*]] = shufflevector <4 x float> [[LOAD6:%.*]], <4 x float> [[LOAD7:%.*]], <4 x i32> <i32 2, i32 3, i32 4, i32 5>
+; CHECK-NEXT: [[VEXT309_I:%.*]] = shufflevector <4 x float> [[LOAD7]], <4 x float> [[LOAD8:%.*]], <4 x i32> <i32 2, i32 3, i32 4, i32 5>
+; CHECK-NEXT: [[TMP0:%.*]] = call <8 x float> @llvm.vector.insert.v8f32.v4f32(<8 x float> poison, <4 x float> [[VEXT165_I]], i64 0)
+; CHECK-NEXT: [[TMP1:%.*]] = call <8 x float> @llvm.vector.insert.v8f32.v4f32(<8 x float> [[TMP0]], <4 x float> [[VEXT309_I]], i64 4)
+; CHECK-NEXT: [[TMP2:%.*]] = call <8 x float> @llvm.vector.insert.v8f32.v4f32(<8 x float> poison, <4 x float> poison, i64 4)
+; CHECK-NEXT: [[TMP3:%.*]] = call <8 x float> @llvm.vector.insert.v8f32.v4f32(<8 x float> [[TMP2]], <4 x float> [[LOAD17:%.*]], i64 0)
+; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <8 x float> [[TMP3]], <8 x float> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
+; CHECK-NEXT: [[TMP5:%.*]] = call <8 x float> @llvm.vector.insert.v8f32.v4f32(<8 x float> poison, <4 x float> [[FMULADD7:%.*]], i64 0)
+; CHECK-NEXT: [[TMP6:%.*]] = call <8 x float> @llvm.vector.insert.v8f32.v4f32(<8 x float> [[TMP5]], <4 x float> [[FMULADD16:%.*]], i64 4)
+; CHECK-NEXT: [[TMP7:%.*]] = call <8 x float> @llvm.fmuladd.v8f32(<8 x float> [[TMP1]], <8 x float> [[TMP4]], <8 x float> [[TMP6]])
+; CHECK-NEXT: store <8 x float> [[TMP7]], ptr [[OUT_PTR:%.*]], align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %vext165.i = shufflevector <4 x float> %load6, <4 x float> %load7, <4 x i32> <i32 2, i32 3, i32 4, i32 5>
+ %vext309.i = shufflevector <4 x float> %load7, <4 x float> %load8, <4 x i32> <i32 2, i32 3, i32 4, i32 5>
+ %fmuladd8 = tail call noundef <4 x float> @llvm.fmuladd.v4f32(<4 x float> %vext165.i, <4 x float> %load17, <4 x float> %fmuladd7)
+ %fmuladd17 = tail call noundef <4 x float> @llvm.fmuladd.v4f32(<4 x float> %vext309.i, <4 x float> %load17, <4 x float> %fmuladd16)
+ %add.ptr.i.i = getelementptr inbounds i8, ptr %out_ptr, i64 16
+ store <4 x float> %fmuladd8, ptr %out_ptr, align 4
+ store <4 x float> %fmuladd17, ptr %add.ptr.i.i, align 4
+ ret void
+}
+
+declare <4 x float> @llvm.fmuladd.v4f32(<4 x float>, <4 x float>, <4 x float>)
diff --git a/llvm/test/Transforms/SLPVectorizer/RISCV/select-profitability.ll b/llvm/test/Transforms/SLPVectorizer/RISCV/select-profitability.ll
new file mode 100644
index 0000000..4496b19
--- /dev/null
+++ b/llvm/test/Transforms/SLPVectorizer/RISCV/select-profitability.ll
@@ -0,0 +1,55 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; RUN: opt -S --passes=slp-vectorizer -mtriple=riscv64-unknown-linux -mattr=+v < %s | FileCheck %s
+
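+; An or-reduction over selects of the same power-of-two constant (65536) and
+; zero is profitable to vectorize.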
+define i32 @pow2_zero_constant_shift(i16 zeroext %a, i16 zeroext %b, i16 zeroext %c, i16 zeroext %d) {
+; CHECK-LABEL: define i32 @pow2_zero_constant_shift(
+; CHECK-SAME: i16 zeroext [[A:%.*]], i16 zeroext [[B:%.*]], i16 zeroext [[C:%.*]], i16 zeroext [[D:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: [[TMP1:%.*]] = insertelement <4 x i16> poison, i16 [[A]], i32 0
+; CHECK-NEXT: [[TMP2:%.*]] = insertelement <4 x i16> [[TMP1]], i16 [[B]], i32 1
+; CHECK-NEXT: [[TMP3:%.*]] = insertelement <4 x i16> [[TMP2]], i16 [[C]], i32 2
+; CHECK-NEXT: [[TMP4:%.*]] = insertelement <4 x i16> [[TMP3]], i16 [[D]], i32 3
+; CHECK-NEXT: [[TMP5:%.*]] = icmp eq <4 x i16> [[TMP4]], <i16 1, i16 1, i16 1, i16 1>
+; CHECK-NEXT: [[TMP6:%.*]] = select <4 x i1> [[TMP5]], <4 x i32> <i32 65536, i32 65536, i32 65536, i32 65536>, <4 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP7:%.*]] = call i32 @llvm.vector.reduce.or.v4i32(<4 x i32> [[TMP6]])
+; CHECK-NEXT: ret i32 [[TMP7]]
+;
+ %t39.i0 = icmp eq i16 %a, 1
+ %t39.i1 = icmp eq i16 %b, 1
+ %t39.i2 = icmp eq i16 %c, 1
+ %t39.i3 = icmp eq i16 %d, 1
+ %t40.i0 = select i1 %t39.i0, i32 65536, i32 0
+ %t40.i1 = select i1 %t39.i1, i32 65536, i32 0
+ %t40.i2 = select i1 %t39.i2, i32 65536, i32 0
+ %t40.i3 = select i1 %t39.i3, i32 65536, i32 0
+ %or.rdx0 = or i32 %t40.i0, %t40.i1
+ %or.rdx1 = or i32 %t40.i2, %t40.i3
+ %or.rdx2 = or i32 %or.rdx0, %or.rdx1
+ ret i32 %or.rdx2
+}
+
+; TODO: This case is unprofitable, and we should not be vectorizing this.
+define i32 @pow2_zero_variable_shift(i16 zeroext %a, i16 zeroext %b, i16 zeroext %c, i16 zeroext %d) {
+; CHECK-LABEL: define i32 @pow2_zero_variable_shift(
+; CHECK-SAME: i16 zeroext [[A:%.*]], i16 zeroext [[B:%.*]], i16 zeroext [[C:%.*]], i16 zeroext [[D:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = insertelement <4 x i16> poison, i16 [[A]], i32 0
+; CHECK-NEXT: [[TMP2:%.*]] = insertelement <4 x i16> [[TMP1]], i16 [[B]], i32 1
+; CHECK-NEXT: [[TMP3:%.*]] = insertelement <4 x i16> [[TMP2]], i16 [[C]], i32 2
+; CHECK-NEXT: [[TMP4:%.*]] = insertelement <4 x i16> [[TMP3]], i16 [[D]], i32 3
+; CHECK-NEXT: [[TMP5:%.*]] = icmp eq <4 x i16> [[TMP4]], <i16 1, i16 1, i16 1, i16 1>
+; CHECK-NEXT: [[TMP6:%.*]] = select <4 x i1> [[TMP5]], <4 x i32> <i32 524288, i32 262144, i32 131072, i32 65536>, <4 x i32> zeroinitializer
+; CHECK-NEXT: [[OR_RDX2:%.*]] = call i32 @llvm.vector.reduce.or.v4i32(<4 x i32> [[TMP6]])
+; CHECK-NEXT: ret i32 [[OR_RDX2]]
+;
+ %t39.i0 = icmp eq i16 %a, 1
+ %t39.i1 = icmp eq i16 %b, 1
+ %t39.i2 = icmp eq i16 %c, 1
+ %t39.i3 = icmp eq i16 %d, 1
+ %t40.i0 = select i1 %t39.i0, i32 524288, i32 0
+ %t40.i1 = select i1 %t39.i1, i32 262144, i32 0
+ %t40.i2 = select i1 %t39.i2, i32 131072, i32 0
+ %t40.i3 = select i1 %t39.i3, i32 65536, i32 0
+ %or.rdx0 = or i32 %t40.i0, %t40.i1
+ %or.rdx1 = or i32 %t40.i2, %t40.i3
+ %or.rdx2 = or i32 %or.rdx0, %or.rdx1
+ ret i32 %or.rdx2
+}
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/reduced-value-vectorized-later.ll b/llvm/test/Transforms/SLPVectorizer/X86/reduced-value-vectorized-later.ll
new file mode 100644
index 0000000..6b0b22b
--- /dev/null
+++ b/llvm/test/Transforms/SLPVectorizer/X86/reduced-value-vectorized-later.ll
@@ -0,0 +1,41 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -S -passes=slp-vectorizer -mtriple=x86_64-unknown-linux-gnu < %s | FileCheck %s
+
+define i16 @test() {
+; CHECK-LABEL: define i16 @test() {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[TMP0:%.*]] = call i16 @llvm.vector.reduce.or.v8i16(<8 x i16> zeroinitializer)
+; CHECK-NEXT: [[TMP1:%.*]] = call i16 @llvm.vector.reduce.or.v4i16(<4 x i16> zeroinitializer)
+; CHECK-NEXT: [[OP_RDX:%.*]] = or i16 [[TMP0]], [[TMP1]]
+; CHECK-NEXT: [[OP_RDX1:%.*]] = or i16 [[OP_RDX]], 0
+; CHECK-NEXT: ret i16 [[OP_RDX1]]
+;
+entry:
+ %subi = add i16 0, 0
+ %sub40.i = add i16 %subi, 0
+ %sub41.i = add i16 %subi, 0
+ %sub42.i = add i16 %subi, 0
+ %sub43.i = add i16 %subi, 0
+ %sub44.i = add i16 %subi, 0
+ %sub45.i = add i16 %subi, 0
+ %sub46.i = add i16 0, 0
+ %sub47.i = add i16 0, 0
+ %sub48.i = add i16 0, 0
+ %sub49.i = add i16 0, 0
+ %or40.i = or i16 %sub40.i, %sub41.i
+ %or41.i = or i16 %or40.i, %sub42.i
+ %or42.i = or i16 %or41.i, %sub43.i
+ %or43.i = or i16 %or42.i, %sub44.i
+ %or44.i = or i16 %or43.i, %sub45.i
+ %or45.i = or i16 %or44.i, %sub46.i
+ %or46.i = or i16 %or45.i, %sub47.i
+ %or47.i = or i16 %or46.i, %sub48.i
+ %or48.i = or i16 %or47.i, %sub49.i
+ %or50.i = or i16 %or48.i, %subi
+ %subii = add i16 0, 0
+ %subi16.i = add i16 %subii, 0
+ %subi17.i = add i16 %subii, 0
+ %0 = or i16 %subi16.i, %subi17.i
+ %1 = or i16 %0, %or50.i
+ ret i16 %1
+}
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/splat-score-adjustment.ll b/llvm/test/Transforms/SLPVectorizer/X86/splat-score-adjustment.ll
new file mode 100644
index 0000000..33fa00c
--- /dev/null
+++ b/llvm/test/Transforms/SLPVectorizer/X86/splat-score-adjustment.ll
@@ -0,0 +1,89 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -S --passes=slp-vectorizer -mtriple=x86_64-unknown-linux-gnu -mcpu=core-avx2 < %s | FileCheck %s
+
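+; The xor chains below keep reusing the same loaded and phi values, exercising
+; the splat score adjustment in SLP's operand-reordering lookahead heuristic.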
+define i32 @a() {
+; CHECK-LABEL: define i32 @a(
+; CHECK-SAME: ) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: br label %[[BB1:.*]]
+; CHECK: [[BB1]]:
+; CHECK-NEXT: [[TMP4:%.*]] = phi <4 x i8> [ zeroinitializer, [[TMP0:%.*]] ], [ [[TMP6:%.*]], %[[BB1]] ]
+; CHECK-NEXT: [[TMP3:%.*]] = phi <2 x i8> [ zeroinitializer, [[TMP0]] ], [ [[TMP17:%.*]], %[[BB1]] ]
+; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <4 x i8> [[TMP4]], <4 x i8> poison, <8 x i32> <i32 0, i32 0, i32 1, i32 1, i32 2, i32 2, i32 3, i32 3>
+; CHECK-NEXT: [[TMP6]] = load <4 x i8>, ptr null, align 4
+; CHECK-NEXT: [[TMP12:%.*]] = shufflevector <4 x i8> [[TMP6]], <4 x i8> poison, <4 x i32> <i32 poison, i32 poison, i32 0, i32 1>
+; CHECK-NEXT: [[TMP7:%.*]] = shufflevector <2 x i8> [[TMP3]], <2 x i8> poison, <4 x i32> <i32 0, i32 1, i32 poison, i32 poison>
+; CHECK-NEXT: [[TMP8:%.*]] = shufflevector <4 x i8> [[TMP12]], <4 x i8> [[TMP7]], <4 x i32> <i32 4, i32 5, i32 2, i32 3>
+; CHECK-NEXT: [[TMP9:%.*]] = xor <4 x i8> [[TMP6]], [[TMP8]]
+; CHECK-NEXT: [[TMP10:%.*]] = shufflevector <4 x i8> [[TMP6]], <4 x i8> poison, <8 x i32> <i32 poison, i32 0, i32 poison, i32 1, i32 poison, i32 2, i32 poison, i32 3>
+; CHECK-NEXT: [[TMP11:%.*]] = shufflevector <4 x i8> [[TMP9]], <4 x i8> poison, <8 x i32> <i32 0, i32 poison, i32 1, i32 poison, i32 2, i32 poison, i32 3, i32 poison>
+; CHECK-NEXT: [[TMP18:%.*]] = shufflevector <8 x i8> [[TMP10]], <8 x i8> [[TMP11]], <8 x i32> <i32 8, i32 1, i32 10, i32 3, i32 12, i32 5, i32 14, i32 7>
+; CHECK-NEXT: [[TMP19:%.*]] = shufflevector <4 x i8> [[TMP4]], <4 x i8> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison>
+; CHECK-NEXT: [[TMP21:%.*]] = shufflevector <8 x i8> [[TMP19]], <8 x i8> [[TMP18]], <8 x i32> <i32 1, i32 3, i32 2, i32 9, i32 3, i32 11, i32 9, i32 13>
+; CHECK-NEXT: [[TMP22:%.*]] = xor <8 x i8> [[TMP18]], [[TMP21]]
+; CHECK-NEXT: [[TMP23:%.*]] = xor <8 x i8> [[TMP22]], [[TMP5]]
+; CHECK-NEXT: store <8 x i8> [[TMP23]], ptr null, align 4
+; CHECK-NEXT: [[TMP17]] = shufflevector <4 x i8> [[TMP6]], <4 x i8> poison, <2 x i32> <i32 2, i32 3>
+; CHECK-NEXT: br label %[[BB1]]
+;
+ br label %1
+
+1:
+ %2 = phi i8 [ 0, %0 ], [ %40, %1 ]
+ %3 = phi i8 [ 0, %0 ], [ %28, %1 ]
+ %4 = phi i8 [ 0, %0 ], [ %16, %1 ]
+ %5 = phi i8 [ 0, %0 ], [ %6, %1 ]
+ %6 = load i8, ptr null, align 4
+ %7 = xor i8 %6, %3
+ %8 = xor i8 %7, %4
+ %9 = xor i8 %8, %5
+ store i8 %9, ptr null, align 4
+ %10 = xor i8 %6, %2
+ %11 = xor i8 %10, %5
+ %12 = add i64 0, 1
+ %13 = getelementptr i8, ptr null, i64 %12
+ store i8 %11, ptr %13, align 1
+ %14 = add i64 0, 1
+ %15 = getelementptr i8, ptr null, i64 %14
+ %16 = load i8, ptr %15, align 1
+ %17 = xor i8 %16, %2
+ %18 = xor i8 %17, %3
+ %19 = xor i8 %18, %4
+ %20 = add i64 0, 2
+ %21 = getelementptr i8, ptr null, i64 %20
+ store i8 %19, ptr %21, align 2
+ %22 = xor i8 %16, %6
+ %23 = xor i8 %22, %4
+ %24 = add i64 0, 3
+ %25 = getelementptr i8, ptr null, i64 %24
+ store i8 %23, ptr %25, align 1
+ %26 = add i64 0, 2
+ %27 = getelementptr i8, ptr null, i64 %26
+ %28 = load i8, ptr %27, align 2
+ %29 = xor i8 %28, %6
+ %30 = xor i8 %29, %2
+ %31 = xor i8 %30, %3
+ %32 = add i64 0, 4
+ %33 = getelementptr i8, ptr null, i64 %32
+ store i8 %31, ptr %33, align 4
+ %34 = xor i8 %28, %16
+ %35 = xor i8 %34, %3
+ %36 = add i64 0, 5
+ %37 = getelementptr i8, ptr null, i64 %36
+ store i8 %35, ptr %37, align 1
+ %38 = add i64 0, 3
+ %39 = getelementptr i8, ptr null, i64 %38
+ %40 = load i8, ptr %39, align 1
+ %41 = xor i8 %40, %16
+ %42 = xor i8 %41, %6
+ %43 = xor i8 %42, %2
+ %44 = add i64 0, 6
+ %45 = getelementptr i8, ptr null, i64 %44
+ store i8 %43, ptr %45, align 2
+ %46 = xor i8 %40, %28
+ %47 = xor i8 %46, %2
+ %48 = add i64 0, 7
+ %49 = getelementptr i8, ptr null, i64 %48
+ store i8 %47, ptr %49, align 1
+ br label %1
+}
+
diff --git a/llvm/test/Transforms/SLPVectorizer/alternate-cmp-swapped-pred-parent.ll b/llvm/test/Transforms/SLPVectorizer/alternate-cmp-swapped-pred-parent.ll
index cbac456..371b230 100644
--- a/llvm/test/Transforms/SLPVectorizer/alternate-cmp-swapped-pred-parent.ll
+++ b/llvm/test/Transforms/SLPVectorizer/alternate-cmp-swapped-pred-parent.ll
@@ -1,7 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; REQUIRES: aarch64-registered-target, x86-registered-target
-; RUN: opt < %s -mtriple=x86_64-unknown -passes=slp-vectorizer -S -slp-threshold=-1000 | FileCheck %s
-; RUN: opt < %s -mtriple=aarch64-unknown-linux-gnu -passes=slp-vectorizer -S -slp-threshold=-1000 | FileCheck %s
+; RUN: %if aarch64-registered-target %{ opt < %s -mtriple=aarch64-unknown-linux-gnu -passes=slp-vectorizer -S -slp-threshold=-1000 | FileCheck %s %}
+; RUN: %if x86-registered-target %{ opt < %s -mtriple=x86_64-unknown -passes=slp-vectorizer -S -slp-threshold=-1000 | FileCheck %s %}
define void @test() {
; CHECK-LABEL: @test(
diff --git a/llvm/test/Transforms/SLPVectorizer/alternate-opcode-sindle-bv.ll b/llvm/test/Transforms/SLPVectorizer/alternate-opcode-sindle-bv.ll
index baf94e4..c250029 100644
--- a/llvm/test/Transforms/SLPVectorizer/alternate-opcode-sindle-bv.ll
+++ b/llvm/test/Transforms/SLPVectorizer/alternate-opcode-sindle-bv.ll
@@ -1,7 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
-; REQUIRES: aarch64-registered-target, x86-registered-target
-; RUN: opt -S --passes=slp-vectorizer -mtriple=x86_64-unknown-linux-gnu < %s | FileCheck %s
-; RUN: opt -S --passes=slp-vectorizer -mtriple=aarch64-unknown-linux-gnu < %s | FileCheck %s
+; RUN: %if x86-registered-target %{ opt -S --passes=slp-vectorizer -mtriple=x86_64-unknown-linux-gnu < %s | FileCheck %s %}
+; RUN: %if aarch64-registered-target %{ opt -S --passes=slp-vectorizer -mtriple=aarch64-unknown-linux-gnu < %s | FileCheck %s %}
define <2 x i32> @test(i32 %arg) {
; CHECK-LABEL: define <2 x i32> @test(
diff --git a/llvm/test/Transforms/SLPVectorizer/arith-div-undef.ll b/llvm/test/Transforms/SLPVectorizer/arith-div-undef.ll
index dc6b024..3e45ace 100644
--- a/llvm/test/Transforms/SLPVectorizer/arith-div-undef.ll
+++ b/llvm/test/Transforms/SLPVectorizer/arith-div-undef.ll
@@ -1,7 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; REQUIRES: aarch64-registered-target, x86-registered-target
-; RUN: opt < %s -mtriple=x86_64-unknown -passes=slp-vectorizer,instcombine -S -slp-threshold=-10000 | FileCheck %s
-; RUN: opt < %s -mtriple=aarch64-unknown-linux-gnu -passes=slp-vectorizer,instcombine -S -slp-threshold=-10000 | FileCheck %s
+; RUN: %if x86-registered-target %{ opt < %s -mtriple=x86_64-unknown -passes=slp-vectorizer,instcombine -S -slp-threshold=-10000 | FileCheck %s %}
+; RUN: %if aarch64-registered-target %{ opt < %s -mtriple=aarch64-unknown-linux-gnu -passes=slp-vectorizer,instcombine -S -slp-threshold=-10000 | FileCheck %s %}
define <8 x i32> @sdiv_v8i32_undefs(<8 x i32> %a) {
; CHECK-LABEL: @sdiv_v8i32_undefs(
diff --git a/llvm/test/Transforms/SLPVectorizer/bool-logical-op-reduction-with-poison.ll b/llvm/test/Transforms/SLPVectorizer/bool-logical-op-reduction-with-poison.ll
index bad0a28..a5b1e9b 100644
--- a/llvm/test/Transforms/SLPVectorizer/bool-logical-op-reduction-with-poison.ll
+++ b/llvm/test/Transforms/SLPVectorizer/bool-logical-op-reduction-with-poison.ll
@@ -1,7 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 3
-; REQUIRES: aarch64-registered-target, x86-registered-target
-; RUN: opt -S --passes=slp-vectorizer < %s -mtriple=x86_64-unknown-linux-gnu | FileCheck %s
-; RUN: opt -S --passes=slp-vectorizer < %s -mtriple=aarch64-unknown-linux-gnu | FileCheck %s
+; RUN: %if x86-registered-target %{ opt -S --passes=slp-vectorizer < %s -mtriple=x86_64-unknown-linux-gnu | FileCheck %s %}
+; RUN: %if aarch64-registered-target %{ opt -S --passes=slp-vectorizer < %s -mtriple=aarch64-unknown-linux-gnu | FileCheck %s %}
+
define i1 @test(i32 %0, i32 %1, i32 %p) {
; CHECK-LABEL: define i1 @test(
diff --git a/llvm/test/Transforms/SLPVectorizer/buildvector-insert-mask-size.ll b/llvm/test/Transforms/SLPVectorizer/buildvector-insert-mask-size.ll
index 9704fc2..be7b009 100644
--- a/llvm/test/Transforms/SLPVectorizer/buildvector-insert-mask-size.ll
+++ b/llvm/test/Transforms/SLPVectorizer/buildvector-insert-mask-size.ll
@@ -1,7 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; REQUIRES: aarch64-registered-target, x86-registered-target
-; RUN: opt -S -passes=slp-vectorizer -mtriple=x86_64-unknown-linux < %s -slp-threshold=-1 | FileCheck %s
-; RUN: opt -S -passes=slp-vectorizer -mtriple=aarch64-unknown-linux < %s -slp-threshold=-1 | FileCheck %s
+; RUN: %if x86-registered-target %{ opt -S -passes=slp-vectorizer -mtriple=x86_64-unknown-linux < %s -slp-threshold=-1 | FileCheck %s %}
+; RUN: %if aarch64-registered-target %{ opt -S -passes=slp-vectorizer -mtriple=aarch64-unknown-linux < %s -slp-threshold=-1 | FileCheck %s %}
define void @test() {
; CHECK-LABEL: @test(
diff --git a/llvm/test/Transforms/SLPVectorizer/buildvector-nodes-dependency.ll b/llvm/test/Transforms/SLPVectorizer/buildvector-nodes-dependency.ll
index 5f63a31..36abe96 100644
--- a/llvm/test/Transforms/SLPVectorizer/buildvector-nodes-dependency.ll
+++ b/llvm/test/Transforms/SLPVectorizer/buildvector-nodes-dependency.ll
@@ -1,7 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 2
-; REQUIRES: aarch64-registered-target, x86-registered-target
-; RUN: opt -passes=slp-vectorizer -S -mtriple=x86_64 < %s | FileCheck %s
-; RUN: opt -passes=slp-vectorizer -S -mtriple=aarch64-unknown-linux-gnu < %s | FileCheck %s
+; RUN: %if x86-registered-target %{ opt -passes=slp-vectorizer -S -mtriple=x86_64 < %s | FileCheck %s %}
+; RUN: %if aarch64-registered-target %{ opt -passes=slp-vectorizer -S -mtriple=aarch64-unknown-linux-gnu < %s | FileCheck %s %}
define double @test() {
; CHECK-LABEL: define double @test() {
diff --git a/llvm/test/Transforms/SLPVectorizer/call-arg-reduced-by-minbitwidth.ll b/llvm/test/Transforms/SLPVectorizer/call-arg-reduced-by-minbitwidth.ll
index 5cd87ab..f0d5629 100644
--- a/llvm/test/Transforms/SLPVectorizer/call-arg-reduced-by-minbitwidth.ll
+++ b/llvm/test/Transforms/SLPVectorizer/call-arg-reduced-by-minbitwidth.ll
@@ -1,7 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
-; REQUIRES: aarch64-registered-target, x86-registered-target
-; RUN: opt -S --passes=slp-vectorizer -mtriple=x86_64-pc-windows-msvc19.34.0 < %s | FileCheck %s
-; RUN: opt -S --passes=slp-vectorizer -mtriple=aarch64-unknown-linux-gnu < %s | FileCheck %s
+; RUN: %if x86-registered-target %{ opt -S --passes=slp-vectorizer -mtriple=x86_64-pc-windows-msvc19.34.0 < %s | FileCheck %s %}
+; RUN: %if aarch64-registered-target %{ opt -S --passes=slp-vectorizer -mtriple=aarch64-unknown-linux-gnu < %s | FileCheck %s %}
define void @test(ptr %0, i8 %1, i1 %cmp12.i) {
; CHECK-LABEL: define void @test(
diff --git a/llvm/test/Transforms/SLPVectorizer/catchswitch.ll b/llvm/test/Transforms/SLPVectorizer/catchswitch.ll
index f228d19..2cd555f 100644
--- a/llvm/test/Transforms/SLPVectorizer/catchswitch.ll
+++ b/llvm/test/Transforms/SLPVectorizer/catchswitch.ll
@@ -1,7 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; REQUIRES: aarch64-registered-target, x86-registered-target
-; RUN: opt -passes=slp-vectorizer -S -mtriple=x86_64-pc-windows-msvc19.29.30145 < %s | FileCheck %s
-; RUN: opt -passes=slp-vectorizer -S -mtriple=aarch64-unknown-linux-gnu < %s | FileCheck %s
+; RUN: %if x86-registered-target %{ opt -passes=slp-vectorizer -S -mtriple=x86_64-pc-windows-msvc19.29.30145 < %s | FileCheck %s %}
+; RUN: %if aarch64-registered-target %{ opt -passes=slp-vectorizer -S -mtriple=aarch64-unknown-linux-gnu < %s | FileCheck %s %}
; This used to crash in SLP vectorization when attempting to set the
; IRBuilder's insertion point to the end of a catchswitch block, which
diff --git a/llvm/test/Transforms/SLPVectorizer/crash_exceed_scheduling.ll b/llvm/test/Transforms/SLPVectorizer/crash_exceed_scheduling.ll
index 58a4184..793d089 100644
--- a/llvm/test/Transforms/SLPVectorizer/crash_exceed_scheduling.ll
+++ b/llvm/test/Transforms/SLPVectorizer/crash_exceed_scheduling.ll
@@ -1,7 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; REQUIRES: aarch64-registered-target, x86-registered-target
-; RUN: opt < %s -passes=slp-vectorizer -slp-min-tree-size=2 -slp-threshold=-1000 -slp-max-look-ahead-depth=1 -slp-schedule-budget=27 -S -mtriple=x86_64-unknown-linux-gnu | FileCheck %s
-; RUN: opt < %s -passes=slp-vectorizer -slp-min-tree-size=2 -slp-threshold=-1000 -slp-max-look-ahead-depth=1 -slp-schedule-budget=27 -S -mtriple=aarch64-unknown-linux-gnu | FileCheck %s
+; RUN: %if x86-registered-target %{ opt < %s -passes=slp-vectorizer -slp-min-tree-size=2 -slp-threshold=-1000 -slp-max-look-ahead-depth=1 -slp-schedule-budget=27 -S -mtriple=x86_64-unknown-linux-gnu | FileCheck %s %}
+; RUN: %if aarch64-registered-target %{ opt < %s -passes=slp-vectorizer -slp-min-tree-size=2 -slp-threshold=-1000 -slp-max-look-ahead-depth=1 -slp-schedule-budget=27 -S -mtriple=aarch64-unknown-linux-gnu | FileCheck %s %}
define void @exceed(double %0, double %1) {
; CHECK-LABEL: @exceed(
diff --git a/llvm/test/Transforms/SLPVectorizer/diamond_broadcast.ll b/llvm/test/Transforms/SLPVectorizer/diamond_broadcast.ll
index 3b8ced8..6fe286f 100644
--- a/llvm/test/Transforms/SLPVectorizer/diamond_broadcast.ll
+++ b/llvm/test/Transforms/SLPVectorizer/diamond_broadcast.ll
@@ -1,7 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; REQUIRES: aarch64-registered-target, x86-registered-target
-; RUN: opt < %s -passes=slp-vectorizer -S -mtriple=x86_64-unknown-linux -slp-threshold=-1 | FileCheck %s
-; RUN: opt < %s -passes=slp-vectorizer -S -mtriple=aarch64-unknown-linux-gnu -slp-threshold=-1 | FileCheck %s
+; RUN: %if x86-registered-target %{ opt < %s -passes=slp-vectorizer -S -mtriple=x86_64-unknown-linux -slp-threshold=-1 | FileCheck %s %}
+; RUN: %if aarch64-registered-target %{ opt < %s -passes=slp-vectorizer -S -mtriple=aarch64-unknown-linux-gnu -slp-threshold=-1 | FileCheck %s %}
define i32 @diamond_broadcast(ptr noalias nocapture %B, ptr noalias nocapture %A) {
; CHECK-LABEL: @diamond_broadcast(
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/ext-int-reduced-not-operand.ll b/llvm/test/Transforms/SLPVectorizer/ext-int-reduced-not-operand.ll
index 2ff6785..d802153 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/ext-int-reduced-not-operand.ll
+++ b/llvm/test/Transforms/SLPVectorizer/ext-int-reduced-not-operand.ll
@@ -1,7 +1,10 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
-; RUN: opt -S --passes=slp-vectorizer -mtriple=x86_64-unknown-linux-gnu -slp-threshold=-99999 < %s | FileCheck %s
-; RUN: opt -S --passes=slp-vectorizer -mtriple=x86_64-unknown-linux-gnu -slp-threshold=-99999\
-; RUN: -slp-skip-early-profitability-check < %s | FileCheck %s --check-prefixes=FORCED
+; RUN: %if x86-registered-target %{ opt -S --passes=slp-vectorizer -mtriple=x86_64-unknown-linux-gnu -slp-threshold=-99999 < %s | FileCheck %s %}
+; RUN: %if x86-registered-target %{ opt -S --passes=slp-vectorizer -mtriple=x86_64-unknown-linux-gnu -slp-threshold=-99999\
+; RUN: -slp-skip-early-profitability-check < %s | FileCheck %s --check-prefixes=FORCED %}
+; RUN: %if aarch64-registered-target %{ opt -S --passes=slp-vectorizer -mtriple=aarch64-unknown-linux-gnu -slp-threshold=-99999 < %s | FileCheck %s %}
+; RUN: %if aarch64-registered-target %{ opt -S --passes=slp-vectorizer -mtriple=aarch64-unknown-linux-gnu -slp-threshold=-99999\
+; RUN: -slp-skip-early-profitability-check < %s | FileCheck %s --check-prefixes=FORCED %}
define i64 @wombat() {
; FORCED-LABEL: define i64 @wombat() {
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/extended-vectorized-gathered-inst.ll b/llvm/test/Transforms/SLPVectorizer/extended-vectorized-gathered-inst.ll
index 2d02806..94aa8de 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/extended-vectorized-gathered-inst.ll
+++ b/llvm/test/Transforms/SLPVectorizer/extended-vectorized-gathered-inst.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
-; RUN: opt -S --passes=slp-vectorizer -mtriple=x86_64-unknown-linux < %s | FileCheck %s
+; RUN: %if x86-registered-target %{ opt -S --passes=slp-vectorizer -mtriple=x86_64-unknown-linux < %s | FileCheck %s %}
+; RUN: %if aarch64-registered-target %{ opt -S --passes=slp-vectorizer -mtriple=aarch64-unknown-linux < %s | FileCheck %s %}
define void @test(ptr %top) {
; CHECK-LABEL: define void @test(
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/external-user-instruction-minbitwidth.ll b/llvm/test/Transforms/SLPVectorizer/external-user-instruction-minbitwidth.ll
index f58379b..07dab9f 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/external-user-instruction-minbitwidth.ll
+++ b/llvm/test/Transforms/SLPVectorizer/external-user-instruction-minbitwidth.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
-; RUN: opt -S --passes=slp-vectorizer -mtriple=x86_64-unknown-linux-gnu < %s | FileCheck %s
+; RUN: %if x86-registered-target %{ opt -S --passes=slp-vectorizer -mtriple=x86_64-unknown-linux-gnu < %s | FileCheck %s %}
+; RUN: %if aarch64-registered-target %{ opt -S --passes=slp-vectorizer -mtriple=aarch64-unknown-linux-gnu < %s | FileCheck %s %}
@e = global i8 0
@c = global i16 0
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/extract-many-users-buildvector.ll b/llvm/test/Transforms/SLPVectorizer/extract-many-users-buildvector.ll
index 87b1302e..261ec2b 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/extract-many-users-buildvector.ll
+++ b/llvm/test/Transforms/SLPVectorizer/extract-many-users-buildvector.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 2
-; RUN: opt -S -passes=slp-vectorizer -mtriple=x86_64-unknown-linux-gnu < %s | FileCheck %s
+; RUN: %if x86-registered-target %{ opt -S -passes=slp-vectorizer -mtriple=x86_64-unknown-linux-gnu < %s | FileCheck %s %}
+; RUN: %if aarch64-registered-target %{ opt -S -passes=slp-vectorizer -mtriple=aarch64-unknown-linux-gnu < %s | FileCheck %s %}
define i1 @test(float %0, double %1) {
; CHECK-LABEL: define i1 @test
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/extractelement-insertpoint.ll b/llvm/test/Transforms/SLPVectorizer/extractelement-insertpoint.ll
index 8c51a90..94c361ca 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/extractelement-insertpoint.ll
+++ b/llvm/test/Transforms/SLPVectorizer/extractelement-insertpoint.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -passes=slp-vectorizer -mtriple=x86_64-grtev4-linux-gnu -o - < %s | FileCheck %s
+; RUN: %if x86-registered-target %{ opt -S -passes=slp-vectorizer -mtriple=x86_64-grtev4-linux-gnu -o - < %s | FileCheck %s %}
+; RUN: %if aarch64-registered-target %{ opt -S -passes=slp-vectorizer -mtriple=aarch64-unknown-linux-gnu -o - < %s | FileCheck %s %}
define i32 @crash() {
; CHECK-LABEL: @crash(
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/extractlements-gathered-first-node.ll b/llvm/test/Transforms/SLPVectorizer/extractlements-gathered-first-node.ll
index d5f2cf7f..9afd47e 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/extractlements-gathered-first-node.ll
+++ b/llvm/test/Transforms/SLPVectorizer/extractlements-gathered-first-node.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
-; RUN: opt -S --passes=slp-vectorizer -slp-threshold=-99999 -mtriple=x86_64-unknown-linux-gnu < %s | FileCheck %s
+; RUN: %if x86-registered-target %{ opt -S --passes=slp-vectorizer -slp-threshold=-99999 -mtriple=x86_64-unknown-linux-gnu < %s | FileCheck %s %}
+; RUN: %if aarch64-registered-target %{ opt -S --passes=slp-vectorizer -slp-threshold=-99999 -mtriple=aarch64-unknown-linux-gnu < %s | FileCheck %s %}
define void @test() {
; CHECK-LABEL: define void @test() {
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/extracts-with-undefs.ll b/llvm/test/Transforms/SLPVectorizer/extracts-with-undefs.ll
index b6de2d4..dca34b6 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/extracts-with-undefs.ll
+++ b/llvm/test/Transforms/SLPVectorizer/extracts-with-undefs.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt < %s -passes=slp-vectorizer -S -mtriple=x86_64-unknown-linux-gnu | FileCheck %s
+; RUN: %if x86-registered-target %{ opt < %s -passes=slp-vectorizer -S -mtriple=x86_64-unknown-linux-gnu | FileCheck %s %}
+; RUN: %if aarch64-registered-target %{ opt < %s -passes=slp-vectorizer -S -mtriple=aarch64-unknown-linux-gnu | FileCheck %s %}
define void @test() {
; CHECK-LABEL: @test(
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/gather_extract_from_vectorbuild.ll b/llvm/test/Transforms/SLPVectorizer/gather_extract_from_vectorbuild.ll
index dd5c52b..c1ec9b8 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/gather_extract_from_vectorbuild.ll
+++ b/llvm/test/Transforms/SLPVectorizer/gather_extract_from_vectorbuild.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
-; RUN: opt -passes=slp-vectorizer -S -mtriple=x86_64 < %s | FileCheck %s
+; RUN: %if x86-registered-target %{ opt -passes=slp-vectorizer -S -mtriple=x86_64 < %s | FileCheck %s %}
+; RUN: %if aarch64-registered-target %{ opt -passes=slp-vectorizer -S -mtriple=aarch64 < %s | FileCheck %s %}
; The vectorization tree is rooted at a vector build sequence (insertelement).
; SLP crashed when generating vector code for the pair {%i4, 0.0}, trying to
; produce a shuffle with %ins1 as a source because %ins1 was marked deleted.
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/gep-with-extractelement-many-users.ll b/llvm/test/Transforms/SLPVectorizer/gep-with-extractelement-many-users.ll
index cea95c1..996ed87 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/gep-with-extractelement-many-users.ll
+++ b/llvm/test/Transforms/SLPVectorizer/gep-with-extractelement-many-users.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
-; RUN: opt -S --passes=slp-vectorizer -mtriple=x86_64-unknown-linux-gnu -slp-threshold=-99999 < %s | FileCheck %s
+; RUN: %if x86-registered-target %{ opt -S --passes=slp-vectorizer -mtriple=x86_64-unknown-linux-gnu -slp-threshold=-99999 < %s | FileCheck %s %}
+; RUN: %if aarch64-registered-target %{ opt -S --passes=slp-vectorizer -mtriple=aarch64-unknown-linux-gnu -slp-threshold=-99999 < %s | FileCheck %s %}
define void @test() {
; CHECK-LABEL: define void @test() {
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/insert-crash-index.ll b/llvm/test/Transforms/SLPVectorizer/insert-crash-index.ll
index 9d7ba5a..e934197 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/insert-crash-index.ll
+++ b/llvm/test/Transforms/SLPVectorizer/insert-crash-index.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -passes=slp-vectorizer < %s | FileCheck %s
+; RUN: %if x86-registered-target %{ opt -S -passes=slp-vectorizer -mtriple=x86_64-unknown-linux-gnu < %s | FileCheck %s %}
+; RUN: %if aarch64-registered-target %{ opt -S -passes=slp-vectorizer -mtriple=aarch64-unknown-linux-gnu < %s | FileCheck %s %}
; These all crashed before the patch.
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/insert-element-build-vector-const-undef.ll b/llvm/test/Transforms/SLPVectorizer/insert-element-build-vector-const-undef.ll
index d9f3966..48b5145 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/insert-element-build-vector-const-undef.ll
+++ b/llvm/test/Transforms/SLPVectorizer/insert-element-build-vector-const-undef.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -passes=slp-vectorizer -slp-threshold=0 < %s | FileCheck %s
+; RUN: %if x86-registered-target %{ opt -S -passes=slp-vectorizer -slp-threshold=0 -mtriple=x86_64-unknown-linux-gnu < %s | FileCheck %s %}
+; RUN: %if aarch64-registered-target %{ opt -S -passes=slp-vectorizer -slp-threshold=0 -mtriple=aarch64-unknown-linux-gnu < %s | FileCheck %s %}
define <4 x float> @simple_select(<4 x float> %a, <4 x float> %b, <4 x i32> %c) {
; CHECK-LABEL: @simple_select(
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/insert-element-build-vector-inseltpoison.ll b/llvm/test/Transforms/SLPVectorizer/insert-element-build-vector-inseltpoison.ll
index fd9528a..5f02b00 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/insert-element-build-vector-inseltpoison.ll
+++ b/llvm/test/Transforms/SLPVectorizer/insert-element-build-vector-inseltpoison.ll
@@ -1,10 +1,10 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -passes=slp-vectorizer -slp-threshold=-10000 < %s | FileCheck %s --check-prefixes=CHECK,THRESHOLD
-; RUN: opt -S -passes=slp-vectorizer -slp-threshold=0 < %s | FileCheck %s --check-prefixes=CHECK,NOTHRESHOLD
-; RUN: opt -S -passes=slp-vectorizer -slp-threshold=-10000 -slp-min-tree-size=0 < %s | FileCheck %s --check-prefixes=CHECK,MINTREESIZE
-
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-n8:16:32:64-S128"
-target triple = "x86_64-apple-macosx10.8.0"
+; RUN: %if x86-registered-target %{ opt -S -passes=slp-vectorizer -slp-threshold=-10000 -mtriple=x86_64-unknown-unknown < %s | FileCheck %s --check-prefixes=CHECK,THRESHOLD %}
+; RUN: %if x86-registered-target %{ opt -S -passes=slp-vectorizer -slp-threshold=0 -mtriple=x86_64-unknown-unknown < %s | FileCheck %s --check-prefixes=CHECK,NOTHRESHOLD %}
+; RUN: %if x86-registered-target %{ opt -S -passes=slp-vectorizer -slp-threshold=-10000 -slp-min-tree-size=0 -mtriple=x86_64-unknown-unknown < %s | FileCheck %s --check-prefixes=CHECK,MINTREESIZE %}
+; RUN: %if aarch64-registered-target %{ opt -S -passes=slp-vectorizer -slp-threshold=-10000 -mtriple=aarch64-unknown-linux-gnu < %s | FileCheck %s --check-prefixes=CHECK,THRESHOLD %}
+; RUN: %if aarch64-registered-target %{ opt -S -passes=slp-vectorizer -slp-threshold=0 -mtriple=aarch64-unknown-linux-gnu < %s | FileCheck %s --check-prefixes=CHECK,NOTHRESHOLD %}
+; RUN: %if aarch64-registered-target %{ opt -S -passes=slp-vectorizer -slp-threshold=-10000 -slp-min-tree-size=0 -mtriple=aarch64-unknown-linux-gnu < %s | FileCheck %s --check-prefixes=CHECK,MINTREESIZE %}
define <4 x float> @simple_select(<4 x float> %a, <4 x float> %b, <4 x i32> %c) #0 {
; CHECK-LABEL: @simple_select(
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/insert-element-build-vector.ll b/llvm/test/Transforms/SLPVectorizer/insert-element-build-vector.ll
index 18d5b09..63d55f7 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/insert-element-build-vector.ll
+++ b/llvm/test/Transforms/SLPVectorizer/insert-element-build-vector.ll
@@ -1,10 +1,10 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -passes=slp-vectorizer -slp-threshold=-10000 < %s | FileCheck %s --check-prefixes=CHECK,THRESHOLD
-; RUN: opt -S -passes=slp-vectorizer -slp-threshold=0 < %s | FileCheck %s --check-prefixes=CHECK,NOTHRESHOLD
-; RUN: opt -S -passes=slp-vectorizer -slp-threshold=-10000 -slp-min-tree-size=0 < %s | FileCheck %s --check-prefixes=CHECK,MINTREESIZE
-
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-n8:16:32:64-S128"
-target triple = "x86_64-apple-macosx10.8.0"
+; RUN: %if x86-registered-target %{ opt -S -passes=slp-vectorizer -slp-threshold=-10000 -mtriple=x86_64-unknown-unknown < %s | FileCheck %s --check-prefixes=CHECK,THRESHOLD %}
+; RUN: %if x86-registered-target %{ opt -S -passes=slp-vectorizer -slp-threshold=0 -mtriple=x86_64-unknown-unknown < %s | FileCheck %s --check-prefixes=CHECK,NOTHRESHOLD %}
+; RUN: %if x86-registered-target %{ opt -S -passes=slp-vectorizer -slp-threshold=-10000 -slp-min-tree-size=0 -mtriple=x86_64-unknown-unknown < %s | FileCheck %s --check-prefixes=CHECK,MINTREESIZE %}
+; RUN: %if aarch64-registered-target %{ opt -S -passes=slp-vectorizer -slp-threshold=-10000 -mtriple=aarch64-unknown-linux-gnu < %s | FileCheck %s --check-prefixes=CHECK,THRESHOLD %}
+; RUN: %if aarch64-registered-target %{ opt -S -passes=slp-vectorizer -slp-threshold=0 -mtriple=aarch64-unknown-linux-gnu < %s | FileCheck %s --check-prefixes=CHECK,NOTHRESHOLD %}
+; RUN: %if aarch64-registered-target %{ opt -S -passes=slp-vectorizer -slp-threshold=-10000 -slp-min-tree-size=0 -mtriple=aarch64-unknown-linux-gnu < %s | FileCheck %s --check-prefixes=CHECK,MINTREESIZE %}
define <4 x float> @simple_select(<4 x float> %a, <4 x float> %b, <4 x i32> %c) #0 {
; CHECK-LABEL: @simple_select(
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/insert-element-multiple-uses.ll b/llvm/test/Transforms/SLPVectorizer/insert-element-multiple-uses.ll
index 1b684e9..0059a5fe 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/insert-element-multiple-uses.ll
+++ b/llvm/test/Transforms/SLPVectorizer/insert-element-multiple-uses.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -passes=slp-vectorizer < %s | FileCheck %s
+; RUN: %if x86-registered-target %{ opt -S -passes=slp-vectorizer -mtriple=x86_64-unknown-linux-gnu < %s | FileCheck %s %}
+; RUN: %if aarch64-registered-target %{ opt -S -passes=slp-vectorizer -mtriple=aarch64-unknown-linux-gnu < %s | FileCheck %s %}
define void @main() {
; CHECK-LABEL: @main(
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/insertelement-postpone.ll b/llvm/test/Transforms/SLPVectorizer/insertelement-postpone.ll
index 3fc6fe9..1e4b598 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/insertelement-postpone.ll
+++ b/llvm/test/Transforms/SLPVectorizer/insertelement-postpone.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -passes=slp-vectorizer -mtriple x86_64-unknown-linux-gnu < %s | FileCheck %s
+; RUN: %if x86-registered-target %{ opt -S -passes=slp-vectorizer -mtriple x86_64-unknown-linux-gnu < %s | FileCheck %s %}
+; RUN: %if aarch64-registered-target %{ opt -S -passes=slp-vectorizer -mtriple aarch64-unknown-linux-gnu < %s | FileCheck %s %}
define <4 x double> @test(ptr %p2, double %i1754, double %i1781, double %i1778) {
; CHECK-LABEL: @test(
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/insertelement-uses-vectorized-index.ll b/llvm/test/Transforms/SLPVectorizer/insertelement-uses-vectorized-index.ll
index 78b3f8b..94f973e 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/insertelement-uses-vectorized-index.ll
+++ b/llvm/test/Transforms/SLPVectorizer/insertelement-uses-vectorized-index.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
-; RUN: opt -S --passes=slp-vectorizer -slp-threshold=-10 < %s -mtriple=x86_64-pc-windows-msvc19.39.33523 | FileCheck %s
+; RUN: %if x86-registered-target %{ opt -S --passes=slp-vectorizer -slp-threshold=-10 < %s -mtriple=x86_64-pc-windows-msvc19.39.33523 | FileCheck %s %}
+; RUN: %if aarch64-registered-target %{ opt -S --passes=slp-vectorizer -slp-threshold=-10 < %s -mtriple=aarch64-unknown-linux-gnu | FileCheck %s %}
define void @test(ptr %0) {
; CHECK-LABEL: define void @test(
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/int-bitcast-minbitwidth.ll b/llvm/test/Transforms/SLPVectorizer/int-bitcast-minbitwidth.ll
index 97e505f..766decc 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/int-bitcast-minbitwidth.ll
+++ b/llvm/test/Transforms/SLPVectorizer/int-bitcast-minbitwidth.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
-; RUN: opt -S --passes=slp-vectorizer -mtriple=x86_64-unknown-linux-gnu -slp-threshold=-9 < %s | FileCheck %s
+; RUN: %if x86-registered-target %{ opt -S --passes=slp-vectorizer -mtriple=x86_64-unknown-linux-gnu -slp-threshold=-9 < %s | FileCheck %s %}
+; RUN: %if aarch64-registered-target %{ opt -S --passes=slp-vectorizer -mtriple=aarch64-unknown-linux-gnu -slp-threshold=-9 < %s | FileCheck %s %}
define void @t(i64 %v) {
; CHECK-LABEL: define void @t(
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/jumbled_store_crash.ll b/llvm/test/Transforms/SLPVectorizer/jumbled_store_crash.ll
index bb1aac8..f53e22d 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/jumbled_store_crash.ll
+++ b/llvm/test/Transforms/SLPVectorizer/jumbled_store_crash.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt --passes=slp-vectorizer -mtriple=x86_64-unknown-linux-gnu -o - -S < %s | FileCheck %s
+; RUN: %if x86-registered-target %{ opt --passes=slp-vectorizer -mtriple=x86_64-unknown-linux-gnu -o - -S < %s | FileCheck %s %}
+; RUN: %if aarch64-registered-target %{ opt --passes=slp-vectorizer -mtriple=aarch64-unknown-linux-gnu -o - -S < %s | FileCheck %s %}
@b = common dso_local global ptr null, align 8
@e = common dso_local global float 0.000000e+00, align 4
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/minbitwidth-multiuse-with-insertelement.ll b/llvm/test/Transforms/SLPVectorizer/minbitwidth-multiuse-with-insertelement.ll
index 6051638..97341d1 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/minbitwidth-multiuse-with-insertelement.ll
+++ b/llvm/test/Transforms/SLPVectorizer/minbitwidth-multiuse-with-insertelement.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
-; RUN: opt -passes=slp-vectorizer -S -slp-threshold=-10 -mtriple=x86_64-unknown-linux-gnu < %s | FileCheck %s
+; RUN: %if x86-registered-target %{ opt -passes=slp-vectorizer -S -slp-threshold=-10 -mtriple=x86_64-unknown-linux-gnu < %s | FileCheck %s %}
+; RUN: %if aarch64-registered-target %{ opt -passes=slp-vectorizer -S -slp-threshold=-10 -mtriple=aarch64-unknown-linux-gnu < %s | FileCheck %s %}
define void @test(i8 %0) {
; CHECK-LABEL: define void @test(
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/minbitwidth-node-with-multi-users.ll b/llvm/test/Transforms/SLPVectorizer/minbitwidth-node-with-multi-users.ll
index ef07e33..a7f8629 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/minbitwidth-node-with-multi-users.ll
+++ b/llvm/test/Transforms/SLPVectorizer/minbitwidth-node-with-multi-users.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
-; RUN: opt -S --passes=slp-vectorizer -mtriple=x86_64-unknown-linux-gnu < %s | FileCheck %s
+; RUN: %if x86-registered-target %{ opt -S --passes=slp-vectorizer -mtriple=x86_64-unknown-linux-gnu < %s | FileCheck %s %}
+; RUN: %if aarch64-registered-target %{ opt -S --passes=slp-vectorizer -mtriple=aarch64-unknown-linux-gnu < %s | FileCheck %s %}
define void @test() {
; CHECK-LABEL: define void @test() {
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/minbitwidth-user-not-min.ll b/llvm/test/Transforms/SLPVectorizer/minbitwidth-user-not-min.ll
index 6922df8..28def69 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/minbitwidth-user-not-min.ll
+++ b/llvm/test/Transforms/SLPVectorizer/minbitwidth-user-not-min.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
-; RUN: opt -S --passes=slp-vectorizer -mtriple=x86_64-unknown-linux-gnu < %s | FileCheck %s
+; RUN: %if x86-registered-target %{ opt -S --passes=slp-vectorizer -mtriple=x86_64-unknown-linux-gnu < %s | FileCheck %s %}
+; RUN: %if aarch64-registered-target %{ opt -S --passes=slp-vectorizer -mtriple=aarch64-unknown-linux-gnu < %s | FileCheck %s %}
define void @test(ptr %block, ptr noalias %pixels, i1 %b) {
; CHECK-LABEL: define void @test(
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/multi-node-vectorized-insts.ll b/llvm/test/Transforms/SLPVectorizer/multi-node-vectorized-insts.ll
index 3b54b0a..8abc6ef 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/multi-node-vectorized-insts.ll
+++ b/llvm/test/Transforms/SLPVectorizer/multi-node-vectorized-insts.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 3
-; RUN: opt -passes=slp-vectorizer -S -mtriple=x86_64-unknown-linux-gnu < %s | FileCheck %s
+; RUN: %if x86-registered-target %{ opt -passes=slp-vectorizer -S -mtriple=x86_64-unknown-linux-gnu < %s | FileCheck %s %}
+; RUN: %if aarch64-registered-target %{ opt -passes=slp-vectorizer -S -mtriple=aarch64-unknown-linux-gnu < %s | FileCheck %s %}
define void @test(double %0) {
; CHECK-LABEL: define void @test(
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/multi-uses-with-deps-in-first.ll b/llvm/test/Transforms/SLPVectorizer/multi-uses-with-deps-in-first.ll
index 998efaa..a894259 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/multi-uses-with-deps-in-first.ll
+++ b/llvm/test/Transforms/SLPVectorizer/multi-uses-with-deps-in-first.ll
@@ -1,5 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 3
-; RUN: opt -S --passes=slp-vectorizer -mtriple=x86_64-pc-linux-gnu < %s | FileCheck %s
+; RUN: %if x86-registered-target %{ opt -S --passes=slp-vectorizer -mtriple=x86_64-pc-linux-gnu < %s | FileCheck %s %}
+; RUN: %if aarch64-registered-target %{ opt -S --passes=slp-vectorizer -mtriple=aarch64-unknown-linux-gnu < %s | FileCheck %s %}
+
define void @test(double %add) {
; CHECK-LABEL: define void @test(
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/one-element-vector.ll b/llvm/test/Transforms/SLPVectorizer/one-element-vector.ll
index 5380b82..cab9188 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/one-element-vector.ll
+++ b/llvm/test/Transforms/SLPVectorizer/one-element-vector.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 3
-; RUN: opt -S --passes=slp-vectorizer < %s | FileCheck %s
+; RUN: %if x86-registered-target %{ opt -S --passes=slp-vectorizer -mtriple=x86_64-unknown-linux-gnu < %s | FileCheck %s %}
+; RUN: %if aarch64-registered-target %{ opt -S --passes=slp-vectorizer -mtriple=aarch64-unknown-linux-gnu < %s | FileCheck %s %}
define void @test() {
; CHECK-LABEL: define void @test() {
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/peek-through-shuffle.ll b/llvm/test/Transforms/SLPVectorizer/peek-through-shuffle.ll
index c157f61..839c1eb 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/peek-through-shuffle.ll
+++ b/llvm/test/Transforms/SLPVectorizer/peek-through-shuffle.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -passes=slp-vectorizer < %s -mtriple=x86_64-unknown-linux-gnu -o - | FileCheck %s
+; RUN: %if x86-registered-target %{ opt -S -passes=slp-vectorizer < %s -mtriple=x86_64-unknown-linux-gnu -o - | FileCheck %s %}
+; RUN: %if aarch64-registered-target %{ opt -S -passes=slp-vectorizer < %s -mtriple=aarch64-unknown-linux-gnu -o - | FileCheck %s %}
define void @foo(ptr %0, <4 x float> %1) {
; CHECK-LABEL: @foo(
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/phi-node-bitwidt-op-not.ll b/llvm/test/Transforms/SLPVectorizer/phi-node-bitwidt-op-not.ll
index f376ca7..2037e0d6 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/phi-node-bitwidt-op-not.ll
+++ b/llvm/test/Transforms/SLPVectorizer/phi-node-bitwidt-op-not.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
-; RUN: opt -S -passes=slp-vectorizer -mtriple=x86_64-unknown-linux-gnu < %s | FileCheck %s
+; RUN: %if x86-registered-target %{ opt -S -passes=slp-vectorizer -mtriple=x86_64-unknown-linux-gnu < %s | FileCheck %s %}
+; RUN: %if aarch64-registered-target %{ opt -S -passes=slp-vectorizer -mtriple=aarch64-unknown-linux-gnu < %s | FileCheck %s %}
define i32 @test(ptr %b, ptr %c, i32 %0, ptr %a, i1 %tobool3.not) {
; CHECK-LABEL: define i32 @test(
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/phi-undef-input.ll b/llvm/test/Transforms/SLPVectorizer/phi-undef-input.ll
index 3cc32c1..b9802a0 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/phi-undef-input.ll
+++ b/llvm/test/Transforms/SLPVectorizer/phi-undef-input.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt < %s -passes=slp-vectorizer -slp-threshold=-1000 -mtriple=x86_64 -S | FileCheck %s
+; RUN: %if x86-registered-target %{ opt < %s -passes=slp-vectorizer -slp-threshold=-1000 -mtriple=x86_64 -S | FileCheck %s %}
+; RUN: %if aarch64-registered-target %{ opt < %s -passes=slp-vectorizer -slp-threshold=-1000 -mtriple=aarch64-unknown-linux-gnu -S | FileCheck %s %}
; The inputs to vector phi should remain undef.
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/postponed_gathers.ll b/llvm/test/Transforms/SLPVectorizer/postponed_gathers.ll
index 488ca0b..f6bed79 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/postponed_gathers.ll
+++ b/llvm/test/Transforms/SLPVectorizer/postponed_gathers.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 2
-; RUN: opt < %s -passes=slp-vectorizer -slp-threshold=-10 -mtriple=x86_64-unknown-linux-gnu -S | FileCheck %s
+; RUN: %if x86-registered-target %{ opt < %s -passes=slp-vectorizer -slp-threshold=-10 -mtriple=x86_64-unknown-linux-gnu -S | FileCheck %s %}
+; RUN: %if aarch64-registered-target %{ opt < %s -passes=slp-vectorizer -slp-threshold=-10 -mtriple=aarch64-unknown-linux-gnu -S | FileCheck %s %}
define void @foo() {
; CHECK-LABEL: define void @foo() {
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/pr31599-inseltpoison.ll b/llvm/test/Transforms/SLPVectorizer/pr31599-inseltpoison.ll
index 5506f61..fe5871d 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/pr31599-inseltpoison.ll
+++ b/llvm/test/Transforms/SLPVectorizer/pr31599-inseltpoison.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -passes=slp-vectorizer -S -mtriple=x86_64-unknown-linux-gnu < %s | FileCheck %s
+; RUN: %if x86-registered-target %{ opt -passes=slp-vectorizer -S -mtriple=x86_64-unknown-linux-gnu < %s | FileCheck %s %}
+; RUN: %if aarch64-registered-target %{ opt -passes=slp-vectorizer -S -mtriple=aarch64-unknown-linux-gnu < %s | FileCheck %s %}
define <2 x float> @foo() {
; CHECK-LABEL: @foo(
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/pr31599.ll b/llvm/test/Transforms/SLPVectorizer/pr31599.ll
index 348656e..10b9b22 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/pr31599.ll
+++ b/llvm/test/Transforms/SLPVectorizer/pr31599.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -passes=slp-vectorizer -S -mtriple=x86_64-unknown-linux-gnu < %s | FileCheck %s
+; RUN: %if x86-registered-target %{ opt -passes=slp-vectorizer -S -mtriple=x86_64-unknown-linux-gnu < %s | FileCheck %s %}
+; RUN: %if aarch64-registered-target %{ opt -passes=slp-vectorizer -S -mtriple=aarch64-unknown-linux-gnu < %s | FileCheck %s %}
define <2 x float> @foo() {
; CHECK-LABEL: @foo(
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/reduction-gather-non-scheduled-extracts.ll b/llvm/test/Transforms/SLPVectorizer/reduction-gather-non-scheduled-extracts.ll
index 03c8767..f1034f3 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/reduction-gather-non-scheduled-extracts.ll
+++ b/llvm/test/Transforms/SLPVectorizer/reduction-gather-non-scheduled-extracts.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 3
-; RUN: opt -S --passes=slp-vectorizer -mtriple=x86_64-sie-ps5 < %s | FileCheck %s
+; RUN: %if x86-registered-target %{ opt -S --passes=slp-vectorizer -mtriple=x86_64-sie-ps5 < %s | FileCheck %s %}
+; RUN: %if aarch64-registered-target %{ opt -S --passes=slp-vectorizer -mtriple=aarch64-unknown-linux-gnu < %s | FileCheck %s %}
define void @tes() {
; CHECK-LABEL: define void @tes() {
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/reduction-modified-values.ll b/llvm/test/Transforms/SLPVectorizer/reduction-modified-values.ll
index dbf490c..be9318e 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/reduction-modified-values.ll
+++ b/llvm/test/Transforms/SLPVectorizer/reduction-modified-values.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S --passes=slp-vectorizer -mtriple=x86_64-unknown-linux-gnu < %s | FileCheck %s
+; RUN: %if x86-registered-target %{ opt -S --passes=slp-vectorizer -mtriple=x86_64-unknown-linux-gnu < %s | FileCheck %s %}
+; RUN: %if aarch64-registered-target %{ opt -S --passes=slp-vectorizer -mtriple=aarch64-unknown-linux-gnu < %s | FileCheck %s %}
define i32 @test() {
; CHECK-LABEL: @test(
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/reorder-clustered-node.ll b/llvm/test/Transforms/SLPVectorizer/reorder-clustered-node.ll
index 1a6ff23..561182d 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/reorder-clustered-node.ll
+++ b/llvm/test/Transforms/SLPVectorizer/reorder-clustered-node.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -passes=slp-vectorizer -S < %s -mtriple=x86_64 -slp-threshold=-150 | FileCheck %s
+; RUN: %if x86-registered-target %{ opt -passes=slp-vectorizer -S < %s -mtriple=x86_64 -slp-threshold=-150 | FileCheck %s %}
+; RUN: %if aarch64-registered-target %{ opt -passes=slp-vectorizer -S < %s -mtriple=aarch64-unknown-linux-gnu -slp-threshold=-150 | FileCheck %s %}
define i1 @test(ptr %arg, ptr %i233, i64 %i241, ptr %i235, ptr %i237, ptr %i227) {
; CHECK-LABEL: @test(
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/reordered-top-scalars.ll b/llvm/test/Transforms/SLPVectorizer/reordered-top-scalars.ll
index 4517d27..1de5ee2 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/reordered-top-scalars.ll
+++ b/llvm/test/Transforms/SLPVectorizer/reordered-top-scalars.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S --passes=slp-vectorizer -mtriple=x86_64-unknown %s -slp-threshold=-5 | FileCheck %s
+; RUN: %if x86-registered-target %{ opt -S --passes=slp-vectorizer -mtriple=x86_64-unknown %s -slp-threshold=-5 | FileCheck %s %}
+; RUN: %if aarch64-registered-target %{ opt -S --passes=slp-vectorizer -mtriple=aarch64-unknown %s -slp-threshold=-5 | FileCheck %s %}
define i32 @test(ptr %isec) {
; CHECK-LABEL: @test(
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/reordering-single-phi.ll b/llvm/test/Transforms/SLPVectorizer/reordering-single-phi.ll
index bc1eaaa..a70daf9 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/reordering-single-phi.ll
+++ b/llvm/test/Transforms/SLPVectorizer/reordering-single-phi.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
-; RUN: opt -S --passes=slp-vectorizer -mtriple=x86_64-unknown-linux < %s | FileCheck %s
+; RUN: %if x86-registered-target %{ opt -S --passes=slp-vectorizer -mtriple=x86_64-unknown-linux < %s | FileCheck %s %}
+; RUN: %if aarch64-registered-target %{ opt -S --passes=slp-vectorizer -mtriple=aarch64-unknown-linux < %s | FileCheck %s %}
@a = external global [32000 x float], align 64
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/reused-buildvector-matching-vectorized-node.ll b/llvm/test/Transforms/SLPVectorizer/reused-buildvector-matching-vectorized-node.ll
index 2b425ee..3e00550 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/reused-buildvector-matching-vectorized-node.ll
+++ b/llvm/test/Transforms/SLPVectorizer/reused-buildvector-matching-vectorized-node.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 2
-; RUN: opt -S -passes=slp-vectorizer -mtriple=x86_64-unknown-linux-gnu < %s | FileCheck %s
+; RUN: %if x86-registered-target %{ opt -S -passes=slp-vectorizer -mtriple=x86_64-unknown-linux-gnu < %s | FileCheck %s %}
+; RUN: %if aarch64-registered-target %{ opt -S -passes=slp-vectorizer -mtriple=aarch64-unknown-linux-gnu < %s | FileCheck %s %}
define void @blam(ptr %arg, double %load2, i1 %fcmp3) {
; CHECK-LABEL: define void @blam
diff --git a/llvm/test/Transforms/SLPVectorizer/revec-fix-109835.ll b/llvm/test/Transforms/SLPVectorizer/revec-fix-109835.ll
new file mode 100644
index 0000000..965bfc7
--- /dev/null
+++ b/llvm/test/Transforms/SLPVectorizer/revec-fix-109835.ll
@@ -0,0 +1,70 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -passes=slp-vectorizer -S -slp-revec %s | FileCheck %s
+
+@b = external dso_local local_unnamed_addr global i64, align 8
+@d = external dso_local local_unnamed_addr global i32, align 4
+@c = external dso_local local_unnamed_addr global i32, align 4
+@a = external dso_local local_unnamed_addr global i8, align 2
+
+define void @e() {
+; CHECK-LABEL: @e(
+; CHECK-NEXT: vector.ph:
+; CHECK-NEXT: [[C_PROMOTED5:%.*]] = load i32, ptr @c, align 4
+; CHECK-NEXT: [[A_PROMOTED7:%.*]] = load i8, ptr @a, align 2
+; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <16 x i32> poison, i32 [[C_PROMOTED5]], i64 0
+; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <16 x i32> [[DOTSPLATINSERT]], <16 x i32> poison, <16 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP0:%.*]] = insertelement <16 x i8> <i8 poison, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison>, i8 [[A_PROMOTED7]], i64 0
+; CHECK-NEXT: [[TMP1:%.*]] = add <16 x i32> [[DOTSPLAT]], <i32 -6, i32 3, i32 12, i32 21, i32 30, i32 39, i32 48, i32 57, i32 66, i32 75, i32 84, i32 93, i32 102, i32 111, i32 120, i32 129>
+; CHECK-NEXT: [[TMP2:%.*]] = add <16 x i32> [[DOTSPLAT]], <i32 -4, i32 5, i32 14, i32 23, i32 32, i32 41, i32 50, i32 59, i32 68, i32 77, i32 86, i32 95, i32 104, i32 113, i32 122, i32 131>
+; CHECK-NEXT: [[TMP3:%.*]] = add <16 x i32> [[DOTSPLAT]], <i32 -2, i32 7, i32 16, i32 25, i32 34, i32 43, i32 52, i32 61, i32 70, i32 79, i32 88, i32 97, i32 106, i32 115, i32 124, i32 133>
+; CHECK-NEXT: [[INDUCTION:%.*]] = add <16 x i32> [[DOTSPLAT]], <i32 0, i32 9, i32 18, i32 27, i32 36, i32 45, i32 54, i32 63, i32 72, i32 81, i32 90, i32 99, i32 108, i32 117, i32 126, i32 135>
+; CHECK-NEXT: [[TMP4:%.*]] = icmp ult <16 x i32> [[TMP1]], <i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2>
+; CHECK-NEXT: [[TMP5:%.*]] = icmp ult <16 x i32> [[TMP2]], <i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2>
+; CHECK-NEXT: [[TMP6:%.*]] = icmp ult <16 x i32> [[TMP3]], <i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2>
+; CHECK-NEXT: [[TMP7:%.*]] = icmp ult <16 x i32> [[INDUCTION]], <i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2>
+; CHECK-NEXT: [[TMP8:%.*]] = icmp eq <16 x i32> [[DOTSPLAT]], <i32 -1, i32 -10, i32 -19, i32 -28, i32 -37, i32 -46, i32 -55, i32 -64, i32 -73, i32 -82, i32 -91, i32 -100, i32 -109, i32 -118, i32 -127, i32 -136>
+; CHECK-NEXT: [[TMP9:%.*]] = or <16 x i1> [[TMP4]], [[TMP5]]
+; CHECK-NEXT: [[TMP10:%.*]] = or <16 x i1> [[TMP9]], [[TMP6]]
+; CHECK-NEXT: [[TMP11:%.*]] = or <16 x i1> [[TMP10]], [[TMP7]]
+; CHECK-NEXT: [[TMP12:%.*]] = or <16 x i1> [[TMP11]], [[TMP8]]
+; CHECK-NEXT: [[TMP13:%.*]] = zext <16 x i1> [[TMP12]] to <16 x i8>
+; CHECK-NEXT: [[TMP14:%.*]] = or <16 x i8> [[TMP0]], [[TMP13]]
+; CHECK-NEXT: [[TMP15:%.*]] = shufflevector <16 x i8> [[TMP14]], <16 x i8> <i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+; CHECK-NEXT: [[TMP16:%.*]] = call i8 @llvm.vector.reduce.or.v16i8(<16 x i8> [[TMP15]])
+; CHECK-NEXT: [[TMP17:%.*]] = add i32 [[C_PROMOTED5]], 81
+; CHECK-NEXT: store i64 -1, ptr @b, align 8
+; CHECK-NEXT: store i32 9, ptr @d, align 4
+; CHECK-NEXT: store i32 [[TMP17]], ptr @c, align 4
+; CHECK-NEXT: store i8 [[TMP16]], ptr @a, align 2
+; CHECK-NEXT: ret void
+;
+vector.ph:
+ %c.promoted5 = load i32, ptr @c, align 4
+ %a.promoted7 = load i8, ptr @a, align 2
+ %.splatinsert = insertelement <16 x i32> poison, i32 %c.promoted5, i64 0
+ %.splat = shufflevector <16 x i32> %.splatinsert, <16 x i32> poison, <16 x i32> zeroinitializer
+ %0 = insertelement <16 x i8> <i8 poison, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison>, i8 %a.promoted7, i64 0
+ %1 = add <16 x i32> %.splat, <i32 -6, i32 3, i32 12, i32 21, i32 30, i32 39, i32 48, i32 57, i32 66, i32 75, i32 84, i32 93, i32 102, i32 111, i32 120, i32 129>
+ %2 = add <16 x i32> %.splat, <i32 -4, i32 5, i32 14, i32 23, i32 32, i32 41, i32 50, i32 59, i32 68, i32 77, i32 86, i32 95, i32 104, i32 113, i32 122, i32 131>
+ %3 = add <16 x i32> %.splat, <i32 -2, i32 7, i32 16, i32 25, i32 34, i32 43, i32 52, i32 61, i32 70, i32 79, i32 88, i32 97, i32 106, i32 115, i32 124, i32 133>
+ %induction = add <16 x i32> %.splat, <i32 0, i32 9, i32 18, i32 27, i32 36, i32 45, i32 54, i32 63, i32 72, i32 81, i32 90, i32 99, i32 108, i32 117, i32 126, i32 135>
+ %4 = icmp ult <16 x i32> %1, <i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2>
+ %5 = icmp ult <16 x i32> %2, <i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2>
+ %6 = icmp ult <16 x i32> %3, <i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2>
+ %7 = icmp ult <16 x i32> %induction, <i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2>
+ %8 = icmp eq <16 x i32> %.splat, <i32 -1, i32 -10, i32 -19, i32 -28, i32 -37, i32 -46, i32 -55, i32 -64, i32 -73, i32 -82, i32 -91, i32 -100, i32 -109, i32 -118, i32 -127, i32 -136>
+ %9 = or <16 x i1> %4, %5
+ %10 = or <16 x i1> %9, %6
+ %11 = or <16 x i1> %10, %7
+ %12 = or <16 x i1> %11, %8
+ %13 = zext <16 x i1> %12 to <16 x i8>
+ %14 = or <16 x i8> %0, %13
+ %15 = shufflevector <16 x i8> %14, <16 x i8> <i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %16 = call i8 @llvm.vector.reduce.or.v16i8(<16 x i8> %15)
+ %17 = add i32 %c.promoted5, 81
+ store i64 -1, ptr @b, align 8
+ store i32 9, ptr @d, align 4
+ store i32 %17, ptr @c, align 4
+ store i8 %16, ptr @a, align 2
+ ret void
+}
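
Note: the new revec-fix-109835.ll test runs opt with -slp-revec, which, to our understanding, enables the SLP vectorizer's revectorization ("REVEC") mode: the existing <16 x ...> vector instructions in the input are treated as units that may be combined into wider vectors. The test has no target-specific RUN line, so it sits directly under SLPVectorizer/ rather than a target subdirectory.
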
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/root-trunc-extract-reuse.ll b/llvm/test/Transforms/SLPVectorizer/root-trunc-extract-reuse.ll
index af46b4f..34c06847 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/root-trunc-extract-reuse.ll
+++ b/llvm/test/Transforms/SLPVectorizer/root-trunc-extract-reuse.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -passes=slp-vectorizer -S -slp-threshold=-100 -mtriple=x86_64 < %s | FileCheck %s
+; RUN: %if x86-registered-target %{ opt -passes=slp-vectorizer -S -slp-threshold=-100 -mtriple=x86_64 < %s | FileCheck %s %}
+; RUN: %if aarch64-registered-target %{ opt -passes=slp-vectorizer -S -slp-threshold=-100 -mtriple=aarch64-unknown-linux-gnu < %s | FileCheck %s %}
define i1 @test() {
; CHECK-LABEL: @test(
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/same-scalar-in-same-phi-extract.ll b/llvm/test/Transforms/SLPVectorizer/same-scalar-in-same-phi-extract.ll
index f1be11d..fe08135 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/same-scalar-in-same-phi-extract.ll
+++ b/llvm/test/Transforms/SLPVectorizer/same-scalar-in-same-phi-extract.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
-; RUN: opt -S --passes=slp-vectorizer -slp-threshold=-99999 -mtriple=x86_64-unknown-linux-gnu < %s | FileCheck %s
+; RUN: %if x86-registered-target %{ opt -S --passes=slp-vectorizer -slp-threshold=-99999 -mtriple=x86_64-unknown-linux-gnu < %s | FileCheck %s %}
+; RUN: %if aarch64-registered-target %{ opt -S --passes=slp-vectorizer -slp-threshold=-99999 -mtriple=aarch64-unknown-linux-gnu < %s | FileCheck %s %}
define void @test(i32 %arg) {
; CHECK-LABEL: define void @test(
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/scalarazied-result.ll b/llvm/test/Transforms/SLPVectorizer/scalarazied-result.ll
index 1d6e191..2570cdb 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/scalarazied-result.ll
+++ b/llvm/test/Transforms/SLPVectorizer/scalarazied-result.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -passes=slp-vectorizer -mtriple=x86_64-unknown-linux-gnu -S < %s | FileCheck %s
+; RUN: %if x86-registered-target %{ opt -passes=slp-vectorizer -mtriple=x86_64-unknown-linux-gnu -S < %s | FileCheck %s %}
+; RUN: %if aarch64-registered-target %{ opt -passes=slp-vectorizer -mtriple=aarch64-unknown-linux-gnu -S < %s | FileCheck %s %}
define void @test() {
; CHECK-LABEL: @test(
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/scalarization-overhead.ll b/llvm/test/Transforms/SLPVectorizer/scalarization-overhead.ll
index 55e1558..9f6b285 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/scalarization-overhead.ll
+++ b/llvm/test/Transforms/SLPVectorizer/scalarization-overhead.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -mtriple=x86_64-- -passes=slp-vectorizer -S < %s | FileCheck %s
+; RUN: %if x86-registered-target %{ opt -mtriple=x86_64-- -passes=slp-vectorizer -S < %s | FileCheck %s %}
+; RUN: %if aarch64-registered-target %{ opt -mtriple=aarch64-- -passes=slp-vectorizer -S < %s | FileCheck %s %}
; Crash Test case reported on D134605
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/shrink_after_reorder2.ll b/llvm/test/Transforms/SLPVectorizer/shrink_after_reorder2.ll
index 9e3ba05..2f0bd4a 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/shrink_after_reorder2.ll
+++ b/llvm/test/Transforms/SLPVectorizer/shrink_after_reorder2.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -o - -passes=slp-vectorizer -mtriple=x86_64-unknown-linux-gnu < %s | FileCheck %s
+; RUN: %if x86-registered-target %{ opt -S -o - -passes=slp-vectorizer -mtriple=x86_64-unknown-linux-gnu < %s | FileCheck %s %}
+; RUN: %if aarch64-registered-target %{ opt -S -o - -passes=slp-vectorizer -mtriple=aarch64-unknown-linux-gnu < %s | FileCheck %s %}
%class.e = type { i32, i32 }
%struct.a = type { i32, i32, i32, i32 }
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/shuffle-multivector.ll b/llvm/test/Transforms/SLPVectorizer/shuffle-multivector.ll
index c255588..2253c70 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/shuffle-multivector.ll
+++ b/llvm/test/Transforms/SLPVectorizer/shuffle-multivector.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -passes=slp-vectorizer -S < %s -mtriple=x86_64-unknown-linux -slp-threshold=-163 | FileCheck %s
+; RUN: %if x86-registered-target %{ opt -passes=slp-vectorizer -S < %s -mtriple=x86_64-unknown-linux -slp-threshold=-163 | FileCheck %s %}
+; RUN: %if aarch64-registered-target %{ opt -passes=slp-vectorizer -S < %s -mtriple=aarch64-unknown-linux -slp-threshold=-163 | FileCheck %s %}
define void @test1(i128 %p0, i128 %p1, i128 %p2, i128 %p3, <4 x i128> %vec) {
; CHECK-LABEL: @test1(
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/shufflebuilder-bug.ll b/llvm/test/Transforms/SLPVectorizer/shufflebuilder-bug.ll
index 9db7d69..019c9ea 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/shufflebuilder-bug.ll
+++ b/llvm/test/Transforms/SLPVectorizer/shufflebuilder-bug.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 2
-; RUN: opt -S -p slp-vectorizer -mtriple=x86_64-- %s | FileCheck %s
+; RUN: %if x86-registered-target %{ opt -S -p slp-vectorizer -mtriple=x86_64-- %s | FileCheck %s %}
+; RUN: %if aarch64-registered-target %{ opt -S -p slp-vectorizer -mtriple=aarch64-unknown-linux-gnu %s | FileCheck %s %}
define void @foo(<4 x float> %vec, float %val, ptr %ptr) {
; CHECK-LABEL: define void @foo
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/stores-non-ordered.ll b/llvm/test/Transforms/SLPVectorizer/stores-non-ordered.ll
index a9748ca6..aaa6be7 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/stores-non-ordered.ll
+++ b/llvm/test/Transforms/SLPVectorizer/stores-non-ordered.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt < %s -S -mtriple=x86_64-unknown -passes=slp-vectorizer -slp-min-reg-size=64 -slp-threshold=-1000 | FileCheck %s
+; RUN: %if x86-registered-target %{ opt < %s -S -mtriple=x86_64-unknown -passes=slp-vectorizer -slp-min-reg-size=64 -slp-threshold=-1000 | FileCheck %s %}
+; RUN: %if aarch64-registered-target %{ opt < %s -S -mtriple=aarch64-unknown -passes=slp-vectorizer -slp-min-reg-size=64 -slp-threshold=-1000 | FileCheck %s %}
define i32 @non-ordered-stores(ptr noalias nocapture %in, ptr noalias nocapture %inn, ptr noalias nocapture %out) {
; CHECK-LABEL: @non-ordered-stores(
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/unknown-entries.ll b/llvm/test/Transforms/SLPVectorizer/unknown-entries.ll
index fc22280..ca9aa45 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/unknown-entries.ll
+++ b/llvm/test/Transforms/SLPVectorizer/unknown-entries.ll
@@ -1,7 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
-; RUN: opt < %s -passes=slp-vectorizer -S | FileCheck %s
-
-target triple = "x86_64-unknown-linux-gnu"
+; RUN: %if x86-registered-target %{ opt < %s -passes=slp-vectorizer -mtriple=x86_64-unknown-linux-gnu -S | FileCheck %s %}
+; RUN: %if aarch64-registered-target %{ opt < %s -passes=slp-vectorizer -mtriple=aarch64-unknown-linux-gnu -S | FileCheck %s %}
define <3 x i64> @ahyes(i64 %position, i64 %value) {
; CHECK-LABEL: define <3 x i64> @ahyes(
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/zext-incoming-for-neg-icmp.ll b/llvm/test/Transforms/SLPVectorizer/zext-incoming-for-neg-icmp.ll
index 7f086d1..89fcc7e 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/zext-incoming-for-neg-icmp.ll
+++ b/llvm/test/Transforms/SLPVectorizer/zext-incoming-for-neg-icmp.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
-; RUN: opt -S --passes=slp-vectorizer -mtriple=x86_64-unknown-linux-gnu < %s | FileCheck %s
+; RUN: %if x86-registered-target %{ opt -S --passes=slp-vectorizer -mtriple=x86_64-unknown-linux-gnu < %s | FileCheck %s %}
+; RUN: %if aarch64-registered-target %{ opt -S --passes=slp-vectorizer -mtriple=aarch64-unknown-linux-gnu < %s | FileCheck %s %}
define i32 @test(i32 %a, i8 %b, i8 %c) {
; CHECK-LABEL: define i32 @test(
diff --git a/llvm/test/Transforms/SimplifyCFG/X86/hoist-loads-stores-with-cf.ll b/llvm/test/Transforms/SimplifyCFG/X86/hoist-loads-stores-with-cf.ll
index 6ea0cf29..03db1bb 100644
--- a/llvm/test/Transforms/SimplifyCFG/X86/hoist-loads-stores-with-cf.ll
+++ b/llvm/test/Transforms/SimplifyCFG/X86/hoist-loads-stores-with-cf.ll
@@ -672,8 +672,8 @@ if.false:
ret void
}
-define i32 @str_transcode0(i1 %cond1, ptr %p, i1 %cond2) {
-; CHECK-LABEL: @str_transcode0(
+define i32 @succ_phi_has_3input(i1 %cond1, ptr %p, i1 %cond2) {
+; CHECK-LABEL: @succ_phi_has_3input(
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 [[COND1:%.*]], label [[BB3:%.*]], label [[BB1:%.*]]
; CHECK: bb1:
@@ -728,6 +728,37 @@ if.true:
ret i32 %res
}
+define i32 @succ1to0_phi3(ptr %p, ptr %p2, i32 %x) {
+; CHECK-LABEL: @succ1to0_phi3(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[COND:%.*]] = icmp eq ptr [[P:%.*]], null
+; CHECK-NEXT: [[TMP0:%.*]] = xor i1 [[COND]], true
+; CHECK-NEXT: [[TMP1:%.*]] = bitcast i1 [[TMP0]] to <1 x i1>
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast i32 [[X:%.*]] to <1 x i32>
+; CHECK-NEXT: [[TMP3:%.*]] = call <1 x i32> @llvm.masked.load.v1i32.p0(ptr [[P]], i32 4, <1 x i1> [[TMP1]], <1 x i32> [[TMP2]])
+; CHECK-NEXT: [[TMP4:%.*]] = bitcast <1 x i32> [[TMP3]] to i32
+; CHECK-NEXT: [[TMP5:%.*]] = bitcast i32 [[TMP4]] to <1 x i32>
+; CHECK-NEXT: call void @llvm.masked.store.v1i32.p0(<1 x i32> [[TMP5]], ptr [[P2:%.*]], i32 4, <1 x i1> [[TMP1]])
+; CHECK-NEXT: [[SPEC_SELECT:%.*]] = select i1 [[COND]], i32 0, i32 [[TMP4]]
+; CHECK-NEXT: [[RES:%.*]] = add i32 [[SPEC_SELECT]], [[TMP4]]
+; CHECK-NEXT: ret i32 [[RES]]
+;
+entry:
+ %cond = icmp eq ptr %p, null
+ br i1 %cond, label %if.true, label %if.false
+
+if.false:
+ %0 = load i32, ptr %p
+ store i32 %0, ptr %p2
+ br label %if.true
+
+if.true:
+ %res0 = phi i32 [ %0, %if.false ], [ 0, %entry ]
+ %res1 = phi i32 [ %0, %if.false ], [ %x, %entry ]
+ %res = add i32 %res0, %res1
+ ret i32 %res
+}
+
declare i32 @read_memory_only() readonly nounwind willreturn speculatable
!llvm.dbg.cu = !{!0}
diff --git a/llvm/test/Transforms/SimplifyCFG/X86/sink-common-code.ll b/llvm/test/Transforms/SimplifyCFG/X86/sink-common-code.ll
index 39b1bec1..170f8d1 100644
--- a/llvm/test/Transforms/SimplifyCFG/X86/sink-common-code.ll
+++ b/llvm/test/Transforms/SimplifyCFG/X86/sink-common-code.ll
@@ -304,14 +304,14 @@ define i32 @test10(i1 zeroext %flag, i32 %x, ptr %y, ptr %s) {
; CHECK-NEXT: br i1 [[FLAG:%.*]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]]
; CHECK: if.then:
; CHECK-NEXT: call void @bar(i32 5)
+; CHECK-NEXT: store volatile i32 [[X:%.*]], ptr [[S:%.*]], align 4
; CHECK-NEXT: br label [[IF_END:%.*]]
; CHECK: if.else:
; CHECK-NEXT: call void @bar(i32 6)
-; CHECK-NEXT: [[GEPB:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], ptr [[S:%.*]], i32 0, i32 1
+; CHECK-NEXT: [[GEPB:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], ptr [[S]], i32 0, i32 1
+; CHECK-NEXT: store volatile i32 [[X]], ptr [[GEPB]], align 4
; CHECK-NEXT: br label [[IF_END]]
; CHECK: if.end:
-; CHECK-NEXT: [[GEPB_SINK:%.*]] = phi ptr [ [[GEPB]], [[IF_ELSE]] ], [ [[S]], [[IF_THEN]] ]
-; CHECK-NEXT: store volatile i32 [[X:%.*]], ptr [[GEPB_SINK]], align 4
; CHECK-NEXT: ret i32 1
;
entry:
@@ -518,23 +518,25 @@ declare void @llvm.dbg.value(metadata, metadata, metadata)
!11 = !DILocation(line: 1, column: 14, scope: !8)
-; The load should be commoned.
+; The load should not be commoned, as it will get separated from the GEP
+; instruction producing the address.
define i32 @test15(i1 zeroext %flag, i32 %w, i32 %x, i32 %y, ptr %s) {
; CHECK-LABEL: @test15(
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 [[FLAG:%.*]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]]
; CHECK: if.then:
; CHECK-NEXT: call void @bar(i32 1)
+; CHECK-NEXT: [[SV1:%.*]] = load i32, ptr [[S:%.*]], align 4
; CHECK-NEXT: br label [[IF_END:%.*]]
; CHECK: if.else:
; CHECK-NEXT: call void @bar(i32 4)
-; CHECK-NEXT: [[GEPB:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], ptr [[S:%.*]], i32 0, i32 1
+; CHECK-NEXT: [[GEPB:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], ptr [[S]], i32 0, i32 1
+; CHECK-NEXT: [[SV2:%.*]] = load i32, ptr [[GEPB]], align 4
; CHECK-NEXT: br label [[IF_END]]
; CHECK: if.end:
-; CHECK-NEXT: [[GEPB_SINK:%.*]] = phi ptr [ [[GEPB]], [[IF_ELSE]] ], [ [[S]], [[IF_THEN]] ]
+; CHECK-NEXT: [[SV2_SINK:%.*]] = phi i32 [ [[SV2]], [[IF_ELSE]] ], [ [[SV1]], [[IF_THEN]] ]
; CHECK-NEXT: [[DOTSINK:%.*]] = phi i64 [ 57, [[IF_ELSE]] ], [ 56, [[IF_THEN]] ]
-; CHECK-NEXT: [[SV2:%.*]] = load i32, ptr [[GEPB_SINK]], align 4
-; CHECK-NEXT: [[EXT2:%.*]] = zext i32 [[SV2]] to i64
+; CHECK-NEXT: [[EXT2:%.*]] = zext i32 [[SV2_SINK]] to i64
; CHECK-NEXT: [[CMP2:%.*]] = icmp eq i64 [[EXT2]], [[DOTSINK]]
; CHECK-NEXT: ret i32 1
;
@@ -1803,17 +1805,19 @@ define i64 @multi_use_in_block_inconsistent(i1 %cond, ptr %p, i64 %a, i64 %b) {
; CHECK: if:
; CHECK-NEXT: call void @dummy()
; CHECK-NEXT: [[GEP1_A:%.*]] = getelementptr i8, ptr [[P:%.*]], i64 [[A:%.*]]
+; CHECK-NEXT: [[V_A:%.*]] = load i64, ptr [[GEP1_A]], align 8
+; CHECK-NEXT: [[GEP2_A:%.*]] = getelementptr i8, ptr [[GEP1_A]], i64 [[V_A]]
; CHECK-NEXT: br label [[JOIN:%.*]]
; CHECK: else:
; CHECK-NEXT: [[GEP1_B:%.*]] = getelementptr i8, ptr [[P]], i64 [[A]]
+; CHECK-NEXT: [[V_B:%.*]] = load i64, ptr [[P]], align 8
+; CHECK-NEXT: [[GEP2_B:%.*]] = getelementptr i8, ptr [[GEP1_B]], i64 [[V_B]]
; CHECK-NEXT: br label [[JOIN]]
; CHECK: join:
-; CHECK-NEXT: [[P_SINK:%.*]] = phi ptr [ [[P]], [[ELSE]] ], [ [[GEP1_A]], [[IF]] ]
-; CHECK-NEXT: [[GEP1_B_SINK:%.*]] = phi ptr [ [[GEP1_B]], [[ELSE]] ], [ [[GEP1_A]], [[IF]] ]
-; CHECK-NEXT: [[V_B:%.*]] = load i64, ptr [[P_SINK]], align 8
-; CHECK-NEXT: [[GEP2_B:%.*]] = getelementptr i8, ptr [[GEP1_B_SINK]], i64 [[V_B]]
-; CHECK-NEXT: call void @use.ptr(ptr [[GEP2_B]])
-; CHECK-NEXT: ret i64 [[V_B]]
+; CHECK-NEXT: [[PHI1:%.*]] = phi i64 [ [[V_A]], [[IF]] ], [ [[V_B]], [[ELSE]] ]
+; CHECK-NEXT: [[PHI2:%.*]] = phi ptr [ [[GEP2_A]], [[IF]] ], [ [[GEP2_B]], [[ELSE]] ]
+; CHECK-NEXT: call void @use.ptr(ptr [[PHI2]])
+; CHECK-NEXT: ret i64 [[PHI1]]
;
br i1 %cond, label %if, label %else
@@ -1873,14 +1877,15 @@ define i64 @load_with_non_sunk_gep_both(i1 %cond, ptr %p.a, ptr %p.b, i64 %a, i6
; CHECK: if:
; CHECK-NEXT: call void @dummy()
; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr i8, ptr [[P_A:%.*]], i64 [[A:%.*]]
+; CHECK-NEXT: [[V_A:%.*]] = load i64, ptr [[GEP_A]], align 8
; CHECK-NEXT: br label [[JOIN:%.*]]
; CHECK: else:
; CHECK-NEXT: [[GEP_B:%.*]] = getelementptr i8, ptr [[P_B:%.*]], i64 [[B:%.*]]
+; CHECK-NEXT: [[V_B:%.*]] = load i64, ptr [[GEP_B]], align 8
; CHECK-NEXT: br label [[JOIN]]
; CHECK: join:
-; CHECK-NEXT: [[GEP_B_SINK:%.*]] = phi ptr [ [[GEP_B]], [[ELSE]] ], [ [[GEP_A]], [[IF]] ]
-; CHECK-NEXT: [[V_B:%.*]] = load i64, ptr [[GEP_B_SINK]], align 8
-; CHECK-NEXT: ret i64 [[V_B]]
+; CHECK-NEXT: [[V:%.*]] = phi i64 [ [[V_A]], [[IF]] ], [ [[V_B]], [[ELSE]] ]
+; CHECK-NEXT: ret i64 [[V]]
;
br i1 %cond, label %if, label %else
@@ -1905,14 +1910,15 @@ define i64 @load_with_non_sunk_gep_left(i1 %cond, ptr %p.a, ptr %p.b, i64 %b) {
; CHECK-NEXT: br i1 [[COND:%.*]], label [[IF:%.*]], label [[ELSE:%.*]]
; CHECK: if:
; CHECK-NEXT: call void @dummy()
+; CHECK-NEXT: [[V_A:%.*]] = load i64, ptr [[P_A:%.*]], align 8
; CHECK-NEXT: br label [[JOIN:%.*]]
; CHECK: else:
; CHECK-NEXT: [[GEP_B:%.*]] = getelementptr i8, ptr [[P_B:%.*]], i64 [[B:%.*]]
+; CHECK-NEXT: [[V_B:%.*]] = load i64, ptr [[GEP_B]], align 8
; CHECK-NEXT: br label [[JOIN]]
; CHECK: join:
-; CHECK-NEXT: [[GEP_B_SINK:%.*]] = phi ptr [ [[GEP_B]], [[ELSE]] ], [ [[P_A:%.*]], [[IF]] ]
-; CHECK-NEXT: [[V_B:%.*]] = load i64, ptr [[GEP_B_SINK]], align 8
-; CHECK-NEXT: ret i64 [[V_B]]
+; CHECK-NEXT: [[V:%.*]] = phi i64 [ [[V_A]], [[IF]] ], [ [[V_B]], [[ELSE]] ]
+; CHECK-NEXT: ret i64 [[V]]
;
br i1 %cond, label %if, label %else
@@ -1933,15 +1939,18 @@ join:
define i64 @load_with_non_sunk_gep_right(i1 %cond, ptr %p.a, ptr %p.b, i64 %a) {
; CHECK-LABEL: @load_with_non_sunk_gep_right(
-; CHECK-NEXT: br i1 [[COND:%.*]], label [[IF:%.*]], label [[JOIN:%.*]]
+; CHECK-NEXT: br i1 [[COND:%.*]], label [[IF:%.*]], label [[ELSE:%.*]]
; CHECK: if:
; CHECK-NEXT: call void @dummy()
; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr i8, ptr [[P_A:%.*]], i64 [[A:%.*]]
+; CHECK-NEXT: [[V_A:%.*]] = load i64, ptr [[GEP_A]], align 8
+; CHECK-NEXT: br label [[JOIN:%.*]]
+; CHECK: else:
+; CHECK-NEXT: [[V_B:%.*]] = load i64, ptr [[P_B:%.*]], align 8
; CHECK-NEXT: br label [[JOIN]]
; CHECK: join:
-; CHECK-NEXT: [[P_B_SINK:%.*]] = phi ptr [ [[GEP_A]], [[IF]] ], [ [[P_B:%.*]], [[TMP0:%.*]] ]
-; CHECK-NEXT: [[V_B:%.*]] = load i64, ptr [[P_B_SINK]], align 8
-; CHECK-NEXT: ret i64 [[V_B]]
+; CHECK-NEXT: [[V:%.*]] = phi i64 [ [[V_A]], [[IF]] ], [ [[V_B]], [[ELSE]] ]
+; CHECK-NEXT: ret i64 [[V]]
;
br i1 %cond, label %if, label %else
@@ -1966,13 +1975,13 @@ define void @store_with_non_sunk_gep(i1 %cond, ptr %p.a, ptr %p.b, i64 %a, i64 %
; CHECK: if:
; CHECK-NEXT: call void @dummy()
; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr i8, ptr [[P_A:%.*]], i64 [[A:%.*]]
+; CHECK-NEXT: store i64 0, ptr [[GEP_A]], align 8
; CHECK-NEXT: br label [[JOIN:%.*]]
; CHECK: else:
; CHECK-NEXT: [[GEP_B:%.*]] = getelementptr i8, ptr [[P_B:%.*]], i64 [[B:%.*]]
+; CHECK-NEXT: store i64 0, ptr [[GEP_B]], align 8
; CHECK-NEXT: br label [[JOIN]]
; CHECK: join:
-; CHECK-NEXT: [[GEP_B_SINK:%.*]] = phi ptr [ [[GEP_B]], [[ELSE]] ], [ [[GEP_A]], [[IF]] ]
-; CHECK-NEXT: store i64 0, ptr [[GEP_B_SINK]], align 8
; CHECK-NEXT: ret void
;
br i1 %cond, label %if, label %else
diff --git a/llvm/test/Transforms/SimplifyCFG/X86/switch_to_lookup_table.ll b/llvm/test/Transforms/SimplifyCFG/X86/switch_to_lookup_table.ll
index 845c500..9549ccd 100644
--- a/llvm/test/Transforms/SimplifyCFG/X86/switch_to_lookup_table.ll
+++ b/llvm/test/Transforms/SimplifyCFG/X86/switch_to_lookup_table.ll
@@ -2118,6 +2118,31 @@ cond.end: ; preds = %entry, %cond.false
ret i8 %conv
}
+define i1 @linearmap_trunc_smaller_table_size(i8 %arg) {
+; CHECK-LABEL: @linearmap_trunc_smaller_table_size(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = icmp ult i8 [[ARG:%.*]], 10
+; CHECK-NEXT: [[SWITCH_IDX_CAST:%.*]] = trunc i8 [[ARG]] to i1
+; CHECK-NEXT: [[SPEC_SELECT:%.*]] = select i1 [[TMP0]], i1 [[SWITCH_IDX_CAST]], i1 false
+; CHECK-NEXT: ret i1 [[SPEC_SELECT]]
+;
+entry:
+ switch i8 %arg, label %exit [
+ i8 1, label %sw
+ i8 3, label %sw
+ i8 5, label %sw
+ i8 7, label %sw
+ i8 9, label %sw
+ ]
+
+sw:
+ br label %exit
+
+exit:
+ %phi = phi i1 [ true, %sw ], [ false, %entry ]
+ ret i1 %phi
+}
+
; Don't create a table with an unknown type
define { i8, i8 } @test_unknown_result_type(i8 %n) {
; CHECK-LABEL: @test_unknown_result_type(
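
Note: the new @linearmap_trunc_smaller_table_size test checks that this switch is lowered without materializing a lookup table. For %arg in [0, 10) the result is true exactly for the odd cases 1, 3, 5, 7, 9, i.e. it equals the low bit of %arg, so the transform emits a range check plus "trunc i8 %arg to i1" feeding a select. A worked sketch of the mapping:

  arg (0..9):  0 1 2 3 4 5 6 7 8 9
  result:      F T F T F T F T F T   == low bit of arg

Outside the range, the select falls back to false, matching the switch's default successor.
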
diff --git a/llvm/test/Transforms/SimplifyCFG/speculate-derefable-load.ll b/llvm/test/Transforms/SimplifyCFG/speculate-derefable-load.ll
index 8c7afa4..0138433 100644
--- a/llvm/test/Transforms/SimplifyCFG/speculate-derefable-load.ll
+++ b/llvm/test/Transforms/SimplifyCFG/speculate-derefable-load.ll
@@ -77,14 +77,17 @@ exit:
ret i64 %res
}
-; FIXME: This is a miscompile.
define i64 @deref_no_hoist(i1 %c, ptr align 8 dereferenceable(8) %p1) {
; CHECK-LABEL: define i64 @deref_no_hoist(
; CHECK-SAME: i1 [[C:%.*]], ptr align 8 dereferenceable(8) [[P1:%.*]]) {
-; CHECK-NEXT: [[ENTRY:.*:]]
-; CHECK-NEXT: [[P2:%.*]] = load ptr, ptr [[P1]], align 8, !align [[META0:![0-9]+]]
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: br i1 [[C]], label %[[IF:.*]], label %[[EXIT:.*]]
+; CHECK: [[IF]]:
+; CHECK-NEXT: [[P2:%.*]] = load ptr, ptr [[P1]], align 8, !dereferenceable [[META0:![0-9]+]], !align [[META0]]
; CHECK-NEXT: [[V:%.*]] = load i64, ptr [[P2]], align 8
-; CHECK-NEXT: [[RES:%.*]] = select i1 [[C]], i64 [[V]], i64 0
+; CHECK-NEXT: br label %[[EXIT]]
+; CHECK: [[EXIT]]:
+; CHECK-NEXT: [[RES:%.*]] = phi i64 [ [[V]], %[[IF]] ], [ 0, %[[ENTRY]] ]
; CHECK-NEXT: ret i64 [[RES]]
;
entry:
diff --git a/llvm/test/Transforms/VectorCombine/RISCV/shuffle-of-intrinsics.ll b/llvm/test/Transforms/VectorCombine/RISCV/shuffle-of-intrinsics.ll
index 7ccc14c..f3e5d27 100644
--- a/llvm/test/Transforms/VectorCombine/RISCV/shuffle-of-intrinsics.ll
+++ b/llvm/test/Transforms/VectorCombine/RISCV/shuffle-of-intrinsics.ll
@@ -48,10 +48,9 @@ entry:
define <8 x i1> @test4(<4 x float> %0, <4 x float> %1) {
; CHECK-LABEL: @test4(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP2:%.*]] = call <4 x i1> @llvm.is.fpclass.v4f32(<4 x float> [[TMP0:%.*]], i32 0)
-; CHECK-NEXT: [[TMP3:%.*]] = call <4 x i1> @llvm.is.fpclass.v4f32(<4 x float> [[TMP1:%.*]], i32 0)
-; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <4 x i1> [[TMP2]], <4 x i1> [[TMP3]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
-; CHECK-NEXT: ret <8 x i1> [[TMP4]]
+; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <4 x float> [[TMP0:%.*]], <4 x float> [[TMP1:%.*]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+; CHECK-NEXT: [[TMP3:%.*]] = call <8 x i1> @llvm.is.fpclass.v8f32(<8 x float> [[TMP2]], i32 0)
+; CHECK-NEXT: ret <8 x i1> [[TMP3]]
;
entry:
%2 = call <4 x i1> @llvm.is.fpclass.v4f32(<4 x float> %0, i32 0)
diff --git a/llvm/test/lit.cfg.py b/llvm/test/lit.cfg.py
index 1e0dd0a7..5a03a85 100644
--- a/llvm/test/lit.cfg.py
+++ b/llvm/test/lit.cfg.py
@@ -364,6 +364,14 @@ if config.host_ldflags.find("-m32") < 0 and any(
config.available_features.add("llvm-64-bits")
config.available_features.add("host-byteorder-" + sys.byteorder + "-endian")
+if config.target_triple:
+ if re.match(
+ r"(aarch64_be|arc|armeb|bpfeb|lanai|m68k|mips|mips64|powerpc|powerpc64|sparc|sparcv9|s390x|s390|tce|thumbeb)-.*",
+ config.target_triple,
+ ):
+ config.available_features.add("target-byteorder-big-endian")
+ else:
+ config.available_features.add("target-byteorder-little-endian")
if sys.platform in ["win32"]:
# ExecutionEngine, no weak symbols in COFF.
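
Note: with the lit.cfg.py change above, tests can gate on the byte order of the default target rather than on a specific architecture. A hypothetical use (not part of this patch):

  ; REQUIRES: target-byteorder-little-endian

The regex classifies the listed big-endian triple prefixes (aarch64_be, armeb, mips, powerpc, s390x, sparc, and so on) as big-endian and treats every other target triple as little-endian.
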
diff --git a/llvm/test/tools/UpdateTestChecks/lit.local.cfg b/llvm/test/tools/UpdateTestChecks/lit.local.cfg
index 2e69549..a954eb7 100644
--- a/llvm/test/tools/UpdateTestChecks/lit.local.cfg
+++ b/llvm/test/tools/UpdateTestChecks/lit.local.cfg
@@ -52,3 +52,7 @@ if os.path.isfile(llvm_mca_path):
split_file_path = os.path.join(config.llvm_tools_dir, "split-file")
if os.path.isfile(split_file_path):
add_update_script_substition("%update_test_body")
+
+llvm_mc_path = os.path.join(config.llvm_tools_dir, "llvm-mc")
+if os.path.isfile(llvm_mc_path):
+ add_update_script_substition("%update_mc_test_checks")
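
Note: the new %update_mc_test_checks substitution mirrors the existing update-script substitutions above; a test copies an input next to its expected output and checks that the script reproduces it. A hypothetical usage (foo.s is illustrative, not part of this patch):

  # RUN: cp -f %S/Inputs/foo.s %t.s && %update_mc_test_checks %t.s
  # RUN: diff -u %S/Inputs/foo.s.expected %t.s

The amdgpu-basic.test added later in this patch uses exactly this pattern against the new amdgpu_asm/amdgpu_dasm inputs.
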
diff --git a/llvm/test/tools/UpdateTestChecks/update_mc_test_checks/Inputs/amdgpu_asm.s b/llvm/test/tools/UpdateTestChecks/update_mc_test_checks/Inputs/amdgpu_asm.s
new file mode 100644
index 0000000..b21935e
--- /dev/null
+++ b/llvm/test/tools/UpdateTestChecks/update_mc_test_checks/Inputs/amdgpu_asm.s
@@ -0,0 +1,3 @@
+// RUN: llvm-mc -triple=amdgcn -show-encoding %s 2>&1 | FileCheck --check-prefixes=CHECK %s
+
+v_bfrev_b32 v5, v1
diff --git a/llvm/test/tools/UpdateTestChecks/update_mc_test_checks/Inputs/amdgpu_asm.s.expected b/llvm/test/tools/UpdateTestChecks/update_mc_test_checks/Inputs/amdgpu_asm.s.expected
new file mode 100644
index 0000000..7336947
--- /dev/null
+++ b/llvm/test/tools/UpdateTestChecks/update_mc_test_checks/Inputs/amdgpu_asm.s.expected
@@ -0,0 +1,5 @@
+; NOTE: Assertions have been autogenerated by utils/update_mc_test_checks.py
+// RUN: llvm-mc -triple=amdgcn -show-encoding %s 2>&1 | FileCheck --check-prefixes=CHECK %s
+
+v_bfrev_b32 v5, v1
+// CHECK: v_bfrev_b32_e32 v5, v1 ; encoding: [0x01,0x71,0x0a,0x7e]
diff --git a/llvm/test/tools/UpdateTestChecks/update_mc_test_checks/Inputs/amdgpu_asm_err.s b/llvm/test/tools/UpdateTestChecks/update_mc_test_checks/Inputs/amdgpu_asm_err.s
new file mode 100644
index 0000000..489bd18
--- /dev/null
+++ b/llvm/test/tools/UpdateTestChecks/update_mc_test_checks/Inputs/amdgpu_asm_err.s
@@ -0,0 +1,3 @@
+// RUN: not llvm-mc -triple=amdgcn -show-encoding %s 2>&1 | FileCheck --check-prefixes=CHECK %s
+
+v_bfrev_b32 v5, v299
diff --git a/llvm/test/tools/UpdateTestChecks/update_mc_test_checks/Inputs/amdgpu_asm_err.s.expected b/llvm/test/tools/UpdateTestChecks/update_mc_test_checks/Inputs/amdgpu_asm_err.s.expected
new file mode 100644
index 0000000..0a0ad51
--- /dev/null
+++ b/llvm/test/tools/UpdateTestChecks/update_mc_test_checks/Inputs/amdgpu_asm_err.s.expected
@@ -0,0 +1,5 @@
+; NOTE: Assertions have been autogenerated by utils/update_mc_test_checks.py
+// RUN: not llvm-mc -triple=amdgcn -show-encoding %s 2>&1 | FileCheck --check-prefixes=CHECK %s
+
+v_bfrev_b32 v5, v299
+// CHECK: :[[@LINE-1]]:17: error: register index is out of range
diff --git a/llvm/test/tools/UpdateTestChecks/update_mc_test_checks/Inputs/amdgpu_dasm.txt b/llvm/test/tools/UpdateTestChecks/update_mc_test_checks/Inputs/amdgpu_dasm.txt
new file mode 100644
index 0000000..9f5fba6
--- /dev/null
+++ b/llvm/test/tools/UpdateTestChecks/update_mc_test_checks/Inputs/amdgpu_dasm.txt
@@ -0,0 +1,5 @@
+# RUN: llvm-mc -triple=amdgcn -mcpu=gfx1100 -disassemble -show-encoding %s 2>&1 | FileCheck -check-prefixes=CHECK %s
+
+0x00,0x00,0x00,0x7e
+
+0xfd,0xb8,0x0a,0x7f
diff --git a/llvm/test/tools/UpdateTestChecks/update_mc_test_checks/Inputs/amdgpu_dasm.txt.expected b/llvm/test/tools/UpdateTestChecks/update_mc_test_checks/Inputs/amdgpu_dasm.txt.expected
new file mode 100644
index 0000000..1b64695
--- /dev/null
+++ b/llvm/test/tools/UpdateTestChecks/update_mc_test_checks/Inputs/amdgpu_dasm.txt.expected
@@ -0,0 +1,8 @@
+; NOTE: Assertions have been autogenerated by utils/update_mc_test_checks.py
+# RUN: llvm-mc -triple=amdgcn -mcpu=gfx1100 -disassemble -show-encoding %s 2>&1 | FileCheck -check-prefixes=CHECK %s
+
+0x00,0x00,0x00,0x7e
+# CHECK: v_nop ; encoding: [0x00,0x00,0x00,0x7e]
+
+0xfd,0xb8,0x0a,0x7f
+# CHECK: :[[@LINE-1]]:1: warning: invalid instruction encoding
diff --git a/llvm/test/tools/UpdateTestChecks/update_mc_test_checks/Inputs/amdgpu_multirun_dasm.txt b/llvm/test/tools/UpdateTestChecks/update_mc_test_checks/Inputs/amdgpu_multirun_dasm.txt
new file mode 100644
index 0000000..0f680d0
--- /dev/null
+++ b/llvm/test/tools/UpdateTestChecks/update_mc_test_checks/Inputs/amdgpu_multirun_dasm.txt
@@ -0,0 +1,6 @@
+# RUN: llvm-mc -triple=amdgcn -mcpu=tonga -disassemble -show-encoding %s 2>&1 | FileCheck -check-prefixes=CHECK,CHECKA %s
+# RUN: llvm-mc -triple=amdgcn -mcpu=gfx1100 -disassemble -show-encoding %s 2>&1 | FileCheck -check-prefixes=CHECK,CHECKB %s
+
+0x00,0x00,0x00,0x7e
+
+0x01,0x71,0x0a,0x7e
diff --git a/llvm/test/tools/UpdateTestChecks/update_mc_test_checks/Inputs/amdgpu_multirun_dasm.txt.expected b/llvm/test/tools/UpdateTestChecks/update_mc_test_checks/Inputs/amdgpu_multirun_dasm.txt.expected
new file mode 100644
index 0000000..03a5ec3
--- /dev/null
+++ b/llvm/test/tools/UpdateTestChecks/update_mc_test_checks/Inputs/amdgpu_multirun_dasm.txt.expected
@@ -0,0 +1,10 @@
+; NOTE: Assertions have been autogenerated by utils/update_mc_test_checks.py
+# RUN: llvm-mc -triple=amdgcn -mcpu=tonga -disassemble -show-encoding %s 2>&1 | FileCheck -check-prefixes=CHECK,CHECKA %s
+# RUN: llvm-mc -triple=amdgcn -mcpu=gfx1100 -disassemble -show-encoding %s 2>&1 | FileCheck -check-prefixes=CHECK,CHECKB %s
+
+0x00,0x00,0x00,0x7e
+# CHECK: v_nop ; encoding: [0x00,0x00,0x00,0x7e]
+
+0x01,0x71,0x0a,0x7e
+# CHECKA: v_movrelsd_b32_e32 v5, v1 ; encoding: [0x01,0x71,0x0a,0x7e]
+# CHECKB: v_bfrev_b32_e32 v5, v1 ; encoding: [0x01,0x71,0x0a,0x7e]
diff --git a/llvm/test/tools/UpdateTestChecks/update_mc_test_checks/amdgpu-basic.test b/llvm/test/tools/UpdateTestChecks/update_mc_test_checks/amdgpu-basic.test
new file mode 100644
index 0000000..4713635
--- /dev/null
+++ b/llvm/test/tools/UpdateTestChecks/update_mc_test_checks/amdgpu-basic.test
@@ -0,0 +1,11 @@
+# REQUIRES: amdgpu-registered-target
+## Check that the basic asm/dasm update process is correct
+
+# RUN: cp -f %S/Inputs/amdgpu_asm.s %t.s && %update_mc_test_checks %t.s
+# RUN: diff -u %S/Inputs/amdgpu_asm.s.expected %t.s
+# RUN: cp -f %S/Inputs/amdgpu_asm_err.s %t.s && %update_mc_test_checks %t.s
+# RUN: diff -u %S/Inputs/amdgpu_asm_err.s.expected %t.s
+# RUN: cp -f %S/Inputs/amdgpu_dasm.txt %t.txt && %update_mc_test_checks %t.txt
+# RUN: diff -u %S/Inputs/amdgpu_dasm.txt.expected %t.txt
+# RUN: cp -f %S/Inputs/amdgpu_multirun_dasm.txt %t.txt && %update_mc_test_checks %t.txt
+# RUN: diff -u %S/Inputs/amdgpu_multirun_dasm.txt.expected %t.txt
diff --git a/llvm/test/tools/UpdateTestChecks/update_mc_test_checks/lit.local.cfg b/llvm/test/tools/UpdateTestChecks/update_mc_test_checks/lit.local.cfg
new file mode 100644
index 0000000..1bb2464
--- /dev/null
+++ b/llvm/test/tools/UpdateTestChecks/update_mc_test_checks/lit.local.cfg
@@ -0,0 +1,4 @@
+import platform
+
+if platform.system() == "Windows":
+ config.unsupported = True
diff --git a/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/if_target.ll b/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/if_target.ll
new file mode 100644
index 0000000..63d9d5c
--- /dev/null
+++ b/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/if_target.ll
@@ -0,0 +1,11 @@
+; Example input for update_test_checks (taken from test/Transforms/SLPVectorizer/extractlements-gathered-first-node.ll)
+; RUN: %if x86-registered-target %{ opt -S --passes=slp-vectorizer -slp-threshold=-99999 -mtriple=x86_64-unknown-linux-gnu < %s | FileCheck %s %}
+; RUN: %if aarch64-registered-target %{ opt -S --passes=slp-vectorizer -slp-threshold=-99999 -mtriple=aarch64-unknown-linux-gnu < %s | FileCheck %s %}
+
+define void @test() {
+bb:
+ %0 = extractelement <4 x i32> zeroinitializer, i32 0
+ %1 = extractelement <2 x i32> zeroinitializer, i32 0
+ %icmp = icmp ult i32 %0, %1
+ ret void
+}
diff --git a/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/if_target.ll.expected b/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/if_target.ll.expected
new file mode 100644
index 0000000..a744acd
--- /dev/null
+++ b/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/if_target.ll.expected
@@ -0,0 +1,19 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; Example input for update_test_checks (taken from test/Transforms/SLPVectorizer/extractlements-gathered-first-node.ll)
+; RUN: %if x86-registered-target %{ opt -S --passes=slp-vectorizer -slp-threshold=-99999 -mtriple=x86_64-unknown-linux-gnu < %s | FileCheck %s %}
+; RUN: %if aarch64-registered-target %{ opt -S --passes=slp-vectorizer -slp-threshold=-99999 -mtriple=aarch64-unknown-linux-gnu < %s | FileCheck %s %}
+
+define void @test() {
+; CHECK-LABEL: define void @test() {
+; CHECK-NEXT: bb:
+; CHECK-NEXT: [[TMP0:%.*]] = extractelement <4 x i32> zeroinitializer, i32 0
+; CHECK-NEXT: [[TMP1:%.*]] = extractelement <2 x i32> zeroinitializer, i32 0
+; CHECK-NEXT: [[ICMP:%.*]] = icmp ult i32 [[TMP0]], [[TMP1]]
+; CHECK-NEXT: ret void
+;
+bb:
+ %0 = extractelement <4 x i32> zeroinitializer, i32 0
+ %1 = extractelement <2 x i32> zeroinitializer, i32 0
+ %icmp = icmp ult i32 %0, %1
+ ret void
+}
diff --git a/llvm/test/tools/UpdateTestChecks/update_test_checks/if_target.test b/llvm/test/tools/UpdateTestChecks/update_test_checks/if_target.test
new file mode 100644
index 0000000..3d8427b
--- /dev/null
+++ b/llvm/test/tools/UpdateTestChecks/update_test_checks/if_target.test
@@ -0,0 +1,6 @@
+## Basic test checking that update_test_checks.py works correctly with %if in RUN line
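+## A RUN line of the form '%if feature %{ cmd %}' only runs cmd when the
+## target is available; the script has to unwrap it to find the opt pipeline.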
+# RUN: cp -f %S/Inputs/if_target.ll %t.ll && %update_test_checks %t.ll --version 4
+# RUN: diff -u %t.ll %S/Inputs/if_target.ll.expected
+## Check that running the script again does not change the result:
+# RUN: %update_test_checks %t.ll
+# RUN: diff -u %t.ll %S/Inputs/if_target.ll.expected \ No newline at end of file
diff --git a/llvm/test/tools/dsymutil/X86/dwarf5-many-include-directories.test b/llvm/test/tools/dsymutil/X86/dwarf5-many-include-directories.test
new file mode 100644
index 0000000..644eecd
--- /dev/null
+++ b/llvm/test/tools/dsymutil/X86/dwarf5-many-include-directories.test
@@ -0,0 +1,213 @@
+# RUN: rm -rf %t && mkdir -p %t
+# RUN: split-file %s %t
+# RUN: %python %t/all.py > %t/all.ll
+# RUN: sed 's@---TEMPORARY_DIR---@%{/t:regex_replacement}@' %t/debug.map.template > %t/debug.map
+# RUN: %llc_dwarf -mtriple x86_64-apple-macosx10.4.0 -o %t/all.o -filetype=obj %t/all.ll
+# RUN: dsymutil -f -y %t/debug.map -o - | llvm-dwarfdump -debug-line - | FileCheck %s
+# RUN: dsymutil --linker parallel -f -y %t/debug.map -o - | llvm-dwarfdump -debug-line - | tee %t/output.txt | FileCheck %s
+
+# CHECK: include_directories[255] = "/tmp/tmp.0HPkdttdoU/d254"
+# CHECK-NEXT: include_directories[256] = "/tmp/tmp.0HPkdttdoU/d255"
+# CHECK-NEXT: include_directories[257] = "/tmp/tmp.0HPkdttdoU/d256"
+
+# CHECK: dir_index: 255
+# CHECK: dir_index: 256
+# CHECK: dir_index: 257
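+# With 257 include directories, dir_index values exceed 255 and no longer fit
+# a one-byte encoding; that is the boundary this test exercises.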
+
+# The original file was generated as follows (fish shell):
+# - for cnt in (seq 0 256); mkdir -p d$cnt ; printf "void func$cnt() {}\n#define FUNC$cnt func$cnt()\n" >> d$cnt/f$cnt.c ; end
+# - for cnt in (seq 0 256); printf "#include \"f$cnt.c\"" >> all.c ; end
+# - printf "void all() {\n" >> all.c
+# - for cnt in (seq 0 256); printf "FUNC$cnt;\n" >> all.c ; end
+# - printf "}\n" >> all.c
+# - clang -target x86_64-apple-macos -S -emit-llvm -gdwarf-5 -o all.ll all.c (for cnt in (seq 0 256); echo "-Id$cnt"; end)
+# - Edit all.ll manually and change every DIFile so that the directory part
+#   of the filename is moved into the directory field.
+# - Transformed into Python manually.
+
+#--- all.py
+import math
+import string
+
+PROLOGUE = string.Template("""\
+; ModuleID = 'all.c'
+source_filename = "all.c"
+target datalayout = "e-m:o-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.4.0"
+""")
+
+FUNCTION = string.Template("""\
+; Function Attrs: noinline nounwind optnone uwtable
+define void @func$idx() #0 !dbg !$dbg_reference_subprogram {
+ ret void, !dbg !$dbg_reference_location_ret
+}
+""")
+
+ALL_FUNCTION_PROLOGUE = string.Template("""\
+; Function Attrs: noinline nounwind optnone uwtable
+define void @all() #0 !dbg !$dbg_reference_subprogram {
+""")
+
+ALL_FUNCTION_CALL = string.Template("""\
+ call void @func$idx(), !dbg !$dbg_reference_location_call
+""")
+
+ALL_FUNCTION_EPILOGUE = string.Template("""\
+ ret void, !dbg !$dbg_reference_location_ret
+}
+""")
+
+DWARF_PROLOGUE = string.Template("""\
+attributes #0 = { noinline nounwind optnone uwtable "frame-pointer"="all" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="core2" "target-features"="+cmov,+cx16,+cx8,+fxsr,+mmx,+sahf,+sse,+sse2,+sse3,+ssse3,+x87" "tune-cpu"="generic" }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!2, !3, !4, !5, !6, !7}
+!llvm.ident = !{!8}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C11, file: !1, producer: "clang version 18.1.6 (CentOS 18.1.6-3.el9)", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, splitDebugInlining: false, nameTableKind: Apple, sysroot: "/")
+!1 = !DIFile(filename: "all.c", directory: "/tmp/tmp.0HPkdttdoU", checksumkind: CSK_MD5, checksum: "8b5068f097f0c272ddc808ed2d82cb12")
+!2 = !{i32 7, !"Dwarf Version", i32 5}
+!3 = !{i32 2, !"Debug Info Version", i32 3}
+!4 = !{i32 1, !"wchar_size", i32 4}
+!5 = !{i32 8, !"PIC Level", i32 2}
+!6 = !{i32 7, !"uwtable", i32 2}
+!7 = !{i32 7, !"frame-pointer", i32 2}
+!8 = !{!"clang version 18.1.6 (CentOS 18.1.6-3.el9)"}
+""")
+
+DWARF_FUNCTION_WITH_TYPE = string.Template("""\
+!$dbg_reference_subprogram = distinct !DISubprogram(name: "func$idx", scope: !$dbg_reference_file, file: !$dbg_reference_file, line: 1, type: !11, scopeLine: 1, spFlags: DISPFlagDefinition, unit: !0)
+!$dbg_reference_file = !DIFile(filename: "f$idx.c", directory: "/tmp/tmp.0HPkdttdoU/d$idx", checksumkind: CSK_MD5, checksum: "01234567890123456789012345678901")
+!11 = !DISubroutineType(types: !12)
+!12 = !{null}
+!$dbg_reference_location = !DILocation(line: 1, column: $column, scope: !$dbg_reference_subprogram)
+""")
+
+DWARF_FUNCTION = string.Template("""\
+!$dbg_reference_subprogram = distinct !DISubprogram(name: "func$idx", scope: !$dbg_reference_file, file: !$dbg_reference_file, line: 1, type: !11, scopeLine: 1, spFlags: DISPFlagDefinition, unit: !0)
+!$dbg_reference_file = !DIFile(filename: "f$idx.c", directory: "/tmp/tmp.0HPkdttdoU/d$idx", checksumkind: CSK_MD5, checksum: "01234567890123456789012345678901")
+!$dbg_reference_location = !DILocation(line: 1, column: $column, scope: !$dbg_reference_subprogram)
+""")
+
+DWARF_ALL_FUNCTION_PROLOGUE = string.Template("""\
+!$dbg_reference_subprogram = distinct !DISubprogram(name: "all", scope: !1, file: !1, line: $line_number, type: !11, scopeLine: $line_number, spFlags: DISPFlagDefinition, unit: !0)
+""")
+
+DWARF_ALL_FUNCTION_LOCATION = string.Template("""\
+!$dbg_reference_location = !DILocation(line: $line_number, column: 1, scope: !$dbg_reference_subprogram)
+""")
+
+NUM_FUNCS = 257
+
+dbg_reference_subprogram = 9
+dbg_reference_file = 10
+dbg_reference_location = 13
+column_base = 15
+functions = []
+dwarf_subprograms = []
+
+first = True
+for idx in range(NUM_FUNCS):
+ functions.append(
+ FUNCTION.substitute(
+ idx=idx,
+ dbg_reference_subprogram=dbg_reference_subprogram,
+ dbg_reference_location_ret=dbg_reference_location,
+ )
+ )
+ if first:
+ dwarf_subprograms.append(
+ DWARF_FUNCTION_WITH_TYPE.substitute(
+ idx=idx,
+ dbg_reference_subprogram=dbg_reference_subprogram,
+ dbg_reference_file=dbg_reference_file,
+ dbg_reference_location=dbg_reference_location,
+ column=column_base,
+ )
+ )
+ else:
+ dwarf_subprograms.append(
+ DWARF_FUNCTION.substitute(
+ idx=idx,
+ dbg_reference_subprogram=dbg_reference_subprogram,
+ dbg_reference_file=dbg_reference_file,
+ dbg_reference_location=dbg_reference_location,
+ column=column_base + math.floor(math.log10(idx)),
+ )
+ )
+
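+ # The first function's metadata block also defines the shared !11/!12
+ # DISubroutineType nodes, so its references advance by five; later
+ # functions reuse the type and advance by three.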
+ dbg_reference_subprogram += 5 if first else 3
+ dbg_reference_file += 5 if first else 3
+ dbg_reference_location += 3
+ first = False
+
+dbg_reference_location = dbg_reference_subprogram + 1
+line_number = 258
+all_function = []
+dwarf_all_subprogram = []
+
+all_function.append(
+ ALL_FUNCTION_PROLOGUE.substitute(
+ dbg_reference_subprogram=dbg_reference_subprogram
+ )
+)
+dwarf_all_subprogram.append(
+ DWARF_ALL_FUNCTION_PROLOGUE.substitute(
+ dbg_reference_subprogram=dbg_reference_subprogram,
+ line_number=line_number
+ )
+)
+line_number += 1
+
+for idx in range(NUM_FUNCS):
+ all_function.append(
+ ALL_FUNCTION_CALL.substitute(
+ idx=idx,
+ dbg_reference_location_call=dbg_reference_location,
+ )
+ )
+ dwarf_all_subprogram.append(
+ DWARF_ALL_FUNCTION_LOCATION.substitute(
+ dbg_reference_location=dbg_reference_location,
+ line_number=line_number,
+ dbg_reference_subprogram=dbg_reference_subprogram,
+ )
+ )
+
+ dbg_reference_location += 1
+ line_number += 1
+
+all_function.append(
+ ALL_FUNCTION_EPILOGUE.substitute(
+ dbg_reference_location_ret=dbg_reference_location
+ )
+)
+dwarf_all_subprogram.append(
+ DWARF_ALL_FUNCTION_LOCATION.substitute(
+ dbg_reference_location=dbg_reference_location,
+ line_number=line_number,
+ dbg_reference_subprogram=dbg_reference_subprogram,
+ )
+)
+
+print(PROLOGUE.substitute())
+for function in functions:
+ print(function)
+for all_function_piece in all_function:
+ print(all_function_piece, end='')
+print()
+print(DWARF_PROLOGUE.substitute(), end='')
+for dwarf_subprogram in dwarf_subprograms:
+ print(dwarf_subprogram, end='')
+for dwarf_all_subprogram_piece in dwarf_all_subprogram:
+ print(dwarf_all_subprogram_piece, end='')
+print()
+
+#--- debug.map.template
+---
+triple: 'x86_64-apple-darwin'
+objects:
+ - filename: ---TEMPORARY_DIR---/all.o
+ symbols:
+ - { sym: _all, objAddr: 0x0, binAddr: 0x0, size: 0x0 }
+...
diff --git a/llvm/test/tools/llvm-exegesis/X86/latency/cpu-pinning-execution-mode.s b/llvm/test/tools/llvm-exegesis/X86/latency/cpu-pinning-execution-mode.s
new file mode 100644
index 0000000..b73ac26
--- /dev/null
+++ b/llvm/test/tools/llvm-exegesis/X86/latency/cpu-pinning-execution-mode.s
@@ -0,0 +1,5 @@
+# REQUIRES: exegesis-can-measure-latency, x86_64-linux
+
+# RUN: not llvm-exegesis -mtriple=x86_64-unknown-unknown -mode=latency -opcode-name=ADD64rr -execution-mode=inprocess --benchmark-process-cpu=0 2>&1 | FileCheck %s
+
+# CHECK: llvm-exegesis error: The inprocess execution mode does not support benchmark core pinning.
diff --git a/llvm/test/tools/llvm-exegesis/X86/latency/cpu-pinning.s b/llvm/test/tools/llvm-exegesis/X86/latency/cpu-pinning.s
new file mode 100644
index 0000000..0ea3752
--- /dev/null
+++ b/llvm/test/tools/llvm-exegesis/X86/latency/cpu-pinning.s
@@ -0,0 +1,5 @@
+# REQUIRES: exegesis-can-measure-latency, x86_64-linux
+
+# RUN: llvm-exegesis -mtriple=x86_64-unknown-unknown -mode=latency -opcode-name=ADD64rr -execution-mode=subprocess | FileCheck %s
+
+# CHECK: - { key: latency, value: {{[0-9.]*}}, per_snippet_value: {{[0-9.]*}}
diff --git a/llvm/test/tools/llvm-readobj/COFF/arm64ec-chpe.yaml b/llvm/test/tools/llvm-readobj/COFF/arm64ec-chpe.yaml
index 1f5e7e1..91dde600 100644
--- a/llvm/test/tools/llvm-readobj/COFF/arm64ec-chpe.yaml
+++ b/llvm/test/tools/llvm-readobj/COFF/arm64ec-chpe.yaml
@@ -150,3 +150,34 @@ sections:
- UInt32: 4 # HybridImageInfoBitfield
symbols: []
...
+
+# RUN: yaml2obj --docnum=3 %s -o %t3
+# RUN: not llvm-readobj --coff-load-config %t3 2>&1 | FileCheck --check-prefix=ERR-EOF %s
+# ERR-EOF: The end of the file was unexpectedly encountered
+
+--- !COFF
+OptionalHeader:
+ ImageBase: 0x180000000
+ SectionAlignment: 4096
+ FileAlignment: 512
+ DLLCharacteristics: [ ]
+ LoadConfigTable:
+ RelativeVirtualAddress: 0x4000
+ Size: 320
+header:
+ Machine: IMAGE_FILE_MACHINE_AMD64
+ Characteristics: [ IMAGE_FILE_EXECUTABLE_IMAGE, IMAGE_FILE_LARGE_ADDRESS_AWARE, IMAGE_FILE_DLL ]
+sections:
+ - Name: .text
+ Characteristics: [ IMAGE_SCN_CNT_CODE, IMAGE_SCN_MEM_EXECUTE, IMAGE_SCN_MEM_READ ]
+ VirtualAddress: 0x1000
+ VirtualSize: 0x2050
+ - Name: .rdata
+ Characteristics: [ IMAGE_SCN_CNT_INITIALIZED_DATA, IMAGE_SCN_MEM_READ ]
+ VirtualAddress: 0x4000
+ VirtualSize: 512
+ StructuredData:
+ - LoadConfig:
+ CHPEMetadataPointer: 0x1800041AC
+symbols: []
+...
diff --git a/llvm/test/tools/llvm-reduce/mir/preserve-func-info.mir b/llvm/test/tools/llvm-reduce/mir/preserve-func-info.mir
index 5f11cea..f735dfd 100644
--- a/llvm/test/tools/llvm-reduce/mir/preserve-func-info.mir
+++ b/llvm/test/tools/llvm-reduce/mir/preserve-func-info.mir
@@ -14,6 +14,9 @@
# RESULT-NEXT: failedISel: true
# RESULT-NEXT: tracksRegLiveness: true
# RESULT-NEXT: hasWinCFI: true
+# RESULT-NEXT: noPhis: false
+# RESULT-NEXT: isSSA: false
+# RESULT-NEXT: noVRegs: false
# RESULT-NEXT: callsEHReturn: true
# RESULT-NEXT: callsUnwindInit: true
# RESULT-NEXT: hasEHCatchret: true
@@ -41,6 +44,9 @@ selected: true
failedISel: true
tracksRegLiveness: true
hasWinCFI: true
+noPhis: false
+isSSA: false
+noVRegs: false
failsVerification: true
tracksDebugUserValues: true
callsEHReturn: true
diff --git a/llvm/tools/gold/gold-plugin.cpp b/llvm/tools/gold/gold-plugin.cpp
index 0b175a3..0377791 100644
--- a/llvm/tools/gold/gold-plugin.cpp
+++ b/llvm/tools/gold/gold-plugin.cpp
@@ -1057,9 +1057,11 @@ static std::vector<std::pair<SmallString<128>, bool>> runLTO() {
getThinLTOOldAndNewSuffix(OldSuffix, NewSuffix);
for (claimed_file &F : Modules) {
- if (options::thinlto && !HandleToInputFile.count(F.leader_handle))
- HandleToInputFile.insert(std::make_pair(
- F.leader_handle, std::make_unique<PluginInputFile>(F.handle)));
+ if (options::thinlto) {
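+ // try_emplace performs a single lookup; the PluginInputFile is only
+ // constructed when the handle was not already tracked.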
+ auto [It, Inserted] = HandleToInputFile.try_emplace(F.leader_handle);
+ if (Inserted)
+ It->second = std::make_unique<PluginInputFile>(F.handle);
+ }
// In case we are thin linking with a minimized bitcode file, ensure
// the module paths encoded in the index reflect where the backends
// will locate the full bitcode files for compiling/importing.
diff --git a/llvm/tools/llvm-c-test/main.c b/llvm/tools/llvm-c-test/main.c
index 8be9ea0..badbe4b 100644
--- a/llvm/tools/llvm-c-test/main.c
+++ b/llvm/tools/llvm-c-test/main.c
@@ -109,8 +109,7 @@ int main(int argc, char **argv) {
return llvm_echo();
} else if (argc == 2 && !strcmp(argv[1], "--test-diagnostic-handler")) {
return llvm_test_diagnostic_handler();
- } else if (argc == 2 &&
- !strcmp(argv[1], "--test-dibuilder-debuginfo-format")) {
+ } else if (argc == 2 && !strcmp(argv[1], "--test-dibuilder")) {
return llvm_test_dibuilder();
} else {
print_usage();
diff --git a/llvm/tools/llvm-ctxprof-util/llvm-ctxprof-util.cpp b/llvm/tools/llvm-ctxprof-util/llvm-ctxprof-util.cpp
index 0fad4ee..485f6c7 100644
--- a/llvm/tools/llvm-ctxprof-util/llvm-ctxprof-util.cpp
+++ b/llvm/tools/llvm-ctxprof-util/llvm-ctxprof-util.cpp
@@ -48,7 +48,8 @@ static cl::opt<std::string> OutputFilename("output", cl::value_desc("output"),
// Save the bitstream profile from the JSON representation.
Error convertFromJSON() {
- auto BufOrError = MemoryBuffer::getFileOrSTDIN(InputFilename);
+ auto BufOrError =
+ MemoryBuffer::getFileOrSTDIN(InputFilename, /*IsText=*/true);
if (!BufOrError)
return createFileError(InputFilename, BufOrError.getError());
diff --git a/llvm/tools/llvm-debuginfod-find/llvm-debuginfod-find.cpp b/llvm/tools/llvm-debuginfod-find/llvm-debuginfod-find.cpp
index 1f4404a..699fcf8 100644
--- a/llvm/tools/llvm-debuginfod-find/llvm-debuginfod-find.cpp
+++ b/llvm/tools/llvm-debuginfod-find/llvm-debuginfod-find.cpp
@@ -98,45 +98,6 @@ static void parseArgs(int argc, char **argv) {
exit(1);
}
-/*
-cl::OptionCategory DebuginfodFindCategory("llvm-debuginfod-find Options");
-
-cl::opt<std::string> InputBuildID(cl::Positional, cl::Required,
- cl::desc("<input build_id>"), cl::init("-"),
- cl::cat(DebuginfodFindCategory));
-
-static cl::opt<bool>
- FetchExecutable("executable", cl::init(false),
- cl::desc("If set, fetch a binary file associated with this "
- "build id, containing the executable sections."),
- cl::cat(DebuginfodFindCategory));
-
-static cl::opt<bool>
- FetchDebuginfo("debuginfo", cl::init(false),
- cl::desc("If set, fetch a binary file associated with this "
- "build id, containing the debuginfo sections."),
- cl::cat(DebuginfodFindCategory));
-
-static cl::opt<std::string> FetchSource(
- "source", cl::init(""),
- cl::desc("Fetch a source file associated with this build id, which is at "
- "this relative path relative to the compilation directory."),
- cl::cat(DebuginfodFindCategory));
-
-static cl::opt<bool>
- DumpToStdout("dump", cl::init(false),
- cl::desc("If set, dumps the contents of the fetched artifact "
- "to standard output. Otherwise, dumps the absolute "
- "path to the cached artifact on disk."),
- cl::cat(DebuginfodFindCategory));
-
-static cl::list<std::string> DebugFileDirectory(
- "debug-file-directory",
- cl::desc("Path to directory where to look for debug files."),
- cl::cat(DebuginfodFindCategory));
-
-*/
-
ExitOnError ExitOnDebuginfodFindError;
static std::string fetchDebugInfo(object::BuildIDRef BuildID);
diff --git a/llvm/tools/llvm-exegesis/lib/BenchmarkRunner.cpp b/llvm/tools/llvm-exegesis/lib/BenchmarkRunner.cpp
index 4e60d33..9116b5c 100644
--- a/llvm/tools/llvm-exegesis/lib/BenchmarkRunner.cpp
+++ b/llvm/tools/llvm-exegesis/lib/BenchmarkRunner.cpp
@@ -98,7 +98,8 @@ class InProcessFunctionExecutorImpl : public BenchmarkRunner::FunctionExecutor {
public:
static Expected<std::unique_ptr<InProcessFunctionExecutorImpl>>
create(const LLVMState &State, object::OwningBinary<object::ObjectFile> Obj,
- BenchmarkRunner::ScratchSpace *Scratch) {
+ BenchmarkRunner::ScratchSpace *Scratch,
+ std::optional<int> BenchmarkProcessCPU) {
Expected<ExecutableFunction> EF =
ExecutableFunction::create(State.createTargetMachine(), std::move(Obj));
@@ -190,27 +191,31 @@ class SubProcessFunctionExecutorImpl
public:
static Expected<std::unique_ptr<SubProcessFunctionExecutorImpl>>
create(const LLVMState &State, object::OwningBinary<object::ObjectFile> Obj,
- const BenchmarkKey &Key) {
+ const BenchmarkKey &Key, std::optional<int> BenchmarkProcessCPU) {
Expected<ExecutableFunction> EF =
ExecutableFunction::create(State.createTargetMachine(), std::move(Obj));
if (!EF)
return EF.takeError();
return std::unique_ptr<SubProcessFunctionExecutorImpl>(
- new SubProcessFunctionExecutorImpl(State, std::move(*EF), Key));
+ new SubProcessFunctionExecutorImpl(State, std::move(*EF), Key,
+ BenchmarkProcessCPU));
}
private:
SubProcessFunctionExecutorImpl(const LLVMState &State,
ExecutableFunction Function,
- const BenchmarkKey &Key)
- : State(State), Function(std::move(Function)), Key(Key) {}
+ const BenchmarkKey &Key,
+ std::optional<int> BenchmarkCPU)
+ : State(State), Function(std::move(Function)), Key(Key),
+ BenchmarkProcessCPU(BenchmarkCPU) {}
enum ChildProcessExitCodeE {
CounterFDReadFailed = 1,
RSeqDisableFailed,
FunctionDataMappingFailed,
- AuxiliaryMemorySetupFailed
+ AuxiliaryMemorySetupFailed,
+ SetCPUAffinityFailed
};
StringRef childProcessExitCodeToString(int ExitCode) const {
@@ -223,6 +228,8 @@ private:
return "Failed to map memory for assembled snippet";
case ChildProcessExitCodeE::AuxiliaryMemorySetupFailed:
return "Failed to setup auxiliary memory";
+ case ChildProcessExitCodeE::SetCPUAffinityFailed:
+ return "Failed to set CPU affinity of the benchmarking process";
default:
return "Child process returned with unknown exit code";
}
@@ -384,6 +391,41 @@ private:
return make_error<SnippetSignal>(ChildSignalInfo.si_signo);
}
+ static void setCPUAffinityIfRequested(int CPUToUse) {
+// Special-case this function for x86_64 for now, as certain more esoteric
+// platforms define some of the relevant libc functions differently, which
+// causes build-time failures. Additionally, the subprocess executor mode (the
+// sole mode where this is supported) currently only supports x86_64.
+
+// Also check that we have the SYS_getcpu macro defined, meaning the syscall
+// actually exists within the build environment. We use the raw syscall
+// rather than the libc wrapper, since the wrapper for getcpu is only
+// available in glibc 2.29 and later.
+#if defined(__x86_64__) && defined(SYS_getcpu)
+ // Set the CPU affinity of the child process so that, if the user asked to
+ // run the benchmark on a specific CPU, the benchmarking process actually
+ // executes on that CPU.
+ cpu_set_t CPUMask;
+ CPU_ZERO(&CPUMask);
+ CPU_SET(CPUToUse, &CPUMask);
+ // TODO(boomanaiden154): Rewrite this to use LLVM primitives once they
+ // are available.
+ int SetAffinityReturn = sched_setaffinity(0, sizeof(CPUMask), &CPUMask);
+ if (SetAffinityReturn == -1) {
+ exit(ChildProcessExitCodeE::SetCPUAffinityFailed);
+ }
+
+ // Check (if assertions are enabled) that we are actually running on the
+ // CPU that was specified by the user.
+ [[maybe_unused]] unsigned int CurrentCPU;
+ assert(syscall(SYS_getcpu, &CurrentCPU, nullptr) == 0 &&
+ "Expected getcpu call to succeed.");
+ assert(static_cast<int>(CurrentCPU) == CPUToUse &&
+ "Expected current CPU to equal the CPU requested by the user");
+#else
+ // CPU pinning is not supported on this configuration; fail loudly rather
+ // than silently running unpinned.
+ exit(ChildProcessExitCodeE::SetCPUAffinityFailed);
+#endif // defined(__x86_64__) && defined(SYS_getcpu)
+ }
+
Error createSubProcessAndRunBenchmark(
StringRef CounterName, SmallVectorImpl<int64_t> &CounterValues,
ArrayRef<const char *> ValidationCounters,
@@ -416,6 +458,10 @@ private:
}
if (ParentOrChildPID == 0) {
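+ // Pin the child to the requested CPU before any benchmarking work so the
+ // snippet ultimately executes on that core.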
+ if (BenchmarkProcessCPU.has_value()) {
+ setCPUAffinityIfRequested(*BenchmarkProcessCPU);
+ }
+
// We are in the child process, close the write end of the pipe.
close(PipeFiles[1]);
// Unregister handlers, signal handling is now handled through ptrace in
@@ -538,6 +584,7 @@ private:
const LLVMState &State;
const ExecutableFunction Function;
const BenchmarkKey &Key;
+ const std::optional<int> BenchmarkProcessCPU;
};
#endif // __linux__
} // namespace
@@ -615,11 +662,15 @@ BenchmarkRunner::getRunnableConfiguration(
Expected<std::unique_ptr<BenchmarkRunner::FunctionExecutor>>
BenchmarkRunner::createFunctionExecutor(
object::OwningBinary<object::ObjectFile> ObjectFile,
- const BenchmarkKey &Key) const {
+ const BenchmarkKey &Key, std::optional<int> BenchmarkProcessCPU) const {
switch (ExecutionMode) {
case ExecutionModeE::InProcess: {
+ if (BenchmarkProcessCPU.has_value())
+ return make_error<Failure>("The inprocess execution mode does not "
+ "support benchmark core pinning.");
+
auto InProcessExecutorOrErr = InProcessFunctionExecutorImpl::create(
- State, std::move(ObjectFile), Scratch.get());
+ State, std::move(ObjectFile), Scratch.get(), BenchmarkProcessCPU);
if (!InProcessExecutorOrErr)
return InProcessExecutorOrErr.takeError();
@@ -628,7 +679,7 @@ BenchmarkRunner::createFunctionExecutor(
case ExecutionModeE::SubProcess: {
#ifdef __linux__
auto SubProcessExecutorOrErr = SubProcessFunctionExecutorImpl::create(
- State, std::move(ObjectFile), Key);
+ State, std::move(ObjectFile), Key, BenchmarkProcessCPU);
if (!SubProcessExecutorOrErr)
return SubProcessExecutorOrErr.takeError();
@@ -643,8 +694,8 @@ BenchmarkRunner::createFunctionExecutor(
}
std::pair<Error, Benchmark> BenchmarkRunner::runConfiguration(
- RunnableConfiguration &&RC,
- const std::optional<StringRef> &DumpFile) const {
+ RunnableConfiguration &&RC, const std::optional<StringRef> &DumpFile,
+ std::optional<int> BenchmarkProcessCPU) const {
Benchmark &BenchmarkResult = RC.BenchmarkResult;
object::OwningBinary<object::ObjectFile> &ObjectFile = RC.ObjectFile;
@@ -665,7 +716,8 @@ std::pair<Error, Benchmark> BenchmarkRunner::runConfiguration(
}
Expected<std::unique_ptr<BenchmarkRunner::FunctionExecutor>> Executor =
- createFunctionExecutor(std::move(ObjectFile), RC.BenchmarkResult.Key);
+ createFunctionExecutor(std::move(ObjectFile), RC.BenchmarkResult.Key,
+ BenchmarkProcessCPU);
if (!Executor)
return {Executor.takeError(), std::move(BenchmarkResult)};
auto NewMeasurements = runMeasurements(**Executor);
diff --git a/llvm/tools/llvm-exegesis/lib/BenchmarkRunner.h b/llvm/tools/llvm-exegesis/lib/BenchmarkRunner.h
index 9b4bb1d..e688b81 100644
--- a/llvm/tools/llvm-exegesis/lib/BenchmarkRunner.h
+++ b/llvm/tools/llvm-exegesis/lib/BenchmarkRunner.h
@@ -68,7 +68,8 @@ public:
std::pair<Error, Benchmark>
runConfiguration(RunnableConfiguration &&RC,
- const std::optional<StringRef> &DumpFile) const;
+ const std::optional<StringRef> &DumpFile,
+ std::optional<int> BenchmarkProcessCPU) const;
// Scratch space to run instructions that touch memory.
struct ScratchSpace {
@@ -135,7 +136,8 @@ private:
Expected<std::unique_ptr<FunctionExecutor>>
createFunctionExecutor(object::OwningBinary<object::ObjectFile> Obj,
- const BenchmarkKey &Key) const;
+ const BenchmarkKey &Key,
+ std::optional<int> BenchmarkProcessCPU) const;
};
} // namespace exegesis
diff --git a/llvm/tools/llvm-exegesis/llvm-exegesis.cpp b/llvm/tools/llvm-exegesis/llvm-exegesis.cpp
index e6a43cf..546ec770 100644
--- a/llvm/tools/llvm-exegesis/llvm-exegesis.cpp
+++ b/llvm/tools/llvm-exegesis/llvm-exegesis.cpp
@@ -269,6 +269,11 @@ static cl::list<ValidationEvent> ValidationCounters(
"counter to validate benchmarking assumptions"),
cl::CommaSeparated, cl::cat(BenchmarkOptions), ValidationEventOptions());
+static cl::opt<int> BenchmarkProcessCPU(
+ "benchmark-process-cpu",
+ cl::desc("The CPU number that the benchmarking process should executon on"),
+ cl::cat(BenchmarkOptions), cl::init(-1));
+
static ExitOnError ExitOnErr("llvm-exegesis error: ");
// Helper function that logs the error(s) and exits.
@@ -418,8 +423,12 @@ static void runBenchmarkConfigurations(
std::optional<StringRef> DumpFile;
if (DumpObjectToDisk.getNumOccurrences())
DumpFile = DumpObjectToDisk;
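+ // -1 is the flag's default and means no pinning was requested; translate
+ // it into an empty optional for the runner.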
+ const std::optional<int> BenchmarkCPU =
+ BenchmarkProcessCPU == -1
+ ? std::nullopt
+ : std::optional(BenchmarkProcessCPU.getValue());
auto [Err, BenchmarkResult] =
- Runner.runConfiguration(std::move(RC), DumpFile);
+ Runner.runConfiguration(std::move(RC), DumpFile, BenchmarkCPU);
if (Err) {
// Errors from executing the snippets are fine.
// All other errors are a framework issue and should fail.
diff --git a/llvm/tools/llvm-extract/llvm-extract.cpp b/llvm/tools/llvm-extract/llvm-extract.cpp
index 4ee644f..5fc9a31 100644
--- a/llvm/tools/llvm-extract/llvm-extract.cpp
+++ b/llvm/tools/llvm-extract/llvm-extract.cpp
@@ -297,9 +297,8 @@ int main(int argc, char **argv) {
Function *CF = CB->getCalledFunction();
if (!CF)
continue;
- if (CF->isDeclaration() || GVs.count(CF))
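+ // SetVector::insert returns false when CF is already present, folding the
+ // membership check and the insertion into a single step.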
+ if (CF->isDeclaration() || !GVs.insert(CF))
continue;
- GVs.insert(CF);
Workqueue.push_back(CF);
}
}
diff --git a/llvm/tools/llvm-ifs/llvm-ifs.cpp b/llvm/tools/llvm-ifs/llvm-ifs.cpp
index 169f601..b76ea8d 100644
--- a/llvm/tools/llvm-ifs/llvm-ifs.cpp
+++ b/llvm/tools/llvm-ifs/llvm-ifs.cpp
@@ -441,12 +441,9 @@ int llvm_ifs_main(int argc, char **argv, const llvm::ToolContext &) {
}
for (auto Symbol : TargetStub->Symbols) {
- auto SI = SymbolMap.find(Symbol.Name);
- if (SI == SymbolMap.end()) {
- SymbolMap.insert(
- std::pair<std::string, IFSSymbol>(Symbol.Name, Symbol));
+ auto [SI, Inserted] = SymbolMap.try_emplace(Symbol.Name, Symbol);
+ if (Inserted)
continue;
- }
assert(Symbol.Name == SI->second.Name && "Symbol Names Must Match.");
diff --git a/llvm/tools/llvm-reduce/ReducerWorkItem.cpp b/llvm/tools/llvm-reduce/ReducerWorkItem.cpp
index 1510e9f..5409b6d 100644
--- a/llvm/tools/llvm-reduce/ReducerWorkItem.cpp
+++ b/llvm/tools/llvm-reduce/ReducerWorkItem.cpp
@@ -52,6 +52,11 @@ extern cl::OptionCategory LLVMReduceOptions;
static cl::opt<std::string> TargetTriple("mtriple",
cl::desc("Set the target triple"),
cl::cat(LLVMReduceOptions));
+static cl::opt<bool> PrintInvalidMachineReductions(
+ "print-invalid-reduction-machine-verifier-errors",
+ cl::desc(
+ "Print machine verifier errors on invalid reduction attempts triple"),
+ cl::cat(LLVMReduceOptions));
static cl::opt<bool> TmpFilesAsBitcode(
"write-tmp-files-as-bitcode",
@@ -417,7 +422,7 @@ static std::unique_ptr<MachineFunction> cloneMF(MachineFunction *SrcMF,
DstMRI->freezeReservedRegs();
- DstMF->verify(nullptr, "", /*AbortOnError=*/true);
+ DstMF->verify(nullptr, "", &errs(), /*AbortOnError=*/true);
return DstMF;
}
@@ -450,8 +455,21 @@ bool ReducerWorkItem::verify(raw_fd_ostream *OS) const {
for (const Function &F : getModule()) {
if (const MachineFunction *MF = MMI->getMachineFunction(F)) {
- if (!MF->verify(nullptr, "", /*AbortOnError=*/false))
+ // Most reduction attempts currently fail the machine verifier. Avoid
+ // spamming large function dumps on nearly every attempt until the
+ // situation improves.
+ if (!MF->verify(nullptr, "",
+ /*OS=*/PrintInvalidMachineReductions ? &errs() : nullptr,
+ /*AbortOnError=*/false)) {
+
+ if (!PrintInvalidMachineReductions) {
+ WithColor::warning(errs())
+ << "reduction attempt on function '" << MF->getName()
+ << "' failed machine verifier (debug with "
+ "-print-invalid-reduction-machine-verifier-errors)\n";
+ }
return true;
+ }
}
}
diff --git a/llvm/tools/llvm-reduce/TestRunner.cpp b/llvm/tools/llvm-reduce/TestRunner.cpp
index 8a61aae..aac5c4a 100644
--- a/llvm/tools/llvm-reduce/TestRunner.cpp
+++ b/llvm/tools/llvm-reduce/TestRunner.cpp
@@ -13,17 +13,18 @@
using namespace llvm;
-TestRunner::TestRunner(StringRef TestName,
- const std::vector<std::string> &TestArgs,
+TestRunner::TestRunner(StringRef TestName, ArrayRef<std::string> RawTestArgs,
std::unique_ptr<ReducerWorkItem> Program,
std::unique_ptr<TargetMachine> TM, StringRef ToolName,
StringRef OutputName, bool InputIsBitcode,
bool OutputBitcode)
- : TestName(TestName), ToolName(ToolName), TestArgs(TestArgs),
- Program(std::move(Program)), TM(std::move(TM)),
- OutputFilename(OutputName), InputIsBitcode(InputIsBitcode),
- EmitBitcode(OutputBitcode) {
+ : TestName(TestName), ToolName(ToolName), Program(std::move(Program)),
+ TM(std::move(TM)), OutputFilename(OutputName),
+ InputIsBitcode(InputIsBitcode), EmitBitcode(OutputBitcode) {
assert(this->Program && "Initialized with null program?");
+
+ TestArgs.push_back(TestName); // argv[0]
+ TestArgs.append(RawTestArgs.begin(), RawTestArgs.end());
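+ // Precompute the argv prefix (test name plus fixed arguments) so run()
+ // only needs to append the candidate filename.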
}
static constexpr std::array<std::optional<StringRef>, 3> DefaultRedirects = {
@@ -33,18 +34,13 @@ static constexpr std::array<std::optional<StringRef>, 3> NullRedirects;
/// Runs the interestingness test, passes file to be tested as first argument
/// and other specified test arguments after that.
int TestRunner::run(StringRef Filename) const {
- std::vector<StringRef> ProgramArgs;
- ProgramArgs.push_back(TestName);
-
- for (const auto &Arg : TestArgs)
- ProgramArgs.push_back(Arg);
-
- ProgramArgs.push_back(Filename);
+ SmallVector<StringRef> ExecArgs(TestArgs);
+ ExecArgs.push_back(Filename);
std::string ErrMsg;
int Result =
- sys::ExecuteAndWait(TestName, ProgramArgs, /*Env=*/std::nullopt,
+ sys::ExecuteAndWait(TestName, ExecArgs, /*Env=*/std::nullopt,
Verbose ? DefaultRedirects : NullRedirects,
/*SecondsToWait=*/0, /*MemoryLimit=*/0, &ErrMsg);
diff --git a/llvm/tools/llvm-reduce/TestRunner.h b/llvm/tools/llvm-reduce/TestRunner.h
index 16d3dcd..930c324 100644
--- a/llvm/tools/llvm-reduce/TestRunner.h
+++ b/llvm/tools/llvm-reduce/TestRunner.h
@@ -25,7 +25,7 @@ namespace llvm {
// respective filename.
class TestRunner {
public:
- TestRunner(StringRef TestName, const std::vector<std::string> &TestArgs,
+ TestRunner(StringRef TestName, ArrayRef<std::string> TestArgs,
std::unique_ptr<ReducerWorkItem> Program,
std::unique_ptr<TargetMachine> TM, StringRef ToolName,
StringRef OutputFilename, bool InputIsBitcode, bool OutputBitcode);
@@ -55,7 +55,7 @@ public:
private:
StringRef TestName;
StringRef ToolName;
- const std::vector<std::string> &TestArgs;
+ SmallVector<StringRef> TestArgs;
std::unique_ptr<ReducerWorkItem> Program;
std::unique_ptr<TargetMachine> TM;
StringRef OutputFilename;
diff --git a/llvm/tools/llvm-reduce/deltas/ReduceDistinctMetadata.cpp b/llvm/tools/llvm-reduce/deltas/ReduceDistinctMetadata.cpp
index 32fca80..0212926 100644
--- a/llvm/tools/llvm-reduce/deltas/ReduceDistinctMetadata.cpp
+++ b/llvm/tools/llvm-reduce/deltas/ReduceDistinctMetadata.cpp
@@ -41,10 +41,8 @@ reduceNodes(MDNode *Root,
if (MDNode *Operand =
dyn_cast<MDNode>(CurrentNode->getOperand(I).get())) {
// Check whether node has been visited
- if (!VisitedNodes.contains(Operand)) {
+ if (VisitedNodes.insert(Operand))
NodesToTraverse.push(Operand);
- VisitedNodes.insert(Operand);
- }
// Delete the node only if it is distinct
if (Operand->isDistinct()) {
// Add to removal list
@@ -74,10 +72,8 @@ static void cleanUpTemporaries(NamedMDNode &NamedNode, MDTuple *TemporaryTuple,
// If the node hasn't been traversed yet, add it to the queue of nodes to
// traverse.
if (MDTuple *TupleI = dyn_cast<MDTuple>((*I))) {
- if (!VisitedNodes.contains(TupleI)) {
+ if (VisitedNodes.insert(TupleI))
NodesToTraverse.push(TupleI);
- VisitedNodes.insert(TupleI);
- }
}
}
@@ -113,12 +109,10 @@ static void cleanUpTemporaries(NamedMDNode &NamedNode, MDTuple *TemporaryTuple,
// Push the remaining nodes into the queue
for (unsigned int I = 0; I < CurrentTuple->getNumOperands(); ++I) {
MDTuple *Operand = dyn_cast<MDTuple>(CurrentTuple->getOperand(I).get());
- if (Operand && !VisitedNodes.contains(Operand)) {
- NodesToTraverse.push(Operand);
+ if (Operand && VisitedNodes.insert(Operand))
// If the node hasn't been traversed yet, add it to the queue of nodes
// to traverse.
- VisitedNodes.insert(Operand);
- }
+ NodesToTraverse.push(Operand);
}
}
}
diff --git a/llvm/unittests/IR/IRBuilderTest.cpp b/llvm/unittests/IR/IRBuilderTest.cpp
index 64e3b9c..d5239f2 100644
--- a/llvm/unittests/IR/IRBuilderTest.cpp
+++ b/llvm/unittests/IR/IRBuilderTest.cpp
@@ -1142,12 +1142,12 @@ TEST_F(IRBuilderTest, InsertExtractElement) {
EXPECT_EQ(Elt2, X2);
}
-TEST_F(IRBuilderTest, CreateGlobalStringPtr) {
+TEST_F(IRBuilderTest, CreateGlobalString) {
IRBuilder<> Builder(BB);
- auto String1a = Builder.CreateGlobalStringPtr("TestString", "String1a");
- auto String1b = Builder.CreateGlobalStringPtr("TestString", "String1b", 0);
- auto String2 = Builder.CreateGlobalStringPtr("TestString", "String2", 1);
+ auto String1a = Builder.CreateGlobalString("TestString", "String1a");
+ auto String1b = Builder.CreateGlobalString("TestString", "String1b", 0);
+ auto String2 = Builder.CreateGlobalString("TestString", "String2", 1);
auto String3 = Builder.CreateGlobalString("TestString", "String3", 2);
EXPECT_TRUE(String1a->getType()->getPointerAddressSpace() == 0);
diff --git a/llvm/unittests/MI/LiveIntervalTest.cpp b/llvm/unittests/MI/LiveIntervalTest.cpp
index 7dcd82f..f910e8e 100644
--- a/llvm/unittests/MI/LiveIntervalTest.cpp
+++ b/llvm/unittests/MI/LiveIntervalTest.cpp
@@ -101,7 +101,9 @@ struct TestPassT : public TestPass {
bool runOnMachineFunction(MachineFunction &MF) override {
AnalysisType &A = getAnalysis<AnalysisType>();
T(MF, A);
- EXPECT_EQ(MF.verify(this, /* Banner */ nullptr, /* AbortOnError */ false),
+ EXPECT_EQ(MF.verify(this, /*Banner=*/nullptr,
+ /*OS=*/nullptr,
+ /*AbortOnError=*/false),
ShouldPass);
return true;
}
diff --git a/llvm/unittests/Option/OptionMarshallingTest.cpp b/llvm/unittests/Option/OptionMarshallingTest.cpp
index 0464e27..2ec422f 100644
--- a/llvm/unittests/Option/OptionMarshallingTest.cpp
+++ b/llvm/unittests/Option/OptionMarshallingTest.cpp
@@ -1,4 +1,4 @@
-//===- unittest/Support/OptionMarshallingTest.cpp - OptParserEmitter tests ===//
+//===- OptionMarshallingTest.cpp - OptionParserEmitter tests -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
diff --git a/llvm/unittests/SandboxIR/SandboxIRTest.cpp b/llvm/unittests/SandboxIR/SandboxIRTest.cpp
index bd6a4c2..42df096 100644
--- a/llvm/unittests/SandboxIR/SandboxIRTest.cpp
+++ b/llvm/unittests/SandboxIR/SandboxIRTest.cpp
@@ -14,6 +14,7 @@
#include "llvm/IR/Function.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Module.h"
+#include "llvm/SandboxIR/Utils.h"
#include "llvm/Support/SourceMgr.h"
#include "gmock/gmock-matchers.h"
#include "gtest/gtest.h"
@@ -1179,6 +1180,24 @@ define ptr @foo() {
EXPECT_EQ(PtrAuth->getWithSameSchema(&F), PtrAuth);
}
+TEST_F(SandboxIRTest, ConstantExpr) {
+ parseIR(C, R"IR(
+define i32 @foo() {
+ ret i32 ptrtoint (ptr @foo to i32)
+}
+)IR");
+ Function &LLVMF = *M->getFunction("foo");
+ sandboxir::Context Ctx(C);
+
+ auto &F = *Ctx.createFunction(&LLVMF);
+ auto *BB = &*F.begin();
+ auto It = BB->begin();
+ auto *Ret = cast<sandboxir::ReturnInst>(&*It++);
+ // Check classof(), creation.
+ [[maybe_unused]] auto *ConstExpr =
+ cast<sandboxir::ConstantExpr>(Ret->getReturnValue());
+}
+
TEST_F(SandboxIRTest, BlockAddress) {
parseIR(C, R"IR(
define void @foo(ptr %ptr) {
@@ -1373,6 +1392,8 @@ OperandNo: 0
EXPECT_TRUE(I0->hasNUses(1u));
EXPECT_FALSE(I0->hasNUses(2u));
+ // Check Value.getExpectedType
+
// Check User.setOperand().
Ret->setOperand(0, Arg0);
EXPECT_EQ(Ret->getOperand(0), Arg0);
@@ -1436,7 +1457,6 @@ define i32 @foo(i32 %arg0, i32 %arg1) {
Replaced = Ret->replaceUsesOfWith(I0, Arg0);
EXPECT_TRUE(Replaced);
EXPECT_EQ(Ret->getOperand(0), Arg0);
-
// Check RAUW on constant.
auto *Glob0 = cast<sandboxir::Constant>(I1->getOperand(0));
auto *Glob1 = cast<sandboxir::Constant>(I2->getOperand(0));
@@ -1445,6 +1465,68 @@ define i32 @foo(i32 %arg0, i32 %arg1) {
EXPECT_EQ(Glob0->getOperand(0), Glob1);
}
+TEST_F(SandboxIRTest, GetExpected) {
+ parseIR(C, R"IR(
+define float @foo(float %v, ptr %ptr) {
+ %add = fadd float %v, %v
+ store float %v, ptr %ptr
+ ret float %v
+}
+define void @bar(float %v, ptr %ptr) {
+ ret void
+}
+)IR");
+ llvm::Function &Foo = *M->getFunction("foo");
+ sandboxir::Context Ctx(C);
+
+ Ctx.createFunction(&Foo);
+ auto *FooBB = cast<sandboxir::BasicBlock>(Ctx.getValue(&*Foo.begin()));
+ auto FooIt = FooBB->begin();
+ auto Add = cast<sandboxir::Instruction>(&*FooIt++);
+ auto *S0 = cast<sandboxir::Instruction>(&*FooIt++);
+ auto *RetF = cast<sandboxir::Instruction>(&*FooIt++);
+ // getExpectedValue
+ EXPECT_EQ(sandboxir::Utils::getExpectedValue(Add), Add);
+ EXPECT_EQ(sandboxir::Utils::getExpectedValue(S0),
+ cast<sandboxir::StoreInst>(S0)->getValueOperand());
+ EXPECT_EQ(sandboxir::Utils::getExpectedValue(RetF),
+ cast<sandboxir::ReturnInst>(RetF)->getReturnValue());
+ // getExpectedType
+ EXPECT_EQ(sandboxir::Utils::getExpectedType(Add), Add->getType());
+ EXPECT_EQ(sandboxir::Utils::getExpectedType(S0),
+ cast<sandboxir::StoreInst>(S0)->getValueOperand()->getType());
+ EXPECT_EQ(sandboxir::Utils::getExpectedType(RetF),
+ cast<sandboxir::ReturnInst>(RetF)->getReturnValue()->getType());
+
+ // getExpectedValue for void returns
+ llvm::Function &Bar = *M->getFunction("bar");
+ Ctx.createFunction(&Bar);
+ auto *BarBB = cast<sandboxir::BasicBlock>(Ctx.getValue(&*Bar.begin()));
+ auto BarIt = BarBB->begin();
+ auto *RetV = cast<sandboxir::Instruction>(&*BarIt++);
+ EXPECT_EQ(sandboxir::Utils::getExpectedValue(RetV), nullptr);
+}
+
+TEST_F(SandboxIRTest, GetNumBits) {
+ parseIR(C, R"IR(
+define void @foo(float %arg0, double %arg1, i8 %arg2, i64 %arg3) {
+bb0:
+ ret void
+}
+)IR");
+ llvm::Function &Foo = *M->getFunction("foo");
+ sandboxir::Context Ctx(C);
+ sandboxir::Function *F = Ctx.createFunction(&Foo);
+ const DataLayout &DL = M->getDataLayout();
+ // getNumBits for scalars
+ EXPECT_EQ(sandboxir::Utils::getNumBits(F->getArg(0), DL),
+ DL.getTypeSizeInBits(Type::getFloatTy(C)));
+ EXPECT_EQ(sandboxir::Utils::getNumBits(F->getArg(1), DL),
+ DL.getTypeSizeInBits(Type::getDoubleTy(C)));
+ EXPECT_EQ(sandboxir::Utils::getNumBits(F->getArg(2), DL), 8u);
+ EXPECT_EQ(sandboxir::Utils::getNumBits(F->getArg(3), DL), 64u);
+}
+
TEST_F(SandboxIRTest, RAUW_RUWIf) {
parseIR(C, R"IR(
define void @foo(ptr %ptr) {
@@ -1673,17 +1755,32 @@ bb0:
TEST_F(SandboxIRTest, Instruction) {
parseIR(C, R"IR(
-define void @foo(i8 %v1) {
+define void @foo(i8 %v1, ptr %ptr) {
+bb0:
%add0 = add i8 %v1, %v1
%sub1 = sub i8 %add0, %v1
ret void
+
+bb1:
+ %add1 = add i8 %v1, %v1
+ %sub2 = sub i8 %add1, %v1
+ %ld0 = load i8, ptr %ptr
+ store i8 %ld0, ptr %ptr
+ store volatile i8 %ld0, ptr %ptr
+ %atomicrmw = atomicrmw add ptr %ptr, i8 %v1 acquire
+ %udiv = udiv i8 %ld0, %v1
+ %urem = urem i8 %ld0, %v1
+ call void @foo()
+ ret void
}
)IR");
llvm::Function *LLVMF = &*M->getFunction("foo");
+ llvm::BasicBlock *LLVMBB1 = getBasicBlockByName(*LLVMF, "bb1");
sandboxir::Context Ctx(C);
sandboxir::Function *F = Ctx.createFunction(LLVMF);
auto *Arg = F->getArg(0);
- auto *BB = &*F->begin();
+ auto *BB = cast<sandboxir::BasicBlock>(
+ Ctx.getValue(getBasicBlockByName(*LLVMF, "bb0")));
auto It = BB->begin();
auto *I0 = &*It++;
auto *I1 = &*It++;
@@ -1762,6 +1859,54 @@ define void @foo(i8 %v1) {
I1->eraseFromParent();
EXPECT_EQ(I0->getNumUses(), 0u);
EXPECT_EQ(I0->getNextNode(), Ret);
+
+ for (auto &LLVMI : *LLVMBB1) {
+ auto &I = cast<sandboxir::Instruction>(*Ctx.getValue(&LLVMI));
+ // Check isTerminator().
+ EXPECT_EQ(LLVMI.isTerminator(), I.isTerminator());
+ // Check isUnaryOp().
+ EXPECT_EQ(LLVMI.isUnaryOp(), I.isUnaryOp());
+ // Check isBinaryOp().
+ EXPECT_EQ(LLVMI.isBinaryOp(), I.isBinaryOp());
+ // Check isIntDivRem().
+ EXPECT_EQ(LLVMI.isIntDivRem(), I.isIntDivRem());
+ // Check isShift().
+ EXPECT_EQ(LLVMI.isShift(), I.isShift());
+ // Check isCast().
+ EXPECT_EQ(LLVMI.isCast(), I.isCast());
+ // Check isAssociative().
+ EXPECT_EQ(LLVMI.isAssociative(), I.isAssociative());
+ // Check isCommutative().
+ EXPECT_EQ(LLVMI.isCommutative(), I.isCommutative());
+ // Check isIdempotent().
+ EXPECT_EQ(LLVMI.isIdempotent(), I.isIdempotent());
+ // Check isNilpotent().
+ EXPECT_EQ(LLVMI.isNilpotent(), I.isNilpotent());
+ // Check mayWriteToMemory().
+ EXPECT_EQ(LLVMI.mayWriteToMemory(), I.mayWriteToMemory());
+ // Check mayReadFromMemory().
+ EXPECT_EQ(LLVMI.mayReadFromMemory(), I.mayReadFromMemory());
+ // Check mayReadOrWriteMemory().
+ EXPECT_EQ(LLVMI.mayReadOrWriteMemory(), I.mayReadOrWriteMemory());
+ // Check isAtomic().
+ EXPECT_EQ(LLVMI.isAtomic(), I.isAtomic());
+ if (I.isAtomic()) {
+ // Check hasAtomicLoad().
+ EXPECT_EQ(LLVMI.hasAtomicLoad(), I.hasAtomicLoad());
+ // Check hasAtomicStore().
+ EXPECT_EQ(LLVMI.hasAtomicStore(), I.hasAtomicStore());
+ }
+ // Check isVolatile().
+ EXPECT_EQ(LLVMI.isVolatile(), I.isVolatile());
+ // Check getAccessType().
+ EXPECT_EQ(Ctx.getType(LLVMI.getAccessType()), I.getAccessType());
+ // Check mayThrow().
+ EXPECT_EQ(LLVMI.mayThrow(), I.mayThrow());
+ // Check isFenceLike().
+ EXPECT_EQ(LLVMI.isFenceLike(), I.isFenceLike());
+ // Check mayHaveSideEffects().
+ EXPECT_EQ(LLVMI.mayHaveSideEffects(), I.mayHaveSideEffects());
+ }
}
TEST_F(SandboxIRTest, Instruction_isStackSaveOrRestoreIntrinsic) {
diff --git a/llvm/unittests/Support/raw_ostream_test.cpp b/llvm/unittests/Support/raw_ostream_test.cpp
index 99aa350..a35edd6 100644
--- a/llvm/unittests/Support/raw_ostream_test.cpp
+++ b/llvm/unittests/Support/raw_ostream_test.cpp
@@ -188,6 +188,16 @@ TEST(raw_ostreamTest, Indent) {
EXPECT_EQ(Spaces(5), printToString(Indent));
Indent -= 1;
EXPECT_EQ(Spaces(4), printToString(Indent));
+
+ // Scaled indent.
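+ // indent(4, 2) is four levels of two columns each (8 spaces); += and -=
+ // adjust the level count, not the per-level width.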
+ indent Scaled(4, 2);
+ EXPECT_EQ(Spaces(8), printToString(Scaled));
+ EXPECT_EQ(Spaces(10), printToString(Scaled + 1));
+ EXPECT_EQ(Spaces(6), printToString(Scaled - 1));
+ Scaled += 1;
+ EXPECT_EQ(Spaces(10), printToString(Scaled));
+ Scaled -= 1;
+ EXPECT_EQ(Spaces(8), printToString(Scaled));
}
TEST(raw_ostreamTest, FormatHex) {
diff --git a/llvm/unittests/Transforms/Vectorize/SandboxVectorizer/RegionTest.cpp b/llvm/unittests/Transforms/Vectorize/SandboxVectorizer/RegionTest.cpp
index 2c7390c5..0318d32 100644
--- a/llvm/unittests/Transforms/Vectorize/SandboxVectorizer/RegionTest.cpp
+++ b/llvm/unittests/Transforms/Vectorize/SandboxVectorizer/RegionTest.cpp
@@ -45,9 +45,8 @@ define i8 @foo(i8 %v0, i8 %v1) {
auto *Ret = cast<sandboxir::Instruction>(&*It++);
sandboxir::Region Rgn(Ctx);
- // Check getters
+ // Check getContext.
EXPECT_EQ(&Ctx, &Rgn.getContext());
- EXPECT_EQ(0U, Rgn.getID());
// Check add / remove / empty.
EXPECT_TRUE(Rgn.empty());
@@ -79,3 +78,104 @@ define i8 @foo(i8 %v0, i8 %v1) {
EXPECT_EQ(Rgn, Other);
#endif
}
+
+TEST_F(RegionTest, MetadataFromIR) {
+ parseIR(C, R"IR(
+define i8 @foo(i8 %v0, i8 %v1) {
+ %t0 = add i8 %v0, 1, !sandboxvec !0
+ %t1 = add i8 %t0, %v1, !sandboxvec !1
+ %t2 = add i8 %t1, %v1, !sandboxvec !1
+ ret i8 %t2
+}
+
+!0 = distinct !{!"sandboxregion"}
+!1 = distinct !{!"sandboxregion"}
+)IR");
+ llvm::Function *LLVMF = &*M->getFunction("foo");
+ sandboxir::Context Ctx(C);
+ auto *F = Ctx.createFunction(LLVMF);
+ auto *BB = &*F->begin();
+ auto It = BB->begin();
+ auto *T0 = cast<sandboxir::Instruction>(&*It++);
+ auto *T1 = cast<sandboxir::Instruction>(&*It++);
+ auto *T2 = cast<sandboxir::Instruction>(&*It++);
+
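+ // Instructions sharing the same distinct !sandboxvec node should be
+ // grouped into the same region: !0 -> {T0}, !1 -> {T1, T2}.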
+ SmallVector<std::unique_ptr<sandboxir::Region>> Regions =
+ sandboxir::Region::createRegionsFromMD(*F);
+ EXPECT_THAT(Regions[0]->insts(), testing::UnorderedElementsAre(T0));
+ EXPECT_THAT(Regions[1]->insts(), testing::UnorderedElementsAre(T1, T2));
+}
+
+TEST_F(RegionTest, DumpedMetadata) {
+ parseIR(C, R"IR(
+define i8 @foo(i8 %v0, i8 %v1) {
+ %t0 = add i8 %v0, 1
+ %t1 = add i8 %t0, %v1
+ %t2 = add i8 %t1, %v1
+ ret i8 %t1
+}
+)IR");
+ llvm::Function *LLVMF = &*M->getFunction("foo");
+ sandboxir::Context Ctx(C);
+ auto *F = Ctx.createFunction(LLVMF);
+ auto *BB = &*F->begin();
+ auto It = BB->begin();
+ auto *T0 = cast<sandboxir::Instruction>(&*It++);
+ [[maybe_unused]] auto *T1 = cast<sandboxir::Instruction>(&*It++);
+ auto *T2 = cast<sandboxir::Instruction>(&*It++);
+ [[maybe_unused]] auto *Ret = cast<sandboxir::Instruction>(&*It++);
+ sandboxir::Region Rgn(Ctx);
+ Rgn.add(T0);
+ sandboxir::Region Rgn2(Ctx);
+ Rgn2.add(T2);
+
+ std::string output;
+ llvm::raw_string_ostream RSO(output);
+ M->print(RSO, nullptr, /*ShouldPreserveUseListOrder=*/true,
+ /*IsForDebug=*/true);
+
+ // TODO: Replace this with a lit test, which is more suitable for this kind
+ // of IR comparison.
+ std::string expected = R"(; ModuleID = '<string>'
+source_filename = "<string>"
+
+define i8 @foo(i8 %v0, i8 %v1) {
+ %t0 = add i8 %v0, 1, !sandboxvec !0
+ %t1 = add i8 %t0, %v1
+ %t2 = add i8 %t1, %v1, !sandboxvec !1
+ ret i8 %t1
+}
+
+!0 = distinct !{!"sandboxregion"}
+!1 = distinct !{!"sandboxregion"}
+)";
+ EXPECT_EQ(expected, output);
+}
+
+TEST_F(RegionTest, MetadataRoundTrip) {
+ parseIR(C, R"IR(
+define i8 @foo(i8 %v0, i8 %v1) {
+ %t0 = add i8 %v0, 1
+ %t1 = add i8 %t0, %v1
+ ret i8 %t1
+}
+)IR");
+ llvm::Function *LLVMF = &*M->getFunction("foo");
+ sandboxir::Context Ctx(C);
+ auto *F = Ctx.createFunction(LLVMF);
+ auto *BB = &*F->begin();
+ auto It = BB->begin();
+ auto *T0 = cast<sandboxir::Instruction>(&*It++);
+ auto *T1 = cast<sandboxir::Instruction>(&*It++);
+
+ sandboxir::Region Rgn(Ctx);
+ Rgn.add(T0);
+ Rgn.add(T1);
+
+ SmallVector<std::unique_ptr<sandboxir::Region>> Regions =
+ sandboxir::Region::createRegionsFromMD(*F);
+ ASSERT_EQ(1U, Regions.size());
+#ifndef NDEBUG
+ EXPECT_EQ(Rgn, *Regions[0].get());
+#endif
+}
diff --git a/llvm/utils/TableGen/CMakeLists.txt b/llvm/utils/TableGen/CMakeLists.txt
index abebb987..ba1e4aa0 100644
--- a/llvm/utils/TableGen/CMakeLists.txt
+++ b/llvm/utils/TableGen/CMakeLists.txt
@@ -59,8 +59,8 @@ add_tablegen(llvm-tblgen LLVM
InstrInfoEmitter.cpp
IntrinsicEmitter.cpp
MacroFusionPredicatorEmitter.cpp
- OptParserEmitter.cpp
- OptRSTEmitter.cpp
+ OptionParserEmitter.cpp
+ OptionRSTEmitter.cpp
PseudoLoweringEmitter.cpp
RegisterBankEmitter.cpp
RegisterInfoEmitter.cpp
diff --git a/llvm/utils/TableGen/Common/CodeGenDAGPatterns.cpp b/llvm/utils/TableGen/Common/CodeGenDAGPatterns.cpp
index fd80bc6..e8cf7e3 100644
--- a/llvm/utils/TableGen/Common/CodeGenDAGPatterns.cpp
+++ b/llvm/utils/TableGen/Common/CodeGenDAGPatterns.cpp
@@ -910,22 +910,16 @@ bool TreePredicateFn::hasPredCode() const {
std::string TreePredicateFn::getPredCode() const {
std::string Code;
- if (!isLoad() && !isStore() && !isAtomic()) {
- Record *MemoryVT = getMemoryVT();
-
- if (MemoryVT)
- PrintFatalError(getOrigPatFragRecord()->getRecord()->getLoc(),
- "MemoryVT requires IsLoad or IsStore");
- }
+ if (!isLoad() && !isStore() && !isAtomic() && getMemoryVT())
+ PrintFatalError(getOrigPatFragRecord()->getRecord()->getLoc(),
+ "MemoryVT requires IsLoad or IsStore");
if (!isLoad() && !isStore()) {
if (isUnindexed())
PrintFatalError(getOrigPatFragRecord()->getRecord()->getLoc(),
"IsUnindexed requires IsLoad or IsStore");
- Record *ScalarMemoryVT = getScalarMemoryVT();
-
- if (ScalarMemoryVT)
+ if (getScalarMemoryVT())
PrintFatalError(getOrigPatFragRecord()->getRecord()->getLoc(),
"ScalarMemoryVT requires IsLoad or IsStore");
}
@@ -1016,15 +1010,15 @@ std::string TreePredicateFn::getPredCode() const {
}
if (isLoad() || isStore() || isAtomic()) {
- if (ListInit *AddressSpaces = getAddressSpaces()) {
+ if (const ListInit *AddressSpaces = getAddressSpaces()) {
Code += "unsigned AddrSpace = cast<MemSDNode>(N)->getAddressSpace();\n"
" if (";
ListSeparator LS(" && ");
- for (Init *Val : AddressSpaces->getValues()) {
+ for (const Init *Val : AddressSpaces->getValues()) {
Code += LS;
- IntInit *IntVal = dyn_cast<IntInit>(Val);
+ const IntInit *IntVal = dyn_cast<IntInit>(Val);
if (!IntVal) {
PrintFatalError(getOrigPatFragRecord()->getRecord()->getLoc(),
"AddressSpaces element must be integer");
@@ -1043,9 +1037,7 @@ std::string TreePredicateFn::getPredCode() const {
Code += "))\nreturn false;\n";
}
- Record *MemoryVT = getMemoryVT();
-
- if (MemoryVT)
+ if (const Record *MemoryVT = getMemoryVT())
Code += ("if (cast<MemSDNode>(N)->getMemoryVT() != MVT::" +
MemoryVT->getName() + ") return false;\n")
.str();
@@ -1129,9 +1121,7 @@ std::string TreePredicateFn::getPredCode() const {
" if (!cast<StoreSDNode>(N)->isTruncatingStore()) return false;\n";
}
- Record *ScalarMemoryVT = getScalarMemoryVT();
-
- if (ScalarMemoryVT)
+ if (const Record *ScalarMemoryVT = getScalarMemoryVT())
Code += ("if (cast<" + SDNodeName +
">(N)->getMemoryVT().getScalarType() != MVT::" +
ScalarMemoryVT->getName() + ") return false;\n")
@@ -1254,14 +1244,14 @@ bool TreePredicateFn::isAtomicOrderingWeakerThanRelease() const {
return isPredefinedPredicateEqualTo("IsAtomicOrderingReleaseOrStronger",
false);
}
-Record *TreePredicateFn::getMemoryVT() const {
+const Record *TreePredicateFn::getMemoryVT() const {
const Record *R = getOrigPatFragRecord()->getRecord();
if (R->isValueUnset("MemoryVT"))
return nullptr;
return R->getValueAsDef("MemoryVT");
}
-ListInit *TreePredicateFn::getAddressSpaces() const {
+const ListInit *TreePredicateFn::getAddressSpaces() const {
const Record *R = getOrigPatFragRecord()->getRecord();
if (R->isValueUnset("AddressSpaces"))
return nullptr;
@@ -1275,7 +1265,7 @@ int64_t TreePredicateFn::getMinAlignment() const {
return R->getValueAsInt("MinAlignment");
}
-Record *TreePredicateFn::getScalarMemoryVT() const {
+const Record *TreePredicateFn::getScalarMemoryVT() const {
const Record *R = getOrigPatFragRecord()->getRecord();
if (R->isValueUnset("ScalarMemoryVT"))
return nullptr;
@@ -1419,11 +1409,11 @@ std::string TreePredicateFn::getCodeToRunOnSDNode() const {
static bool isImmAllOnesAllZerosMatch(const TreePatternNode &P) {
if (!P.isLeaf())
return false;
- DefInit *DI = dyn_cast<DefInit>(P.getLeafValue());
+ const DefInit *DI = dyn_cast<DefInit>(P.getLeafValue());
if (!DI)
return false;
- Record *R = DI->getDef();
+ const Record *R = DI->getDef();
return R->getName() == "immAllOnesV" || R->getName() == "immAllZerosV";
}
@@ -1483,10 +1473,10 @@ int PatternToMatch::getPatternComplexity(const CodeGenDAGPatterns &CGP) const {
}
void PatternToMatch::getPredicateRecords(
- SmallVectorImpl<Record *> &PredicateRecs) const {
- for (Init *I : Predicates->getValues()) {
- if (DefInit *Pred = dyn_cast<DefInit>(I)) {
- Record *Def = Pred->getDef();
+ SmallVectorImpl<const Record *> &PredicateRecs) const {
+ for (const Init *I : Predicates->getValues()) {
+ if (const DefInit *Pred = dyn_cast<DefInit>(I)) {
+ const Record *Def = Pred->getDef();
if (!Def->isSubClassOf("Predicate")) {
#ifndef NDEBUG
Def->dump();
@@ -1506,13 +1496,13 @@ void PatternToMatch::getPredicateRecords(
/// pattern's predicates concatenated with "&&" operators.
///
std::string PatternToMatch::getPredicateCheck() const {
- SmallVector<Record *, 4> PredicateRecs;
+ SmallVector<const Record *, 4> PredicateRecs;
getPredicateRecords(PredicateRecs);
SmallString<128> PredicateCheck;
raw_svector_ostream OS(PredicateCheck);
ListSeparator LS(" && ");
- for (Record *Pred : PredicateRecs) {
+ for (const Record *Pred : PredicateRecs) {
StringRef CondString = Pred->getValueAsString("CondString");
if (CondString.empty())
continue;
@@ -1659,7 +1649,7 @@ bool SDTypeConstraint::ApplyTypeConstraint(TreePatternNode &N,
TP.error(N.getOperator()->getName() + " expects a VT operand!");
return false;
}
- DefInit *DI = cast<DefInit>(NodeToApply.getLeafValue());
+ const DefInit *DI = cast<DefInit>(NodeToApply.getLeafValue());
const CodeGenTarget &T = TP.getDAGPatterns().getTargetInfo();
auto VVT = getValueTypeByHwMode(DI->getDef(), T.getHwModes());
TypeSetByHwMode TypeListTmp(VVT);
@@ -1731,7 +1721,7 @@ bool TreePatternNode::UpdateNodeTypeFromInst(unsigned ResNo,
// The Operand class specifies a type directly.
if (Operand->isSubClassOf("Operand")) {
- Record *R = Operand->getValueAsDef("Type");
+ const Record *R = Operand->getValueAsDef("Type");
const CodeGenTarget &T = TP.getDAGPatterns().getTargetInfo();
return UpdateNodeType(ResNo, getValueTypeByHwMode(R, T.getHwModes()), TP);
}
@@ -1802,7 +1792,7 @@ bool TreePatternNode::setDefaultMode(unsigned Mode) {
SDNodeInfo::SDNodeInfo(const Record *R, const CodeGenHwModes &CGH) : Def(R) {
EnumName = R->getValueAsString("Opcode");
SDClassName = R->getValueAsString("SDClass");
- Record *TypeProfile = R->getValueAsDef("TypeProfile");
+ const Record *TypeProfile = R->getValueAsDef("TypeProfile");
NumResults = TypeProfile->getValueAsInt("NumResults");
NumOperands = TypeProfile->getValueAsInt("NumOperands");
@@ -1810,9 +1800,7 @@ SDNodeInfo::SDNodeInfo(const Record *R, const CodeGenHwModes &CGH) : Def(R) {
Properties = parseSDPatternOperatorProperties(R);
// Parse the type constraints.
- std::vector<Record *> ConstraintList =
- TypeProfile->getValueAsListOfDefs("Constraints");
- for (Record *R : ConstraintList)
+ for (const Record *R : TypeProfile->getValueAsListOfDefs("Constraints"))
TypeConstraints.emplace_back(R, CGH);
}
@@ -1872,13 +1860,13 @@ static unsigned GetNumNodeResults(const Record *Operator,
return NumResults;
}
- ListInit *LI = Operator->getValueAsListInit("Fragments");
+ const ListInit *LI = Operator->getValueAsListInit("Fragments");
assert(LI && "Invalid Fragment");
unsigned NumResults = 0;
- for (Init *I : LI->getValues()) {
- Record *Op = nullptr;
- if (DagInit *Dag = dyn_cast<DagInit>(I))
- if (DefInit *DI = dyn_cast<DefInit>(Dag->getOperator()))
+ for (const Init *I : LI->getValues()) {
+ const Record *Op = nullptr;
+ if (const DagInit *Dag = dyn_cast<DagInit>(I))
+ if (const DefInit *DI = dyn_cast<DefInit>(Dag->getOperator()))
Op = DI->getDef();
assert(Op && "Invalid Fragment");
NumResults = std::max(NumResults, GetNumNodeResults(Op, CDP));
@@ -1986,8 +1974,8 @@ bool TreePatternNode::isIsomorphicTo(const TreePatternNode &N,
return false;
if (isLeaf()) {
- if (DefInit *DI = dyn_cast<DefInit>(getLeafValue())) {
- if (DefInit *NDI = dyn_cast<DefInit>(N.getLeafValue())) {
+ if (const DefInit *DI = dyn_cast<DefInit>(getLeafValue())) {
+ if (const DefInit *NDI = dyn_cast<DefInit>(N.getLeafValue())) {
return ((DI->getDef() == NDI->getDef()) &&
(!DepVars.contains(getName()) || getName() == N.getName()));
}
@@ -2044,7 +2032,7 @@ void TreePatternNode::SubstituteFormalArguments(
for (unsigned i = 0, e = getNumChildren(); i != e; ++i) {
TreePatternNode &Child = getChild(i);
if (Child.isLeaf()) {
- Init *Val = Child.getLeafValue();
+ const Init *Val = Child.getLeafValue();
// Note that, when substituting into an output pattern, Val might be an
// UnsetInit.
if (isa<UnsetInit>(Val) ||
@@ -2217,7 +2205,7 @@ void TreePatternNode::InlinePatternFragments(
/// When Unnamed is false, return the type of a named DAG operand such as the
/// GPR:$src operand above.
///
-static TypeSetByHwMode getImplicitType(Record *R, unsigned ResNo,
+static TypeSetByHwMode getImplicitType(const Record *R, unsigned ResNo,
bool NotRegisters, bool Unnamed,
TreePattern &TP) {
CodeGenDAGPatterns &CDP = TP.getDAGPatterns();
@@ -2227,7 +2215,7 @@ static TypeSetByHwMode getImplicitType(Record *R, unsigned ResNo,
assert(ResNo == 0 && "Regoperand ref only has one result!");
if (NotRegisters)
return TypeSetByHwMode(); // Unknown.
- Record *RegClass = R->getValueAsDef("RegClass");
+ const Record *RegClass = R->getValueAsDef("RegClass");
const CodeGenTarget &T = TP.getDAGPatterns().getTargetInfo();
return TypeSetByHwMode(T.getRegisterClass(RegClass).getValueTypes());
}
@@ -2316,7 +2304,7 @@ static TypeSetByHwMode getImplicitType(Record *R, unsigned ResNo,
if (R->isSubClassOf("Operand")) {
const CodeGenHwModes &CGH = CDP.getTargetInfo().getHwModes();
- Record *T = R->getValueAsDef("Type");
+ const Record *T = R->getValueAsDef("Type");
return TypeSetByHwMode(getValueTypeByHwMode(T, CGH));
}
@@ -2343,7 +2331,7 @@ const ComplexPattern *
TreePatternNode::getComplexPatternInfo(const CodeGenDAGPatterns &CGP) const {
const Record *Rec;
if (isLeaf()) {
- DefInit *DI = dyn_cast<DefInit>(getLeafValue());
+ const DefInit *DI = dyn_cast<DefInit>(getLeafValue());
if (!DI)
return nullptr;
Rec = DI->getDef();
@@ -2362,9 +2350,9 @@ unsigned TreePatternNode::getNumMIResults(const CodeGenDAGPatterns &CGP) const {
// If MIOperandInfo is specified, that gives the count.
if (isLeaf()) {
- DefInit *DI = dyn_cast<DefInit>(getLeafValue());
+ const DefInit *DI = dyn_cast<DefInit>(getLeafValue());
if (DI && DI->getDef()->isSubClassOf("Operand")) {
- DagInit *MIOps = DI->getDef()->getValueAsDag("MIOperandInfo");
+ const DagInit *MIOps = DI->getDef()->getValueAsDag("MIOperandInfo");
if (MIOps->getNumArgs())
return MIOps->getNumArgs();
}
@@ -2423,7 +2411,7 @@ static bool isOperandClass(const TreePatternNode &N, StringRef Class) {
if (!N.isLeaf())
return N.getOperator()->isSubClassOf(Class);
- DefInit *DI = dyn_cast<DefInit>(N.getLeafValue());
+ const DefInit *DI = dyn_cast<DefInit>(N.getLeafValue());
if (DI && DI->getDef()->isSubClassOf(Class))
return true;
@@ -2451,7 +2439,7 @@ bool TreePatternNode::ApplyTypeConstraints(TreePattern &TP, bool NotRegisters) {
CodeGenDAGPatterns &CDP = TP.getDAGPatterns();
if (isLeaf()) {
- if (DefInit *DI = dyn_cast<DefInit>(getLeafValue())) {
+ if (const DefInit *DI = dyn_cast<DefInit>(getLeafValue())) {
// If it's a regclass or something else known, include the type.
bool MadeChange = false;
for (unsigned i = 0, e = Types.size(); i != e; ++i)
@@ -2461,7 +2449,7 @@ bool TreePatternNode::ApplyTypeConstraints(TreePattern &TP, bool NotRegisters) {
return MadeChange;
}
- if (IntInit *II = dyn_cast<IntInit>(getLeafValue())) {
+ if (const IntInit *II = dyn_cast<IntInit>(getLeafValue())) {
assert(Types.size() == 1 && "Invalid IntInit");
// Int inits are always integers. :)
@@ -2658,7 +2646,7 @@ bool TreePatternNode::ApplyTypeConstraints(TreePattern &TP, bool NotRegisters) {
if (Child->getNumMIResults(CDP) < NumArgs) {
// Match first sub-operand against the child we already have.
- Record *SubRec = cast<DefInit>(MIOpInfo->getArg(0))->getDef();
+ const Record *SubRec = cast<DefInit>(MIOpInfo->getArg(0))->getDef();
MadeChange |= Child->UpdateNodeTypeFromInst(ChildResNo, SubRec, TP);
// And the remaining sub-operands against subsequent children.
@@ -2794,8 +2782,8 @@ bool TreePatternNode::canPatternMatch(std::string &Reason,
// TreePattern implementation
//
-TreePattern::TreePattern(const Record *TheRec, ListInit *RawPat, bool isInput,
- CodeGenDAGPatterns &cdp)
+TreePattern::TreePattern(const Record *TheRec, const ListInit *RawPat,
+ bool isInput, CodeGenDAGPatterns &cdp)
: TheRecord(TheRec), CDP(cdp), isInputPattern(isInput), HasError(false),
Infer(*this) {
for (Init *I : RawPat->getValues())
@@ -2840,8 +2828,10 @@ void TreePattern::ComputeNamedNodes(TreePatternNode &N) {
TreePatternNodePtr TreePattern::ParseTreePattern(Init *TheInit,
StringRef OpName) {
RecordKeeper &RK = TheInit->getRecordKeeper();
+ // Here, we are creating new records (BitsInit->IntInit), so const_cast
+ // TheInit back to a non-const pointer.
if (DefInit *DI = dyn_cast<DefInit>(TheInit)) {
- Record *R = DI->getDef();
+ const Record *R = DI->getDef();
// Direct reference to a leaf DagNode or PatFrag? Turn it into a
// TreePatternNode of its own. For example:
@@ -2901,7 +2891,7 @@ TreePatternNodePtr TreePattern::ParseTreePattern(Init *TheInit,
error("Pattern has unexpected operator type!");
return nullptr;
}
- Record *Operator = OpDef->getDef();
+ const Record *Operator = OpDef->getDef();
if (Operator->isSubClassOf("ValueType")) {
// If the operator is a ValueType, then this must be "type cast" of a leaf
@@ -2996,7 +2986,7 @@ TreePatternNodePtr TreePattern::ParseTreePattern(Init *TheInit,
// Check that the ComplexPattern uses are consistent: "(MY_PAT $a, $b)"
// and "(MY_PAT $b, $a)" should not be allowed in the same pattern;
// neither should "(MY_PAT_1 $a, $b)" and "(MY_PAT_2 $a, $b)".
- auto OperandId = std::pair(Operator, i);
+ auto OperandId = std::make_pair(Operator, i);
auto PrevOp = ComplexPatternOperands.find(Child->getName());
if (PrevOp != ComplexPatternOperands.end()) {
if (PrevOp->getValue() != OperandId)
@@ -3094,7 +3084,7 @@ bool TreePattern::InferAllTypes(
// us to match things like:
// def : Pat<(v1i64 (bitconvert(v2i32 DPR:$src))), (v1i64 DPR:$src)>;
if (Node == Trees[0].get() && Node->isLeaf()) {
- DefInit *DI = dyn_cast<DefInit>(Node->getLeafValue());
+ const DefInit *DI = dyn_cast<DefInit>(Node->getLeafValue());
if (DI && (DI->getDef()->isSubClassOf("RegisterClass") ||
DI->getDef()->isSubClassOf("RegisterOperand")))
continue;
@@ -3188,11 +3178,10 @@ CodeGenDAGPatterns::CodeGenDAGPatterns(const RecordKeeper &R,
VerifyInstructionFlags();
}
-Record *CodeGenDAGPatterns::getSDNodeNamed(StringRef Name) const {
- Record *N = Records.getDef(Name);
+const Record *CodeGenDAGPatterns::getSDNodeNamed(StringRef Name) const {
+ const Record *N = Records.getDef(Name);
if (!N || !N->isSubClassOf("SDNode"))
PrintFatalError("Error getting SDNode '" + Name + "'!");
-
return N;
}
@@ -3286,7 +3275,7 @@ void CodeGenDAGPatterns::ParsePatternFragments(bool OutFrags) {
// If there is a node transformation corresponding to this, keep track of
// it.
- Record *Transform = Frag->getValueAsDef("OperandTransform");
+ const Record *Transform = Frag->getValueAsDef("OperandTransform");
if (!getSDNodeTransform(Transform).second.empty()) // not noop xform?
for (const auto &T : P->getTrees())
T->setTransformFn(Transform);
@@ -3369,7 +3358,7 @@ static bool HandleUse(TreePattern &I, TreePatternNodePtr Pat,
// No name -> not interesting.
if (Pat->getName().empty()) {
if (Pat->isLeaf()) {
- DefInit *DI = dyn_cast<DefInit>(Pat->getLeafValue());
+ const DefInit *DI = dyn_cast<DefInit>(Pat->getLeafValue());
if (DI && (DI->getDef()->isSubClassOf("RegisterClass") ||
DI->getDef()->isSubClassOf("RegisterOperand")))
I.error("Input " + DI->getDef()->getName() + " must be named!");
@@ -3379,7 +3368,7 @@ static bool HandleUse(TreePattern &I, TreePatternNodePtr Pat,
const Record *Rec;
if (Pat->isLeaf()) {
- DefInit *DI = dyn_cast<DefInit>(Pat->getLeafValue());
+ const DefInit *DI = dyn_cast<DefInit>(Pat->getLeafValue());
if (!DI)
I.error("Input $" + Pat->getName() + " must be an identifier!");
Rec = DI->getDef();
@@ -3423,8 +3412,7 @@ void CodeGenDAGPatterns::FindPatternInputsAndOutputs(
std::map<std::string, TreePatternNodePtr> &InstInputs,
MapVector<std::string, TreePatternNodePtr, std::map<std::string, unsigned>>
&InstResults,
- std::vector<Record *> &InstImpResults) {
-
+ std::vector<const Record *> &InstImpResults) {
// The instruction pattern still has unresolved fragments. For *named*
// nodes we must resolve those here. This may not result in multiple
// alternatives.
@@ -3448,7 +3436,7 @@ void CodeGenDAGPatterns::FindPatternInputsAndOutputs(
if (!Dest.isLeaf())
I.error("implicitly defined value should be a register!");
- DefInit *Val = dyn_cast<DefInit>(Dest.getLeafValue());
+ const DefInit *Val = dyn_cast<DefInit>(Dest.getLeafValue());
if (!Val || !Val->getDef()->isSubClassOf("Register"))
I.error("implicitly defined value should be a register!");
if (Val)
@@ -3496,7 +3484,7 @@ void CodeGenDAGPatterns::FindPatternInputsAndOutputs(
if (!Dest->isLeaf())
I.error("set destination should be a register!");
- DefInit *Val = dyn_cast<DefInit>(Dest->getLeafValue());
+ const DefInit *Val = dyn_cast<DefInit>(Dest->getLeafValue());
if (!Val) {
I.error("set destination should be a register!");
continue;
@@ -3571,8 +3559,8 @@ private:
public:
void AnalyzeNode(const TreePatternNode &N) {
if (N.isLeaf()) {
- if (DefInit *DI = dyn_cast<DefInit>(N.getLeafValue())) {
- Record *LeafRec = DI->getDef();
+ if (const DefInit *DI = dyn_cast<DefInit>(N.getLeafValue())) {
+ const Record *LeafRec = DI->getDef();
// Handle ComplexPattern leaves.
if (LeafRec->isSubClassOf("ComplexPattern")) {
const ComplexPattern &CP = CDP.getComplexPattern(LeafRec);
@@ -3729,7 +3717,8 @@ static void getInstructionsInTree(TreePatternNode &Tree,
/// Check the class of a pattern leaf node against the instruction operand it
/// represents.
-static bool checkOperandClass(CGIOperandList::OperandInfo &OI, Record *Leaf) {
+static bool checkOperandClass(CGIOperandList::OperandInfo &OI,
+ const Record *Leaf) {
if (OI.Rec == Leaf)
return true;
@@ -3746,7 +3735,7 @@ static bool checkOperandClass(CGIOperandList::OperandInfo &OI, Record *Leaf) {
}
void CodeGenDAGPatterns::parseInstructionPattern(CodeGenInstruction &CGI,
- ListInit *Pat,
+ const ListInit *Pat,
DAGInstMap &DAGInsts) {
assert(!DAGInsts.count(CGI.TheDef) && "Instruction already parsed!");
@@ -3763,7 +3752,7 @@ void CodeGenDAGPatterns::parseInstructionPattern(CodeGenInstruction &CGI,
MapVector<std::string, TreePatternNodePtr, std::map<std::string, unsigned>>
InstResults;
- std::vector<Record *> InstImpResults;
+ std::vector<const Record *> InstImpResults;
// Verify that the top-level forms in the instruction are of void type, and
// fill in the InstResults map.
@@ -3821,7 +3810,7 @@ void CodeGenDAGPatterns::parseInstructionPattern(CodeGenInstruction &CGI,
I.error("Operand $" + OpName + " does not exist in operand list!");
TreePatternNodePtr RNode = InstResultIter->second;
- Record *R = cast<DefInit>(RNode->getLeafValue())->getDef();
+ const Record *R = cast<DefInit>(RNode->getLeafValue())->getDef();
ResNodes.push_back(std::move(RNode));
if (!R)
I.error("Operand $" + OpName +
@@ -3869,7 +3858,7 @@ void CodeGenDAGPatterns::parseInstructionPattern(CodeGenInstruction &CGI,
InstInputs.erase(OpName); // It occurred, remove from map.
if (InVal->isLeaf() && isa<DefInit>(InVal->getLeafValue())) {
- Record *InRec = cast<DefInit>(InVal->getLeafValue())->getDef();
+ const Record *InRec = cast<DefInit>(InVal->getLeafValue())->getDef();
if (!checkOperandClass(Op, InRec)) {
I.error("Operand $" + OpName +
"'s register class disagrees"
@@ -3886,7 +3875,7 @@ void CodeGenDAGPatterns::parseInstructionPattern(CodeGenInstruction &CGI,
OpNode->clearPredicateCalls();
// Promote the xform function to be an explicit node if set.
- if (Record *Xform = OpNode->getTransformFn()) {
+ if (const Record *Xform = OpNode->getTransformFn()) {
OpNode->setTransformFn(nullptr);
std::vector<TreePatternNodePtr> Children;
Children.push_back(OpNode);
@@ -3965,7 +3954,7 @@ void CodeGenDAGPatterns::ParseInstructions() {
// Create and insert the instruction.
Instructions.try_emplace(Instr, std::move(Results), std::move(Operands),
- std::vector<Record *>());
+ std::vector<const Record *>());
continue; // no pattern.
}
@@ -4211,7 +4200,7 @@ static bool ForceArbitraryInstResultType(TreePatternNode &N, TreePattern &TP) {
// Promote xform function to be an explicit node wherever set.
static TreePatternNodePtr PromoteXForms(TreePatternNodePtr N) {
- if (Record *Xform = N->getTransformFn()) {
+ if (const Record *Xform = N->getTransformFn()) {
N->setTransformFn(nullptr);
std::vector<TreePatternNodePtr> Children;
Children.push_back(PromoteXForms(N));
@@ -4229,8 +4218,7 @@ static TreePatternNodePtr PromoteXForms(TreePatternNodePtr N) {
void CodeGenDAGPatterns::ParseOnePattern(
const Record *TheDef, TreePattern &Pattern, TreePattern &Result,
- const std::vector<Record *> &InstImpResults, bool ShouldIgnore) {
-
+ ArrayRef<const Record *> InstImpResults, bool ShouldIgnore) {
// Inline pattern fragments and expand multiple alternatives.
Pattern.InlinePatternFragments();
Result.InlinePatternFragments();
@@ -4354,7 +4342,7 @@ void CodeGenDAGPatterns::ParsePatterns() {
std::map<std::string, TreePatternNodePtr> InstInputs;
MapVector<std::string, TreePatternNodePtr, std::map<std::string, unsigned>>
InstResults;
- std::vector<Record *> InstImpResults;
+ std::vector<const Record *> InstImpResults;
for (unsigned j = 0, ee = Pattern.getNumTrees(); j != ee; ++j)
FindPatternInputsAndOutputs(Pattern, Pattern.getTree(j), InstInputs,
InstResults, InstImpResults);
@@ -4682,8 +4670,8 @@ static void GenerateVariantsOf(TreePatternNodePtr N,
for (; i != e; ++i) {
TreePatternNode &Child = N->getChild(i);
if (Child.isLeaf())
- if (DefInit *DI = dyn_cast<DefInit>(Child.getLeafValue())) {
- Record *RR = DI->getDef();
+ if (const DefInit *DI = dyn_cast<DefInit>(Child.getLeafValue())) {
+ const Record *RR = DI->getDef();
if (RR->isSubClassOf("Register"))
NoRegisters = false;
}
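// A minimal, self-contained sketch (not from the patch) of the
// const-propagation idiom applied throughout this file: llvm::dyn_cast<>
// preserves the constness of its argument, so once an accessor such as
// getLeafValue() returns `const Init *`, every downstream cast and local
// naturally becomes const as well. The helper name below is illustrative.
#include "llvm/Support/Casting.h"
#include "llvm/TableGen/Record.h"

static const llvm::Record *leafDefOrNull(const llvm::Init *Leaf) {
  // dyn_cast on a const pointer yields a const result; no const_cast needed.
  if (const llvm::DefInit *DI = llvm::dyn_cast<llvm::DefInit>(Leaf))
    return DI->getDef();
  return nullptr;
}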
diff --git a/llvm/utils/TableGen/Common/CodeGenDAGPatterns.h b/llvm/utils/TableGen/Common/CodeGenDAGPatterns.h
index 0aa6287..1da7dea 100644
--- a/llvm/utils/TableGen/Common/CodeGenDAGPatterns.h
+++ b/llvm/utils/TableGen/Common/CodeGenDAGPatterns.h
@@ -582,13 +582,13 @@ public:
/// If non-null, indicates that this predicate is a predefined memory VT
/// predicate for a load/store and returns the ValueType record for the memory
/// VT.
- Record *getMemoryVT() const;
+ const Record *getMemoryVT() const;
/// If non-null, indicates that this predicate is a predefined memory VT
/// predicate (checking only the scalar type) for load/store and returns the
/// ValueType record for the memory VT.
- Record *getScalarMemoryVT() const;
+ const Record *getScalarMemoryVT() const;
- ListInit *getAddressSpaces() const;
+ const ListInit *getAddressSpaces() const;
int64_t getMinAlignment() const;
// If true, indicates that GlobalISel-based C++ code was supplied.
@@ -634,7 +634,7 @@ class TreePatternNode : public RefCountedBase<TreePatternNode> {
/// OperatorOrVal - The Record for the operator if this is an interior node
/// (not a leaf) or the init value (e.g. the "GPRC" record, or "7") for a
/// leaf.
- PointerUnion<const Record *, Init *> OperatorOrVal;
+ PointerUnion<const Record *, const Init *> OperatorOrVal;
/// Name - The name given to this node with the :$foo notation.
///
@@ -648,7 +648,7 @@ class TreePatternNode : public RefCountedBase<TreePatternNode> {
/// TransformFn - The transformation function to execute on this node before
/// it can be substituted into the resulting instruction on a pattern match.
- Record *TransformFn;
+ const Record *TransformFn;
std::vector<TreePatternNodePtr> Children;
@@ -664,7 +664,7 @@ public:
ResultPerm.resize(NumResults);
std::iota(ResultPerm.begin(), ResultPerm.end(), 0);
}
- TreePatternNode(Init *val, unsigned NumResults) // leaf ctor
+ TreePatternNode(const Init *val, unsigned NumResults) // leaf ctor
: OperatorOrVal(val), TransformFn(nullptr) {
Types.resize(NumResults);
ResultPerm.resize(NumResults);
@@ -685,7 +685,7 @@ public:
NamesAsPredicateArg.push_back(N);
}
- bool isLeaf() const { return isa<Init *>(OperatorOrVal); }
+ bool isLeaf() const { return isa<const Init *>(OperatorOrVal); }
// Type accessors.
unsigned getNumTypes() const { return Types.size(); }
@@ -713,9 +713,9 @@ public:
unsigned getResultIndex(unsigned ResNo) const { return ResultPerm[ResNo]; }
void setResultIndex(unsigned ResNo, unsigned RI) { ResultPerm[ResNo] = RI; }
- Init *getLeafValue() const {
+ const Init *getLeafValue() const {
assert(isLeaf());
- return cast<Init *>(OperatorOrVal);
+ return cast<const Init *>(OperatorOrVal);
}
const Record *getOperator() const {
assert(!isLeaf());
@@ -766,8 +766,8 @@ public:
addPredicateCall(TreePredicateCall(Fn, Scope));
}
- Record *getTransformFn() const { return TransformFn; }
- void setTransformFn(Record *Fn) { TransformFn = Fn; }
+ const Record *getTransformFn() const { return TransformFn; }
+ void setTransformFn(const Record *Fn) { TransformFn = Fn; }
/// getIntrinsicInfo - If this node corresponds to an intrinsic, return the
/// CodeGenIntrinsic information for it, otherwise return a null pointer.
@@ -901,14 +901,14 @@ class TreePattern {
/// ComplexPattern. This records the ComplexPattern instance and the operand
/// number for each operand encountered in a ComplexPattern to aid in that
/// check.
- StringMap<std::pair<Record *, unsigned>> ComplexPatternOperands;
+ StringMap<std::pair<const Record *, unsigned>> ComplexPatternOperands;
TypeInfer Infer;
public:
/// TreePattern constructor - Parse the specified DagInits into the
/// current record.
- TreePattern(const Record *TheRec, ListInit *RawPat, bool isInput,
+ TreePattern(const Record *TheRec, const ListInit *RawPat, bool isInput,
CodeGenDAGPatterns &ise);
TreePattern(const Record *TheRec, DagInit *Pat, bool isInput,
CodeGenDAGPatterns &ise);
@@ -1013,24 +1013,24 @@ struct DAGDefaultOperand {
class DAGInstruction {
std::vector<const Record *> Results;
std::vector<const Record *> Operands;
- std::vector<Record *> ImpResults;
+ std::vector<const Record *> ImpResults;
TreePatternNodePtr SrcPattern;
TreePatternNodePtr ResultPattern;
public:
- DAGInstruction(std::vector<const Record *> &&results,
- std::vector<const Record *> &&operands,
- std::vector<Record *> &&impresults,
- TreePatternNodePtr srcpattern = nullptr,
- TreePatternNodePtr resultpattern = nullptr)
- : Results(std::move(results)), Operands(std::move(operands)),
- ImpResults(std::move(impresults)), SrcPattern(srcpattern),
- ResultPattern(resultpattern) {}
+ DAGInstruction(std::vector<const Record *> &&Results,
+ std::vector<const Record *> &&Operands,
+ std::vector<const Record *> &&ImpResults,
+ TreePatternNodePtr SrcPattern = nullptr,
+ TreePatternNodePtr ResultPattern = nullptr)
+ : Results(std::move(Results)), Operands(std::move(Operands)),
+ ImpResults(std::move(ImpResults)), SrcPattern(SrcPattern),
+ ResultPattern(ResultPattern) {}
unsigned getNumResults() const { return Results.size(); }
unsigned getNumOperands() const { return Operands.size(); }
unsigned getNumImpResults() const { return ImpResults.size(); }
- const std::vector<Record *> &getImpResults() const { return ImpResults; }
+ ArrayRef<const Record *> getImpResults() const { return ImpResults; }
const Record *getResult(unsigned RN) const {
assert(RN < Results.size());
@@ -1042,7 +1042,7 @@ public:
return Operands[ON];
}
- Record *getImpResult(unsigned RN) const {
+ const Record *getImpResult(unsigned RN) const {
assert(RN < ImpResults.size());
return ImpResults[RN];
}
@@ -1058,7 +1058,7 @@ class PatternToMatch {
ListInit *Predicates; // Top level predicate conditions to match.
TreePatternNodePtr SrcPattern; // Source pattern to match.
TreePatternNodePtr DstPattern; // Resulting pattern.
- std::vector<Record *> Dstregs; // Physical register defs being matched.
+ std::vector<const Record *> Dstregs; // Physical register defs being matched.
std::string HwModeFeatures;
int AddedComplexity; // Add to matching pattern complexity.
bool GISelShouldIgnore; // Should GlobalISel ignore importing this pattern.
@@ -1067,12 +1067,11 @@ class PatternToMatch {
public:
PatternToMatch(const Record *srcrecord, ListInit *preds,
TreePatternNodePtr src, TreePatternNodePtr dst,
- std::vector<Record *> dstregs, int complexity, unsigned uid,
+ ArrayRef<const Record *> dstregs, int complexity, unsigned uid,
bool ignore, const Twine &hwmodefeatures = "")
: SrcRecord(srcrecord), Predicates(preds), SrcPattern(src),
- DstPattern(dst), Dstregs(std::move(dstregs)),
- HwModeFeatures(hwmodefeatures.str()), AddedComplexity(complexity),
- GISelShouldIgnore(ignore), ID(uid) {}
+ DstPattern(dst), Dstregs(dstregs), HwModeFeatures(hwmodefeatures.str()),
+ AddedComplexity(complexity), GISelShouldIgnore(ignore), ID(uid) {}
const Record *getSrcRecord() const { return SrcRecord; }
ListInit *getPredicates() const { return Predicates; }
@@ -1080,14 +1079,15 @@ public:
TreePatternNodePtr getSrcPatternShared() const { return SrcPattern; }
TreePatternNode &getDstPattern() const { return *DstPattern; }
TreePatternNodePtr getDstPatternShared() const { return DstPattern; }
- const std::vector<Record *> &getDstRegs() const { return Dstregs; }
+ ArrayRef<const Record *> getDstRegs() const { return Dstregs; }
StringRef getHwModeFeatures() const { return HwModeFeatures; }
int getAddedComplexity() const { return AddedComplexity; }
bool getGISelShouldIgnore() const { return GISelShouldIgnore; }
unsigned getID() const { return ID; }
std::string getPredicateCheck() const;
- void getPredicateRecords(SmallVectorImpl<Record *> &PredicateRecs) const;
+ void
+ getPredicateRecords(SmallVectorImpl<const Record *> &PredicateRecs) const;
/// Compute the complexity metric for the input pattern. This roughly
/// corresponds to the number of nodes that are covered.
@@ -1113,8 +1113,8 @@ private:
std::map<const Record *, DAGInstruction, LessRecordByID> Instructions;
// Specific SDNode definitions:
- Record *intrinsic_void_sdnode;
- Record *intrinsic_w_chain_sdnode, *intrinsic_wo_chain_sdnode;
+ const Record *intrinsic_void_sdnode;
+ const Record *intrinsic_w_chain_sdnode, *intrinsic_wo_chain_sdnode;
/// PatternsToMatch - All of the things we are matching on the DAG. The first
/// value is the pattern to match, the second pattern is the result to
@@ -1136,7 +1136,7 @@ public:
const CodeGenTarget &getTargetInfo() const { return Target; }
const TypeSetByHwMode &getLegalTypes() const { return LegalVTS; }
- Record *getSDNodeNamed(StringRef Name) const;
+ const Record *getSDNodeNamed(StringRef Name) const;
const SDNodeInfo &getSDNodeInfo(const Record *R) const {
auto F = SDNodes.find(R);
@@ -1170,7 +1170,7 @@ public:
llvm_unreachable("Bad intrinsic ID!");
}
- unsigned getIntrinsicID(Record *R) const {
+ unsigned getIntrinsicID(const Record *R) const {
for (unsigned i = 0, e = Intrinsics.size(); i != e; ++i)
if (Intrinsics[i].TheDef == R)
return i;
@@ -1209,7 +1209,7 @@ public:
/// Parse the Pattern for an instruction, and insert the result in DAGInsts.
typedef std::map<const Record *, DAGInstruction, LessRecordByID> DAGInstMap;
- void parseInstructionPattern(CodeGenInstruction &CGI, ListInit *Pattern,
+ void parseInstructionPattern(CodeGenInstruction &CGI, const ListInit *Pattern,
DAGInstMap &DAGInsts);
const DAGInstruction &getInstruction(const Record *R) const {
@@ -1218,11 +1218,13 @@ public:
return F->second;
}
- Record *get_intrinsic_void_sdnode() const { return intrinsic_void_sdnode; }
- Record *get_intrinsic_w_chain_sdnode() const {
+ const Record *get_intrinsic_void_sdnode() const {
+ return intrinsic_void_sdnode;
+ }
+ const Record *get_intrinsic_w_chain_sdnode() const {
return intrinsic_w_chain_sdnode;
}
- Record *get_intrinsic_wo_chain_sdnode() const {
+ const Record *get_intrinsic_wo_chain_sdnode() const {
return intrinsic_wo_chain_sdnode;
}
@@ -1248,7 +1250,7 @@ private:
void ParseOnePattern(const Record *TheDef, TreePattern &Pattern,
TreePattern &Result,
- const std::vector<Record *> &InstImpResults,
+ ArrayRef<const Record *> InstImpResults,
bool ShouldIgnore = false);
void AddPatternToMatch(TreePattern *Pattern, PatternToMatch &&PTM);
void FindPatternInputsAndOutputs(
@@ -1256,7 +1258,7 @@ private:
std::map<std::string, TreePatternNodePtr> &InstInputs,
MapVector<std::string, TreePatternNodePtr,
std::map<std::string, unsigned>> &InstResults,
- std::vector<Record *> &InstImpResults);
+ std::vector<const Record *> &InstImpResults);
unsigned getNewUID();
};
diff --git a/llvm/utils/TableGen/Common/CodeGenHwModes.cpp b/llvm/utils/TableGen/Common/CodeGenHwModes.cpp
index fd2fd33..fda13b3 100644
--- a/llvm/utils/TableGen/Common/CodeGenHwModes.cpp
+++ b/llvm/utils/TableGen/Common/CodeGenHwModes.cpp
@@ -39,8 +39,8 @@ LLVM_DUMP_METHOD
void HwMode::dump() const { dbgs() << Name << ": " << Features << '\n'; }
HwModeSelect::HwModeSelect(const Record *R, CodeGenHwModes &CGH) {
- std::vector<Record *> Modes = R->getValueAsListOfDefs("Modes");
- std::vector<Record *> Objects = R->getValueAsListOfDefs("Objects");
+ std::vector<const Record *> Modes = R->getValueAsListOfConstDefs("Modes");
+ std::vector<const Record *> Objects = R->getValueAsListOfConstDefs("Objects");
if (Modes.size() != Objects.size()) {
PrintError(
R->getLoc(),
@@ -49,9 +49,9 @@ HwModeSelect::HwModeSelect(const Record *R, CodeGenHwModes &CGH) {
"have the same size");
report_fatal_error("error in target description.");
}
- for (unsigned i = 0, e = Modes.size(); i != e; ++i) {
- unsigned ModeId = CGH.getHwModeId(Modes[i]);
- Items.push_back(std::pair(ModeId, Objects[i]));
+ for (auto [Mode, Object] : zip_equal(Modes, Objects)) {
+ unsigned ModeId = CGH.getHwModeId(Mode);
+ Items.push_back(std::pair(ModeId, Object));
}
}
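// A minimal sketch, assuming llvm/ADT/STLExtras.h, of the zip_equal pattern
// adopted above: it iterates two ranges in lockstep and (in asserts-enabled
// builds) verifies they have equal length, replacing the manual index loop.
// The explicit size check with PrintFatalError above still runs first, so the
// user-facing diagnostic is unchanged. Names here are illustrative only.
#include "llvm/ADT/STLExtras.h"
#include <utility>
#include <vector>

void zipEqualExample() {
  std::vector<int> ModeIds = {0, 1, 2};
  std::vector<const char *> Objects = {"a", "b", "c"};
  std::vector<std::pair<int, const char *>> Items;
  // Structured bindings unpack each lockstep pair; zip_equal asserts that
  // ModeIds and Objects have the same size.
  for (auto [Mode, Object] : llvm::zip_equal(ModeIds, Objects))
    Items.push_back({Mode, Object});
}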
diff --git a/llvm/utils/TableGen/Common/CodeGenInstAlias.cpp b/llvm/utils/TableGen/Common/CodeGenInstAlias.cpp
index f23ccf9..69e0029 100644
--- a/llvm/utils/TableGen/Common/CodeGenInstAlias.cpp
+++ b/llvm/utils/TableGen/Common/CodeGenInstAlias.cpp
@@ -30,9 +30,9 @@ bool CodeGenInstAlias::tryAliasOpMatch(const DagInit *Result,
ArrayRef<SMLoc> Loc,
const CodeGenTarget &T,
ResultOperand &ResOp) {
- Init *Arg = Result->getArg(AliasOpNo);
- DefInit *ADI = dyn_cast<DefInit>(Arg);
- Record *ResultRecord = ADI ? ADI->getDef() : nullptr;
+ const Init *Arg = Result->getArg(AliasOpNo);
+ const DefInit *ADI = dyn_cast<DefInit>(Arg);
+ const Record *ResultRecord = ADI ? ADI->getDef() : nullptr;
if (ADI && ADI->getDef() == InstOpRec) {
// If the operand is a record, it must have a name, and the record type
@@ -102,12 +102,12 @@ bool CodeGenInstAlias::tryAliasOpMatch(const DagInit *Result,
// throw TGError(Loc, "reg0 used for result that is not an "
// "OptionalDefOperand!");
- ResOp = ResultOperand(static_cast<Record *>(nullptr));
+ ResOp = ResultOperand(nullptr);
return true;
}
// Literal integers.
- if (IntInit *II = dyn_cast<IntInit>(Arg)) {
+ if (const IntInit *II = dyn_cast<IntInit>(Arg)) {
if (hasSubOps || !InstOpRec->isSubClassOf("Operand"))
return false;
// Integer arguments can't have names.
@@ -119,17 +119,16 @@ bool CodeGenInstAlias::tryAliasOpMatch(const DagInit *Result,
}
// Bits<n> (also used for 0bxx literals)
- if (BitsInit *BI = dyn_cast<BitsInit>(Arg)) {
+ if (const BitsInit *BI = dyn_cast<BitsInit>(Arg)) {
if (hasSubOps || !InstOpRec->isSubClassOf("Operand"))
return false;
if (!BI->isComplete())
return false;
// Convert the bits init to an integer and use that for the result.
- IntInit *II = dyn_cast_or_null<IntInit>(
- BI->convertInitializerTo(IntRecTy::get(BI->getRecordKeeper())));
- if (!II)
+ std::optional<int64_t> Value = BI->convertInitializerToInt();
+ if (!Value)
return false;
- ResOp = ResultOperand(II->getValue());
+ ResOp = ResultOperand(*Value);
return true;
}
@@ -182,15 +181,15 @@ CodeGenInstAlias::CodeGenInstAlias(const Record *R, const CodeGenTarget &T)
// NameClass - If argument names are repeated, we need to verify they have
// the same class.
- StringMap<Record *> NameClass;
+ StringMap<const Record *> NameClass;
for (unsigned i = 0, e = Result->getNumArgs(); i != e; ++i) {
- DefInit *ADI = dyn_cast<DefInit>(Result->getArg(i));
+ const DefInit *ADI = dyn_cast<DefInit>(Result->getArg(i));
if (!ADI || !Result->getArgName(i))
continue;
// Verify we don't have something like: (someinst GR16:$foo, GR32:$foo)
// $foo can exist multiple times in the result list, but it must have the
// same type.
- Record *&Entry = NameClass[Result->getArgNameStr(i)];
+ const Record *&Entry = NameClass[Result->getArgNameStr(i)];
if (Entry && Entry != ADI->getDef())
PrintFatalError(R->getLoc(), "result value $" + Result->getArgNameStr(i) +
" is both " + Entry->getName() +
@@ -235,9 +234,9 @@ CodeGenInstAlias::CodeGenInstAlias(const Record *R, const CodeGenTarget &T)
// Otherwise, we need to match each of the suboperands individually.
} else {
- DagInit *MIOI = ResultInst->Operands[i].MIOperandInfo;
+ const DagInit *MIOI = ResultInst->Operands[i].MIOperandInfo;
for (unsigned SubOp = 0; SubOp != NumSubOps; ++SubOp) {
- Record *SubRec = cast<DefInit>(MIOI->getArg(SubOp))->getDef();
+ const Record *SubRec = cast<DefInit>(MIOI->getArg(SubOp))->getDef();
// Take care to instantiate each of the suboperands with the correct
// nomenclature: $foo.bar
@@ -255,11 +254,11 @@ CodeGenInstAlias::CodeGenInstAlias(const Record *R, const CodeGenTarget &T)
// If the argument did not match the instruction operand, and the operand
// is composed of multiple suboperands, try matching the suboperands.
if (NumSubOps > 1) {
- DagInit *MIOI = ResultInst->Operands[i].MIOperandInfo;
+ const DagInit *MIOI = ResultInst->Operands[i].MIOperandInfo;
for (unsigned SubOp = 0; SubOp != NumSubOps; ++SubOp) {
if (AliasOpNo >= Result->getNumArgs())
PrintFatalError(R->getLoc(), "not enough arguments for instruction!");
- Record *SubRec = cast<DefInit>(MIOI->getArg(SubOp))->getDef();
+ const Record *SubRec = cast<DefInit>(MIOI->getArg(SubOp))->getDef();
if (tryAliasOpMatch(Result, AliasOpNo, SubRec, false, R->getLoc(), T,
ResOp)) {
ResultOperands.push_back(ResOp);
diff --git a/llvm/utils/TableGen/Common/CodeGenInstAlias.h b/llvm/utils/TableGen/Common/CodeGenInstAlias.h
index dd6f93e..00680b0 100644
--- a/llvm/utils/TableGen/Common/CodeGenInstAlias.h
+++ b/llvm/utils/TableGen/Common/CodeGenInstAlias.h
@@ -57,7 +57,7 @@ public:
ResultOperand(std::string N, const Record *R)
: Name(std::move(N)), R(R), Kind(K_Record) {}
ResultOperand(int64_t I) : Imm(I), Kind(K_Imm) {}
- ResultOperand(Record *R) : R(R), Kind(K_Reg) {}
+ ResultOperand(const Record *R) : R(R), Kind(K_Reg) {}
bool isRecord() const { return Kind == K_Record; }
bool isImm() const { return Kind == K_Imm; }
diff --git a/llvm/utils/TableGen/Common/CodeGenRegisters.cpp b/llvm/utils/TableGen/Common/CodeGenRegisters.cpp
index 5b43f7d..d0f4a2f 100644
--- a/llvm/utils/TableGen/Common/CodeGenRegisters.cpp
+++ b/llvm/utils/TableGen/Common/CodeGenRegisters.cpp
@@ -79,7 +79,8 @@ void CodeGenSubRegIndex::updateComponents(CodeGenRegBank &RegBank) {
if (!TheDef)
return;
- std::vector<Record *> Comps = TheDef->getValueAsListOfDefs("ComposedOf");
+ std::vector<const Record *> Comps =
+ TheDef->getValueAsListOfConstDefs("ComposedOf");
if (!Comps.empty()) {
if (Comps.size() != 2)
PrintFatalError(TheDef->getLoc(),
@@ -91,8 +92,8 @@ void CodeGenSubRegIndex::updateComponents(CodeGenRegBank &RegBank) {
PrintFatalError(TheDef->getLoc(), "Ambiguous ComposedOf entries");
}
- std::vector<Record *> Parts =
- TheDef->getValueAsListOfDefs("CoveringSubRegIndices");
+ std::vector<const Record *> Parts =
+ TheDef->getValueAsListOfConstDefs("CoveringSubRegIndices");
if (!Parts.empty()) {
if (Parts.size() < 2)
PrintFatalError(TheDef->getLoc(),
@@ -167,8 +168,10 @@ CodeGenRegister::CodeGenRegister(const Record *R, unsigned Enum)
}
void CodeGenRegister::buildObjectGraph(CodeGenRegBank &RegBank) {
- std::vector<Record *> SRIs = TheDef->getValueAsListOfDefs("SubRegIndices");
- std::vector<Record *> SRs = TheDef->getValueAsListOfDefs("SubRegs");
+ std::vector<const Record *> SRIs =
+ TheDef->getValueAsListOfConstDefs("SubRegIndices");
+ std::vector<const Record *> SRs =
+ TheDef->getValueAsListOfConstDefs("SubRegs");
if (SRIs.size() != SRs.size())
PrintFatalError(TheDef->getLoc(),
@@ -625,7 +628,8 @@ struct TupleExpander : SetTheory::Expander {
void expand(SetTheory &ST, const Record *Def,
SetTheory::RecSet &Elts) override {
- std::vector<Record *> Indices = Def->getValueAsListOfDefs("SubRegIndices");
+ std::vector<const Record *> Indices =
+ Def->getValueAsListOfConstDefs("SubRegIndices");
unsigned Dim = Indices.size();
ListInit *SubRegs = Def->getValueAsListInit("SubRegs");
if (Dim != SubRegs->size())
@@ -760,7 +764,8 @@ CodeGenRegisterClass::CodeGenRegisterClass(CodeGenRegBank &RegBank,
: TheDef(R), Name(std::string(R->getName())),
TopoSigs(RegBank.getNumTopoSigs()), EnumValue(-1), TSFlags(0) {
GeneratePressureSet = R->getValueAsBit("GeneratePressureSet");
- std::vector<Record *> TypeList = R->getValueAsListOfDefs("RegTypes");
+ std::vector<const Record *> TypeList =
+ R->getValueAsListOfConstDefs("RegTypes");
if (TypeList.empty())
PrintFatalError(R->getLoc(), "RegTypes list must not be empty!");
for (unsigned i = 0, e = TypeList.size(); i != e; ++i) {
diff --git a/llvm/utils/TableGen/Common/CodeGenTarget.cpp b/llvm/utils/TableGen/Common/CodeGenTarget.cpp
index 065d101..7aa945a 100644
--- a/llvm/utils/TableGen/Common/CodeGenTarget.cpp
+++ b/llvm/utils/TableGen/Common/CodeGenTarget.cpp
@@ -121,7 +121,7 @@ StringRef CodeGenTarget::getRegNamespace() const {
return RegClasses.size() > 0 ? RegClasses.front().Namespace : "";
}
-Record *CodeGenTarget::getInstructionSet() const {
+const Record *CodeGenTarget::getInstructionSet() const {
return TargetRec->getValueAsDef("InstructionSet");
}
@@ -131,8 +131,9 @@ bool CodeGenTarget::getAllowRegisterRenaming() const {
/// getAsmParser - Return the AssemblyParser definition for this target.
///
-Record *CodeGenTarget::getAsmParser() const {
- std::vector<Record *> LI = TargetRec->getValueAsListOfDefs("AssemblyParsers");
+const Record *CodeGenTarget::getAsmParser() const {
+ std::vector<const Record *> LI =
+ TargetRec->getValueAsListOfConstDefs("AssemblyParsers");
if (AsmParserNum >= LI.size())
PrintFatalError("Target does not have an AsmParser #" +
Twine(AsmParserNum) + "!");
@@ -142,28 +143,27 @@ Record *CodeGenTarget::getAsmParser() const {
/// getAsmParserVariant - Return the AssemblyParserVariant definition for
/// this target.
///
-Record *CodeGenTarget::getAsmParserVariant(unsigned i) const {
- std::vector<Record *> LI =
- TargetRec->getValueAsListOfDefs("AssemblyParserVariants");
- if (i >= LI.size())
- PrintFatalError("Target does not have an AsmParserVariant #" + Twine(i) +
+const Record *CodeGenTarget::getAsmParserVariant(unsigned Idx) const {
+ std::vector<const Record *> LI =
+ TargetRec->getValueAsListOfConstDefs("AssemblyParserVariants");
+ if (Idx >= LI.size())
+ PrintFatalError("Target does not have an AsmParserVariant #" + Twine(Idx) +
"!");
- return LI[i];
+ return LI[Idx];
}
/// getAsmParserVariantCount - Return the AssemblyParserVariant definition
/// available for this target.
///
unsigned CodeGenTarget::getAsmParserVariantCount() const {
- std::vector<Record *> LI =
- TargetRec->getValueAsListOfDefs("AssemblyParserVariants");
- return LI.size();
+ return TargetRec->getValueAsListOfDefs("AssemblyParserVariants").size();
}
/// getAsmWriter - Return the AssemblyWriter definition for this target.
///
-Record *CodeGenTarget::getAsmWriter() const {
- std::vector<Record *> LI = TargetRec->getValueAsListOfDefs("AssemblyWriters");
+const Record *CodeGenTarget::getAsmWriter() const {
+ std::vector<const Record *> LI =
+ TargetRec->getValueAsListOfConstDefs("AssemblyWriters");
if (AsmWriterNum >= LI.size())
PrintFatalError("Target does not have an AsmWriter #" +
Twine(AsmWriterNum) + "!");
@@ -422,30 +422,29 @@ ComplexPattern::ComplexPattern(const Record *R) {
// FIXME: Why is this different from parseSDPatternOperatorProperties?
// Parse the properties.
Properties = 0;
- std::vector<Record *> PropList = R->getValueAsListOfDefs("Properties");
- for (unsigned i = 0, e = PropList.size(); i != e; ++i)
- if (PropList[i]->getName() == "SDNPHasChain") {
+ for (const Record *Prop : R->getValueAsListOfDefs("Properties")) {
+ if (Prop->getName() == "SDNPHasChain") {
Properties |= 1 << SDNPHasChain;
- } else if (PropList[i]->getName() == "SDNPOptInGlue") {
+ } else if (Prop->getName() == "SDNPOptInGlue") {
Properties |= 1 << SDNPOptInGlue;
- } else if (PropList[i]->getName() == "SDNPMayStore") {
+ } else if (Prop->getName() == "SDNPMayStore") {
Properties |= 1 << SDNPMayStore;
- } else if (PropList[i]->getName() == "SDNPMayLoad") {
+ } else if (Prop->getName() == "SDNPMayLoad") {
Properties |= 1 << SDNPMayLoad;
- } else if (PropList[i]->getName() == "SDNPSideEffect") {
+ } else if (Prop->getName() == "SDNPSideEffect") {
Properties |= 1 << SDNPSideEffect;
- } else if (PropList[i]->getName() == "SDNPMemOperand") {
+ } else if (Prop->getName() == "SDNPMemOperand") {
Properties |= 1 << SDNPMemOperand;
- } else if (PropList[i]->getName() == "SDNPVariadic") {
+ } else if (Prop->getName() == "SDNPVariadic") {
Properties |= 1 << SDNPVariadic;
- } else if (PropList[i]->getName() == "SDNPWantRoot") {
+ } else if (Prop->getName() == "SDNPWantRoot") {
Properties |= 1 << SDNPWantRoot;
- } else if (PropList[i]->getName() == "SDNPWantParent") {
+ } else if (Prop->getName() == "SDNPWantParent") {
Properties |= 1 << SDNPWantParent;
} else {
- PrintFatalError(R->getLoc(), "Unsupported SD Node property '" +
- PropList[i]->getName() +
- "' on ComplexPattern '" + R->getName() +
- "'!");
+ PrintFatalError(R->getLoc(),
+ "Unsupported SD Node property '" + Prop->getName() +
+ "' on ComplexPattern '" + R->getName() + "'!");
}
+ }
}
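// Not a change this patch makes, but a related idiom for reference: the
// if/else-if name-to-flag chain above can also be written with
// llvm::StringSwitch, which maps a StringRef to a value in one expression.
// The bit positions below are illustrative, not the real SDNP enum values.
#include "llvm/ADT/StringSwitch.h"

static unsigned bitForProperty(llvm::StringRef Name) {
  return llvm::StringSwitch<unsigned>(Name)
      .Case("SDNPHasChain", 1u << 0)
      .Case("SDNPMayStore", 1u << 1)
      .Case("SDNPMayLoad", 1u << 2)
      .Default(0); // unknown property: the caller reports the fatal error
}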
diff --git a/llvm/utils/TableGen/Common/CodeGenTarget.h b/llvm/utils/TableGen/Common/CodeGenTarget.h
index 41497c8..c7b44f7 100644
--- a/llvm/utils/TableGen/Common/CodeGenTarget.h
+++ b/llvm/utils/TableGen/Common/CodeGenTarget.h
@@ -95,7 +95,7 @@ public:
/// getInstructionSet - Return the InstructionSet object.
///
- Record *getInstructionSet() const;
+ const Record *getInstructionSet() const;
/// getAllowRegisterRenaming - Return the AllowRegisterRenaming flag value for
/// this target.
@@ -104,12 +104,12 @@ public:
/// getAsmParser - Return the AssemblyParser definition for this target.
///
- Record *getAsmParser() const;
+ const Record *getAsmParser() const;
/// getAsmParserVariant - Return the AssemblyParserVariant definition for
/// this target.
///
- Record *getAsmParserVariant(unsigned i) const;
+ const Record *getAsmParserVariant(unsigned i) const;
/// getAsmParserVariantCount - Return the AssemblyParserVariant definition
/// available for this target.
@@ -118,7 +118,7 @@ public:
/// getAsmWriter - Return the AssemblyWriter definition for this target.
///
- Record *getAsmWriter() const;
+ const Record *getAsmWriter() const;
/// getRegBank - Return the register bank description.
CodeGenRegBank &getRegBank() const;
diff --git a/llvm/utils/TableGen/Common/GlobalISel/PatternParser.cpp b/llvm/utils/TableGen/Common/GlobalISel/PatternParser.cpp
index 4761df3..9dcc5f4 100644
--- a/llvm/utils/TableGen/Common/GlobalISel/PatternParser.cpp
+++ b/llvm/utils/TableGen/Common/GlobalISel/PatternParser.cpp
@@ -117,7 +117,7 @@ PatternParser::parseInstructionPattern(const Init &Arg, StringRef Name) {
std::make_unique<CodeGenInstructionPattern>(Instr, insertStrRef(Name));
} else if (const DagInit *IP =
getDagWithOperatorOfSubClass(Arg, "Intrinsic")) {
- Record *TheDef = IP->getOperatorAsDef(DiagLoc);
+ const Record *TheDef = IP->getOperatorAsDef(DiagLoc);
const CodeGenIntrinsic *Intrin = &CGT.getIntrinsic(TheDef);
const CodeGenInstruction &Instr = getInstrForIntrinsic(CGT, Intrin);
Pat =
@@ -169,7 +169,7 @@ PatternParser::parseWipMatchOpcodeMatcher(const Init &Arg, StringRef Name) {
// Each argument is an opcode that can match.
auto Result = std::make_unique<AnyOpcodePattern>(insertStrRef(Name));
for (const auto &Arg : Matcher->getArgs()) {
- Record *OpcodeDef = getDefOfSubClass(*Arg, "Instruction");
+ const Record *OpcodeDef = getDefOfSubClass(*Arg, "Instruction");
if (OpcodeDef) {
Result->addOpcode(&CGT.getInstruction(OpcodeDef));
continue;
diff --git a/llvm/utils/TableGen/DAGISelMatcherGen.cpp b/llvm/utils/TableGen/DAGISelMatcherGen.cpp
index bb8bba0..31c46d5 100644
--- a/llvm/utils/TableGen/DAGISelMatcherGen.cpp
+++ b/llvm/utils/TableGen/DAGISelMatcherGen.cpp
@@ -202,7 +202,7 @@ void MatcherGen::EmitLeafMatchCode(const TreePatternNode &N) {
assert(N.isLeaf() && "Not a leaf?");
// Direct match against an integer constant.
- if (IntInit *II = dyn_cast<IntInit>(N.getLeafValue())) {
+ if (const IntInit *II = dyn_cast<IntInit>(N.getLeafValue())) {
// If this is the root of the dag we're matching, we emit a redundant opcode
// check to ensure that this gets folded into the normal top-level
// OpcodeSwitch.
@@ -336,7 +336,7 @@ void MatcherGen::EmitOperatorMatchCode(const TreePatternNode &N,
N.getOperator()->getName() == "or") &&
N.getChild(1).isLeaf() && N.getChild(1).getPredicateCalls().empty() &&
N.getPredicateCalls().empty()) {
- if (IntInit *II = dyn_cast<IntInit>(N.getChild(1).getLeafValue())) {
+ if (const IntInit *II = dyn_cast<IntInit>(N.getChild(1).getLeafValue())) {
if (!llvm::has_single_bit<uint32_t>(
II->getValue())) { // Don't bother with single bits.
// If this is at the root of the pattern, we emit a redundant
@@ -665,14 +665,14 @@ void MatcherGen::EmitResultLeafAsOperand(const TreePatternNode &N,
SmallVectorImpl<unsigned> &ResultOps) {
assert(N.isLeaf() && "Must be a leaf");
- if (IntInit *II = dyn_cast<IntInit>(N.getLeafValue())) {
+ if (const IntInit *II = dyn_cast<IntInit>(N.getLeafValue())) {
AddMatcher(new EmitIntegerMatcher(II->getValue(), N.getSimpleType(0)));
ResultOps.push_back(NextRecordedOperandNo++);
return;
}
// If this is an explicit register reference, handle it.
- if (DefInit *DI = dyn_cast<DefInit>(N.getLeafValue())) {
+ if (const DefInit *DI = dyn_cast<DefInit>(N.getLeafValue())) {
const Record *Def = DI->getDef();
if (Def->isSubClassOf("Register")) {
const CodeGenRegister *Reg = CGP.getTargetInfo().getRegBank().getReg(Def);
diff --git a/llvm/utils/TableGen/FastISelEmitter.cpp b/llvm/utils/TableGen/FastISelEmitter.cpp
index af05496..17198c8 100644
--- a/llvm/utils/TableGen/FastISelEmitter.cpp
+++ b/llvm/utils/TableGen/FastISelEmitter.cpp
@@ -269,7 +269,7 @@ struct OperandsSignature {
if (Op.getSimpleType(0) != VT)
return false;
- DefInit *OpDI = dyn_cast<DefInit>(Op.getLeafValue());
+ const DefInit *OpDI = dyn_cast<DefInit>(Op.getLeafValue());
if (!OpDI)
return false;
const Record *OpLeafRec = OpDI->getDef();
@@ -509,7 +509,7 @@ void FastISelMap::collectPatterns(const CodeGenDAGPatterns &CGP) {
if (!Dst.getChild(1).isLeaf())
continue;
- DefInit *SR = dyn_cast<DefInit>(Dst.getChild(1).getLeafValue());
+ const DefInit *SR = dyn_cast<DefInit>(Dst.getChild(1).getLeafValue());
if (SR)
SubRegNo = getQualifiedName(SR->getDef());
else
diff --git a/llvm/utils/TableGen/GlobalISelEmitter.cpp b/llvm/utils/TableGen/GlobalISelEmitter.cpp
index 41a2db1d..c345662 100644
--- a/llvm/utils/TableGen/GlobalISelEmitter.cpp
+++ b/llvm/utils/TableGen/GlobalISelEmitter.cpp
@@ -122,16 +122,16 @@ static std::string explainPredicates(const TreePatternNode &N) {
if (const Record *VT = P.getMemoryVT())
Explanation += (" MemVT=" + VT->getName()).str();
- if (Record *VT = P.getScalarMemoryVT())
+ if (const Record *VT = P.getScalarMemoryVT())
Explanation += (" ScalarVT(MemVT)=" + VT->getName()).str();
- if (ListInit *AddrSpaces = P.getAddressSpaces()) {
+ if (const ListInit *AddrSpaces = P.getAddressSpaces()) {
raw_string_ostream OS(Explanation);
OS << " AddressSpaces=[";
StringRef AddrSpaceSeparator;
- for (Init *Val : AddrSpaces->getValues()) {
- IntInit *IntVal = dyn_cast<IntInit>(Val);
+ for (const Init *Val : AddrSpaces->getValues()) {
+ const IntInit *IntVal = dyn_cast<IntInit>(Val);
if (!IntVal)
continue;
@@ -267,8 +267,8 @@ static Error isTrivialOperatorNode(const TreePatternNode &N) {
return failedImport(Explanation);
}
-static Record *getInitValueAsRegClass(Init *V) {
- if (DefInit *VDefInit = dyn_cast<DefInit>(V)) {
+static const Record *getInitValueAsRegClass(const Init *V) {
+ if (const DefInit *VDefInit = dyn_cast<DefInit>(V)) {
if (VDefInit->getDef()->isSubClassOf("RegisterOperand"))
return VDefInit->getDef()->getValueAsDef("RegClass");
if (VDefInit->getDef()->isSubClassOf("RegisterClass"))
@@ -383,7 +383,8 @@ private:
const CodeGenInstruction *getEquivNode(Record &Equiv,
const TreePatternNode &N) const;
- Error importRulePredicates(RuleMatcher &M, ArrayRef<Record *> Predicates);
+ Error importRulePredicates(RuleMatcher &M,
+ ArrayRef<const Record *> Predicates);
Expected<InstructionMatcher &>
createAndImportSelDAGMatcher(RuleMatcher &Rule,
InstructionMatcher &InsnMatcher,
@@ -420,15 +421,14 @@ private:
Error importDefaultOperandRenderers(action_iterator InsertPt, RuleMatcher &M,
BuildMIAction &DstMIBuilder,
const DAGDefaultOperand &DefaultOp) const;
- Error
- importImplicitDefRenderers(BuildMIAction &DstMIBuilder,
- const std::vector<Record *> &ImplicitDefs) const;
+ Error importImplicitDefRenderers(BuildMIAction &DstMIBuilder,
+ ArrayRef<const Record *> ImplicitDefs) const;
/// Analyze pattern \p P, returning a matcher for it if possible.
/// Otherwise, return an Error explaining why we don't support it.
Expected<RuleMatcher> runOnPattern(const PatternToMatch &P);
- void declareSubtargetFeature(Record *Predicate);
+ void declareSubtargetFeature(const Record *Predicate);
unsigned declareHwModeCheck(StringRef HwModeFeatures);
@@ -544,9 +544,9 @@ GlobalISelEmitter::GlobalISelEmitter(RecordKeeper &RK)
//===- Emitter ------------------------------------------------------------===//
-Error GlobalISelEmitter::importRulePredicates(RuleMatcher &M,
- ArrayRef<Record *> Predicates) {
- for (Record *Pred : Predicates) {
+Error GlobalISelEmitter::importRulePredicates(
+ RuleMatcher &M, ArrayRef<const Record *> Predicates) {
+ for (const Record *Pred : Predicates) {
if (Pred->getValueAsString("CondString").empty())
continue;
declareSubtargetFeature(Pred);
@@ -726,7 +726,7 @@ Expected<InstructionMatcher &> GlobalISelEmitter::createAndImportSelDAGMatcher(
// Start with the defined operands (i.e., the results of the root operator).
if (Src.isLeaf()) {
- Init *SrcInit = Src.getLeafValue();
+ const Init *SrcInit = Src.getLeafValue();
if (isa<IntInit>(SrcInit)) {
InsnMatcher.addPredicate<InstructionOpcodeMatcher>(
&Target.getInstruction(RK.getDef("G_CONSTANT")));
@@ -816,8 +816,8 @@ Expected<InstructionMatcher &> GlobalISelEmitter::createAndImportSelDAGMatcher(
}
if (Src.isLeaf()) {
- Init *SrcInit = Src.getLeafValue();
- if (IntInit *SrcIntInit = dyn_cast<IntInit>(SrcInit)) {
+ const Init *SrcInit = Src.getLeafValue();
+ if (const IntInit *SrcIntInit = dyn_cast<IntInit>(SrcInit)) {
OperandMatcher &OM =
InsnMatcher.addOperand(OpIdx++, Src.getName(), TempOpIdx);
OM.addPredicate<LiteralIntOperandMatcher>(SrcIntInit->getValue());
@@ -851,8 +851,8 @@ Expected<InstructionMatcher &> GlobalISelEmitter::createAndImportSelDAGMatcher(
if (IsFCmp || SrcGIOrNull->TheDef->getName() == "G_ICMP") {
const TreePatternNode &SrcChild = Src.getChild(NumChildren - 1);
if (SrcChild.isLeaf()) {
- DefInit *DI = dyn_cast<DefInit>(SrcChild.getLeafValue());
- Record *CCDef = DI ? DI->getDef() : nullptr;
+ const DefInit *DI = dyn_cast<DefInit>(SrcChild.getLeafValue());
+ const Record *CCDef = DI ? DI->getDef() : nullptr;
if (!CCDef || !CCDef->isSubClassOf("CondCode"))
return failedImport("Unable to handle CondCode");
@@ -1580,7 +1580,8 @@ Expected<action_iterator> GlobalISelEmitter::importExplicitUseRenderers(
if (Name == "EXTRACT_SUBREG") {
if (!Dst.getChild(1).isLeaf())
return failedImport("EXTRACT_SUBREG child #1 is not a leaf");
- DefInit *SubRegInit = dyn_cast<DefInit>(Dst.getChild(1).getLeafValue());
+ const DefInit *SubRegInit =
+ dyn_cast<DefInit>(Dst.getChild(1).getLeafValue());
if (!SubRegInit)
return failedImport("EXTRACT_SUBREG child #1 is not a subreg index");
@@ -1607,7 +1608,7 @@ Expected<action_iterator> GlobalISelEmitter::importExplicitUseRenderers(
}
// If this is a source operand, this is just a subregister copy.
- Record *RCDef = getInitValueAsRegClass(ValChild.getLeafValue());
+ const Record *RCDef = getInitValueAsRegClass(ValChild.getLeafValue());
if (!RCDef)
return failedImport("EXTRACT_SUBREG child #0 could not "
"be coerced to a register class");
@@ -1638,7 +1639,8 @@ Expected<action_iterator> GlobalISelEmitter::importExplicitUseRenderers(
if (!Dst.getChild(0).isLeaf())
return failedImport("REG_SEQUENCE child #0 is not a leaf");
- Record *RCDef = getInitValueAsRegClass(Dst.getChild(0).getLeafValue());
+ const Record *RCDef =
+ getInitValueAsRegClass(Dst.getChild(0).getLeafValue());
if (!RCDef)
return failedImport("REG_SEQUENCE child #0 could not "
"be coerced to a register class");
@@ -1650,7 +1652,8 @@ Expected<action_iterator> GlobalISelEmitter::importExplicitUseRenderers(
const TreePatternNode &ValChild = Dst.getChild(I);
const TreePatternNode &SubRegChild = Dst.getChild(I + 1);
- if (DefInit *SubRegInit = dyn_cast<DefInit>(SubRegChild.getLeafValue())) {
+ if (const DefInit *SubRegInit =
+ dyn_cast<DefInit>(SubRegChild.getLeafValue())) {
CodeGenSubRegIndex *SubIdx = CGRegs.getSubRegIdx(SubRegInit->getDef());
auto InsertPtOrError =
@@ -1782,8 +1785,7 @@ Error GlobalISelEmitter::importDefaultOperandRenderers(
}
Error GlobalISelEmitter::importImplicitDefRenderers(
- BuildMIAction &DstMIBuilder,
- const std::vector<Record *> &ImplicitDefs) const {
+ BuildMIAction &DstMIBuilder, ArrayRef<const Record *> ImplicitDefs) const {
if (!ImplicitDefs.empty())
return failedImport("Pattern defines a physical register");
return Error::success();
@@ -1792,10 +1794,10 @@ Error GlobalISelEmitter::importImplicitDefRenderers(
std::optional<const CodeGenRegisterClass *>
GlobalISelEmitter::getRegClassFromLeaf(const TreePatternNode &Leaf) {
assert(Leaf.isLeaf() && "Expected leaf?");
- Record *RCRec = getInitValueAsRegClass(Leaf.getLeafValue());
+ const Record *RCRec = getInitValueAsRegClass(Leaf.getLeafValue());
if (!RCRec)
return std::nullopt;
- CodeGenRegisterClass *RC = CGRegs.getRegClass(RCRec);
+ const CodeGenRegisterClass *RC = CGRegs.getRegClass(RCRec);
if (!RC)
return std::nullopt;
return RC;
@@ -1873,10 +1875,10 @@ GlobalISelEmitter::inferSuperRegisterClass(
return std::nullopt;
if (!SubRegIdxNode.isLeaf())
return std::nullopt;
- DefInit *SubRegInit = dyn_cast<DefInit>(SubRegIdxNode.getLeafValue());
+ const DefInit *SubRegInit = dyn_cast<DefInit>(SubRegIdxNode.getLeafValue());
if (!SubRegInit)
return std::nullopt;
- CodeGenSubRegIndex *SubIdx = CGRegs.getSubRegIdx(SubRegInit->getDef());
+ const CodeGenSubRegIndex *SubIdx = CGRegs.getSubRegIdx(SubRegInit->getDef());
// Use the information we found above to find a minimal register class which
// supports the subregister and type we want.
@@ -1908,7 +1910,7 @@ std::optional<CodeGenSubRegIndex *> GlobalISelEmitter::inferSubRegIndexForNode(
if (!SubRegIdxNode.isLeaf())
return std::nullopt;
- DefInit *SubRegInit = dyn_cast<DefInit>(SubRegIdxNode.getLeafValue());
+ const DefInit *SubRegInit = dyn_cast<DefInit>(SubRegIdxNode.getLeafValue());
if (!SubRegInit)
return std::nullopt;
return CGRegs.getSubRegIdx(SubRegInit->getDef());
@@ -1923,7 +1925,7 @@ Expected<RuleMatcher> GlobalISelEmitter::runOnPattern(const PatternToMatch &P) {
" => " +
llvm::to_string(P.getDstPattern()));
- SmallVector<Record *, 4> Predicates;
+ SmallVector<const Record *, 4> Predicates;
P.getPredicateRecords(Predicates);
if (auto Error = importRulePredicates(M, Predicates))
return std::move(Error);
@@ -1976,8 +1978,7 @@ Expected<RuleMatcher> GlobalISelEmitter::runOnPattern(const PatternToMatch &P) {
InstructionMatcher &InsnMatcher = InsnMatcherOrError.get();
if (Dst.isLeaf()) {
- Record *RCDef = getInitValueAsRegClass(Dst.getLeafValue());
- if (RCDef) {
+ if (const Record *RCDef = getInitValueAsRegClass(Dst.getLeafValue())) {
const CodeGenRegisterClass &RC = Target.getRegisterClass(RCDef);
// We need to replace the def and all its uses with the specified
@@ -2119,7 +2120,8 @@ Expected<RuleMatcher> GlobalISelEmitter::runOnPattern(const PatternToMatch &P) {
if (DstIName == "COPY_TO_REGCLASS") {
// COPY_TO_REGCLASS does not provide operand constraints itself but the
// result is constrained to the class given by the second child.
- Record *DstIOpRec = getInitValueAsRegClass(Dst.getChild(1).getLeafValue());
+ const Record *DstIOpRec =
+ getInitValueAsRegClass(Dst.getChild(1).getLeafValue());
if (DstIOpRec == nullptr)
return failedImport("COPY_TO_REGCLASS operand #1 isn't a register class");
@@ -2514,7 +2516,7 @@ void GlobalISelEmitter::run(raw_ostream &OS) {
emitPredicatesInit(OS, "GET_GLOBALISEL_PREDICATES_INIT");
}
-void GlobalISelEmitter::declareSubtargetFeature(Record *Predicate) {
+void GlobalISelEmitter::declareSubtargetFeature(const Record *Predicate) {
SubtargetFeatures.try_emplace(Predicate, Predicate, SubtargetFeatures.size());
}
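// A minimal sketch, not from the patch, of why signatures here migrate from
// `const std::vector<Record *> &` to `ArrayRef<const Record *>`: ArrayRef is
// a cheap non-owning view, so callers may pass a std::vector, a SmallVector,
// or a braced list without copying, and ArrayRef also converts pointer
// elements to their const form, so element types can gain const without
// churning every caller's container at once. Names are illustrative only.
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include <vector>

static unsigned countNonNull(llvm::ArrayRef<const int *> Ptrs) {
  unsigned N = 0;
  for (const int *P : Ptrs)
    N += (P != nullptr);
  return N;
}

void arrayRefExample() {
  int A = 1, B = 2;
  std::vector<int *> V = {&A, nullptr};        // non-const elements...
  llvm::SmallVector<const int *, 4> SV = {&B};
  (void)countNonNull(V);  // ...still bind to ArrayRef<const int *>
  (void)countNonNull(SV); // SmallVector binds the same way
}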
diff --git a/llvm/utils/TableGen/OptParserEmitter.cpp b/llvm/utils/TableGen/OptionParserEmitter.cpp
index 79cbf51..5ae6f77 100644
--- a/llvm/utils/TableGen/OptParserEmitter.cpp
+++ b/llvm/utils/TableGen/OptionParserEmitter.cpp
@@ -1,4 +1,4 @@
-//===- OptParserEmitter.cpp - Table Driven Command Line Parsing -----------===//
+//===- OptionParserEmitter.cpp - Table Driven Command Line Option Parsing -===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -247,10 +247,10 @@ static void EmitHelpTextsForVariants(
OS << " }})";
}
-/// OptParserEmitter - This tablegen backend takes an input .td file
+/// OptionParserEmitter - This tablegen backend takes an input .td file
/// describing a list of options and emits a data structure for parsing and
/// working with those options when given an input command line.
-static void EmitOptParser(const RecordKeeper &Records, raw_ostream &OS) {
+static void EmitOptionParser(const RecordKeeper &Records, raw_ostream &OS) {
// Get the option groups and options.
ArrayRef<const Record *> Groups =
Records.getAllDerivedDefinitions("OptionGroup");
@@ -572,5 +572,5 @@ static void EmitOptParser(const RecordKeeper &Records, raw_ostream &OS) {
OS << "\n";
}
-static TableGen::Emitter::Opt X("gen-opt-parser-defs", EmitOptParser,
+static TableGen::Emitter::Opt X("gen-opt-parser-defs", EmitOptionParser,
"Generate option definitions");
diff --git a/llvm/utils/TableGen/OptRSTEmitter.cpp b/llvm/utils/TableGen/OptionRSTEmitter.cpp
index 1612519..b798896 100644
--- a/llvm/utils/TableGen/OptRSTEmitter.cpp
+++ b/llvm/utils/TableGen/OptionRSTEmitter.cpp
@@ -1,4 +1,4 @@
-//===- OptParserEmitter.cpp - Table Driven Command Line Parsing -----------===//
+//===- OptionRSTEmitter.cpp - Table Driven Command Line Option Parsing ----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -14,9 +14,9 @@
using namespace llvm;
-/// OptParserEmitter - This tablegen backend takes an input .td file
-/// describing a list of options and emits a RST man page.
-static void EmitOptRST(const RecordKeeper &Records, raw_ostream &OS) {
+/// This tablegen backend takes an input .td file describing a list of options
+/// and emits an RST man page.
+static void EmitOptionRST(const RecordKeeper &Records, raw_ostream &OS) {
llvm::StringMap<std::vector<const Record *>> OptionsByGroup;
std::vector<Record *> OptionsWithoutGroup;
@@ -97,5 +97,5 @@ static void EmitOptRST(const RecordKeeper &Records, raw_ostream &OS) {
}
}
-static TableGen::Emitter::Opt X("gen-opt-rst", EmitOptRST,
+static TableGen::Emitter::Opt X("gen-opt-rst", EmitOptionRST,
"Generate option RST");
diff --git a/llvm/utils/UpdateTestChecks/common.py b/llvm/utils/UpdateTestChecks/common.py
index 9b9be69..b861bd0 100644
--- a/llvm/utils/UpdateTestChecks/common.py
+++ b/llvm/utils/UpdateTestChecks/common.py
@@ -573,7 +573,7 @@ LOOP_PASS_DEBUG_RE = re.compile(
IR_FUNCTION_RE = re.compile(r'^\s*define\s+(?:internal\s+)?[^@]*@"?([\w.$-]+)"?\s*\(')
TRIPLE_IR_RE = re.compile(r'^\s*target\s+triple\s*=\s*"([^"]+)"$')
-TRIPLE_ARG_RE = re.compile(r"-mtriple[= ]([^ ]+)")
+TRIPLE_ARG_RE = re.compile(r"-m?triple[= ]([^ ]+)")
MARCH_ARG_RE = re.compile(r"-march[= ]([^ ]+)")
DEBUG_ONLY_ARG_RE = re.compile(r"-debug-only[= ]([^ ]+)")
diff --git a/llvm/utils/gn/build/BUILD.gn b/llvm/utils/gn/build/BUILD.gn
index 27f95bb..0b0f627 100644
--- a/llvm/utils/gn/build/BUILD.gn
+++ b/llvm/utils/gn/build/BUILD.gn
@@ -186,6 +186,7 @@ config("compiler_defaults") {
if (!is_clang) {
# expand __VA_ARGS__ in "OPTION(...) LLVM_MAKE_OPT_ID(__VA_ARGS__)"
cflags += [ "/Zc:preprocessor" ]
+
# cl.exe doesn't set __cplusplus correctly by default.
# clang-cl gets it right by default, so don't needlessly add the flag there.
cflags_cc += [ "/Zc:__cplusplus" ]
diff --git a/llvm/utils/gn/build/toolchain/target_flags.gni b/llvm/utils/gn/build/toolchain/target_flags.gni
index cbfa229..50d31a3 100644
--- a/llvm/utils/gn/build/toolchain/target_flags.gni
+++ b/llvm/utils/gn/build/toolchain/target_flags.gni
@@ -45,9 +45,10 @@ if (current_os == "android") {
target_flags += [
"-isysroot",
rebase_path(mac_sdk_path, root_build_dir),
+
# TODO(lgrey): We should be getting this from `compiler_defaults`. Why
# aren't we?
- "-mmacos-version-min=$mac_deployment_target",
+ "-mmacos-version-min=$mac_deployment_target",
]
}
} else if (current_os == "baremetal") {
diff --git a/llvm/utils/gn/secondary/BUILD.gn b/llvm/utils/gn/secondary/BUILD.gn
index a17a2fd..7f6b4cb 100644
--- a/llvm/utils/gn/secondary/BUILD.gn
+++ b/llvm/utils/gn/secondary/BUILD.gn
@@ -21,12 +21,12 @@ group("default") {
"//libcxxabi",
]
}
- if (current_os == "linux" || current_os == "win" || current_os=="mac") {
+ if (current_os == "linux" || current_os == "win" || current_os == "mac") {
deps += [ "//compiler-rt/test/asan" ]
}
if (current_os == "linux" || current_os == "mac") {
- deps += [ "//compiler-rt/test/lsan"]
+ deps += [ "//compiler-rt/test/lsan" ]
}
if (current_os == "linux" || current_os == "android") {
diff --git a/llvm/utils/gn/secondary/clang-tools-extra/clang-doc/tool/BUILD.gn b/llvm/utils/gn/secondary/clang-tools-extra/clang-doc/tool/BUILD.gn
index b224df0..47dd70e 100644
--- a/llvm/utils/gn/secondary/clang-tools-extra/clang-doc/tool/BUILD.gn
+++ b/llvm/utils/gn/secondary/clang-tools-extra/clang-doc/tool/BUILD.gn
@@ -1,7 +1,7 @@
copy("assets") {
sources = [
- "../assets/index.js",
"../assets/clang-doc-default-stylesheet.css",
+ "../assets/index.js",
]
outputs = [ "$root_build_dir/share/clang-doc/{{source_file_part}}" ]
}
diff --git a/llvm/utils/gn/secondary/clang-tools-extra/clangd/BUILD.gn b/llvm/utils/gn/secondary/clang-tools-extra/clangd/BUILD.gn
index a116c0abe..c6b45ef 100644
--- a/llvm/utils/gn/secondary/clang-tools-extra/clangd/BUILD.gn
+++ b/llvm/utils/gn/secondary/clang-tools-extra/clangd/BUILD.gn
@@ -56,9 +56,9 @@ static_library("clangd") {
"//clang/lib/Serialization",
"//clang/lib/Tooling",
"//clang/lib/Tooling/Core",
+ "//clang/lib/Tooling/DependencyScanning",
"//clang/lib/Tooling/Inclusions",
"//clang/lib/Tooling/Inclusions/Stdlib",
- "//clang/lib/Tooling/DependencyScanning",
"//clang/lib/Tooling/Refactoring",
"//clang/lib/Tooling/Syntax",
"//llvm/lib/Support",
diff --git a/llvm/utils/gn/secondary/clang/lib/Interpreter/BUILD.gn b/llvm/utils/gn/secondary/clang/lib/Interpreter/BUILD.gn
index a578651..4b79e98 100644
--- a/llvm/utils/gn/secondary/clang/lib/Interpreter/BUILD.gn
+++ b/llvm/utils/gn/secondary/clang/lib/Interpreter/BUILD.gn
@@ -30,6 +30,7 @@ static_library("Interpreter") {
"IncrementalParser.cpp",
"Interpreter.cpp",
"InterpreterUtils.cpp",
+ "InterpreterValuePrinter.cpp",
"Value.cpp",
]
}
diff --git a/llvm/utils/gn/secondary/clang/test/BUILD.gn b/llvm/utils/gn/secondary/clang/test/BUILD.gn
index 1d5b802..97610d4 100644
--- a/llvm/utils/gn/secondary/clang/test/BUILD.gn
+++ b/llvm/utils/gn/secondary/clang/test/BUILD.gn
@@ -124,13 +124,13 @@ write_lit_config("lit_site_cfg") {
"CMAKE_LIBRARY_OUTPUT_DIRECTORY=" + rebase_path("$root_out_dir/bin", dir),
"LLVM_LIT_ERRC_MESSAGES=no such file or directory;is a directory;" +
"invalid argument;permission denied",
- "PERL_EXECUTABLE="
+ "PERL_EXECUTABLE=",
]
} else {
extra_values += [
"CMAKE_LIBRARY_OUTPUT_DIRECTORY=" + rebase_path("$root_out_dir/lib", dir),
"LLVM_LIT_ERRC_MESSAGES=",
- "PERL_EXECUTABLE=/usr/bin/perl"
+ "PERL_EXECUTABLE=/usr/bin/perl",
]
}
diff --git a/llvm/utils/gn/secondary/clang/unittests/InstallAPI/BUILD.gn b/llvm/utils/gn/secondary/clang/unittests/InstallAPI/BUILD.gn
index e2765945..b8bf438 100644
--- a/llvm/utils/gn/secondary/clang/unittests/InstallAPI/BUILD.gn
+++ b/llvm/utils/gn/secondary/clang/unittests/InstallAPI/BUILD.gn
@@ -7,7 +7,7 @@ unittest("InstallAPITests") {
"//llvm/lib/Testing/Support",
]
sources = [
- "HeaderFileTest.cpp",
"FileListTest.cpp",
+ "HeaderFileTest.cpp",
]
}
diff --git a/llvm/utils/gn/secondary/compiler-rt/lib/sanitizer_common/BUILD.gn b/llvm/utils/gn/secondary/compiler-rt/lib/sanitizer_common/BUILD.gn
index 450c419..e398119 100644
--- a/llvm/utils/gn/secondary/compiler-rt/lib/sanitizer_common/BUILD.gn
+++ b/llvm/utils/gn/secondary/compiler-rt/lib/sanitizer_common/BUILD.gn
@@ -167,9 +167,9 @@ source_set("sources") {
"sanitizer_vector.h",
"sanitizer_win.cpp",
"sanitizer_win.h",
+ "sanitizer_win_defs.h",
"sanitizer_win_interception.cpp",
"sanitizer_win_interception.h",
- "sanitizer_win_defs.h",
"sanitizer_win_thunk_interception.h",
]
}
diff --git a/llvm/utils/gn/secondary/compiler-rt/test/hwasan/BUILD.gn b/llvm/utils/gn/secondary/compiler-rt/test/hwasan/BUILD.gn
index 59ed1d1..7bdf9c2 100644
--- a/llvm/utils/gn/secondary/compiler-rt/test/hwasan/BUILD.gn
+++ b/llvm/utils/gn/secondary/compiler-rt/test/hwasan/BUILD.gn
@@ -37,8 +37,8 @@ if (current_toolchain != host_toolchain) {
":lit_site_cfg",
"//compiler-rt/include($host_toolchain)",
"//compiler-rt/lib/cfi:ignorelist($host_toolchain)",
- "//compiler-rt/lib/hwasan:hwasan_shared",
"//compiler-rt/lib/hwasan:hwasan_preinit",
+ "//compiler-rt/lib/hwasan:hwasan_shared",
"//compiler-rt/test:lit_common_configured",
"//llvm/utils/FileCheck($host_toolchain)",
"//llvm/utils/llvm-lit($host_toolchain)",
diff --git a/llvm/utils/gn/secondary/compiler-rt/test/lsan/BUILD.gn b/llvm/utils/gn/secondary/compiler-rt/test/lsan/BUILD.gn
index 4fb375f..7dc69af 100644
--- a/llvm/utils/gn/secondary/compiler-rt/test/lsan/BUILD.gn
+++ b/llvm/utils/gn/secondary/compiler-rt/test/lsan/BUILD.gn
@@ -7,11 +7,13 @@ import("//llvm/version.gni")
write_cmake_config("asan_mode_cfg") {
input = "lit.site.cfg.py.in"
- output = "$target_gen_dir/${crt_current_target_arch}AsanConfig/lit.site.cfg.py"
+ output =
+ "$target_gen_dir/${crt_current_target_arch}AsanConfig/lit.site.cfg.py"
values = [
"LSAN_LIT_SOURCE_DIR=" + rebase_path("."),
"LSAN_TEST_CONFIG_SUFFIX=$crt_current_target_suffix",
"LSAN_TEST_TARGET_CFLAGS=$target_flags_string",
+
# TODO(lgrey): Support standalone mode
"LSAN_LIT_TEST_MODE=AddressSanitizer",
"LSAN_TEST_TARGET_ARCH=$crt_current_target_arch",
@@ -59,9 +61,7 @@ if (supported_toolchains != []) {
test_dir = rebase_path(
get_label_info(":lit_site_cfg($toolchain)", "target_gen_dir"),
root_build_dir)
- args += [
- test_dir + "/${crt_current_target_arch}AsanConfig",
- ]
+ args += [ test_dir + "/${crt_current_target_arch}AsanConfig" ]
}
outputs = [ "$target_gen_dir/run-lit" ] # Non-existing, so that ninja runs
# it each time.
diff --git a/llvm/utils/gn/secondary/libcxx/src/BUILD.gn b/llvm/utils/gn/secondary/libcxx/src/BUILD.gn
index a94674a..29e8649 100644
--- a/llvm/utils/gn/secondary/libcxx/src/BUILD.gn
+++ b/llvm/utils/gn/secondary/libcxx/src/BUILD.gn
@@ -317,13 +317,13 @@ if (libcxx_enable_experimental) {
sources = [ "experimental/keep.cpp" ]
if (libcxx_enable_filesystem && libcxx_enable_time_zone_database) {
sources += [
+ # TODO TZDB The exception could be moved in chrono once the TZDB library
+ # is no longer experimental.
+ "experimental/chrono_exception.cpp",
"experimental/include/tzdb/time_zone_private.h",
"experimental/include/tzdb/types_private.h",
"experimental/include/tzdb/tzdb_list_private.h",
"experimental/include/tzdb/tzdb_private.h",
- # TODO TZDB The exception could be moved in chrono once the TZDB library
- # is no longer experimental.
- "experimental/chrono_exception.cpp",
"experimental/time_zone.cpp",
"experimental/tzdb.cpp",
"experimental/tzdb_list.cpp",
diff --git a/llvm/utils/gn/secondary/lld/unittests/AsLibAll/BUILD.gn b/llvm/utils/gn/secondary/lld/unittests/AsLibAll/BUILD.gn
index d6af6a1..6eb82ea 100644
--- a/llvm/utils/gn/secondary/lld/unittests/AsLibAll/BUILD.gn
+++ b/llvm/utils/gn/secondary/lld/unittests/AsLibAll/BUILD.gn
@@ -3,8 +3,8 @@ import("//third-party/unittest/unittest.gni")
unittest("LLDAsLibAllTests") {
configs += [ "//llvm/utils/gn/build:lld_code" ]
deps = [
- "//lld/Common",
"//lld/COFF",
+ "//lld/Common",
"//lld/ELF",
"//lld/MachO",
"//lld/MinGW",
diff --git a/llvm/utils/gn/secondary/lld/unittests/BUILD.gn b/llvm/utils/gn/secondary/lld/unittests/BUILD.gn
index c909670..6faaa12 100644
--- a/llvm/utils/gn/secondary/lld/unittests/BUILD.gn
+++ b/llvm/utils/gn/secondary/lld/unittests/BUILD.gn
@@ -5,4 +5,3 @@ group("unittests") {
]
testonly = true
}
-
diff --git a/llvm/utils/gn/secondary/lldb/test/BUILD.gn b/llvm/utils/gn/secondary/lldb/test/BUILD.gn
index e903d16..369b24f 100644
--- a/llvm/utils/gn/secondary/lldb/test/BUILD.gn
+++ b/llvm/utils/gn/secondary/lldb/test/BUILD.gn
@@ -164,8 +164,8 @@ group("test") {
":lit_unit_site_cfg",
"//clang/tools/driver:symlinks",
"//lld/tools/lld:symlinks",
- "//lldb/tools/lldb-dap",
"//lldb/tools/driver:lldb",
+ "//lldb/tools/lldb-dap",
# XXX lldb-instr, darwin-debug, etc
"//lldb/tools/lldb-server",
diff --git a/llvm/utils/gn/secondary/llvm/include/llvm/TargetParser/BUILD.gn b/llvm/utils/gn/secondary/llvm/include/llvm/TargetParser/BUILD.gn
index a71dfa5..455a8265 100644
--- a/llvm/utils/gn/secondary/llvm/include/llvm/TargetParser/BUILD.gn
+++ b/llvm/utils/gn/secondary/llvm/include/llvm/TargetParser/BUILD.gn
@@ -23,8 +23,8 @@ tablegen("RISCVTargetParserDef") {
group("gen") {
deps = [
- ":ARMTargetParserDef",
":AArch64TargetParserDef",
+ ":ARMTargetParserDef",
":RISCVTargetParserDef",
]
}
diff --git a/llvm/utils/gn/secondary/llvm/lib/CodeGenTypes/BUILD.gn b/llvm/utils/gn/secondary/llvm/lib/CodeGenTypes/BUILD.gn
index 5df31c3..04f819d 100644
--- a/llvm/utils/gn/secondary/llvm/lib/CodeGenTypes/BUILD.gn
+++ b/llvm/utils/gn/secondary/llvm/lib/CodeGenTypes/BUILD.gn
@@ -10,4 +10,3 @@ static_library("CodeGenTypes") {
]
sources = [ "LowLevelType.cpp" ]
}
-
diff --git a/llvm/utils/gn/secondary/llvm/lib/DebugInfo/BTF/BUILD.gn b/llvm/utils/gn/secondary/llvm/lib/DebugInfo/BTF/BUILD.gn
index 74c1362..803dd86 100644
--- a/llvm/utils/gn/secondary/llvm/lib/DebugInfo/BTF/BUILD.gn
+++ b/llvm/utils/gn/secondary/llvm/lib/DebugInfo/BTF/BUILD.gn
@@ -2,7 +2,7 @@ static_library("BTF") {
output_name = "LLVMDebugInfoBTF"
deps = [ "//llvm/lib/Support" ]
sources = [
- "BTFParser.cpp",
"BTFContext.cpp",
+ "BTFParser.cpp",
]
}
diff --git a/llvm/utils/gn/secondary/llvm/lib/Target/AMDGPU/BUILD.gn b/llvm/utils/gn/secondary/llvm/lib/Target/AMDGPU/BUILD.gn
index dd4af4e..f83efbd 100644
--- a/llvm/utils/gn/secondary/llvm/lib/Target/AMDGPU/BUILD.gn
+++ b/llvm/utils/gn/secondary/llvm/lib/Target/AMDGPU/BUILD.gn
@@ -152,7 +152,6 @@ static_library("LLVMAMDGPUCodeGen") {
"AMDGPUISelLowering.cpp",
"AMDGPUImageIntrinsicOptimizer.cpp",
"AMDGPUInsertDelayAlu.cpp",
- "AMDGPUInsertSingleUseVDST.cpp",
"AMDGPUInstCombineIntrinsic.cpp",
"AMDGPUInstrInfo.cpp",
"AMDGPUInstructionSelector.cpp",
diff --git a/llvm/utils/gn/secondary/llvm/lib/Target/WebAssembly/Utils/BUILD.gn b/llvm/utils/gn/secondary/llvm/lib/Target/WebAssembly/Utils/BUILD.gn
index a4a6889..1fba864 100644
--- a/llvm/utils/gn/secondary/llvm/lib/Target/WebAssembly/Utils/BUILD.gn
+++ b/llvm/utils/gn/secondary/llvm/lib/Target/WebAssembly/Utils/BUILD.gn
@@ -12,7 +12,5 @@ static_library("Utils") {
"//llvm/lib/Target/WebAssembly/TargetInfo",
]
include_dirs = [ ".." ]
- sources = [
- "WebAssemblyTypeUtilities.cpp",
- ]
+ sources = [ "WebAssemblyTypeUtilities.cpp" ]
}
diff --git a/llvm/utils/gn/secondary/llvm/tools/llc/BUILD.gn b/llvm/utils/gn/secondary/llvm/tools/llc/BUILD.gn
index a968760..8756ee5 100644
--- a/llvm/utils/gn/secondary/llvm/tools/llc/BUILD.gn
+++ b/llvm/utils/gn/secondary/llvm/tools/llc/BUILD.gn
@@ -6,8 +6,8 @@ executable("llc") {
"//llvm/lib/CodeGen/MIRParser",
"//llvm/lib/CodeGen/SelectionDAG",
"//llvm/lib/IR",
- "//llvm/lib/IRReader",
"//llvm/lib/IRPrinter",
+ "//llvm/lib/IRReader",
"//llvm/lib/MC",
"//llvm/lib/Passes",
"//llvm/lib/Support",
diff --git a/llvm/utils/gn/secondary/llvm/tools/llvm-dwp/BUILD.gn b/llvm/utils/gn/secondary/llvm/tools/llvm-dwp/BUILD.gn
index 01f8d0f..49cccaa 100644
--- a/llvm/utils/gn/secondary/llvm/tools/llvm-dwp/BUILD.gn
+++ b/llvm/utils/gn/secondary/llvm/tools/llvm-dwp/BUILD.gn
@@ -1,7 +1,7 @@
import("//llvm/tools/binutils_symlinks.gni")
+import("//llvm/utils/TableGen/tablegen.gni")
import("//llvm/utils/gn/build/driver_executable.gni")
import("//llvm/utils/gn/build/symlink_or_copy.gni")
-import("//llvm/utils/TableGen/tablegen.gni")
tablegen("Opts") {
visibility = [ ":llvm-dwp" ]
diff --git a/llvm/utils/gn/secondary/llvm/tools/llvm-libtool-darwin/BUILD.gn b/llvm/utils/gn/secondary/llvm/tools/llvm-libtool-darwin/BUILD.gn
index c974cae..034201a 100644
--- a/llvm/utils/gn/secondary/llvm/tools/llvm-libtool-darwin/BUILD.gn
+++ b/llvm/utils/gn/secondary/llvm/tools/llvm-libtool-darwin/BUILD.gn
@@ -1,7 +1,7 @@
import("//llvm/tools/cctools_symlinks.gni")
+import("//llvm/utils/TableGen/tablegen.gni")
import("//llvm/utils/gn/build/driver_executable.gni")
import("//llvm/utils/gn/build/symlink_or_copy.gni")
-import("//llvm/utils/TableGen/tablegen.gni")
tablegen("Opts") {
visibility = [ ":llvm-libtool-darwin" ]
diff --git a/llvm/utils/gn/secondary/llvm/tools/llvm-ml/BUILD.gn b/llvm/utils/gn/secondary/llvm/tools/llvm-ml/BUILD.gn
index b094f0e..9e3fb96 100644
--- a/llvm/utils/gn/secondary/llvm/tools/llvm-ml/BUILD.gn
+++ b/llvm/utils/gn/secondary/llvm/tools/llvm-ml/BUILD.gn
@@ -1,5 +1,5 @@
-import("//llvm/utils/gn/build/driver_executable.gni")
import("//llvm/utils/TableGen/tablegen.gni")
+import("//llvm/utils/gn/build/driver_executable.gni")
tablegen("Opts") {
visibility = [ ":llvm-ml" ]
diff --git a/llvm/utils/gn/secondary/llvm/tools/sancov/BUILD.gn b/llvm/utils/gn/secondary/llvm/tools/sancov/BUILD.gn
index ff0fd70..9057072 100644
--- a/llvm/utils/gn/secondary/llvm/tools/sancov/BUILD.gn
+++ b/llvm/utils/gn/secondary/llvm/tools/sancov/BUILD.gn
@@ -1,5 +1,5 @@
-import("//llvm/utils/gn/build/driver_executable.gni")
import("//llvm/utils/TableGen/tablegen.gni")
+import("//llvm/utils/gn/build/driver_executable.gni")
tablegen("Opts") {
visibility = [ ":sancov" ]
diff --git a/llvm/utils/gn/secondary/llvm/unittests/Transforms/Instrumentation/BUILD.gn b/llvm/utils/gn/secondary/llvm/unittests/Transforms/Instrumentation/BUILD.gn
index 27ff75b..c9c59ac 100644
--- a/llvm/utils/gn/secondary/llvm/unittests/Transforms/Instrumentation/BUILD.gn
+++ b/llvm/utils/gn/secondary/llvm/unittests/Transforms/Instrumentation/BUILD.gn
@@ -5,10 +5,10 @@ unittest("InstrumentationTests") {
"//llvm/lib/Analysis",
"//llvm/lib/AsmParser",
"//llvm/lib/IR",
- "//llvm/lib/Transforms/Instrumentation",
"//llvm/lib/Passes",
"//llvm/lib/Support",
"//llvm/lib/Testing/Support",
+ "//llvm/lib/Transforms/Instrumentation",
]
sources = [ "PGOInstrumentationTest.cpp" ]
}
diff --git a/llvm/utils/gn/secondary/llvm/utils/TableGen/BUILD.gn b/llvm/utils/gn/secondary/llvm/utils/TableGen/BUILD.gn
index 2e11d25..ba52a97 100644
--- a/llvm/utils/gn/secondary/llvm/utils/TableGen/BUILD.gn
+++ b/llvm/utils/gn/secondary/llvm/utils/TableGen/BUILD.gn
@@ -56,17 +56,17 @@ executable("llvm-tblgen") {
"InstrDocsEmitter.cpp",
"InstrInfoEmitter.cpp",
"MacroFusionPredicatorEmitter.cpp",
- "OptParserEmitter.cpp",
- "OptRSTEmitter.cpp",
+ "OptionParserEmitter.cpp",
+ "OptionRSTEmitter.cpp",
"PseudoLoweringEmitter.cpp",
"RegisterBankEmitter.cpp",
"RegisterInfoEmitter.cpp",
"SearchableTableEmitter.cpp",
"SubtargetEmitter.cpp",
"WebAssemblyDisassemblerEmitter.cpp",
- "X86InstrMappingEmitter.cpp",
"X86DisassemblerTables.cpp",
"X86FoldTablesEmitter.cpp",
+ "X86InstrMappingEmitter.cpp",
"X86MnemonicTables.cpp",
"X86ModRMFilters.cpp",
"X86RecognizableInstr.cpp",
diff --git a/llvm/utils/update_mc_test_checks.py b/llvm/utils/update_mc_test_checks.py
new file mode 100755
index 0000000..f9f8cfd
--- /dev/null
+++ b/llvm/utils/update_mc_test_checks.py
@@ -0,0 +1,329 @@
+#!/usr/bin/env python3
+"""
+A test update script. This script is a utility for updating LLVM 'llvm-mc'-based test cases with new FileCheck patterns.
+"""
+
+from __future__ import print_function
+
+import argparse
+import os # Used to advertise this file's name ("autogenerated_note").
+
+from UpdateTestChecks import common
+
+import subprocess
+import re
+
+mc_LIKE_TOOLS = [
+ "llvm-mc",
+ "not llvm-mc",
+]
+ERROR_RE = re.compile(r":\d+: (warning|error): .*")
+ERROR_CHECK_RE = re.compile(r"# COM: .*")
+OUTPUT_SKIPPED_RE = re.compile(r"(.text)")
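+# Note: the "." in OUTPUT_SKIPPED_RE is unescaped, so the pattern matches
+# "text" preceded by any character; it is only used to drop ".text" section
+# noise from the tool output.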
+COMMENT = {"asm": "//", "dasm": "#"}
+
+
+def invoke_tool(exe, cmd_args, testline, verbose=False):
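+    # NOTE: the list branch below expects substitution helpers
+    # (applySubstitutions / substitutions) that this script does not define;
+    # in practice cmd_args arrives as a single string.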
+ if isinstance(cmd_args, list):
+ args = [applySubstitutions(a, substitutions) for a in cmd_args]
+ else:
+ args = cmd_args
+
+ cmd = 'echo "' + testline + '" | ' + exe + " " + args
+ if verbose:
+ print("Command: ", cmd)
+ out = subprocess.check_output(cmd, shell=True)
+    # Normalize Windows CRLF line endings to Unix LF.
+ return out.decode().replace("\r\n", "\n")
+
+
+# Create tests line by line: filter out CHECK lines and comments and treat
+# all other lines as tests.
+def isTestLine(input_line, mc_mode):
+ line = input_line.strip()
+ # Skip empty and comment lines
+ if not line or line.startswith(COMMENT[mc_mode]):
+ return False
+ # skip any CHECK lines.
+ elif common.CHECK_RE.match(input_line):
+ return False
+ return True
+
+
+def hasErr(err):
+ return err and ERROR_RE.search(err) is not None
+
+
+def getErrString(err):
+ if not err:
+ return ""
+
+ # take the first match
+ for line in err.splitlines():
+ s = ERROR_RE.search(line)
+ if s:
+ return s.group(0)
+ return ""
+
+
+def getOutputString(out):
+ if not out:
+ return ""
+ output = ""
+
+ for line in out.splitlines():
+ if OUTPUT_SKIPPED_RE.search(line):
+ continue
+ if line.strip("\t ") == "":
+ continue
+ output += line.lstrip("\t ")
+ return output
+
+
+def should_add_line_to_output(input_line, prefix_set, mc_mode):
+ # special check line
+ if mc_mode == "dasm" and ERROR_CHECK_RE.search(input_line):
+ return False
+ else:
+ return common.should_add_line_to_output(
+ input_line, prefix_set, comment_marker=COMMENT[mc_mode]
+ )
+
+
+def getStdCheckLine(prefix, output, mc_mode):
+ o = ""
+ for line in output.splitlines():
+ o += COMMENT[mc_mode] + " " + prefix + ": " + line + "\n"
+ return o
+
+
+def getErrCheckLine(prefix, output, mc_mode):
+ return COMMENT[mc_mode] + " " + prefix + ": " + ":[[@LINE-1]]" + output + "\n"
+
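+# For example (illustrative values): getStdCheckLine("CHECK", "addi a0, a0, 1", "asm")
+# yields "// CHECK: addi a0, a0, 1", while getErrCheckLine anchors the expected
+# diagnostic to the preceding test line via the [[@LINE-1]] FileCheck expression.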
+
+def main():
+ parser = argparse.ArgumentParser(description=__doc__)
+ parser.add_argument(
+ "--llvm-mc-binary",
+ default=None,
+ help='The "mc" binary to use to generate the test case',
+ )
+ parser.add_argument(
+ "--tool",
+ default=None,
+ help="Treat the given tool name as an mc-like tool for which check lines should be generated",
+ )
+ parser.add_argument(
+ "--default-march",
+ default=None,
+ help="Set a default -march for when neither triple nor arch are found in a RUN line",
+ )
+ parser.add_argument("tests", nargs="+")
+ initial_args = common.parse_commandline_args(parser)
+
+ script_name = os.path.basename(__file__)
+
+ for ti in common.itertests(
+ initial_args.tests, parser, script_name="utils/" + script_name
+ ):
+ if ti.path.endswith(".s"):
+ mc_mode = "asm"
+ elif ti.path.endswith(".txt"):
+ mc_mode = "dasm"
+ else:
+ common.warn("Expected .s and .txt, Skipping file : ", ti.path)
+ continue
+
+ triple_in_ir = None
+ for l in ti.input_lines:
+ m = common.TRIPLE_IR_RE.match(l)
+ if m:
+ triple_in_ir = m.groups()[0]
+ break
+
+ run_list = []
+ for l in ti.run_lines:
+ if "|" not in l:
+ common.warn("Skipping unparsable RUN line: " + l)
+ continue
+
+ commands = [cmd.strip() for cmd in l.split("|")]
+ assert len(commands) >= 2
+ mc_cmd = " | ".join(commands[:-1])
+ filecheck_cmd = commands[-1]
+ mc_tool = mc_cmd.split(" ")[0]
+
+            # special handling for "not", which negates the exit status
+ if mc_tool == "not":
+ mc_tool = mc_tool + " " + mc_cmd.split(" ")[1]
+
+ triple_in_cmd = None
+ m = common.TRIPLE_ARG_RE.search(mc_cmd)
+ if m:
+ triple_in_cmd = m.groups()[0]
+
+ march_in_cmd = ti.args.default_march
+ m = common.MARCH_ARG_RE.search(mc_cmd)
+ if m:
+ march_in_cmd = m.groups()[0]
+
+ common.verify_filecheck_prefixes(filecheck_cmd)
+
+ mc_like_tools = mc_LIKE_TOOLS[:]
+ if ti.args.tool:
+ mc_like_tools.append(ti.args.tool)
+ if mc_tool not in mc_like_tools:
+ common.warn("Skipping non-mc RUN line: " + l)
+ continue
+
+ if not filecheck_cmd.startswith("FileCheck "):
+ common.warn("Skipping non-FileChecked RUN line: " + l)
+ continue
+
+ mc_cmd_args = mc_cmd[len(mc_tool) :].strip()
+ mc_cmd_args = mc_cmd_args.replace("< %s", "").replace("%s", "").strip()
+ check_prefixes = common.get_check_prefixes(filecheck_cmd)
+
+ run_list.append(
+ (
+ check_prefixes,
+ mc_tool,
+ mc_cmd_args,
+ triple_in_cmd,
+ march_in_cmd,
+ )
+ )
+
+        # Find all test lines in the input.
+ testlines = [l for l in ti.input_lines if isTestLine(l, mc_mode)]
+ run_list_size = len(run_list)
+ testnum = len(testlines)
+
+ raw_output = []
+ raw_prefixes = []
+ for (
+ prefixes,
+ mc_tool,
+ mc_args,
+ triple_in_cmd,
+ march_in_cmd,
+ ) in run_list:
+ common.debug("Extracted mc cmd:", mc_tool, mc_args)
+ common.debug("Extracted FileCheck prefixes:", str(prefixes))
+ common.debug("Extracted triple :", str(triple_in_cmd))
+ common.debug("Extracted march:", str(march_in_cmd))
+
+ triple = triple_in_cmd or triple_in_ir
+ if not triple:
+ triple = common.get_triple_from_march(march_in_cmd)
+
+ raw_output.append([])
+ for line in testlines:
+ # get output for each testline
+ out = invoke_tool(
+ ti.args.llvm_mc_binary or mc_tool,
+ mc_args,
+ line,
+ verbose=ti.args.verbose,
+ )
+ raw_output[-1].append(out)
+
+ common.debug("Collect raw tool lines:", str(len(raw_output[-1])))
+
+ raw_prefixes.append(prefixes)
+
+ output_lines = []
+ generated_prefixes = []
+ used_prefixes = set()
+ prefix_set = set([prefix for p in run_list for prefix in p[0]])
+ common.debug("Rewriting FileCheck prefixes:", str(prefix_set))
+
+ for test_id in range(testnum):
+ input_line = testlines[test_id]
+
+            # Map each prefix to (output, [run_ids]): insert each run's output
+            # into this prefix-keyed dict, then sort by use count so the
+            # most-used prefix sharing the same output string is selected.
+ p_dict = {}
+ for run_id in range(run_list_size):
+ out = raw_output[run_id][test_id]
+
+ if hasErr(out):
+ o = getErrString(out)
+ else:
+ o = getOutputString(out)
+
+ prefixes = raw_prefixes[run_id]
+
+ for p in prefixes:
+ if p not in p_dict:
+ p_dict[p] = o, [run_id]
+ else:
+ if p_dict[p] == (None, []):
+ continue
+
+ prev_o, run_ids = p_dict[p]
+ if o == prev_o:
+ run_ids.append(run_id)
+ p_dict[p] = o, run_ids
+ else:
+ # conflict, discard
+ p_dict[p] = None, []
+
+ p_dict_sorted = dict(
+ sorted(p_dict.items(), key=lambda item: -len(item[1][1]))
+ )
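+            # Illustrative example: if runs 0 and 1 produce identical output
+            # for this test line and share a prefix, that prefix maps to
+            # (out, [0, 1]) and sorts ahead of prefixes backed by one run, so
+            # a single shared CHECK line covers both.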
+
+            # Select the prefix shared by the most runs with identical output;
+            # each run_id can only be used once.
+ gen_prefix = ""
+ used_runid = set()
+ for prefix, tup in p_dict_sorted.items():
+ o, run_ids = tup
+
+ if len(run_ids) == 0:
+ continue
+
+ skip = False
+ for i in run_ids:
+ if i in used_runid:
+ skip = True
+ else:
+ used_runid.add(i)
+ if not skip:
+ used_prefixes.add(prefix)
+
+ if hasErr(o):
+ gen_prefix += getErrCheckLine(prefix, o, mc_mode)
+ else:
+ gen_prefix += getStdCheckLine(prefix, o, mc_mode)
+
+ generated_prefixes.append(gen_prefix.rstrip("\n"))
+
+ # write output
+ prefix_id = 0
+ for input_info in ti.iterlines(output_lines):
+ input_line = input_info.line
+ if isTestLine(input_line, mc_mode):
+ output_lines.append(input_line)
+ output_lines.append(generated_prefixes[prefix_id])
+ prefix_id += 1
+
+ elif should_add_line_to_output(input_line, prefix_set, mc_mode):
+ output_lines.append(input_line)
+
+ elif input_line in ti.run_lines or input_line == "":
+ output_lines.append(input_line)
+
+ if ti.args.gen_unused_prefix_body:
+ output_lines.extend(
+ ti.get_checks_for_unused_prefixes(run_list, used_prefixes)
+ )
+
+ common.debug("Writing %d lines to %s..." % (len(output_lines), ti.path))
+ with open(ti.path, "wb") as f:
+ f.writelines(["{}\n".format(l).encode("utf-8") for l in output_lines])
+
+
+if __name__ == "__main__":
+ main()
diff --git a/llvm/utils/update_test_checks.py b/llvm/utils/update_test_checks.py
index 16f3e61..b413c25 100755
--- a/llvm/utils/update_test_checks.py
+++ b/llvm/utils/update_test_checks.py
@@ -123,7 +123,13 @@ def main():
common.warn("Skipping unparsable RUN line: " + l)
continue
- commands = [cmd.strip() for cmd in l.split("|")]
+ cropped_content = l
+ if "%if" in l:
+ match = re.search(r"%{\s*(.*?)\s*%}", l)
+ if match:
+ cropped_content = match.group(1)
+
+ commands = [cmd.strip() for cmd in cropped_content.split("|")]
assert len(commands) >= 2
preprocess_cmd = None
if len(commands) > 2:
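A minimal sketch of the new `%if` cropping applied to a hypothetical lit RUN line:

```python
import re

# Hypothetical RUN line guarded by a lit "%if" substitution.
line = "%if aarch64-registered-target %{ opt -S < %s | FileCheck %s %}"
cropped_content = line
if "%if" in line:
    match = re.search(r"%{\s*(.*?)\s*%}", line)
    if match:
        cropped_content = match.group(1)
print(cropped_content)  # opt -S < %s | FileCheck %s
```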
diff --git a/mlir/include/mlir-c/BuiltinTypes.h b/mlir/include/mlir-c/BuiltinTypes.h
index cc6da48..6dc25a5 100644
--- a/mlir/include/mlir-c/BuiltinTypes.h
+++ b/mlir/include/mlir-c/BuiltinTypes.h
@@ -79,6 +79,16 @@ MLIR_CAPI_EXPORTED bool mlirTypeIsAFloat(MlirType type);
/// Returns the bitwidth of a floating-point type.
MLIR_CAPI_EXPORTED unsigned mlirFloatTypeGetWidth(MlirType type);
+/// Returns the typeID of a Float4E2M1FN type.
+MLIR_CAPI_EXPORTED MlirTypeID mlirFloat4E2M1FNTypeGetTypeID(void);
+
+/// Checks whether the given type is an f4E2M1FN type.
+MLIR_CAPI_EXPORTED bool mlirTypeIsAFloat4E2M1FN(MlirType type);
+
+/// Creates an f4E2M1FN type in the given context. The type is owned by the
+/// context.
+MLIR_CAPI_EXPORTED MlirType mlirFloat4E2M1FNTypeGet(MlirContext ctx);
+
/// Returns the typeID of a Float6E2M3FN type.
MLIR_CAPI_EXPORTED MlirTypeID mlirFloat6E2M3FNTypeGetTypeID(void);
diff --git a/mlir/include/mlir/Dialect/Tensor/IR/TensorOps.td b/mlir/include/mlir/Dialect/Tensor/IR/TensorOps.td
index cafc3d9..3170115 100644
--- a/mlir/include/mlir/Dialect/Tensor/IR/TensorOps.td
+++ b/mlir/include/mlir/Dialect/Tensor/IR/TensorOps.td
@@ -1814,7 +1814,7 @@ def Tensor_SplatOp : Tensor_Op<"splat", [
}
//===----------------------------------------------------------------------===//
-// PackOp
+// RelayoutOp
//===----------------------------------------------------------------------===//
class Tensor_RelayoutOp<string mnemonic, list<Trait> traits = []> :
@@ -1851,11 +1851,27 @@ class Tensor_RelayoutOp<string mnemonic, list<Trait> traits = []> :
/// a sentinel `kDynamic` is introduced at that position in
/// the returned vector.
SmallVector<int64_t> getStaticTiles();
+
+ /// Retrieve all outer dims for this Pack/UnPack Op, i.e. all the leading
+ /// dims excluding the trailing dims corresponding to `innerTiles`. Note
+ /// that this will include both tiled and non-tiled dimensions. The order
+ /// of the output dimensions is consistent with the shape of the packed
+ /// tensor.
+ ArrayRef<int64_t> getAllOuterDims();
+
+ /// Similar to `getAllOuterDims`, but only retrieve the outer dims that
+ /// have been tiled. Also, the order of the output dimensions is consistent
+ /// with `inner_dims_pos` rather than the packed tensor.
+ SmallVector<int64_t> getTiledOuterDims();
}];
let hasVerifier = 1;
}
+//===----------------------------------------------------------------------===//
+// PackOp
+//===----------------------------------------------------------------------===//
+
def Tensor_PackOp : Tensor_RelayoutOp<"pack", [
AttrSizedOperandSegments]> {
let summary = "tensor pack operation";
diff --git a/mlir/include/mlir/Dialect/XeGPU/IR/XeGPUAttrs.td b/mlir/include/mlir/Dialect/XeGPU/IR/XeGPUAttrs.td
index f3ca09a..26eec0d 100644
--- a/mlir/include/mlir/Dialect/XeGPU/IR/XeGPUAttrs.td
+++ b/mlir/include/mlir/Dialect/XeGPU/IR/XeGPUAttrs.td
@@ -19,12 +19,18 @@ class XeGPUAttr<string name, string attrMnemonic, list<Trait> traits = [],
let mnemonic = attrMnemonic;
}
-def XeGPU_TensorDescAttr: XeGPUAttr<"TensorDesc", "tdesc_attr"> {
+class XeGPU_TensorDescAttr<string name, string attrMnemonic, list<Trait> traits = [],
+ string baseCppClass = "::mlir::Attribute">
+ : XeGPUAttr<name, attrMnemonic, traits, baseCppClass> {
+ let assemblyFormat = "`<` struct(params) `>`";
+}
+
+def XeGPU_BlockTensorDescAttr: XeGPU_TensorDescAttr<"BlockTensorDesc", "block_tdesc_attr"> {
let summary = [{a composite attribute for `TensorDescType`}];
- let description = [{`TensorDescAttr` (or `tdesc_attr`) is a composite
+ let description = [{`BlockTensorDesc` (or `block_tdesc_attr`) is a composite
attribute defined for `TensorDescType`, describing the following
properties of a `TensorDesc`.
- 1. `memory_scope`: It describes where the data block described by the
+ 1. `memory_space`: It describes where the data block described by the
TensorDesc is located, `Global` device memory or `Shared` local memory.
It defaults to `Global`.
2. `array_length`: It describes how many horizontally consecutive blocks
@@ -33,43 +39,63 @@ def XeGPU_TensorDescAttr: XeGPUAttr<"TensorDesc", "tdesc_attr"> {
8x32. Its default value is 1.
3. `boundary_check`: It is used to indicate to the hardware whether to do an
out-of-bounds check. The default value is true.
- 4. `scattered`: It is used to differenciate TensorDescs created from
- `create_nd_tdesc` vs from `create_tdesc`.
}];
let parameters = (ins
- OptionalParameter<"MemoryScopeAttr">: $memory_scope,
+ OptionalParameter<"MemorySpaceAttr">: $memory_space,
OptionalParameter<"IntegerAttr", "1">: $array_length,
- OptionalParameter<"BoolAttr", "true">: $boundary_check,
- OptionalParameter<"BoolAttr", "false">: $scattered
+ OptionalParameter<"BoolAttr", "true">: $boundary_check
);
let builders = [
AttrBuilder<(ins
- CArg<"xegpu::MemoryScope", "xegpu::MemoryScope::Global">:$memory_scope,
+ CArg<"xegpu::MemorySpace", "xegpu::MemorySpace::Global">:$memory_space,
CArg<"int", "1">:$array_length,
- CArg<"bool", "true">: $boundary_check,
- CArg<"bool", "false">: $scattered
+ CArg<"bool", "true">: $boundary_check
)>
];
- let assemblyFormat = "`<` struct(params) `>`";
}
+def XeGPU_ScatterTensorDescAttr: XeGPU_TensorDescAttr<"ScatterTensorDesc", "scatter_tdesc_attr"> {
+ let summary = [{a composite attribute for `TensorDescType`}];
+ let description = [{`ScatterTensorDesc` (or `scatter_tdesc_attr`) is a composite
+    attribute defined for `TensorDescType`, describing the following
+    properties of a `TensorDesc`.
+    1. `memory_space`: It describes where the data block described by the
+      TensorDesc is located, `Global` device memory or `Shared` local memory.
+      It defaults to `Global`.
+    2. `chunk_size`: indicates the number of contiguous elements accessed for
+      each offset; the default is 1. It applies to scattered TensorDescs only.
+ }];
+
+ let parameters = (ins
+ OptionalParameter<"MemorySpaceAttr">: $memory_space,
+ OptionalParameter<"IntegerAttr", "1">: $chunk_size
+ );
+
+ let builders = [
+ AttrBuilder<(ins
+ CArg<"xegpu::MemorySpace", "xegpu::MemorySpace::Global">:$memory_space,
+ CArg<"int", "1">: $chunk_size
+ )>
+ ];
+}
+
//===----------------------------------------------------------------------===//
// XeGPU Memory Scope Enums.
//===----------------------------------------------------------------------===//
-def XeGPU_MemoryScopeGlobal: I32EnumAttrCase<"Global", 0, "global">;
-def XeGPU_MemoryScopeShared: I32EnumAttrCase<"SLM", 1, "slm">;
-def XeGPU_MemoryScope: I32EnumAttr<"MemoryScope",
+def XeGPU_MemorySpaceGlobal: I32EnumAttrCase<"Global", 0, "global">;
+def XeGPU_MemorySpaceShared: I32EnumAttrCase<"SLM", 3, "slm">;
+def XeGPU_MemorySpace: I32EnumAttr<"MemorySpace",
"The address space of the memory the tensor descritor is created for",
- [XeGPU_MemoryScopeGlobal, XeGPU_MemoryScopeShared]> {
+ [XeGPU_MemorySpaceGlobal, XeGPU_MemorySpaceShared]> {
let genSpecializedAttr = 0;
let cppNamespace = "::mlir::xegpu";
}
-def XeGPU_MemoryScopeAttr:
- EnumAttr<XeGPU_Dialect, XeGPU_MemoryScope, "memory_scope"> {
+def XeGPU_MemorySpaceAttr:
+ EnumAttr<XeGPU_Dialect, XeGPU_MemorySpace, "memory_space"> {
let summary = [{Describe the location of data described by a `TensorDesc`:
Global device memory (`Global`) or Shared local memory (`SLM`).}];
let assemblyFormat = "$value";
@@ -116,4 +142,4 @@ def XeGPU_FenceScopeAttr:
let assemblyFormat = "$value";
}
-#endif // MLIR_DIALECT_XEGPU_IR_XEGPUATTRS_TD
\ No newline at end of file
+#endif // MLIR_DIALECT_XEGPU_IR_XEGPUATTRS_TD
diff --git a/mlir/include/mlir/Dialect/XeGPU/IR/XeGPUOps.td b/mlir/include/mlir/Dialect/XeGPU/IR/XeGPUOps.td
index c32c754..e24a056 100644
--- a/mlir/include/mlir/Dialect/XeGPU/IR/XeGPUOps.td
+++ b/mlir/include/mlir/Dialect/XeGPU/IR/XeGPUOps.td
@@ -218,6 +218,23 @@ def XeGPU_CreateNdDescOp: XeGPU_Op<"create_nd_tdesc", [Pure, ViewLikeOpInterface
static unsigned getOffsetSizeAndStrideStartOperandIndex() { return 1; }
mlir::Value getViewSource() { return getSource(); }
+
+ unsigned getSourceMemorySpace() {
+ auto srcTy = getSourceType();
+ if (auto memrefTy = llvm::dyn_cast<mlir::MemRefType>(srcTy)) {
+ auto attr = memrefTy.getMemorySpace();
+ if (attr) {
+ if (auto intAttr = llvm::dyn_cast<mlir::IntegerAttr>(attr)) {
+ return static_cast<unsigned>(intAttr.getInt());
+ }
+ if (auto memSpaceAttr = llvm::dyn_cast<MemorySpaceAttr>(attr))
+ return static_cast<unsigned>(memSpaceAttr.getValue());
+ }
+ }
+    // Take global as the default memory space.
+ return static_cast<unsigned>(MemorySpace::Global);
+ }
+
}];
}
@@ -411,8 +428,10 @@ def XeGPU_CreateDescOp: XeGPU_Op<"create_tdesc", [Pure, ViewLikeOpInterface]> {
is fixed to the hardware supported subgroup size, e.g., 16 on PVC,
implying each element in the array corresponds to a work-item (SIMT lane)
in the subgroup.
- * chunk_size: [optional attribute] indicates number of continious
- elements accessed for each offset, default is 1.
+
+ The first dimension of the result TensorDesc corresponds to work-items, so it should
+    match the dimension of offsets. It may also have a second dimension corresponding to
+ the chunk_size if the chunk size is larger than 1.
Example 1. It assumes subgroup size is 4, and accesses a[0], a[16], a[32], a[64]
```mlir
@@ -424,29 +443,22 @@ def XeGPU_CreateDescOp: XeGPU_Op<"create_tdesc", [Pure, ViewLikeOpInterface]> {
It will access a total of 32 data elements: a[0:7], a[16:23], a[32:39], a[64:71]
```mlir
%0 = memref.alloc() : memref<1024xf32>
- %1 = xegpu.create_tdesc %0[0, 16, 32, 64] {chunk_size = 8}: memref<1024xf32> -> TensorDesc<4x8xf32>
+ %1 = xegpu.create_tdesc %0[0, 16, 32, 64] : memref<1024xf32> -> TensorDesc<4x8xf32, chunk_size = 8>
```
Example 3. It is similar to Example 2, but there are some overlaps among work-items.
It accesses: a[0:7], a[4:11], a[8:15], a[12:19]
```mlir
%0 = memref.alloc() : memref<1024xf32>
- %1 = xegpu.create_tdesc %0[0, 4, 8, 12] {chunk_size = 8}: memref<1024xf32> -> TensorDesc<4x8xf32>
+    %1 = xegpu.create_tdesc %0[0, 4, 8, 12] : memref<1024xf32> -> TensorDesc<4x8xf32, chunk_size = 8>
```
}];
let arguments = (ins XeGPU_BaseAddrType: $source,
Variadic<Index>: $offsets,
- DenseI64ArrayAttr: $const_offsets,
- DefaultValuedAttr<I64Attr, "1">: $chunk_size);
+ DenseI64ArrayAttr: $const_offsets);
let results = (outs XeGPU_TensorDesc:$TensorDesc);
- let builders = [
- OpBuilder<(ins "xegpu::TensorDescType": $TensorDesc, "Value": $source,
- "llvm::ArrayRef<OpFoldResult>": $offsets,
- CArg<"uint32_t", "1"> : $chunk_size)>,
- ];
-
let assemblyFormat = [{
$source
custom<DynamicIndexList>($offsets, $const_offsets)
@@ -473,6 +485,22 @@ def XeGPU_CreateDescOp: XeGPU_Op<"create_tdesc", [Pure, ViewLikeOpInterface]> {
assert(idx < getNumOffsets() && "Invalid out of bound access.");
return getMixedOffsets()[idx];
}
+
+ unsigned getSourceMemorySpace() {
+ auto srcTy = getSource().getType();
+ if (auto memrefTy = llvm::dyn_cast<mlir::MemRefType>(srcTy)) {
+ auto attr = memrefTy.getMemorySpace();
+ if (attr) {
+ if (auto intAttr = llvm::dyn_cast<mlir::IntegerAttr>(attr))
+ return static_cast<unsigned>(intAttr.getInt());
+ if (auto memSpaceAttr = llvm::dyn_cast<MemorySpaceAttr>(attr))
+ return static_cast<unsigned>(memSpaceAttr.getValue());
+ }
+ }
+    // Take global as the default memory space.
+ return static_cast<unsigned>(MemorySpace::Global);
+ }
+
}];
let hasVerifier = 1;
@@ -520,28 +548,31 @@ def XeGPU_LoadGatherOp : XeGPU_Op<"load", [AllRanksMatch<["value", "TensorDesc"]
let description = [{ It (aka. load) loads data for each work-item. The output
describes the data being loaded at the subgroup level, so its size is
- consistent with the number of work-items in a subgroup. When `chunk_size_per_lane`
- attribute is larger than 1 in TensorDesc, the output vector will be 2D vector,
- with dim-1 correspoding to the chunk size.
+ consistent with the number of work-items in a subgroup. When the chunk size
+    is larger than 1, the output vector is a 2D vector, with dim-1 corresponding
+    to work-items, and dim-0 corresponding to the chunk_size loaded by each work-item.
+    In particular, there is a transpose effect on the result (as compared to the TensorDesc)
+ due to the hardware implementation. Therefore, a transpose attribute is introduced
+ on purpose, making sure users are aware of this implicit transformation.
The mask operand masks out memory access so that it is safe to pass out-of-boundary
addresses/offsets as long as they are masked. It applies to slots of SIMD lanes.
Example:
```mlir
- %2 = xegpu.load %1, %0 {transpose = [1, 0],
+ %2 = xegpu.load %1, %0 {transpose,
l1_hint = #xegpu.cache_hint<cached>,
l2_hint = #xegpu.cache_hint<uncached>,
l3_hint = #xegpu.cache_hint<uncached>}
- : !xegpu.tensor_desc<16xf32, #xegpu.tdesc_attr<scattered=true>>, vector<16xi1>
- -> vector<16xf32>
+ : !xegpu.tensor_desc<16xf32, #xegpu.scatter_tdesc_attr<memory_space=global>>,
+ vector<16xi1> -> vector<16xf32>
```
}];
let arguments = (ins XeGPU_TensorDesc: $TensorDesc,
XeGPU_MaskType: $mask,
- OptionalAttr<DenseI64ArrayAttr>: $transpose,
+ OptionalAttr<UnitAttr>: $transpose,
OptionalAttr<XeGPU_CacheHintAttr>: $l1_hint,
OptionalAttr<XeGPU_CacheHintAttr>: $l2_hint,
OptionalAttr<XeGPU_CacheHintAttr>: $l3_hint);
@@ -573,11 +604,15 @@ def XeGPU_LoadGatherOp : XeGPU_Op<"load", [AllRanksMatch<["value", "TensorDesc"]
let hasVerifier = 1;
}
-def XeGPU_StoreScatterOp : XeGPU_Op<"store", [AllShapesMatch<["value", "TensorDesc"]>,
- AllElementTypesMatch<["value", "TensorDesc"]>]> {
+def XeGPU_StoreScatterOp : XeGPU_Op<"store", [AllElementCountsMatch<["value", "TensorDesc"]>,
+ AllElementTypesMatch<["value", "TensorDesc"]>]> {
let summary = "store data to scattered memory locations.";
- let description = [{ It (aka. store) stores data to scattered memory locations.
- It has similar semantic to `load_gather`.
+ let description = [{ It (aka. store) stores data to scattered memory locations. The value is
+ typically a 1D vector. But when the chunk size of the TensorDesc is larger than 1, it will be
+    a 2D vector instead. For the latter case, dim-1 of the value corresponds to the SIMD lanes
+    and dim-0 of the value corresponds to the chunk_size stored per lane. So `store_scatter`
+    has a transpose effect, similar to `load_gather`. Therefore, a transpose attribute is
+ introduced on purpose, making sure users are aware of this implicit transformation.
Example:
```mlir
@@ -592,6 +627,7 @@ def XeGPU_StoreScatterOp : XeGPU_Op<"store", [AllShapesMatch<["value", "TensorDe
XeGPU_ValueType: $value,
XeGPU_TensorDesc: $TensorDesc,
XeGPU_MaskType: $mask,
+ OptionalAttr<UnitAttr>: $transpose,
OptionalAttr<XeGPU_CacheHintAttr>: $l1_hint,
OptionalAttr<XeGPU_CacheHintAttr>: $l2_hint,
OptionalAttr<XeGPU_CacheHintAttr>: $l3_hint);
@@ -723,7 +759,7 @@ def XeGPU_DpasOp : XeGPU_Op<"dpas", [Pure, AllElementTypesMatch<["lhs", "rhs"]>]
def XeGPU_AtomicRMWOp: XeGPU_Op<"atomic_rmw", [Pure,
AllElementTypesMatch<["tensorDesc", "value", "result"]>,
- AllShapesMatch<["tensorDesc", "mask", "value", "result"]>]> {
+ AllShapesMatch<["tensorDesc", "value", "result"]>]> {
let summary = "Atomic ready-modify-write operation on the TensorDesc. ";
let description = [{
@@ -808,7 +844,7 @@ def XeGPU_FenceOp: XeGPU_Op<"fence", []> {
2. `Fence_scope` describes the scope of fence. "Workgroup" means that the scope would be
within each workgroup. "GPU" means the scope would be across workgroups within the GPU.
}];
- let arguments = (ins XeGPU_MemoryScopeAttr: $memory_kind,
+ let arguments = (ins XeGPU_MemorySpaceAttr: $memory_kind,
XeGPU_FenceScopeAttr: $fence_scope);
let assemblyFormat = [{`memory_kind` `=` `` $memory_kind `,` `fence_scope` `=` `` $fence_scope attr-dict}];
let extraClassDeclaration = extraBaseClassDeclaration;
diff --git a/mlir/include/mlir/Dialect/XeGPU/IR/XeGPUTypes.td b/mlir/include/mlir/Dialect/XeGPU/IR/XeGPUTypes.td
index 9f101a7..0ce1211 100644
--- a/mlir/include/mlir/Dialect/XeGPU/IR/XeGPUTypes.td
+++ b/mlir/include/mlir/Dialect/XeGPU/IR/XeGPUTypes.td
@@ -48,7 +48,7 @@ def XeGPU_TensorDesc: XeGPUTypeDef<"TensorDesc", "tensor_desc",
Similar to the builtin tensor, it also provides an optional attribute encoding
the following information via the TensorDescAttr object:
- * memory_scope (xegpu::MemoryScope): [optional] where the data is located,
+ * memory_space (xegpu::MemorySpace): [optional] where the data is located,
global memory or shared memory. It is default to Global.
* array_length (int): [optional] The number of contiguous blocks with size as `shape`,
that will be loaded by block load at a time. It is default to 1.
@@ -63,7 +63,7 @@ def XeGPU_TensorDesc: XeGPUTypeDef<"TensorDesc", "tensor_desc",
element-type ::= float-type | integer-type | index-type
dim-list := (static-dim-list `x`)?
static-dim-list ::= decimal-literal `x` decimal-literal
- attr-list = (, memory_scope = value)? (, arr_len = value)? (, boundary_check = value)? (, scattered = value)?
+ attr-list = (, memory_space = value)? (, arr_len = value)? (, boundary_check = value)? (, scattered = value)?
```
Examples:
@@ -76,7 +76,7 @@ def XeGPU_TensorDesc: XeGPUTypeDef<"TensorDesc", "tensor_desc",
xegpu.tensor_desc<8x16xf32>
// A TensorDesc with 8x16 f32 elements for a memory region in shared memory space.
- xegpu.tensor_desc<8x16xf32, #xegpu.tdesc_attr<memory_scope = slm>>
+ xegpu.tensor_desc<8x16xf32, #xegpu.tdesc_attr<memory_space = slm>>
```
}];
@@ -88,11 +88,14 @@ def XeGPU_TensorDesc: XeGPUTypeDef<"TensorDesc", "tensor_desc",
TypeBuilderWithInferredContext<(ins
"llvm::ArrayRef<int64_t>": $shape,
"mlir::Type": $elementType,
- CArg<"bool", "false">: $scattered,
CArg<"int", "1">: $array_length,
- CArg<"xegpu::MemoryScope", "xegpu::MemoryScope::Global">:$memory_scope,
- CArg<"bool", "true">: $boundary_check
- )>
+ CArg<"bool", "true">: $boundary_check,
+ CArg<"xegpu::MemorySpace", "xegpu::MemorySpace::Global">:$memory_space)>,
+ TypeBuilderWithInferredContext<(ins
+ "llvm::ArrayRef<int64_t>": $shape,
+ "mlir::Type": $elementType,
+ CArg<"int", "1">: $chunk_size,
+ CArg<"xegpu::MemorySpace", "xegpu::MemorySpace::Global">:$memory_space)>
];
let extraClassDeclaration = [{
@@ -110,40 +113,58 @@ def XeGPU_TensorDesc: XeGPUTypeDef<"TensorDesc", "tensor_desc",
return llvm::cast<TensorDescType>(cloneWith(getShape(), elementType));
}
- TensorDescAttr getEncodingAsTensorDescAttr() const {
- return llvm::dyn_cast_if_present<TensorDescAttr>(getEncoding());
+ BlockTensorDescAttr getEncodingAsBlockTensorDescAttr() const {
+ return llvm::dyn_cast_if_present<BlockTensorDescAttr>(getEncoding());
}
- xegpu::MemoryScope getMemoryScope() const {
- auto attr = getEncodingAsTensorDescAttr();
- if (attr && attr.getMemoryScope())
- return attr.getMemoryScope().getValue();
+ ScatterTensorDescAttr getEncodingAsScatterTensorDescAttr() const {
+ return llvm::dyn_cast_if_present<ScatterTensorDescAttr>(getEncoding());
+ }
+
+ xegpu::MemorySpace getMemorySpace() const {
+ auto block_attr = getEncodingAsBlockTensorDescAttr();
+ if (block_attr && block_attr.getMemorySpace())
+ return block_attr.getMemorySpace().getValue();
+
+ auto scatter_attr = getEncodingAsScatterTensorDescAttr();
+ if (scatter_attr && scatter_attr.getMemorySpace())
+ return scatter_attr.getMemorySpace().getValue();
+
// return default value
- return MemoryScope::Global;
+ return MemorySpace::Global;
}
int getArrayLength() {
- auto attr = getEncodingAsTensorDescAttr();
- if (attr && attr.getArrayLength())
- return attr.getArrayLength().getInt();
+ auto attr = getEncoding();
+ auto block_attr = mlir::dyn_cast_if_present<BlockTensorDescAttr>(attr);
+ assert((!attr || block_attr) && "invalid on non BlockTensorDescAttr.");
+ if (block_attr && block_attr.getArrayLength())
+ return block_attr.getArrayLength().getInt();
// return default value
return 1;
}
bool getBoundaryCheck() {
- auto attr = getEncodingAsTensorDescAttr();
- if (attr && attr.getBoundaryCheck())
- return attr.getBoundaryCheck().getValue();
+ auto attr = getEncoding();
+ auto block_attr = mlir::dyn_cast_if_present<BlockTensorDescAttr>(attr);
+ assert((!attr || block_attr) && "invalid on non BlockTensorDescAttr.");
+ if (block_attr && block_attr.getBoundaryCheck())
+ return block_attr.getBoundaryCheck().getValue();
// return default value
return true;
}
- bool getScattered() {
- auto attr = getEncodingAsTensorDescAttr();
- if (attr && attr.getScattered())
- return attr.getScattered().getValue();
- // return default value
- return false;
+ bool isScattered() {
+ return bool(getEncodingAsScatterTensorDescAttr());
+ }
+
+ int getChunkSize() {
+ auto attr = getEncoding();
+ auto scatter_attr = mlir::dyn_cast_if_present<ScatterTensorDescAttr>(attr);
+ assert((!attr || scatter_attr) && "invalid on non ScatterTensorDescAttr.");
+ if (scatter_attr && scatter_attr.getChunkSize())
+ return scatter_attr.getChunkSize().getInt();
+ return 1;
}
}];
diff --git a/mlir/include/mlir/IR/Builders.h b/mlir/include/mlir/IR/Builders.h
index 196d34e..ee5d787 100644
--- a/mlir/include/mlir/IR/Builders.h
+++ b/mlir/include/mlir/IR/Builders.h
@@ -60,6 +60,7 @@ public:
Attribute metadata = Attribute());
// Types.
+ FloatType getFloat4E2M1FNType();
FloatType getFloat6E2M3FNType();
FloatType getFloat6E3M2FNType();
FloatType getFloat8E5M2Type();
diff --git a/mlir/include/mlir/IR/BuiltinTypes.h b/mlir/include/mlir/IR/BuiltinTypes.h
index f2231e9..91e68b4 100644
--- a/mlir/include/mlir/IR/BuiltinTypes.h
+++ b/mlir/include/mlir/IR/BuiltinTypes.h
@@ -67,6 +67,7 @@ public:
static FloatType getFloat8E4M3FNUZ(MLIRContext *ctx);
static FloatType getFloat8E4M3B11FNUZ(MLIRContext *ctx);
static FloatType getFloat8E3M4(MLIRContext *ctx);
+ static FloatType getFloat4E2M1FN(MLIRContext *ctx);
static FloatType getFloat6E2M3FN(MLIRContext *ctx);
static FloatType getFloat6E3M2FN(MLIRContext *ctx);
@@ -415,11 +416,15 @@ inline bool BaseMemRefType::isValidElementType(Type type) {
}
inline bool FloatType::classof(Type type) {
- return llvm::isa<Float6E2M3FNType, Float6E3M2FNType, Float8E5M2Type,
- Float8E4M3Type, Float8E4M3FNType, Float8E5M2FNUZType,
- Float8E4M3FNUZType, Float8E4M3B11FNUZType, Float8E3M4Type,
- BFloat16Type, Float16Type, FloatTF32Type, Float32Type,
- Float64Type, Float80Type, Float128Type>(type);
+ return llvm::isa<
+ Float4E2M1FNType, Float6E2M3FNType, Float6E3M2FNType, Float8E5M2Type,
+ Float8E4M3Type, Float8E4M3FNType, Float8E5M2FNUZType, Float8E4M3FNUZType,
+ Float8E4M3B11FNUZType, Float8E3M4Type, BFloat16Type, Float16Type,
+ FloatTF32Type, Float32Type, Float64Type, Float80Type, Float128Type>(type);
+}
+
+inline FloatType FloatType::getFloat4E2M1FN(MLIRContext *ctx) {
+ return Float4E2M1FNType::get(ctx);
}
inline FloatType FloatType::getFloat6E2M3FN(MLIRContext *ctx) {
diff --git a/mlir/include/mlir/IR/BuiltinTypes.td b/mlir/include/mlir/IR/BuiltinTypes.td
index c283c20..c738a8a 100644
--- a/mlir/include/mlir/IR/BuiltinTypes.td
+++ b/mlir/include/mlir/IR/BuiltinTypes.td
@@ -234,6 +234,27 @@ def Builtin_Float8E3M4 : Builtin_FloatType<"Float8E3M4", "f8E3M4"> {
}
//===----------------------------------------------------------------------===//
+// Float4E2M1FNType
+
+def Builtin_Float4E2M1FN : Builtin_FloatType<"Float4E2M1FN", "f4E2M1FN"> {
+ let summary = "4-bit floating point with 2-bit exponent and 1-bit mantissa";
+ let description = [{
+    A 4-bit floating-point type with 1 sign bit, 2 exponent bits, and 1
+    mantissa bit. This is not a standard type as defined by IEEE-754, but it
+ follows similar conventions with the following characteristics:
+
+ * bit encoding: S1E2M1
+ * exponent bias: 1
+ * infinities: Not supported
+ * NaNs: Not supported
+ * denormals when exponent is 0
+
+ Open Compute Project (OCP) microscaling formats (MX) specification:
+ https://www.opencompute.org/documents/ocp-microscaling-formats-mx-v1-0-spec-final-pdf
+ }];
+}
+
+//===----------------------------------------------------------------------===//
// Float6E2M3FNType
def Builtin_Float6E2M3FN : Builtin_FloatType<"Float6E2M3FN", "f6E2M3FN"> {
diff --git a/mlir/include/mlir/IR/CommonTypeConstraints.td b/mlir/include/mlir/IR/CommonTypeConstraints.td
index c852d2c..2113852 100644
--- a/mlir/include/mlir/IR/CommonTypeConstraints.td
+++ b/mlir/include/mlir/IR/CommonTypeConstraints.td
@@ -347,6 +347,8 @@ def F8E5M2FNUZ : Type<CPred<"$_self.isFloat8E5M2FNUZ()">, "f8E5M2FNUZ type">,
BuildableType<"$_builder.getFloat8E5M2FNUZType()">;
def F8E3M4 : Type<CPred<"$_self.isFloat8E3M4()">, "f8E3M4 type">,
BuildableType<"$_builder.getFloat8E3M4Type()">;
+def F4E2M1FN : Type<CPred<"$_self.isFloat4E2M1FN()">, "f4E2M1FN type">,
+ BuildableType<"$_builder.getFloat4E2M1FNType()">;
def F6E2M3FN : Type<CPred<"$_self.isFloat6E2M3FN()">, "f6E2M3FN type">,
BuildableType<"$_builder.getFloat6E2M3FNType()">;
def F6E3M2FN : Type<CPred<"$_self.isFloat6E3M2FN()">, "f6E3M2FN type">,
diff --git a/mlir/include/mlir/IR/Types.h b/mlir/include/mlir/IR/Types.h
index 8b6f365..1b52b97 100644
--- a/mlir/include/mlir/IR/Types.h
+++ b/mlir/include/mlir/IR/Types.h
@@ -125,6 +125,7 @@ public:
// Convenience predicates. This is only for floating point types,
// derived types should use isa/dyn_cast.
bool isIndex() const;
+ bool isFloat4E2M1FN() const;
bool isFloat6E2M3FN() const;
bool isFloat6E3M2FN() const;
bool isFloat8E5M2() const;
diff --git a/mlir/lib/AsmParser/TokenKinds.def b/mlir/lib/AsmParser/TokenKinds.def
index 6ae64a1..2b29177 100644
--- a/mlir/lib/AsmParser/TokenKinds.def
+++ b/mlir/lib/AsmParser/TokenKinds.def
@@ -101,6 +101,7 @@ TOK_KEYWORD(f8E5M2FNUZ)
TOK_KEYWORD(f8E4M3FNUZ)
TOK_KEYWORD(f8E4M3B11FNUZ)
TOK_KEYWORD(f8E3M4)
+TOK_KEYWORD(f4E2M1FN)
TOK_KEYWORD(f6E2M3FN)
TOK_KEYWORD(f6E3M2FN)
TOK_KEYWORD(f128)
diff --git a/mlir/lib/AsmParser/TypeParser.cpp b/mlir/lib/AsmParser/TypeParser.cpp
index a3798ca..60903a8 100644
--- a/mlir/lib/AsmParser/TypeParser.cpp
+++ b/mlir/lib/AsmParser/TypeParser.cpp
@@ -39,6 +39,7 @@ OptionalParseResult Parser::parseOptionalType(Type &type) {
case Token::kw_tuple:
case Token::kw_vector:
case Token::inttype:
+ case Token::kw_f4E2M1FN:
case Token::kw_f6E2M3FN:
case Token::kw_f6E3M2FN:
case Token::kw_f8E5M2:
@@ -305,6 +306,9 @@ Type Parser::parseNonFunctionType() {
}
// float-type
+ case Token::kw_f4E2M1FN:
+ consumeToken(Token::kw_f4E2M1FN);
+ return builder.getFloat4E2M1FNType();
case Token::kw_f6E2M3FN:
consumeToken(Token::kw_f6E2M3FN);
return builder.getFloat6E2M3FNType();
diff --git a/mlir/lib/Bindings/Python/IRTypes.cpp b/mlir/lib/Bindings/Python/IRTypes.cpp
index 6b64bc3..5a369b5 100644
--- a/mlir/lib/Bindings/Python/IRTypes.cpp
+++ b/mlir/lib/Bindings/Python/IRTypes.cpp
@@ -124,6 +124,27 @@ public:
}
};
+/// Floating Point Type subclass - Float4E2M1FNType.
+class PyFloat4E2M1FNType
+ : public PyConcreteType<PyFloat4E2M1FNType, PyFloatType> {
+public:
+ static constexpr IsAFunctionTy isaFunction = mlirTypeIsAFloat4E2M1FN;
+ static constexpr GetTypeIDFunctionTy getTypeIdFunction =
+ mlirFloat4E2M1FNTypeGetTypeID;
+ static constexpr const char *pyClassName = "Float4E2M1FNType";
+ using PyConcreteType::PyConcreteType;
+
+ static void bindDerived(ClassTy &c) {
+ c.def_static(
+ "get",
+ [](DefaultingPyMlirContext context) {
+ MlirType t = mlirFloat4E2M1FNTypeGet(context->get());
+ return PyFloat4E2M1FNType(context->getRef(), t);
+ },
+ py::arg("context") = py::none(), "Create a float4_e2m1fn type.");
+ }
+};
+
/// Floating Point Type subclass - Float6E2M3FNType.
class PyFloat6E2M3FNType
: public PyConcreteType<PyFloat6E2M3FNType, PyFloatType> {
@@ -922,6 +943,7 @@ void mlir::python::populateIRTypes(py::module &m) {
PyIntegerType::bind(m);
PyFloatType::bind(m);
PyIndexType::bind(m);
+ PyFloat4E2M1FNType::bind(m);
PyFloat6E2M3FNType::bind(m);
PyFloat6E3M2FNType::bind(m);
PyFloat8E4M3FNType::bind(m);
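A minimal usage sketch for the new binding, assuming an MLIR Python package built with this change:

```python
from mlir.ir import Context, Float4E2M1FNType

# Create the new f4E2M1FN type inside an MLIR context.
with Context():
    t = Float4E2M1FNType.get()
    print(t)  # f4E2M1FN
```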
diff --git a/mlir/lib/CAPI/IR/BuiltinTypes.cpp b/mlir/lib/CAPI/IR/BuiltinTypes.cpp
index f943bf7..efc1e85 100644
--- a/mlir/lib/CAPI/IR/BuiltinTypes.cpp
+++ b/mlir/lib/CAPI/IR/BuiltinTypes.cpp
@@ -85,6 +85,18 @@ unsigned mlirFloatTypeGetWidth(MlirType type) {
return llvm::cast<FloatType>(unwrap(type)).getWidth();
}
+MlirTypeID mlirFloat4E2M1FNTypeGetTypeID() {
+ return wrap(Float4E2M1FNType::getTypeID());
+}
+
+bool mlirTypeIsAFloat4E2M1FN(MlirType type) {
+ return unwrap(type).isFloat4E2M1FN();
+}
+
+MlirType mlirFloat4E2M1FNTypeGet(MlirContext ctx) {
+ return wrap(FloatType::getFloat4E2M1FN(unwrap(ctx)));
+}
+
MlirTypeID mlirFloat6E2M3FNTypeGetTypeID() {
return wrap(Float6E2M3FNType::getTypeID());
}
diff --git a/mlir/lib/Conversion/GPUCommon/OpToFuncCallLowering.h b/mlir/lib/Conversion/GPUCommon/OpToFuncCallLowering.h
index 6be5548..8ff4d4e 100644
--- a/mlir/lib/Conversion/GPUCommon/OpToFuncCallLowering.h
+++ b/mlir/lib/Conversion/GPUCommon/OpToFuncCallLowering.h
@@ -17,11 +17,13 @@
namespace mlir {
/// Rewriting that replaces SourceOp with a CallOp to `f32Func` or `f64Func` or
-/// `f32ApproxFunc` depending on the element type and the fastMathFlag of that
-/// Op. The function declaration is added in case it was not added before.
+/// `f32ApproxFunc` or `f16Func` depending on the element type and the
+/// fastMathFlag of that Op. The function declaration is added in case it was
+/// not added before.
///
-/// If the input values are of f16 type, the value is first casted to f32, the
-/// function called and then the result casted back.
+/// If the input values are of bf16 type (or f16 type if f16Func is empty), the
+/// value is first cast to f32, the function called, and then the result cast
+/// back.
///
/// Example with NVVM:
/// %exp_f32 = math.exp %arg_f32 : f32
@@ -41,9 +43,10 @@ template <typename SourceOp>
struct OpToFuncCallLowering : public ConvertOpToLLVMPattern<SourceOp> {
public:
explicit OpToFuncCallLowering(LLVMTypeConverter &lowering, StringRef f32Func,
- StringRef f64Func, StringRef f32ApproxFunc)
+ StringRef f64Func, StringRef f32ApproxFunc,
+ StringRef f16Func)
: ConvertOpToLLVMPattern<SourceOp>(lowering), f32Func(f32Func),
- f64Func(f64Func), f32ApproxFunc(f32ApproxFunc) {}
+ f64Func(f64Func), f32ApproxFunc(f32ApproxFunc), f16Func(f16Func) {}
LogicalResult
matchAndRewrite(SourceOp op, typename SourceOp::Adaptor adaptor,
@@ -89,7 +92,11 @@ public:
private:
Value maybeCast(Value operand, PatternRewriter &rewriter) const {
Type type = operand.getType();
- if (!isa<Float16Type>(type))
+ if (!isa<Float16Type, BFloat16Type>(type))
+ return operand;
+
+    // If there's an f16 function, there is no need to cast f16 values.
+ if (!f16Func.empty() && isa<Float16Type>(type))
return operand;
return rewriter.create<LLVM::FPExtOp>(
@@ -102,6 +109,8 @@ private:
}
StringRef getFunctionName(Type type, arith::FastMathFlags flag) const {
+ if (isa<Float16Type>(type))
+ return f16Func;
if (isa<Float32Type>(type)) {
if (((uint32_t)arith::FastMathFlags::afn & (uint32_t)flag) &&
!f32ApproxFunc.empty())
@@ -130,6 +139,7 @@ private:
const std::string f32Func;
const std::string f64Func;
const std::string f32ApproxFunc;
+ const std::string f16Func;
};
} // namespace mlir
diff --git a/mlir/lib/Conversion/GPUToNVVM/LowerGpuOpsToNVVMOps.cpp b/mlir/lib/Conversion/GPUToNVVM/LowerGpuOpsToNVVMOps.cpp
index 4be330b..2b91a6c 100644
--- a/mlir/lib/Conversion/GPUToNVVM/LowerGpuOpsToNVVMOps.cpp
+++ b/mlir/lib/Conversion/GPUToNVVM/LowerGpuOpsToNVVMOps.cpp
@@ -335,11 +335,11 @@ void mlir::configureGpuToNVVMConversionLegality(ConversionTarget &target) {
template <typename OpTy>
static void populateOpPatterns(LLVMTypeConverter &converter,
RewritePatternSet &patterns, StringRef f32Func,
- StringRef f64Func,
- StringRef f32ApproxFunc = "") {
+ StringRef f64Func, StringRef f32ApproxFunc = "",
+ StringRef f16Func = "") {
patterns.add<ScalarizeVectorOpLowering<OpTy>>(converter);
patterns.add<OpToFuncCallLowering<OpTy>>(converter, f32Func, f64Func,
- f32ApproxFunc);
+ f32ApproxFunc, f16Func);
}
void mlir::populateGpuSubgroupReduceOpLoweringPattern(
diff --git a/mlir/lib/Conversion/GPUToROCDL/LowerGpuOpsToROCDLOps.cpp b/mlir/lib/Conversion/GPUToROCDL/LowerGpuOpsToROCDLOps.cpp
index fc3e1fc..482c9e2 100644
--- a/mlir/lib/Conversion/GPUToROCDL/LowerGpuOpsToROCDLOps.cpp
+++ b/mlir/lib/Conversion/GPUToROCDL/LowerGpuOpsToROCDLOps.cpp
@@ -334,10 +334,9 @@ void mlir::configureGpuToROCDLConversionLegality(ConversionTarget &target) {
target.addIllegalOp<LLVM::CosOp, LLVM::ExpOp, LLVM::Exp2Op, LLVM::FCeilOp,
LLVM::FFloorOp, LLVM::FRemOp, LLVM::LogOp, LLVM::Log10Op,
LLVM::Log2Op, LLVM::PowOp, LLVM::SinOp>();
- // These ops are legal for f16 and f32 type.
+ // These ops are legal for f32 type.
target.addDynamicallyLegalOp<LLVM::ExpOp, LLVM::LogOp>([](Operation *op) {
- return any_of(op->getOperandTypes(),
- llvm::IsaPred<Float16Type, Float32Type>);
+ return any_of(op->getOperandTypes(), llvm::IsaPred<Float32Type>);
});
// TODO: Remove once we support replacing non-root ops.
target.addLegalOp<gpu::YieldOp, gpu::GPUModuleOp>();
@@ -346,9 +345,11 @@ void mlir::configureGpuToROCDLConversionLegality(ConversionTarget &target) {
template <typename OpTy>
static void populateOpPatterns(LLVMTypeConverter &converter,
RewritePatternSet &patterns, StringRef f32Func,
- StringRef f64Func) {
+ StringRef f64Func, StringRef f32ApproxFunc,
+ StringRef f16Func) {
patterns.add<ScalarizeVectorOpLowering<OpTy>>(converter);
- patterns.add<OpToFuncCallLowering<OpTy>>(converter, f32Func, f64Func);
+ patterns.add<OpToFuncCallLowering<OpTy>>(converter, f32Func, f32ApproxFunc,
+ f16Func);
}
void mlir::populateGpuToROCDLConversionPatterns(
diff --git a/mlir/lib/Conversion/LLVMCommon/TypeConverter.cpp b/mlir/lib/Conversion/LLVMCommon/TypeConverter.cpp
index 51a1b91..fd6369b 100644
--- a/mlir/lib/Conversion/LLVMCommon/TypeConverter.cpp
+++ b/mlir/lib/Conversion/LLVMCommon/TypeConverter.cpp
@@ -250,7 +250,7 @@ Type LLVMTypeConverter::convertFloatType(FloatType type) const {
if (type.isFloat8E5M2() || type.isFloat8E4M3() || type.isFloat8E4M3FN() ||
type.isFloat8E5M2FNUZ() || type.isFloat8E4M3FNUZ() ||
type.isFloat8E4M3B11FNUZ() || type.isFloat8E3M4() ||
- type.isFloat6E2M3FN() || type.isFloat6E3M2FN())
+ type.isFloat4E2M1FN() || type.isFloat6E2M3FN() || type.isFloat6E3M2FN())
return IntegerType::get(&getContext(), type.getWidth());
return type;
}
diff --git a/mlir/lib/Conversion/MathToROCDL/MathToROCDL.cpp b/mlir/lib/Conversion/MathToROCDL/MathToROCDL.cpp
index b3b4d81..8330713 100644
--- a/mlir/lib/Conversion/MathToROCDL/MathToROCDL.cpp
+++ b/mlir/lib/Conversion/MathToROCDL/MathToROCDL.cpp
@@ -38,17 +38,17 @@ using namespace mlir;
template <typename OpTy>
static void populateOpPatterns(LLVMTypeConverter &converter,
RewritePatternSet &patterns, StringRef f32Func,
- StringRef f64Func,
+ StringRef f64Func, StringRef f16Func,
StringRef f32ApproxFunc = "") {
patterns.add<ScalarizeVectorOpLowering<OpTy>>(converter);
patterns.add<OpToFuncCallLowering<OpTy>>(converter, f32Func, f64Func,
- f32ApproxFunc);
+ f32ApproxFunc, f16Func);
}
void mlir::populateMathToROCDLConversionPatterns(LLVMTypeConverter &converter,
RewritePatternSet &patterns) {
// Handled by mathToLLVM: math::AbsIOp
- // Handled by mathToLLVM: math::AbsFIOp
+ // Handled by mathToLLVM: math::AbsFOp
// Handled by mathToLLVM: math::CopySignOp
// Handled by mathToLLVM: math::CountLeadingZerosOp
// Handled by mathToLLVM: math::CountTrailingZerosOp
@@ -63,59 +63,61 @@ void mlir::populateMathToROCDLConversionPatterns(LLVMTypeConverter &converter,
// Handled by mathToLLVM: math::SqrtOp
// Handled by mathToLLVM: math::TruncOp
populateOpPatterns<math::AcosOp>(converter, patterns, "__ocml_acos_f32",
- "__ocml_acos_f64");
+ "__ocml_acos_f64", "__ocml_acos_f16");
populateOpPatterns<math::AcoshOp>(converter, patterns, "__ocml_acosh_f32",
- "__ocml_acosh_f64");
+ "__ocml_acosh_f64", "__ocml_acosh_f16");
populateOpPatterns<math::AsinOp>(converter, patterns, "__ocml_asin_f32",
- "__ocml_asin_f64");
+ "__ocml_asin_f64", "__ocml_asin_f16");
populateOpPatterns<math::AsinhOp>(converter, patterns, "__ocml_asinh_f32",
- "__ocml_asinh_f64");
+ "__ocml_asinh_f64", "__ocml_asinh_f16");
populateOpPatterns<math::AtanOp>(converter, patterns, "__ocml_atan_f32",
- "__ocml_atan_f64");
+ "__ocml_atan_f64", "__ocml_atan_f16");
populateOpPatterns<math::AtanhOp>(converter, patterns, "__ocml_atanh_f32",
- "__ocml_atanh_f64");
+ "__ocml_atanh_f64", "__ocml_atanh_f16");
populateOpPatterns<math::Atan2Op>(converter, patterns, "__ocml_atan2_f32",
- "__ocml_atan2_f64");
+ "__ocml_atan2_f64", "__ocml_atan2_f16");
populateOpPatterns<math::CbrtOp>(converter, patterns, "__ocml_cbrt_f32",
- "__ocml_cbrt_f64");
+ "__ocml_cbrt_f64", "__ocml_cbrt_f16");
populateOpPatterns<math::CeilOp>(converter, patterns, "__ocml_ceil_f32",
- "__ocml_ceil_f64");
+ "__ocml_ceil_f64", "__ocml_ceil_f16");
populateOpPatterns<math::CosOp>(converter, patterns, "__ocml_cos_f32",
- "__ocml_cos_f64");
+ "__ocml_cos_f64", "__ocml_cos_f16");
populateOpPatterns<math::CoshOp>(converter, patterns, "__ocml_cosh_f32",
- "__ocml_cosh_f64");
+ "__ocml_cosh_f64", "__ocml_cosh_f16");
populateOpPatterns<math::SinhOp>(converter, patterns, "__ocml_sinh_f32",
- "__ocml_sinh_f64");
- populateOpPatterns<math::ExpOp>(converter, patterns, "", "__ocml_exp_f64");
+ "__ocml_sinh_f64", "__ocml_sinh_f16");
+ populateOpPatterns<math::ExpOp>(converter, patterns, "", "__ocml_exp_f64",
+ "__ocml_exp_f16");
populateOpPatterns<math::Exp2Op>(converter, patterns, "__ocml_exp2_f32",
- "__ocml_exp2_f64");
+ "__ocml_exp2_f64", "__ocml_exp2_f16");
populateOpPatterns<math::ExpM1Op>(converter, patterns, "__ocml_expm1_f32",
- "__ocml_expm1_f64");
+ "__ocml_expm1_f64", "__ocml_expm1_f16");
populateOpPatterns<math::FloorOp>(converter, patterns, "__ocml_floor_f32",
- "__ocml_floor_f64");
- populateOpPatterns<math::LogOp>(converter, patterns, "", "__ocml_log_f64");
+ "__ocml_floor_f64", "__ocml_floor_f16");
+ populateOpPatterns<math::LogOp>(converter, patterns, "", "__ocml_log_f64",
+ "__ocml_log_f16");
populateOpPatterns<math::Log10Op>(converter, patterns, "__ocml_log10_f32",
- "__ocml_log10_f64");
+ "__ocml_log10_f64", "__ocml_log10_f16");
populateOpPatterns<math::Log1pOp>(converter, patterns, "__ocml_log1p_f32",
- "__ocml_log1p_f64");
+ "__ocml_log1p_f64", "__ocml_log1p_f16");
populateOpPatterns<math::Log2Op>(converter, patterns, "__ocml_log2_f32",
- "__ocml_log2_f64");
+ "__ocml_log2_f64", "__ocml_log2_f16");
populateOpPatterns<math::PowFOp>(converter, patterns, "__ocml_pow_f32",
- "__ocml_pow_f64");
+ "__ocml_pow_f64", "__ocml_pow_f16");
populateOpPatterns<math::RsqrtOp>(converter, patterns, "__ocml_rsqrt_f32",
- "__ocml_rsqrt_f64");
+ "__ocml_rsqrt_f64", "__ocml_rsqrt_f16");
populateOpPatterns<math::SinOp>(converter, patterns, "__ocml_sin_f32",
- "__ocml_sin_f64");
+ "__ocml_sin_f64", "__ocml_sin_f16");
populateOpPatterns<math::TanhOp>(converter, patterns, "__ocml_tanh_f32",
- "__ocml_tanh_f64");
+ "__ocml_tanh_f64", "__ocml_tanh_f16");
populateOpPatterns<math::TanOp>(converter, patterns, "__ocml_tan_f32",
- "__ocml_tan_f64");
+ "__ocml_tan_f64", "__ocml_tan_f16");
populateOpPatterns<math::ErfOp>(converter, patterns, "__ocml_erf_f32",
- "__ocml_erf_f64");
+ "__ocml_erf_f64", "__ocml_erf_f16");
// Single arith pattern that needs a ROCDL call, probably not
// worth creating a separate pass for it.
populateOpPatterns<arith::RemFOp>(converter, patterns, "__ocml_fmod_f32",
- "__ocml_fmod_f64");
+ "__ocml_fmod_f64", "__ocml_fmod_f16");
}
namespace {
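A minimal before/after sketch of the mapping this function sets up, mirroring the math-to-rocdl tests updated below (op and OCML function names taken from the table above):
    %y = math.acos %x : f16
    // becomes, after -convert-math-to-rocdl:
    %y = llvm.call @__ocml_acos_f16(%x) : (f16) -> f16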
diff --git a/mlir/lib/Conversion/VectorToXeGPU/VectorToXeGPU.cpp b/mlir/lib/Conversion/VectorToXeGPU/VectorToXeGPU.cpp
index be1581d..fa03442 100644
--- a/mlir/lib/Conversion/VectorToXeGPU/VectorToXeGPU.cpp
+++ b/mlir/lib/Conversion/VectorToXeGPU/VectorToXeGPU.cpp
@@ -168,9 +168,8 @@ struct TransferReadLowering : public OpRewritePattern<vector::TransferReadOp> {
if (isTransposeLoad)
std::reverse(descShape.begin(), descShape.end());
auto descType = xegpu::TensorDescType::get(
- descShape, elementType, /*scattered=*/false, /*array_length=*/1,
- xegpu::MemoryScope::Global,
- /*boundary_check=*/isOutOfBounds);
+ descShape, elementType, /*array_length=*/1,
+ /*boundary_check=*/isOutOfBounds, xegpu::MemorySpace::Global);
xegpu::CreateNdDescOp ndDesc =
createNdDescriptor(rewriter, loc, descType,
@@ -212,10 +211,10 @@ struct TransferWriteLowering
return rewriter.notifyMatchFailure(writeOp, "Expects identity map");
VectorType vecTy = writeOp.getVectorType();
- auto descType = xegpu::TensorDescType::get(
- vecTy.getShape(), vecTy.getElementType(),
- /*scattered=*/false, /*array_length=*/1, xegpu::MemoryScope::Global,
- /*boundary_check=*/false);
+ auto descType =
+ xegpu::TensorDescType::get(vecTy.getShape(), vecTy.getElementType(),
+ /*array_length=*/1, /*boundary_check=*/false,
+ xegpu::MemorySpace::Global);
xegpu::CreateNdDescOp ndDesc = createNdDescriptor(
rewriter, loc, descType,
dyn_cast<TypedValue<MemRefType>>(writeOp.getSource()),
diff --git a/mlir/lib/Dialect/Arith/Transforms/EmulateUnsupportedFloats.cpp b/mlir/lib/Dialect/Arith/Transforms/EmulateUnsupportedFloats.cpp
index 5e5e10b..0bf8c89 100644
--- a/mlir/lib/Dialect/Arith/Transforms/EmulateUnsupportedFloats.cpp
+++ b/mlir/lib/Dialect/Arith/Transforms/EmulateUnsupportedFloats.cpp
@@ -55,6 +55,7 @@ static std::optional<FloatType> parseFloatType(MLIRContext *ctx,
StringRef name) {
Builder b(ctx);
return llvm::StringSwitch<std::optional<FloatType>>(name)
+ .Case("f4E2M1FN", b.getFloat4E2M1FNType())
.Case("f6E2M3FN", b.getFloat6E2M3FNType())
.Case("f6E3M2FN", b.getFloat6E3M2FNType())
.Case("f8E5M2", b.getFloat8E5M2Type())
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp b/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp
index 77f0ea9..e0dea8e 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp
@@ -1030,11 +1030,13 @@ static Value getPackOpSourceOrPaddedSource(OpBuilder &builder,
return input;
}
+ assert(llvm::all_of(packOp.getAllOuterDims(),
+ [](int64_t val) { return val == 1; }) &&
+ "some outer dims are != 1");
+
Location loc = packOp.getLoc();
ShapedType inputType = packOp.getSourceType();
int64_t inputRank = inputType.getRank();
- assert(llvm::all_of(packOp.getDestType().getShape().take_front(inputRank),
- [](int64_t val) { return val == 1; }));
SmallVector<int64_t> paddedShape;
DenseMap<int64_t, OpFoldResult> tileAndPosMapping =
@@ -1126,12 +1128,8 @@ LogicalResult GeneralizeOuterUnitDimsPackOpPattern::matchAndRewrite(
// TODO: support the case that outer dimensions are not all 1s. A
// tensor.expand_shape will be generated in this case.
- auto innerDimsPos = packOp.getInnerDimsPos();
- int64_t srcRank = packOp.getSourceRank();
- auto destShape = packOp.getDestType().getShape();
- if (llvm::any_of(innerDimsPos, [destShape](int64_t index) {
- return destShape[index] != 1;
- })) {
+ if (llvm::any_of(packOp.getTiledOuterDims(),
+ [](int64_t dim) { return dim != 1; })) {
return rewriter.notifyMatchFailure(
packOp, "require the tiled outer dimensions of the result are all 1s");
}
@@ -1145,6 +1143,7 @@ LogicalResult GeneralizeOuterUnitDimsPackOpPattern::matchAndRewrite(
packOp.getDimAndTileMapping();
Attribute zeroIdxAttr = rewriter.getIndexAttr(0);
Attribute oneIdxAttr = rewriter.getIndexAttr(1);
+ int64_t srcRank = packOp.getSourceRank();
SmallVector<OpFoldResult> readOffsets(srcRank, zeroIdxAttr);
SmallVector<OpFoldResult> readStrides(srcRank, oneIdxAttr);
SmallVector<OpFoldResult> readSizes;
@@ -1173,9 +1172,8 @@ LogicalResult GeneralizeOuterUnitDimsPackOpPattern::matchAndRewrite(
loc, readType, input, readOffsets, readSizes, readStrides);
// 2. Transpose the tile to match the inner tile order.
-
SmallVector<int64_t> perm = getPackUnpackRankReducedPerm(
- inputShape, innerDimsPos, packOp.getOuterDimsPerm());
+ inputShape, packOp.getInnerDimsPos(), packOp.getOuterDimsPerm());
LLVM_DEBUG(DBGS() << "Pack permutation: " << packOp << "\n";
llvm::interleaveComma(perm, DBGS() << "perm: "); DBGSNL(););
@@ -1208,9 +1206,8 @@ LogicalResult GeneralizeOuterUnitDimsUnPackOpPattern::matchAndRewrite(
int64_t destRank = unpackOp.getDestRank();
ArrayRef<int64_t> srcShape = unpackOp.getSourceType().getShape();
ArrayRef<int64_t> innerDimsPos = unpackOp.getInnerDimsPos();
- if (llvm::any_of(innerDimsPos, [srcShape](int64_t index) {
- return srcShape[index] != 1;
- })) {
+ if (llvm::any_of(unpackOp.getTiledOuterDims(),
+ [](int64_t dim) { return dim != 1; })) {
return rewriter.notifyMatchFailure(
unpackOp,
"require the tiled outer dimensions of the result are all 1s");
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
index 6800a0f..fa20001 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
@@ -810,27 +810,35 @@ static Value calculateGatherOffset(RewriterBase &rewriter,
enum VectorMemoryAccessKind { ScalarBroadcast, Contiguous, Gather };
-/// Find the non-unit dim in a linalgOp.
-/// When executing this hook, it is expected that only one dim will be non-unit.
-/// Other cases (i.e. reading n-D vectors) should've been labelled as gather
-/// loads before calling this method. This is used for finding contiguous loads
-/// (represented as `tensor.extract`) within `linalg.generic` Ops. Note that
-/// this condition is expected to hold for statically shaped Linalg Ops only.
-static uint64_t getNonUnitLoopDim(LinalgOp linalgOp) {
- uint64_t nonUnitDim = 0;
- uint64_t countNonUnitDim = 0;
- for (auto tripCount : llvm::enumerate(linalgOp.getStaticLoopRanges())) {
- if (tripCount.value() != 1) {
- nonUnitDim = tripCount.index();
- countNonUnitDim++;
- }
- }
-
+/// Find the index of the trailing non-unit dim in linalgOp. This hook is used
+/// when checking whether `tensor.extract` Op (within a `linalg.generic` Op)
+/// represents a contiguous load operation.
+///
+/// Note that when calling this hook, it is assumed that the output vector is
+/// effectively 1D. Other cases (i.e. reading n-D vectors) should've been
+/// labelled as a gather load before entering this method.
+///
+/// Following on from the above, it is assumed that:
+/// * for statically shaped loops, when no masks are used, only one dim is !=
+/// 1 (that's what the shape of the output vector is based on).
+/// * for dynamically shaped loops, there might be more non-unit dims
+/// as the output vector type is user-specified.
+///
+/// TODO: Statically shaped loops + vector masking
+static uint64_t getTrailingNonUnitLoopDimIdx(LinalgOp linalgOp) {
+ SmallVector<int64_t> loopRanges = linalgOp.getStaticLoopRanges();
assert(linalgOp.hasDynamicShape() ||
- countNonUnitDim == 1 && "For statically shaped Linalg Ops, only one "
- "non-unit loop dim is expected");
- (void)countNonUnitDim;
- return nonUnitDim;
+ llvm::count_if(loopRanges, [](int64_t dim) { return dim != 1; }) ==
+ 1 &&
+ "For statically shaped Linalg Ops, only one "
+ "non-unit loop dim is expected");
+
+ // Use a signed index type: with `size_t`, the `idx >= 0` guard below would
+ // always hold and the loop could wrap around.
+ int64_t idx = loopRanges.size() - 1;
+ for (; idx >= 0; idx--)
+ if (loopRanges[idx] != 1)
+ break;
+
+ return idx;
}
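A quick worked example of the new hook (values illustrative; `?` marks a dynamic size, which is conservatively counted as non-unit):
    loopRanges = [1, 8, 1]  ->  scan from the back: loopRanges[1] == 8  ->  returns 1
    loopRanges = [1, ?, 4]  ->  loopRanges[2] == 4                      ->  returns 2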
/// Checks whether `val` can be used for calculating a loop invariant index.
@@ -854,11 +862,11 @@ static bool isLoopInvariantIdx(LinalgOp &linalgOp, Value &val,
assert(defOp && "This is neither a block argument nor an operation result");
// IndexOp is loop invariant as long as its result remains constant across
- // iterations. Given the assumptions on the loop ranges above, only the
- // trailing loop dim ever changes.
- auto trailingLoopDim = linalgOp.getStaticLoopRanges().size() - 1;
- if (auto indexOp = dyn_cast<linalg::IndexOp>(defOp))
- return (indexOp.getDim() != trailingLoopDim);
+ // iterations. Note that for dynamic shapes, the corresponding dim will also
+ // be conservatively treated as != 1.
+ if (auto indexOp = dyn_cast<linalg::IndexOp>(defOp)) {
+ return linalgOp.getStaticLoopRanges()[indexOp.getDim()] == 1;
+ }
auto *ancestor = block->findAncestorOpInBlock(*defOp);
@@ -877,7 +885,7 @@ static bool isLoopInvariantIdx(LinalgOp &linalgOp, Value &val,
return result;
}
-/// Check whether \p val could be used for calculating the trailing index for a
+/// Check whether `val` could be used for calculating the trailing index for a
/// contiguous load operation.
///
/// There are currently 3 types of values that are allowed here:
@@ -886,13 +894,14 @@ static bool isLoopInvariantIdx(LinalgOp &linalgOp, Value &val,
/// 3. results of basic arithmetic operations (linear and continuous)
/// involving 1., 2. and 3.
/// This method returns True if indeed only such values are used in calculating
-/// \p val.
+/// `val`.
///
/// Additionally, the trailing index for a contiguous load operation should
/// increment by 1 with every loop iteration, i.e. be based on:
/// * `linalg.index <dim>` ,
-/// where <dim> is the trailing dim of the iteration space. \p foundIndexOp is
-/// updated to `true` when such an op is found.
+/// where <dim> is the trailing non-unit dim of the iteration space (this way,
+/// `linalg.index <dim>` increments by 1 with every loop iteration).
+/// `foundIndexOp` is updated to `true` when such Op is found.
static bool isContiguousLoadIdx(LinalgOp &linalgOp, Value &val,
bool &foundIndexOp, VectorType resType) {
@@ -912,12 +921,10 @@ static bool isContiguousLoadIdx(LinalgOp &linalgOp, Value &val,
Operation *defOp = val.getDefiningOp();
assert(defOp && "This is neither a block argument nor an operation result");
- // Given the assumption on the loop ranges above, we expect only 1 non-unit
- // loop dim.
- auto nonUnitLoopDim = getNonUnitLoopDim(linalgOp);
-
if (auto indexOp = dyn_cast<linalg::IndexOp>(defOp)) {
- foundIndexOp = (indexOp.getDim() == nonUnitLoopDim);
+ auto loopDimThatIncrementsByOne = getTrailingNonUnitLoopDimIdx(linalgOp);
+
+ foundIndexOp = (indexOp.getDim() == loopDimThatIncrementsByOne);
return true;
}
@@ -1012,7 +1019,10 @@ getTensorExtractMemoryAccessPattern(tensor::ExtractOp extractOp,
bool foundIndexOp = false;
bool isContiguousLoad = isContiguousLoadIdx(linalgOp, extractOpTrailingIdx,
foundIndexOp, resType);
- isContiguousLoad &= foundIndexOp;
+ // TODO: Support generating contiguous loads for column vectors - that will
+ // require adding a permutation map to transfer_read Ops.
+ bool isRowVector = resType.getShape().back() != 1;
+ isContiguousLoad &= (foundIndexOp && isRowVector);
if (isContiguousLoad) {
LDBG("Found contigous load: " << extractOp);
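In shape terms (illustrative): `vector<1x4xf32>` has trailing dim 4, so it is a row vector and may still be a contiguous load; `vector<4x1xf32>` is a column vector and now falls back to a gather:
    resType = vector<1x4xf32> -> shape.back() == 4 -> row vector, contiguous load kept
    resType = vector<4x1xf32> -> shape.back() == 1 -> column vector, gather fallback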
@@ -1073,6 +1083,11 @@ vectorizeTensorExtract(RewriterBase &rewriter, VectorizationState &state,
// b. contiguous loads.
// Both cases use vector.transfer_read.
+ assert(llvm::count_if(resultType.getShape(),
+ [](uint64_t dim) { return dim != 1; }) &&
+ "Contiguous loads and scalar loads + broadcast only support 1-D "
+ "vectors ATM!");
+
// Collect indices for `vector.transfer_read`. At this point, the indices will
// either be scalars or would have been broadcast to vectors matching the
// result type. For indices that are vectors, there are two options:
@@ -2972,10 +2987,15 @@ struct Conv1DGenerator
if (!setOperKind(reduceOp))
return;
auto maybeKind = getCombinerOpKind(reduceOp);
- if (!maybeKind || (*maybeKind != vector::CombiningKind::ADD &&
+ // Typically, convolution will have an `ADD` CombiningKind, but for the i1
+ // type it can get strength-reduced to `OR`, which is also supported. This
+ // strength-reduction logic lives in the `buildBinaryFn` helper in the
+ // Linalg dialect.
+ if (!maybeKind || ((*maybeKind != vector::CombiningKind::ADD &&
+ *maybeKind != vector::CombiningKind::OR) &&
(oper != Pool || !isSupportedPoolKind(*maybeKind)))) {
return;
}
+ reductionKind = maybeKind.value();
auto rhsRank = rhsShapedType.getRank();
switch (oper) {
@@ -3258,10 +3278,12 @@ struct Conv1DGenerator
bindDims(ctx, n, w, f, c);
lhs = promote(rewriter, loc, lhs, res.getType());
rhs = promote(rewriter, loc, rhs, res.getType());
- return rewriter.create<vector::ContractionOp>(
+ auto contractionOp = rewriter.create<vector::ContractionOp>(
loc, lhs, rhs, res,
/*indexingMaps=*/MapList{{n, w, c}, {c, f}, {n, w, f}},
/*iteratorTypes=*/ArrayRef<vector::IteratorType>{par, par, par, red});
+ contractionOp.setKind(reductionKind);
+ return contractionOp;
}
// Create an outerproduct: lhs{w} * rhs{1} -> res{w} for single channel
@@ -3651,6 +3673,7 @@ private:
int strideW, dilationW;
Value lhsShaped, rhsShaped, resShaped;
ShapedType lhsShapedType, rhsShapedType, resShapedType;
+ vector::CombiningKind reductionKind;
// Sets oper, poolExtOp and isPoolExt for valid conv/pooling ops.
// Returns true iff it is a valid conv/pooling op.
@@ -3666,7 +3689,9 @@ private:
switch (numBlockArguments) {
case 1: {
// Will be convolution if feeder is a MulOp.
- // Otherwise, if it can be pooling.
+ // A strength-reduced version of MulOp for the i1 type is AndOp, which is
+ // also supported. Otherwise, it can be pooling. This strength-reduction
+ // logic lives in the `buildBinaryFn` helper in the Linalg dialect.
auto feedValIt = llvm::find_if_not(reduceOp->getOperands(),
llvm::IsaPred<BlockArgument>);
Operation *feedOp = (*feedValIt).getDefiningOp();
@@ -3674,7 +3699,9 @@ private:
oper = Pool;
isPoolExt = true;
poolExtOp = feedOp->getName().getIdentifier();
- } else if (!(isa<arith::MulIOp, arith::MulFOp>(feedOp) &&
+ } else if (!((isa<arith::MulIOp, arith::MulFOp>(feedOp) ||
+ (isa<arith::AndIOp>(feedOp) &&
+ feedOp->getResultTypes()[0].isInteger(1))) &&
llvm::all_of(feedOp->getOperands(), [](Value v) {
if (isa<BlockArgument>(v))
return true;
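A hedged sketch of the i1 outcome described in the comments above (attribute syntax per the vector dialect; indexing maps, iterator types, and shapes are elided/illustrative): the combining kind recorded earlier ends up on the created contraction, e.g.
    // i1 conv: mul/add were built as arith.andi/arith.ori, so the vectorizer
    // now emits a contraction with kind = #vector.kind<or> instead of <add>:
    %r = vector.contraction {indexing_maps = [...], iterator_types = [...],
                             kind = #vector.kind<or>} %lhs, %rhs, %acc
         : vector<4x3xi1>, vector<3xi1> into vector<4xi1>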
diff --git a/mlir/lib/Dialect/SCF/Transforms/LoopPipelining.cpp b/mlir/lib/Dialect/SCF/Transforms/LoopPipelining.cpp
index 7cecd49..3d6da066 100644
--- a/mlir/lib/Dialect/SCF/Transforms/LoopPipelining.cpp
+++ b/mlir/lib/Dialect/SCF/Transforms/LoopPipelining.cpp
@@ -655,6 +655,9 @@ LoopPipelinerInternal::emitEpilogue(RewriterBase &rewriter,
Value rangeDecr = rewriter.create<arith::AddIOp>(loc, rangeIncr, minus1);
Value totalIterations = rewriter.create<arith::DivUIOp>(loc, rangeDecr, step);
+ Value zero =
+ rewriter.create<arith::ConstantOp>(loc, rewriter.getIntegerAttr(t, 0));
+
SmallVector<Value> predicates(maxStage + 1);
for (int64_t i = 0; i < maxStage; i++) {
// iterI = total_iters - 1 - i
@@ -671,9 +674,9 @@ LoopPipelinerInternal::emitEpilogue(RewriterBase &rewriter,
setValueMapping(forOp.getInductionVar(), newlastIter, maxStage - i);
if (dynamicLoop) {
- // pred = iterI >= lb
+ // pred = iterI >= 0
predicates[i + 1] = rewriter.create<arith::CmpIOp>(
- loc, arith::CmpIPredicate::sge, iterI, lb);
+ loc, arith::CmpIPredicate::sge, iterI, zero);
}
}
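A worked example of why the epilogue guard changed (numbers illustrative):
    lb = 4, ub = 16, step = 4
    totalIterations = ((ub - lb) + step - 1) / step = 3
    iterI = totalIterations - 1 - i  ->  2, 1, 0
    old guard iterI >= lb (= 4): always false here, epilogue stages wrongly disabled
    new guard iterI >= 0: correct, since iterI counts iterations, not induction-variable values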
diff --git a/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp b/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
index 47f540e..1ac9675 100644
--- a/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
+++ b/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
@@ -3987,6 +3987,23 @@ SmallVector<int64_t> PackOp::getStaticTiles() {
return getStaticTilesImpl(*this);
}
+ArrayRef<int64_t> PackOp::getAllOuterDims() {
+ ShapedType inputType = getSourceType();
+ int64_t inputRank = inputType.getRank();
+ return getDestType().getShape().take_front(inputRank);
+}
+
+SmallVector<int64_t> PackOp::getTiledOuterDims() {
+ auto innerDimsPos = getInnerDimsPos();
+ auto packedShape = getDestType().getShape();
+ SmallVector<int64_t> res;
+
+ for (auto index : innerDimsPos)
+ res.push_back(packedShape[index]);
+
+ return res;
+}
+
bool PackOp::requirePaddingValue(ArrayRef<int64_t> inputShape,
ArrayRef<int64_t> innerDimsPos,
ArrayRef<int64_t> outputShape,
@@ -4411,6 +4428,23 @@ SmallVector<int64_t> UnPackOp::getStaticTiles() {
return getStaticTilesImpl(*this);
}
+ArrayRef<int64_t> UnPackOp::getAllOuterDims() {
+ ShapedType destType = getDestType();
+ int64_t destRank = destType.getRank();
+ return getSourceType().getShape().take_front(destRank);
+}
+
+SmallVector<int64_t> UnPackOp::getTiledOuterDims() {
+ auto innerDimsPos = getInnerDimsPos();
+ auto packedShape = getSourceType().getShape();
+ SmallVector<int64_t> res;
+
+ for (auto index : innerDimsPos)
+ res.push_back(packedShape[index]);
+
+ return res;
+}
+
LogicalResult UnPackOp::verify() {
return commonVerifierPackAndUnPackOp(*this);
}
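To pin down the difference between the two new accessors (shapes illustrative):
    // source : tensor<1x6xf32>, inner_dims_pos = [1], inner_tiles = [2]
    // dest   : tensor<1x3x2xf32>
    getAllOuterDims()   -> dest shape truncated to source rank = [1, 3]
    getTiledOuterDims() -> dest dims at inner_dims_pos        = [3]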
diff --git a/mlir/lib/Dialect/XeGPU/IR/XeGPUDialect.cpp b/mlir/lib/Dialect/XeGPU/IR/XeGPUDialect.cpp
index 24719fe..1dfbaed 100644
--- a/mlir/lib/Dialect/XeGPU/IR/XeGPUDialect.cpp
+++ b/mlir/lib/Dialect/XeGPU/IR/XeGPUDialect.cpp
@@ -30,23 +30,35 @@ void XeGPUDialect::initialize() {
}
//===----------------------------------------------------------------------===//
-// XeGPU_TensorDescAttr
+// XeGPU_BlockTensorDescAttr
//===----------------------------------------------------------------------===//
-TensorDescAttr TensorDescAttr::get(mlir::MLIRContext *context,
- xegpu::MemoryScope memory_scope,
- int array_length, bool boundary_check,
- bool scattered) {
- auto scopeAttr = MemoryScopeAttr::get(context, memory_scope);
+BlockTensorDescAttr BlockTensorDescAttr::get(mlir::MLIRContext *context,
+ xegpu::MemorySpace memory_space,
+ int array_length,
+ bool boundary_check) {
+ auto scopeAttr = MemorySpaceAttr::get(context, memory_space);
auto lengthAttr =
IntegerAttr::get(IntegerType::get(context, 64), array_length);
auto boundaryAttr = BoolAttr::get(context, boundary_check);
- auto scatteredAttr = BoolAttr::get(context, scattered);
- return Base::get(context, scopeAttr, lengthAttr, boundaryAttr, scatteredAttr);
+ return Base::get(context, scopeAttr, lengthAttr, boundaryAttr);
+}
+
+//===----------------------------------------------------------------------===//
+// XeGPU_ScatterTensorDescAttr
+//===----------------------------------------------------------------------===//
+ScatterTensorDescAttr
+ScatterTensorDescAttr::get(mlir::MLIRContext *context,
+ xegpu::MemorySpace memory_space, int chunk_size) {
+ auto scopeAttr = MemorySpaceAttr::get(context, memory_space);
+ auto chunkSizeAttr =
+ IntegerAttr::get(IntegerType::get(context, 64), chunk_size);
+ return Base::get(context, scopeAttr, chunkSizeAttr);
}
//===----------------------------------------------------------------------===//
// XeGPU_TensorDescType
//===----------------------------------------------------------------------===//
+
mlir::Type TensorDescType::parse(::mlir::AsmParser &parser) {
llvm::SmallVector<int64_t> shape;
mlir::Type elementType;
@@ -108,12 +120,20 @@ void TensorDescType::print(::mlir::AsmPrinter &printer) const {
}
TensorDescType TensorDescType::get(llvm::ArrayRef<int64_t> shape,
- mlir::Type elementType, bool scattered,
- int array_length, MemoryScope memory_scope,
- bool boundary_check) {
+ mlir::Type elementType, int array_length,
+ bool boundary_check,
+ MemorySpace memory_space) {
+ auto context = elementType.getContext();
+ auto attr = BlockTensorDescAttr::get(context, memory_space, array_length,
+ boundary_check);
+ return Base::get(context, shape, elementType, attr);
+}
+
+TensorDescType TensorDescType::get(llvm::ArrayRef<int64_t> shape,
+ mlir::Type elementType, int chunk_size,
+ MemorySpace memory_space) {
auto context = elementType.getContext();
- auto attr = TensorDescAttr::get(context, memory_scope, array_length,
- boundary_check, scattered);
+ auto attr = ScatterTensorDescAttr::get(context, memory_space, chunk_size);
return Base::get(context, shape, elementType, attr);
}
diff --git a/mlir/lib/Dialect/XeGPU/IR/XeGPUOps.cpp b/mlir/lib/Dialect/XeGPU/IR/XeGPUOps.cpp
index 9c51733..1a7a6b3 100644
--- a/mlir/lib/Dialect/XeGPU/IR/XeGPUOps.cpp
+++ b/mlir/lib/Dialect/XeGPU/IR/XeGPUOps.cpp
@@ -124,6 +124,17 @@ LogicalResult CreateNdDescOp::verify() {
bool invalidRank = false;
bool invalidElemTy = false;
+ // The memory space of the created TensorDesc should match that of the
+ // source. Both default to global memory if the memory space attribute is
+ // not specified. If the source is an integer, it is treated as a pointer
+ // to global memory.
+ auto srcMemorySpace = getSourceMemorySpace();
+ auto tdescMemorySpace = static_cast<unsigned>(getType().getMemorySpace());
+ if (srcMemorySpace != tdescMemorySpace)
+ return emitOpError("Memory space mismatch.")
+ << " Source: " << srcMemorySpace
+ << ", TensorDesc: " << tdescMemorySpace;
+
// check source type matches the rank if it is a memref.
// It also should have the same ElementType as TensorDesc.
auto memrefTy = dyn_cast<MemRefType>(getSourceType());
@@ -152,9 +163,13 @@ LogicalResult CreateNdDescOp::verify() {
return emitOpError("TensorDesc should have the same element "
"type with the source if it is a memref.\n");
- if (getType().getScattered())
+ if (getType().isScattered())
return emitOpError("Expects a non-scattered TensorDesc.\n");
+ if (getType().getRank() == 2 &&
+ tdescMemorySpace == static_cast<unsigned>(MemorySpace::SLM))
+ return emitOpError("SLM is not supported for 2D Block TensorDesc.\n");
+
return success();
}
@@ -163,7 +178,7 @@ LogicalResult CreateNdDescOp::verify() {
//===----------------------------------------------------------------------===//
LogicalResult PrefetchNdOp::verify() {
auto tdescTy = getTensorDescType();
- if (tdescTy.getScattered())
+ if (tdescTy.isScattered())
return emitOpError("Expects a non-scattered TensorDesc.\n");
if (!isReadHintOrNone(getL1HintAttr()))
@@ -188,7 +203,7 @@ LogicalResult LoadNdOp::verify() {
if (tdescTy.getRank() > 2)
return emitOpError("Expecting a 1D/2D TensorDesc.\n");
- if (tdescTy.getScattered())
+ if (tdescTy.isScattered())
return emitOpError("Expects a non-scattered TensorDesc.\n");
if (!valueTy)
@@ -228,8 +243,8 @@ LogicalResult LoadNdOp::verify() {
tdescShape[axis] /= vnni_factor;
tdescShape.push_back(vnni_factor);
} else {
- return emitWarning("Invalid Packed Attr. It is ignored (available for 2D "
- "TensorDesc only).");
+ emitWarning("Invalid Packed Attr. It is ignored (available for 2D "
+ "TensorDesc only).");
}
}
@@ -256,7 +271,7 @@ LogicalResult StoreNdOp::verify() {
if (dstTy.getRank() > 2)
return emitOpError("Expecting a 1D/2D TensorDesc.\n");
- if (dstTy.getScattered())
+ if (dstTy.isScattered())
return emitOpError("Expects a non-scattered TensorDesc.\n");
if (!valTy)
@@ -279,7 +294,7 @@ LogicalResult StoreNdOp::verify() {
//===----------------------------------------------------------------------===//
LogicalResult UpdateNdOffsetOp::verify() {
auto ty = getTensorDescType();
- if (ty.getScattered())
+ if (ty.isScattered())
return emitOpError("Expects a non-scattered TensorDesc.\n");
// number of offsets specified must match the rank of the tensor descriptor
@@ -292,28 +307,55 @@ LogicalResult UpdateNdOffsetOp::verify() {
//===----------------------------------------------------------------------===//
// XeGPU_CreateDescOp
//===----------------------------------------------------------------------===//
-void CreateDescOp::build(OpBuilder &builder, OperationState &state,
- TensorDescType TensorDesc, Value source,
- llvm::ArrayRef<OpFoldResult> offsets,
- uint32_t chunk_size) {
- llvm::SmallVector<int64_t> staticOffsets;
- llvm::SmallVector<Value> dynamicOffsets;
- dispatchIndexOpFoldResults(offsets, dynamicOffsets, staticOffsets);
- build(builder, state, TensorDesc, source, dynamicOffsets, staticOffsets,
- chunk_size);
-}
LogicalResult CreateDescOp::verify() {
auto tdescTy = getTensorDescType();
- auto chunkSize = getChunkSize();
if (getRankOf(getSource()) > 1)
return emitOpError(
"Expecting the source is a 1D memref or pointer (uint64_t).");
- if (!tdescTy.getScattered())
+ if (!tdescTy.isScattered())
return emitOpError("Expects a scattered TensorDesc.\n");
+ // The memory space of the created TensorDesc should match that of the
+ // source. Both default to global memory if the memory space attribute is
+ // not specified. If the source is an integer, it is treated as a pointer
+ // to global memory.
+ auto srcMemorySpace = getSourceMemorySpace();
+ auto tdescMemorySpace = static_cast<unsigned>(tdescTy.getMemorySpace());
+ if (srcMemorySpace != tdescMemorySpace)
+ return emitOpError("Memory space mismatch.")
+ << " Source: " << srcMemorySpace
+ << ", TensorDesc: " << tdescMemorySpace;
+
+ auto chunkSize = tdescTy.getChunkSize();
+
+ // check chunk_size
+ llvm::SmallVector<int64_t> supportedChunkSizes = {1, 2, 3, 4, 8,
+ 16, 32, 64, 128, 256};
+ if (!llvm::is_contained(supportedChunkSizes, chunkSize))
+ return emitOpError("Invalid chunk_size. Supported values are 1, 2, 3, 4, "
+ "8, 16, 32, 64, 128, or 256.");
+
+ // check total size
+ auto elemBits = tdescTy.getElementType().getIntOrFloatBitWidth();
+ auto bitsPerLane = elemBits * chunkSize;
+ if (chunkSize > 1 && bitsPerLane % 32) {
+ // For 8-bit and 16-bit data, the hardware only supports a chunk size of 1.
+ // For 32-bit data, it can support larger chunk sizes, so 8-bit/16-bit data
+ // can be bitcast to 32-bit data for better performance. That optimization
+ // requires the total size to be 32-bit aligned.
+ return emitOpError(
+ "access size (chunk_size * sizeof(elemTy)) should be 32-bit aligned.");
+ }
+
+ auto lscConstraints = 512 * 8; // each access is up to 512 bytes.
+ if (elemBits * tdescTy.getNumElements() > lscConstraints)
+ return emitOpError("total access size (simd_lanes * chunk_size * "
+ "sizeof(elemTy)) must not exceed 512 bytes.");
+
SmallVector<int64_t> shape({(int64_t)getNumOffsets()});
if (chunkSize != 1)
shape.push_back(chunkSize);
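Worked numbers for the two new checks (illustrative):
    f16, chunk_size = 8 -> bitsPerLane = 16 * 8 = 128, 128 % 32 == 0 -> accepted
    i8,  chunk_size = 2 -> bitsPerLane =  8 * 2 =  16,  16 % 32 != 0 -> rejected
    total size: elemBits * numElements must stay <= 512 * 8 = 4096 bits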
@@ -331,7 +373,7 @@ LogicalResult CreateDescOp::verify() {
//===----------------------------------------------------------------------===//
LogicalResult PrefetchOp::verify() {
auto tdescTy = getTensorDescType();
- if (!tdescTy.getScattered())
+ if (!tdescTy.isScattered())
return emitOpError("Expects a scattered TensorDesc.\n");
if (!isReadHintOrNone(getL1HintAttr()))
@@ -354,7 +396,7 @@ LogicalResult LoadGatherOp::verify() {
auto maskTy = getMaskType();
auto valueTy = getValueType();
- if (!tdescTy.getScattered())
+ if (!tdescTy.isScattered())
return emitOpError("Expects a scattered TensorDesc.\n");
if (!isReadHintOrNone(getL1HintAttr()))
@@ -379,12 +421,10 @@ LogicalResult LoadGatherOp::verify() {
if (tdescShape[0] != maskShape[0])
return emitOpError("dim-0 of the Mask and TensorDesc should be the same.");
- if (getTransposeAttr()) {
- auto trans = getTranspose().value();
- if (tdescShape.size() < trans.size())
- emitWarning("Invalid transpose attr. It is ignored.");
- else
- transpose(trans, tdescShape);
+ if (tdescTy.getRank() == 2) {
+ if (!getTransposeAttr())
+ return emitOpError("load_gather has to be transposed.");
+ transpose({1, 0}, tdescShape);
}
if (valueShape != tdescShape)
@@ -400,7 +440,7 @@ LogicalResult LoadGatherOp::verify() {
//===----------------------------------------------------------------------===//
LogicalResult StoreScatterOp::verify() {
auto tdescTy = getTensorDescType();
- if (!tdescTy.getScattered())
+ if (!tdescTy.isScattered())
return emitOpError("Expects a scattered TensorDesc.\n");
if (!isWriteHintOrNone(getL1HintAttr()))
@@ -413,11 +453,24 @@ LogicalResult StoreScatterOp::verify() {
return emitOpError("invlid l3_hint: ") << getL3HintAttr();
auto maskTy = getMaskType();
+ auto valueTy = getValueType();
auto maskShape = getShapeOf(maskTy);
auto tdescShape = getShapeOf(tdescTy);
+ auto valueShape = getShapeOf(valueTy);
if (tdescShape[0] != maskShape[0])
return emitOpError("dim-0 of the Mask and TensorDesc should be the same.");
+ if (tdescTy.getRank() == 2) {
+ if (!getTransposeAttr())
+ return emitOpError("store_scatter has to be transposed.");
+ transpose({1, 0}, tdescShape);
+ }
+
+ if (valueShape != tdescShape)
+ return emitOpError("Unexpected value shape")
+ << "(Expected shape: " << makeString(tdescShape)
+ << ", Given shape: " << makeString(valueShape) << ").\n";
+
return success();
}
//===----------------------------------------------------------------------===//
diff --git a/mlir/lib/IR/AsmPrinter.cpp b/mlir/lib/IR/AsmPrinter.cpp
index 32182c0..d0fd8e4 100644
--- a/mlir/lib/IR/AsmPrinter.cpp
+++ b/mlir/lib/IR/AsmPrinter.cpp
@@ -2575,6 +2575,7 @@ void AsmPrinter::Impl::printTypeImpl(Type type) {
opaqueTy.getTypeData());
})
.Case<IndexType>([&](Type) { os << "index"; })
+ .Case<Float4E2M1FNType>([&](Type) { os << "f4E2M1FN"; })
.Case<Float6E2M3FNType>([&](Type) { os << "f6E2M3FN"; })
.Case<Float6E3M2FNType>([&](Type) { os << "f6E3M2FN"; })
.Case<Float8E5M2Type>([&](Type) { os << "f8E5M2"; })
diff --git a/mlir/lib/IR/Builders.cpp b/mlir/lib/IR/Builders.cpp
index 144a13d..7aed415 100644
--- a/mlir/lib/IR/Builders.cpp
+++ b/mlir/lib/IR/Builders.cpp
@@ -34,6 +34,10 @@ Location Builder::getFusedLoc(ArrayRef<Location> locs, Attribute metadata) {
// Types.
//===----------------------------------------------------------------------===//
+FloatType Builder::getFloat4E2M1FNType() {
+ return FloatType::getFloat4E2M1FN(context);
+}
+
FloatType Builder::getFloat6E2M3FNType() {
return FloatType::getFloat6E2M3FN(context);
}
diff --git a/mlir/lib/IR/BuiltinTypes.cpp b/mlir/lib/IR/BuiltinTypes.cpp
index 702d98e..782a32b 100644
--- a/mlir/lib/IR/BuiltinTypes.cpp
+++ b/mlir/lib/IR/BuiltinTypes.cpp
@@ -101,6 +101,8 @@ unsigned FloatType::getWidth() {
/// Returns the floating semantics for the given type.
const llvm::fltSemantics &FloatType::getFloatSemantics() {
+ if (llvm::isa<Float4E2M1FNType>(*this))
+ return APFloat::Float4E2M1FN();
if (llvm::isa<Float6E2M3FNType>(*this))
return APFloat::Float6E2M3FN();
if (llvm::isa<Float6E3M2FNType>(*this))
diff --git a/mlir/lib/IR/MLIRContext.cpp b/mlir/lib/IR/MLIRContext.cpp
index 1684566..f45de17 100644
--- a/mlir/lib/IR/MLIRContext.cpp
+++ b/mlir/lib/IR/MLIRContext.cpp
@@ -221,6 +221,7 @@ public:
llvm::DenseMap<StringRef, AbstractType *> nameToType;
/// Cached Type Instances.
+ Float4E2M1FNType f4E2M1FNTy;
Float6E2M3FNType f6E2M3FNTy;
Float6E3M2FNType f6E3M2FNTy;
Float8E5M2Type f8E5M2Ty;
@@ -315,6 +316,7 @@ MLIRContext::MLIRContext(const DialectRegistry &registry, Threading setting)
//// Types.
/// Floating-point Types.
+ impl->f4E2M1FNTy = TypeUniquer::get<Float4E2M1FNType>(this);
impl->f6E2M3FNTy = TypeUniquer::get<Float6E2M3FNType>(this);
impl->f6E3M2FNTy = TypeUniquer::get<Float6E3M2FNType>(this);
impl->f8E5M2Ty = TypeUniquer::get<Float8E5M2Type>(this);
@@ -1017,6 +1019,9 @@ AbstractType::lookup(StringRef name, MLIRContext *context) {
/// This should not be used directly.
StorageUniquer &MLIRContext::getTypeUniquer() { return getImpl().typeUniquer; }
+Float4E2M1FNType Float4E2M1FNType::get(MLIRContext *context) {
+ return context->getImpl().f4E2M1FNTy;
+}
Float6E2M3FNType Float6E2M3FNType::get(MLIRContext *context) {
return context->getImpl().f6E2M3FNTy;
}
diff --git a/mlir/lib/IR/Types.cpp b/mlir/lib/IR/Types.cpp
index c828fd3..efefbc2 100644
--- a/mlir/lib/IR/Types.cpp
+++ b/mlir/lib/IR/Types.cpp
@@ -34,6 +34,7 @@ Type AbstractType::replaceImmediateSubElements(Type type,
MLIRContext *Type::getContext() const { return getDialect().getContext(); }
+bool Type::isFloat4E2M1FN() const { return llvm::isa<Float4E2M1FNType>(*this); }
bool Type::isFloat6E2M3FN() const { return llvm::isa<Float6E2M3FNType>(*this); }
bool Type::isFloat6E3M2FN() const { return llvm::isa<Float6E3M2FNType>(*this); }
bool Type::isFloat8E5M2() const { return llvm::isa<Float8E5M2Type>(*this); }
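Taken together with the context, builder, and printer hooks above, the new type should round-trip through the textual IR; a minimal sketch (assuming the printed form `f4E2M1FN` registered in AsmPrinter above):
    func.func private @use(%arg0 : f4E2M1FN)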
diff --git a/mlir/python/mlir/_mlir_libs/_mlir/ir.pyi b/mlir/python/mlir/_mlir_libs/_mlir/ir.pyi
index ea5c96d..4d5b4ce 100644
--- a/mlir/python/mlir/_mlir_libs/_mlir/ir.pyi
+++ b/mlir/python/mlir/_mlir_libs/_mlir/ir.pyi
@@ -120,6 +120,7 @@ __all__ = [
"F32Type",
"F64Type",
"FlatSymbolRefAttr",
+ "Float4E2M1FNType",
"Float6E2M3FNType",
"Float6E3M2FNType",
"Float8E3M4Type",
@@ -1542,6 +1543,19 @@ class FlatSymbolRefAttr(Attribute):
Returns the value of the FlatSymbolRef attribute as a string
"""
+class Float4E2M1FNType(FloatType):
+ static_typeid: ClassVar[TypeID]
+ @staticmethod
+ def get(context: Optional[Context] = None) -> Float4E2M1FNType:
+ """
+ Create a float4_e2m1fn type.
+ """
+ @staticmethod
+ def isinstance(other: Type) -> bool: ...
+ def __init__(self, cast_from_type: Type) -> None: ...
+ @property
+ def typeid(self) -> TypeID: ...
+
class Float6E2M3FNType(FloatType):
static_typeid: ClassVar[TypeID]
@staticmethod
diff --git a/mlir/python/mlir/extras/types.py b/mlir/python/mlir/extras/types.py
index 4be425f..5b24a6d 100644
--- a/mlir/python/mlir/extras/types.py
+++ b/mlir/python/mlir/extras/types.py
@@ -12,6 +12,7 @@ from ..ir import (
F16Type,
F32Type,
F64Type,
+ Float4E2M1FNType,
Float6E2M3FNType,
Float6E3M2FNType,
Float8E3M4Type,
@@ -76,6 +77,7 @@ f8E4M3 = lambda: Float8E4M3Type.get()
f8E4M3FN = lambda: Float8E4M3FNType.get()
f8E4M3B11FNUZ = lambda: Float8E4M3B11FNUZType.get()
f8E3M4 = lambda: Float8E3M4Type.get()
+f4E2M1FN = lambda: Float4E2M1FNType.get()
f6E2M3FN = lambda: Float6E2M3FNType.get()
f6E3M2FN = lambda: Float6E3M2FNType.get()
diff --git a/mlir/test/Conversion/GPUToROCDL/gpu-to-rocdl.mlir b/mlir/test/Conversion/GPUToROCDL/gpu-to-rocdl.mlir
index eb065cb..0d3e9f4 100644
--- a/mlir/test/Conversion/GPUToROCDL/gpu-to-rocdl.mlir
+++ b/mlir/test/Conversion/GPUToROCDL/gpu-to-rocdl.mlir
@@ -162,11 +162,12 @@ gpu.module @test_module {
// -----
gpu.module @test_module {
+ // CHECK: llvm.func @__ocml_exp_f16(f16) -> f16
// CHECK: llvm.func @__ocml_exp_f64(f64) -> f64
// CHECK-LABEL: func @gpu_exp
func.func @gpu_exp(%arg_f16 : f16, %arg_f32 : f32, %arg_f64 : f64) -> (f16, f32, f64) {
%result16 = math.exp %arg_f16 : f16
- // CHECK: llvm.intr.exp(%{{.*}}) : (f16) -> f16
+ // CHECK: llvm.call @__ocml_exp_f16(%{{.*}}) : (f16) -> f16
%result32 = math.exp %arg_f32 : f32
// CHECK: llvm.intr.exp(%{{.*}}) : (f32) -> f32
%result64 = math.exp %arg_f64 : f64
@@ -178,11 +179,12 @@ gpu.module @test_module {
// -----
gpu.module @test_module {
+ // CHECK: llvm.func @__ocml_log_f16(f16) -> f16
// CHECK: llvm.func @__ocml_log_f64(f64) -> f64
// CHECK-LABEL: func @gpu_log
func.func @gpu_log(%arg_f16 : f16, %arg_f32 : f32, %arg_f64 : f64) -> (f16, f32, f64) {
%result16 = math.log %arg_f16 : f16
- // CHECK: llvm.intr.log(%{{.*}}) : (f16) -> f16
+ // CHECK: llvm.call @__ocml_log_f16(%{{.*}}) : (f16) -> f16
%result32 = math.log %arg_f32 : f32
// CHECK: llvm.intr.log(%{{.*}}) : (f32) -> f32
%result64 = math.log %arg_f64 : f64
@@ -194,108 +196,113 @@ gpu.module @test_module {
// -----
gpu.module @test_module {
+ // CHECK: llvm.func @__ocml_cbrt_f16(f16) -> f16
// CHECK: llvm.func @__ocml_cbrt_f32(f32) -> f32
// CHECK: llvm.func @__ocml_cbrt_f64(f64) -> f64
// CHECK-LABEL: func @gpu_cbrt
- func.func @gpu_cbrt(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) {
+ func.func @gpu_cbrt(%arg_f16 : f16, %arg_f32 : f32, %arg_f64 : f64) -> (f16, f32, f64) {
+ %result16 = math.cbrt %arg_f16 : f16
+ // CHECK: llvm.call @__ocml_cbrt_f16(%{{.*}}) : (f16) -> f16
%result32 = math.cbrt %arg_f32 : f32
// CHECK: llvm.call @__ocml_cbrt_f32(%{{.*}}) : (f32) -> f32
%result64 = math.cbrt %arg_f64 : f64
// CHECK: llvm.call @__ocml_cbrt_f64(%{{.*}}) : (f64) -> f64
- func.return %result32, %result64 : f32, f64
+ func.return %result16, %result32, %result64 : f16, f32, f64
}
}
// -----
gpu.module @test_module {
+ // CHECK: llvm.func @__ocml_ceil_f16(f16) -> f16
// CHECK: llvm.func @__ocml_ceil_f32(f32) -> f32
// CHECK: llvm.func @__ocml_ceil_f64(f64) -> f64
// CHECK-LABEL: func @gpu_ceil
- func.func @gpu_ceil(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) {
+ func.func @gpu_ceil(%arg_f16 : f16, %arg_f32 : f32, %arg_f64 : f64) -> (f16, f32, f64) {
+ %result16 = math.ceil %arg_f16 : f16
+ // CHECK: llvm.call @__ocml_ceil_f16(%{{.*}}) : (f16) -> f16
%result32 = math.ceil %arg_f32 : f32
// CHECK: llvm.call @__ocml_ceil_f32(%{{.*}}) : (f32) -> f32
%result64 = math.ceil %arg_f64 : f64
// CHECK: llvm.call @__ocml_ceil_f64(%{{.*}}) : (f64) -> f64
- func.return %result32, %result64 : f32, f64
+ func.return %result16, %result32, %result64 : f16, f32, f64
}
}
// -----
gpu.module @test_module {
+ // CHECK: llvm.func @__ocml_floor_f16(f16) -> f16
// CHECK: llvm.func @__ocml_floor_f32(f32) -> f32
// CHECK: llvm.func @__ocml_floor_f64(f64) -> f64
// CHECK-LABEL: func @gpu_floor
- func.func @gpu_floor(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) {
+ func.func @gpu_floor(%arg_f16 : f16, %arg_f32 : f32, %arg_f64 : f64) -> (f16, f32, f64) {
+ %result16 = math.floor %arg_f16 : f16
+ // CHECK: llvm.call @__ocml_floor_f16(%{{.*}}) : (f16) -> f16
%result32 = math.floor %arg_f32 : f32
// CHECK: llvm.call @__ocml_floor_f32(%{{.*}}) : (f32) -> f32
%result64 = math.floor %arg_f64 : f64
// CHECK: llvm.call @__ocml_floor_f64(%{{.*}}) : (f64) -> f64
- func.return %result32, %result64 : f32, f64
+ func.return %result16, %result32, %result64 : f16, f32, f64
}
}
// -----
gpu.module @test_module {
+ // CHECK: llvm.func @__ocml_cos_f16(f16) -> f16
// CHECK: llvm.func @__ocml_cos_f32(f32) -> f32
// CHECK: llvm.func @__ocml_cos_f64(f64) -> f64
// CHECK-LABEL: func @gpu_cos
- func.func @gpu_cos(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) {
+ func.func @gpu_cos(%arg_f16 : f16, %arg_f32 : f32, %arg_f64 : f64) -> (f16, f32, f64) {
+ %result16 = math.cos %arg_f16 : f16
+ // CHECK: llvm.call @__ocml_cos_f16(%{{.*}}) : (f16) -> f16
%result32 = math.cos %arg_f32 : f32
// CHECK: llvm.call @__ocml_cos_f32(%{{.*}}) : (f32) -> f32
%result64 = math.cos %arg_f64 : f64
// CHECK: llvm.call @__ocml_cos_f64(%{{.*}}) : (f64) -> f64
- func.return %result32, %result64 : f32, f64
- }
-}
-
-// -----
-
-gpu.module @test_module {
- // CHECK: llvm.func @__ocml_exp_f64(f64) -> f64
- // CHECK-LABEL: func @gpu_exp
- func.func @gpu_exp(%arg_f64 : f64) -> (f64) {
- %result64 = math.exp %arg_f64 : f64
- // CHECK: llvm.call @__ocml_exp_f64(%{{.*}}) : (f64) -> f64
- func.return %result64 : f64
+ func.return %result16, %result32, %result64 : f16, f32, f64
}
}
// -----
gpu.module @test_module {
+ // CHECK: llvm.func @__ocml_exp2_f16(f16) -> f16
// CHECK: llvm.func @__ocml_exp2_f32(f32) -> f32
// CHECK: llvm.func @__ocml_exp2_f64(f64) -> f64
// CHECK-LABEL: func @gpu_exp2
- func.func @gpu_exp2(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) {
+ func.func @gpu_exp2(%arg_f16 : f16, %arg_f32 : f32, %arg_f64 : f64) -> (f16, f32, f64) {
+ %result16 = math.exp2 %arg_f16 : f16
+ // CHECK: llvm.call @__ocml_exp2_f16(%{{.*}}) : (f16) -> f16
%exp2_f32 = math.exp2 %arg_f32 : f32
// CHECK: llvm.call @__ocml_exp2_f32(%{{.*}}) : (f32) -> f32
%result32 = math.exp2 %exp2_f32 : f32
// CHECK: llvm.call @__ocml_exp2_f32(%{{.*}}) : (f32) -> f32
%result64 = math.exp2 %arg_f64 : f64
// CHECK: llvm.call @__ocml_exp2_f64(%{{.*}}) : (f64) -> f64
- func.return %result32, %result64 : f32, f64
+ func.return %result16, %result32, %result64 : f16, f32, f64
}
}
// -----
+
// Test that we properly handle operations with a SymbolTable other than the module op
gpu.module @test_module {
"test.symbol_scope"() ({
// CHECK: test.symbol_scope
+ // CHECK: llvm.func @__ocml_sin_f16(f16) -> f16
// CHECK: llvm.func @__ocml_sin_f32(f32) -> f32
// CHECK: llvm.func @__ocml_sin_f64(f64) -> f64
// CHECK-LABEL: func @gpu_sin
- func.func @gpu_sin(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) {
- %sin_f32 = math.sin %arg_f32 : f32
+ func.func @gpu_sin(%arg_f16 : f16, %arg_f32 : f32, %arg_f64 : f64) -> (f16, f32, f64) {
+ // CHECK: llvm.call @__ocml_sin_f16(%{{.*}}) : (f16) -> f16
+ %result16 = math.sin %arg_f16 : f16
// CHECK: llvm.call @__ocml_sin_f32(%{{.*}}) : (f32) -> f32
- %result32 = math.sin %sin_f32 : f32
- // CHECK: llvm.call @__ocml_sin_f32(%{{.*}}) : (f32) -> f32
- %result64 = math.sin %arg_f64 : f64
+ %result32 = math.sin %arg_f32 : f32
// CHECK: llvm.call @__ocml_sin_f64(%{{.*}}) : (f64) -> f64
- func.return %result32, %result64 : f32, f64
+ %result64 = math.sin %arg_f64 : f64
+ func.return %result16, %result32, %result64 : f16, f32, f64
}
"test.finish" () : () -> ()
}) : () -> ()
@@ -304,89 +311,102 @@ gpu.module @test_module {
// -----
gpu.module @test_module {
+ // CHECK: llvm.func @__ocml_expm1_f16(f16) -> f16
// CHECK: llvm.func @__ocml_expm1_f32(f32) -> f32
// CHECK: llvm.func @__ocml_expm1_f64(f64) -> f64
// CHECK-LABEL: func @gpu_expm1
- func.func @gpu_expm1(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) {
+ func.func @gpu_expm1(%arg_f16 : f16, %arg_f32 : f32, %arg_f64 : f64) -> (f16, f32, f64) {
+ %result16 = math.expm1 %arg_f16 : f16
+ // CHECK: llvm.call @__ocml_expm1_f16(%{{.*}}) : (f16) -> f16
%expm1_f32 = math.expm1 %arg_f32 : f32
// CHECK: llvm.call @__ocml_expm1_f32(%{{.*}}) : (f32) -> f32
%result32 = math.expm1 %expm1_f32 : f32
// CHECK: llvm.call @__ocml_expm1_f32(%{{.*}}) : (f32) -> f32
%result64 = math.expm1 %arg_f64 : f64
// CHECK: llvm.call @__ocml_expm1_f64(%{{.*}}) : (f64) -> f64
- func.return %result32, %result64 : f32, f64
+ func.return %result16, %result32, %result64 : f16, f32, f64
}
}
// -----
gpu.module @test_module {
+ // CHECK: llvm.func @__ocml_log_f16(f16) -> f16
// CHECK: llvm.func @__ocml_log_f64(f64) -> f64
// CHECK-LABEL: func @gpu_log
- func.func @gpu_log(%arg_f64 : f64) -> (f64) {
+ func.func @gpu_log(%arg_f16 : f16, %arg_f64 : f64) -> (f16, f64) {
+ %result16 = math.log %arg_f16 : f16
+ // CHECK: llvm.call @__ocml_log_f16(%{{.*}}) : (f16) -> f16
%result64 = math.log %arg_f64 : f64
// CHECK: llvm.call @__ocml_log_f64(%{{.*}}) : (f64) -> f64
- func.return %result64 : f64
+ func.return %result16, %result64 : f16, f64
}
}
// -----
gpu.module @test_module {
+ // CHECK: llvm.func @__ocml_log1p_f16(f16) -> f16
// CHECK: llvm.func @__ocml_log1p_f32(f32) -> f32
// CHECK: llvm.func @__ocml_log1p_f64(f64) -> f64
// CHECK-LABEL: func @gpu_log1p
- func.func @gpu_log1p(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) {
+ func.func @gpu_log1p(%arg_f16 : f16, %arg_f32 : f32, %arg_f64 : f64) -> (f16, f32, f64) {
+ %result16 = math.log1p %arg_f16 : f16
+ // CHECK: llvm.call @__ocml_log1p_f16(%{{.*}}) : (f16) -> f16
%result32 = math.log1p %arg_f32 : f32
// CHECK: llvm.call @__ocml_log1p_f32(%{{.*}}) : (f32) -> f32
%result64 = math.log1p %arg_f64 : f64
// CHECK: llvm.call @__ocml_log1p_f64(%{{.*}}) : (f64) -> f64
- func.return %result32, %result64 : f32, f64
+ func.return %result16, %result32, %result64 : f16, f32, f64
}
}
// -----
gpu.module @test_module {
+ // CHECK: llvm.func @__ocml_log10_f16(f16) -> f16
// CHECK: llvm.func @__ocml_log10_f32(f32) -> f32
// CHECK: llvm.func @__ocml_log10_f64(f64) -> f64
// CHECK-LABEL: func @gpu_log10
- func.func @gpu_log10(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) {
+ func.func @gpu_log10(%arg_f16 : f16, %arg_f32 : f32, %arg_f64 : f64) -> (f16, f32, f64) {
+ %result16 = math.log10 %arg_f16 : f16
+ // CHECK: llvm.call @__ocml_log10_f16(%{{.*}}) : (f16) -> f16
%result32 = math.log10 %arg_f32 : f32
// CHECK: llvm.call @__ocml_log10_f32(%{{.*}}) : (f32) -> f32
%result64 = math.log10 %arg_f64 : f64
// CHECK: llvm.call @__ocml_log10_f64(%{{.*}}) : (f64) -> f64
- func.return %result32, %result64 : f32, f64
+ func.return %result16, %result32, %result64 : f16, f32, f64
}
}
// -----
gpu.module @test_module {
+ // CHECK: llvm.func @__ocml_log2_f16(f16) -> f16
// CHECK: llvm.func @__ocml_log2_f32(f32) -> f32
// CHECK: llvm.func @__ocml_log2_f64(f64) -> f64
// CHECK-LABEL: func @gpu_log2
- func.func @gpu_log2(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) {
+ func.func @gpu_log2(%arg_f16 : f16, %arg_f32 : f32, %arg_f64 : f64) -> (f16, f32, f64) {
+ %result16 = math.log2 %arg_f16 : f16
+ // CHECK: llvm.call @__ocml_log2_f16(%{{.*}}) : (f16) -> f16
%result32 = math.log2 %arg_f32 : f32
// CHECK: llvm.call @__ocml_log2_f32(%{{.*}}) : (f32) -> f32
%result64 = math.log2 %arg_f64 : f64
// CHECK: llvm.call @__ocml_log2_f64(%{{.*}}) : (f64) -> f64
- func.return %result32, %result64 : f32, f64
+ func.return %result16, %result32, %result64 : f16, f32, f64
}
}
// -----
gpu.module @test_module {
+ // CHECK: llvm.func @__ocml_rsqrt_f16(f16) -> f16
// CHECK: llvm.func @__ocml_rsqrt_f32(f32) -> f32
// CHECK: llvm.func @__ocml_rsqrt_f64(f64) -> f64
// CHECK-LABEL: func @gpu_rsqrt
- func.func @gpu_rsqrt(%arg_f16 : f16, %arg_f32 : f32, %arg_f64 : f64)
- -> (f16, f32, f64) {
+ func.func @gpu_rsqrt(%arg_f16 : f16, %arg_f32 : f32, %arg_f64 : f64) -> (f16, f32, f64) {
%result16 = math.rsqrt %arg_f16 : f16
- // CHECK: llvm.fpext %{{.*}} : f16 to f32
- // CHECK-NEXT: llvm.call @__ocml_rsqrt_f32(%{{.*}}) : (f32) -> f32
- // CHECK-NEXT: llvm.fptrunc %{{.*}} : f32 to f16
+ // CHECK: llvm.call @__ocml_rsqrt_f16(%{{.*}}) : (f16) -> f16
%result32 = math.rsqrt %arg_f32 : f32
// CHECK: llvm.call @__ocml_rsqrt_f32(%{{.*}}) : (f32) -> f32
%result64 = math.rsqrt %arg_f64 : f64
@@ -398,90 +418,108 @@ gpu.module @test_module {
// -----
gpu.module @test_module {
+ // CHECK: llvm.func @__ocml_tan_f16(f16) -> f16
// CHECK: llvm.func @__ocml_tan_f32(f32) -> f32
// CHECK: llvm.func @__ocml_tan_f64(f64) -> f64
// CHECK-LABEL: func @gpu_tan
- func.func @gpu_tan(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) {
+ func.func @gpu_tan(%arg_f16 : f16, %arg_f32 : f32, %arg_f64 : f64) -> (f16, f32, f64) {
+ %result16 = math.tan %arg_f16 : f16
+ // CHECK: llvm.call @__ocml_tan_f16(%{{.*}}) : (f16) -> f16
%result32 = math.tan %arg_f32 : f32
// CHECK: llvm.call @__ocml_tan_f32(%{{.*}}) : (f32) -> f32
%result64 = math.tan %arg_f64 : f64
// CHECK: llvm.call @__ocml_tan_f64(%{{.*}}) : (f64) -> f64
- func.return %result32, %result64 : f32, f64
+ func.return %result16, %result32, %result64 : f16, f32, f64
}
}
// -----
gpu.module @test_module {
+ // CHECK: llvm.func @__ocml_tanh_f16(f16) -> f16
// CHECK: llvm.func @__ocml_tanh_f32(f32) -> f32
// CHECK: llvm.func @__ocml_tanh_f64(f64) -> f64
// CHECK-LABEL: func @gpu_tanh
- func.func @gpu_tanh(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) {
+ func.func @gpu_tanh(%arg_f16 : f16, %arg_f32 : f32, %arg_f64 : f64) -> (f16, f32, f64) {
+ %result16 = math.tanh %arg_f16 : f16
+ // CHECK: llvm.call @__ocml_tanh_f16(%{{.*}}) : (f16) -> f16
%result32 = math.tanh %arg_f32 : f32
// CHECK: llvm.call @__ocml_tanh_f32(%{{.*}}) : (f32) -> f32
%result64 = math.tanh %arg_f64 : f64
// CHECK: llvm.call @__ocml_tanh_f64(%{{.*}}) : (f64) -> f64
- func.return %result32, %result64 : f32, f64
+ func.return %result16, %result32, %result64 : f16, f32, f64
}
}
// -----
gpu.module @test_module {
+ // CHECK: llvm.func @__ocml_atan_f16(f16) -> f16
// CHECK: llvm.func @__ocml_atan_f32(f32) -> f32
// CHECK: llvm.func @__ocml_atan_f64(f64) -> f64
// CHECK-LABEL: func @gpu_atan
- func.func @gpu_atan(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) {
+ func.func @gpu_atan(%arg_f16 : f16, %arg_f32 : f32, %arg_f64 : f64) -> (f16, f32, f64) {
+ %result16 = math.atan %arg_f16 : f16
+ // CHECK: llvm.call @__ocml_atan_f16(%{{.*}}) : (f16) -> f16
%result32 = math.atan %arg_f32 : f32
// CHECK: llvm.call @__ocml_atan_f32(%{{.*}}) : (f32) -> f32
%result64 = math.atan %arg_f64 : f64
// CHECK: llvm.call @__ocml_atan_f64(%{{.*}}) : (f64) -> f64
- func.return %result32, %result64 : f32, f64
+ func.return %result16, %result32, %result64 : f16, f32, f64
}
}
// -----
gpu.module @test_module {
+ // CHECK: llvm.func @__ocml_atan2_f16(f16, f16) -> f16
// CHECK: llvm.func @__ocml_atan2_f32(f32, f32) -> f32
// CHECK: llvm.func @__ocml_atan2_f64(f64, f64) -> f64
// CHECK-LABEL: func @gpu_atan2
- func.func @gpu_atan2(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) {
+ func.func @gpu_atan2(%arg_f16 : f16, %arg_f32 : f32, %arg_f64 : f64) -> (f16, f32, f64) {
+ %result16 = math.atan2 %arg_f16, %arg_f16 : f16
+ // CHECK: llvm.call @__ocml_atan2_f16(%{{.*}}) : (f16, f16) -> f16
%result32 = math.atan2 %arg_f32, %arg_f32 : f32
// CHECK: llvm.call @__ocml_atan2_f32(%{{.*}}) : (f32, f32) -> f32
%result64 = math.atan2 %arg_f64, %arg_f64 : f64
// CHECK: llvm.call @__ocml_atan2_f64(%{{.*}}) : (f64, f64) -> f64
- func.return %result32, %result64 : f32, f64
+ func.return %result16, %result32, %result64 : f16, f32, f64
}
}
// -----
gpu.module @test_module {
+ // CHECK: llvm.func @__ocml_pow_f16(f16, f16) -> f16
// CHECK: llvm.func @__ocml_pow_f32(f32, f32) -> f32
// CHECK: llvm.func @__ocml_pow_f64(f64, f64) -> f64
// CHECK-LABEL: func @gpu_pow
- func.func @gpu_pow(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) {
+ func.func @gpu_pow(%arg_f16 : f16, %arg_f32 : f32, %arg_f64 : f64) -> (f16, f32, f64) {
+ %result16 = math.powf %arg_f16, %arg_f16 : f16
+ // CHECK: llvm.call @__ocml_pow_f16(%{{.*}}, %{{.*}}) : (f16, f16) -> f16
%result32 = math.powf %arg_f32, %arg_f32 : f32
// CHECK: llvm.call @__ocml_pow_f32(%{{.*}}, %{{.*}}) : (f32, f32) -> f32
%result64 = math.powf %arg_f64, %arg_f64 : f64
// CHECK: llvm.call @__ocml_pow_f64(%{{.*}}, %{{.*}}) : (f64, f64) -> f64
- func.return %result32, %result64 : f32, f64
+ func.return %result16, %result32, %result64 : f16, f32, f64
}
}
// -----
gpu.module @test_module {
+ // CHECK: llvm.func @__ocml_erf_f16(f16) -> f16
// CHECK: llvm.func @__ocml_erf_f32(f32) -> f32
// CHECK: llvm.func @__ocml_erf_f64(f64) -> f64
// CHECK-LABEL: func @gpu_erf
- func.func @gpu_erf(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) {
+ func.func @gpu_erf(%arg_f16 : f16, %arg_f32 : f32, %arg_f64 : f64) -> (f16, f32, f64) {
+ %result16 = math.erf %arg_f16 : f16
+ // CHECK: llvm.call @__ocml_erf_f16(%{{.*}}) : (f16) -> f16
%result32 = math.erf %arg_f32 : f32
// CHECK: llvm.call @__ocml_erf_f32(%{{.*}}) : (f32) -> f32
%result64 = math.erf %arg_f64 : f64
// CHECK: llvm.call @__ocml_erf_f64(%{{.*}}) : (f64) -> f64
- func.return %result32, %result64 : f32, f64
+ func.return %result16, %result32, %result64 : f16, f32, f64
}
}
@@ -543,9 +581,9 @@ gpu.module @test_module {
// -----
gpu.module @module {
-// CHECK-LABEL: @spirv_exp
+// CHECK-LABEL: @spirv_sin
// CHECK: llvm.call @__ocml_sin_f32
- spirv.func @spirv_exp(%arg0: vector<4xf32>) -> vector<4xf32> "None" {
+ spirv.func @spirv_sin(%arg0: vector<4xf32>) -> vector<4xf32> "None" {
%0 = math.sin %arg0 : vector<4xf32>
spirv.ReturnValue %0 : vector<4xf32>
}
@@ -602,15 +640,18 @@ gpu.module @test_module {
// -----
gpu.module @test_module {
+ // CHECK: llvm.func @__ocml_fmod_f16(f16, f16) -> f16
// CHECK: llvm.func @__ocml_fmod_f32(f32, f32) -> f32
// CHECK: llvm.func @__ocml_fmod_f64(f64, f64) -> f64
// CHECK-LABEL: func @gpu_fmod
- func.func @gpu_fmod(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) {
+ func.func @gpu_fmod(%arg_f16 : f16, %arg_f32 : f32, %arg_f64 : f64) -> (f16, f32, f64) {
+ %result16 = arith.remf %arg_f16, %arg_f16 : f16
+ // CHECK: llvm.call @__ocml_fmod_f16(%{{.*}}, %{{.*}}) : (f16, f16) -> f16
%result32 = arith.remf %arg_f32, %arg_f32 : f32
// CHECK: llvm.call @__ocml_fmod_f32(%{{.*}}, %{{.*}}) : (f32, f32) -> f32
%result64 = arith.remf %arg_f64, %arg_f64 : f64
// CHECK: llvm.call @__ocml_fmod_f64(%{{.*}}, %{{.*}}) : (f64, f64) -> f64
- func.return %result32, %result64 : f32, f64
+ func.return %result16, %result32, %result64 : f16, f32, f64
}
}
diff --git a/mlir/test/Conversion/MathToROCDL/math-to-rocdl.mlir b/mlir/test/Conversion/MathToROCDL/math-to-rocdl.mlir
index 19d89e0..ddd96bf 100644
--- a/mlir/test/Conversion/MathToROCDL/math-to-rocdl.mlir
+++ b/mlir/test/Conversion/MathToROCDL/math-to-rocdl.mlir
@@ -1,399 +1,483 @@
// RUN: mlir-opt %s -convert-math-to-rocdl -split-input-file | FileCheck %s
module @test_module {
+ // CHECK: llvm.func @__ocml_fmod_f16(f16, f16) -> f16
// CHECK: llvm.func @__ocml_fmod_f32(f32, f32) -> f32
// CHECK: llvm.func @__ocml_fmod_f64(f64, f64) -> f64
// CHECK-LABEL: func @arith_remf
- func.func @arith_remf(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) {
+ func.func @arith_remf(%arg_f16 : f16, %arg_f32 : f32, %arg_f64 : f64) -> (f16, f32, f64) {
+ %result16 = arith.remf %arg_f16, %arg_f16 : f16
+ // CHECK: llvm.call @__ocml_fmod_f16(%{{.*}}, %{{.*}}) : (f16, f16) -> f16
%result32 = arith.remf %arg_f32, %arg_f32 : f32
// CHECK: llvm.call @__ocml_fmod_f32(%{{.*}}, %{{.*}}) : (f32, f32) -> f32
%result64 = arith.remf %arg_f64, %arg_f64 : f64
// CHECK: llvm.call @__ocml_fmod_f64(%{{.*}}, %{{.*}}) : (f64, f64) -> f64
- func.return %result32, %result64 : f32, f64
+ func.return %result16, %result32, %result64 : f16, f32, f64
}
}
// -----
module @test_module {
+ // CHECK: llvm.func @__ocml_acos_f16(f16) -> f16
// CHECK: llvm.func @__ocml_acos_f32(f32) -> f32
// CHECK: llvm.func @__ocml_acos_f64(f64) -> f64
// CHECK-LABEL: func @math_acos
- func.func @math_acos(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) {
+ func.func @math_acos(%arg_f16 : f16, %arg_f32 : f32, %arg_f64 : f64) -> (f16, f32, f64) {
+ %result16 = math.acos %arg_f16 : f16
+ // CHECK: llvm.call @__ocml_acos_f16(%{{.*}}) : (f16) -> f16
%result32 = math.acos %arg_f32 : f32
// CHECK: llvm.call @__ocml_acos_f32(%{{.*}}) : (f32) -> f32
%result64 = math.acos %arg_f64 : f64
// CHECK: llvm.call @__ocml_acos_f64(%{{.*}}) : (f64) -> f64
- func.return %result32, %result64 : f32, f64
+ func.return %result16, %result32, %result64 : f16, f32, f64
}
}
// -----
module @test_module {
+ // CHECK: llvm.func @__ocml_acosh_f16(f16) -> f16
// CHECK: llvm.func @__ocml_acosh_f32(f32) -> f32
// CHECK: llvm.func @__ocml_acosh_f64(f64) -> f64
// CHECK-LABEL: func @math_acosh
- func.func @math_acosh(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) {
+ func.func @math_acosh(%arg_f16 : f16, %arg_f32 : f32, %arg_f64 : f64) -> (f16, f32, f64) {
+ %result16 = math.acosh %arg_f16 : f16
+ // CHECK: llvm.call @__ocml_acosh_f16(%{{.*}}) : (f16) -> f16
%result32 = math.acosh %arg_f32 : f32
// CHECK: llvm.call @__ocml_acosh_f32(%{{.*}}) : (f32) -> f32
%result64 = math.acosh %arg_f64 : f64
// CHECK: llvm.call @__ocml_acosh_f64(%{{.*}}) : (f64) -> f64
- func.return %result32, %result64 : f32, f64
+ func.return %result16, %result32, %result64 : f16, f32, f64
}
}
// -----
module @test_module {
+ // CHECK: llvm.func @__ocml_asin_f16(f16) -> f16
// CHECK: llvm.func @__ocml_asin_f32(f32) -> f32
// CHECK: llvm.func @__ocml_asin_f64(f64) -> f64
// CHECK-LABEL: func @math_asin
- func.func @math_asin(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) {
+ func.func @math_asin(%arg_f16 : f16, %arg_f32 : f32, %arg_f64 : f64) -> (f16, f32, f64) {
+ %result16 = math.asin %arg_f16 : f16
+ // CHECK: llvm.call @__ocml_asin_f16(%{{.*}}) : (f16) -> f16
%result32 = math.asin %arg_f32 : f32
// CHECK: llvm.call @__ocml_asin_f32(%{{.*}}) : (f32) -> f32
%result64 = math.asin %arg_f64 : f64
// CHECK: llvm.call @__ocml_asin_f64(%{{.*}}) : (f64) -> f64
- func.return %result32, %result64 : f32, f64
+ func.return %result16, %result32, %result64 : f16, f32, f64
}
}
// -----
module @test_module {
+ // CHECK: llvm.func @__ocml_asinh_f16(f16) -> f16
// CHECK: llvm.func @__ocml_asinh_f32(f32) -> f32
// CHECK: llvm.func @__ocml_asinh_f64(f64) -> f64
// CHECK-LABEL: func @math_asinh
- func.func @math_asinh(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) {
+ func.func @math_asinh(%arg_f16 : f16, %arg_f32 : f32, %arg_f64 : f64) -> (f16, f32, f64) {
+ %result16 = math.asinh %arg_f16 : f16
+ // CHECK: llvm.call @__ocml_asinh_f16(%{{.*}}) : (f16) -> f16
%result32 = math.asinh %arg_f32 : f32
// CHECK: llvm.call @__ocml_asinh_f32(%{{.*}}) : (f32) -> f32
%result64 = math.asinh %arg_f64 : f64
// CHECK: llvm.call @__ocml_asinh_f64(%{{.*}}) : (f64) -> f64
- func.return %result32, %result64 : f32, f64
+ func.return %result16, %result32, %result64 : f16, f32, f64
}
}
// -----
module @test_module {
+ // CHECK: llvm.func @__ocml_atan_f16(f16) -> f16
// CHECK: llvm.func @__ocml_atan_f32(f32) -> f32
// CHECK: llvm.func @__ocml_atan_f64(f64) -> f64
// CHECK-LABEL: func @math_atan
- func.func @math_atan(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) {
+ func.func @math_atan(%arg_f16 : f16, %arg_f32 : f32, %arg_f64 : f64) -> (f16, f32, f64) {
+ %result16 = math.atan %arg_f16 : f16
+ // CHECK: llvm.call @__ocml_atan_f16(%{{.*}}) : (f16) -> f16
%result32 = math.atan %arg_f32 : f32
// CHECK: llvm.call @__ocml_atan_f32(%{{.*}}) : (f32) -> f32
%result64 = math.atan %arg_f64 : f64
// CHECK: llvm.call @__ocml_atan_f64(%{{.*}}) : (f64) -> f64
- func.return %result32, %result64 : f32, f64
+ func.return %result16, %result32, %result64 : f16, f32, f64
}
}
// -----
module @test_module {
+ // CHECK: llvm.func @__ocml_atanh_f16(f16) -> f16
// CHECK: llvm.func @__ocml_atanh_f32(f32) -> f32
// CHECK: llvm.func @__ocml_atanh_f64(f64) -> f64
// CHECK-LABEL: func @math_atanh
- func.func @math_atanh(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) {
+ func.func @math_atanh(%arg_f16 : f16, %arg_f32 : f32, %arg_f64 : f64) -> (f16, f32, f64) {
+ %result16 = math.atanh %arg_f16 : f16
+ // CHECK: llvm.call @__ocml_atanh_f16(%{{.*}}) : (f16) -> f16
%result32 = math.atanh %arg_f32 : f32
// CHECK: llvm.call @__ocml_atanh_f32(%{{.*}}) : (f32) -> f32
%result64 = math.atanh %arg_f64 : f64
// CHECK: llvm.call @__ocml_atanh_f64(%{{.*}}) : (f64) -> f64
- func.return %result32, %result64 : f32, f64
+ func.return %result16, %result32, %result64 : f16, f32, f64
}
}
// -----
module @test_module {
+ // CHECK: llvm.func @__ocml_atan2_f16(f16, f16) -> f16
// CHECK: llvm.func @__ocml_atan2_f32(f32, f32) -> f32
// CHECK: llvm.func @__ocml_atan2_f64(f64, f64) -> f64
// CHECK-LABEL: func @math_atan2
- func.func @math_atan2(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) {
+ func.func @math_atan2(%arg_f16 : f16, %arg_f32 : f32, %arg_f64 : f64) -> (f16, f32, f64) {
+ %result16 = math.atan2 %arg_f16, %arg_f16 : f16
+ // CHECK: llvm.call @__ocml_atan2_f16(%{{.*}}, %{{.*}}) : (f16, f16) -> f16
%result32 = math.atan2 %arg_f32, %arg_f32 : f32
// CHECK: llvm.call @__ocml_atan2_f32(%{{.*}}, %{{.*}}) : (f32, f32) -> f32
%result64 = math.atan2 %arg_f64, %arg_f64 : f64
// CHECK: llvm.call @__ocml_atan2_f64(%{{.*}}, %{{.*}}) : (f64, f64) -> f64
- func.return %result32, %result64 : f32, f64
+ func.return %result16, %result32, %result64 : f16, f32, f64
}
}
// -----
module @test_module {
+ // CHECK: llvm.func @__ocml_cbrt_f16(f16) -> f16
// CHECK: llvm.func @__ocml_cbrt_f32(f32) -> f32
// CHECK: llvm.func @__ocml_cbrt_f64(f64) -> f64
// CHECK-LABEL: func @math_cbrt
- func.func @math_cbrt(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) {
+ func.func @math_cbrt(%arg_f16 : f16, %arg_f32 : f32, %arg_f64 : f64) -> (f16, f32, f64) {
+ %result16 = math.cbrt %arg_f16 : f16
+ // CHECK: llvm.call @__ocml_cbrt_f16(%{{.*}}) : (f16) -> f16
%result32 = math.cbrt %arg_f32 : f32
// CHECK: llvm.call @__ocml_cbrt_f32(%{{.*}}) : (f32) -> f32
%result64 = math.cbrt %arg_f64 : f64
// CHECK: llvm.call @__ocml_cbrt_f64(%{{.*}}) : (f64) -> f64
- func.return %result32, %result64 : f32, f64
+ func.return %result16, %result32, %result64 : f16, f32, f64
}
}
// -----
module @test_module {
+ // CHECK: llvm.func @__ocml_ceil_f16(f16) -> f16
// CHECK: llvm.func @__ocml_ceil_f32(f32) -> f32
// CHECK: llvm.func @__ocml_ceil_f64(f64) -> f64
// CHECK-LABEL: func @math_ceil
- func.func @math_ceil(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) {
+ func.func @math_ceil(%arg_f16 : f16, %arg_f32 : f32, %arg_f64 : f64) -> (f16, f32, f64) {
+ %result16 = math.ceil %arg_f16 : f16
+ // CHECK: llvm.call @__ocml_ceil_f16(%{{.*}}) : (f16) -> f16
%result32 = math.ceil %arg_f32 : f32
// CHECK: llvm.call @__ocml_ceil_f32(%{{.*}}) : (f32) -> f32
%result64 = math.ceil %arg_f64 : f64
// CHECK: llvm.call @__ocml_ceil_f64(%{{.*}}) : (f64) -> f64
- func.return %result32, %result64 : f32, f64
+ func.return %result16, %result32, %result64 : f16, f32, f64
}
}
// -----
module @test_module {
+ // CHECK: llvm.func @__ocml_cos_f16(f16) -> f16
// CHECK: llvm.func @__ocml_cos_f32(f32) -> f32
// CHECK: llvm.func @__ocml_cos_f64(f64) -> f64
// CHECK-LABEL: func @math_cos
- func.func @math_cos(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) {
+ func.func @math_cos(%arg_f16 : f16, %arg_f32 : f32, %arg_f64 : f64) -> (f16, f32, f64) {
+ %result16 = math.cos %arg_f16 : f16
+ // CHECK: llvm.call @__ocml_cos_f16(%{{.*}}) : (f16) -> f16
%result32 = math.cos %arg_f32 : f32
// CHECK: llvm.call @__ocml_cos_f32(%{{.*}}) : (f32) -> f32
%result64 = math.cos %arg_f64 : f64
// CHECK: llvm.call @__ocml_cos_f64(%{{.*}}) : (f64) -> f64
- func.return %result32, %result64 : f32, f64
+ func.return %result16, %result32, %result64 : f16, f32, f64
}
}
// -----
module @test_module {
+ // CHECK: llvm.func @__ocml_cosh_f16(f16) -> f16
// CHECK: llvm.func @__ocml_cosh_f32(f32) -> f32
// CHECK: llvm.func @__ocml_cosh_f64(f64) -> f64
// CHECK-LABEL: func @math_cosh
- func.func @math_cosh(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) {
+ func.func @math_cosh(%arg_f16 : f16, %arg_f32 : f32, %arg_f64 : f64) -> (f16, f32, f64) {
+ %result16 = math.cosh %arg_f16 : f16
+ // CHECK: llvm.call @__ocml_cosh_f16(%{{.*}}) : (f16) -> f16
%result32 = math.cosh %arg_f32 : f32
// CHECK: llvm.call @__ocml_cosh_f32(%{{.*}}) : (f32) -> f32
%result64 = math.cosh %arg_f64 : f64
// CHECK: llvm.call @__ocml_cosh_f64(%{{.*}}) : (f64) -> f64
- func.return %result32, %result64 : f32, f64
+ func.return %result16, %result32, %result64 : f16, f32, f64
}
}
// -----
module @test_module {
+ // CHECK: llvm.func @__ocml_sinh_f16(f16) -> f16
// CHECK: llvm.func @__ocml_sinh_f32(f32) -> f32
// CHECK: llvm.func @__ocml_sinh_f64(f64) -> f64
// CHECK-LABEL: func @math_sinh
- func.func @math_sinh(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) {
+ func.func @math_sinh(%arg_f16 : f16, %arg_f32 : f32, %arg_f64 : f64) -> (f16, f32, f64) {
+ %result16 = math.sinh %arg_f16 : f16
+ // CHECK: llvm.call @__ocml_sinh_f16(%{{.*}}) : (f16) -> f16
%result32 = math.sinh %arg_f32 : f32
// CHECK: llvm.call @__ocml_sinh_f32(%{{.*}}) : (f32) -> f32
%result64 = math.sinh %arg_f64 : f64
// CHECK: llvm.call @__ocml_sinh_f64(%{{.*}}) : (f64) -> f64
- func.return %result32, %result64 : f32, f64
+ func.return %result16, %result32, %result64 : f16, f32, f64
}
}
// -----
module @test_module {
+ // CHECK: llvm.func @__ocml_exp_f16(f16) -> f16
// CHECK: llvm.func @__ocml_exp_f64(f64) -> f64
// CHECK-LABEL: func @math_exp
- func.func @math_exp(%arg_f64 : f64) -> (f64) {
+ func.func @math_exp(%arg_f16 : f16, %arg_f64 : f64) -> (f16, f64) {
+ %result16 = math.exp %arg_f16 : f16
+ // CHECK: llvm.call @__ocml_exp_f16(%{{.*}}) : (f16) -> f16
%result64 = math.exp %arg_f64 : f64
// CHECK: llvm.call @__ocml_exp_f64(%{{.*}}) : (f64) -> f64
- func.return %result64 : f64
+ func.return %result16, %result64 : f16, f64
}
}
// -----
module @test_module {
+ // CHECK: llvm.func @__ocml_exp2_f16(f16) -> f16
// CHECK: llvm.func @__ocml_exp2_f32(f32) -> f32
// CHECK: llvm.func @__ocml_exp2_f64(f64) -> f64
// CHECK-LABEL: func @math_exp2
- func.func @math_exp2(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) {
+ func.func @math_exp2(%arg_f16 : f16, %arg_f32 : f32, %arg_f64 : f64) -> (f16, f32, f64) {
+ %result16 = math.exp2 %arg_f16 : f16
+ // CHECK: llvm.call @__ocml_exp2_f16(%{{.*}}) : (f16) -> f16
%result32 = math.exp2 %arg_f32 : f32
// CHECK: llvm.call @__ocml_exp2_f32(%{{.*}}) : (f32) -> f32
%result64 = math.exp2 %arg_f64 : f64
// CHECK: llvm.call @__ocml_exp2_f64(%{{.*}}) : (f64) -> f64
- func.return %result32, %result64 : f32, f64
+ func.return %result16, %result32, %result64 : f16, f32, f64
}
}
// -----
module @test_module {
+ // CHECK: llvm.func @__ocml_expm1_f16(f16) -> f16
// CHECK: llvm.func @__ocml_expm1_f32(f32) -> f32
// CHECK: llvm.func @__ocml_expm1_f64(f64) -> f64
// CHECK-LABEL: func @math_expm1
- func.func @math_expm1(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) {
+ func.func @math_expm1(%arg_f16 : f16, %arg_f32 : f32, %arg_f64 : f64) -> (f16, f32, f64) {
+ %result16 = math.expm1 %arg_f16 : f16
+ // CHECK: llvm.call @__ocml_expm1_f16(%{{.*}}) : (f16) -> f16
%result32 = math.expm1 %arg_f32 : f32
// CHECK: llvm.call @__ocml_expm1_f32(%{{.*}}) : (f32) -> f32
%result64 = math.expm1 %arg_f64 : f64
// CHECK: llvm.call @__ocml_expm1_f64(%{{.*}}) : (f64) -> f64
- func.return %result32, %result64 : f32, f64
+ func.return %result16, %result32, %result64 : f16, f32, f64
}
}
// -----
module @test_module {
+ // CHECK: llvm.func @__ocml_floor_f16(f16) -> f16
// CHECK: llvm.func @__ocml_floor_f32(f32) -> f32
// CHECK: llvm.func @__ocml_floor_f64(f64) -> f64
// CHECK-LABEL: func @math_floor
- func.func @math_floor(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) {
+ func.func @math_floor(%arg_f16 : f16, %arg_f32 : f32, %arg_f64 : f64) -> (f16, f32, f64) {
+ %result16 = math.floor %arg_f16 : f16
+ // CHECK: llvm.call @__ocml_floor_f16(%{{.*}}) : (f16) -> f16
%result32 = math.floor %arg_f32 : f32
// CHECK: llvm.call @__ocml_floor_f32(%{{.*}}) : (f32) -> f32
%result64 = math.floor %arg_f64 : f64
// CHECK: llvm.call @__ocml_floor_f64(%{{.*}}) : (f64) -> f64
- func.return %result32, %result64 : f32, f64
+ func.return %result16, %result32, %result64 : f16, f32, f64
}
}
// -----
module @test_module {
+ // CHECK: llvm.func @__ocml_log_f16(f16) -> f16
// CHECK: llvm.func @__ocml_log_f64(f64) -> f64
// CHECK-LABEL: func @math_log
- func.func @math_log(%arg_f64 : f64) -> (f64) {
+ func.func @math_log(%arg_f16 : f16, %arg_f64 : f64) -> (f16, f64) {
+ %result16 = math.log %arg_f16 : f16
+ // CHECK: llvm.call @__ocml_log_f16(%{{.*}}) : (f16) -> f16
%result64 = math.log %arg_f64 : f64
// CHECK: llvm.call @__ocml_log_f64(%{{.*}}) : (f64) -> f64
- func.return %result64 : f64
+ func.return %result16, %result64 : f16, f64
}
}
// -----
module @test_module {
+ // CHECK: llvm.func @__ocml_log10_f16(f16) -> f16
// CHECK: llvm.func @__ocml_log10_f32(f32) -> f32
// CHECK: llvm.func @__ocml_log10_f64(f64) -> f64
// CHECK-LABEL: func @math_log10
- func.func @math_log10(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) {
+ func.func @math_log10(%arg_f16 : f16, %arg_f32 : f32, %arg_f64 : f64) -> (f16, f32, f64) {
+ %result16 = math.log10 %arg_f16 : f16
+ // CHECK: llvm.call @__ocml_log10_f16(%{{.*}}) : (f16) -> f16
%result32 = math.log10 %arg_f32 : f32
// CHECK: llvm.call @__ocml_log10_f32(%{{.*}}) : (f32) -> f32
%result64 = math.log10 %arg_f64 : f64
// CHECK: llvm.call @__ocml_log10_f64(%{{.*}}) : (f64) -> f64
- func.return %result32, %result64 : f32, f64
+ func.return %result16, %result32, %result64 : f16, f32, f64
}
}
// -----
module @test_module {
+ // CHECK: llvm.func @__ocml_log1p_f16(f16) -> f16
// CHECK: llvm.func @__ocml_log1p_f32(f32) -> f32
// CHECK: llvm.func @__ocml_log1p_f64(f64) -> f64
// CHECK-LABEL: func @math_log1p
- func.func @math_log1p(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) {
+ func.func @math_log1p(%arg_f16 : f16, %arg_f32 : f32, %arg_f64 : f64) -> (f16, f32, f64) {
+ %result16 = math.log1p %arg_f16 : f16
+ // CHECK: llvm.call @__ocml_log1p_f16(%{{.*}}) : (f16) -> f16
%result32 = math.log1p %arg_f32 : f32
// CHECK: llvm.call @__ocml_log1p_f32(%{{.*}}) : (f32) -> f32
%result64 = math.log1p %arg_f64 : f64
// CHECK: llvm.call @__ocml_log1p_f64(%{{.*}}) : (f64) -> f64
- func.return %result32, %result64 : f32, f64
+ func.return %result16, %result32, %result64 : f16, f32, f64
}
}
// -----
module @test_module {
+ // CHECK: llvm.func @__ocml_pow_f16(f16, f16) -> f16
// CHECK: llvm.func @__ocml_pow_f32(f32, f32) -> f32
// CHECK: llvm.func @__ocml_pow_f64(f64, f64) -> f64
// CHECK-LABEL: func @math_powf
- func.func @math_powf(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) {
+ func.func @math_powf(%arg_f16 : f16, %arg_f32 : f32, %arg_f64 : f64) -> (f16, f32, f64) {
+ %result16 = math.powf %arg_f16, %arg_f16 : f16
+ // CHECK: llvm.call @__ocml_pow_f16(%{{.*}}, %{{.*}}) : (f16, f16) -> f16
%result32 = math.powf %arg_f32, %arg_f32 : f32
// CHECK: llvm.call @__ocml_pow_f32(%{{.*}}, %{{.*}}) : (f32, f32) -> f32
%result64 = math.powf %arg_f64, %arg_f64 : f64
// CHECK: llvm.call @__ocml_pow_f64(%{{.*}}, %{{.*}}) : (f64, f64) -> f64
- func.return %result32, %result64 : f32, f64
+ func.return %result16, %result32, %result64 : f16, f32, f64
}
}
// -----
module @test_module {
+ // CHECK: llvm.func @__ocml_rsqrt_f16(f16) -> f16
// CHECK: llvm.func @__ocml_rsqrt_f32(f32) -> f32
// CHECK: llvm.func @__ocml_rsqrt_f64(f64) -> f64
// CHECK-LABEL: func @math_rsqrt
- func.func @math_rsqrt(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) {
+ func.func @math_rsqrt(%arg_f16 : f16, %arg_f32 : f32, %arg_f64 : f64) -> (f16, f32, f64) {
+ %result16 = math.rsqrt %arg_f16 : f16
+ // CHECK: llvm.call @__ocml_rsqrt_f16(%{{.*}}) : (f16) -> f16
%result32 = math.rsqrt %arg_f32 : f32
// CHECK: llvm.call @__ocml_rsqrt_f32(%{{.*}}) : (f32) -> f32
%result64 = math.rsqrt %arg_f64 : f64
// CHECK: llvm.call @__ocml_rsqrt_f64(%{{.*}}) : (f64) -> f64
- func.return %result32, %result64 : f32, f64
+ func.return %result16, %result32, %result64 : f16, f32, f64
}
}
// -----
module @test_module {
+ // CHECK: llvm.func @__ocml_sin_f16(f16) -> f16
// CHECK: llvm.func @__ocml_sin_f32(f32) -> f32
// CHECK: llvm.func @__ocml_sin_f64(f64) -> f64
// CHECK-LABEL: func @math_sin
- func.func @math_sin(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) {
+ func.func @math_sin(%arg_f16 : f16, %arg_f32 : f32, %arg_f64 : f64) -> (f16, f32, f64) {
+ %result16 = math.sin %arg_f16 : f16
+ // CHECK: llvm.call @__ocml_sin_f16(%{{.*}}) : (f16) -> f16
%result32 = math.sin %arg_f32 : f32
// CHECK: llvm.call @__ocml_sin_f32(%{{.*}}) : (f32) -> f32
%result64 = math.sin %arg_f64 : f64
// CHECK: llvm.call @__ocml_sin_f64(%{{.*}}) : (f64) -> f64
- func.return %result32, %result64 : f32, f64
+ func.return %result16, %result32, %result64 : f16, f32, f64
}
}
// -----
module @test_module {
+ // CHECK: llvm.func @__ocml_tanh_f16(f16) -> f16
// CHECK: llvm.func @__ocml_tanh_f32(f32) -> f32
// CHECK: llvm.func @__ocml_tanh_f64(f64) -> f64
// CHECK-LABEL: func @math_tanh
- func.func @math_tanh(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) {
+ func.func @math_tanh(%arg_f16 : f16, %arg_f32 : f32, %arg_f64 : f64) -> (f16, f32, f64) {
+ %result16 = math.tanh %arg_f16 : f16
+ // CHECK: llvm.call @__ocml_tanh_f16(%{{.*}}) : (f16) -> f16
%result32 = math.tanh %arg_f32 : f32
// CHECK: llvm.call @__ocml_tanh_f32(%{{.*}}) : (f32) -> f32
%result64 = math.tanh %arg_f64 : f64
// CHECK: llvm.call @__ocml_tanh_f64(%{{.*}}) : (f64) -> f64
- func.return %result32, %result64 : f32, f64
+ func.return %result16, %result32, %result64 : f16, f32, f64
}
}
// -----
module @test_module {
+ // CHECK: llvm.func @__ocml_tan_f16(f16) -> f16
// CHECK: llvm.func @__ocml_tan_f32(f32) -> f32
// CHECK: llvm.func @__ocml_tan_f64(f64) -> f64
// CHECK-LABEL: func @math_tan
- func.func @math_tan(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) {
+ func.func @math_tan(%arg_f16 : f16, %arg_f32 : f32, %arg_f64 : f64) -> (f16, f32, f64) {
+ %result16 = math.tan %arg_f16 : f16
+ // CHECK: llvm.call @__ocml_tan_f16(%{{.*}}) : (f16) -> f16
%result32 = math.tan %arg_f32 : f32
// CHECK: llvm.call @__ocml_tan_f32(%{{.*}}) : (f32) -> f32
%result64 = math.tan %arg_f64 : f64
// CHECK: llvm.call @__ocml_tan_f64(%{{.*}}) : (f64) -> f64
- func.return %result32, %result64 : f32, f64
+ func.return %result16, %result32, %result64 : f16, f32, f64
}
}
// -----
module @test_module {
+ // CHECK: llvm.func @__ocml_erf_f16(f16) -> f16
// CHECK: llvm.func @__ocml_erf_f32(f32) -> f32
// CHECK: llvm.func @__ocml_erf_f64(f64) -> f64
// CHECK-LABEL: func @math_erf
- func.func @math_erf(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) {
+ func.func @math_erf(%arg_f16 : f16, %arg_f32 : f32, %arg_f64 : f64) -> (f16, f32, f64) {
+ %result16 = math.erf %arg_f16 : f16
+ // CHECK: llvm.call @__ocml_erf_f16(%{{.*}}) : (f16) -> f16
%result32 = math.erf %arg_f32 : f32
// CHECK: llvm.call @__ocml_erf_f32(%{{.*}}) : (f32) -> f32
%result64 = math.erf %arg_f64 : f64
// CHECK: llvm.call @__ocml_erf_f64(%{{.*}}) : (f64) -> f64
- func.return %result32, %result64 : f32, f64
+ func.return %result16, %result32, %result64 : f16, f32, f64
}
}
// -----
module @test_module {
- // CHECK: llvm.func @__ocml_fmod_f32(f32, f32) -> f32
- // CHECK: llvm.func @__ocml_fmod_f64(f64, f64) -> f64
- // CHECK-LABEL: func @arith_remf
- func.func @arith_remf(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) {
- %result32 = arith.remf %arg_f32, %arg_f32 : f32
- // CHECK: llvm.call @__ocml_fmod_f32(%{{.*}}, %{{.*}}) : (f32, f32) -> f32
- %result64 = arith.remf %arg_f64, %arg_f64 : f64
- // CHECK: llvm.call @__ocml_fmod_f64(%{{.*}}, %{{.*}}) : (f64, f64) -> f64
- func.return %result32, %result64 : f32, f64
+ // CHECK: llvm.func @__ocml_sin_f16(f16) -> f16
+ // CHECK: llvm.func @__ocml_sin_f32(f32) -> f32
+ // CHECK: llvm.func @__ocml_sin_f64(f64) -> f64
+ // CHECK-LABEL: func @math_casting
+ func.func @math_casting(%arg_f16 : f16, %arg_f32 : f32, %arg_f64 : f64, %arg_bf16 : bf16) -> (f16, f32, f64, bf16) {
+ %resultf16 = math.sin %arg_f16 : f16
+ // CHECK: llvm.call @__ocml_sin_f16(%{{.*}}) : (f16) -> f16
+ %resultf32 = math.sin %arg_f32 : f32
+ // CHECK: llvm.call @__ocml_sin_f32(%{{.*}}) : (f32) -> f32
+ %resultf64 = math.sin %arg_f64 : f64
+ // CHECK: llvm.call @__ocml_sin_f64(%{{.*}}) : (f64) -> f64
+ %resultbf16 = math.sin %arg_bf16 : bf16
+ // CHECK: llvm.fpext %{{.*}} : bf16 to f32
+ // CHECK-NEXT: llvm.call @__ocml_sin_f32(%{{.*}}) : (f32) -> f32
+ // CHECK-NEXT: llvm.fptrunc %{{.*}} : f32 to bf16
+ func.return %resultf16, %resultf32, %resultf64, %resultbf16 : f16, f32, f64, bf16
}
}
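One exception from @math_casting above is worth spelling out: the CHECK lines show no __ocml_sin_bf16 call, so bf16 inputs are widened to f32, the f32 OCML function is invoked, and the result is truncated back. The emitted sequence, reconstructed from those CHECK lines (value names illustrative):

  %ext  = llvm.fpext %arg_bf16 : bf16 to f32
  %call = llvm.call @__ocml_sin_f32(%ext) : (f32) -> f32
  %res  = llvm.fptrunc %call : f32 to bf16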
-
diff --git a/mlir/test/Dialect/Linalg/vectorize-convolution.mlir b/mlir/test/Dialect/Linalg/vectorize-convolution.mlir
index 93e36a6..7f4b9b9 100644
--- a/mlir/test/Dialect/Linalg/vectorize-convolution.mlir
+++ b/mlir/test/Dialect/Linalg/vectorize-convolution.mlir
@@ -39,6 +39,7 @@ func.func @conv1d_nwc_4x2x8_memref(%input: memref<4x6x3xf32>, %filter: memref<1x
// CHECK: %[[CONTRACT_0:.+]] = vector.contract {
// CHECK-SAME: indexing_maps = [#[[INPUT_MAP]], #[[FILTER_MAP]], #[[OUTPUT_MAP]]],
// CHECK-SAME: iterator_types = ["parallel", "parallel", "parallel", "reduction"]
+// CHECK-SAME: kind = #vector.kind<add>
// CHECK-SAME: %[[V_INPUT_0]], %[[V_FILTER]], %[[V_OUTPUT_0]]
// CHECK-SAME: : vector<4x1x3xf32>, vector<3x8xf32> into vector<4x1x8xf32>
@@ -46,6 +47,7 @@ func.func @conv1d_nwc_4x2x8_memref(%input: memref<4x6x3xf32>, %filter: memref<1x
// CHECK: %[[CONTRACT_1:.+]] = vector.contract {
// CHECK-SAME: indexing_maps = [#[[INPUT_MAP]], #[[FILTER_MAP]], #[[OUTPUT_MAP]]],
// CHECK-SAME: iterator_types = ["parallel", "parallel", "parallel", "reduction"]
+// CHECK-SAME: kind = #vector.kind<add>
// CHECK-SAME: %[[V_INPUT_1]], %[[V_FILTER]], %[[V_OUTPUT_1]]
// CHECK-SAME: : vector<4x1x3xf32>, vector<3x8xf32> into vector<4x1x8xf32>
@@ -61,6 +63,36 @@ func.func @conv1d_nwc_4x2x8_memref(%input: memref<4x6x3xf32>, %filter: memref<1x
// -----
+// This test is the same as the one above, but for the i1 element type; the only
+// difference is that the combining kind for `vector.contract` is `OR`.
+func.func @conv1d_nwc_4x2x8_memref_i1(%input: memref<4x6x3xi1>, %filter: memref<1x3x8xi1>, %output: memref<4x2x8xi1>) {
+ linalg.conv_1d_nwc_wcf
+ {dilations = dense<1> : tensor<1xi64>, strides = dense<3> : tensor<1xi64>}
+ ins(%input, %filter : memref<4x6x3xi1>, memref<1x3x8xi1>)
+ outs(%output : memref<4x2x8xi1>)
+ return
+}
+// CHECK: #[[INPUT_MAP:.+]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>
+// CHECK: #[[FILTER_MAP:.+]] = affine_map<(d0, d1, d2, d3) -> (d3, d2)>
+// CHECK: #[[OUTPUT_MAP:.+]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>
+
+// CHECK: func @conv1d_nwc_4x2x8_memref_i1
+/// w == 0, kw == 0
+// CHECK: %[[CONTRACT_0:.+]] = vector.contract {
+// CHECK-SAME: indexing_maps = [#[[INPUT_MAP]], #[[FILTER_MAP]], #[[OUTPUT_MAP]]],
+// CHECK-SAME: iterator_types = ["parallel", "parallel", "parallel", "reduction"]
+// CHECK-SAME: kind = #vector.kind<or>
+// CHECK-SAME: : vector<4x1x3xi1>, vector<3x8xi1> into vector<4x1x8xi1>
+
+/// w == 1, kw == 0
+// CHECK: %[[CONTRACT_1:.+]] = vector.contract {
+// CHECK-SAME: indexing_maps = [#[[INPUT_MAP]], #[[FILTER_MAP]], #[[OUTPUT_MAP]]],
+// CHECK-SAME: iterator_types = ["parallel", "parallel", "parallel", "reduction"]
+// CHECK-SAME: kind = #vector.kind<or>
+// CHECK-SAME: : vector<4x1x3xi1>, vector<3x8xi1> into vector<4x1x8xi1>
+
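The choice of combining kind follows from i1 arithmetic: the contraction's elementwise multiply is a logical AND on 1-bit values, and accumulating partial products with <add> would wrap modulo 2, so the reduction combines with OR instead. The shape being checked, with the affine maps captured above inlined (a sketch, not patch content):

  #input_map  = affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>
  #filter_map = affine_map<(d0, d1, d2, d3) -> (d3, d2)>
  #output_map = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>

  %acc = vector.contract {
           indexing_maps = [#input_map, #filter_map, #output_map],
           iterator_types = ["parallel", "parallel", "parallel", "reduction"],
           kind = #vector.kind<or>
         } %in, %flt, %out : vector<4x1x3xi1>, vector<3x8xi1> into vector<4x1x8xi1>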
+// -----
+
// The i8i8i32 case is similar to f32 case, so checking one case is enough for
// test coverage.
func.func @conv1d_nwc_4x2x8_i8i8i32_memref(%input: memref<4x6x3xi8>, %filter: memref<1x3x8xi8>, %output: memref<4x2x8xi32>) {
@@ -299,6 +331,7 @@ func.func @conv1d_ncw_4x8x2_memref(%input: memref<4x3x6xf32>, %filter: memref<8x
// CHECK: %[[CONTRACT_0:.+]] = vector.contract {
// CHECK-SAME: indexing_maps = [#[[INPUT_MAP]], #[[FILTER_MAP]], #[[OUTPUT_MAP]]],
// CHECK-SAME: iterator_types = ["parallel", "parallel", "parallel", "reduction"]
+// CHECK-SAME: kind = #vector.kind<add>
// CHECK-SAME: %[[V_INPUT_0]], %[[V_FILTER]], %[[V_OUTPUT_0]]
// CHECK-SAME: : vector<4x1x3xf32>, vector<3x8xf32> into vector<4x1x8xf32>
@@ -306,6 +339,7 @@ func.func @conv1d_ncw_4x8x2_memref(%input: memref<4x3x6xf32>, %filter: memref<8x
// CHECK: %[[CONTRACT_1:.+]] = vector.contract {
// CHECK-SAME: indexing_maps = [#[[INPUT_MAP]], #[[FILTER_MAP]], #[[OUTPUT_MAP]]],
// CHECK-SAME: iterator_types = ["parallel", "parallel", "parallel", "reduction"]
+// CHECK-SAME: kind = #vector.kind<add>
// CHECK-SAME: %[[V_INPUT_1]], %[[V_FILTER]], %[[V_OUTPUT_1]]
// CHECK-SAME: : vector<4x1x3xf32>, vector<3x8xf32> into vector<4x1x8xf32>
@@ -324,6 +358,37 @@ func.func @conv1d_ncw_4x8x2_memref(%input: memref<4x3x6xf32>, %filter: memref<8x
// -----
+// This test is the same as the one above, but for the i1 element type; the only
+// difference is that the combining kind for `vector.contract` is `OR`.
+func.func @conv1d_ncw_4x8x2_memref_i1(%input: memref<4x3x6xi1>, %filter: memref<8x3x1xi1>, %output: memref<4x8x2xi1>) {
+ linalg.conv_1d_ncw_fcw
+ {dilations = dense<1> : tensor<1xi64>, strides = dense<3> : tensor<1xi64>}
+ ins(%input, %filter : memref<4x3x6xi1>, memref<8x3x1xi1>)
+ outs(%output : memref<4x8x2xi1>)
+ return
+}
+
+// CHECK: #[[INPUT_MAP:.+]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>
+// CHECK: #[[FILTER_MAP:.+]] = affine_map<(d0, d1, d2, d3) -> (d3, d2)>
+// CHECK: #[[OUTPUT_MAP:.+]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>
+
+// CHECK: func @conv1d_ncw_4x8x2_memref_i1
+/// w == 0, kw == 0
+// CHECK: vector.contract {
+// CHECK-SAME: indexing_maps = [#[[INPUT_MAP]], #[[FILTER_MAP]], #[[OUTPUT_MAP]]],
+// CHECK-SAME: iterator_types = ["parallel", "parallel", "parallel", "reduction"]
+// CHECK-SAME: kind = #vector.kind<or>
+// CHECK-SAME: : vector<4x1x3xi1>, vector<3x8xi1> into vector<4x1x8xi1>
+
+/// w == 1, kw == 0
+// CHECK: vector.contract {
+// CHECK-SAME: indexing_maps = [#[[INPUT_MAP]], #[[FILTER_MAP]], #[[OUTPUT_MAP]]],
+// CHECK-SAME: iterator_types = ["parallel", "parallel", "parallel", "reduction"]
+// CHECK-SAME: kind = #vector.kind<or>
+// CHECK-SAME: : vector<4x1x3xi1>, vector<3x8xi1> into vector<4x1x8xi1>
+
+// -----
+
func.func @conv1d_ncw_4x8x2_memref(%input: memref<4x3x6xf32>, %filter: memref<8x3x2xf32>, %output: memref<4x8x2xf32>) {
linalg.conv_1d_ncw_fcw
{dilations = dense<2> : tensor<1xi64>, strides = dense<3> : tensor<1xi64>}
diff --git a/mlir/test/Dialect/Linalg/vectorize-tensor-extract.mlir b/mlir/test/Dialect/Linalg/vectorize-tensor-extract.mlir
index ad3a8d9..2c56b71 100644
--- a/mlir/test/Dialect/Linalg/vectorize-tensor-extract.mlir
+++ b/mlir/test/Dialect/Linalg/vectorize-tensor-extract.mlir
@@ -307,6 +307,96 @@ module attributes {transform.with_named_sequence} {
// -----
+// Reading a 1D column vector (hence a candidate for a contiguous load), but the
+// row index comes from %1 (linalg.index), so this is lowered as a gather load.
+
+#map = affine_map<(d0, d1) -> (d0, d1)>
+func.func @index_from_output_column_vector_gather_load(%src: tensor<8x128xf32>) -> tensor<8x1xf32> {
+ %c0 = arith.constant 0 : index
+ %0 = tensor.empty() : tensor<8x1xf32>
+ %res = linalg.generic {
+ indexing_maps = [#map],
+ iterator_types = ["parallel", "parallel"]
+ } outs(%0 : tensor<8x1xf32>) {
+ ^bb0(%arg1: f32):
+ %1 = linalg.index 0 : index
+ %extracted = tensor.extract %src[%1, %c0] : tensor<8x128xf32>
+ linalg.yield %extracted : f32
+ } -> tensor<8x1xf32>
+ return %res : tensor<8x1xf32>
+}
+
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg2: !transform.any_op {transform.readonly}) {
+ %0 = transform.structured.match ops{["linalg.generic"]} in %arg2 : (!transform.any_op) -> !transform.any_op
+ %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
+ %2 = transform.structured.vectorize_children_and_apply_patterns %1 {vectorize_nd_extract} : (!transform.any_op) -> !transform.any_op
+ transform.yield
+ }
+}
+
+// CHECK-LABEL: func.func @index_from_output_column_vector_gather_load(
+// CHECK-SAME: %[[SRC:.*]]: tensor<8x128xf32>) -> tensor<8x1xf32> {
+// CHECK: %[[C128:.*]] = arith.constant dense<128> : vector<1x8xindex>
+// CHECK: %[[C0:.*]] = arith.constant 0 : index
+// CHECK: %[[PASS_THRU:.*]] = arith.constant dense<0.000000e+00> : vector<8x1xf32>
+// CHECK: %[[MASK:.*]] = arith.constant dense<true> : vector<8x1xi1>
+// CHECK: %[[IDX_VEC:.*]] = arith.constant dense<[0, 1, 2, 3, 4, 5, 6, 7]> : vector<8xindex>
+// CHECK: %[[OUT:.*]] = tensor.empty() : tensor<8x1xf32>
+// CHECK: %[[B:.*]] = vector.broadcast %[[IDX_VEC]] : vector<8xindex> to vector<1x8xindex>
+// CHECK: %[[MUL:.*]] = arith.muli %[[B]], %[[C128]] : vector<1x8xindex>
+// CHECK: %[[TR:.*]] = vector.transpose %[[MUL]], [1, 0] : vector<1x8xindex> to vector<8x1xindex>
+// CHECK: %[[GATHER:.*]] = vector.gather %[[SRC]]{{\[}}%[[C0]], %[[C0]]] {{\[}}%[[TR]]], %[[MASK]], %[[PASS_THRU]] : tensor<8x128xf32>, vector<8x1xindex>, vector<8x1xi1>, vector<8x1xf32> into vector<8x1xf32>
+// CHECK: %[[RES:.*]] = vector.transfer_write %[[GATHER]], %[[OUT]]{{\[}}%[[C0]], %[[C0]]] {in_bounds = [true, true]} : vector<8x1xf32>, tensor<8x1xf32>
+// CHECK: return %[[RES]] : tensor<8x1xf32>
+
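The gather offsets here are computed explicitly: the constant [0..7] index vector is broadcast, scaled by the row stride 128, and transposed into an 8x1 column, so lane i reads src[i, 0] at linearized offset i * 128. Condensed from the CHECK lines above (value names illustrative):

  %mul = arith.muli %rows_1x8, %c128_splat : vector<1x8xindex>  // offsets i * 128
  %col = vector.transpose %mul, [1, 0] : vector<1x8xindex> to vector<8x1xindex>
  %g   = vector.gather %src[%c0, %c0] [%col], %mask, %pass_thru
           : tensor<8x128xf32>, vector<8x1xindex>, vector<8x1xi1>, vector<8x1xf32> into vector<8x1xf32>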
+// -----
+
+// Same as above, but with the access indices swapped, so this _is_ a contiguous
+// load. That case is not yet supported and is lowered as a vector.gather instead.
+// TODO: Make sure that this is lowered as a contiguous load.
+
+#map = affine_map<(d0, d1) -> (d0, d1)>
+func.func @index_from_output_column_vector_contiguous_load(%src: tensor<8x128xf32>) -> tensor<8x1xf32> {
+ %c0 = arith.constant 0 : index
+ %0 = tensor.empty() : tensor<8x1xf32>
+ %res = linalg.generic {
+ indexing_maps = [#map],
+ iterator_types = ["parallel", "parallel"]
+ } outs(%0 : tensor<8x1xf32>) {
+ ^bb0(%arg1: f32):
+ %1 = linalg.index 0 : index
+ %extracted = tensor.extract %src[%c0, %1] : tensor<8x128xf32>
+ linalg.yield %extracted : f32
+ } -> tensor<8x1xf32>
+ return %res : tensor<8x1xf32>
+}
+
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg2: !transform.any_op {transform.readonly}) {
+ %0 = transform.structured.match ops{["linalg.generic"]} in %arg2 : (!transform.any_op) -> !transform.any_op
+ %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
+ %2 = transform.structured.vectorize_children_and_apply_patterns %1 {vectorize_nd_extract} : (!transform.any_op) -> !transform.any_op
+ transform.yield
+ }
+}
+
+// CHECK-LABEL: func.func @index_from_output_column_vector_contiguous_load(
+// CHECK-SAME: %[[SRC:.*]]: tensor<8x128xf32>) -> tensor<8x1xf32> {
+// CHECK: %[[C0:.*]] = arith.constant 0 : index
+// CHECK: %[[PASS_THRU:.*]] = arith.constant dense<0.000000e+00> : vector<8x1xf32>
+// CHECK: %[[MASK:.*]] = arith.constant dense<true> : vector<8x1xi1>
+// CHECK: %[[IDX_VEC:.*]] = arith.constant dense<[0, 1, 2, 3, 4, 5, 6, 7]> : vector<8xindex>
+// CHECK: %[[OUT:.*]] = tensor.empty() : tensor<8x1xf32>
+// CHECK: %[[B:.*]] = vector.broadcast %[[IDX_VEC]] : vector<8xindex> to vector<1x8xindex>
+// CHECK: %[[TR:.*]] = vector.transpose %[[B]], [1, 0] : vector<1x8xindex> to vector<8x1xindex>
+// CHECK: %[[GATHER:.*]] = vector.gather %[[SRC]]{{\[}}%[[C0]], %[[C0]]] {{\[}}%[[TR]]], %[[MASK]], %[[PASS_THRU]] : tensor<8x128xf32>, vector<8x1xindex>, vector<8x1xi1>, vector<8x1xf32> into vector<8x1xf32>
+// CHECK: %[[RES:.*]] = vector.transfer_write %[[GATHER]], %[[OUT]]{{\[}}%[[C0]], %[[C0]]] {in_bounds = [true, true]} : vector<8x1xf32>, tensor<8x1xf32>
+// CHECK: return %[[RES]] : tensor<8x1xf32>
+
+// -----
+
#map = affine_map<(d0) -> (d0)>
func.func @vectorize_nd_tensor_extract_contiguous_and_gather(%arg0: tensor<6xf32>, %arg1: tensor<5xi32>) -> tensor<5xf32> {
%c5 = arith.constant 5 : index
diff --git a/mlir/test/Dialect/SCF/loop-pipelining.mlir b/mlir/test/Dialect/SCF/loop-pipelining.mlir
index 4a1406f..4747aad 100644
--- a/mlir/test/Dialect/SCF/loop-pipelining.mlir
+++ b/mlir/test/Dialect/SCF/loop-pipelining.mlir
@@ -766,6 +766,7 @@ func.func @stage_0_value_escape(%A: memref<?xf32>, %result: memref<?xf32>, %ub:
// Check for predicated epilogue for dynamic loop.
// CHECK-LABEL: dynamic_loop(
+// CHECK: %[[C0:.*]] = arith.constant 0 : index
// CHECK: %{{.*}}:2 = scf.for %[[ARG5:.*]] = %{{.*}} to %{{.*}} step %{{.*}} iter_args(%[[ARG6:.*]] = %{{.*}}, %[[ARG7:.*]] = %{{.*}})
// CHECK: memref.store %[[ARG6]], %{{.*}}[%[[ARG5]]]
// CHECK: %[[ADDF_24:.*]] = arith.addf %[[ARG7]], %{{.*}}
@@ -781,12 +782,12 @@ func.func @stage_0_value_escape(%A: memref<?xf32>, %result: memref<?xf32>, %ub:
// CHECK: %[[ADDI_14:.*]] = arith.addi %[[DIVUI_13]], %{{.*}}-1
// CHECK: %[[MULI_15:.*]] = arith.muli %{{.*}}, %[[ADDI_14]]
// CHECK: %[[ADDI_16:.*]] = arith.addi %{{.*}}, %[[MULI_15]]
-// CHECK: %[[CMPI_17:.*]] = arith.cmpi sge, %[[ADDI_14]], %{{.*}}
+// CHECK: %[[CMPI_17:.*]] = arith.cmpi sge, %[[ADDI_14]], %[[C0]]
// CHECK: %[[ADDI_18:.*]] = arith.addi %[[DIVUI_13]], %{{.*}}-1
// CHECK: %[[ADDI_19:.*]] = arith.addi %[[ADDI_18]], %{{.*}}-1
// CHECK: %[[MULI_20:.*]] = arith.muli %{{.*}}, %[[ADDI_19]]
// CHECK: %[[ADDI_21:.*]] = arith.addi %{{.*}}, %[[MULI_20]]
-// CHECK: %[[CMPI_22:.*]] = arith.cmpi sge, %[[ADDI_19]], %{{.*}}
+// CHECK: %[[CMPI_22:.*]] = arith.cmpi sge, %[[ADDI_19]], %[[C0]]
// CHECK: scf.if %[[CMPI_17]] {
// CHECK: memref.store %{{.*}}#0, %{{.*}}[%[[ADDI_21]]]
// CHECK: } else {
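The new %[[C0]] capture tightens this test: the two sge comparisons previously matched any right-hand side via %{{.*}}; binding the hoisted arith.constant 0 : index and reusing it pins the epilogue predicates to that exact zero constant. The capture-and-reuse idiom:

  // CHECK: %[[C0:.*]] = arith.constant 0 : index
  // ...
  // CHECK: %[[CMPI_17:.*]] = arith.cmpi sge, %[[ADDI_14]], %[[C0]]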
diff --git a/mlir/test/Dialect/XeGPU/XeGPUOps.mlir b/mlir/test/Dialect/XeGPU/XeGPUOps.mlir
index 35d44cf..c1126ef 100644
--- a/mlir/test/Dialect/XeGPU/XeGPUOps.mlir
+++ b/mlir/test/Dialect/XeGPU/XeGPUOps.mlir
@@ -24,8 +24,8 @@ gpu.func @test_create_nd_tdesc_vc_2(%src: ui64, %w : index, %h : index, %x : ind
// CHECK: gpu.func @test_create_nd_tdesc_vc_3(%[[arg0:.*]]: memref<24x32xf32>) {
gpu.func @test_create_nd_tdesc_vc_3(%src: memref<24x32xf32>) {
- // CHECK: %[[REG:.*]] = xegpu.create_nd_tdesc %[[arg0]][0, 0] : memref<24x32xf32> -> !xegpu.tensor_desc<24x16xf32, #xegpu.tdesc_attr<array_length = 2 : i64>
- %1 = xegpu.create_nd_tdesc %src[0, 0] : memref<24x32xf32> -> !xegpu.tensor_desc<24x16xf32, #xegpu.tdesc_attr<array_length = 2>>
+ // CHECK: %[[REG:.*]] = xegpu.create_nd_tdesc %[[arg0]][0, 0] : memref<24x32xf32> -> !xegpu.tensor_desc<24x16xf32, #xegpu.block_tdesc_attr<array_length = 2 : i64>
+ %1 = xegpu.create_nd_tdesc %src[0, 0] : memref<24x32xf32> -> !xegpu.tensor_desc<24x16xf32, #xegpu.block_tdesc_attr<array_length = 2>>
gpu.return
}
@@ -36,6 +36,13 @@ gpu.func @test_create_nd_tdesc_vc_4(%src: memref<2x24x32xf32>) {
gpu.return
}
+// CHECK: gpu.func @test_create_nd_tdesc_vc_5(%[[arg0:.*]]: memref<2x24x32xf32, 3>) {
+gpu.func @test_create_nd_tdesc_vc_5(%src: memref<2x24x32xf32, 3>) {
+ // CHECK: %[[REG:.*]] = xegpu.create_nd_tdesc %arg0[0, 0, 0] : memref<2x24x32xf32, 3> -> !xegpu.tensor_desc<16xf32, #xegpu.block_tdesc_attr<memory_space = slm>>
+ %1 = xegpu.create_nd_tdesc %src[0, 0, 0] : memref<2x24x32xf32, 3> -> !xegpu.tensor_desc<16xf32, #xegpu.block_tdesc_attr<memory_space = slm>>
+ gpu.return
+}
+
// CHECK: gpu.func @test_prefetch_nd_vc(%[[arg0:.*]]: memref<24x32xf16>) {
gpu.func @test_prefetch_nd_vc(%src: memref<24x32xf16>) {
// CHECK: %[[R0:.*]] = xegpu.create_nd_tdesc %[[arg0]][0, 0] : memref<24x32xf16> -> !xegpu.tensor_desc<8x16xf16>
@@ -97,17 +104,24 @@ gpu.func @test_create_update_nd_tdesc_vc(%src: memref<24x32xf32>) {
// CHECK: gpu.func @test_create_tdesc_vc(%[[arg0:.*]]: ui64) {
gpu.func @test_create_tdesc_vc(%src: ui64) {
- //CHECK: %[[R0:.*]] = xegpu.create_tdesc %arg0 [0, 8, 16, 24] {chunk_size = 2 : i64} : ui64 -> !xegpu.tensor_desc<4x2xf32, #xegpu.tdesc_attr<scattered = true>>
- %1 = xegpu.create_tdesc %src[0, 8, 16, 24] {chunk_size = 2} : ui64 -> !xegpu.tensor_desc<4x2xf32, #xegpu.tdesc_attr<scattered = true>>
+ //CHECK: %[[R0:.*]] = xegpu.create_tdesc %arg0 [0, 8, 16, 24] : ui64 -> !xegpu.tensor_desc<4x2xf32, #xegpu.scatter_tdesc_attr<chunk_size = 2 : i64>>
+ %1 = xegpu.create_tdesc %src[0, 8, 16, 24] : ui64 -> !xegpu.tensor_desc<4x2xf32, #xegpu.scatter_tdesc_attr<chunk_size = 2>>
+ gpu.return
+}
+
+// CHECK: gpu.func @test_create_tdesc_vc_1(%[[arg0:.*]]: memref<?xf32, 3>) {
+gpu.func @test_create_tdesc_vc_1(%src: memref<?xf32, 3>) {
+ //CHECK: %[[R0:.*]] = xegpu.create_tdesc %arg0 [0, 8, 16, 24] : memref<?xf32, 3> -> !xegpu.tensor_desc<4x2xf32, #xegpu.scatter_tdesc_attr<memory_space = slm, chunk_size = 2 : i64>>
+ %1 = xegpu.create_tdesc %src[0, 8, 16, 24] : memref<?xf32, 3> -> !xegpu.tensor_desc<4x2xf32, #xegpu.scatter_tdesc_attr<memory_space = slm, chunk_size = 2>>
gpu.return
}
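All of the XeGPU churn in this file is one mechanical migration: the generic #xegpu.tdesc_attr<scattered = true> plus a {chunk_size = N} attribute on the op gives way to dedicated descriptor attributes, #xegpu.scatter_tdesc_attr<chunk_size = N> for scattered access (and #xegpu.block_tdesc_attr<...> for the block case), so chunking and memory space are now carried by the tensor-descriptor type itself. Before and after, as exercised above:

  // before
  %1 = xegpu.create_tdesc %src[0, 8, 16, 24] {chunk_size = 2}
       : ui64 -> !xegpu.tensor_desc<4x2xf32, #xegpu.tdesc_attr<scattered = true>>
  // after
  %1 = xegpu.create_tdesc %src[0, 8, 16, 24]
       : ui64 -> !xegpu.tensor_desc<4x2xf32, #xegpu.scatter_tdesc_attr<chunk_size = 2>>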
// CHECK: gpu.func @test_prefetch_vc(%[[arg0:.*]]: ui64) {
gpu.func @test_prefetch_vc(%src: ui64) {
- //CHECK: %[[R0:.*]] = xegpu.create_tdesc %arg0 [0, 8, 16, 24] {chunk_size = 2 : i64} : ui64 -> !xegpu.tensor_desc<4x2xf32, #xegpu.tdesc_attr<scattered = true>>
- %1 = xegpu.create_tdesc %src[0, 8, 16, 24] {chunk_size = 2} : ui64 -> !xegpu.tensor_desc<4x2xf32, #xegpu.tdesc_attr<scattered = true>>
- // CHECK: xegpu.prefetch %[[R0]] <{l1_hint = #xegpu.cache_hint<cached>, l2_hint = #xegpu.cache_hint<uncached>}> : !xegpu.tensor_desc<4x2xf32, #xegpu.tdesc_attr<scattered = true>>
- xegpu.prefetch %1 <{l1_hint = #xegpu.cache_hint<cached>, l2_hint = #xegpu.cache_hint<uncached>}>: !xegpu.tensor_desc<4x2xf32, #xegpu.tdesc_attr<scattered = true>>
+ //CHECK: %[[R0:.*]] = xegpu.create_tdesc %arg0 [0, 8, 16, 24] : ui64 -> !xegpu.tensor_desc<4x2xf32, #xegpu.scatter_tdesc_attr<chunk_size = 2 : i64>>
+ %1 = xegpu.create_tdesc %src[0, 8, 16, 24] : ui64 -> !xegpu.tensor_desc<4x2xf32, #xegpu.scatter_tdesc_attr<chunk_size = 2>>
+ // CHECK: xegpu.prefetch %[[R0]] <{l1_hint = #xegpu.cache_hint<cached>, l2_hint = #xegpu.cache_hint<uncached>}> : !xegpu.tensor_desc<4x2xf32, #xegpu.scatter_tdesc_attr<chunk_size = 2 : i64>>
+ xegpu.prefetch %1 <{l1_hint = #xegpu.cache_hint<cached>, l2_hint = #xegpu.cache_hint<uncached>}>: !xegpu.tensor_desc<4x2xf32, #xegpu.scatter_tdesc_attr<chunk_size = 2>>
gpu.return
}
@@ -115,12 +129,12 @@ gpu.func @test_prefetch_vc(%src: ui64) {
gpu.func @test_load_gather_vc(%src: ui64) {
//CHECK: %[[cst:.*]] = arith.constant dense<true> : vector<4xi1>
%0 = arith.constant dense<1>: vector<4xi1>
- //CHECK: %[[R0:.*]] = xegpu.create_tdesc %arg0 [0, 8, 16, 24] {chunk_size = 2 : i64} : ui64 -> !xegpu.tensor_desc<4x2xf32, #xegpu.tdesc_attr<scattered = true>>
- %1 = xegpu.create_tdesc %src[0, 8, 16, 24] {chunk_size = 2} : ui64 -> !xegpu.tensor_desc<4x2xf32, #xegpu.tdesc_attr<scattered = true>>
- //CHECK: %[[R1:.*]] = xegpu.load %[[R0]], %[[cst]] <{l1_hint = #xegpu.cache_hint<cached>, l2_hint = #xegpu.cache_hint<uncached>}>
- //CHECK-SAME: !xegpu.tensor_desc<4x2xf32, #xegpu.tdesc_attr<scattered = true>>, vector<4xi1> -> vector<4x2xf32>
- %2 = xegpu.load %1, %0 <{l1_hint = #xegpu.cache_hint<cached>, l2_hint = #xegpu.cache_hint<uncached>}>
- : !xegpu.tensor_desc<4x2xf32, #xegpu.tdesc_attr<scattered = true>>, vector<4xi1> -> vector<4x2xf32>
+ //CHECK: %[[R0:.*]] = xegpu.create_tdesc %arg0 [0, 8, 16, 24] : ui64 -> !xegpu.tensor_desc<4x2xf32, #xegpu.scatter_tdesc_attr<chunk_size = 2 : i64>>
+ %1 = xegpu.create_tdesc %src[0, 8, 16, 24] : ui64 -> !xegpu.tensor_desc<4x2xf32, #xegpu.scatter_tdesc_attr<chunk_size = 2>>
+ //CHECK: %[[R1:.*]] = xegpu.load %[[R0]], %[[cst]] <{l1_hint = #xegpu.cache_hint<cached>, l2_hint = #xegpu.cache_hint<uncached>, transpose}>
+ //CHECK-SAME: !xegpu.tensor_desc<4x2xf32, #xegpu.scatter_tdesc_attr<chunk_size = 2 : i64>>, vector<4xi1> -> vector<2x4xf32>
+ %2 = xegpu.load %1, %0 <{l1_hint = #xegpu.cache_hint<cached>, l2_hint = #xegpu.cache_hint<uncached>, transpose}>
+ : !xegpu.tensor_desc<4x2xf32, #xegpu.scatter_tdesc_attr<chunk_size = 2>>, vector<4xi1> -> vector<2x4xf32>
gpu.return
}
@@ -128,23 +142,23 @@ gpu.func @test_load_gather_vc(%src: ui64) {
gpu.func @test_store_scatter_vc(%src: ui64) {
//CHECK: %[[c0:.*]] = arith.constant dense<true> : vector<4xi1>
%0 = arith.constant dense<1>: vector<4xi1>
- //CHECK: %[[c1:.*]] = arith.constant dense<2.900000e+00> : vector<4x2xf32>
- %1 = arith.constant dense<2.9>: vector<4x2xf32>
- //CHECK: %[[R0:.*]] = xegpu.create_tdesc %arg0 [0, 8, 16, 24] {chunk_size = 2 : i64} : ui64 -> !xegpu.tensor_desc<4x2xf32, #xegpu.tdesc_attr<scattered = true>>
- %2 = xegpu.create_tdesc %src[0, 8, 16, 24] {chunk_size = 2} : ui64 -> !xegpu.tensor_desc<4x2xf32, #xegpu.tdesc_attr<scattered = true>>
- //CHECK: xegpu.store %[[c1]], %[[R0]], %[[c0]] <{l1_hint = #xegpu.cache_hint<write_back>, l2_hint = #xegpu.cache_hint<uncached>}>
- //CHECK-SAME: vector<4x2xf32>, !xegpu.tensor_desc<4x2xf32, #xegpu.tdesc_attr<scattered = true>>, vector<4xi1>
- xegpu.store %1, %2, %0 <{l1_hint = #xegpu.cache_hint<write_back>, l2_hint = #xegpu.cache_hint<uncached>}>
- : vector<4x2xf32>, !xegpu.tensor_desc<4x2xf32, #xegpu.tdesc_attr<scattered = true>>, vector<4xi1>
+ //CHECK: %[[c1:.*]] = arith.constant dense<2.900000e+00> : vector<2x4xf32>
+ %1 = arith.constant dense<2.9>: vector<2x4xf32>
+ //CHECK: %[[R0:.*]] = xegpu.create_tdesc %arg0 [0, 8, 16, 24] : ui64 -> !xegpu.tensor_desc<4x2xf32, #xegpu.scatter_tdesc_attr<chunk_size = 2 : i64>>
+ %2 = xegpu.create_tdesc %src[0, 8, 16, 24] : ui64 -> !xegpu.tensor_desc<4x2xf32, #xegpu.scatter_tdesc_attr<chunk_size = 2>>
+ //CHECK: xegpu.store %[[c1]], %[[R0]], %[[c0]] <{l1_hint = #xegpu.cache_hint<write_back>, l2_hint = #xegpu.cache_hint<uncached>, transpose}>
+ //CHECK-SAME: vector<2x4xf32>, !xegpu.tensor_desc<4x2xf32, #xegpu.scatter_tdesc_attr<chunk_size = 2 : i64>>, vector<4xi1>
+ xegpu.store %1, %2, %0 <{l1_hint = #xegpu.cache_hint<write_back>, l2_hint = #xegpu.cache_hint<uncached>, transpose}>
+ : vector<2x4xf32>, !xegpu.tensor_desc<4x2xf32, #xegpu.scatter_tdesc_attr<chunk_size = 2>>, vector<4xi1>
gpu.return
}
// CHECK: gpu.func @test_create_update_tdesc_vc(%[[arg0:.*]]: ui64) {
gpu.func @test_create_update_tdesc_vc(%src: ui64) {
- //CHECK: %[[R0:.*]] = xegpu.create_tdesc %arg0 [0, 8, 16, 24] {chunk_size = 2 : i64} : ui64 -> !xegpu.tensor_desc<4x2xf32, #xegpu.tdesc_attr<scattered = true>>
- %1 = xegpu.create_tdesc %src[0, 8, 16, 24] {chunk_size = 2} : ui64 -> !xegpu.tensor_desc<4x2xf32, #xegpu.tdesc_attr<scattered = true>>
- //CHECK: %[[R1:.*]] = xegpu.update_offset %[[R0]], [32, 32, 32, 32] : !xegpu.tensor_desc<4x2xf32, #xegpu.tdesc_attr<scattered = true>>
- %2 = xegpu.update_offset %1, [32, 32, 32, 32] : !xegpu.tensor_desc<4x2xf32, #xegpu.tdesc_attr<scattered = true>>
+ //CHECK: %[[R0:.*]] = xegpu.create_tdesc %arg0 [0, 8, 16, 24] : ui64 -> !xegpu.tensor_desc<4x2xf32, #xegpu.scatter_tdesc_attr<chunk_size = 2 : i64>>
+ %1 = xegpu.create_tdesc %src[0, 8, 16, 24]: ui64 -> !xegpu.tensor_desc<4x2xf32, #xegpu.scatter_tdesc_attr<chunk_size = 2>>
+ //CHECK: %[[R1:.*]] = xegpu.update_offset %[[R0]], [32, 32, 32, 32] : !xegpu.tensor_desc<4x2xf32, #xegpu.scatter_tdesc_attr<chunk_size = 2 : i64>>
+ %2 = xegpu.update_offset %1, [32, 32, 32, 32] : !xegpu.tensor_desc<4x2xf32, #xegpu.scatter_tdesc_attr<chunk_size = 2>>
gpu.return
}
@@ -165,10 +179,10 @@ gpu.func @test_dpas_vc_with_packed_b(%a : vector<8x16xf16>, %b: vector<8x16x2xf1
// CHECK: gpu.func @test_atomic_rmw(%[[arg0:.*]]: ui64, %[[arg1:.*]]: vector<16xf32>, %[[arg2:.*]]: vector<16xi1>)
gpu.func @test_atomic_rmw(%src: ui64, %value : vector<16xf32>, %mask : vector<16xi1>) {
- //CHECK: %[[R0:.*]] = xegpu.create_tdesc %[[arg0]] [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] : ui64 -> !xegpu.tensor_desc<16xf32, #xegpu.tdesc_attr<scattered = true>>
- %1 = xegpu.create_tdesc %src[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]: ui64 -> !xegpu.tensor_desc<16xf32, #xegpu.tdesc_attr<scattered = true>>
- //CHECK: %[[R1:.*]] = xegpu.atomic_rmw addf %[[R0]], %[[arg2]], %[[arg1]] : !xegpu.tensor_desc<16xf32, #xegpu.tdesc_attr<scattered = true>>, vector<16xi1>, vector<16xf32> -> vector<16xf32>
- xegpu.atomic_rmw addf %1, %mask, %value: !xegpu.tensor_desc<16xf32, #xegpu.tdesc_attr<scattered = true>>, vector<16xi1>, vector<16xf32> -> vector<16xf32>
+ //CHECK: %[[R0:.*]] = xegpu.create_tdesc %[[arg0]] [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] : ui64 -> !xegpu.tensor_desc<16xf32, #xegpu.scatter_tdesc_attr<>>
+ %1 = xegpu.create_tdesc %src[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]: ui64 -> !xegpu.tensor_desc<16xf32, #xegpu.scatter_tdesc_attr<>>
+ //CHECK: %[[R1:.*]] = xegpu.atomic_rmw addf %[[R0]], %[[arg2]], %[[arg1]] : !xegpu.tensor_desc<16xf32, #xegpu.scatter_tdesc_attr<>>, vector<16xi1>, vector<16xf32> -> vector<16xf32>
+ xegpu.atomic_rmw addf %1, %mask, %value: !xegpu.tensor_desc<16xf32, #xegpu.scatter_tdesc_attr<>>, vector<16xi1>, vector<16xf32> -> vector<16xf32>
gpu.return
}
diff --git a/mlir/test/Dialect/XeGPU/invalid.mlir b/mlir/test/Dialect/XeGPU/invalid.mlir
index 7ef50bb..193dae3 100644
--- a/mlir/test/Dialect/XeGPU/invalid.mlir
+++ b/mlir/test/Dialect/XeGPU/invalid.mlir
@@ -16,6 +16,20 @@ func.func @test_create_nd_tdesc_vc_2(%src: memref<24x32xf32>) {
}
// -----
+func.func @test_create_nd_tdesc_vc_3(%src: memref<2x24x32xf32, 3>) {
+ // expected-error@+1 {{SLM is not supported for 2D Block TensorDesc}}
+ %1 = xegpu.create_nd_tdesc %src[0, 0, 0] : memref<2x24x32xf32, 3> -> !xegpu.tensor_desc<8x16xf32, #xegpu.block_tdesc_attr<memory_space = slm>>
+ return
+}
+
+// -----
+func.func @test_create_nd_tdesc_vc_4(%src: memref<2x24x32xf32, 3>) {
+ // expected-error@+1 {{Memory space mismatch}}
+ %1 = xegpu.create_nd_tdesc %src[0, 0, 0] : memref<2x24x32xf32, 3> -> !xegpu.tensor_desc<16xf32>
+ return
+}
+
+// -----
func.func @test_prefetch_nd_vc_1(%src: memref<24x32xf16>) {
%1 = xegpu.create_nd_tdesc %src[0, 0] : memref<24x32xf16> -> !xegpu.tensor_desc<8x16xf16>
// expected-error@+1 {{invlid l1_hint: #xegpu.cache_hint<write_back>}}
@@ -26,10 +40,10 @@ func.func @test_prefetch_nd_vc_1(%src: memref<24x32xf16>) {
// -----
func.func @test_prefetch_nd_vc_2(%src: memref<24xf16>) {
%1 = xegpu.create_tdesc %src[0, 1, 2, 3, 4, 5, 6, 7]
- : memref<24xf16> -> !xegpu.tensor_desc<8xf16, #xegpu.tdesc_attr<scattered=true>>
+ : memref<24xf16> -> !xegpu.tensor_desc<8xf16, #xegpu.scatter_tdesc_attr<>>
// expected-error@+1 {{Expects a non-scattered TensorDesc}}
xegpu.prefetch_nd %1 <{l1_hint = #xegpu.cache_hint<cached>}>
- : !xegpu.tensor_desc<8xf16, #xegpu.tdesc_attr<scattered=true>>
+ : !xegpu.tensor_desc<8xf16, #xegpu.scatter_tdesc_attr<>>
return
}
@@ -44,11 +58,11 @@ func.func @test_load_nd_vc_1(%src: memref<8x16xf16>) {
// -----
func.func @test_load_nd_vc_2(%src: memref<16xf16>) {
- %1 = xegpu.create_tdesc %src[0, 2, 4, 6, 8, 10, 12, 14] {chunk_size = 2}
- : memref<16xf16> -> !xegpu.tensor_desc<8x2xf16, #xegpu.tdesc_attr<scattered=true>>
+ %1 = xegpu.create_tdesc %src[0, 2, 4, 6, 8, 10, 12, 14]
+ : memref<16xf16> -> !xegpu.tensor_desc<8x2xf16, #xegpu.scatter_tdesc_attr<chunk_size = 2>>
// expected-error@+1 {{Expects a non-scattered TensorDesc.}}
%2 = xegpu.load_nd %1 <{l1_hint = #xegpu.cache_hint<cached>}>
- : !xegpu.tensor_desc<8x2xf16, #xegpu.tdesc_attr<scattered=true>> -> vector<8x2xf16>
+ : !xegpu.tensor_desc<8x2xf16, #xegpu.scatter_tdesc_attr<chunk_size = 2>> -> vector<8x2xf16>
return
}
@@ -73,28 +87,28 @@ func.func @test_store_nd_vc_1(%dst: memref<24x32xf16>) {
// -----
func.func @test_store_nd_vc_2(%dst: memref<16xf16>) {
%1 = arith.constant dense<1.0>: vector<8x2xf16>
- %2 = xegpu.create_tdesc %dst[0, 2, 4, 6, 8, 10, 12, 14] {chunk_size = 2}
- : memref<16xf16> -> !xegpu.tensor_desc<8x2xf16, #xegpu.tdesc_attr<scattered=true>>
+ %2 = xegpu.create_tdesc %dst[0, 2, 4, 6, 8, 10, 12, 14]
+ : memref<16xf16> -> !xegpu.tensor_desc<8x2xf16, #xegpu.scatter_tdesc_attr<chunk_size = 2>>
// expected-error@+1 {{Expects a non-scattered TensorDesc}}
xegpu.store_nd %1, %2 <{l1_hint = #xegpu.cache_hint<streaming>}>
- : vector<8x2xf16>, !xegpu.tensor_desc<8x2xf16, #xegpu.tdesc_attr<scattered=true>>
+ : vector<8x2xf16>, !xegpu.tensor_desc<8x2xf16, #xegpu.scatter_tdesc_attr<chunk_size = 2>>
return
}
// -----
func.func @test_update_nd_offset_1(%dst: memref<16xf16>) {
- %1 = xegpu.create_tdesc %dst[0, 2, 4, 6, 8, 10, 12, 14] {chunk_size = 2}
- : memref<16xf16> -> !xegpu.tensor_desc<8x2xf16, #xegpu.tdesc_attr<scattered=true>>
+ %1 = xegpu.create_tdesc %dst[0, 2, 4, 6, 8, 10, 12, 14]
+ : memref<16xf16> -> !xegpu.tensor_desc<8x2xf16, #xegpu.scatter_tdesc_attr<chunk_size = 2>>
// expected-error@+1 {{Expects a non-scattered TensorDesc}}
- xegpu.update_nd_offset %1, [0, 2] : !xegpu.tensor_desc<8x2xf16, #xegpu.tdesc_attr<scattered=true>>
+ xegpu.update_nd_offset %1, [0, 2] : !xegpu.tensor_desc<8x2xf16, #xegpu.scatter_tdesc_attr<chunk_size = 2>>
return
}
// -----
func.func @test_create_tdesc_vc_1(%src: ui64) {
// expected-error@+1 {{Expects a scattered TensorDesc}}
- %1 = xegpu.create_tdesc %src[0, 2, 4, 6, 8, 10, 12, 14] {chunk_size = 2}
- : ui64 -> !xegpu.tensor_desc<8x2xf16>
+ %1 = xegpu.create_tdesc %src[0, 2, 4, 6, 8, 10, 12, 14]
+ : ui64 -> !xegpu.tensor_desc<8xf16>
return
}
@@ -102,7 +116,14 @@ func.func @test_create_tdesc_vc_1(%src: ui64) {
func.func @test_create_tdesc_vc_2(%src: ui64) {
// expected-error@+1 {{Incorrect TensorDesc shape}}
%1 = xegpu.create_tdesc %src[0, 2, 4, 6, 8, 10, 12, 14] {chunk_size = 2}
- : ui64 -> !xegpu.tensor_desc<8x4xf16, #xegpu.tdesc_attr<scattered = true>>
+ : ui64 -> !xegpu.tensor_desc<8x4xf16, #xegpu.scatter_tdesc_attr<>>
+ return
+}
+
+// -----
+func.func @test_create_tdesc_vc_1(%src: memref<?xf32>) {
+ // expected-error@+1 {{Memory space mismatch}}
+ %1 = xegpu.create_tdesc %src[0, 8, 16, 24] : memref<?xf32> -> !xegpu.tensor_desc<4x2xf32, #xegpu.scatter_tdesc_attr<memory_space = slm, chunk_size = 2>>
return
}
@@ -116,9 +137,9 @@ func.func @test_prefetch_vc_1(%src: memref<24x32xf16>) {
// -----
func.func @test_prefetch_vc_2(%src: ui64) {
- %1 = xegpu.create_tdesc %src[0, 8, 16, 24] {chunk_size = 2} : ui64 -> !xegpu.tensor_desc<4x2xf32, #xegpu.tdesc_attr<scattered = true>>
+ %1 = xegpu.create_tdesc %src[0, 8, 16, 24] : ui64 -> !xegpu.tensor_desc<4x2xf32, #xegpu.scatter_tdesc_attr<chunk_size = 2>>
// expected-error@+1 {{invlid l1_hint: #xegpu.cache_hint<write_back>}}
- xegpu.prefetch %1 <{l1_hint = #xegpu.cache_hint<write_back>}>: !xegpu.tensor_desc<4x2xf32, #xegpu.tdesc_attr<scattered = true>>
+ xegpu.prefetch %1 <{l1_hint = #xegpu.cache_hint<write_back>}>: !xegpu.tensor_desc<4x2xf32, #xegpu.scatter_tdesc_attr<chunk_size = 2>>
return
}
@@ -135,11 +156,11 @@ func.func @test_load_gather_vc_1(%src: memref<24x32xf16>) {
// -----
func.func @test_load_gather_vc_2(%src: ui64) {
%0 = arith.constant dense<1>: vector<4xi1>
- %1 = xegpu.create_tdesc %src[0, 8, 16, 24] {chunk_size = 2} : ui64
- -> !xegpu.tensor_desc<4x2xf32, #xegpu.tdesc_attr<scattered = true>>
+ %1 = xegpu.create_tdesc %src[0, 8, 16, 24] : ui64
+ -> !xegpu.tensor_desc<4x2xf32, #xegpu.scatter_tdesc_attr<chunk_size = 2>>
// expected-error@+1 {{invlid l1_hint: #xegpu.cache_hint<write_back>}}
%2 = xegpu.load %1, %0 <{l1_hint = #xegpu.cache_hint<write_back>}>
- : !xegpu.tensor_desc<4x2xf32, #xegpu.tdesc_attr<scattered = true>>, vector<4xi1>
+ : !xegpu.tensor_desc<4x2xf32, #xegpu.scatter_tdesc_attr<chunk_size = 2>>, vector<4xi1>
-> vector<4x2xf32>
return
}
@@ -159,11 +180,11 @@ func.func @test_store_scatter_vc_1(%src: memref<24x32xf32>) {
func.func @test_store_scatter_vc_2(%src: ui64) {
%0 = arith.constant dense<1>: vector<4xi1>
%1 = arith.constant dense<2.9>: vector<4x2xf32>
- %2 = xegpu.create_tdesc %src[0, 8, 16, 24] {chunk_size = 2}
- : ui64 -> !xegpu.tensor_desc<4x2xf32, #xegpu.tdesc_attr<scattered = true>>
+ %2 = xegpu.create_tdesc %src[0, 8, 16, 24]
+ : ui64 -> !xegpu.tensor_desc<4x2xf32, #xegpu.scatter_tdesc_attr<chunk_size = 2>>
// expected-error@+1 {{invlid l1_hint: #xegpu.cache_hint<streaming>}}
xegpu.store %1, %2, %0 <{l1_hint = #xegpu.cache_hint<streaming>}> : vector<4x2xf32>,
- !xegpu.tensor_desc<4x2xf32, #xegpu.tdesc_attr<scattered = true>>, vector<4xi1>
+ !xegpu.tensor_desc<4x2xf32, #xegpu.scatter_tdesc_attr<chunk_size = 2>>, vector<4xi1>
return
}
@@ -182,9 +203,9 @@ func.func @test_dpas_vc_2(%a : vector<8x8x2xf16>, %b: vector<8x16x2xf16>) {
}
// -----
-func.func @test_atomic_rmw(%src: ui64, %value : vector<16x8xf32>, %mask : vector<16xi1>) {
- %1 = xegpu.create_tdesc %src[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] {chunk_size = 8}: ui64 -> !xegpu.tensor_desc<16x8xf32, #xegpu.tdesc_attr<scattered = true>>
- // expected-error@+1 {{failed to verify that all of {tensorDesc, mask, value, result} have same shape}}
- xegpu.atomic_rmw addf %1, %mask, %value: !xegpu.tensor_desc<16x8xf32, #xegpu.tdesc_attr<scattered = true>>, vector<16xi1>, vector<16x8xf32> -> vector<16x8xf32>
- gpu.return
+func.func @test_atomic_rmw(%src: ui64, %value : vector<16x4xf32>, %mask : vector<16xi1>) {
+ %1 = xegpu.create_tdesc %src[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] : ui64 -> !xegpu.tensor_desc<16x8xf32, #xegpu.scatter_tdesc_attr<chunk_size = 8>>
+ // expected-error@+1 {{failed to verify that all of {tensorDesc, value, result} have same shape}}
+ xegpu.atomic_rmw addf %1, %mask, %value: !xegpu.tensor_desc<16x8xf32, #xegpu.scatter_tdesc_attr<chunk_size = 8>>, vector<16xi1>, vector<16x4xf32> -> vector<16x8xf32>
+ return
}
\ No newline at end of file
diff --git a/mlir/test/IR/attribute.mlir b/mlir/test/IR/attribute.mlir
index 23dbf0c2..31a4663 100644
--- a/mlir/test/IR/attribute.mlir
+++ b/mlir/test/IR/attribute.mlir
@@ -37,6 +37,10 @@ func.func @any_attr_of_fail() {
func.func @float_attrs_pass() {
"test.float_attrs"() {
+ // CHECK: float_attr = 2.000000e+00 : f4E2M1FN
+ float_attr = 2. : f4E2M1FN
+ } : () -> ()
+ "test.float_attrs"() {
// CHECK: float_attr = 2.000000e+00 : f6E2M3FN
float_attr = 2. : f6E2M3FN
} : () -> ()
diff --git a/mlir/test/Target/LLVMIR/llvmir.mlir b/mlir/test/Target/LLVMIR/llvmir.mlir
index de0ab1b..966a00f 100644
--- a/mlir/test/Target/LLVMIR/llvmir.mlir
+++ b/mlir/test/Target/LLVMIR/llvmir.mlir
@@ -42,6 +42,9 @@ llvm.mlir.global internal @int_global_undef() : i64
// CHECK: @externally_initialized_global = internal externally_initialized global i32 0
llvm.mlir.global internal @externally_initialized_global(0 : i32) {externally_initialized} : i32
+// CHECK: @f4E2M1FN_global_as_i4 = internal global i4 3
+llvm.mlir.global internal @f4E2M1FN_global_as_i4(1.5 : f4E2M1FN) : i4
+
// CHECK: @f6E2M3FN_global_as_i6 = internal global i6 12
llvm.mlir.global internal @f6E2M3FN_global_as_i6(1.5 : f6E2M3FN) : i6
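The integer initializers follow from the packed bit layouts. Assuming the OCP MX encodings (sign | exponent | mantissa, bias 1 for both E2M1 and E2M3, finite-only), 1.5 = +1.1₂ × 2⁰, and the checked constants confirm the arithmetic:

  f4E2M1FN: s=0, e=0b01, m=0b1   -> bits 0b0011   = 3  (the i4 value above)
  f6E2M3FN: s=0, e=0b01, m=0b100 -> bits 0b001100 = 12 (the i6 value above)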
diff --git a/mlir/test/python/ir/builtin_types.py b/mlir/test/python/ir/builtin_types.py
index bc3ba4c..6154a6f 100644
--- a/mlir/test/python/ir/builtin_types.py
+++ b/mlir/test/python/ir/builtin_types.py
@@ -113,6 +113,8 @@ def testTypeIsInstance():
def testFloatTypeSubclasses():
ctx = Context()
# CHECK: True
+ print(isinstance(Type.parse("f4E2M1FN", ctx), FloatType))
+ # CHECK: True
print(isinstance(Type.parse("f6E2M3FN", ctx), FloatType))
# CHECK: True
print(isinstance(Type.parse("f6E3M2FN", ctx), FloatType))
@@ -237,6 +239,8 @@ def testIndexType():
@run
def testFloatType():
with Context():
+ # CHECK: float: f4E2M1FN
+ print("float:", Float4E2M1FNType.get())
# CHECK: float: f6E2M3FN
print("float:", Float6E2M3FNType.get())
# CHECK: float: f6E3M2FN
@@ -617,6 +621,7 @@ def testTypeIDs():
types = [
(IntegerType, IntegerType.get_signless(16)),
(IndexType, IndexType.get()),
+ (Float4E2M1FNType, Float4E2M1FNType.get()),
(Float6E2M3FNType, Float6E2M3FNType.get()),
(Float6E3M2FNType, Float6E3M2FNType.get()),
(Float8E3M4Type, Float8E3M4Type.get()),
@@ -644,6 +649,7 @@ def testTypeIDs():
# CHECK: IntegerType(i16)
# CHECK: IndexType(index)
+ # CHECK: Float4E2M1FNType(f4E2M1FN)
# CHECK: Float6E2M3FNType(f6E2M3FN)
# CHECK: Float6E3M2FNType(f6E3M2FN)
# CHECK: Float8E3M4Type(f8E3M4)
@@ -725,6 +731,9 @@ def testConcreteTypesRoundTrip():
# CHECK: F64Type
# CHECK: F64Type(f64)
print_downcasted(F64Type.get())
+ # CHECK: Float4E2M1FNType
+ # CHECK: Float4E2M1FNType(f4E2M1FN)
+ print_downcasted(Float4E2M1FNType.get())
# CHECK: Float6E2M3FNType
# CHECK: Float6E2M3FNType(f6E2M3FN)
print_downcasted(Float6E2M3FNType.get())
diff --git a/mlir/utils/lldb-scripts/mlirDataFormatters.py b/mlir/utils/lldb-scripts/mlirDataFormatters.py
index 350a0f7..54d3d70 100644
--- a/mlir/utils/lldb-scripts/mlirDataFormatters.py
+++ b/mlir/utils/lldb-scripts/mlirDataFormatters.py
@@ -50,6 +50,7 @@ builtin_attr_type_mnemonics = {
"mlir::CallSiteLoc": '"loc(callsite(...))"',
"mlir::FusedLoc": '"loc(fused<...>[...])"',
"mlir::UnknownLoc": '"loc(unknown)"',
+ "mlir::Float4E2M1FNType": '"f4E2M1FN"',
"mlir::Float6E2M3FNType": '"f6E2M3FN"',
"mlir::Float6E3M2FNType": '"f6E3M2FN"',
"mlir::Float8E5M2Type": '"f8E5M2"',
diff --git a/mlir/utils/tree-sitter-mlir/grammar.js b/mlir/utils/tree-sitter-mlir/grammar.js
index 9df1944..f7d916d 100644
--- a/mlir/utils/tree-sitter-mlir/grammar.js
+++ b/mlir/utils/tree-sitter-mlir/grammar.js
@@ -231,7 +231,7 @@ const common = {
token(seq(choice('si', 'ui', 'i'), /[1-9]/, repeat(/[0-9]/))),
float_type : $ => token(
choice('f16', 'f32', 'f64', 'f80', 'f128', 'bf16', 'f8E3M4', 'f8E4M3FN',
- 'f8E4M3', 'f8E5M2', 'f6E2M3FN', 'f6E3M2FN')),
+ 'f8E4M3', 'f8E5M2', 'f4E2M1FN', 'f6E2M3FN', 'f6E3M2FN')),
index_type : $ => token('index'),
none_type : $ => token('none'),
complex_type : $ => seq(token('complex'), '<', $._prim_type, '>'),
diff --git a/offload/include/OpenMP/OMPT/Callback.h b/offload/include/OpenMP/OMPT/Callback.h
index 89c5731..68cb43745 100644
--- a/offload/include/OpenMP/OMPT/Callback.h
+++ b/offload/include/OpenMP/OMPT/Callback.h
@@ -11,8 +11,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef OMPTARGET_OPENMP_OMPT_CALLBACK_H
-#define OMPTARGET_OPENMP_OMPT_CALLBACK_H
+#ifndef OFFLOAD_INCLUDE_OPENMP_OMPT_CALLBACK_H
+#define OFFLOAD_INCLUDE_OPENMP_OMPT_CALLBACK_H
#ifdef OMPT_SUPPORT
@@ -102,4 +102,4 @@ extern bool Initialized;
#define performIfOmptInitialized(stmt)
#endif // OMPT_SUPPORT
-#endif // OMPTARGET_OPENMP_OMPT_CALLBACK_H
+#endif // OFFLOAD_INCLUDE_OPENMP_OMPT_CALLBACK_H
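
[editor's note] The rename follows the LLVM convention of deriving the guard from the header's path relative to the repository root. For a hypothetical sibling header the same pattern would read:

  // offload/include/OpenMP/OMPT/Example.h (hypothetical file, for illustration)
  #ifndef OFFLOAD_INCLUDE_OPENMP_OMPT_EXAMPLE_H
  #define OFFLOAD_INCLUDE_OPENMP_OMPT_EXAMPLE_H
  // ... declarations ...
  #endif // OFFLOAD_INCLUDE_OPENMP_OMPT_EXAMPLE_H
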
diff --git a/offload/include/OpenMP/OMPT/Interface.h b/offload/include/OpenMP/OMPT/Interface.h
index 0dc1bad..43fb193 100644
--- a/offload/include/OpenMP/OMPT/Interface.h
+++ b/offload/include/OpenMP/OMPT/Interface.h
@@ -10,19 +10,19 @@
//
//===----------------------------------------------------------------------===//
-#ifndef _OMPTARGET_OMPTINTERFACE_H
-#define _OMPTARGET_OMPTINTERFACE_H
+#ifndef OFFLOAD_INCLUDE_OPENMP_OMPT_INTERFACE_H
+#define OFFLOAD_INCLUDE_OPENMP_OMPT_INTERFACE_H
// Only provide functionality if target OMPT support is enabled
#ifdef OMPT_SUPPORT
-#include <functional>
-#include <tuple>
-
#include "Callback.h"
#include "omp-tools.h"
#include "llvm/Support/ErrorHandling.h"
+#include <functional>
+#include <tuple>
+
#define OMPT_IF_BUILT(stmt) stmt
/// Callbacks for target regions require task_data representing the
@@ -326,4 +326,4 @@ private:
#define OMPT_IF_BUILT(stmt)
#endif
-#endif // _OMPTARGET_OMPTINTERFACE_H
+#endif // OFFLOAD_INCLUDE_OPENMP_OMPT_INTERFACE_H
diff --git a/offload/plugins-nextgen/common/CMakeLists.txt b/offload/plugins-nextgen/common/CMakeLists.txt
index 4dca542..fde4b2f 100644
--- a/offload/plugins-nextgen/common/CMakeLists.txt
+++ b/offload/plugins-nextgen/common/CMakeLists.txt
@@ -38,12 +38,6 @@ elseif(${LIBOMPTARGET_GPU_LIBC_SUPPORT})
endif()
endif()
-# If we have OMPT enabled include it in the list of sources.
-if (OMPT_TARGET_DEFAULT AND LIBOMPTARGET_OMPT_SUPPORT)
- target_sources(PluginCommon PRIVATE OMPT/OmptCallback.cpp)
- target_include_directories(PluginCommon PRIVATE OMPT)
-endif()
-
# Define the TARGET_NAME and DEBUG_PREFIX.
target_compile_definitions(PluginCommon PRIVATE
TARGET_NAME="PluginInterface"
diff --git a/offload/plugins-nextgen/common/OMPT/OmptCallback.cpp b/offload/plugins-nextgen/common/OMPT/OmptCallback.cpp
deleted file mode 100644
index fb8a156..0000000
--- a/offload/plugins-nextgen/common/OMPT/OmptCallback.cpp
+++ /dev/null
@@ -1,75 +0,0 @@
-//===---------- OmptCallback.cpp - Generic OMPT callbacks --------- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// OMPT support for PluginInterface
-//
-//===----------------------------------------------------------------------===//
-
-#ifdef OMPT_SUPPORT
-
-#include "llvm/Support/DynamicLibrary.h"
-
-#include <cstdlib>
-#include <cstring>
-#include <memory>
-
-#include "Shared/Debug.h"
-
-#include "OpenMP/OMPT/Callback.h"
-#include "OpenMP/OMPT/Connector.h"
-
-using namespace llvm::omp::target::ompt;
-
-bool llvm::omp::target::ompt::Initialized = false;
-
-ompt_get_callback_t llvm::omp::target::ompt::lookupCallbackByCode = nullptr;
-ompt_function_lookup_t llvm::omp::target::ompt::lookupCallbackByName = nullptr;
-
-int llvm::omp::target::ompt::initializeLibrary(ompt_function_lookup_t lookup,
- int initial_device_num,
- ompt_data_t *tool_data) {
- DP("OMPT: Executing initializeLibrary (libomptarget)\n");
-#define bindOmptFunctionName(OmptFunction, DestinationFunction) \
- if (lookup) \
- DestinationFunction = (OmptFunction##_t)lookup(#OmptFunction); \
- DP("OMPT: initializeLibrary (libomptarget) bound %s=%p\n", \
- #DestinationFunction, ((void *)(uint64_t)DestinationFunction));
-
- bindOmptFunctionName(ompt_get_callback, lookupCallbackByCode);
-#undef bindOmptFunctionName
-
- // Store pointer of 'ompt_libomp_target_fn_lookup' for use by the plugin
- lookupCallbackByName = lookup;
-
- Initialized = true;
-
- return 0;
-}
-
-void llvm::omp::target::ompt::finalizeLibrary(ompt_data_t *tool_data) {
- DP("OMPT: Executing finalizeLibrary (libomptarget)\n");
-}
-
-void llvm::omp::target::ompt::connectLibrary() {
- DP("OMPT: Entering connectLibrary (libomptarget)\n");
- /// Connect plugin instance with libomptarget
- OmptLibraryConnectorTy LibomptargetConnector("libomptarget");
- ompt_start_tool_result_t OmptResult;
-
- // Initialize OmptResult with the init and fini functions that will be
- // called by the connector
- OmptResult.initialize = ompt::initializeLibrary;
- OmptResult.finalize = ompt::finalizeLibrary;
- OmptResult.tool_data.value = 0;
-
- // Now call connect that causes the above init/fini functions to be called
- LibomptargetConnector.connect(&OmptResult);
- DP("OMPT: Exiting connectLibrary (libomptarget)\n");
-}
-
-#endif
diff --git a/offload/src/OpenMP/OMPT/Callback.cpp b/offload/src/OpenMP/OMPT/Callback.cpp
index f296428..ab0942e 100644
--- a/offload/src/OpenMP/OMPT/Callback.cpp
+++ b/offload/src/OpenMP/OMPT/Callback.cpp
@@ -10,14 +10,7 @@
//
//===----------------------------------------------------------------------===//
-#ifndef OMPT_SUPPORT
-
-extern "C" {
-/// Dummy definition when OMPT is disabled
-void ompt_libomptarget_connect() {}
-}
-
-#else // OMPT_SUPPORT is set
+#ifdef OMPT_SUPPORT
#include <cstdlib>
#include <cstring>
@@ -34,8 +27,6 @@ void ompt_libomptarget_connect() {}
#undef DEBUG_PREFIX
#define DEBUG_PREFIX "OMPT"
-using namespace llvm::omp::target::ompt;
-
// Define OMPT callback functions (bound to actual callbacks later on)
#define defineOmptCallback(Name, Type, Code) \
Name##_t llvm::omp::target::ompt::Name##_fn = nullptr;
@@ -43,6 +34,8 @@ FOREACH_OMPT_NOEMI_EVENT(defineOmptCallback)
FOREACH_OMPT_EMI_EVENT(defineOmptCallback)
#undef defineOmptCallback
+using namespace llvm::omp::target::ompt;
+
/// Forward declaration
class LibomptargetRtlFinalizer;
@@ -226,26 +219,26 @@ void Interface::endTargetDataRetrieve(int64_t SrcDeviceId, void *SrcPtrBegin,
endTargetDataOperation();
}
-void Interface::beginTargetSubmit(unsigned int numTeams) {
+void Interface::beginTargetSubmit(unsigned int NumTeams) {
if (ompt_callback_target_submit_emi_fn) {
// HostOpId is set by the tool. Invoke the tool supplied target submit EMI
// callback
ompt_callback_target_submit_emi_fn(ompt_scope_begin, &TargetData, &HostOpId,
- numTeams);
+ NumTeams);
} else if (ompt_callback_target_submit_fn) {
// HostOpId is set by the runtime
HostOpId = createOpId();
- ompt_callback_target_submit_fn(TargetData.value, HostOpId, numTeams);
+ ompt_callback_target_submit_fn(TargetData.value, HostOpId, NumTeams);
}
}
-void Interface::endTargetSubmit(unsigned int numTeams) {
+void Interface::endTargetSubmit(unsigned int NumTeams) {
// Only EMI callback handles end scope
if (ompt_callback_target_submit_emi_fn) {
// HostOpId is set by the tool. Invoke the tool supplied target submit EMI
// callback
ompt_callback_target_submit_emi_fn(ompt_scope_end, &TargetData, &HostOpId,
- numTeams);
+ NumTeams);
}
}
@@ -458,7 +451,7 @@ public:
void finalize() {
for (auto FinalizationFunction : RtlFinalizationFunctions)
- FinalizationFunction(/* tool_data */ nullptr);
+ FinalizationFunction(/*tool_data=*/nullptr);
RtlFinalizationFunctions.clear();
}
@@ -469,10 +462,11 @@ private:
int llvm::omp::target::ompt::initializeLibrary(ompt_function_lookup_t lookup,
int initial_device_num,
ompt_data_t *tool_data) {
- DP("Executing initializeLibrary (libomp)\n");
+ DP("Executing initializeLibrary\n");
#define bindOmptFunctionName(OmptFunction, DestinationFunction) \
- DestinationFunction = (OmptFunction##_t)lookup(#OmptFunction); \
- DP("initializeLibrary (libomp) bound %s=%p\n", #DestinationFunction, \
+ if (lookup) \
+ DestinationFunction = (OmptFunction##_t)lookup(#OmptFunction); \
+ DP("initializeLibrary bound %s=%p\n", #DestinationFunction, \
((void *)(uint64_t)DestinationFunction));
bindOmptFunctionName(ompt_get_callback, lookupCallbackByCode);
@@ -499,7 +493,7 @@ int llvm::omp::target::ompt::initializeLibrary(ompt_function_lookup_t lookup,
}
void llvm::omp::target::ompt::finalizeLibrary(ompt_data_t *data) {
- DP("Executing finalizeLibrary (libomp)\n");
+ DP("Executing finalizeLibrary\n");
// Before disabling OMPT, call the (plugin) finalizations that were registered
// with this library
LibraryFinalizer->finalize();
@@ -508,7 +502,7 @@ void llvm::omp::target::ompt::finalizeLibrary(ompt_data_t *data) {
}
void llvm::omp::target::ompt::connectLibrary() {
- DP("Entering connectLibrary (libomp)\n");
+ DP("Entering connectLibrary\n");
// Connect with libomp
static OmptLibraryConnectorTy LibompConnector("libomp");
static ompt_start_tool_result_t OmptResult;
@@ -531,23 +525,7 @@ void llvm::omp::target::ompt::connectLibrary() {
FOREACH_OMPT_EMI_EVENT(bindOmptCallback)
#undef bindOmptCallback
- DP("Exiting connectLibrary (libomp)\n");
+ DP("Exiting connectLibrary\n");
}
-extern "C" {
-/// Used for connecting libomptarget with a plugin
-void ompt_libomptarget_connect(ompt_start_tool_result_t *result) {
- DP("Enter ompt_libomptarget_connect\n");
- if (Initialized && result && LibraryFinalizer) {
- // Cache each fini function, so that they can be invoked on exit
- LibraryFinalizer->registerRtl(result->finalize);
- // Invoke the provided init function with the lookup function maintained
- // in this library so that callbacks maintained by this library are
- // retrieved.
- result->initialize(lookupCallbackByName,
- /* initial_device_num */ 0, /* tool_data */ nullptr);
- }
- DP("Leave ompt_libomptarget_connect\n");
-}
-}
#endif // OMPT_SUPPORT
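
[editor's note] The `if (lookup)` guard added to `bindOmptFunctionName` means an absent tool leaves the callback pointers null instead of dereferencing a null lookup function. A generic sketch of that pattern (illustrative names, not the OMPT API):

  #include <cstdio>

  using LookupFn = void *(*)(const char *Name);
  static void (*OnEvent)() = nullptr;

  // Bind OnEvent through a tool-supplied lookup, tolerating a null lookup;
  // the cast mirrors the style of the bind macro in the hunk above.
  void bindCallbacks(LookupFn Lookup) {
    if (Lookup)
      OnEvent = reinterpret_cast<void (*)()>(Lookup("on_event"));
    std::printf("bound on_event=%p\n", reinterpret_cast<void *>(OnEvent));
  }
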
diff --git a/offload/src/exports b/offload/src/exports
index 7bdc7d2..2406776 100644
--- a/offload/src/exports
+++ b/offload/src/exports
@@ -70,7 +70,6 @@ VERS1.0 {
__tgt_interop_init;
__tgt_interop_use;
__tgt_interop_destroy;
- ompt_libomptarget_connect;
__llvmPushCallConfiguration;
__llvmPopCallConfiguration;
llvmLaunchKernel;
diff --git a/polly/lib/CodeGen/RuntimeDebugBuilder.cpp b/polly/lib/CodeGen/RuntimeDebugBuilder.cpp
index 8a29b0a..5355fe2 100644
--- a/polly/lib/CodeGen/RuntimeDebugBuilder.cpp
+++ b/polly/lib/CodeGen/RuntimeDebugBuilder.cpp
@@ -23,7 +23,7 @@ llvm::Value *RuntimeDebugBuilder::getPrintableString(PollyIRBuilder &Builder,
// because CPU backends typically ignore the address space. For constant
// strings as returned by getPrintableString, the format string should instead
// directly spell out the string.
- return Builder.CreateGlobalStringPtr(Str, "", 4);
+ return Builder.CreateGlobalString(Str, "", 4);
}
Function *RuntimeDebugBuilder::getVPrintF(PollyIRBuilder &Builder) {
@@ -131,7 +131,7 @@ Function *RuntimeDebugBuilder::getPrintF(PollyIRBuilder &Builder) {
void RuntimeDebugBuilder::createPrintF(PollyIRBuilder &Builder,
std::string Format,
ArrayRef<Value *> Values) {
- Value *FormatString = Builder.CreateGlobalStringPtr(Format);
+ Value *FormatString = Builder.CreateGlobalString(Format);
std::vector<Value *> Arguments;
Arguments.push_back(FormatString);
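
[editor's note] Both call sites switch from `CreateGlobalStringPtr` to `CreateGlobalString`; with opaque pointers the emitted global is already usable as a pointer-typed value, so the separate `*Ptr` helper is redundant. A minimal sketch of the replacement call, assuming an `IRBuilder` with a valid insertion point:

  #include "llvm/ADT/StringRef.h"
  #include "llvm/IR/IRBuilder.h"

  // Emit a private constant string and return it as a pointer-typed Value,
  // mirroring the Polly call sites above (empty name, address space 4).
  llvm::Value *emitFormatString(llvm::IRBuilder<> &Builder, llvm::StringRef Str) {
    return Builder.CreateGlobalString(Str, "", /*AddressSpace=*/4);
  }
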
diff --git a/utils/bazel/llvm-project-overlay/libc/BUILD.bazel b/utils/bazel/llvm-project-overlay/libc/BUILD.bazel
index 0e392119..140d48c 100644
--- a/utils/bazel/llvm-project-overlay/libc/BUILD.bazel
+++ b/utils/bazel/llvm-project-overlay/libc/BUILD.bazel
@@ -725,6 +725,7 @@ libc_support_library(
deps = [
":__support_common",
":__support_cpp_type_traits",
+ ":__support_fputil_cast",
":__support_fputil_dyadic_float",
":__support_fputil_fenv_impl",
":__support_fputil_fp_bits",
@@ -805,6 +806,19 @@ libc_support_library(
)
libc_support_library(
+ name = "__support_fputil_cast",
+ hdrs = ["src/__support/FPUtil/cast.h"],
+ deps = [
+ ":__support_cpp_algorithm",
+ ":__support_cpp_type_traits",
+ ":__support_fputil_dyadic_float",
+ ":__support_fputil_fp_bits",
+ ":__support_macros_properties_types",
+ ":hdr_fenv_macros",
+ ],
+)
+
+libc_support_library(
name = "__support_fputil_division_and_remainder_operations",
hdrs = ["src/__support/FPUtil/DivisionAndRemainderOperations.h"],
deps = [
@@ -821,9 +835,12 @@ libc_support_library(
hdrs = ["src/__support/FPUtil/except_value_utils.h"],
deps = [
":__support_cpp_optional",
+ ":__support_fputil_cast",
":__support_fputil_fenv_impl",
":__support_fputil_fp_bits",
":__support_fputil_rounding_mode",
+ ":__support_macros_properties_cpu_features",
+ ":__support_macros_properties_types",
],
)
@@ -913,6 +930,7 @@ libc_support_library(
":__support_cpp_bit",
":__support_cpp_limits",
":__support_cpp_type_traits",
+ ":__support_fputil_cast",
":__support_fputil_dyadic_float",
":__support_fputil_fp_bits",
":__support_fputil_nearest_integer_operations",
@@ -970,6 +988,7 @@ libc_support_library(
":__support_common",
":__support_cpp_bit",
":__support_cpp_type_traits",
+ ":__support_fputil_cast",
":__support_fputil_dyadic_float",
":__support_fputil_fenv_impl",
":__support_fputil_fp_bits",
@@ -1075,6 +1094,7 @@ libc_support_library(
":__support_fputil_fenv_impl",
":__support_fputil_fp_bits",
":__support_fputil_multiply_add",
+ ":__support_fputil_rounding_mode",
":__support_macros_optimization",
],
)
@@ -1805,26 +1825,11 @@ libc_math_function(
],
)
-libc_math_function(
- name = "ceil",
- specializations = [
- "generic",
- ],
-)
+libc_math_function(name = "ceil")
-libc_math_function(
- name = "ceilf",
- specializations = [
- "generic",
- ],
-)
+libc_math_function(name = "ceilf")
-libc_math_function(
- name = "ceill",
- specializations = [
- "generic",
- ],
-)
+libc_math_function(name = "ceill")
libc_math_function(name = "ceilf128")
@@ -2106,19 +2111,9 @@ libc_math_function(
],
)
-libc_math_function(
- name = "floor",
- specializations = [
- "generic",
- ],
-)
+libc_math_function(name = "floor")
-libc_math_function(
- name = "floorf",
- specializations = [
- "generic",
- ],
-)
+libc_math_function(name = "floorf")
libc_math_function(name = "floorl")
@@ -2619,19 +2614,9 @@ libc_math_function(name = "rintl")
libc_math_function(name = "rintf128")
-libc_math_function(
- name = "round",
- specializations = [
- "generic",
- ],
-)
+libc_math_function(name = "round")
-libc_math_function(
- name = "roundf",
- specializations = [
- "generic",
- ],
-)
+libc_math_function(name = "roundf")
libc_math_function(name = "roundl")
@@ -2830,19 +2815,9 @@ libc_math_function(name = "totalordermagl")
libc_math_function(name = "totalordermagf128")
-libc_math_function(
- name = "trunc",
- specializations = [
- "generic",
- ],
-)
+libc_math_function(name = "trunc")
-libc_math_function(
- name = "truncf",
- specializations = [
- "generic",
- ],
-)
+libc_math_function(name = "truncf")
libc_math_function(name = "truncl")
diff --git a/utils/bazel/llvm-project-overlay/libc/libc_build_rules.bzl b/utils/bazel/llvm-project-overlay/libc/libc_build_rules.bzl
index ec37144..f298f81 100644
--- a/utils/bazel/llvm-project-overlay/libc/libc_build_rules.bzl
+++ b/utils/bazel/llvm-project-overlay/libc/libc_build_rules.bzl
@@ -129,7 +129,6 @@ def libc_function(
def libc_math_function(
name,
- specializations = None,
additional_deps = None):
"""Add a target for a math function.
@@ -142,14 +141,6 @@ def libc_math_function(
math function.
"""
additional_deps = additional_deps or []
- specializations = specializations or ["generic"]
- select_map = {}
- if "generic" in specializations:
- select_map["//conditions:default"] = ["src/math/generic/" + name + ".cpp"]
- if "aarch64" in specializations:
- select_map[PLATFORM_CPU_ARM64] = ["src/math/aarch64/" + name + ".cpp"]
- if "x86_64" in specializations:
- select_map[PLATFORM_CPU_X86_64] = ["src/math/x86_64/" + name + ".cpp"]
#TODO(michaelrj): Fix the floating point dependencies
OLD_FPUTIL_DEPS = [
@@ -166,7 +157,7 @@ def libc_math_function(
]
libc_function(
name = name,
- srcs = selects.with_or(select_map),
+ srcs = ["src/math/generic/" + name + ".cpp"],
hdrs = ["src/math/" + name + ".h"],
deps = [":__support_common"] + OLD_FPUTIL_DEPS + additional_deps,
)
diff --git a/utils/bazel/llvm-project-overlay/libc/test/src/math/libc_math_test_rules.bzl b/utils/bazel/llvm-project-overlay/libc/test/src/math/libc_math_test_rules.bzl
index d788705..16845ab 100644
--- a/utils/bazel/llvm-project-overlay/libc/test/src/math/libc_math_test_rules.bzl
+++ b/utils/bazel/llvm-project-overlay/libc/test/src/math/libc_math_test_rules.bzl
@@ -28,7 +28,9 @@ def math_test(name, hdrs = [], deps = [], **kwargs):
"//libc:__support_cpp_algorithm",
"//libc:__support_cpp_bit",
"//libc:__support_cpp_limits",
+ "//libc:__support_cpp_type_traits",
"//libc:__support_fputil_basic_operations",
+ "//libc:__support_fputil_cast",
"//libc:__support_fputil_fenv_impl",
"//libc:__support_fputil_fp_bits",
"//libc:__support_fputil_manipulation_functions",
@@ -36,6 +38,7 @@ def math_test(name, hdrs = [], deps = [], **kwargs):
"//libc:__support_fputil_normal_float",
"//libc:__support_macros_properties_architectures",
"//libc:__support_macros_properties_os",
+ "//libc:__support_macros_properties_types",
"//libc:__support_math_extras",
"//libc:__support_uint128",
"//libc:hdr_errno_macros",
diff --git a/utils/bazel/llvm-project-overlay/libc/test/src/sys/socket/BUILD.bazel b/utils/bazel/llvm-project-overlay/libc/test/src/sys/socket/BUILD.bazel
index 865f5e6..f7bce45 100644
--- a/utils/bazel/llvm-project-overlay/libc/test/src/sys/socket/BUILD.bazel
+++ b/utils/bazel/llvm-project-overlay/libc/test/src/sys/socket/BUILD.bazel
@@ -2,7 +2,7 @@
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-# Tests for LLVM libc string.h functions.
+# Tests for LLVM libc socket.h functions.
load("//libc/test:libc_test_rules.bzl", "libc_test")
diff --git a/utils/bazel/llvm-project-overlay/libc/utils/MPFRWrapper/BUILD.bazel b/utils/bazel/llvm-project-overlay/libc/utils/MPFRWrapper/BUILD.bazel
index adf4b23..ca21eae 100644
--- a/utils/bazel/llvm-project-overlay/libc/utils/MPFRWrapper/BUILD.bazel
+++ b/utils/bazel/llvm-project-overlay/libc/utils/MPFRWrapper/BUILD.bazel
@@ -46,6 +46,7 @@ libc_support_library(
"//libc:__support_cpp_string_view",
"//libc:__support_cpp_stringstream",
"//libc:__support_cpp_type_traits",
+ "//libc:__support_fputil_cast",
"//libc:__support_fputil_fp_bits",
"//libc:__support_fputil_fpbits_str",
"//libc:__support_macros_config",
diff --git a/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel b/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel
index 22a3ea8..304bee9 100644
--- a/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel
+++ b/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel
@@ -380,6 +380,7 @@ cc_library(
":TensorEncodingIncGen",
":config",
"//llvm:Support",
+ "//llvm:config",
],
)